gt
stringclasses
1 value
context
stringlengths
2.49k
119k
from __future__ import print_function, unicode_literals import argparse import getpass import inspect import logging import pkg_resources import platform import os import sys from colorlog import ColoredFormatter from six.moves import input from six.moves.urllib.parse import urlparse from rbtools import get_version_string from rbtools.api.capabilities import Capabilities from rbtools.api.client import RBClient from rbtools.api.errors import APIError, ServerInterfaceError from rbtools.clients import scan_usable_client from rbtools.clients.errors import OptionsCheckError from rbtools.utils.filesystem import (cleanup_tempfiles, get_home_path, is_exe_in_path, load_config) from rbtools.utils.process import die RB_MAIN = 'rbt' class CommandExit(Exception): def __init__(self, exit_code=0): super(CommandExit, self).__init__('Exit with code %s' % exit_code) self.exit_code = exit_code class CommandError(Exception): pass class ParseError(CommandError): pass class SmartHelpFormatter(argparse.HelpFormatter): """Smartly formats help text, preserving paragraphs.""" def _split_lines(self, text, width): # NOTE: This function depends on overriding _split_lines's behavior. # It is clearly documented that this function should not be # considered public API. However, given that the width we need # is calculated by HelpFormatter, and HelpFormatter has no # blessed public API, we have no other choice but to override # it here. lines = [] for line in text.splitlines(): lines += super(SmartHelpFormatter, self)._split_lines(line, width) lines.append('') return lines[:-1] class Option(object): """Represents an option for a command. The arguments to the constructor should be treated like those to argparse's add_argument, with the exception that the keyword argument 'config_key' is also valid. If config_key is provided it will be used to retrieve the config value as a default if the option is not specified. This will take precedence over the default argument. 
Serves as a wrapper around the ArgumentParser options, allowing us to specify defaults which will be grabbed from the configuration after it is loaded. """ def __init__(self, *opts, **attrs): self.opts = opts self.attrs = attrs def add_to(self, parent, config={}, argv=[]): """Adds the option to the parent parser or group. If the option maps to a configuration key, this will handle figuring out the correct default. Once we've determined the right set of flags, the option will be added to the parser. """ attrs = self.attrs.copy() if 'config_key' in attrs: config_key = attrs.pop('config_key') if config_key in config: attrs['default'] = config[config_key] if 'deprecated_in' in attrs: attrs['help'] += '\n[Deprecated since %s]' % attrs['deprecated_in'] # These are used for other purposes, and are not supported by # argparse. for attr in ('added_in', 'deprecated_in', 'extended_help', 'versions_changed'): attrs.pop(attr, None) parent.add_argument(*self.opts, **attrs) class OptionGroup(object): """Represents a named group of options. Each group has a name, an optional description, and a list of options. It serves as a way to organize related options, making it easier for users to scan for the options they want. This works like argparse's argument groups, but is designed to work with our special Option class. """ def __init__(self, name=None, description=None, option_list=[]): self.name = name self.description = description self.option_list = option_list def add_to(self, parser, config={}, argv=[]): """Adds the group and all its contained options to the parser.""" group = parser.add_argument_group(self.name, self.description) for option in self.option_list: option.add_to(group, config, argv) class LogLevelFilter(logging.Filter): """Filters log messages of a given level. Only log messages that have the specified level will be allowed by this filter. This prevents propagation of higher level types to lower log handlers. 
""" def __init__(self, level): self.level = level def filter(self, record): return record.levelno == self.level class Command(object): """Base class for rb commands. This class will handle retrieving the configuration, and parsing command line options. ``description`` is a string containing a short description of the command which is suitable for display in usage text. ``usage`` is a list of usage strings each showing a use case. These should not include the main rbt command or the command name; they will be added automatically. ``args`` is a string containing the usage text for what arguments the command takes. ``option_list`` is a list of command line options for the command. Each list entry should be an Option or OptionGroup instance. """ name = '' author = '' description = '' args = '' option_list = [] _global_options = [ Option('-d', '--debug', action='store_true', dest='debug', config_key='DEBUG', default=False, help='Displays debug output.', extended_help='This information can be valuable when debugging ' 'problems running the command.'), OptionGroup( name='RBTools Warning and Error Color Options', description='Default color settings for warnings and errors', option_list=[ Option('--color', dest='color', config_key='COLOR', default='auto', help='Enables color output.', extended_help='Enables color output on terminal.'), Option('--log-warn-color', dest='warn_color', config_key='LOG_WARNING_COLOR', default='yellow'), Option('--log-error-color', dest='error_color', config_key='LOG_ERROR_COLOR', default='red'), Option('--log-critical-color', dest='critical_color', config_key='LOG_CRITICAL_COLOR', default='bold_red'), ] ) ] server_options = OptionGroup( name='Review Board Server Options', description='Options necessary to communicate and authenticate ' 'with a Review Board server.', option_list=[ Option('--server', dest='server', metavar='URL', config_key='REVIEWBOARD_URL', default=None, help='Specifies the Review Board server to use.'), Option('--username', 
dest='username', metavar='USERNAME', config_key='USERNAME', default=None, help='The user name to be supplied to the Review Board ' 'server.'), Option('--password', dest='password', metavar='PASSWORD', config_key='PASSWORD', default=None, help='The password to be supplied to the Review Board ' 'server.'), Option('--ext-auth-cookies', dest='ext_auth_cookies', metavar='EXT_AUTH_COOKIES', config_key='EXT_AUTH_COOKIES', default=None, help='Use an external cookie store with pre-fetched ' 'authentication data. This is useful with servers ' 'that require extra web authentication to access ' 'Review Board, e.g. on single sign-on enabled sites.', added_in='0.7.5'), Option('--api-token', dest='api_token', metavar='TOKEN', config_key='API_TOKEN', default=None, help='The API token to use for authentication, instead of ' 'using a username and password.', added_in='0.7'), Option('--disable-proxy', action='store_false', dest='enable_proxy', config_key='ENABLE_PROXY', default=True, help='Prevents requests from going through a proxy ' 'server.'), Option('--disable-ssl-verification', action='store_true', dest='disable_ssl_verification', config_key='DISABLE_SSL_VERIFICATION', default=False, help='Disable SSL certificate verification. This is useful ' 'with servers that have self-signed certificates.', added_in='0.7.3'), Option('--disable-cookie-storage', config_key='SAVE_COOKIES', dest='save_cookies', action='store_false', default=True, help='Use an in-memory cookie store instead of writing ' 'them to a file. No credentials will be saved or ' 'loaded.', added_in='0.7.3'), Option('--disable-cache', dest='disable_cache', config_key='DISABLE_CACHE', action='store_true', default=False, help='Disable the HTTP cache completely. 
This will ' 'result in slower requests.', added_in='0.7.3'), Option('--disable-cache-storage', dest='in_memory_cache', config_key='IN_MEMORY_CACHE', action='store_true', default=False, help='Disable storing the API cache on the filesystem, ' 'instead keeping it in memory temporarily.', added_in='0.7.3'), Option('--cache-location', dest='cache_location', metavar='FILE', config_key='CACHE_LOCATION', default=None, help='The file to use for the API cache database.', added_in='0.7.3'), ] ) repository_options = OptionGroup( name='Repository Options', option_list=[ Option('--repository', dest='repository_name', metavar='NAME', config_key='REPOSITORY', default=None, help='The name of the repository configured on ' 'Review Board that matches the local repository.'), Option('--repository-url', dest='repository_url', metavar='URL', config_key='REPOSITORY_URL', default=None, help='The URL for a repository.' '\n' 'When generating diffs, this can be used for ' 'creating a diff outside of a working copy ' '(currently only supported by Subversion with ' 'specific revisions or --diff-filename, and by ' 'ClearCase with relative paths outside the view).' '\n' 'For Git, this specifies the origin URL of the ' 'current repository, overriding the origin URL ' 'supplied by the client.', versions_changed={ '0.6': 'Prior versions used the `REPOSITORY` setting ' 'in .reviewboardrc, and allowed a ' 'repository name to be passed to ' '--repository-url. This is no ' 'longer supported in 0.6 and higher. You ' 'may need to update your configuration and ' 'scripts appropriately.', }), Option('--repository-type', dest='repository_type', metavar='TYPE', config_key='REPOSITORY_TYPE', default=None, help='The type of repository in the current directory. ' 'In most cases this should be detected ' 'automatically, but some directory structures ' 'containing multiple repositories require this ' 'option to select the proper type. 
The ' '`rbt list-repo-types` command can be used to ' 'list the supported values.'), ] ) diff_options = OptionGroup( name='Diff Generation Options', description='Options for choosing what gets included in a diff, ' 'and how the diff is generated.', option_list=[ Option('--revision-range', dest='revision_range', metavar='REV1:REV2', default=None, help='Generates a diff for the given revision range.', deprecated_in='0.6'), Option('-I', '--include', metavar='FILENAME', dest='include_files', action='append', help='Includes only the specified file in the diff. ' 'This can be used multiple times to specify ' 'multiple files.' '\n' 'Supported by: Bazaar, CVS, Git, Mercurial, ' 'Perforce, and Subversion.', added_in='0.6'), Option('-X', '--exclude', metavar='PATTERN', dest='exclude_patterns', action='append', config_key='EXCLUDE_PATTERNS', help='Excludes all files that match the given pattern ' 'from the diff. This can be used multiple times to ' 'specify multiple patterns. UNIX glob syntax is used ' 'for pattern matching.' '\n' 'Supported by: Bazaar, CVS, Git, Mercurial, ' 'Perforce, and Subversion.', extended_help=( 'Patterns that begin with a path separator (/ on Mac ' 'OS and Linux, \\ on Windows) will be treated as being ' 'relative to the root of the repository. All other ' 'patterns are treated as being relative to the current ' 'working directory.' '\n' 'For example, to exclude all ".txt" files from the ' 'resulting diff, you would use "-X /\'*.txt\'".' '\n' 'When working with Mercurial, the patterns are ' 'provided directly to "hg" and are not limited to ' 'globs. For more information on advanced pattern ' 'syntax in Mercurial, run "hg help patterns"' '\n' 'When working with CVS all diffs are generated ' 'relative to the current working directory so ' 'patterns beginning with a path separator are treated ' 'as relative to the current working directory.' 
'\n' 'When working with Perforce, an exclude pattern ' 'beginning with `//` will be matched against depot ' 'paths; all other patterns will be matched against ' 'local paths.'), added_in='0.7'), Option('--parent', dest='parent_branch', metavar='BRANCH', config_key='PARENT_BRANCH', default=None, help='The parent branch this diff should be generated ' 'against (Bazaar/Git/Mercurial only).'), Option('--diff-filename', dest='diff_filename', default=None, metavar='FILENAME', help='Uploads an existing diff file, instead of ' 'generating a new diff.'), Option('--tracking-branch', dest='tracking', metavar='BRANCH', config_key='TRACKING_BRANCH', default=None, help='The remote tracking branch from which your local ' 'branch is derived (Git/Mercurial only).' '\n' 'For Git, the default is `origin/master`.' '\n' 'For Mercurial, the default is one of: ' '`reviewboard`, `origin`, `parent`, or `default`.'), ] ) perforce_options = OptionGroup( name='Perforce Options', description='Perforce-specific options for selecting the ' 'Perforce client and communicating with the ' 'repository.', option_list=[ Option('--p4-client', dest='p4_client', config_key='P4_CLIENT', default=None, metavar='CLIENT_NAME', help='The Perforce client name for the repository.'), Option('--p4-port', dest='p4_port', config_key='P4_PORT', default=None, metavar='PORT', help='The IP address for the Perforce server.'), Option('--p4-passwd', dest='p4_passwd', config_key='P4_PASSWD', default=None, metavar='PASSWORD', help='The Perforce password or ticket of the user ' 'in the P4USER environment variable.'), ] ) subversion_options = OptionGroup( name='Subversion Options', description='Subversion-specific options for controlling diff ' 'generation.', option_list=[ Option('--basedir', dest='basedir', config_key='BASEDIR', default=None, metavar='PATH', help='The path within the repository where the diff ' 'was generated. This overrides the detected path. 
' 'Often used when passing --diff-filename.'), Option('--svn-username', dest='svn_username', default=None, metavar='USERNAME', help='The username for the SVN repository.'), Option('--svn-password', dest='svn_password', default=None, metavar='PASSWORD', help='The password for the SVN repository.'), Option('--svn-prompt-password', dest='svn_prompt_password', default=False, action='store_true', help="Prompt for the user's svn password. This option " "overrides the password provided by the " "--svn-password option.", added_in='0.7.3'), Option('--svn-show-copies-as-adds', dest='svn_show_copies_as_adds', metavar='y|n', default=None, help='Treat copied or moved files as new files.' '\n' 'This is only supported in Subversion 1.7+.', added_in='0.5.2'), Option('--svn-changelist', dest='svn_changelist', default=None, metavar='ID', help='Generates the diff for review based on a ' 'local changelist.', deprecated_in='0.6'), ] ) tfs_options = OptionGroup( name='TFS Options', description='Team Foundation Server specific options for ' 'communicating with the TFS server.', option_list=[ Option('--tfs-login', dest='tfs_login', default=None, metavar='TFS_LOGIN', help='Logs in to TFS as a specific user (ie.' 'user@domain,password). Visit https://msdn.microsoft.' 'com/en-us/library/hh190725.aspx to learn about ' 'saving credentials for reuse.'), Option('--tf-cmd', dest='tf_cmd', default=None, metavar='TF_CMD', config_key='TF_CMD', help='The full path of where to find the tf command. 
This ' 'overrides any detected path.'), ] ) def __init__(self): self.log = logging.getLogger('rb.%s' % self.name) def create_parser(self, config, argv=[]): """Create and return the argument parser for this command.""" parser = argparse.ArgumentParser( prog=RB_MAIN, usage=self.usage(), add_help=False, formatter_class=SmartHelpFormatter) for option in self.option_list: option.add_to(parser, config, argv) for option in self._global_options: option.add_to(parser, config, argv) return parser def post_process_options(self): if self.options.disable_ssl_verification: try: import ssl ssl._create_unverified_context() except: raise CommandError('The --disable-ssl-verification flag is ' 'only available with Python 2.7.9+') def usage(self): """Return a usage string for the command.""" usage = '%%(prog)s %s [options] %s' % (self.name, self.args) if self.description: return '%s\n\n%s' % (usage, self.description) else: return usage def init_logging(self): """Initializes logging for the command. This will set up different log handlers based on the formatting we want for the given levels. The INFO log handler will just show the text, like a print statement. WARNING and higher will show the level name as a prefix, in the form of "LEVEL: message". If debugging is enabled, a debug log handler will be set up showing debug messages in the form of ">>> message", making it easier to distinguish between debugging and other messages. """ root = logging.getLogger() if self.options.debug: handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('>>> %(message)s')) handler.setLevel(logging.DEBUG) handler.addFilter(LogLevelFilter(logging.DEBUG)) root.addHandler(handler) root.setLevel(logging.DEBUG) else: root.setLevel(logging.INFO) # Handler for info messages. We'll treat these like prints. 
handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(message)s')) handler.setLevel(logging.INFO) handler.addFilter(LogLevelFilter(logging.INFO)) root.addHandler(handler) # Handler for warnings, errors, and criticals. They'll show the # level prefix and the message. handler = logging.StreamHandler() if (self.options.color == 'always' or (self.options.color == 'auto' and sys.stderr.isatty())): handler.setFormatter(ColoredFormatter( '%(log_color)s%(levelname)-8s%(reset)s' '%(message_log_color)s%(message)s', datefmt=None, reset=True, log_colors={ 'WARNING': self.options.warn_color, 'ERROR': self.options.error_color, 'CRITICAL': self.options.critical_color, }, secondary_log_colors={ 'message': { 'WARNING': self.options.warn_color, 'ERROR': self.options.error_color, 'CRITICAL': self.options.critical_color, } }, style='%' )) else: handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) handler.setLevel(logging.WARNING) root.addHandler(handler) logging.debug('RBTools %s', get_version_string()) logging.debug('Python %s', sys.version) logging.debug('Running on %s', platform.platform()) logging.debug('Home = %s', get_home_path()) logging.debug('Current directory = %s', os.getcwd()) def run_from_argv(self, argv): """Execute the command using the provided arguments. The options and commandline arguments will be parsed from ``argv`` and the commands ``main`` method will be called. """ self.config = load_config() parser = self.create_parser(self.config, argv) parser.add_argument('args', nargs=argparse.REMAINDER) self.options = parser.parse_args(argv[2:]) args = self.options.args # Check that the proper number of arguments have been provided. argspec = inspect.getargspec(self.main) minargs = len(argspec[0]) - 1 maxargs = minargs # Arguments that have a default value are considered optional. 
if argspec[3] is not None: minargs -= len(argspec[3]) if argspec[1] is not None: maxargs = None if len(args) < minargs or (maxargs is not None and len(args) > maxargs): parser.error('Invalid number of arguments provided') sys.exit(1) self.init_logging() try: exit_code = self.main(*args) or 0 except CommandError as e: if isinstance(e, ParseError): parser.error(e) elif self.options.debug: raise logging.error(e) exit_code = 1 except CommandExit as e: exit_code = e.exit_code except Exception as e: # If debugging is on, we'll let python spit out the # stack trace and report the exception, otherwise # we'll suppress the trace and print the exception # manually. if self.options.debug: raise logging.critical(e) exit_code = 1 cleanup_tempfiles() sys.exit(exit_code) def initialize_scm_tool(self, client_name=None): """Initialize the SCM tool for the current working directory.""" repository_info, tool = scan_usable_client(self.config, self.options, client_name=client_name) try: tool.check_options() except OptionsCheckError as e: sys.stderr.write('%s\n' % e) sys.exit(1) return repository_info, tool def setup_tool(self, tool, api_root=None): """Performs extra initialization on the tool. If api_root is not provided we'll assume we want to initialize the tool using only local information """ tool.capabilities = self.get_capabilities(api_root) def get_server_url(self, repository_info, tool): """Returns the Review Board server url.""" if self.options.server: server_url = self.options.server else: server_url = tool.scan_for_server(repository_info) if not server_url: print('Unable to find a Review Board server for this source code ' 'tree.') sys.exit(1) return server_url def credentials_prompt(self, realm, uri, username=None, password=None, *args, **kwargs): """Prompt the user for credentials using the command line. This will prompt the user, and then return the provided username and password. This is used as a callback in the API when the user requires authorization. 
""" if username is None or password is None: if getattr(self.options, 'diff_filename', None) == '-': die('HTTP authentication is required, but cannot be ' 'used with --diff-filename=-') # Interactive prompts don't work correctly when input doesn't come # from a terminal. This could seem to be a rare case not worth # worrying about, but this is what happens when using native # Python in Cygwin terminal emulator under Windows and it's very # puzzling to the users, especially because stderr is also _not_ # flushed automatically in this case, so the program just appears # to hang. if not sys.stdin.isatty(): logging.error('Authentication is required but input is not a ' 'tty.') if sys.platform == 'win32': logging.info('Check that you are not running this script ' 'from a Cygwin terminal emulator (or use ' 'Cygwin Python to run it).') raise CommandError('Unable to log in to Review Board.') print() print('Please log in to the Review Board server at %s.' % urlparse(uri)[1]) # getpass will write its prompt to stderr but input # writes to stdout. See bug 2831. if username is None: sys.stderr.write('Username: ') username = input() if password is None: password = getpass.getpass(b'Password: ') return username, password def otp_token_prompt(self, uri, token_method, *args, **kwargs): """Prompt the user for a one-time password token. Their account is configured with two-factor authentication. The server will have sent a token to their configured mobile device or application. The user will be prompted for this token. 
""" if getattr(self.options, 'diff_filename', None) == '-': die('A two-factor authentication token is required, but cannot ' 'be used with --diff-filename=-') print() print('Please enter your two-factor authentication token for Review ' 'Board.') if token_method == 'sms': print('You should be getting a text message with ' 'an authentication token.') print('Enter the token below.') elif token_method == 'call': print('You should be getting an automated phone call with ' 'an authentication token.') print('Enter the token below.') elif token_method == 'generator': print('Enter the token shown on your token generator app below.') print() return getpass.getpass(b'Token: ') def _make_api_client(self, server_url): """Return an RBClient object for the server. The RBClient will be instantiated with the proper arguments for talking to the provided Review Board server url. """ return RBClient( server_url, username=self.options.username, password=self.options.password, api_token=self.options.api_token, auth_callback=self.credentials_prompt, otp_token_callback=self.otp_token_prompt, disable_proxy=not self.options.enable_proxy, verify_ssl=not self.options.disable_ssl_verification, allow_caching=not self.options.disable_cache, cache_location=self.options.cache_location, in_memory_cache=self.options.in_memory_cache, save_cookies=self.options.save_cookies, ext_auth_cookies=self.options.ext_auth_cookies) def get_api(self, server_url): """Returns an RBClient instance and the associated root resource. Commands should use this method to gain access to the API, instead of instantianting their own client. 
""" if not urlparse(server_url).scheme: server_url = '%s%s' % ('http://', server_url) api_client = self._make_api_client(server_url) try: api_root = api_client.get_root() except ServerInterfaceError as e: raise CommandError('Could not reach the Review Board ' 'server at %s: %s' % (server_url, e)) except APIError as e: raise CommandError('Unexpected API Error: %s' % e) return api_client, api_root def get_capabilities(self, api_root): """Retrieve Capabilities from the server and return them.""" if 'capabilities' in api_root: # Review Board 2.0+ provides capabilities in the root resource. return Capabilities(api_root.capabilities) info = api_root.get_info() if 'capabilities' in info: return Capabilities(info.capabilities) else: return Capabilities({}) def main(self, *args): """The main logic of the command. This method should be overridden to implement the commands functionality. """ raise NotImplementedError() def find_entry_point_for_command(command_name): """Return an entry point for the given rbtools command. If no entry point is found, None is returned. """ # Attempt to retrieve the command class from the entry points. We # first look in rbtools for the commands, and failing that, we look # for third-party commands. entry_point = pkg_resources.get_entry_info('rbtools', 'rbtools_commands', command_name) if not entry_point: try: entry_point = next(pkg_resources.iter_entry_points( 'rbtools_commands', command_name)) except StopIteration: # There aren't any custom entry points defined. pass return entry_point def command_exists(cmd_name): """Determine if the given command exists. This function checks for the existence of an RBTools command entry point with the given name and an executable named rbt-"cmd_name" on the path. Aliases are not considered. """ return (find_entry_point_for_command(cmd_name) or is_exe_in_path('rbt-%s' % cmd_name))
""" Retrieving the results of large queries from the INDRA Database REST API generally involves multiple individual calls. The Processor classes defined here manage the retrieval process for results of two types, Statements and Statement hashes. Instances of these Processors are returned by the query functions in :py:mod:`indra.sources.indra_db_rest.api`. """ import logging from copy import deepcopy from threading import Thread from datetime import datetime from requests import Timeout from indra.statements import stmts_from_json from indra.util.statement_presentation import get_available_source_counts, \ get_available_ev_counts from .query import Query from .util import RecordableLogger from .util import logger as util_logger from .exceptions import IndraDBRestResponseError logger = logging.getLogger('indra_db_rest.query_processor') request_logger = RecordableLogger('indra_db_rest.request_logs') class IndraDBQueryProcessor: """The parent of all db query processors. Parameters ---------- query : :py:class:`Query` The query to be evaluated in return for statements. limit : int or None Select the maximum number of statements to return. When set less than 500 the effect is much the same as setting persist to false, and will guarantee a faster response. Default is None. sort_by : str or None Options are currently 'ev_count' or 'belief'. Results will return in order of the given parameter. If None, results will be turned in an arbitrary order. persist : bool Default is True. When False, if a query comes back limited (not all results returned), just give up and pass along what was returned. Otherwise, make further queries to get the rest of the data (which may take some time). timeout : positive int or None If an int, return after `timeout` seconds, even if query is not done. Default is None. strict_stop : bool If True, the query will only be given timeout to complete before being abandoned entirely. 
Otherwise the timeout will simply wait for the thread to join for `timeout` seconds before returning, allowing other work to continue while the query runs in the background. The default is False. NOTE: in practice, due to overhead, the precision of the timeout is only around +/-0.1 seconds. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 3 api_key : str or None Override or use in place of the API key given in the INDRA config file. """ result_type = NotImplemented def __init__(self, query: Query, limit=None, sort_by='ev_count', timeout=None, strict_stop=False, persist=True, tries=3, api_key=None): self.query = query self.limit = limit self.sort_by = sort_by self.tries = tries self.__strict_stop = strict_stop self.__timeout = timeout self.__timed_out = False self.__offset = 0 self.__quota = limit self.__api_key = api_key self.__canceled = False self.__start_time = None self.__th = None self.requests_completed = 0 self._evidence_counts = {} self._belief_scores = {} self._source_counts = {} if limit != 0: self._run(persist=persist) # Metadata Retrieval methods. 
def get_ev_counts(self): """Get a dictionary of evidence counts.""" return self._evidence_counts.copy() def get_belief_scores(self): """Get a dictionary of belief scores.""" return self._belief_scores.copy() def get_source_counts(self): """Get the source counts as a dict per statement hash.""" return deepcopy(self._source_counts) # Process control methods def cancel(self): """Cancel the job, stopping the thread running in the background.""" self.__canceled = True def is_working(self): """Check if the thread is running.""" if not self.__th: return False return self.__th.is_alive() def timed_out(self): """Check if the processor timed out.""" return self.__timed_out def wait_until_done(self, timeout=None): """Wait for the background load to complete.""" if not self.__th: raise IndraDBRestResponseError("There is no thread waiting to " "complete.") start = datetime.now() self.__th.join(timeout) dt = datetime.now() - start if self.__th.is_alive(): logger.warning("Timed out after %0.3f seconds waiting for " "statement load to complete." % dt.total_seconds()) ret = False else: logger.info("Waited %0.3f seconds for statements to finish " "loading." 
                % dt.total_seconds())
            ret = True
        return ret

    @staticmethod
    def print_quiet_logs():
        """Print the logs that were suppressed during the query."""
        print(request_logger.get_quiet_logs())

    # Helper methods

    def _get_next_offset(self):
        """Get the offset of the next web request that will be made."""
        return self.__offset

    def _get_next_limit(self):
        """Get the limit of the next web request that will be made."""
        return self.__quota

    def _mark_start(self):
        # Record the wall-clock start of the overall (multi-request) query so
        # strict-stop timeouts can be enforced across requests.
        self.__start_time = datetime.now()

    def _time_since_start(self):
        # Seconds elapsed since _mark_start() was called.
        dt = datetime.now() - self.__start_time
        return dt.total_seconds()

    def _strict_time_is_up(self):
        # Only meaningful in strict-stop mode: the whole query (all pages)
        # shares a single timeout budget.
        if self.__start_time is not None and self.__strict_stop:
            if self._time_since_start() > self.__timeout:
                return True
        return False

    def _done(self):
        # Finished when: explicitly cancelled, the server reported no further
        # page (offset is None), at least one page was fetched and the result
        # quota is exhausted, or the strict-stop deadline has passed.
        return (self.__canceled
                or self.__offset is None
                or self.__offset > 0 and self.__quota == 0
                or self._strict_time_is_up())

    def _set_special_params(self, **params):
        # Extra keyword arguments that subclasses want forwarded to every
        # `query.get` call (e.g. ev_limit/filter_ev for statements).
        self.__special_params = params

    def _run_query(self):
        """Run a single paged web request, updating offset/quota state."""
        # If we are in strict stop mode, we want to be sure we give up after
        # the given overall timeout, so we need to account for time spend on
        # other queries.
        if self.__strict_stop:
            query_timeout = self.__timeout - self._time_since_start()
            if query_timeout <= 0:
                return
        else:
            query_timeout = None

        # Run the query.
        try:
            r = self.requests_completed
            # Build an English ordinal ("1st", "2nd", ...) for log readability.
            nth = f"{r}{['st', 'nd', 'rd'][r-1] if 0 < r < 4 else 'th'}"
            request_logger.info(f"Running {nth} request for {self.result_type}")
            request_logger.info(f" LIMIT: {self.__quota}")
            request_logger.info(f" OFFSET: {self.__offset}")
            if query_timeout:
                request_logger.info(f" TIMEOUT: {query_timeout}")
            result = self.query.get(self.result_type, offset=self.__offset,
                                    limit=self.__quota, sort_by=self.sort_by,
                                    timeout=query_timeout, n_tries=self.tries,
                                    api_key=self.__api_key,
                                    **self.__special_params)
        except Timeout:
            # Make sure this is the timeout we think it is.
            self.__timed_out = True
            # Re-raise unless a strict-stop deadline expiry explains it.
            if not self.__strict_stop or not self._strict_time_is_up():
                raise
            logger.info(f"Query timed out after {self._time_since_start()} "
                        f"seconds, {self.requests_completed} requests, and "
                        f"after retrieving {len(self._evidence_counts)} "
                        f"results, with {self.__quota} remaining.")
            return

        # Update results
        self._evidence_counts.update(result.evidence_counts)
        self._belief_scores.update(result.belief_scores)
        self._handle_new_result(result, self._source_counts)

        # Update the quota
        if self.__quota is not None:
            self.__quota -= len(result.results)

        # Increment the page
        self.__offset = result.next_offset

        # Increment the number of queries run.
        self.requests_completed += 1
        return

    def _run_queries(self, persist):
        """Use paging to get all statements requested."""
        self._mark_start()
        self._run_query()

        # Check if we want to keep going.
        if not persist:
            self._compile_results()
            return

        # Get the rest of the content.
        while not self._done():
            self._run_query()

        # Create the actual statements.
        self._compile_results()

        # This is end of the loop, one way or another. Restore logging if it
        # was redirected.
        request_logger.unquiet()
        util_logger.unquiet()
        return

    def _run(self, persist=True):
        """Kick off the paged retrieval on a background thread.

        If no timeout is set the thread is joined to completion; otherwise we
        wait at most `timeout` seconds and may leave the request running in
        the background (with its logging quieted).
        """
        # Quiet the lowest level logger.
        util_logger.quiet()

        # Only get the query english if we aren't on a time constraint.
        self.__timed_out = False
        if self.__timeout is None:
            query_english = self.query.get_query_english()
            logger.info(f"Retrieving {self.result_type} that {query_english}.")
        else:
            logger.info(f"Retrieving {self.result_type} for {self.query}.")

        # Handle the content if we were limited.
        self.__th = Thread(target=self._run_queries, args=[persist])
        self.__th.start()

        if self.__timeout is None:
            logger.debug("Waiting for thread to complete...")
            self.__th.join()
        else:
            if self.__timeout:  # is not 0
                logger.debug(f"Waiting at most {self.__timeout} seconds for "
                             f"thread to complete...")
                self.__th.join(self.__timeout)
            if not self._done():
                request_logger.quiet()
                logger.info("Leaving request to background thread. Logs "
                            "may be viewed using the `print_quiet_logs()` "
                            "method.")
        return

    # Child defined methods
    def _compile_results(self):
        # Subclasses turn the accumulated raw results into final objects.
        raise NotImplementedError()

    def _handle_new_result(self, result, source_counts):
        # Subclasses merge one page of results into their accumulators.
        raise NotImplementedError()


class DBQueryStatementProcessor(IndraDBQueryProcessor):
    """A Processor to get Statements from the server.

    For information on thread control and other methods, see the docs for
    :py:class:`IndraDBQueryProcessor`.

    Parameters
    ----------
    query : :py:class:`Query`
        The query to be evaluated in return for statements.
    limit : int or None
        Select the maximum number of statements to return. When set less than
        500 the effect is much the same as setting persist to false, and will
        guarantee a faster response. Default is None.
    ev_limit : int or None
        Limit the amount of evidence returned per Statement. Default is 100.
    filter_ev : bool
        Indicate whether evidence should have the same filters applied as
        the statements themselves, where appropriate (e.g. in the case of a
        filter by paper).
    sort_by : str or None
        Options are currently 'ev_count' or 'belief'. Results will return in
        order of the given parameter. If None, results will be turned in an
        arbitrary order.
    persist : bool
        Default is True. When False, if a query comes back limited (not all
        results returned), just give up and pass along what was returned.
        Otherwise, make further queries to get the rest of the data (which may
        take some time).
    timeout : positive int or None
        If an int, return after `timeout` seconds, even if query is not done.
        Default is None.
strict_stop : bool If True, the query will only be given timeout to complete before being abandoned entirely. Otherwise the timeout will simply wait for the thread to join for `timeout` seconds before returning, allowing other work to continue while the query runs in the background. The default is False. use_obtained_counts : Optional[bool] If True, evidence counts and source counts are reported based on the actual evidences returned for each statement in this query (as opposed to all existing evidences, even if not all were returned). Default: False tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 3. api_key : str or None Override or use in place of the API key given in the INDRA config file. """ result_type = 'statements' def __init__(self, query: Query, limit=None, sort_by='ev_count', ev_limit=10, filter_ev=True, timeout=None, strict_stop=False, persist=True, use_obtained_counts=False, tries=3, api_key=None): self.statements = [] self.statements_sample = None self.__statement_jsons = {} self.__started = False self.use_obtained_counts = use_obtained_counts self._set_special_params(ev_limit=ev_limit, filter_ev=filter_ev) super(DBQueryStatementProcessor, self).\ __init__(query, limit=limit, sort_by=sort_by, timeout=timeout, strict_stop=strict_stop, persist=persist, tries=tries, api_key=api_key) # Metadata Retrieval methods. 
def get_ev_count_by_hash(self, stmt_hash): """Get the total evidence count for a statement hash.""" return self._evidence_counts.get(stmt_hash, 0) def get_ev_count(self, stmt): """Get the total evidence count for a statement.""" return self.get_ev_count_by_hash(stmt.get_hash(shallow=True)) def get_belief_score_by_hash(self, stmt_hash): """Get the belief score for a statement hash.""" return self._belief_scores.get(stmt_hash, 0) def get_belief_score_by_stmt(self, stmt): """Get the belief score for a statement.""" return self.get_belief_score_by_hash(stmt.get_hash(shallow=True)) def get_hash_statements_dict(self): """Return a dict of Statements keyed by hashes.""" res = {stmt_hash: stmts_from_json([stmt])[0] for stmt_hash, stmt in self.__statement_jsons.items()} return res def get_source_count_by_hash(self, stmt_hash): """Get the source counts for a given statement.""" return self._source_counts.get(stmt_hash, {}) def get_source_count(self, stmt): """Get the source counts for a given statement.""" return self.get_source_count_by_hash(stmt.get_hash(shallow=True)) # Result merging methods def merge_results(self, other_processor): """Merge the results of this processor with those of another.""" if not isinstance(other_processor, self.__class__): raise ValueError(f"Can only extend with another " f"{self.__class__.__name__} instance.") # Where there is overlap, there _should_ be agreement. self._evidence_counts.update(other_processor._evidence_counts) self._source_counts.update(other_processor._source_counts) self._belief_scores.update(other_processor._belief_scores) # Merge the statement JSONs. for k, sj in other_processor.__statement_jsons.items(): if k not in self.__statement_jsons: self.__statement_jsons[k] = sj # This should be most of them else: # This should only happen rarely. 
for evj in sj['evidence']: self.__statement_jsons[k]['evidence'].append(evj) # Recompile the statements self._compile_results() return # Helper methods def _handle_new_result(self, result, source_counts): """Merge these statement jsons with new jsons.""" # Merge counts. source_counts.update(result.source_counts) # Merge JSONs for k, sj in result.results.items(): if k not in self.__statement_jsons: self.__statement_jsons[k] = sj # This should be most of them else: # This should only happen rarely. for evj in sj['evidence']: self.__statement_jsons[k]['evidence'].append(evj) # Add to the sample. if not self.__started: self.statements_sample = stmts_from_json(result.results.values()) self.__started = True return def _compile_results(self): """Generate statements from the jsons.""" self.statements = stmts_from_json(self.__statement_jsons.values()) if self.use_obtained_counts: self.__source_counts = get_available_source_counts(self.statements) self.__evidence_counts = get_available_ev_counts(self.statements) class DBQueryHashProcessor(IndraDBQueryProcessor): """A processor to get hashes from the server. Parameters ---------- query : :py:class:`Query` The query to be evaluated in return for statements. limit : int or None Select the maximum number of statements to return. When set less than 500 the effect is much the same as setting persist to false, and will guarantee a faster response. Default is None. sort_by : str or None Options are currently 'ev_count' or 'belief'. Results will return in order of the given parameter. If None, results will be turned in an arbitrary order. persist : bool Default is True. When False, if a query comes back limited (not all results returned), just give up and pass along what was returned. Otherwise, make further queries to get the rest of the data (which may take some time). timeout : positive int or None If an int, return after `timeout` seconds, even if query is not done. Default is None. 
tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 3. """ result_type = 'hashes' def __init__(self, *args, **kwargs): self.hashes = [] super(DBQueryHashProcessor, self).__init__(*args, **kwargs) def _handle_new_result(self, result, source_counts): source_counts.update(result.source_counts) self.hashes.extend(result.results) def _compile_results(self): pass
#!/usr/bin/env python # # Copyright (C) 2011 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Build image output_image_file from input_directory and properties_file. Usage: build_image input_directory properties_file output_image_file """ import os import os.path import subprocess import sys import commands import shutil import tempfile FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7" def RunCommand(cmd): """ Echo and run the given command Args: cmd: the command represented as a list of strings. Returns: The exit code. """ print "Running: ", " ".join(cmd) p = subprocess.Popen(cmd) p.communicate() return p.returncode def GetVerityTreeSize(partition_size): cmd = "build_verity_tree -s %d" cmd %= partition_size status, output = commands.getstatusoutput(cmd) if status: print output return False, 0 return True, int(output) def GetVerityMetadataSize(partition_size): cmd = "system/extras/verity/build_verity_metadata.py -s %d" cmd %= partition_size status, output = commands.getstatusoutput(cmd) if status: print output return False, 0 return True, int(output) def AdjustPartitionSizeForVerity(partition_size): """Modifies the provided partition size to account for the verity metadata. This information is used to size the created image appropriately. Args: partition_size: the size of the partition to be verified. Returns: The size of the partition adjusted for verity metadata. 
""" success, verity_tree_size = GetVerityTreeSize(partition_size) if not success: return 0; success, verity_metadata_size = GetVerityMetadataSize(partition_size) if not success: return 0 return partition_size - verity_tree_size - verity_metadata_size def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict): cmd = ("build_verity_tree -A %s %s %s" % (FIXED_SALT, sparse_image_path, verity_image_path)) print cmd status, output = commands.getstatusoutput(cmd) if status: print "Could not build verity tree! Error: %s" % output return False root, salt = output.split() prop_dict["verity_root_hash"] = root prop_dict["verity_salt"] = salt return True def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt, block_device, signer_path, key): cmd = ("system/extras/verity/build_verity_metadata.py %s %s %s %s %s %s %s" % (image_size, verity_metadata_path, root_hash, salt, block_device, signer_path, key)) print cmd status, output = commands.getstatusoutput(cmd) if status: print "Could not build verity metadata! Error: %s" % output return False return True def Append2Simg(sparse_image_path, unsparse_image_path, error_message): """Appends the unsparse image to the given sparse image. Args: sparse_image_path: the path to the (sparse) image unsparse_image_path: the path to the (unsparse) image Returns: True on success, False on failure. 
""" cmd = "append2simg %s %s" cmd %= (sparse_image_path, unsparse_image_path) print cmd status, output = commands.getstatusoutput(cmd) if status: print "%s: %s" % (error_message, output) return False return True def BuildVerifiedImage(data_image_path, verity_image_path, verity_metadata_path): if not Append2Simg(data_image_path, verity_metadata_path, "Could not append verity metadata!"): return False if not Append2Simg(data_image_path, verity_image_path, "Could not append verity tree!"): return False return True def UnsparseImage(sparse_image_path, replace=True): img_dir = os.path.dirname(sparse_image_path) unsparse_image_path = "unsparse_" + os.path.basename(sparse_image_path) unsparse_image_path = os.path.join(img_dir, unsparse_image_path) if os.path.exists(unsparse_image_path): if replace: os.unlink(unsparse_image_path) else: return True, unsparse_image_path inflate_command = ["simg2img", sparse_image_path, unsparse_image_path] exit_code = RunCommand(inflate_command) if exit_code != 0: os.remove(unsparse_image_path) return False, None return True, unsparse_image_path def MakeVerityEnabledImage(out_file, prop_dict): """Creates an image that is verifiable using dm-verity. Args: out_file: the location to write the verifiable image at prop_dict: a dictionary of properties required for image creation and verification Returns: True on success, False otherwise. 
""" # get properties image_size = prop_dict["partition_size"] block_dev = prop_dict["verity_block_device"] signer_key = prop_dict["verity_key"] signer_path = prop_dict["verity_signer_cmd"] # make a tempdir tempdir_name = tempfile.mkdtemp(suffix="_verity_images") # get partial image paths verity_image_path = os.path.join(tempdir_name, "verity.img") verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img") # build the verity tree and get the root hash and salt if not BuildVerityTree(out_file, verity_image_path, prop_dict): shutil.rmtree(tempdir_name, ignore_errors=True) return False # build the metadata blocks root_hash = prop_dict["verity_root_hash"] salt = prop_dict["verity_salt"] if not BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path, signer_key): shutil.rmtree(tempdir_name, ignore_errors=True) return False # build the full verified image if not BuildVerifiedImage(out_file, verity_image_path, verity_metadata_path): shutil.rmtree(tempdir_name, ignore_errors=True) return False shutil.rmtree(tempdir_name, ignore_errors=True) return True def BuildImage(in_dir, prop_dict, out_file, fs_config=None, fc_config=None, block_list=None): """Build an image to out_file from in_dir with property prop_dict. Args: in_dir: path of input directory. prop_dict: property dictionary. out_file: path of the output image file. fs_config: path to the fs_config file (typically META/filesystem_config.txt). If None then the configuration in the local client will be used. fc_config: path to the SELinux file_contexts file. If None then the value from prop_dict['selinux_fc'] will be used. Returns: True iff the image is built successfully. 
""" build_command = [] fs_type = prop_dict.get("fs_type", "") run_fsck = False is_verity_partition = "verity_block_device" in prop_dict verity_supported = prop_dict.get("verity") == "true" # adjust the partition size to make room for the hashes if this is to be verified if verity_supported and is_verity_partition: partition_size = int(prop_dict.get("partition_size")) adjusted_size = AdjustPartitionSizeForVerity(partition_size) if not adjusted_size: return False prop_dict["partition_size"] = str(adjusted_size) prop_dict["original_partition_size"] = str(partition_size) if fs_type.startswith("ext"): build_command = ["mkuserimg.sh"] if "extfs_sparse_flag" in prop_dict: build_command.append(prop_dict["extfs_sparse_flag"]) run_fsck = True build_command.extend([in_dir, out_file, fs_type, prop_dict["mount_point"]]) build_command.append(prop_dict["partition_size"]) if "timestamp" in prop_dict: build_command.extend(["-T", str(prop_dict["timestamp"])]) if fs_config is not None: build_command.extend(["-C", fs_config]) if block_list is not None: build_command.extend(["-B", block_list]) if fc_config is not None: build_command.append(fc_config) elif "selinux_fc" in prop_dict: build_command.append(prop_dict["selinux_fc"]) elif fs_type.startswith("f2fs"): build_command = ["mkf2fsuserimg.sh"] build_command.extend([out_file, prop_dict["partition_size"]]) else: build_command = ["mkyaffs2image", "-f"] if prop_dict.get("mkyaffs2_extra_flags", None): build_command.extend(prop_dict["mkyaffs2_extra_flags"].split()) build_command.append(in_dir) build_command.append(out_file) if "selinux_fc" in prop_dict: build_command.append(prop_dict["selinux_fc"]) build_command.append(prop_dict["mount_point"]) exit_code = RunCommand(build_command) if exit_code != 0: return False # create the verified image if this is to be verified if verity_supported and is_verity_partition: if not MakeVerityEnabledImage(out_file, prop_dict): return False if run_fsck and prop_dict.get("skip_fsck") != "true": success, 
unsparse_image = UnsparseImage(out_file, replace=False) if not success: return False # Run e2fsck on the inflated image file e2fsck_command = ["e2fsck", "-f", "-n", unsparse_image] exit_code = RunCommand(e2fsck_command) os.remove(unsparse_image) return exit_code == 0 def ImagePropFromGlobalDict(glob_dict, mount_point): """Build an image property dictionary from the global dictionary. Args: glob_dict: the global dictionary from the build system. mount_point: such as "system", "data" etc. """ d = {} if "build.prop" in glob_dict: bp = glob_dict["build.prop"] if "ro.build.date.utc" in bp: d["timestamp"] = bp["ro.build.date.utc"] def copy_prop(src_p, dest_p): if src_p in glob_dict: d[dest_p] = str(glob_dict[src_p]) common_props = ( "extfs_sparse_flag", "mkyaffs2_extra_flags", "selinux_fc", "skip_fsck", "verity", "verity_key", "verity_signer_cmd" ) for p in common_props: copy_prop(p, p) d["mount_point"] = mount_point if mount_point == "system": copy_prop("fs_type", "fs_type") copy_prop("system_size", "partition_size") copy_prop("system_verity_block_device", "verity_block_device") elif mount_point == "data": # Copy the generic fs type first, override with specific one if available. 
copy_prop("fs_type", "fs_type") copy_prop("userdata_fs_type", "fs_type") copy_prop("userdata_size", "partition_size") elif mount_point == "cache": copy_prop("cache_fs_type", "fs_type") copy_prop("cache_size", "partition_size") elif mount_point == "vendor": copy_prop("vendor_fs_type", "fs_type") copy_prop("vendor_size", "partition_size") copy_prop("vendor_verity_block_device", "verity_block_device") elif mount_point == "oem": copy_prop("fs_type", "fs_type") copy_prop("oem_size", "partition_size") return d def LoadGlobalDict(filename): """Load "name=value" pairs from filename""" d = {} f = open(filename) for line in f: line = line.strip() if not line or line.startswith("#"): continue k, v = line.split("=", 1) d[k] = v f.close() return d def main(argv): if len(argv) != 3: print __doc__ sys.exit(1) in_dir = argv[0] glob_dict_file = argv[1] out_file = argv[2] glob_dict = LoadGlobalDict(glob_dict_file) image_filename = os.path.basename(out_file) mount_point = "" if image_filename == "system.img": mount_point = "system" elif image_filename == "userdata.img": mount_point = "data" elif image_filename == "cache.img": mount_point = "cache" elif image_filename == "vendor.img": mount_point = "vendor" elif image_filename == "oem.img": mount_point = "oem" else: print >> sys.stderr, "error: unknown image file name ", image_filename exit(1) image_properties = ImagePropFromGlobalDict(glob_dict, mount_point) if not BuildImage(in_dir, image_properties, out_file): print >> sys.stderr, "error: failed to build %s from %s" % (out_file, in_dir) exit(1) if __name__ == '__main__': main(sys.argv[1:])
import re from functools import update_wrapper from weakref import WeakSet from django.apps import apps from django.contrib.admin import ModelAdmin, actions from django.contrib.auth import REDIRECT_FIELD_NAME from django.core.exceptions import ImproperlyConfigured from django.db.models.base import ModelBase from django.http import Http404, HttpResponseRedirect from django.template.response import TemplateResponse from django.urls import NoReverseMatch, reverse from django.utils.functional import LazyObject from django.utils.module_loading import import_string from django.utils.text import capfirst from django.utils.translation import gettext as _, gettext_lazy from django.views.decorators.cache import never_cache from django.views.decorators.csrf import csrf_protect from django.views.i18n import JavaScriptCatalog all_sites = WeakSet() class AlreadyRegistered(Exception): pass class NotRegistered(Exception): pass class AdminSite: """ An AdminSite object encapsulates an instance of the Django admin application, ready to be hooked in to your URLconf. Models are registered with the AdminSite using the register() method, and the get_urls() method can then be used to access Django view functions that present a full admin interface for the collection of registered models. """ # Text to put at the end of each page's <title>. site_title = gettext_lazy('Django site admin') # Text to put in each page's <h1>. site_header = gettext_lazy('Django administration') # Text to put at the top of the admin index page. index_title = gettext_lazy('Site administration') # URL for the "View site" link at the top of each admin page. 
    site_url = '/'

    enable_nav_sidebar = True

    # Default rendering of empty changelist values; see empty_value_display.
    _empty_value_display = '-'

    # Template/form overrides; None means use the framework defaults.
    login_form = None
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None

    def __init__(self, name='admin'):
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
        all_sites.add(self)

    def check(self, app_configs):
        """
        Run the system checks on all ModelAdmins, except if they aren't
        customized at all.
        """
        if app_configs is None:
            app_configs = apps.get_app_configs()
        app_configs = set(app_configs)  # Speed up lookups below

        errors = []
        # Plain ModelAdmin registrations are skipped - nothing to check.
        modeladmins = (o for o in self._registry.values()
                       if o.__class__ is not ModelAdmin)
        for modeladmin in modeladmins:
            if modeladmin.model._meta.app_config in app_configs:
                errors.extend(modeladmin.check())
        return errors

    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Register the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, use ModelAdmin (the default admin
        options). If keyword arguments are given -- e.g., list_display --
        apply them as options to the admin class.

        If a model is already registered, raise AlreadyRegistered.

        If a model is abstract, raise ImproperlyConfigured.
        """
        admin_class = admin_class or ModelAdmin
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured(
                    'The model %s is abstract, so it cannot be registered with admin.' % model.__name__
                )

            if model in self._registry:
                registered_admin = str(self._registry[model])
                msg = 'The model %s is already registered ' % model.__name__
                if registered_admin.endswith('.ModelAdmin'):
                    # Most likely registered without a ModelAdmin subclass.
                    msg += 'in app %r.' % re.sub(r'\.ModelAdmin$', '',
                                                 registered_admin)
                else:
                    msg += 'with %r.' % registered_admin
                raise AlreadyRegistered(msg)

            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__,
                                       (admin_class,), options)

                # Instantiate the admin class to save in the registry
                self._registry[model] = admin_class(model, self)

    def unregister(self, model_or_iterable):
        """
        Unregister the given model(s).

        If a model isn't already registered, raise NotRegistered.
        """
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model not in self._registry:
                raise NotRegistered('The model %s is not registered' % model.__name__)
            del self._registry[model]

    def is_registered(self, model):
        """
        Check if a model class is registered with this `AdminSite`.
        """
        return model in self._registry

    def add_action(self, action, name=None):
        """
        Register an action to be available globally.
        """
        name = name or action.__name__
        self._actions[name] = action
        self._global_actions[name] = action

    def disable_action(self, name):
        """
        Disable a globally-registered action. Raise KeyError for invalid names.
        """
        del self._actions[name]

    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raise KeyError for invalid names.
        """
        return self._global_actions[name]

    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        return self._actions.items()

    @property
    def empty_value_display(self):
        return self._empty_value_display

    @empty_value_display.setter
    def empty_value_display(self, empty_value_display):
        self._empty_value_display = empty_value_display

    def has_permission(self, request):
        """
        Return True if the given HttpRequest has permission to view
        *at least one* page in the admin site.
        """
        return request.user.is_active and request.user.is_staff

    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.

        You'll want to use this from within ``AdminSite.get_urls()``:

            class MyAdminSite(AdminSite):

                def get_urls(self):
                    from django.urls import path

                    urls = super().get_urls()
                    urls += [
                        path('my_view/', self.admin_view(some_view))
                    ]
                    return urls

        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                # Allow hitting the logout page without permission so a stale
                # session doesn't trap the user in a redirect loop.
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)

    def get_urls(self):
        # Build the full urlpatterns for the admin: site-wide views first,
        # then per-model views, then the app_index catch-all.
        from django.urls import include, path, re_path
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views

        def wrap(view, cacheable=False):
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)

        # Admin-site-wide views.
        urlpatterns = [
            path('', wrap(self.index), name='index'),
            path('login/', self.login, name='login'),
            path('logout/', wrap(self.logout), name='logout'),
            path('password_change/', wrap(self.password_change, cacheable=True), name='password_change'),
            path(
                'password_change/done/',
                wrap(self.password_change_done, cacheable=True),
                name='password_change_done',
            ),
            path('jsi18n/', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            path(
                'r/<int:content_type_id>/<path:object_id>/',
                wrap(contenttype_views.shortcut),
                name='view_on_site',
            ),
        ]

        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in self._registry.items():
            urlpatterns += [
                path('%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)

        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                re_path(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns

    @property
    def urls(self):
        # (urlpatterns, app_namespace, instance_namespace) for include().
        return self.get_urls(), 'admin', self.name

    def each_context(self, request):
        """
        Return a dictionary of
        variables to put in the template context for *every* page in the
        admin site.

        For sites running on a subpath, use the SCRIPT_NAME value if site_url
        hasn't been customized.
        """
        script_name = request.META['SCRIPT_NAME']
        site_url = script_name if self.site_url == '/' and script_name else self.site_url
        return {
            'site_title': self.site_title,
            'site_header': self.site_header,
            'site_url': site_url,
            'has_permission': self.has_permission(request),
            'available_apps': self.get_app_list(request),
            'is_popup': False,
            'is_nav_sidebar_enabled': self.enable_nav_sidebar,
        }

    def password_change(self, request, extra_context=None):
        """
        Handle the "change password" task -- both form display and validation.
        """
        from django.contrib.admin.forms import AdminPasswordChangeForm
        from django.contrib.auth.views import PasswordChangeView
        url = reverse('admin:password_change_done', current_app=self.name)
        defaults = {
            'form_class': AdminPasswordChangeForm,
            'success_url': url,
            'extra_context': {**self.each_context(request), **(extra_context or {})},
        }
        if self.password_change_template is not None:
            defaults['template_name'] = self.password_change_template
        request.current_app = self.name
        return PasswordChangeView.as_view(**defaults)(request)

    def password_change_done(self, request, extra_context=None):
        """
        Display the "success" page after a password change.
        """
        from django.contrib.auth.views import PasswordChangeDoneView
        defaults = {
            'extra_context': {**self.each_context(request), **(extra_context or {})},
        }
        if self.password_change_done_template is not None:
            defaults['template_name'] = self.password_change_done_template
        request.current_app = self.name
        return PasswordChangeDoneView.as_view(**defaults)(request)

    def i18n_javascript(self, request, extra_context=None):
        """
        Display the i18n JavaScript that the Django admin requires.

        `extra_context` is unused but present for consistency with the other
        admin views.
        """
        return JavaScriptCatalog.as_view(packages=['django.contrib.admin'])(request)

    @never_cache
    def logout(self, request, extra_context=None):
        """
        Log out the user for the given HttpRequest.

        This should *not* assume the user is already logged in.
        """
        from django.contrib.auth.views import LogoutView
        defaults = {
            'extra_context': {
                **self.each_context(request),
                # Since the user isn't logged out at this point, the value of
                # has_permission must be overridden.
                'has_permission': False,
                **(extra_context or {})
            },
        }
        if self.logout_template is not None:
            defaults['template_name'] = self.logout_template
        request.current_app = self.name
        return LogoutView.as_view(**defaults)(request)

    @never_cache
    def login(self, request, extra_context=None):
        """
        Display the login form for the given HttpRequest.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)

        from django.contrib.auth.views import LoginView
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        context = {
            **self.each_context(request),
            'title': _('Log in'),
            'app_path': request.get_full_path(),
            'username': request.user.get_username(),
        }
        # Preserve any caller-supplied redirect target; otherwise send the
        # user to the admin index after a successful login.
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = reverse('admin:index', current_app=self.name)
        context.update(extra_context or {})

        defaults = {
            'extra_context': context,
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        request.current_app = self.name
        return LoginView.as_view(**defaults)(request)

    def _build_app_dict(self, request, label=None):
        """
        Build the app dictionary.
        The optional `label` parameter filters models of a specific app.
        """
        app_dict = {}

        if label:
            models = {
                m: m_a for m, m_a in self._registry.items()
                if m._meta.app_label == label
            }
        else:
            models = self._registry

        for model, model_admin in models.items():
            app_label = model._meta.app_label

            has_module_perms = model_admin.has_module_permission(request)
            if not has_module_perms:
                continue

            perms = model_admin.get_model_perms(request)

            # Check whether user has any perm for this module.
            # If so, add the module to the model_list.
            if True not in perms.values():
                continue

            info = (app_label, model._meta.model_name)
            model_dict = {
                'name': capfirst(model._meta.verbose_name_plural),
                'object_name': model._meta.object_name,
                'perms': perms,
                'admin_url': None,
                'add_url': None,
            }
            if perms.get('change') or perms.get('view'):
                model_dict['view_only'] = not perms.get('change')
                try:
                    model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                except NoReverseMatch:
                    pass
            if perms.get('add'):
                try:
                    model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                except NoReverseMatch:
                    pass

            if app_label in app_dict:
                app_dict[app_label]['models'].append(model_dict)
            else:
                app_dict[app_label] = {
                    'name': apps.get_app_config(app_label).verbose_name,
                    'app_label': app_label,
                    'app_url': reverse(
                        'admin:app_list',
                        kwargs={'app_label': app_label},
                        current_app=self.name,
                    ),
                    'has_module_perms': has_module_perms,
                    'models': [model_dict],
                }

        if label:
            # May be None if the label matched no visible models.
            return app_dict.get(label)
        return app_dict

    def get_app_list(self, request):
        """
        Return a sorted list of all the installed apps that have been
        registered in this site.
        """
        app_dict = self._build_app_dict(request)

        # Sort the apps alphabetically.
        app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())

        # Sort the models alphabetically within each app.
        for app in app_list:
            app['models'].sort(key=lambda x: x['name'])

        return app_list

    @never_cache
    def index(self, request, extra_context=None):
        """
        Display the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        app_list = self.get_app_list(request)

        context = {
            **self.each_context(request),
            'title': self.index_title,
            'app_list': app_list,
            **(extra_context or {}),
        }

        request.current_app = self.name

        return TemplateResponse(request, self.index_template or 'admin/index.html', context)

    def app_index(self, request, app_label, extra_context=None):
        # Per-app landing page listing that app's visible models.
        app_dict = self._build_app_dict(request, app_label)
        if not app_dict:
            raise Http404('The requested admin page does not exist.')
        # Sort the models alphabetically within each app.
        app_dict['models'].sort(key=lambda x: x['name'])
        app_name = apps.get_app_config(app_label).verbose_name
        context = {
            **self.each_context(request),
            'title': _('%(app)s administration') % {'app': app_name},
            'app_list': [app_dict],
            'app_label': app_label,
            **(extra_context or {}),
        }

        request.current_app = self.name

        return TemplateResponse(request, self.app_index_template or [
            'admin/%s/app_index.html' % app_label,
            'admin/app_index.html'
        ], context)


class DefaultAdminSite(LazyObject):
    # Lazily instantiates the AdminSite class configured on the admin
    # AppConfig the first time the default `site` object is touched.
    def _setup(self):
        AdminSiteClass = import_string(apps.get_app_config('admin').default_site)
        self._wrapped = AdminSiteClass()


# This global object represents the default admin site, for the common case.
# You can provide your own AdminSite using the (Simple)AdminConfig.default_site
# attribute. You can also instantiate AdminSite in your own code to create a
# custom admin site.
site = DefaultAdminSite()
from __future__ import absolute_import

import six

from collections import namedtuple
from django.conf import settings
from six.moves.urllib.parse import parse_qs, quote, urlencode, urljoin, urlparse
from functools import partial

from sentry import options
from sentry.utils import json
from sentry.utils.compat import map
from sentry.utils.compat import filter

# Components of a parsed origin matcher: scheme, (possibly idna-normalized)
# domain including an optional port, and path. Any component may be "*".
ParsedUriMatch = namedtuple("ParsedUriMatch", ["scheme", "domain", "path"])


def absolute_uri(url=None):
    """Resolve ``url`` against the configured ``system.url-prefix``.

    With no argument, return the prefix itself.
    """
    if not url:
        return options.get("system.url-prefix")
    return urljoin(options.get("system.url-prefix").rstrip("/") + "/", url.lstrip("/"))


def origin_from_url(url):
    """Reduce a full URL to its origin, e.g. ``https://host:port``.

    Falsy input is returned unchanged.
    """
    if not url:
        return url
    url = urlparse(url)
    return "%s://%s" % (url.scheme, url.netloc)


def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of urlencode.

    The stdlib urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    # Snippet originally from pysolr: https://github.com/toastdriven/pysolr

    if hasattr(params, "items"):
        params = params.items()

    new_params = list()

    for k, v in params:
        k = k.encode("utf-8")

        if isinstance(v, six.string_types):
            new_params.append((k, v.encode("utf-8")))
        elif isinstance(v, (list, tuple)):
            new_params.append((k, [i.encode("utf-8") for i in v]))
        else:
            new_params.append((k, six.text_type(v)))

    return urlencode(new_params, doseq)


def is_same_domain(url1, url2):
    """
    Returns true if the two urls should be treated as if they're from the same
    domain (trusted).
    """
    url1 = urlparse(url1)
    url2 = urlparse(url2)
    return url1.netloc == url2.netloc


def get_origins(project=None):
    """Return the frozenset of allowed origins, from settings plus the
    project's ``sentry:origins`` option (if a project is given)."""
    if settings.SENTRY_ALLOW_ORIGIN == "*":
        return frozenset(["*"])

    if settings.SENTRY_ALLOW_ORIGIN:
        result = settings.SENTRY_ALLOW_ORIGIN.split(" ")
    else:
        result = []

    if project:
        optval = project.get_option("sentry:origins", ["*"])
        if optval:
            result.extend(optval)

    # lowercase and strip the trailing slash from all origin values
    # filter out empty values
    return frozenset(filter(bool, map(lambda x: (x or "").lower().rstrip("/"), result)))


def parse_uri_match(value):
    """Parse an origin matcher like ``https://*.example.com:8080/path``
    into a :class:`ParsedUriMatch`, defaulting missing parts to ``*``."""
    if "://" in value:
        scheme, value = value.split("://", 1)
    else:
        scheme = "*"

    if "/" in value:
        domain, path = value.split("/", 1)
    else:
        domain, path = value, "*"

    if ":" in domain:
        # BUGFIX: split the *domain* (path already stripped above), not the
        # whole remaining value — otherwise a matcher with both a port and a
        # path (e.g. "example.com:8080/foo") leaks the path into the port.
        domain, port = domain.split(":", 1)
    else:
        port = None

    # we need to coerce our unicode inputs into proper
    # idna/punycode encoded representation for normalization.
    if isinstance(domain, six.binary_type):
        domain = domain.decode("utf8")

    domain = domain.encode("idna").decode("utf-8")

    if port:
        domain = "%s:%s" % (domain, port)

    return ParsedUriMatch(scheme, domain, path)


def is_valid_origin(origin, project=None, allowed=None):
    """
    Given an ``origin`` which matches a base URI (e.g. http://example.com)
    determine if a valid origin is present in the project settings.

    Origins may be defined in several ways:

    - http://domain.com[:port]: exact match for base URI (must include port)
    - *: allow any domain
    - *.domain.com: matches domain.com and all subdomains, on any port
    - domain.com: matches domain.com on any port
    - *:port: wildcard on hostname, but explicit match on port
    """
    if allowed is None:
        allowed = get_origins(project)

    if not allowed:
        return False

    if "*" in allowed:
        return True

    if not origin:
        return False

    # we always run a case insensitive check
    origin = origin.lower()

    # Fast check
    if origin in allowed:
        return True

    # XXX: In some cases origin might be localhost (or something similar) which causes a string value
    # of 'null' to be sent as the origin
    if origin == "null":
        return False

    if isinstance(origin, six.binary_type):
        try:
            origin = origin.decode("utf-8")
        except UnicodeDecodeError:
            try:
                origin = origin.decode("windows-1252")
            except UnicodeDecodeError:
                return False

    parsed = urlparse(origin)

    if parsed.hostname is None:
        parsed_hostname = ""
    else:
        try:
            parsed_hostname = parsed.hostname.encode("idna").decode("utf-8")
        except UnicodeError:
            # We sometimes shove in some garbage input here, so just opting to ignore and carry on
            parsed_hostname = parsed.hostname

    if parsed.port:
        domain_matches = (
            "*",
            parsed_hostname,
            # Explicit hostname + port name
            "%s:%d" % (parsed_hostname, parsed.port),
            # Wildcard hostname with explicit port
            "*:%d" % parsed.port,
        )
    else:
        domain_matches = ("*", parsed_hostname)

    for value in allowed:
        try:
            bits = parse_uri_match(value)
        except UnicodeError:
            # We hit a bad uri, so ignore this value
            continue

        # scheme supports exact and any match
        if bits.scheme not in ("*", parsed.scheme):
            continue

        # domain supports exact, any, and prefix match
        if bits.domain[:2] == "*.":
            if parsed_hostname.endswith(bits.domain[1:]) or parsed_hostname == bits.domain[2:]:
                return True
            continue
        elif bits.domain not in domain_matches:
            continue

        # path supports exact, any, and suffix match (with or without *)
        path = bits.path
        if path == "*":
            return True
        if path.endswith("*"):
            path = path[:-1]
        if parsed.path.startswith(path):
            return True
    return False
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

# ============= enthought library imports =======================
from __future__ import absolute_import
from datetime import datetime, timedelta

from apptools.preferences.preference_binding import bind_preference

# ============= standard library imports ========================
# ============= local library imports  ==========================
from sqlalchemy import and_
from sqlalchemy import or_
from sqlalchemy.exc import SQLAlchemyError

from pychron.database.core.database_adapter import DatabaseAdapter
from pychron.labspy.orm import (
    Measurement,
    ProcessInfo,
    Version,
    Device,
    Experiment,
    Analysis,
    Connections,
)

# , Version, Status, Experiment, Analysis, AnalysisType


class LabspyDatabaseAdapter(DatabaseAdapter):
    """Database adapter for the Labspy lab-monitoring schema.

    Provides add/get helpers for experiments, analyses, device connection
    status, and process-measurement records. Session handling and
    ``_add_item``/``_query_*`` helpers come from ``DatabaseAdapter``.
    """

    # SQLAlchemy backend dialect used by the base adapter.
    kind = "mysql"

    def bind_preferences(self):
        # Keep connection credentials in sync with the application's
        # preference store.
        bind_preference(self, "host", "pychron.labspy.host")
        # bind_preference(self, 'port', 'pychron.labspy.port')
        bind_preference(self, "username", "pychron.labspy.username")
        bind_preference(self, "password", "pychron.labspy.password")
        bind_preference(self, "name", "pychron.labspy.name")

    def add_experiment(self, **kw):
        # Create and persist a new Experiment row from keyword columns.
        exp = Experiment(**kw)
        return self._add_item(exp)

    def add_analysis(self, dbexp, rd):
        """Add an Analysis built from dict ``rd`` and attach it to
        experiment row ``dbexp``."""
        at = None
        # if 'analysis_type' in rd:
        #     analysis_type = rd.pop('analysis_type')
        #     at = self.get_analysis_type(analysis_type)
        #     if not at:
        #         at = self.add_analysis_type(analysis_type)

        an = Analysis(**rd)
        # if at:
        #     an.analysis_type = at

        an.experiment = dbexp
        return self._add_item(an)

    def set_connection(self, ts, appname, username, devname, com, addr, status):
        """Record (or update) the connection status of a device for an app.

        A missing (appname, devname) row is created; an existing one is
        updated in place. Query errors are logged and swallowed.
        """
        try:
            conn = self.get_connection(appname, devname)
        except SQLAlchemyError as e:
            self.warning(
                "Error getting connection {}.{} exception: {}".format(
                    appname, devname, e
                )
            )
            return

        add = False
        if conn is None:
            conn = Connections()
            add = True

        conn.appname = appname
        conn.username = username
        conn.devname = devname
        conn.com = com
        conn.address = addr
        conn.status = bool(status)
        conn.timestamp = ts

        if add:
            self._add_item(conn)

    def get_connection(self, appname, devname):
        # Look up a Connections row by app + device name; reraise DB errors
        # so set_connection can report them.
        q = self.session.query(Connections)
        q = q.filter(
            and_(Connections.appname == appname, Connections.devname == devname)
        )
        return self._query_first(q, reraise=True)

    def update_experiment(self, hashid, **kw):
        # Apply keyword columns onto the experiment identified by hashid.
        exp = self.get_experiment(hashid)
        for k, v in kw.items():
            setattr(exp, k, v)

    def add_device(self, dev):
        # Persist a new Device row named ``dev``.
        dev = Device(name=dev)
        return self._add_item(dev)

    def add_measurement(self, dev, name, value, unit):
        """Attach a Measurement value to the ProcessInfo for (dev, name).

        Logs a warning if the ProcessInfo row does not exist; ``unit`` is
        currently unused unless the commented auto-create path is enabled.
        """
        pinfo = self.get_process_info(dev, name)
        # if not pinfo:
        #     pinfo = self.add_process_info(dev, name, unit)

        if pinfo:
            measurement = Measurement(value=value)
            measurement.process = pinfo
            return self._add_item(measurement)
        else:
            self.warning("ProcessInfo={} Device={} not available".format(name, dev))

    def add_process_info(self, dev, name, unit):
        """Create a ProcessInfo row (and its Device, if missing)."""
        self.debug("add process info {} {} {}".format(dev, name, unit))
        dbdev = self.get_device(dev)
        if not dbdev:
            self.debug("add device {}".format(dev))
            dbdev = self.add_device(dev)

        p = ProcessInfo(name=name, units=unit)
        p.device = dbdev
        return self._add_item(p)

    # def add_status(self):
    #     p = Status()
    #     return self._add_item(p)
    #
    # def add_analysis_type(self, name):
    #     obj = AnalysisType(Name=name)
    #     return self._add_item(obj)

    # getters
    # def get_analysis_type(self, name):
    #     return self._retrieve_item(AnalysisType, name, key='Name')
    #
    def get_experiment(self, hid):
        # Fetch an Experiment by its hashid (None when absent).
        q = self.session.query(Experiment)
        q = q.filter(Experiment.hashid == hid)
        return q.first()

    # return self._retrieve_item(Experiment, hid, key='HashID')
    #
    # def get_status(self):
    #     with self.session_ctx() as sess:
    #         q = sess.query(Status)
    #         return self._query_one(q)

    def get_migrate_version(self, **kw):
        # Return the single row from the schema-migration Version table.
        q = self.session.query(Version)
        q = q.limit(1)
        mv = q.one()
        return mv

    def get_device(self, name):
        return self._retrieve_item(Device, name, key="name")

    def get_process_info(self, dev, name):
        # Join through Device so the process name is scoped to one device.
        q = self.session.query(ProcessInfo)
        q = q.join(Device)
        q = q.filter(Device.name == dev)
        q = q.filter(ProcessInfo.name == name)
        return self._query_one(q)

    def get_latest_lab_temperatures(self):
        return self._get_latest(("Temp",))

    def get_latest_lab_humiditys(self):
        return self._get_latest(("Hum",))

    def get_latest_lab_pneumatics(self):
        # Note: a bare string is accepted; _get_latest wraps it in a tuple.
        return self._get_latest("Pressure")

    def _get_latest(self, tag):
        """Return the most recent (<24h) measurement per ProcessInfo whose
        name LIKE-matches any of ``tag`` (a string or tuple of substrings),
        as a list of plain dicts suitable for serialization."""
        values = []
        with self.session_ctx(use_parent_session=False) as sess:
            q = sess.query(ProcessInfo)
            if not isinstance(tag, tuple):
                tag = (tag,)

            q = q.filter(or_(*[ProcessInfo.name.like("%{}%".format(t)) for t in tag]))
            ps = self._query_all(q)
            self.debug("get latest {}, ps={}".format(tag, len(ps)))
            # Only report measurements from the last 24 hours.
            min_date = datetime.now() - timedelta(hours=24)
            for p in ps:
                q = sess.query(Measurement)
                q = q.filter(Measurement.process_info_id == p.id)
                q = q.filter(Measurement.pub_date > min_date)
                q = q.order_by(Measurement.pub_date.desc())
                record = self._query_first(q)
                if record:
                    values.append(
                        {
                            "name": p.name,
                            "title": p.graph_title,
                            "pub_date": record.pub_date.isoformat(),
                            "value": record.value,
                            "device": p.device.name,
                        }
                    )
        return values

    def get_measurements(self, device, name, low=None, high=None):
        # NOTE(review): ``high`` is accepted but never applied as an upper
        # bound filter — presumably intentional/unfinished; confirm callers.
        q = self.session.query(Measurement)
        q = q.join(ProcessInfo, Device)
        q = q.filter(Device.name == device)
        q = q.filter(ProcessInfo.name == name)
        if low:
            q = q.filter(Measurement.pub_date >= low)
        return self._query_all(q)


# ============= EOF =============================================
import inspect
import os
import re
import sys
from collections import abc
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Set, Tuple, Union
from unittest.mock import MagicMock, patch

import yaml
from django.http import HttpResponse
from jsonschema.exceptions import ValidationError

from zerver.lib.request import _REQ, arguments_map
from zerver.lib.rest import rest_dispatch
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.utils import assert_is_not_none
from zerver.openapi.markdown_extension import (
    generate_curl_example,
    parse_language_and_options,
    render_curl_example,
)
from zerver.openapi.openapi import (
    OPENAPI_SPEC_PATH,
    OpenAPISpec,
    SchemaError,
    find_openapi_endpoint,
    get_openapi_fixture,
    get_openapi_parameters,
    get_openapi_paths,
    openapi_spec,
    validate_against_openapi_schema,
    validate_request,
    validate_schema,
)
from zerver.tornado.views import get_events, get_events_backend

# Fixed endpoint/method used by the OpenAPI fixture tests below.
TEST_ENDPOINT = "/messages/{message_id}"
TEST_METHOD = "patch"
TEST_RESPONSE_BAD_REQ = "400"
TEST_RESPONSE_SUCCESS = "200"

# Mapping from OpenAPI scalar type names to Python types.
VARMAP = {
    "integer": int,
    "string": str,
    "boolean": bool,
    "object": dict,
    "NoneType": type(None),
}


def schema_type(schema: Dict[str, Any]) -> Union[type, Tuple[type, object]]:
    """Translate an OpenAPI schema into a Python type (arrays become a
    (list, element_type) tuple)."""
    if "oneOf" in schema:
        # Hack: Just use the type of the first value
        # Ideally, we'd turn this into a Union type.
        return schema_type(schema["oneOf"][0])
    elif schema["type"] == "array":
        return (list, schema_type(schema["items"]))
    else:
        return VARMAP[schema["type"]]


class OpenAPIToolsTest(ZulipTestCase):
    """Make sure that the tools we use to handle our OpenAPI specification
    (located in zerver/openapi/openapi.py) work as expected.

    These tools are mostly dedicated to fetching parts of the -already
    parsed- specification, and comparing them to objects returned by our
    REST API.
    """

    def test_get_openapi_fixture(self) -> None:
        actual = get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD, TEST_RESPONSE_BAD_REQ)
        expected = {
            "code": "BAD_REQUEST",
            "msg": "You don't have permission to edit this message",
            "result": "error",
        }
        self.assertEqual(actual, expected)

    def test_get_openapi_parameters(self) -> None:
        actual = get_openapi_parameters(TEST_ENDPOINT, TEST_METHOD)
        expected_item = {
            "name": "message_id",
            "in": "path",
            "description": "The target message's ID.\n",
            "example": 42,
            "required": True,
            "schema": {"type": "integer"},
        }
        assert expected_item in actual

    def test_validate_against_openapi_schema(self) -> None:
        # Each bad_content below violates the schema in a different way:
        # extra key, wrong type, and missing required key respectively.
        with self.assertRaises(
            ValidationError, msg="Additional properties are not allowed ('foo' was unexpected)"
        ):
            bad_content: Dict[str, object] = {
                "msg": "",
                "result": "success",
                "foo": "bar",
            }
            validate_against_openapi_schema(
                bad_content, TEST_ENDPOINT, TEST_METHOD, TEST_RESPONSE_SUCCESS
            )

        with self.assertRaises(ValidationError, msg=("42 is not of type string")):
            bad_content = {
                "msg": 42,
                "result": "success",
            }
            validate_against_openapi_schema(
                bad_content, TEST_ENDPOINT, TEST_METHOD, TEST_RESPONSE_SUCCESS
            )

        with self.assertRaises(ValidationError, msg='Expected to find the "msg" required key'):
            bad_content = {
                "result": "success",
            }
            validate_against_openapi_schema(
                bad_content, TEST_ENDPOINT, TEST_METHOD, TEST_RESPONSE_SUCCESS
            )

        # No exceptions should be raised here.
        good_content = {
            "msg": "",
            "result": "success",
        }
        validate_against_openapi_schema(
            good_content, TEST_ENDPOINT, TEST_METHOD, TEST_RESPONSE_SUCCESS
        )

        # Overwrite the exception list with a mocked one
        test_dict: Dict[str, Any] = {}

        # Check that validate_against_openapi_schema correctly
        # descends into 'deep' objects and arrays.  Test 1 should
        # pass, Test 2 has a 'deep' extraneous key and Test 3 has a
        # 'deep' opaque object. Also the parameters are a heterogeneous
        # mix of arrays and objects to verify that our descent logic
        # correctly gets to the the deeply nested objects.
        with open(os.path.join(os.path.dirname(OPENAPI_SPEC_PATH), "testing.yaml")) as test_file:
            test_dict = yaml.safe_load(test_file)
        openapi_spec.openapi()["paths"]["testing"] = test_dict
        try:
            validate_against_openapi_schema(
                (test_dict["test1"]["responses"]["200"]["content"]["application/json"]["example"]),
                "testing",
                "test1",
                "200",
            )
            with self.assertRaises(
                ValidationError, msg='Extraneous key "str4" in response\'s content'
            ):
                validate_against_openapi_schema(
                    (
                        test_dict["test2"]["responses"]["200"]["content"]["application/json"][
                            "example"
                        ]
                    ),
                    "testing",
                    "test2",
                    "200",
                )
            with self.assertRaises(SchemaError, msg='Opaque object "obj"'):
                # Checks for opaque objects
                validate_schema(
                    test_dict["test3"]["responses"]["200"]["content"]["application/json"]["schema"]
                )
        finally:
            # Remove the mocked entry so other tests see the real spec.
            openapi_spec.openapi()["paths"].pop("testing", None)

    def test_live_reload(self) -> None:
        # Force the reload by making the last update date < the file's last
        # modified date
        openapi_spec.mtime = 0
        get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD)

        # Check that the file has been reloaded by verifying that the last
        # update date isn't zero anymore
        self.assertNotEqual(openapi_spec.mtime, 0)

        # Now verify calling it again doesn't call reload
        old_openapi = openapi_spec.openapi()
        get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD)
        new_openapi = openapi_spec.openapi()
        self.assertIs(old_openapi, new_openapi)


class OpenAPIArgumentsTest(ZulipTestCase):
    # This will be filled during test_openapi_arguments:
    checked_endpoints: Set[str] = set()
    # URL patterns that intentionally lack OpenAPI documentation for now.
    pending_endpoints = {
        #### TODO: These endpoints are a priority to document:
        "/realm/presence",
        "/users/me/presence",
        "/users/me/alert_words",
        # These are a priority to document but don't match our normal URL schemes
        # and thus may be complicated to document with our current tooling.
        # (No /api/v1/ or /json prefix).
        "/avatar/{email_or_id}",
        ## This one is in zulip.yaml, but not the actual docs.
        # "/api/v1/user_uploads/{realm_id_str}/(unknown)",
        ## And this one isn't, and isn't really representable
        # "/user_uploads/{realm_id_str}/(unknown)",
        #### These realm administration settings are valuable to document:
        # Delete a file uploaded by current user.
        "/attachments/{attachment_id}",
        # List data exports for organization (GET) or request one (POST)
        "/export/realm",
        # Delete a data export.
        "/export/realm/{export_id}",
        # Manage default streams and default stream groups
        "/default_streams",
        "/default_stream_groups/create",
        "/default_stream_groups/{group_id}",
        "/default_stream_groups/{group_id}/streams",
        # Administer invitations
        "/invites",
        "/invites/multiuse",
        "/invites/{prereg_id}",
        "/invites/{prereg_id}/resend",
        "/invites/multiuse/{invite_id}",
        # Single-stream settings alternative to the bulk endpoint
        # users/me/subscriptions/properties; probably should just be a
        # section of the same page.
        "/users/me/subscriptions/{stream_id}",
        #### Mobile-app only endpoints; important for mobile developers.
        # Mobile interface for fetching API keys
        "/fetch_api_key",
        # Already documented; need to fix tracking bug
        "/dev_fetch_api_key",
        # Mobile interface for development environment login
        "/dev_list_users",
        # Registration for iOS/Android mobile push notifications.
        "/users/me/android_gcm_reg_id",
        "/users/me/apns_device_token",
        #### These personal settings endpoints have modest value to document:
        "/users/me/avatar",
        "/users/me/api_key/regenerate",
        # Much more valuable would be an org admin bulk-upload feature.
        "/users/me/profile_data",
        #### Should be documented as part of interactive bots documentation
        "/bot_storage",
        "/submessage",
        "/zcommand",
        #### These "organization settings" endpoint have modest value to document:
        "/realm",
        "/realm/domains",
        "/realm/domains/{domain}",
        "/bots",
        "/bots/{bot_id}",
        "/bots/{bot_id}/api_key/regenerate",
        #### These "organization settings" endpoints have low value to document:
        "/realm/profile_fields/{field_id}",
        "/realm/icon",
        "/realm/logo",
        "/realm/deactivate",
        "/realm/subdomain/{subdomain}",
        # API for Zoom video calls. Unclear if this can support other apps.
        "/calls/zoom/create",
        #### The following are fake endpoints that live in our zulip.yaml
        #### for tooling convenience reasons, and should eventually be moved.
        # Real-time-events endpoint
        "/real-time",
        # Rest error handling endpoint
        "/rest-error-handling",
        # Zulip outgoing webhook payload
        "/zulip-outgoing-webhook",
    }

    # Endpoints where the documentation is currently failing our
    # consistency tests.  We aim to keep this list empty.
    buggy_documentation_endpoints: Set[str] = set()

    def convert_regex_to_url_pattern(self, regex_pattern: str) -> str:
        """Convert regular expressions style URL patterns to their
        corresponding OpenAPI style formats. All patterns are
        expected to start with ^ and end with $.
        Examples:
            1. /messages/{message_id} <-> r'^messages/(?P<message_id>[0-9]+)$'
            2. /events <-> r'^events$'
            3. '/realm/domains' <-> r'/realm\\/domains$'
        """

        # Handle the presence-email code which has a non-slashes syntax.
        regex_pattern = regex_pattern.replace("[^/]*", ".*").replace("[^/]+", ".*")

        self.assertTrue(regex_pattern.startswith("^"))
        self.assertTrue(regex_pattern.endswith("$"))
        url_pattern = "/" + regex_pattern[1:][:-1]
        # Named capture groups become {name} path parameters.
        url_pattern = re.sub(r"\(\?P<(\w+)>[^/]+\)", r"{\1}", url_pattern)
        url_pattern = url_pattern.replace("\\", "")
        return url_pattern

    def ensure_no_documentation_if_intentionally_undocumented(
        self, url_pattern: str, method: str, msg: Optional[str] = None
    ) -> None:
        # Succeeds only when the endpoint is genuinely missing from the
        # OpenAPI spec (KeyError); documented endpoints trigger the failure.
        try:
            get_openapi_parameters(url_pattern, method)
            if not msg:  # nocoverage
                msg = f"""
We found some OpenAPI documentation for {method} {url_pattern},
so maybe we shouldn't mark it as intentionally undocumented in the URLs.
"""
            raise AssertionError(msg)  # nocoverage
        except KeyError:
            return

    def check_for_non_existant_openapi_endpoints(self) -> None:
        """Here, we check to see if every endpoint documented in the OpenAPI
        documentation actually exists in urls.py and thus in actual code.
        Note: We define this as a helper called at the end of
        test_openapi_arguments instead of as a separate test to ensure that
        this test is only executed after test_openapi_arguments so that it's
        results can be used here in the set operations."""
        openapi_paths = set(get_openapi_paths())
        undocumented_paths = openapi_paths - self.checked_endpoints
        undocumented_paths -= self.buggy_documentation_endpoints
        undocumented_paths -= self.pending_endpoints
        try:
            self.assert_length(undocumented_paths, 0)
        except AssertionError:  # nocoverage
            msg = "The following endpoints have been documented but can't be found in urls.py:"
            for undocumented_path in undocumented_paths:
                msg += f"\n + {undocumented_path}"
            raise AssertionError(msg)

    def get_type_by_priority(
        self, types: Sequence[Union[type, Tuple[type, object]]]
    ) -> Union[type, Tuple[type, object]]:
        # Pick a canonical representative for a Union: container types win
        # over scalars; tuples (array types) win outright.
        priority = {list: 1, dict: 2, str: 3, int: 4, bool: 5}
        tyiroirp = {1: list, 2: dict, 3: str, 4: int, 5: bool}
        val = 6
        for t in types:
            if isinstance(t, tuple):
                return t  # e.g. (list, dict) or (list ,str)
            v = priority.get(t, 6)
            if v < val:
                val = v
        return tyiroirp.get(val, types[0])

    def get_standardized_argument_type(self, t: Any) -> Union[type, Tuple[type, object]]:
        """Given a type from the typing module such as List[str] or Union[str, int],
        convert it into a corresponding Python type. Unions are mapped to a canonical
        choice among the options.
        E.g. typing.Union[typing.List[typing.Dict[str, typing.Any]], NoneType]
        needs to be mapped to list."""

        origin = getattr(t, "__origin__", None)
        if sys.version_info < (3, 7):  # nocoverage
            # Pre-3.7 typing exposed the typing aliases as origins.
            if origin == List:
                origin = list
            elif origin == Dict:
                origin = dict
            elif origin == Mapping:
                origin = abc.Mapping
            elif origin == Sequence:
                origin = abc.Sequence

        if not origin:
            # Then it's most likely one of the fundamental data types
            # I.E. Not one of the data types from the "typing" module.
            return t
        elif origin == Union:
            subtypes = [self.get_standardized_argument_type(st) for st in t.__args__]
            return self.get_type_by_priority(subtypes)
        elif origin in [list, abc.Sequence]:
            [st] = t.__args__
            return (list, self.get_standardized_argument_type(st))
        elif origin in [dict, abc.Mapping]:
            return dict
        raise AssertionError(f"Unknown origin {origin}")

    def render_openapi_type_exception(
        self,
        function: Callable[..., HttpResponse],
        openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
        function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
        diff: Set[Tuple[str, Union[type, Tuple[type, object]]]],
    ) -> None:  # nocoverage
        """Print a *VERY* clear and verbose error message for when the types
        (between the OpenAPI documentation and the function declaration) don't match."""

        msg = f"""
The types for the request parameters in zerver/openapi/zulip.yaml
do not match the types declared in the implementation of {function.__name__}.\n"""
        msg += "=" * 65 + "\n"
        msg += "{:<10s}{:^30s}{:>10s}\n".format(
            "parameter", "OpenAPI type", "function declaration type"
        )
        msg += "=" * 65 + "\n"
        opvtype = None
        fdvtype = None
        # For each mismatched name, look up both sides' recorded types.
        for element in diff:
            vname = element[0]
            for element in openapi_params:
                if element[0] == vname:
                    opvtype = element[1]
                    break
            for element in function_params:
                if element[0] == vname:
                    fdvtype = element[1]
                    break
        msg += f"{vname:<10s}{str(opvtype):^30s}{str(fdvtype):>10s}\n"
        raise AssertionError(msg)

    def check_argument_types(
        self, function: Callable[..., HttpResponse], openapi_parameters: List[Dict[str, Any]]
    ) -> None:
        """We construct for both the OpenAPI data and the function's definition a set of
        tuples of the form (var_name, type) and then compare those sets to see if the
        OpenAPI data defines a different type than that actually accepted by the function.
        Otherwise, we print out the exact differences for convenient debugging and raise an
        AssertionError."""
        openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
        json_params: Dict[str, Union[type, Tuple[type, object]]] = {}
        for element in openapi_parameters:
            name: str = element["name"]
            schema = {}
            if "content" in element:
                # The only content-type we use in our API is application/json.
                assert "schema" in element["content"]["application/json"]
                # If content_type is application/json, then the
                # parameter needs to be handled specially, as REQ can
                # either return the application/json as a string or it
                # can either decode it and return the required
                # elements. For example `to` array in /messages: POST
                # is processed by REQ as a string and then its type is
                # checked in the view code.
                #
                # Meanwhile `profile_data` in /users/{user_id}: GET is
                # taken as array of objects. So treat them separately.
                schema = element["content"]["application/json"]["schema"]
                json_params[name] = schema_type(schema)
                continue
            else:
                schema = element["schema"]
            openapi_params.add((name, schema_type(schema)))

        function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()

        # Iterate through the decorators to find the original
        # function, wrapped by has_request_variables, so we can parse
        # its arguments.
        while getattr(function, "__wrapped__", None):
            function = getattr(function, "__wrapped__", None)
            # Tell mypy this is never None.
            assert function is not None

        # Now, we do inference mapping each REQ parameter's
        # declaration details to the Python/mypy types for the
        # arguments passed to it.
        #
        # Because the mypy types are the types used inside the inner
        # function (after the original data is processed by any
        # validators, converters, etc.), they will not always match
        # the API-level argument types.  The main case where this
        # happens is when a `converter` is used that changes the types
        # of its parameters.
        for pname, defval in inspect.signature(function).parameters.items():
            defval = defval.default
            if isinstance(defval, _REQ):
                # TODO: The below inference logic in cases where
                # there's a converter function declared is incorrect.
                # Theoretically, we could restructure the converter
                # function model so that we can check what type it
                # excepts to be passed to make validation here
                # possible.

                vtype = self.get_standardized_argument_type(function.__annotations__[pname])
                vname = defval.post_var_name
                assert vname is not None
                if vname in json_params:
                    # Here we have two cases.  If the the REQ type is
                    # string then there is no point in comparing as
                    # JSON can always be returned as string.  Ideally,
                    # we wouldn't use REQ for a JSON object without a
                    # validator in these cases, but it does happen.
                    #
                    # If the REQ type is not string then, insert the
                    # REQ and OpenAPI data types of the variable in
                    # the respective sets so that they can be dealt
                    # with later.  In either case remove the variable
                    # from `json_params`.
                    if vtype == str:
                        json_params.pop(vname, None)
                        continue
                    else:
                        openapi_params.add((vname, json_params[vname]))
                        json_params.pop(vname, None)
                function_params.add((vname, vtype))

        # After the above operations `json_params` should be empty.
        assert len(json_params) == 0
        diff = openapi_params - function_params
        if diff:  # nocoverage
            self.render_openapi_type_exception(function, openapi_params, function_params, diff)

    def test_openapi_arguments(self) -> None:
        """This end-to-end API documentation test compares the arguments
        defined in the actual code using @has_request_variables and
        REQ(), with the arguments declared in our API documentation
        for every API endpoint in Zulip.

        First, we import the fancy-Django version of zproject/urls.py
        by doing this, each has_request_variables wrapper around each
        imported view function gets called to generate the wrapped
        view function and thus filling the global arguments_map variable.
        Basically, we're exploiting code execution during import.

            Then we need to import some view modules not already imported in
        urls.py. We use this different syntax because of the linters complaining
        of an unused import (which is correct, but we do this for triggering the
        has_request_variables decorator).

            At the end, we perform a reverse mapping test that verifies that
        every URL pattern defined in the OpenAPI documentation actually exists
        in code.
        """

        from zproject import urls as urlconf

        # We loop through all the API patterns, looking in particular
        # for those using the rest_dispatch decorator; we then parse
        # its mapping of (HTTP_METHOD -> FUNCTION).
        for p in urlconf.v1_api_and_json_patterns + urlconf.v1_api_mobile_patterns:
            if p.callback is not rest_dispatch:
                # Endpoints not using rest_dispatch don't have extra data.
                methods_endpoints: Dict[str, Any] = dict(
                    GET=p.callback,
                )
            else:
                methods_endpoints = assert_is_not_none(p.default_args)

            # since the module was already imported and is now residing in
            # memory, we won't actually face any performance penalties here.
            for method, value in methods_endpoints.items():
                if callable(value):
                    function: Callable[..., HttpResponse] = value
                    tags: Set[str] = set()
                else:
                    function, tags = value

                if function is get_events:
                    # Work around the fact that the registered
                    # get_events view function isn't where we do
                    # @has_request_variables.
                    #
                    # TODO: Make this configurable via an optional argument
                    # to has_request_variables, e.g.
                    # @has_request_variables(view_func_name="zerver.tornado.views.get_events")
                    function = get_events_backend

                function_name = f"{function.__module__}.{function.__name__}"

                # Our accounting logic in the `has_request_variables()`
                # code means we have the list of all arguments
                # accepted by every view function in arguments_map.
                accepted_arguments = set(arguments_map[function_name])

                regex_pattern = p.pattern.regex.pattern
                url_pattern = self.convert_regex_to_url_pattern(regex_pattern)

                if "intentionally_undocumented" in tags:
                    self.ensure_no_documentation_if_intentionally_undocumented(url_pattern, method)
                    continue

                if url_pattern in self.pending_endpoints:
                    # HACK: After all pending_endpoints have been resolved, we should remove
                    # this segment and the "msg" part of the `ensure_no_...` method.
                    msg = f"""
We found some OpenAPI documentation for {method} {url_pattern},
so maybe we shouldn't include it in pending_endpoints.
"""
                    self.ensure_no_documentation_if_intentionally_undocumented(
                        url_pattern, method, msg
                    )
                    continue

                try:
                    # Don't include OpenAPI parameters that live in
                    # the path; these are not extracted by REQ.
                    openapi_parameters = get_openapi_parameters(
                        url_pattern, method, include_url_parameters=False
                    )
                except Exception:  # nocoverage
                    raise AssertionError(f"Could not find OpenAPI docs for {method} {url_pattern}")

                # We now have everything we need to understand the
                # function as defined in our urls.py:
                #
                # * method is the HTTP method, e.g. GET, POST, or PATCH
                #
                # * p.pattern.regex.pattern is the URL pattern; might require
                #   some processing to match with OpenAPI rules
                #
                # * accepted_arguments is the full set of arguments
                #   this method accepts (from the REQ declarations in
                #   code).
                #
                # * The documented parameters for the endpoint as recorded in our
                #   OpenAPI data in zerver/openapi/zulip.yaml.
                #
                # We now compare these to confirm that the documented
                # argument list matches what actually appears in the
                # codebase.

                openapi_parameter_names = {parameter["name"] for parameter in openapi_parameters}

                if len(accepted_arguments - openapi_parameter_names) > 0:  # nocoverage
                    print("Undocumented parameters for", url_pattern, method, function_name)
                    print(" +", openapi_parameter_names)
                    print(" -", accepted_arguments)
                    assert url_pattern in self.buggy_documentation_endpoints
                elif len(openapi_parameter_names - accepted_arguments) > 0:  # nocoverage
                    print("Documented invalid parameters for", url_pattern, method, function_name)
                    print(" -", openapi_parameter_names)
                    print(" +", accepted_arguments)
                    assert url_pattern in self.buggy_documentation_endpoints
                else:
                    self.assertEqual(openapi_parameter_names, accepted_arguments)
                    self.check_argument_types(function, openapi_parameters)
                    self.checked_endpoints.add(url_pattern)

        self.check_for_non_existant_openapi_endpoints()


class ModifyExampleGenerationTestCase(ZulipTestCase):
    # Tests for parse_language_and_options, which parses the
    # "language, key=value, ..." header of fenced code examples.
    def test_no_mod_argument(self) -> None:
        res = parse_language_and_options("python")
        self.assertEqual(res, ("python", {}))

    def test_single_simple_mod_argument(self) -> None:
        res = parse_language_and_options("curl, mod=1")
        self.assertEqual(res, ("curl", {"mod": 1}))

        res = parse_language_and_options("curl, mod='somevalue'")
        self.assertEqual(res, ("curl", {"mod": "somevalue"}))

        res = parse_language_and_options('curl, mod="somevalue"')
        self.assertEqual(res, ("curl", {"mod": "somevalue"}))

    def test_multiple_simple_mod_argument(self) -> None:
        res = parse_language_and_options("curl, mod1=1, mod2='a'")
        self.assertEqual(res, ("curl", {"mod1": 1, "mod2": "a"}))

        res = parse_language_and_options("curl, mod1=\"asdf\", mod2='thing', mod3=3")
        self.assertEqual(res, ("curl", {"mod1": "asdf", "mod2": "thing", "mod3": 3}))

    def test_single_list_mod_argument(self) -> None:
        res = parse_language_and_options("curl, exclude=['param1', 'param2']")
        self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))

        res = parse_language_and_options('curl, exclude=["param1", "param2"]')
        self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))

        res = parse_language_and_options("curl, exclude=['param1', \"param2\"]")
        self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))

    def test_multiple_list_mod_argument(self) -> None:
        res = parse_language_and_options("curl, exclude=['param1', \"param2\"], special=['param3']")
        self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"], "special": ["param3"]}))

    def test_multiple_mixed_mod_arguments(self) -> None:
        res = parse_language_and_options(
            'curl, exclude=["asdf", \'sdfg\'], other_key=\'asdf\', more_things="asdf", another_list=[1, "2"]'
        )
        self.assertEqual(
            res,
            (
                "curl",
                {
                    "exclude": ["asdf", "sdfg"],
                    "other_key": "asdf",
                    "more_things": "asdf",
                    "another_list": [1, "2"],
                },
            ),
        )


class TestCurlExampleGeneration(ZulipTestCase):
    # Minimal OpenAPI spec fragments used to drive curl-example generation.
    spec_mock_without_examples = {
        "security": [{"basicAuth": []}],
        "paths": {
            "/mark_stream_as_read": {
                "post": {
                    "description": "Mark all the unread messages in a stream as read.",
                    "parameters": [
                        {
                            "name": "stream_id",
                            "in": "query",
                            "description": "The ID of the stream whose messages should be marked as read.",
                            "schema": {
                                "type": "integer",
                            },
                            "required": True,
                        },
                        {
                            "name": "bool_param",
                            "in": "query",
                            "description": "Just a boolean parameter.",
                            "schema": {
                                "type": "boolean",
                            },
                            "required": True,
                        },
                    ],
                },
            },
        },
    }

    spec_mock_with_invalid_method: Dict[str, object] = {
        "security": [{"basicAuth": []}],
        "paths": {
            "/endpoint": {
                "brew": {},  # the data is irrelevant as is should be rejected.
}, }, } spec_mock_using_object = { "security": [{"basicAuth": []}], "paths": { "/endpoint": { "get": { "description": "Get some info.", "parameters": [ { "name": "param1", "in": "query", "description": "An object", "content": { "application/json": { "schema": {"type": "object"}, "example": { "key": "value", }, } }, "required": True, }, ], }, }, }, } spec_mock_using_param_in_path = { "security": [{"basicAuth": []}], "paths": { "/endpoint/{param1}": { "get": { "description": "Get some info.", "parameters": [ { "name": "param1", "in": "path", "description": "Param in path", "schema": { "type": "integer", }, "example": 35, "required": True, }, { "name": "param2", "in": "query", "description": "An object", "required": True, "content": { "application/json": { "schema": {"type": "object"}, "example": { "key": "value", }, } }, }, ], }, }, }, } spec_mock_using_object_without_example = { "security": [{"basicAuth": []}], "paths": { "/endpoint": { "get": { "description": "Get some info.", "parameters": [ { "name": "param1", "in": "query", "description": "An object", "schema": { "type": "object", }, "required": True, }, ], }, }, }, } spec_mock_using_array_without_example = { "security": [{"basicAuth": []}], "paths": { "/endpoint": { "get": { "description": "Get some info.", "parameters": [ { "name": "param1", "in": "query", "description": "An array", "schema": { "type": "array", }, "required": True, }, ], }, }, }, } def curl_example(self, endpoint: str, method: str, *args: Any, **kwargs: Any) -> List[str]: return generate_curl_example(endpoint, method, "http://localhost:9991/api", *args, **kwargs) def test_generate_and_render_curl_example(self) -> None: generated_curl_example = self.curl_example("/get_stream_id", "GET") expected_curl_example = [ "```curl", "curl -sSX GET -G http://localhost:9991/api/v1/get_stream_id \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", " --data-urlencode stream=Denmark", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) def 
test_generate_and_render_curl_example_with_nonexistant_endpoints(self) -> None: with self.assertRaises(KeyError): self.curl_example("/mark_this_stream_as_read", "POST") with self.assertRaises(KeyError): self.curl_example("/mark_stream_as_read", "GET") def test_generate_and_render_curl_without_auth(self) -> None: generated_curl_example = self.curl_example("/dev_fetch_api_key", "POST") expected_curl_example = [ "```curl", "curl -sSX POST http://localhost:9991/api/v1/dev_fetch_api_key \\", " --data-urlencode username=iago@zulip.com", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_default_examples(self, spec_mock: MagicMock) -> None: spec_mock.return_value = self.spec_mock_without_examples generated_curl_example = self.curl_example("/mark_stream_as_read", "POST") expected_curl_example = [ "```curl", "curl -sSX POST http://localhost:9991/api/v1/mark_stream_as_read \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", " --data-urlencode stream_id=1 \\", " --data-urlencode bool_param=false", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_invalid_method(self, spec_mock: MagicMock) -> None: spec_mock.return_value = self.spec_mock_with_invalid_method with self.assertRaises(ValueError): self.curl_example("/endpoint", "BREW") # see: HTCPCP def test_generate_and_render_curl_with_array_example(self) -> None: generated_curl_example = self.curl_example("/messages", "GET") expected_curl_example = [ "```curl", "curl -sSX GET -G http://localhost:9991/api/v1/messages \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", " --data-urlencode anchor=42 \\", " --data-urlencode num_before=4 \\", " --data-urlencode num_after=8 \\", ' --data-urlencode \'narrow=[{"operand": "Denmark", "operator": "stream"}]\' \\', " --data-urlencode client_gravatar=false \\", " --data-urlencode 
apply_markdown=false \\", " --data-urlencode use_first_unread_anchor=true", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_object(self, spec_mock: MagicMock) -> None: spec_mock.return_value = self.spec_mock_using_object generated_curl_example = self.curl_example("/endpoint", "GET") expected_curl_example = [ "```curl", "curl -sSX GET -G http://localhost:9991/api/v1/endpoint \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", ' --data-urlencode \'param1={"key": "value"}\'', "```", ] self.assertEqual(generated_curl_example, expected_curl_example) @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_object_without_example( self, spec_mock: MagicMock ) -> None: spec_mock.return_value = self.spec_mock_using_object_without_example with self.assertRaises(ValueError): self.curl_example("/endpoint", "GET") @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_array_without_example( self, spec_mock: MagicMock ) -> None: spec_mock.return_value = self.spec_mock_using_array_without_example with self.assertRaises(ValueError): self.curl_example("/endpoint", "GET") @patch("zerver.openapi.openapi.OpenAPISpec.openapi") def test_generate_and_render_curl_with_param_in_path(self, spec_mock: MagicMock) -> None: spec_mock.return_value = self.spec_mock_using_param_in_path generated_curl_example = self.curl_example("/endpoint/{param1}", "GET") expected_curl_example = [ "```curl", "curl -sSX GET -G http://localhost:9991/api/v1/endpoint/35 \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", ' --data-urlencode \'param2={"key": "value"}\'', "```", ] self.assertEqual(generated_curl_example, expected_curl_example) def test_generate_and_render_curl_wrapper(self) -> None: generated_curl_example = render_curl_example( "/get_stream_id:GET:email:key", api_url="https://zulip.example.com/api" ) expected_curl_example = [ 
"```curl", "curl -sSX GET -G https://zulip.example.com/api/v1/get_stream_id \\", " -u email:key \\", " --data-urlencode stream=Denmark", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) def test_generate_and_render_curl_example_with_excludes(self) -> None: generated_curl_example = self.curl_example( "/messages", "GET", exclude=["client_gravatar", "apply_markdown"] ) expected_curl_example = [ "```curl", "curl -sSX GET -G http://localhost:9991/api/v1/messages \\", " -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\", " --data-urlencode anchor=42 \\", " --data-urlencode num_before=4 \\", " --data-urlencode num_after=8 \\", ' --data-urlencode \'narrow=[{"operand": "Denmark", "operator": "stream"}]\' \\', " --data-urlencode use_first_unread_anchor=true", "```", ] self.assertEqual(generated_curl_example, expected_curl_example) class OpenAPIAttributesTest(ZulipTestCase): def test_attributes(self) -> None: """ Checks: * All endpoints have `operationId` and `tag` attributes. * All example responses match their schema. * That no opaque object exists. 
""" EXCLUDE = ["/real-time"] VALID_TAGS = [ "users", "server_and_organizations", "authentication", "real_time_events", "streams", "messages", "drafts", "webhooks", ] paths = OpenAPISpec(OPENAPI_SPEC_PATH).openapi()["paths"] for path, path_item in paths.items(): if path in EXCLUDE: continue for method, operation in path_item.items(): # Check if every file has an operationId assert "operationId" in operation assert "tags" in operation tag = operation["tags"][0] assert tag in VALID_TAGS for status_code, response in operation["responses"].items(): schema = response["content"]["application/json"]["schema"] if "oneOf" in schema: for subschema_index, subschema in enumerate(schema["oneOf"]): validate_schema(subschema) assert validate_against_openapi_schema( subschema["example"], path, method, status_code + "_" + str(subschema_index), ) continue validate_schema(schema) assert validate_against_openapi_schema( schema["example"], path, method, status_code ) class OpenAPIRegexTest(ZulipTestCase): def test_regex(self) -> None: """ Calls a few documented and undocumented endpoints and checks whether they find a match or not. """ # Some of the undocumentd endpoints which are very similar to # some of the documented endpoints. assert find_openapi_endpoint("/users/me/presence") is None assert find_openapi_endpoint("/users/me/subscriptions/23") is None assert find_openapi_endpoint("/users/iago/subscriptions/23") is None assert find_openapi_endpoint("/messages/matches_narrow") is None # Making sure documented endpoints are matched correctly. 
assert ( find_openapi_endpoint("/users/23/subscriptions/21") == "/users/{user_id}/subscriptions/{stream_id}" ) assert ( find_openapi_endpoint("/users/iago@zulip.com/presence") == "/users/{user_id_or_email}/presence" ) assert find_openapi_endpoint("/users/iago@zulip.com") == "/users/{email}" assert find_openapi_endpoint("/messages/23") == "/messages/{message_id}" assert find_openapi_endpoint("/realm/emoji/realm_emoji_1") == "/realm/emoji/{emoji_name}" class OpenAPIRequestValidatorTest(ZulipTestCase): def test_validator(self) -> None: """ Test to make sure the request validator works properly The tests cover both cases such as catching valid requests marked as invalid and making sure invalid requests are markded properly """ # `/users/me/subscriptions` doesn't require any parameters validate_request("/users/me/subscriptions", "get", {}, {}, False, "200") with self.assertRaises(SchemaError): # `/messages` POST does not work on an empty response validate_request("/messages", "post", {}, {}, False, "200") # 400 responses are allowed to fail validation. validate_request("/messages", "post", {}, {}, False, "400") # `intentionally_undocumented` allows validation errors on # 200 responses. validate_request( "/dev_fetch_api_key", "post", {}, {}, False, "200", intentionally_undocumented=True )
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) Ostap developers.
# =============================================================================
# @file ostap/histos/tests/test_histos_compare.py
# Test module for ostap/histos/compare.py
# - It tests comparison of 1D-histograms
# =============================================================================
"""Test module for ostap/histos/compare.py
- It tests comparison of 1D-histograms
"""
# =============================================================================
__author__ = "Ostap developers"
__all__    = ()  ## nothing to import
# =============================================================================
import ROOT, random
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ or '__builtin__' == __name__:
    logger = getLogger('ostap.test_histos_compare')
else:
    logger = getLogger(__name__)
# =============================================================================
logger.info('Test for 1D-histogram compare')
# =============================================================================
from ostap.math.ve import VE
from ostap.core.core import hID
from ostap.histos.histos import h1_axis
import ostap.histos.compare
from builtins import range

#
## histograms for gaussian distributions:
#  (1) and (2) share the same fine binning, (3) is coarser,
#  (4) uses random irregular bin edges, (5) is (4) rescaled.
#
h1g = ROOT.TH1D(hID(), '', 40, -5, 5); h1g.Sumw2()
h2g = ROOT.TH1D(hID(), '', 40, -5, 5); h2g.Sumw2()
h3g = ROOT.TH1D(hID(), '', 20, -5, 5); h3g.Sumw2()

## random (but sorted) irregular bin edges over [-5, 5]
bins = [-5]
random.seed(10)
for i in range(0, 15):
    bins.append(random.uniform(-5, 5))
bins += [5]
bins.sort()
h4g = h1_axis(bins)

#
## histograms for uniform distributions (same binning layouts)
#
h1u = h1g.clone()
h2u = h2g.clone()
h3u = h3g.clone()
h4u = h4g.clone()

#
## histograms for exponential distributions (same binning layouts)
#
h1e = h1g.clone()
h2e = h2g.clone()
h3e = h3g.clone()
h4e = h4g.clone()

## the generator value: mean 1, sigma 1.75
v = VE(1, 1.75 ** 2)

random.seed(10)
for i in range(0, 50000):

    g1 = v.gauss()
    g2 = v.gauss()
    g3 = g2  ## the same as g2
    g4 = v.gauss()
    h1g.Fill(g1)
    h2g.Fill(g2)
    h3g.Fill(g3)
    h4g.Fill(g4)

    u1 = random.uniform(-5, 5)
    u2 = random.uniform(-5, 5)
    u3 = u2  ## the same as u2
    u4 = random.uniform(-5, 5)
    h1u.Fill(u1)
    h2u.Fill(u2)
    h3u.Fill(u3)
    h4u.Fill(u4)

    e1 = -1 * random.expovariate(-0.5) - 5
    e2 = -1 * random.expovariate(-0.5) - 5
    e3 = e2  ## the same as e2
    e4 = -1 * random.expovariate(-0.5) - 5

    ## keep only in-range entries for the exponential histograms;
    ## NB: the gaussian/uniform fills above are NOT affected by these cuts
    if not -5 < e1 < 5: continue
    if not -5 < e2 < 5: continue
    if not -5 < e3 < 5: continue
    if not -5 < e4 < 5: continue

    h1e.Fill(e1)
    h2e.Fill(e2)
    h3e.Fill(e3)
    h4e.Fill(e4)

## rescaled versions of the irregular-bin histograms
h5g = h4g.rescale_bins(1)
h5u = h4u.rescale_bins(1)
h5e = h4e.rescale_bins(1)


## compare two histograms and log the comparison tables
def compare(h1, h2, title='', density=False):
    """Compare two 1D-histograms: print the summary and the difference tables.

    Parameters:
    - h1, h2   : the histograms to be compared
    - title    : title for the log output
    - density  : if True, compare the histograms as (normalized) densities
    """
    ## r1 = h1.cmp_fit ( h2 , opts = 'WL0Q' , density = density )
    ## if r1 : logger.info    ( 'h1 vs h2 : fit probability is %.5f%% ' % ( r1.Prob()*100 ) )
    ## else  : logger.warning ( 'h1 vs h2 : fit problems ')
    ## r2 = h2.cmp_fit ( h1 , opts = 'WL0Q' , density = density )
    ## if r2 : logger.info    ( 'h2 vs h1 : fit probability is %.5f%% ' % ( r2.Prob()*100 ) )
    ## else  : logger.warning ( 'h2 vs h1 : fit problems ')
    ## ct  = h1.cmp_cos  ( h2 , density = density )
    ## logger.info ( 'h1 vs h2 : cos(theta) is %s ' % ct  )
    ## dd1 = h1.cmp_dist ( h2 , density = density )
    ## logger.info ( 'h1 vs h2 : distance   is %s ' % dd1 )
    ## ## dd2 = h1.cmp_dist2 ( h2 , density = density )
    ## ## logger.info ( 'h1 vs h2 : distance2  is %s ' % dd2 )
    logger.info("%s\n%s" % (title, h1.cmp_prnt(
        h2, density=density, title=title, prefix='# ')))
    logger.info("%s\n%s" % (title, h1.cmp_diff_prnt(
        h2, density=density, title=title, prefix='# ')))

# =============================================================================
## compare gaussians
def test_compare_gaussians():

    compare(h1g, h2g, 'Compare gaussians (1) and (2)')
    compare(h1g, h3g, 'Compare gaussians (1) and (3)')
    compare(h1g, h4g, 'Compare gaussians (1) and (4)')
    compare(h1g, h4g, 'Compare gaussians (1) and (4) with rescale', density=True)
    compare(h1g, h5g, 'Compare gaussians (1) and (5)')
    compare(h2g, h3g, 'Compare gaussians (2) and (3) : should be the same!')
    compare(h2g, h4g, 'Compare gaussians (2) and (4)')
    compare(h2g, h4g, 'Compare gaussians (2) and (4) with rescale', density=True)
    compare(h2g, h5g, 'Compare gaussians (2) and (5)')
    compare(h3g, h4g, 'Compare gaussians (3) and (4)')
    compare(h3g, h4g, 'Compare gaussians (3) and (4) with rescale', density=True)
    compare(h3g, h5g, 'Compare gaussians (3) and (5)')
    compare(h4g, h5g, 'Compare gaussians (4) and (5)')


## compare uniforms
def test_compare_uniforms():

    compare(h1u, h2u, 'Compare uniforms (1) and (2)')
    compare(h1u, h3u, 'Compare uniforms (1) and (3)')
    compare(h1u, h4u, 'Compare uniforms (1) and (4)')
    compare(h1u, h4u, 'Compare uniforms (1) and (4) with rescale', density=True)
    compare(h1u, h5u, 'Compare uniforms (1) and (5)')
    compare(h2u, h3u, 'Compare uniforms (2) and (3) : should be the same!')
    compare(h2u, h4u, 'Compare uniforms (2) and (4)')
    compare(h2u, h4u, 'Compare uniforms (2) and (4) with rescale', density=True)
    ## FIX: was "compare(h2u, h4u, ...)" — the (2)-vs-(5) comparison must use
    ##      h5u, matching the gaussian and exponential counterparts.
    compare(h2u, h5u, 'Compare uniforms (2) and (5)')
    compare(h3u, h4u, 'Compare uniforms (3) and (4)')
    compare(h3u, h4u, 'Compare uniforms (3) and (4) with rescale', density=True)
    compare(h3u, h5u, 'Compare uniforms (3) and (5)')
    compare(h4u, h5u, 'Compare uniforms (4) and (5)')


## compare exponentials
def test_compare_exponentials():

    compare(h1e, h2e, 'Compare exponentials (1) and (2)')
    compare(h1e, h3e, 'Compare exponentials (1) and (3)')
    compare(h1e, h4e, 'Compare exponentials (1) and (4)')
    compare(h1e, h4e, 'Compare exponentials (1) and (4) with rescale', density=True)
    compare(h1e, h5e, 'Compare exponentials (1) and (5)')
    compare(h2e, h3e, 'Compare exponentials (2) and (3) : should be the same!')
    compare(h2e, h4e, 'Compare exponentials (2) and (4)')
    compare(h2e, h4e, 'Compare exponentials (2) and (4) with rescale', density=True)
    compare(h2e, h5e, 'Compare exponentials (2) and (5)')
    compare(h3e, h4e, 'Compare exponentials (3) and (4)')
    compare(h3e, h4e, 'Compare exponentials (3) and (4) with rescale', density=True)
    compare(h3e, h5e, 'Compare exponentials (3) and (5)')
    compare(h4e, h5e, 'Compare exponentials (4) and (5)')


## cross-compare every gaussian histogram with every uniform one
def test_compare_gauss_vs_uniform():

    _ig = 0
    for ig in (h1g, h2g, h3g, h4g, h5g):
        _ig += 1
        _iu = 0
        for iu in (h1u, h2u, h3u, h4u, h5u):
            _iu += 1
            compare(ig, iu, 'Compare gaussian (%d) and uniform (%d)' % (_ig, _iu))


## cross-compare every gaussian histogram with every exponential one
def test_compare_gauss_vs_exponent():

    _ig = 0
    for ig in (h1g, h2g, h3g, h4g, h5g):
        _ig += 1
        _ie = 0
        for ie in (h1e, h2e, h3e, h4e, h5e):
            _ie += 1
            compare(ig, ie, 'Compare gaussian (%d) and exponent (%d)' % (_ig, _ie))


## cross-compare every uniform histogram with every exponential one
def test_compare_uniform_vs_exponent():

    _iu = 0
    for iu in (h1u, h2u, h3u, h4u, h5u):
        _iu += 1
        _ie = 0
        for ie in (h1e, h2e, h3e, h4e, h5e):
            _ie += 1
            compare(iu, ie, 'Compare uniform (%d) and exponent (%d)' % (_iu, _ie))

# =============================================================================
if '__main__' == __name__:

    test_compare_gaussians()
    test_compare_uniforms()
    test_compare_exponentials()
    test_compare_gauss_vs_uniform()
    test_compare_gauss_vs_exponent()
    test_compare_uniform_vs_exponent()

    pass

# =============================================================================
## The END
# =============================================================================
#!/usr/bin/python2.5

# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Unit tests for transitfeed/util.py

import datetime
import re
from six import StringIO
import tests.util as test_util
from transitfeed import problems
from transitfeed.problems import ProblemReporter
from transitfeed import stop
from transitfeed import util
from transitfeed import version
import unittest
from six.moves import urllib


class ColorLuminanceTestCase(test_util.TestCase):
  def runTest(self):
    self.assertEqual(util.ColorLuminance('000000'), 0,
                     "ColorLuminance('000000') should be zero")
    self.assertEqual(util.ColorLuminance('FFFFFF'), 255,
                     "ColorLuminance('FFFFFF') should be 255")
    RGBmsg = ("ColorLuminance('RRGGBB') should be "
              "0.299*<Red> + 0.587*<Green> + 0.114*<Blue>")
    decimal_places_tested = 8
    self.assertAlmostEqual(util.ColorLuminance('640000'), 29.9,
                           decimal_places_tested, RGBmsg)
    self.assertAlmostEqual(util.ColorLuminance('006400'), 58.7,
                           decimal_places_tested, RGBmsg)
    self.assertAlmostEqual(util.ColorLuminance('000064'), 11.4,
                           decimal_places_tested, RGBmsg)
    self.assertAlmostEqual(util.ColorLuminance('1171B3'),
                           0.299*17 + 0.587*113 + 0.114*179,
                           decimal_places_tested, RGBmsg)


class FindUniqueIdTestCase(test_util.TestCase):
  def test_simple(self):
    d = {}
    for i in range(0, 5):
      d[util.FindUniqueId(d)] = 1
    # FIX: d.keys() returns a view in Python 3, which has no .sort() method;
    # sorted() works on both Python 2 and 3.
    k = sorted(d)
    self.assertEqual(('0', '1', '2', '3', '4'), tuple(k))

  def test_AvoidCollision(self):
    d = {'1': 1}
    d[util.FindUniqueId(d)] = 1
    self.assertEqual(2, len(d))
    self.assertFalse('3' in d, "Ops, next statement should add something to d")
    d['3'] = None
    d[util.FindUniqueId(d)] = 1
    self.assertEqual(4, len(d))


class ApproximateDistanceBetweenStopsTestCase(test_util.TestCase):
  def testEquator(self):
    stop1 = stop.Stop(lat=0, lng=100, name='Stop one', stop_id='1')
    stop2 = stop.Stop(lat=0.01, lng=100.01, name='Stop two', stop_id='2')
    self.assertAlmostEqual(
        util.ApproximateDistanceBetweenStops(stop1, stop2),
        1570, -1)  # Compare first 3 digits

  def testWhati(self):
    stop1 = stop.Stop(lat=63.1, lng=-117.2, name='whati one', stop_id='1')
    stop2 = stop.Stop(lat=63.102, lng=-117.201, name='whati two', stop_id='2')
    self.assertAlmostEqual(
        util.ApproximateDistanceBetweenStops(stop1, stop2),
        228, 0)


class TimeConversionHelpersTestCase(test_util.TestCase):
  def testTimeToSecondsSinceMidnight(self):
    self.assertEqual(util.TimeToSecondsSinceMidnight("01:02:03"), 3723)
    self.assertEqual(util.TimeToSecondsSinceMidnight("00:00:00"), 0)
    # Times past 24:00:00 are valid in GTFS (service after midnight).
    self.assertEqual(util.TimeToSecondsSinceMidnight("25:24:23"), 91463)
    try:
      util.TimeToSecondsSinceMidnight("10:15:00am")
    except problems.Error:
      pass  # expected
    else:
      self.fail("Should have thrown Error")

  def testFormatSecondsSinceMidnight(self):
    self.assertEqual(util.FormatSecondsSinceMidnight(3723), "01:02:03")
    self.assertEqual(util.FormatSecondsSinceMidnight(0), "00:00:00")
    self.assertEqual(util.FormatSecondsSinceMidnight(91463), "25:24:23")

  def testDateStringToDateObject(self):
    self.assertEqual(util.DateStringToDateObject("20080901"),
                     datetime.date(2008, 9, 1))
    self.assertEqual(util.DateStringToDateObject("20080841"), None)


class ValidationUtilsTestCase(test_util.TestCase):
  def testIsValidURL(self):
    self.assertTrue(util.IsValidURL("http://www.example.com"))
    self.assertFalse(util.IsValidURL("ftp://www.example.com"))
    self.assertFalse(util.IsValidURL(""))

  def testValidateURL(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)
    # An empty value is accepted without reporting a problem.
    self.assertTrue(util.ValidateURL("", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertTrue(util.ValidateURL("http://www.example.com", "col",
                                     problems))
    accumulator.AssertNoMoreExceptions()
    self.assertFalse(util.ValidateURL("ftp://www.example.com", "col",
                                      problems))
    e = accumulator.PopInvalidValue("col")
    accumulator.AssertNoMoreExceptions()

  def testIsValidHexColor(self):
    self.assertTrue(util.IsValidHexColor("33FF00"))
    self.assertFalse(util.IsValidHexColor("blue"))
    self.assertFalse(util.IsValidHexColor(""))

  def testIsValidLanguageCode(self):
    self.assertTrue(util.IsValidLanguageCode("de"))
    self.assertFalse(util.IsValidLanguageCode("Swiss German"))
    self.assertFalse(util.IsValidLanguageCode(""))

  def testValidateLanguageCode(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)
    self.assertTrue(util.ValidateLanguageCode("", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertTrue(util.ValidateLanguageCode("de", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertFalse(util.ValidateLanguageCode("Swiss German", "col",
                                               problems))
    e = accumulator.PopInvalidValue("col")
    accumulator.AssertNoMoreExceptions()

  def testIsValidTimezone(self):
    self.assertTrue(util.IsValidTimezone("America/Los_Angeles"))
    self.assertFalse(util.IsValidTimezone("Switzerland/Wil"))
    self.assertFalse(util.IsValidTimezone(""))

  def testValidateTimezone(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)
    self.assertTrue(util.ValidateTimezone("", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertTrue(util.ValidateTimezone("America/Los_Angeles", "col",
                                          problems))
    accumulator.AssertNoMoreExceptions()
    self.assertFalse(util.ValidateTimezone("Switzerland/Wil", "col",
                                           problems))
    e = accumulator.PopInvalidValue("col")
    accumulator.AssertNoMoreExceptions()

  def testIsValidDate(self):
    self.assertTrue(util.IsValidDate("20100801"))
    self.assertFalse(util.IsValidDate("20100732"))
    self.assertFalse(util.IsValidDate(""))

  def testValidateDate(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)
    self.assertTrue(util.ValidateDate("", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertTrue(util.ValidateDate("20100801", "col", problems))
    accumulator.AssertNoMoreExceptions()
    self.assertFalse(util.ValidateDate("20100732", "col", problems))
    e = accumulator.PopInvalidValue("col")
    accumulator.AssertNoMoreExceptions()


class FloatStringToFloatTestCase(test_util.TestCase):
  def runTest(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)

    self.assertAlmostEqual(0, util.FloatStringToFloat("0", problems))
    self.assertAlmostEqual(0, util.FloatStringToFloat(u"0", problems))
    self.assertAlmostEqual(1, util.FloatStringToFloat("1", problems))
    self.assertAlmostEqual(1, util.FloatStringToFloat("1.00000", problems))
    self.assertAlmostEqual(1.5, util.FloatStringToFloat("1.500", problems))
    self.assertAlmostEqual(-2, util.FloatStringToFloat("-2.0", problems))
    self.assertAlmostEqual(-2.5, util.FloatStringToFloat("-2.5", problems))
    self.assertRaises(ValueError, util.FloatStringToFloat, ".", problems)
    self.assertRaises(ValueError, util.FloatStringToFloat, "0x20", problems)
    self.assertRaises(ValueError, util.FloatStringToFloat, "-0x20", problems)
    self.assertRaises(ValueError, util.FloatStringToFloat, "0b10", problems)

    # These should issue a warning, but otherwise parse successfully
    self.assertAlmostEqual(0.001, util.FloatStringToFloat("1E-3", problems))
    e = accumulator.PopException("InvalidFloatValue")
    self.assertAlmostEqual(0.001, util.FloatStringToFloat(".001", problems))
    e = accumulator.PopException("InvalidFloatValue")
    self.assertAlmostEqual(-0.001, util.FloatStringToFloat("-.001", problems))
    e = accumulator.PopException("InvalidFloatValue")
    self.assertAlmostEqual(0, util.FloatStringToFloat("0.", problems))
    e = accumulator.PopException("InvalidFloatValue")

    accumulator.AssertNoMoreExceptions()


class NonNegIntStringToIntTestCase(test_util.TestCase):
  def runTest(self):
    accumulator = test_util.RecordingProblemAccumulator(self)
    problems = ProblemReporter(accumulator)

    self.assertEqual(0, util.NonNegIntStringToInt("0", problems))
    self.assertEqual(0, util.NonNegIntStringToInt(u"0", problems))
    self.assertEqual(1, util.NonNegIntStringToInt("1", problems))
    self.assertEqual(2, util.NonNegIntStringToInt("2", problems))
    self.assertEqual(10, util.NonNegIntStringToInt("10", problems))
    self.assertEqual(1234567890123456789,
                     util.NonNegIntStringToInt("1234567890123456789",
                                               problems))
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "-1", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "0x1", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "1.0", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "1e1", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "0x20", problems)
    self.assertRaises(ValueError,
                      util.NonNegIntStringToInt, "0b10", problems)
    self.assertRaises(TypeError,
                      util.NonNegIntStringToInt, 1, problems)
    self.assertRaises(TypeError,
                      util.NonNegIntStringToInt, None, problems)

    # These should issue a warning, but otherwise parse successfully
    self.assertEqual(1, util.NonNegIntStringToInt("+1", problems))
    e = accumulator.PopException("InvalidNonNegativeIntegerValue")
    self.assertEqual(1, util.NonNegIntStringToInt("01", problems))
    e = accumulator.PopException("InvalidNonNegativeIntegerValue")
    self.assertEqual(0, util.NonNegIntStringToInt("00", problems))
    e = accumulator.PopException("InvalidNonNegativeIntegerValue")

    accumulator.AssertNoMoreExceptions()


class CheckVersionTestCase(test_util.TempDirTestCaseBase):
  def setUp(self):
    # Save the real urlopen so tearDown can restore it after tests that
    # monkey-patch it with MockURLOpen methods.
    self.orig_urlopen = urllib.request.urlopen
    self.mock = MockURLOpen()
    self.accumulator = test_util.RecordingProblemAccumulator(self)
    self.problems = ProblemReporter(self.accumulator)

  def tearDown(self):
    self.mock = None
    urllib.request.urlopen = self.orig_urlopen

  def testAssignedDifferentVersion(self):
    util.CheckVersion(self.problems, '100.100.100')
    e = self.accumulator.PopException('NewVersionAvailable')
    self.assertEqual(e.version, '100.100.100')
    self.assertEqual(e.url, 'https://github.com/google/transitfeed')
    self.accumulator.AssertNoMoreExceptions()

  def testAssignedSameVersion(self):
    util.CheckVersion(self.problems, version.__version__)
    self.accumulator.AssertNoMoreExceptions()

  def testGetCorrectReturns(self):
    urllib.request.urlopen = self.mock.mockedConnectSuccess
    util.CheckVersion(self.problems)
    self.accumulator.PopException('NewVersionAvailable')

  def testPageNotFound(self):
    urllib.request.urlopen = self.mock.mockedPageNotFound
    util.CheckVersion(self.problems)
    e = self.accumulator.PopException('OtherProblem')
    self.assertTrue(re.search(r'we failed to reach', e.description))
    self.assertTrue(re.search(r'Reason: Not Found \[404\]', e.description))

  def testConnectionTimeOut(self):
    urllib.request.urlopen = self.mock.mockedConnectionTimeOut
    util.CheckVersion(self.problems)
    e = self.accumulator.PopException('OtherProblem')
    self.assertTrue(re.search(r'we failed to reach', e.description))
    self.assertTrue(re.search(r'Reason: Connection timed', e.description))

  def testGetAddrInfoFailed(self):
    urllib.request.urlopen = self.mock.mockedGetAddrInfoFailed
    util.CheckVersion(self.problems)
    e = self.accumulator.PopException('OtherProblem')
    self.assertTrue(re.search(r'we failed to reach', e.description))
    self.assertTrue(re.search(r'Reason: Getaddrinfo failed', e.description))

  def testEmptyIsReturned(self):
    urllib.request.urlopen = self.mock.mockedEmptyIsReturned
    util.CheckVersion(self.problems)
    e = self.accumulator.PopException('OtherProblem')
    self.assertTrue(re.search(r'we had trouble parsing', e.description))


class MockURLOpen:
  """Pretend to be a urllib.request.urlopen suitable for testing."""

  def mockedConnectSuccess(self, request):
    return StringIO('latest_version=100.0.1')

  def mockedPageNotFound(self, request):
    raise urllib.error.HTTPError(request.get_full_url(), 404, 'Not Found',
                                 request.header_items(), None)

  def mockedConnectionTimeOut(self, request):
    raise urllib.error.URLError('Connection timed out')

  def mockedGetAddrInfoFailed(self, request):
    raise urllib.error.URLError('Getaddrinfo failed')

  def mockedEmptyIsReturned(self, request):
    return StringIO()


if __name__ == '__main__':
  unittest.main()
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utilities for the Nexmark suite.

The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events. This util includes:

  - A Command class used to terminate the streaming jobs
    launched in nexmark_launcher.py by the DirectRunner.
  - A ParseEventFn DoFn to parse events received from PubSub.

Usage:

To run a process for a certain duration, define in the code:

  command = Command(process_to_terminate, args)
  command.run(timeout=duration)
"""

# pytype: skip-file

import json
import logging
import threading

import apache_beam as beam
from apache_beam.metrics import MetricsFilter
from apache_beam.runners.runner import PipelineResult  # pylint: disable=unused-import
from apache_beam.testing.benchmarks.nexmark.models import auction_bid
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
from apache_beam.testing.benchmarks.nexmark.models.field_name import FieldNames
from apache_beam.transforms import window
from apache_beam.utils.timestamp import Timestamp

_LOGGER = logging.getLogger(__name__)


class Command(object):
  """Runs a callable on a daemon thread, waiting at most ``timeout`` seconds.

  Used by nexmark_launcher.py to bound the runtime of streaming jobs
  started with the DirectRunner: the callable runs on a daemon thread and
  ``run`` returns once it finishes or the timeout elapses, whichever comes
  first (the daemon thread does not keep the process alive).
  """
  def __init__(self, cmd, args):
    self.cmd = cmd
    self.args = args

  def run(self, timeout):
    """Invoke ``self.cmd(*self.args)`` and wait up to ``timeout`` seconds."""
    def thread_target():
      # Consistency fix: use the module logger instead of the root logger
      # (the matching 'finished' message below already used _LOGGER).
      _LOGGER.debug(
          'Starting thread for %d seconds: %s', timeout, self.cmd.__name__)

      self.cmd(*self.args)
      # NOTE: logged when cmd returns, which may be before the timeout.
      _LOGGER.info(
          '%d seconds elapsed. Thread (%s) finished.',
          timeout,
          self.cmd.__name__)

    thread = threading.Thread(target=thread_target, name='Thread-timeout')
    thread.daemon = True
    thread.start()
    thread.join(timeout)


def setup_coder():
  """Register the nexmark model coders with the global coder registry."""
  beam.coders.registry.register_coder(
      nexmark_model.Auction, nexmark_model.AuctionCoder)
  beam.coders.registry.register_coder(
      nexmark_model.Person, nexmark_model.PersonCoder)
  beam.coders.registry.register_coder(nexmark_model.Bid, nexmark_model.BidCoder)
  beam.coders.registry.register_coder(
      auction_bid.AuctionBid, auction_bid.AuctionBidCoder)


class ParseEventFn(beam.DoFn):
  """
  Original parser for parsing raw events info into a Python objects.

  Each event line has the following format:

    person: <id starting with 'p'>,name,email,credit_card,city, \
            state,timestamp,extra
    auction: <id starting with 'a'>,item_name, description,initial_bid, \
             reserve_price,timestamp,expires,seller,category,extra
    bid: <auction starting with 'b'>,bidder,price,timestamp,extra

  For example:

    'p12345,maria,maria@maria.com,1234-5678-9012-3456, \
        sunnyvale,CA,1528098831536'
    'a12345,car67,2012 hyundai elantra,15000,20000, \
        1528098831536,20180630,maria,vehicle'
    'b12345,maria,20000,1528098831536'
  """
  def process(self, elem):
    # The first character of the line selects the model class.
    model_dict = {
        'p': nexmark_model.Person,
        'a': nexmark_model.Auction,
        'b': nexmark_model.Bid,
    }
    row = elem.split(',')
    model = model_dict.get(elem[0])
    if not model:
      raise ValueError('Invalid event: %s.' % row)

    event = model(*row)
    # Consistency fix: module logger instead of the root logger.
    _LOGGER.debug('Parsed event: %s', event)
    yield event


class ParseJsonEventFn(beam.DoFn):
  """Parses the raw event info into a Python objects.

  Each event line has the following format:

    person: {id,name,email,credit_card,city, \
            state,timestamp,extra}
    auction: {id,item_name, description,initial_bid, \
             reserve_price,timestamp,expires,seller,category,extra}
    bid: {auction,bidder,price,timestamp,extra}

  For example:

    {"id":1000,"name":"Peter Jones","emailAddress":"nhd@xcat.com",\
        "creditCard":"7241 7320 9143 4888","city":"Portland","state":"WY",\
        "dateTime":1528098831026,\"extra":"WN_HS_bnpVQ\\[["}

    {"id":1000,"itemName":"wkx mgee","description":"eszpqxtdxrvwmmywkmogoahf",\
        "initialBid":28873,"reserve":29448,"dateTime":1528098831036,\
        "expires":1528098840451,"seller":1000,"category":13,"extra":"zcuupiz"}

    {"auction":1000,"bidder":1001,"price":32530001,"dateTime":1528098831066,\
        "extra":"fdiysaV^]NLVsbolvyqwgticfdrwdyiyofWPYTOuwogvszlxjrcNOORM"}
  """
  def process(self, elem):
    json_dict = json.loads(elem)
    # Some encodings nest timestamps as {'millis': ...}; flatten to millis.
    # (idiom fix: isinstance instead of `type(...) is dict`)
    if isinstance(json_dict[FieldNames.DATE_TIME], dict):
      json_dict[FieldNames.DATE_TIME] = json_dict[
          FieldNames.DATE_TIME]['millis']
    # The set of keys present distinguishes Person / Auction / Bid events.
    if FieldNames.NAME in json_dict:
      yield nexmark_model.Person(
          json_dict[FieldNames.ID],
          json_dict[FieldNames.NAME],
          json_dict[FieldNames.EMAIL_ADDRESS],
          json_dict[FieldNames.CREDIT_CARD],
          json_dict[FieldNames.CITY],
          json_dict[FieldNames.STATE],
          millis_to_timestamp(json_dict[FieldNames.DATE_TIME]),
          json_dict[FieldNames.EXTRA])
    elif FieldNames.ITEM_NAME in json_dict:
      if isinstance(json_dict[FieldNames.EXPIRES], dict):
        json_dict[FieldNames.EXPIRES] = json_dict[FieldNames.EXPIRES]['millis']
      yield nexmark_model.Auction(
          json_dict[FieldNames.ID],
          json_dict[FieldNames.ITEM_NAME],
          json_dict[FieldNames.DESCRIPTION],
          json_dict[FieldNames.INITIAL_BID],
          json_dict[FieldNames.RESERVE],
          millis_to_timestamp(json_dict[FieldNames.DATE_TIME]),
          millis_to_timestamp(json_dict[FieldNames.EXPIRES]),
          json_dict[FieldNames.SELLER],
          json_dict[FieldNames.CATEGORY],
          json_dict[FieldNames.EXTRA])
    elif FieldNames.AUCTION in json_dict:
      yield nexmark_model.Bid(
          json_dict[FieldNames.AUCTION],
          json_dict[FieldNames.BIDDER],
          json_dict[FieldNames.PRICE],
          millis_to_timestamp(json_dict[FieldNames.DATE_TIME]),
          json_dict[FieldNames.EXTRA])
    else:
      raise ValueError('Invalid event: %s.' % str(json_dict))


class CountAndLog(beam.PTransform):
  """Counts the elements of a PCollection globally and logs the total."""
  def expand(self, pcoll):
    return (
        pcoll
        | 'window' >> beam.WindowInto(window.GlobalWindows())
        | "Count" >> beam.combiners.Count.Globally()
        | "Log" >> beam.Map(log_count_info))


def log_count_info(count):
  """Log the query result count and pass it through unchanged."""
  _LOGGER.info('Query resulted in %d results', count)
  return count


def display(elm):
  """Debug-log an element and pass it through unchanged (for beam.Map)."""
  _LOGGER.debug(elm)
  return elm


def model_to_json(model):
  """Serialize a nexmark model object to a compact JSON string."""
  return json.dumps(construct_json_dict(model), separators=(',', ':'))


def construct_json_dict(model):
  """Build a JSON-compatible dict from a model, unnesting nested models."""
  return {k: unnest_to_json(v) for k, v in model.__dict__.items()}


def unnest_to_json(cand):
  """Convert a field value into a JSON-serializable representation.

  Timestamps become integer millis; nested models become dicts; everything
  else is returned unchanged.
  """
  if isinstance(cand, Timestamp):
    return cand.micros // 1000
  elif isinstance(
      cand, (nexmark_model.Auction, nexmark_model.Bid, nexmark_model.Person)):
    return construct_json_dict(cand)
  else:
    return cand


def millis_to_timestamp(millis):
  # type: (int) -> Timestamp

  """Convert epoch milliseconds to a beam Timestamp."""
  micro_second = millis * 1000
  return Timestamp(micros=micro_second)


def get_counter_metric(result, namespace, name):
  # type: (PipelineResult, str, str) -> int

  """
  get specific counter metric from pipeline result

  Args:
    result: the PipelineResult which metrics are read from
    namespace: a string representing the namespace of wanted metric
    name: a string representing the name of the wanted metric

  Returns:
    the result of the wanted metric if it exist, else -1
  """
  metrics = result.metrics().query(
      MetricsFilter().with_namespace(namespace).with_name(name))
  counters = metrics['counters']
  if len(counters) > 1:
    raise RuntimeError(
        '%d instead of one metric result matches name: %s in namespace %s' %
        (len(counters), name, namespace))
  return counters[0].result if len(counters) > 0 else -1


def get_start_time_metric(result, namespace, name):
  # type: (PipelineResult, str, str) -> int

  """
  get the start time out of all times recorded by the specified distribution
  metric

  Args:
    result: the PipelineResult which metrics are read from
    namespace: a string representing the namespace of wanted metric
    name: a string representing the name of the wanted metric

  Returns:
    the smallest time in the metric or -1 if it doesn't exist
  """
  distributions = result.metrics().query(
      MetricsFilter().with_namespace(namespace).with_name(
          name))['distributions']
  # idiom fix: comprehension instead of map(lambda ...).
  min_list = [metric.result.min for metric in distributions]
  return min(min_list) if len(min_list) > 0 else -1


def get_end_time_metric(result, namespace, name):
  # type: (PipelineResult, str, str) -> int

  """
  get the end time out of all times recorded by the specified distribution
  metric

  Args:
    result: the PipelineResult which metrics are read from
    namespace: a string representing the namespace of wanted metric
    name: a string representing the name of the wanted metric

  Returns:
    the largest time in the metric or -1 if it doesn't exist
  """
  distributions = result.metrics().query(
      MetricsFilter().with_namespace(namespace).with_name(
          name))['distributions']
  max_list = [metric.result.max for metric in distributions]
  return max(max_list) if len(max_list) > 0 else -1
#!/usr/bin/env python3
# coding: utf-8

#--------------------------------------------
# Authors:
# Lukas Kurth <l.kurth@fz-juelich.de>
# Frank Boers <f.boers@fz-juelich.de>,
#
#--------------------------------------------
# Date: 10.03.20
#--------------------------------------------
# License: BSD (3-clause)
#--------------------------------------------
# Updates
#--------------------------------------------
# wx GUI: renders a JuMEG config (yaml/json) as an editable tree of
# typed wx controls, and can read the edited values back into a dict.

import getpass,datetime,platform
import os,sys,argparse,pprint
# from ruamel.ordereddict import ordereddict
from collections import OrderedDict
from copy import deepcopy

import wx
import wx.lib.agw.customtreectrl as CT
from wx.lib.agw.customtreectrl import CustomTreeCtrl

from jumeg.gui.wxlib.utils.jumeg_gui_wxlib_utils_controls import SpinCtrlScientific,EditableListBoxPanel,JuMEG_wxSTXTBTCtrl
from jumeg.base.jumeg_base import jumeg_base as jb
from jumeg.base.jumeg_base_config import JuMEG_CONFIG
from jumeg.base import jumeg_logger

logger = jumeg_logger.get_logger()

__version__= "2020.04.28.001"

# platform.python_version()

class JuMEG_ConfigTreeCtrl(CustomTreeCtrl):
    """Tree control that maps a (nested) config dict onto editable wx widgets.

    Each leaf value gets a control chosen by its Python type (bool ->
    CheckBox, str -> TextCtrl or path button, list -> EditableListBoxPanel,
    int -> SpinCtrl, float -> SpinCtrlDouble/SpinCtrlScientific, None ->
    TextCtrl showing "None"); GetData() reads the controls back into a dict.
    """

    def __init__(self,parent,**kwargs):
        style = (CT.TR_DEFAULT_STYLE| CT.TR_SINGLE |CT.TR_HAS_VARIABLE_ROW_HEIGHT |CT.TR_ELLIPSIZE_LONG_ITEMS|CT.TR_TOOLTIP_ON_LONG_ITEMS |CT.TR_ALIGN_WINDOWS)
        super().__init__(parent,agwStyle=style)
        self.root_name = "jumeg"
        self.verbose = False
        #--- float settings (range/precision defaults for SpinCtrlDouble)
        self.float_digits = 3
        self.float_min = -1000.0
        self.float_max = 1000.0
        self.float_inc = 0.1
        #--- special dict keys controlling display order
        self._root_key = "_root_keys"
        self._sorted_key = "_sorted_keys"
        self._sorted_keys = []
        self._list_seperator = " "
        self._data = dict()
        self._wx_init(**kwargs)
        self._info = dict()
        self._used_dict = dict()

    def update(self,data=None,root_name=None,item_data=None):
        ''' initialises a new TreeCtrl '''
        self._clear()
        if item_data==None: item_data=dict()
        if root_name: self.root_name=root_name
        self._wx_init(data=data,root_name=self.root_name,item_data=item_data)

    def sort(self,keys):
        pass

    # def _get_item_data(self,data,item_data):
    #     self.__get_item_data(data,item_data)

    def _get_item_data(self,data,item_data):
        # Recursively read control values from the widget map ``data`` into
        # the plain dict ``item_data``; control names ("list_*", "NoneStr",
        # ...) encode how to convert the string value back to its orig type.
        if data==None:
            logger.exception("data is None")
            return
        keys = list(data.keys())
        keys.sort()
        #item_data = OrderedDict.fromkeys( keys )
        '''
        ToDo
        from collections import OrderedDict
        personA = OrderedDict([
            (u'score', OrderedDict([
                (u'2015-09-09 03:40:33 +0100', 2646),
                (u'2015-09-10 03:35:34 +0100', 2646),
            ])),
            (u'adjusted_score', OrderedDict([
                (u'2015-09-09 03:40:33 +0100', 3646),
                (u'2015-09-10 03:35:34 +0100', 3646),
            ]))
        ])
        '''
        for k in keys:
            if k.startswith("_"): #_keys
                item_data[k] = deepcopy( data[k] )
                continue
            v = data[k]
            if isinstance(v,(dict)):
                item_data[k] = dict()
                self._get_item_data(data[k],item_data[k])
            else:
                try:
                    #--- ck for list as data type and convert str in list to orig data types
                    if v.GetName().startswith("list"):
                        dtype = v.GetName().split("_")[1]
                        # d = v.GetLineText(lineNo=0).split(self._list_seperator)
                        d = v.GetValue()
                        if (d):
                            if dtype == "float":
                                item_data[k] = [float(x) for x in d]
                            elif dtype == "int":
                                item_data[k] = [int(x) for x in d]
                            else:
                                item_data[k]=d
                        else: # str
                            item_data[k]=list()
                    #--- None: check for "None" or "None,0" style entries
                    elif v.GetName().startswith("NoneStr"):# check for "None" or "None,0"
                        d = v.GetValue().strip()
                        if d.upper() == "NONE" or d.upper()=="NULL":
                            item_data[k] = None
                        else:
                            d = [ x.strip() for x in d.split(",") ]
                            for i in range(len(d)):
                                if (d[i].upper() == "NONE") or (d[i].upper()=="NULL"):
                                    d[i] = None
                            item_data[k] = d
                    else:
                        item_data[k]=v.GetValue()
                except:
                    logger.exception("ERROR")
                    continue
        # info, _keys
        return item_data

    def GetData(self):
        # Read the whole tree back into a plain dict, refreshing the
        # "info" block (user/time/version) on the way.
        data = self._item_data
        keys = list(data.keys())
        item_data = dict() #OrderedDict.fromkeys( ["info",*keys] )
        item_data["info"] = self.update_info()
        for k in keys:
            item_data[k] = dict()
            self._get_item_data(data[k],item_data[k])
        return item_data

    def _clear(self):
        ''' deletes the actual TreeCtrl '''
        self.DeleteAllItems()
        self._data=None

    def _init_tree_ctrl(self,data=None,root=None,item_data=None):
        ''' builds a new TreeCtrl recursively based on the data which is given as a dict '''
        if data==None:
            logger.exception("data is None")
            return
        txt_size = 30
        style = wx.TE_RIGHT
        if not root:
            root = self.root
        klist = []
        dlist = []
        keys = list(data.keys())
        keys.sort()
        #--- sort keys:
        # global sorted keys, local sorted keys, keys, dict-keys
        #--- global sorted keys
        skeys = [ *self._sorted_keys ]
        #--- extend with local sorted keys
        if self._sorted_key in keys:
            skeys.extend( data.get( self._sorted_key,[] ) )
        for k in keys:
            if k in skeys: continue
            if isinstance( data[k],(dict) ):
                dlist.append(k)
            else:
                klist.append(k)
        # display order: sorted keys first, then scalars, then sub-dicts
        keys =[*skeys, *klist, *dlist]
        for k in keys:
            if k.startswith("_") :
                item_data[k] = data[k]
                continue
            if not k in data.keys() : continue
            # if item_data.get(k,None): continue
            v = data[k]
            child= None
            ctrl = None
            #--- type dict recursive
            if isinstance(v,(dict)):
                item_data[k] = dict()
                child = self.AppendItem(root,"{}".format(k),ct_type=0)
                self._init_tree_ctrl(data=data[k],root=child,item_data=item_data[k])
                continue
            elif isinstance(v,(bool)):
                ctrl=wx.CheckBox(self,-1,name="bool") #label=k
                ctrl.SetValue(v)
                child = self.AppendItem(root,"{}".format(k),wnd=ctrl)
                self.SetItemBold(child,True)
            elif isinstance(v,(str)):
                # Strings that look like paths get a button that opens a
                # file/dir dialog; plain strings get a TextCtrl.
                if os.path.dirname(v):
                    ctrl = JuMEG_wxSTXTBTCtrl(self,name="TEST",label=v,cmd=self.ClickOnShowDLG,textlength=txt_size,style=style)
                else:
                    ctrl = wx.TextCtrl(self,-1,style=wx.TE_LEFT,value=v,name="str")
                    sz = ctrl.GetSizeFromTextSize(ctrl.GetTextExtent("W" * txt_size))
                    ctrl.SetInitialSize(sz)
                child = self.AppendItem(root,"{}".format(k),wnd=ctrl,ct_type=0)
            elif isinstance(v,(list)):
                ctrl = EditableListBoxPanel(self,label=k.upper())
                #--- ck data type for later reconstruction from listbox (string)
                dtype = str( type( v[0] ) ).lower()
                name = "list"
                if dtype.find("float")>-1:
                    name+= "_float"
                elif dtype.find("int") > -1:
                    name += "_int"
                else:
                    name +="_str"
                ctrl.SetName(name)
                ctrl.Value = v
                child = self.AppendItem(root,"{}".format(k),wnd=ctrl)
            elif isinstance(v,(int)):
                ctrl=wx.SpinCtrl(self,-1,"",(30,50),name="int")
                ctrl.SetRange(0,10000)
                ctrl.SetValue(v)
                child=self.AppendItem(root,"{}".format(k),wnd=ctrl)
                self.SetItemBold(child,True)
            elif isinstance(v,(float)):
                # v = float(v)
                # e.g.: 1.123456 or 5.123e-11 convert to float
                # scientific notation -> special control
                if str(v).find("e")>0:
                    ctrl = SpinCtrlScientific(self,name="float")
                    # ctrl = wx.SpinCtrlDouble(self,inc=self.float_inc,name="float",style=wx.SP_ARROW_KEYS)
                else:
                    ctrl = wx.SpinCtrlDouble(self,inc=self.float_inc,name="float",style=wx.SP_ARROW_KEYS)
                    ctrl.Digits = self.float_digits
                # widen the allowed range if the value falls outside it
                ctrl.Min = self.float_min
                ctrl.Max = self.float_max
                if v < ctrl.Min:
                    ctrl.Min = abs(v) * -2.0
                if v > ctrl.Max:
                    ctrl.Max = abs(v) * 2.0
                ctrl.Value = v
                child = self.AppendItem(root,"{}".format(k),wnd=ctrl)
            else: # None => txt
                ctrl = wx.TextCtrl(self,-1,style=wx.TE_LEFT,value="None",name="NoneStr")
                sz = ctrl.GetSizeFromTextSize(ctrl.GetTextExtent("W" * txt_size))
                ctrl.SetInitialSize(sz)
                child = self.AppendItem(root,"{}".format(k),wnd=ctrl,ct_type=0)
            item_data[k]=ctrl
            try:
                self.SetPyData(child,data[k])
            except:
                logger.exception("key: {}\n -> data: {}".format(k,data.get(k)))

    def ClickOnShowDLG(self,evt):
        """
        shows File, DirDialog depending on file extention
        :param evt:
        :return:
        """
        try:
            obj = evt.GetEventObject()
        except:
            obj = evt # txt ctrl
        p = jb.expandvars( obj.GetValue() )
        if os.path.isdir(p):
            with wx.DirDialog(self,message=obj.GetName(),defaultPath=p,style=wx.DD_DEFAULT_STYLE,name=obj.GetName() + "_DLG") as DLG:
                DLG.SetPath( p )
                if DLG.ShowModal() == wx.ID_CANCEL:
                    return # the user changed their mind
                obj.SetValue( DLG.GetPath() )
        else:
            # build a wildcard from the current file extension
            fext = p.rsplit(".",1)[-1]
            wc = "files (*." +fext+",*.*)|*."+fext+";*.all"
            with wx.FileDialog(self,"{} => Select File Name".format(obj.GetName()),wildcard=wc, style=wx.DD_DEFAULT_STYLE, name=obj.GetName() + "_DLG") as DLG:
                DLG.SetPath(p)
                if DLG.ShowModal() == wx.ID_CANCEL:
                    return # the user changed their mind
                obj.SetValue( DLG.GetPath() )

    def update_info(self):
        ''' updates the time,version and user '''
        now = datetime.datetime.now()
        dt = now.strftime('%Y-%m-%d')+" "+now.strftime('%H:%M')
        self._info={
            "user": getpass.getuser(),
            "time": dt,
            "gui-version": __version__,
            "python-version": platform.python_version()
        }
        return self._info

    def update_used_dict(self):
        ''' updates the used_dict i.e. the dict used for process '''
        self._used_dict=self.GetData()

    def info(self):
        # Log the current tree contents, pretty-printed.
        logger.info("config info:\n {}\n".format(pprint.pformat(self.GetData(),indent=4)))

    def _wx_init(self,**kwargs):
        data = kwargs.get("data",{ })
        if not data: return
        item_data = dict()
        #--- get default sorted keys
        '''
        _keys:
            _root_keys: ["info","global","noise_reducer","suggest_bads","interpolate_bads","ica","report"]
            _sorted_keys: ["run","overwrite","save","plot","plot_show","plot_dir"]
        '''
        wxkeys = data.get("_keys")
        if wxkeys:
            self._sorted_keys = wxkeys.get(self._sorted_key,[])
            #--- get a sorted root key list avoid double items
            skeys = wxkeys.get(self._root_key,[] )
            keys = skeys + [ k for k in list( data ) if k not in skeys ]
            item_data["_keys"]= deepcopy( wxkeys )
        else:
            keys = list( data ) #data.keys() => view of obj !!!
        # "info" is regenerated on save, so it is not shown in the tree
        if "info" in keys:
            keys.remove("info")
        #---
        self.root_name = kwargs.get("root_name", self.root_name)
        self.root = self.AddRoot(kwargs.get("root",self.root_name))
        self._info = data.get("info")
        for k in keys:
            if k.startswith("_"): continue
            d = data.get(k,None)
            if isinstance(d,(dict)):
                item_data[k]=dict()
                child = self.AppendItem(self.root,"{}".format(k))
                self._init_tree_ctrl( data=d ,root=child,item_data=item_data[k])
                self.AppendSeparator(self.root)
        self._item_data = item_data
        self.Expand(self.root)


class JuMEG_wxConfig(wx.Panel):
    """Panel wrapping a JuMEG_ConfigTreeCtrl plus Open/Show/Save/Update/Close buttons."""

    def __init__(self,parent,**kwargs):
        super().__init__(parent)
        self.root_name="jumeg"
        self.SetName(kwargs.get("name","test"))
        self._CfgTreeCtrl = None
        self._CfgTreeCtrlPNL = None
        self._wx_init(**kwargs)
        self._ApplyLayout()

    @property
    def verbose(self): return self.CFG.verbose
    @verbose.setter
    def verbose(self,v):
        self.CFG.verbose = v
        if self.CfgTreeCtrl:
            self.CfgTreeCtrl.verbose = v

    @property
    def CFG(self): return self._CFG

    @property
    def CfgTreeCtrl(self): return self._CfgTreeCtrl

    def _wx_init(self,**kwargs):
        self.SetBackgroundColour(wx.GREEN)
        self._CfgTreeCtrlPNL = wx.Panel(self)
        self._init_cfg(**kwargs)
        #--- init buttons
        # NOTE(review): original comment "fehlerhaft show button" (German:
        # "faulty show button") flagged the Show button as buggy — verify.
        self._bt_open = wx.Button(self,label="Open",name=self.GetName()+".BT.OPEN")
        self._bt_info = wx.Button(self,label="Show", name=self.GetName()+".BT.SHOW")
        self._bt_save = wx.Button(self,label="Save", name=self.GetName()+".BT.SAVE")
        self._bt_update =wx.Button(self,label="Update", name=self.GetName()+".BT.UPDATE")
        self._bt_close = wx.Button(self,label="Close",name=self.GetName()+".BT.CLOSE")
        self.Bind(wx.EVT_BUTTON,self.ClickOnButton)

    def _update_TreeCtrl(self):
        # Rebuild the existing tree, or create it on first use.
        if self._CfgTreeCtrl:
            self._CfgTreeCtrl._clear()
            self.CfgTreeCtrl.update(data=self.CFG.GetDataDict(),root_name=self.root_name)
        else:
            #--- init & pack ctrl
            self._CfgTreeCtrl = JuMEG_ConfigTreeCtrl(self._CfgTreeCtrlPNL,root_name=self.root_name,data=self.CFG.GetDataDict())
            self.CfgTreeCtrl.verbose = self.verbose
            LEA = wx.LEFT | wx.EXPAND | wx.ALL
            vbox = wx.BoxSizer(wx.VERTICAL)
            vbox.Add(self._CfgTreeCtrl,1,LEA,4)
            self._CfgTreeCtrlPNL.SetSizer(vbox)
            self._CfgTreeCtrlPNL.SetAutoLayout(True)
            self._CfgTreeCtrlPNL.Fit()
            self._CfgTreeCtrlPNL.Layout()
        self.Layout()

    def _init_cfg(self, **kwargs):
        self._CFG = JuMEG_CONFIG(**kwargs)
        if self.CFG.update(**kwargs):
            self._update_TreeCtrl()

    def FDLGSave(self, event=None):
        ''' opens a menu to save the current data into a .yaml file '''
        with wx.FileDialog(self, "Save config file", wildcard="config files (*.yaml,*.json)|*.yaml;*.json",
                           style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as FDGL:
            FDGL.SetDirectory(os.path.dirname(self.CFG.filename))
            if FDGL.ShowModal() == wx.ID_CANCEL:
                return # the user changed their mind
            # save the current contents in the file
            # default to .yaml if the chosen name has no known extension
            fname,ext = FDGL.GetPath().rsplit(".",1)
            if ext in ["yaml","json"]:
                pathname = FDGL.GetPath()
            else:
                pathname = fname+".yaml"
            try:
                data = self.CfgTreeCtrl._used_dict
                self.CFG.save_cfg(fname=pathname,data=data)
            except IOError:
                wx.LogError("ERROR Can not save current data in config file '%s'." % pathname)

    def FDLGOpen(self, event=None):
        ''' opens a dialogue to load a [.yaml|.json] file and build a tree out of it '''
        # otherwise ask the user what new file to open
        with wx.FileDialog(self, "Open config file", wildcard="config files (*.yaml,+.json)|*.yaml;*.json",
                           style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST) as FDGL:
            p = "."
            if self.CFG.filename:
                p = os.path.dirname(self.CFG.filename)
            FDGL.SetDirectory(p)
            if FDGL.ShowModal() == wx.ID_CANCEL:
                return None # the user changed their mind
            # Proceed loading the file chosen by the user
            return FDGL.GetPath()

    def ClickOnOpenConfigFile(self):
        # Open a new config; offer to save the currently loaded one first.
        fcfg = self.FDLGOpen()
        if not fcfg: return False
        if self.CfgTreeCtrl: # one config file is loaded
            if wx.MessageBox("Do you want to save?", "Please confirm",wx.ICON_QUESTION | wx.YES_NO, self) == wx.YES:
                self.ClickOnSaveConfigFile()
        self.CFG.update(config=fcfg)
        self._update_TreeCtrl()

    def ClickOnSaveConfigFile(self):
        # Refresh info/used_dict from the widgets before saving.
        self._CfgTreeCtrl.update_info()
        self._CfgTreeCtrl.update_used_dict()
        self.FDLGSave()

    def ClickOnButton(self,evt):
        '''
        implements the show, save, update and open buttons
        :param evt: the button which has been clicked
        '''
        obj = evt.GetEventObject()
        if obj.GetName().endswith(".BT.SHOW"):
            self.CfgTreeCtrl.info()
        elif obj.GetName().endswith(".BT.SAVE"):
            self.ClickOnSaveConfigFile()
            evt.Skip()
        elif obj.GetName().endswith(".BT.UPDATE"):
            self._CfgTreeCtrl.update_used_dict()
        elif obj.GetName().endswith(".BT.OPEN"):
            self.ClickOnOpenConfigFile()
            evt.Skip()
        else:
            evt.Skip()

    def _ApplyLayout(self):
        LEA = wx.LEFT | wx.EXPAND | wx.ALL
        vbox = wx.BoxSizer(wx.VERTICAL)
        #---
        st1 = wx.StaticLine(self)
        st1.SetBackgroundColour("GREY85")
        st2 = wx.StaticLine(self)
        st2.SetBackgroundColour("GREY80")
        vbox.Add(st1,0,LEA,1)
        st = wx.StaticLine(self)
        st.SetBackgroundColour("GREY85")
        vbox.Add(self._CfgTreeCtrlPNL,1,LEA,1)
        vbox.Add(st,0,LEA,1)
        #--- buttons
        hbox= wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add(self._bt_close,0,LEA,2)
        hbox.Add((0,0),1,LEA,2)
        hbox.Add(self._bt_update,0,LEA,2)
        hbox.Add(self._bt_info,0,LEA,2)
        hbox.Add(self._bt_save,0,LEA,2)
        hbox.Add(self._bt_open,0,LEA,2)
        vbox.Add(hbox,0,LEA,2)
        self.SetAutoLayout(True)
        self.SetSizer(vbox)
        self.Fit()
        self.Layout()


class MainWindow(wx.Frame):
    """Top-level frame hosting a JuMEG_wxConfig panel and a status bar."""

    def __init__(self, parent, title, **kwargs):
        wx.Frame.__init__(self, parent, -1,title=title)
        self._wx_init(**kwargs)

    def _update_from_kwargs(self,**kwargs):
        pass

    def _wx_init(self,**kwargs):
        w,h = wx.GetDisplaySize()
        self.SetSize(w/4.0,h/3.0)
        self.Center()
        self._update_from_kwargs(**kwargs)
        #--- init STB in a new CLS
        self._STB = self.CreateStatusBar()
        self._STB.SetStatusStyles([wx.SB_SUNKEN])
        self._PNL = JuMEG_wxConfig(self,**kwargs)
        self.Bind(wx.EVT_BUTTON,self.ClickOnButton)
        self.Bind(wx.EVT_CLOSE,self.ClickOnClose)

    def ClickOnButton(self,evt):
        '''
        implements the close button event or skips the event
        '''
        obj = evt.GetEventObject()
        if obj.GetName().endswith("CLOSE"):
            self.Close()
        if obj.GetName().endswith("OPEN"):
            self._STB.SetStatusText(self._PNL.CFG.fname)
        if obj.GetName().endswith("SAVE"):
            self._STB.SetStatusText(self._PNL.CFG.fname)
        else:
            evt.Skip()

    def ClickOnClose(self,evt):
        '''
        implements the close button event
        '''
        #--- place to clean your stuff
        evt.Skip()

#---
def run(opt):
    '''
    runs the project
    '''
    # debug mode forces verbose and a fixed test config location
    if opt.debug:
        opt.verbose = True
        opt.debug = True
        opt.path = "./config/"
        #opt.config = "test_config.yaml"
        opt.config = "test_config.json"
    app = wx.App()
    if opt.path:
        cfg = os.path.join(opt.path,opt.config)
    else:
        cfg = opt.config
    frame = MainWindow(None,'JuMEG Config',config=cfg,verbose=opt.verbose,debug=opt.debug)
    frame.Show()
    app.MainLoop()

#----
def get_args(argv):
    # Parse command-line options; store_true flags are re-checked against
    # the raw argv so they are only True if actually given on the cmd line.
    info_global = """
    JuMEG Config GUI Start Parameter

    ---> view time series data FIF file
    jumeg_cfg_gui01.py --config=test_config.yaml --path=./config -v
    """
    parser = argparse.ArgumentParser(info_global)
    parser.add_argument("-p","--path",help="config file path")
    parser.add_argument("-cfg","--config",help="config file name")
    parser.add_argument("-v","--verbose",action="store_true",help="verbose mode")
    parser.add_argument("-d","--debug",action="store_true",help="debug mode")
    #--- init flags
    # ck if flag is set in argv as True
    # problem can not switch on/off flag via cmd call
    opt = parser.parse_args()
    for g in parser._action_groups:
        for obj in g._group_actions:
            if str(type(obj)).endswith('_StoreTrueAction\'>'):
                if vars(opt).get(obj.dest):
                    opt.__dict__[obj.dest] = False
                    for flg in argv:
                        if flg in obj.option_strings:
                            opt.__dict__[obj.dest] = True
                            break
    return opt,parser

#=========================================================================================
#==== MAIN
#=========================================================================================
if __name__ == "__main__":
    opt,parser = get_args(sys.argv)
    """if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(-1)"""
    jumeg_logger.setup_script_logging(name=sys.argv[0],opt=opt,logger=logger)
    run(opt)
# coding=utf-8

"""
This module, generator_parser.py, will be used to give live syntax commenting and formatting to *_generator.py files.
"""

import tempfile
from typing import List

from code_manager.abstract_definitions import classes
from code_manager.abstract_definitions import code_file
from code_manager.abstract_definitions import conditionals
from code_manager.abstract_definitions import functions
from code_manager.abstract_definitions import variables
from code_manager.code_generator_support.file_modifier import FileModifier
from universal_code import debugging as dbg
from code_manager.code_generator_support import fancy_comments
from code_manager.abstract_definitions.code import Code

class_keywords = classes.KEYWORDS_IN_USE_ALL
code_file_keywords = code_file.KEYWORDS_IN_USE_ALL
conditional_keywords = conditionals.KEYWORDS_IN_USE_ALL
function_keywords = functions.KEYWORDS_IN_USE_ALL
variable_keywords = variables.KEYWORDS_IN_USE_ALL


def does_line_only_contain_white_space_and_newline(text_to_check) -> bool:
    """Just a utility function to check if a string is whitespace up until a new line character.

    :param text_to_check: The text to check.
    :return: A boolean indicating true or false.
    """
    # BUG FIX: the original loop tested ``c != ' ' or c != '\t'``, which is
    # always true (a char can't equal both), so every line except the bare
    # "\n" was reported as non-whitespace; it also raised IndexError on ''.
    # A line qualifies iff it ends with '\n' and everything before the
    # newline is only spaces/tabs.
    if not text_to_check.endswith('\n'):
        return False
    return text_to_check[:-1].strip(' \t') == ''


def has_keyword_match(text_content, keywords_to_check_for):
    """Just a utility function to see if any of the provided keywords appear in the text.

    :param text_content: The text content to look for keywords in.
    :param keywords_to_check_for: The keywords to look for.
    :return: A boolean indicating if a match was found or not.
    """
    # idiom: any() short-circuits exactly like the original loop did.
    return any(keyword in text_content for keyword in keywords_to_check_for)


def get_the_length_of_the_longest_code_line(codes):
    """This utility function will return the length of the longest generator code line.

    :param codes: The list of codes to use as reference.
    :return: An integer indicating the length of the longest code line (-1 if codes is empty).
    """
    # max(..., default=-1) preserves the original -1 result for empty input
    # and calls get_longest_line_length() once per code instead of twice.
    return max((c.get_longest_line_length() for c in codes), default=-1)


def _append_typed_code(index, content, codes, set_type):
    """Shared worker for the create_a_* helpers below.

    Builds a Code object from ``content``, applies the given type-setter
    (an unbound Code method), records the line number, and appends it.

    :param index: The current line number in the file.
    :param content: The raw text content of the code element.
    :param codes: The list of codes to append to.
    :param set_type: Unbound Code method that marks the element's type.
    :return: Void.
    """
    code_to_add = Code(content)
    set_type(code_to_add)
    code_to_add.set_last_line_number(index)
    codes.append(code_to_add)


def create_a_single_line_comment(index, content, codes):
    """Append ``content`` as a single-line-comment Code object.

    :param index: The current line number in the file.
    :param content: The content of the single line comment code.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_single_line_comment)


def insert_a_single_line_comment(index, insert_index, content, codes):
    """Insert ``content`` as a single-line-comment Code object at ``insert_index``.

    :param index: The current line number in the file.
    :param insert_index: The position in codes to insert into.
    :param content: The content of the single line comment code.
    :param codes: The codes to add to.
    :return: Void.
    """
    code_to_add = Code(content)
    code_to_add.set_to_a_single_line_comment()
    code_to_add.set_last_line_number(index)
    codes.insert(insert_index, code_to_add)


def create_an_empty_line(index, content, codes):
    """Append ``content`` as an empty-line Code object.

    :param index: The current line number in the file.
    :param content: The content of the empty line.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_empty_line)


def create_a_code_dump_line(index, content, codes):
    """Append ``content`` as a single-line raw-code-dump Code object.

    :param index: The current line number in the file.
    :param content: The content of the code dump line.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_code_line_dump)


def create_a_script_block(index, content, codes):
    """Append ``content`` as a script-block Code object.

    :param index: The current line number in the file.
    :param content: The content of the script block.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_script_block)


def create_a_code_dump_block(index, content, codes):
    """Append ``content`` as a raw-code-dump-block Code object.

    :param index: The current line number in the file.
    :param content: The content of the code dump block.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_code_block_dump)


def create_an_import_statement(index, content, codes):
    """Append ``content`` as an import-statement Code object.

    :param index: The current line number in the file.
    :param content: The content of the import statement.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_import_statement)


def create_a_class_declaration(index, content, codes):
    """Append ``content`` as a class-declaration Code object.

    :param index: The current line number in the file.
    :param content: The content of the class declaration.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_class_declaration)


def create_a_function_declaration(index, content, codes):
    """Append ``content`` as a function-declaration Code object.

    :param index: The current line number in the file.
    :param content: The content of the function declaration.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_function_declaration)


def create_a_conditional_statement(index, content, codes):
    """Append ``content`` as a conditional Code object.

    :param index: The current line number in the file.
    :param content: The content of the conditional statement.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_conditional)


def create_a_variable_declaration(index, content, codes):
    """Append ``content`` as a variable-declaration Code object.

    :param index: The current line number in the file.
    :param content: The content of the variable declaration.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_variable_declaration)


def create_a_code_file_declaration(index, content, codes):
    """Append ``content`` as a code-file-declaration Code object.

    :param index: The current line number in the file.
    :param content: The content of the code file declaration.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_code_file_declaration)


def create_a_comment_block(index, content, codes):
    """Append ``content`` as a comment-block Code object.

    :param index: The current line number in the file.
    :param content: The content of the comment block.
    :param codes: The codes to add to.
    :return: Void.
    """
    _append_typed_code(index, content, codes, Code.set_to_a_comment_block)


def get_generation_file_as_code_objects(file_path) -> List[Code]:
    """Returns the file as a list of code objects.

    :param file_path: The path location of the file.
    :return: A list of code objects.
    """
    # NOTE(review): this function continues past the end of the reviewed
    # chunk; the visible portion is kept unchanged.
    with open(file_path) as f:
        content = f.readlines()
    codes = []
    index = 0
    currently_in_a_comment_block = False
    comment_block_text = ''
    currently_in_a_code_dump_block = False
    code_dump_text = ''
    currently_in_a_script_block = False
    script_block_text = ''
    while index < len(content):
        current_content = content[index]
        # print(current_content, end='')
        # Empty lines.
        if does_line_only_contain_white_space_and_newline(current_content):
            create_an_empty_line(index, current_content, codes)
        # Import statements.
        elif 'from ' in current_content and ' import ' in current_content:
            create_an_import_statement(index, current_content, codes)
        # Single line comment.
        elif current_content.lstrip().startswith('#'):
            create_a_single_line_comment(index, current_content, codes)
        # Comment blocks.
        elif current_content.lstrip().startswith('"""') and not current_content.lstrip().startswith('""")'):
            if not currently_in_a_comment_block:
                comment_block_text += current_content
                currently_in_a_comment_block = True
            else:
                comment_block_text += current_content
                create_a_comment_block(index, comment_block_text, codes)
                comment_block_text = ''
                currently_in_a_comment_block = False
        # Inside of a comment block.
        elif currently_in_a_comment_block:
            comment_block_text += current_content
        # Raw code dump single line version.
        elif current_content.lstrip().startswith('c(\''):
            create_a_code_dump_line(index, current_content, codes)
        # Script code single line version.
        elif current_content.lstrip().startswith('cc(\''):
            create_a_script_block(index, current_content, codes)
        # Script code block start.
        elif current_content.lstrip().startswith('cc("""'):
            script_block_text += current_content
            currently_in_a_script_block = True
        # Raw code dump block start.
        elif current_content.lstrip().startswith('c("""'):
            code_dump_text += current_content
            currently_in_a_code_dump_block = True
        # Raw code dump block end OR script code block end.
        elif current_content.lstrip().startswith('""")'):
            if currently_in_a_code_dump_block:
                code_dump_text += current_content
                create_a_code_dump_block(index, code_dump_text, codes)
                code_dump_text = ''
                currently_in_a_code_dump_block = False
            elif currently_in_a_script_block:
                script_block_text += current_content
                create_a_script_block(index, script_block_text, codes)
                script_block_text = ''
                currently_in_a_script_block = False
        # Inside a raw code dump block.
        elif currently_in_a_code_dump_block:
            code_dump_text += current_content
        # Inside a script block.
        elif currently_in_a_script_block:
            script_block_text += current_content
        # Class declaration.
        elif has_keyword_match(current_content, class_keywords):
            create_a_class_declaration(index, current_content, codes)
        # Function declaration.
        elif has_keyword_match(current_content, function_keywords):
            create_a_function_declaration(index, current_content, codes)
        # Conditional declaration.
        elif has_keyword_match(current_content, conditional_keywords):
            create_a_conditional_statement(index, current_content, codes)
        # Variable declaration.
        elif has_keyword_match(current_content, variable_keywords):
            create_a_variable_declaration(index, current_content, codes)
        # CodeFile declaration.
elif has_keyword_match(current_content, code_file_keywords): create_a_code_file_declaration(index, current_content, codes) index += 1 return codes ################################################################################### library_nickname_mapping = {} for key in classes.library_nickname_mapping.keys(): library_nickname_mapping[key] = classes.library_nickname_mapping[key] for key in code_file.library_nickname_mapping.keys(): library_nickname_mapping[key] = code_file.library_nickname_mapping[key] for key in conditionals.library_nickname_mapping.keys(): library_nickname_mapping[key] = conditionals.library_nickname_mapping[key] for key in functions.library_nickname_mapping.keys(): library_nickname_mapping[key] = functions.library_nickname_mapping[key] for key in variables.library_nickname_mapping.keys(): library_nickname_mapping[key] = variables.library_nickname_mapping[key] library_nickname_mapping['c'] = 'from code_manager.code_generator_support.import_error_mask import c' library_nickname_mapping['cc'] = 'from code_manager.code_generator_support.import_error_mask import cc' ################################################################################### def get_currently_imported_libraries_from_a_code_list(codes_parameter): """A utility function to get the current list of libraries in the code. :param codes_parameter: The codes to search through. :return: A list of library names as strings. """ libraries_currently_imported = [] for lib in codes_parameter: if lib.is_a_library_import_statement(): words = str(lib).split(' ') libraries_currently_imported.append(words[-1].replace('\n', '')) return libraries_currently_imported def get_currently_used_libraries_from_a_code_list(codes_parameter): """A utility function to get the current list of libraries in the code. :param codes_parameter: The codes to search through. :return: A list of library names as strings. 
""" libraries_currently_used = [] for line in codes_parameter: # Go through each custom keyword that we have. for key in library_nickname_mapping: if key + '(' in str(line): libraries_currently_used.append(key) return libraries_currently_used def get_last_import_statement_line_number(codes_parameter): """A utility function to get the line number of the last library import statement. This function will crash the program if there is more than one library import statement block. :param codes_parameter: The codes to search through. :return: The last line number (of library imports) as an integer. """ last_line_number = -1 should_set_last_line_number = False number_of_import_statement_blocks = 0 for index, line in enumerate(codes_parameter): if line.is_a_library_import_statement(): should_set_last_line_number = True else: if should_set_last_line_number: number_of_import_statement_blocks += 1 last_line_number = codes_parameter[index - 1].get_last_line_number() should_set_last_line_number = False if number_of_import_statement_blocks != 1: dbg.terminate('There is either none or more than one library import statement blocks!') return last_line_number + 1 # We want the line after it to be modified, not before it. def get_index_of_first_empty_line_above_code(current_index, codes): """Just a utility function to get the line number of the first code that is an empty line that is above the current_index. -1 if none found. :param current_index: The index to start searching above from. (Numerically the index will be decreasing) :param codes: The list of codes. :return: The index if found or -1, as an integer. """ index = current_index while index > -1: if codes[index].is_an_empty_line(): return index index -= 1 return -1 def get_index_of_first_empty_line_below_code(current_index, codes): """Just a utility function to get the line number of the first code taht is an empty line that is below the current_index. :param current_index: The index to start searching below from. 
(Numerically the index will be increasing) :param codes: The list of codes. :return: The index if found or -1, as an integer. """ index = current_index while index < len(codes): if codes[index].is_an_empty_line(): return index index += 1 return -1 def parse_generator_file(file_path): """This function will provide live assistance to generator files. :param file_path: The file path to the generator file. :return: Void. """ file_name = file_path codes = get_generation_file_as_code_objects(file_name) #for c in codes: # print(c, end='') currently_used_libraries = get_currently_used_libraries_from_a_code_list(codes) currently_imported_libraries = get_currently_imported_libraries_from_a_code_list(codes) missing_imports = set(currently_used_libraries) - set(currently_imported_libraries) if len(missing_imports) > 0: with FileModifier(file_name) as file_pointer: for mi in missing_imports: file_pointer.write_line(library_nickname_mapping[mi], get_last_import_statement_line_number(codes)) ''' # Files are imported now check for class commenting needed. index = 0 while index < len(codes): current_line = codes[index] #print(str(current_line), end='') if current_line.is_a_class_declaration_statement(): #print(str(codes[index - 1]), end='') #print(str(current_line), end='') first_empty_line_above = get_index_of_first_empty_line_above_code(index, codes) first_empty_line_below = get_index_of_first_empty_line_below_code(index, codes) if first_empty_line_above == -1: dbg.terminate('Something went wrong.') elif first_empty_line_below == -1: dbg.terminate('Something else went wrong.') # What the starting class comment should be. #start_comment = fancy_comments.get_comment_divider('Class name here', get_the_length_of_the_longest_code_line(codes)) start_comment = 'YOLO' # There currently exists no starting class comment. 
if first_empty_line_above + 1 == index: print(codes[first_empty_line_above + 1]) #print(index) insert_a_single_line_comment(index, first_empty_line_above, start_comment, codes) index += 1 #print(index) #print(codes) else: y = 2 # double check the existing comment. # Insert comment below. #code_to_add = Code(fancy_comments.get_comment_divider("Class name here", )) #code_to_add.set_to_a_single_line_comment() #codes.insert #print(str(codes[first_empty_line]), end='') #print(str(current_line), end='') index += 1 '''
# -*- coding: utf-8 -*-
"""
    pygments.formatters.terminal256
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for 256-color terminal output with ANSI sequences.

    RGB-to-XTERM color conversion routines adapted from xterm256-conv
    tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
    by Wolfgang Frisch.

    Formatter version 1.

    :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

# TODO:
#  - Options to map style's bold/underline/italic/border attributes
#    to some ANSI attrbutes (something like 'italic=underline')
#  - An option to output "style RGB to xterm RGB/index" conversion table
#  - An option to indicate that we are running in "reverse background"
#    xterm. This means that default colors are white-on-black, not
#    black-on-while, so colors like "white background" need to be converted
#    to "white background, black foreground", etc...

import sys

from pygments.formatter import Formatter


__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']


class EscapeSequence:
    # Builds ANSI SGR escape strings.  `fg`/`bg` hold either a 256-color
    # palette index (used by color_string) or an (r, g, b) tuple (used by
    # true_color_string); `bold`/`underline` are plain flags.
    def __init__(self, fg=None, bg=None, bold=False, underline=False):
        self.fg = fg
        self.bg = bg
        self.bold = bold
        self.underline = underline

    def escape(self, attrs):
        # Wrap the attribute codes in CSI ... m; empty attrs -> empty string.
        if len(attrs):
            return "\x1b[" + ";".join(attrs) + "m"
        return ""

    def color_string(self):
        # 256-color form: 38;5;<idx> (foreground), 48;5;<idx> (background).
        attrs = []
        if self.fg is not None:
            attrs.extend(("38", "5", "%i" % self.fg))
        if self.bg is not None:
            attrs.extend(("48", "5", "%i" % self.bg))
        if self.bold:
            attrs.append("01")
        if self.underline:
            attrs.append("04")
        return self.escape(attrs)

    def true_color_string(self):
        # Truecolor form: 38;2;<r>;<g>;<b> / 48;2;<r>;<g>;<b>.
        attrs = []
        if self.fg:
            attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
        if self.bg:
            attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
        if self.bold:
            attrs.append("01")
        if self.underline:
            attrs.append("04")
        return self.escape(attrs)

    def reset_string(self):
        # Undo only what color_string()/true_color_string() set.
        attrs = []
        if self.fg is not None:
            attrs.append("39")
        if self.bg is not None:
            attrs.append("49")
        if self.bold or self.underline:
            attrs.append("00")
        return self.escape(attrs)


class Terminal256Formatter(Formatter):
    """
    Format tokens with ANSI color sequences, for output in a 256-color
    terminal or console.  Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    The formatter takes colors from a style defined by the `style` option
    and converts them to nearest ANSI 256-color escape sequences. Bold and
    underline attributes from the style are preserved (and displayed).

    .. versionadded:: 0.9

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'Terminal256'
    aliases = ['terminal256', 'console256', '256']
    filenames = []

    def __init__(self, **options):
        Formatter.__init__(self, **options)

        self.xterm_colors = []        # palette index -> (r, g, b)
        self.best_match = {}          # cache: hex color string -> palette index
        self.style_string = {}        # str(token type) -> (on-escape, off-escape)

        self.usebold = 'nobold' not in options
        self.useunderline = 'nounderline' not in options

        self._build_color_table()  # build an RGB-to-256 color conversion table
        self._setup_styles()  # convert selected style's colors to term. colors

    def _build_color_table(self):
        # colors 0..15: 16 basic colors

        self.xterm_colors.append((0x00, 0x00, 0x00))  # 0
        self.xterm_colors.append((0xcd, 0x00, 0x00))  # 1
        self.xterm_colors.append((0x00, 0xcd, 0x00))  # 2
        self.xterm_colors.append((0xcd, 0xcd, 0x00))  # 3
        self.xterm_colors.append((0x00, 0x00, 0xee))  # 4
        self.xterm_colors.append((0xcd, 0x00, 0xcd))  # 5
        self.xterm_colors.append((0x00, 0xcd, 0xcd))  # 6
        self.xterm_colors.append((0xe5, 0xe5, 0xe5))  # 7
        self.xterm_colors.append((0x7f, 0x7f, 0x7f))  # 8
        self.xterm_colors.append((0xff, 0x00, 0x00))  # 9
        self.xterm_colors.append((0x00, 0xff, 0x00))  # 10
        self.xterm_colors.append((0xff, 0xff, 0x00))  # 11
        self.xterm_colors.append((0x5c, 0x5c, 0xff))  # 12
        self.xterm_colors.append((0xff, 0x00, 0xff))  # 13
        self.xterm_colors.append((0x00, 0xff, 0xff))  # 14
        self.xterm_colors.append((0xff, 0xff, 0xff))  # 15

        # colors 16..232: the 6x6x6 color cube

        valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)

        for i in range(217):
            r = valuerange[(i // 36) % 6]
            g = valuerange[(i // 6) % 6]
            b = valuerange[i % 6]
            self.xterm_colors.append((r, g, b))

        # colors 233..253: grayscale

        for i in range(1, 22):
            v = 8 + i * 10
            self.xterm_colors.append((v, v, v))

    def _closest_color(self, r, g, b):
        # Nearest-neighbor search by squared Euclidean distance in RGB space.
        distance = 257*257*3  # "infinity" (>distance from #000000 to #ffffff)
        match = 0

        for i in range(0, 254):
            values = self.xterm_colors[i]

            rd = r - values[0]
            gd = g - values[1]
            bd = b - values[2]
            d = rd*rd + gd*gd + bd*bd

            if d < distance:
                match = i
                distance = d
        return match

    def _color_index(self, color):
        # Map a style hex color string to a palette index, memoized in
        # self.best_match.  Unparseable colors fall back to 0 (black).
        index = self.best_match.get(color, None)
        if index is None:
            try:
                rgb = int(str(color), 16)
            except ValueError:
                rgb = 0

            r = (rgb >> 16) & 0xff
            g = (rgb >> 8) & 0xff
            b = rgb & 0xff
            index = self._closest_color(r, g, b)
            self.best_match[color] = index
        return index

    def _setup_styles(self):
        # Precompute the (on, off) escape pair for every token type in the
        # selected style.
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            if ndef['color']:
                escape.fg = self._color_index(ndef['color'])
            if ndef['bgcolor']:
                escape.bg = self._color_index(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            self.style_string[str(ttype)] = (escape.color_string(),
                                             escape.reset_string())

    def format(self, tokensource, outfile):
        # hack: if the output is a terminal and has an encoding set,
        # use that to avoid unicode encode problems
        if not self.encoding and hasattr(outfile, "encoding") and \
           hasattr(outfile, "isatty") and outfile.isatty() and \
           sys.version_info < (3,):
            self.encoding = outfile.encoding
        return Formatter.format(self, tokensource, outfile)

    def format_unencoded(self, tokensource, outfile):
        for ttype, value in tokensource:
            not_found = True
            # Walk up the token-type hierarchy until a styled ancestor is found.
            while ttype and not_found:
                try:
                    # outfile.write( "<" + str(ttype) + ">" )
                    on, off = self.style_string[str(ttype)]

                    # Like TerminalFormatter, add "reset colors" escape sequence
                    # on newline.
                    spl = value.split('\n')
                    for line in spl[:-1]:
                        if line:
                            outfile.write(on + line + off)
                        outfile.write('\n')
                    if spl[-1]:
                        outfile.write(on + spl[-1] + off)

                    not_found = False
                    # outfile.write( '#' + str(ttype) + '#' )

                except KeyError:
                    # ottype = ttype
                    ttype = ttype[:-1]
                    # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )

            if not_found:
                # No styled ancestor at all: emit the text unstyled.
                outfile.write(value)


class TerminalTrueColorFormatter(Terminal256Formatter):
    r"""
    Format tokens with ANSI color sequences, for output in a true-color
    terminal or console.  Like in `TerminalFormatter` color sequences
    are terminated at newlines, so that paging the output works correctly.

    .. versionadded:: 2.1

    Options accepted:

    `style`
        The style to use, can be a string or a Style subclass (default:
        ``'default'``).
    """
    name = 'TerminalTrueColor'
    aliases = ['terminal16m', 'console16m', '16m']
    filenames = []

    def _build_color_table(self):
        # No palette needed: colors are emitted as raw RGB triples.
        pass

    def _color_tuple(self, color):
        # Parse a style hex color string into (r, g, b), or None on failure.
        try:
            rgb = int(str(color), 16)
        except ValueError:
            return None
        r = (rgb >> 16) & 0xff
        g = (rgb >> 8) & 0xff
        b = rgb & 0xff
        return (r, g, b)

    def _setup_styles(self):
        for ttype, ndef in self.style:
            escape = EscapeSequence()
            if ndef['color']:
                escape.fg = self._color_tuple(ndef['color'])
            if ndef['bgcolor']:
                escape.bg = self._color_tuple(ndef['bgcolor'])
            if self.usebold and ndef['bold']:
                escape.bold = True
            if self.useunderline and ndef['underline']:
                escape.underline = True
            self.style_string[str(ttype)] = (escape.true_color_string(),
                                             escape.reset_string())
# ============================================================================= # Copyright (c) 2016, Cisco Systems, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. 
# =============================================================================
from flask import Blueprint
from flask import abort
from flask import render_template
from flask import request
from flask import redirect
from flask import url_for
from flask import jsonify

from flask_login import login_required
from flask_login import current_user
from flask_login import login_user
from flask_login import logout_user

from wtforms import Form
from wtforms import StringField
from wtforms import SelectField
from wtforms import PasswordField
from wtforms import HiddenField
from wtforms.validators import required
from wtforms.validators import Required

from database import DBSession

from common import can_create_user
from common import get_user_list
from common import get_user
from common import fill_user_privileges

from models import User
from models import logger
from models import Preferences
from models import SystemOption
from models import UserPrivilege
from models import CSMMessage

from forms import add_validator
from forms import remove_validator

from utils import get_base_url
from utils import get_return_url

from smu_info_loader import SMUInfoLoader

import datetime

authenticate = Blueprint('authenticate', __name__, url_prefix='/authenticate')


@authenticate.route('/login/', methods=['GET', 'POST'])
def login():
    """Authenticate the user and redirect to the requested page on success."""
    form = LoginForm(request.form)
    error_message = None

    if request.method == 'POST' and form.validate():
        username = form.username.data.strip()
        password = form.password.data.strip()

        db_session = DBSession()

        user, authenticated = \
            User.authenticate(db_session.query, username, password)

        if authenticated:
            login_user(user)

            # record the base URL
            try:
                system_option = SystemOption.get(db_session)
                system_option.base_url = get_base_url(request.url)
                db_session.commit()
            except Exception:
                # Best effort only: failing to record the base URL must not
                # block the login.
                logger.exception('login() hit exception')

            # Certain admin features (Admin Console/Create or Edit User) require
            # re-authentication.  The return_url indicates which admin feature the
            # user wants to access.
            return_url = get_return_url(request)
            if return_url is None:
                return redirect(request.args.get("next") or url_for('home'))
            else:
                return redirect(url_for(return_url))
        else:
            # NOTE: previously this message was a single literal with a
            # backslash line-continuation, which embedded the source
            # indentation into the user-facing text.
            error_message = ('Your user name or password is incorrect. '
                             'Re-enter them again or contact your system administrator.')

    # Fill the username if the user is still logged in.
    username = get_username(current_user)
    if username is not None:
        form.username.data = username

    return render_template('user/login.html', form=form,
                           error_message=error_message, username=username)


def get_username(current_user):
    """ Return the current username.  If the user already logged out, return None """
    # The parameter intentionally shadows the imported `current_user` proxy;
    # accessing .username on an anonymous/logged-out user raises.
    try:
        return current_user.username
    except Exception:
        return None


@authenticate.route('/logout/')
def logout():
    """Log the current user out and return to the login page."""
    logout_user()
    return redirect(url_for('authenticate.login'))


@authenticate.route('/users/create', methods=['GET', 'POST'])
@login_required
def user_create():
    """Create a new user (admin-only)."""
    if not can_create_user(current_user):
        abort(401)

    form = UserForm(request.form)
    # Need to add the Required flag back as it is globally removed during user_edit()
    add_validator(form.password, Required)
    fill_user_privileges(form.privilege.choices)

    if request.method == 'POST' and form.validate():
        db_session = DBSession()
        user = get_user(db_session, form.username.data)
        if user is not None:
            return render_template('user/edit.html', form=form, duplicate_error=True)

        user = User(
            username=form.username.data,
            password=form.password.data,
            privilege=form.privilege.data,
            fullname=form.fullname.data,
            email=form.email.data)

        user.preferences.append(Preferences())
        db_session.add(user)
        db_session.commit()

        return redirect(url_for('home'))
    else:
        # Default to Active
        form.active.data = True
        return render_template('user/edit.html', form=form)


@authenticate.route('/users/<username>/edit', methods=['GET', 'POST'])
@login_required
def user_edit(username):
    """Edit an existing user; an empty password keeps the one on file."""
    db_session = DBSession()

    user = get_user(db_session, username)
    if user is None:
        abort(404)

    form = UserForm(request.form)
    # Remove the Required flag so validation won't fail.  In edit mode, it is okay
    # not to provide the password.  In this case, the password on file is used.
    remove_validator(form.password, Required)
    fill_user_privileges(form.privilege.choices)

    if request.method == 'POST' and form.validate():
        if len(form.password.data) > 0:
            user.password = form.password.data

        user.privilege = form.privilege.data
        user.fullname = form.fullname.data
        user.email = form.email.data
        user.active = form.active.data
        db_session.commit()

        return redirect(url_for('home'))
    else:
        form.username.data = user.username
        form.privilege.data = user.privilege
        form.fullname.data = user.fullname
        form.email.data = user.email
        form.active.data = user.active

    return render_template('user/edit.html', form=form)


@authenticate.route('/users/edit', methods=['GET', 'POST'])
@login_required
def current_user_edit():
    """Convenience route: edit the currently logged-in user."""
    return user_edit(current_user.username)


@authenticate.route('/users/')
@login_required
def user_list():
    """List all users; only admins may see the list."""
    db_session = DBSession()

    users = get_user_list(db_session)
    if users is None:
        abort(404)

    if current_user.privilege == UserPrivilege.ADMIN:
        return render_template('user/index.html', users=users,
                               system_option=SystemOption.get(db_session))

    return render_template('user/not_authorized.html', user=current_user)


@authenticate.route('/users/<username>/delete/', methods=['DELETE'])
@login_required
def user_delete(username):
    """Delete the named user; 404 if the user does not exist."""
    db_session = DBSession()

    user = get_user(db_session, username)
    if user is None:
        abort(404)

    db_session.delete(user)
    db_session.commit()

    return jsonify({'status': 'OK'})


@authenticate.route('/api/acknowledge_csm_message', methods=['POST'])
@login_required
def api_acknowledge_csm_message():
    """Record today's date as the user's CSM-message acknowledgment."""
    db_session = DBSession()

    user = current_user
    if len(user.csm_message) == 0:
        user.csm_message.append(CSMMessage(acknowledgment_date=datetime.date.today()))
    else:
        user.csm_message[0].acknowledgment_date = datetime.date.today()

    db_session.commit()
    return jsonify({'status': 'OK'})


@authenticate.route('/api/get_csm_message', methods=['POST'])
@login_required
def api_get_csm_message():
    """Return CCO CSM messages newer than the user's last acknowledgment."""
    rows = []
    user = current_user

    csm_messages = SMUInfoLoader.get_cco_csm_messages()
    if len(csm_messages) > 0:
        acknowledgment_date = datetime.datetime(2000, 1, 1)
        if len(user.csm_message) > 0:
            acknowledgment_date = user.csm_message[0].acknowledgment_date

        # csm_messages returns a dictionary keyed by a token (e.g. @12/01/01@Admin,Operator) and message
        readers = [UserPrivilege.ADMIN, UserPrivilege.NETWORK_ADMIN,
                   UserPrivilege.OPERATOR, UserPrivilege.VIEWER]
        for csm_message in csm_messages:
            tokens = csm_message['token'].split('@')
            date = tokens[0]
            # NOTE(review): when a token carries no reader list, the readers
            # from the previous iteration (or the default) are reused — confirm
            # that this carry-over is the intended behavior.
            if len(tokens) == 2:
                readers = tokens[1].split(',')

            if user.privilege in readers:
                message = csm_message['message']
                try:
                    delta = datetime.datetime.strptime(date, "%Y/%m/%d") - acknowledgment_date
                    if delta.days > 0:
                        rows.append({'date': date, 'message': message.replace("\n", "<br>")})
                except Exception:
                    logger.exception('api_get_csm_message() hit exception')

    return jsonify(**{'data': rows})


class LoginForm(Form):
    """
    Render HTML input for user login form.
    Authentication (i.e. password verification) happens in the view function.
    """
    username = StringField('Username', [required()])
    password = PasswordField('Password', [required()])


class UserForm(Form):
    """
    Render HTML input for user registration form.
    Authentication (i.e. password verification) happens in the view function.
    """
    username = StringField('Username', [required()])
    password = PasswordField('Password', [required()])
    privilege = SelectField('Privilege', [required()], coerce=str, choices=[('', '')])
    active = HiddenField("Active")
    fullname = StringField('Full Name', [required()])
    email = StringField('Email Address', [required()])
from __future__ import unicode_literals

import os
from collections import OrderedDict

from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import smart_text
from django.utils.functional import cached_property
from django.utils.six.moves import input


class Command(BaseCommand):
    """
    Command that allows to copy or symlink static files from different
    locations to the settings.STATIC_ROOT.
    """
    help = "Collect static files in a single location."
    requires_system_checks = False

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Bookkeeping lists filled in by copy_file/link_file/delete_file and
        # reported by handle().
        self.copied_files = []
        self.symlinked_files = []
        self.unmodified_files = []
        self.post_processed_files = []
        self.storage = staticfiles_storage
        self.style = no_style()

    @cached_property
    def local(self):
        # True when the destination storage exposes filesystem paths
        # (i.e. .path() is implemented), which symlinking requires.
        try:
            self.storage.path('')
        except NotImplementedError:
            return False
        return True

    def add_arguments(self, parser):
        """Register the command-line options for collectstatic."""
        parser.add_argument('--noinput', '--no-input',
            action='store_false', dest='interactive', default=True,
            help="Do NOT prompt the user for input of any kind.")
        parser.add_argument('--no-post-process',
            action='store_false', dest='post_process', default=True,
            help="Do NOT post process collected files.")
        parser.add_argument('-i', '--ignore', action='append', default=[],
            dest='ignore_patterns', metavar='PATTERN',
            help="Ignore files or directories matching this glob-style "
                "pattern. Use multiple times to ignore more.")
        parser.add_argument('-n', '--dry-run',
            action='store_true', dest='dry_run', default=False,
            help="Do everything except modify the filesystem.")
        parser.add_argument('-c', '--clear',
            action='store_true', dest='clear', default=False,
            help="Clear the existing files using the storage "
                 "before trying to copy or link the original file.")
        parser.add_argument('-l', '--link',
            action='store_true', dest='link', default=False,
            help="Create a symbolic link to each file instead of copying.")
        parser.add_argument('--no-default-ignore', action='store_false',
            dest='use_default_ignore_patterns', default=True,
            help="Don't ignore the common private glob-style patterns 'CVS', "
                "'.*' and '*~'.")

    def set_options(self, **options):
        """
        Set instance variables based on an options dict
        """
        self.interactive = options['interactive']
        self.verbosity = options['verbosity']
        self.symlink = options['link']
        self.clear = options['clear']
        self.dry_run = options['dry_run']
        ignore_patterns = options['ignore_patterns']
        if options['use_default_ignore_patterns']:
            ignore_patterns += ['CVS', '.*', '*~']
        # De-duplicate the combined pattern list.
        self.ignore_patterns = list(set(ignore_patterns))
        self.post_process = options['post_process']

    def collect(self):
        """
        Perform the bulk of the work of collectstatic.

        Split off from handle() to facilitate testing.
        """
        if self.symlink and not self.local:
            raise CommandError("Can't symlink to a remote destination.")

        if self.clear:
            self.clear_dir('')

        if self.symlink:
            handler = self.link_file
        else:
            handler = self.copy_file

        found_files = OrderedDict()
        for finder in get_finders():
            for path, storage in finder.list(self.ignore_patterns):
                # Prefix the relative path if the source storage contains it
                if getattr(storage, 'prefix', None):
                    prefixed_path = os.path.join(storage.prefix, path)
                else:
                    prefixed_path = path

                # First finder to supply a destination path wins.
                if prefixed_path not in found_files:
                    found_files[prefixed_path] = (storage, path)
                    handler(path, prefixed_path, storage)
                else:
                    self.log(
                        "Found another file with the destination path '%s'. It "
                        "will be ignored since only the first encountered file "
                        "is collected. If this is not what you want, make sure "
                        "every static file has a unique path." % prefixed_path,
                        level=1,
                    )

        # Here we check if the storage backend has a post_process
        # method and pass it the list of modified files.
        if self.post_process and hasattr(self.storage, 'post_process'):
            processor = self.storage.post_process(found_files,
                                                  dry_run=self.dry_run)
            for original_path, processed_path, processed in processor:
                if isinstance(processed, Exception):
                    self.stderr.write("Post-processing '%s' failed!" % original_path)
                    # Add a blank line before the traceback, otherwise it's
                    # too easy to miss the relevant part of the error message.
                    self.stderr.write("")
                    raise processed
                if processed:
                    self.log("Post-processed '%s' as '%s'" %
                             (original_path, processed_path), level=1)
                    self.post_processed_files.append(original_path)
                else:
                    self.log("Skipped post-processing '%s'" % original_path)

        return {
            'modified': self.copied_files + self.symlinked_files,
            'unmodified': self.unmodified_files,
            'post_processed': self.post_processed_files,
        }

    def handle(self, **options):
        # Entry point: confirm with the user (unless --noinput), run
        # collect(), and build the summary string.
        self.set_options(**options)

        message = ['\n']
        if self.dry_run:
            message.append(
                'You have activated the --dry-run option so no files will be modified.\n\n'
            )

        message.append(
            'You have requested to collect static files at the destination\n'
            'location as specified in your settings'
        )

        if self.is_local_storage() and self.storage.location:
            destination_path = self.storage.location
            message.append(':\n\n    %s\n\n' % destination_path)
        else:
            destination_path = None
            message.append('.\n\n')

        if self.clear:
            message.append('This will DELETE ALL FILES in this location!\n')
        else:
            message.append('This will overwrite existing files!\n')

        message.append(
            'Are you sure you want to do this?\n\n'
            "Type 'yes' to continue, or 'no' to cancel: "
        )

        if self.interactive and input(''.join(message)) != 'yes':
            raise CommandError("Collecting static files cancelled.")

        collected = self.collect()
        modified_count = len(collected['modified'])
        unmodified_count = len(collected['unmodified'])
        post_processed_count = len(collected['post_processed'])

        if self.verbosity >= 1:
            template = ("\n%(modified_count)s %(identifier)s %(action)s"
                        "%(destination)s%(unmodified)s%(post_processed)s.\n")
            summary = template % {
                'modified_count': modified_count,
                'identifier': 'static file' + ('' if modified_count == 1 else 's'),
                'action': 'symlinked' if self.symlink else 'copied',
                'destination': (" to '%s'" % destination_path if destination_path else ''),
                'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
                'post_processed': (collected['post_processed'] and
                                   ', %s post-processed' % post_processed_count or ''),
            }
            return summary

    def log(self, msg, level=2):
        """
        Small log helper
        """
        if self.verbosity >= level:
            self.stdout.write(msg)

    def is_local_storage(self):
        # True only for filesystem-backed destination storages.
        return isinstance(self.storage, FileSystemStorage)

    def clear_dir(self, path):
        """
        Deletes the given relative path using the destination storage backend.
        """
        if not self.storage.exists(path):
            return

        dirs, files = self.storage.listdir(path)
        for f in files:
            fpath = os.path.join(path, f)
            if self.dry_run:
                self.log("Pretending to delete '%s'" %
                         smart_text(fpath), level=1)
            else:
                self.log("Deleting '%s'" % smart_text(fpath), level=1)
                try:
                    full_path = self.storage.path(fpath)
                except NotImplementedError:
                    self.storage.delete(fpath)
                else:
                    if not os.path.exists(full_path) and os.path.lexists(full_path):
                        # Delete broken symlinks
                        os.unlink(full_path)
                    else:
                        self.storage.delete(fpath)
        # Recurse into subdirectories.
        for d in dirs:
            self.clear_dir(os.path.join(path, d))

    def delete_file(self, path, prefixed_path, source_storage):
        """
        Checks if the target file should be deleted if it already exists
        """
        if self.storage.exists(prefixed_path):
            try:
                # When was the target file modified last time?
                target_last_modified = self.storage.get_modified_time(prefixed_path)
            except (OSError, NotImplementedError, AttributeError):
                # The storage doesn't support get_modified_time() or failed
                pass
            else:
                try:
                    # When was the source file modified last time?
                    source_last_modified = source_storage.get_modified_time(path)
                except (OSError, NotImplementedError, AttributeError):
                    pass
                else:
                    # The full path of the target file
                    if self.local:
                        full_path = self.storage.path(prefixed_path)
                    else:
                        full_path = None
                    # Skip the file if the source file is younger
                    # Avoid sub-second precision (see #14665, #19540)
                    if (target_last_modified.replace(microsecond=0) >=
                            source_last_modified.replace(microsecond=0) and
                            full_path and
                            not (self.symlink ^ os.path.islink(full_path))):
                        if prefixed_path not in self.unmodified_files:
                            self.unmodified_files.append(prefixed_path)
                        self.log("Skipping '%s' (not modified)" % path)
                        return False
            # Then delete the existing file if really needed
            if self.dry_run:
                self.log("Pretending to delete '%s'" % path)
            else:
                self.log("Deleting '%s'" % path)
                self.storage.delete(prefixed_path)
        return True

    def link_file(self, path, prefixed_path, source_storage):
        """
        Attempt to link ``path``
        """
        # Skip this file if it was already copied earlier
        if prefixed_path in self.symlinked_files:
            return self.log("Skipping '%s' (already linked earlier)" % path)
        # Delete the target file if needed or break
        if not self.delete_file(path, prefixed_path, source_storage):
            return
        # The full path of the source file
        source_path = source_storage.path(path)
        # Finally link the file
        if self.dry_run:
            self.log("Pretending to link '%s'" % source_path, level=1)
        else:
            self.log("Linking '%s'" % source_path, level=1)
            full_path = self.storage.path(prefixed_path)
            try:
                os.makedirs(os.path.dirname(full_path))
            except OSError:
                pass
            try:
                if os.path.lexists(full_path):
                    os.unlink(full_path)
                os.symlink(source_path, full_path)
            except AttributeError:
                import platform
                raise CommandError("Symlinking is not supported by Python %s." %
                                   platform.python_version())
            except NotImplementedError:
                import platform
                raise CommandError("Symlinking is not supported in this "
                                   "platform (%s)."
% platform.platform()) except OSError as e: raise CommandError(e) if prefixed_path not in self.symlinked_files: self.symlinked_files.append(prefixed_path) def copy_file(self, path, prefixed_path, source_storage): """ Attempt to copy ``path`` with storage """ # Skip this file if it was already copied earlier if prefixed_path in self.copied_files: return self.log("Skipping '%s' (already copied earlier)" % path) # Delete the target file if needed or break if not self.delete_file(path, prefixed_path, source_storage): return # The full path of the source file source_path = source_storage.path(path) # Finally start copying if self.dry_run: self.log("Pretending to copy '%s'" % source_path, level=1) else: self.log("Copying '%s'" % source_path, level=1) with source_storage.open(path) as source_file: self.storage.save(prefixed_path, source_file) self.copied_files.append(prefixed_path)
#!/usr/bin/env python
"""Clone an existing virtualenv to a new location, rewriting any embedded
absolute paths (script shebangs, ``activate`` scripts, ``.pth`` files,
``.egg-link`` files and internal symlinks) so the copy is self-contained."""
from __future__ import with_statement

import logging
import optparse
import os
import os.path
import re
import shutil
import subprocess
import sys
import itertools

__version__ = "0.5.7"


logger = logging.getLogger()


# Virtualenvs keep their scripts in "bin" on POSIX, "Scripts" on Windows.
env_bin_dir = "bin"
if sys.platform == "win32":
    env_bin_dir = "Scripts"


class UserError(Exception):
    # Raised for user-facing errors; main() turns these into parser.error().
    pass


def _dirmatch(path, matchwith):
    """Check if path is within matchwith's tree.
    >>> _dirmatch('/home/foo/bar', '/home/foo/bar')
    True
    >>> _dirmatch('/home/foo/bar/', '/home/foo/bar')
    True
    >>> _dirmatch('/home/foo/bar/etc', '/home/foo/bar')
    True
    >>> _dirmatch('/home/foo/bar2', '/home/foo/bar')
    False
    >>> _dirmatch('/home/foo/bar2/etc', '/home/foo/bar')
    False
    """
    # The next char after the prefix must be a separator (or nothing),
    # so '/home/foo/bar2' does not match '/home/foo/bar'.
    matchlen = len(matchwith)
    if path.startswith(matchwith) and path[matchlen : matchlen + 1] in [os.sep, ""]:
        return True
    return False


def _virtualenv_sys(venv_path):
    "obtain version and path info from a virtualenv."
    # Runs the venv's own interpreter and returns ("X.Y", [sys.path entries]).
    executable = os.path.join(venv_path, env_bin_dir, "python")
    # Must use "executable" as the first argument rather than as the
    # keyword argument "executable" to get correct value from sys.path
    # NOTE(review): env={} strips the inherited environment; presumably
    # deliberate so PYTHONPATH etc. can't leak in — confirm.
    p = subprocess.Popen(
        [
            executable,
            "-c",
            "import sys;"
            'print ("%d.%d" % (sys.version_info.major, sys.version_info.minor));'
            'print ("\\n".join(sys.path));',
        ],
        env={},
        stdout=subprocess.PIPE,
    )
    stdout, err = p.communicate()
    # NOTE(review): assert used for runtime validation — stripped under -O.
    assert not p.returncode and stdout
    lines = stdout.decode("utf-8").splitlines()
    return lines[0], list(filter(bool, lines[1:]))


def clone_virtualenv(src_dir, dst_dir):
    """Copy the virtualenv at ``src_dir`` to ``dst_dir`` and fix all
    references to the old location. ``dst_dir`` must not exist yet."""
    if not os.path.exists(src_dir):
        raise UserError("src dir %r does not exist" % src_dir)
    if os.path.exists(dst_dir):
        raise UserError("dest dir %r exists" % dst_dir)
    # sys_path = _virtualenv_syspath(src_dir)
    logger.info("cloning virtualenv '%s' => '%s'..." % (src_dir, dst_dir))
    # symlinks=True preserves links; *.pyc are stale (embed old paths).
    shutil.copytree(
        src_dir, dst_dir, symlinks=True, ignore=shutil.ignore_patterns("*.pyc")
    )
    version, sys_path = _virtualenv_sys(dst_dir)
    logger.info("fixing scripts in bin...")
    fixup_scripts(src_dir, dst_dir, version)

    # True when any sys.path entry still points inside the source venv.
    has_old = lambda s: any(i for i in s if _dirmatch(i, src_dir))

    if has_old(sys_path):
        # only need to fix stuff in sys.path if we have old
        # paths in the sys.path of new python env. right?
        logger.info("fixing paths in sys.path...")
        fixup_syspath_items(sys_path, src_dir, dst_dir)
    # Re-query the clone to verify no stale paths remain.
    v_sys = _virtualenv_sys(dst_dir)
    remaining = has_old(v_sys[1])
    assert not remaining, v_sys
    fix_symlink_if_necessary(src_dir, dst_dir)


def fix_symlink_if_necessary(src_dir, dst_dir):
    # sometimes the source virtual environment has symlinks that point to itself
    # one example is $OLD_VIRTUAL_ENV/local/lib points to $OLD_VIRTUAL_ENV/lib
    # this function makes sure
    # $NEW_VIRTUAL_ENV/local/lib will point to $NEW_VIRTUAL_ENV/lib
    # usually this goes unnoticed unless one tries to upgrade a package though pip, so this bug is hard to find.
    logger.info("scanning for internal symlinks that point to the original virtual env")
    for dirpath, dirnames, filenames in os.walk(dst_dir):
        for a_file in itertools.chain(filenames, dirnames):
            full_file_path = os.path.join(dirpath, a_file)
            if os.path.islink(full_file_path):
                # realpath resolves the link fully before comparing.
                target = os.path.realpath(full_file_path)
                if target.startswith(src_dir):
                    new_target = target.replace(src_dir, dst_dir)
                    logger.debug("fixing symlink in %s" % (full_file_path,))
                    # Replace the link in place with one into the clone.
                    os.remove(full_file_path)
                    os.symlink(new_target, full_file_path)


def fixup_scripts(old_dir, new_dir, version, rewrite_env_python=False):
    """Walk the clone's bin/Scripts dir and repair each entry by kind:
    activate scripts, symlinks, and scripts with stale shebangs."""
    bin_dir = os.path.join(new_dir, env_bin_dir)
    root, dirs, files = next(os.walk(bin_dir))
    pybinre = re.compile(r"pythonw?([0-9]+(\.[0-9]+(\.[0-9]+)?)?)?$")
    for file_ in files:
        filename = os.path.join(root, file_)
        if file_ in ["python", "python%s" % version, "activate_this.py"]:
            continue
        elif file_.startswith("python") and pybinre.match(file_):
            # ignore other possible python binaries
            continue
        elif file_.endswith(".pyc"):
            # ignore compiled files
            continue
        elif file_ == "activate" or file_.startswith("activate."):
            fixup_activate(os.path.join(root, file_), old_dir, new_dir)
        elif os.path.islink(filename):
            fixup_link(filename, old_dir, new_dir)
        elif os.path.isfile(filename):
            fixup_script_(
                root,
                file_,
                old_dir,
                new_dir,
                version,
                rewrite_env_python=rewrite_env_python,
            )


def fixup_script_(root, file_, old_dir, new_dir, version, rewrite_env_python=False):
    """Rewrite a script's shebang from the old venv's python to the new one.

    Handles plain '#!<old>/bin/python', versioned variants, and (optionally)
    '#!/usr/bin/env python' when ``rewrite_env_python`` is set.
    """
    old_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(old_dir))
    new_shebang = "#!%s/bin/python" % os.path.normcase(os.path.abspath(new_dir))
    env_shebang = "#!/usr/bin/env python"

    filename = os.path.join(root, file_)
    with open(filename, "rb") as f:
        if f.read(2) != b"#!":
            # no shebang
            return
        f.seek(0)
        lines = f.readlines()

    if not lines:
        # warn: empty script
        return

    def rewrite_shebang(version=None):
        # Rewrites line 0 with the new shebang (plus optional version
        # suffix), keeping the remainder of the file untouched.
        logger.debug("fixing %s" % filename)
        shebang = new_shebang
        if version:
            shebang = shebang + version
        shebang = (shebang + "\n").encode("utf-8")
        with open(filename, "wb") as f:
            f.write(shebang)
            f.writelines(lines[1:])

    try:
        bang = lines[0].decode("utf-8").strip()
    except UnicodeDecodeError:
        # binary file
        return

    # This takes care of the scheme in which shebang is of type
    # '#!/venv/bin/python3' while the version of system python
    # is of type 3.x e.g. 3.5.
    short_version = bang[len(old_shebang) :]

    if not bang.startswith("#!"):
        return
    elif bang == old_shebang:
        rewrite_shebang()
    elif bang.startswith(old_shebang) and bang[len(old_shebang) :] == version:
        rewrite_shebang(version)
    elif (
        bang.startswith(old_shebang)
        and short_version
        and bang[len(old_shebang) :] == short_version
    ):
        rewrite_shebang(short_version)
    elif rewrite_env_python and bang.startswith(env_shebang):
        if bang == env_shebang:
            rewrite_shebang()
        elif bang[len(env_shebang) :] == version:
            rewrite_shebang(version)
    else:
        # can't do anything
        return


def fixup_activate(filename, old_dir, new_dir):
    """Textually replace every occurrence of the old venv path in an
    activate script with the new path."""
    logger.debug("fixing %s" % filename)
    with open(filename, "rb") as f:
        data = f.read().decode("utf-8")

    data = data.replace(old_dir, new_dir)
    with open(filename, "wb") as f:
        f.write(data.encode("utf-8"))


def fixup_link(filename, old_dir, new_dir, target=None):
    """Repoint a symlink that targets the old venv at the new venv,
    preserving relative links as relative."""
    logger.debug("fixing %s" % filename)
    if target is None:
        target = os.readlink(filename)

    # Resolve relative targets against where the link *used to* live.
    origdir = os.path.dirname(os.path.abspath(filename)).replace(new_dir, old_dir)
    if not os.path.isabs(target):
        target = os.path.abspath(os.path.join(origdir, target))
        rellink = True
    else:
        rellink = False

    if _dirmatch(target, old_dir):
        if rellink:
            # keep relative links, but don't keep original in case it
            # traversed up out of, then back into the venv.
            # so, recreate a relative link from absolute.
            target = target[len(origdir) :].lstrip(os.sep)
        else:
            target = target.replace(old_dir, new_dir, 1)

    # else: links outside the venv, replaced with absolute path to target.
    _replace_symlink(filename, target)


def _replace_symlink(filename, newtarget):
    # Create the new link under a temp name, then rename over the old one
    # (atomic on POSIX).
    tmpfn = "%s.new" % filename
    os.symlink(newtarget, tmpfn)
    os.rename(tmpfn, filename)


def fixup_syspath_items(syspath, old_dir, new_dir):
    """For each sys.path dir of the clone that maps into the new venv,
    fix any .pth and .egg-link files it contains."""
    for path in syspath:
        if not os.path.isdir(path):
            continue
        path = os.path.normcase(os.path.abspath(path))
        if _dirmatch(path, old_dir):
            # Translate an old-venv path to its clone counterpart.
            path = path.replace(old_dir, new_dir, 1)
            if not os.path.exists(path):
                continue
        elif not _dirmatch(path, new_dir):
            # Outside both venvs (e.g. system site dirs): leave alone.
            continue
        root, dirs, files = next(os.walk(path))
        for file_ in files:
            filename = os.path.join(root, file_)
            if filename.endswith(".pth"):
                fixup_pth_file(filename, old_dir, new_dir)
            elif filename.endswith(".egg-link"):
                fixup_egglink_file(filename, old_dir, new_dir)


def fixup_pth_file(filename, old_dir, new_dir):
    """Rewrite path entries in a .pth file that point into the old venv."""
    logger.debug("fixup_pth_file %s" % filename)

    with open(filename, "r") as f:
        lines = f.readlines()

    has_change = False

    for num, line in enumerate(lines):
        # hasattr(line, 'decode') keeps this working on Python 2 bytes too.
        line = (line.decode("utf-8") if hasattr(line, "decode") else line).strip()

        # Blank lines, comments and 'import ...' directives pass through.
        if not line or line.startswith("#") or line.startswith("import "):
            continue
        elif _dirmatch(line, old_dir):
            lines[num] = line.replace(old_dir, new_dir, 1)
            has_change = True

    if has_change:
        with open(filename, "w") as f:
            payload = os.linesep.join([l.strip() for l in lines]) + os.linesep
            f.write(payload)


def fixup_egglink_file(filename, old_dir, new_dir):
    """Rewrite the single path stored in a .egg-link file if it points
    into the old venv."""
    logger.debug("fixing %s" % filename)
    with open(filename, "rb") as f:
        link = f.read().decode("utf-8").strip()
    if _dirmatch(link, old_dir):
        link = link.replace(old_dir, new_dir, 1)
        with open(filename, "wb") as f:
            link = (link + "\n").encode("utf-8")
            f.write(link)


def main():
    """CLI entry point: parse '<src venv> <dst venv>' and clone."""
    parser = optparse.OptionParser(
        "usage: %prog [options]" " /path/to/existing/venv /path/to/cloned/venv"
    )
    parser.add_option(
        "-v", action="count", dest="verbose", default=False, help="verbosity"
    )
    options, args = parser.parse_args()
    try:
        old_dir, new_dir = args
    except ValueError:
        print("virtualenv-clone %s" % (__version__,))
        parser.error("not enough arguments given.")
    old_dir = os.path.realpath(old_dir)
    new_dir = os.path.realpath(new_dir)
    # -v count maps to WARNING/INFO/DEBUG, capped at DEBUG.
    # (default=False acts as 0 in the index expression.)
    loglevel = (logging.WARNING, logging.INFO, logging.DEBUG)[min(2, options.verbose)]
    logging.basicConfig(level=loglevel, format="%(message)s")
    try:
        clone_virtualenv(old_dir, new_dir)
    except UserError:
        e = sys.exc_info()[1]
        parser.error(str(e))


if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
from __future__ import print_function

import errno

from exhibitionist import get_server
from exhibitionist.exceptions import ExhibitionistError

import unittest

from exhibitionist.objectRegistry import ObjectRegistry
from exhibitionist.toolbox import ExhibitionistRequestHandler

# NOTE(review): handlers below reference a module-level ``context``;
# presumably the @http_handler machinery rebinds it per-request — confirm.
context = None  # mute missing symbol warning


class TestServer(unittest.TestCase):
    """Integration tests for the exhibitionist HTTP server.

    These tests bind real sockets and issue real HTTP requests via
    ``requests``, so they need a free local port and tornado installed.
    """

    def setUp(self):
        # A fresh, not-yet-started server per test.
        self.server = get_server()

    def tearDown(self):
        # Only stop servers that actually came up; join with a timeout so a
        # wedged server thread can't hang the suite.
        if self.server.started_ok:
            self.server.stop()
            self.server.join(5)

    def test_add_handler(self):
        # A registered handler is reachable and serves its response body.
        from exhibitionist.decorators import http_handler
        import requests
        # expose static files to client

        @http_handler(r'/')
        class Handler(ExhibitionistRequestHandler):
            def get(self, *args, **kwds):
                self.write("a")

        self.server.add_handler(Handler)
        self.server.start()
        result = requests.get(self.server.get_view_url("Handler")).content
        self.assertEqual(result, b"a")

    def test_websocket_enable_disable(self):
        # The started server exposes a ``websocket`` attribute unless
        # websockets were disabled at construction time.
        server = get_server()
        self.assertTrue(hasattr(server.start(), "websocket"))
        server.stop()
        server.join(5)

        server = get_server(websockets_enabled=False)
        self.assertFalse(hasattr(server.start(), "websocket"))
        server.stop()
        server.join(5)

    def test_respect_requested_port(self):
        # An explicitly requested port is honored; a second bind to the same
        # port must fail.
        # NOTE(review): the server bound to 64003 here is a local, never
        # stopped explicitly — tearDown only stops self.server. Verify this
        # does not leak across tests.
        server = get_server(port=64003).start()
        self.assertEqual(server.port, 64003)

        # dies request port taken
        # logging.info("the next test should fail to bind to 64003")
        try:
            server = get_server(port=64003).start(timeout=0.1)
        except ExhibitionistError as e:
            # self.assertEqual(e.errno, errno.EADDRNOTAVAIL)
            pass
        else:
            self.fail("exception was not raised")

    def test_missing_obj_404(self):
        # A URL whose objid is not in the registry must return HTTP 404
        # and never invoke the handler body.
        from exhibitionist.decorators import http_handler
        import requests

        registry = ObjectRegistry()
        basket = []

        @http_handler(r'/{{objid}}', __registry=registry)
        class Handler(ExhibitionistRequestHandler):
            def get(self, *args, **kwds):
                basket.append(context.object)
                self.write("")

        o = object()
        self.server.add_handler(Handler)
        self.server.start()
        self.assertEqual(len(basket), 0)
        url = self.server.get_view_url("Handler", o, __registry=registry)
        url = url[:-1] + "_"  # make objid invalid
        r = requests.get(url)
        self.assertEqual(r.status_code, 404)

    def test_can_get_as_objid(self):
        # A view URL can be built from an already-registered objid (the key),
        # not just from the object itself; registering the same object twice
        # yields the same key.
        from exhibitionist.decorators import http_handler
        import tornado.web
        import requests

        registry = ObjectRegistry()
        basket = []

        @http_handler(r'/{{objid}}', __registry=registry)
        class Handler(ExhibitionistRequestHandler):
            def get(self, *args, **kwds):
                basket.append(context.object)
                self.write("")

        o = object()
        k = registry.register(o)
        self.server.add_handler(Handler)
        self.server.start()
        self.assertEqual(len(basket), 0)
        url = self.server.get_view_url("Handler", k, __registry=registry)
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertTrue(registry.register(o) in url)
        self.assertTrue(k in url)

    def test_can_pass_handler_kwds(self):
        # Extra kwargs given to @http_handler are forwarded to the tornado
        # handler's initialize().
        from exhibitionist.decorators import http_handler
        import requests

        registry = ObjectRegistry()
        basket = []

        @http_handler(r'/', __registry=registry,foo='bar')
        class Handler(ExhibitionistRequestHandler):
            def initialize(self,**kwds):
                basket.append(kwds.get('foo',''))

            def get(self, *args, **kwds):
                pass

        self.server.add_handler(Handler)
        self.server.start()
        self.assertEqual(len(basket), 0)
        url = self.server.get_view_url("Handler", __registry=registry)
        r = requests.get(url)
        self.assertEqual(r.status_code, 200)
        self.assertEqual(len(basket), 1)
        self.assertEqual(basket.pop(), 'bar')

    def test_add_obj_handler(self):
        # A GET on an object's view URL delivers the registered object to the
        # handler via ``context.object``; with _weakref=True the registry
        # drops the object once the last strong reference dies (404 after).
        from exhibitionist.decorators import http_handler
        import requests

        registry = ObjectRegistry()
        basket = []

        @http_handler(r'/{{objid}}', __registry=registry)
        class Handler(ExhibitionistRequestHandler):
            def get(self, *args, **kwds):
                basket.append(context.object)
                self.write("")

        class A(object):
            # plain object() can't be weak-referenced into some registries;
            # a user-defined class supports weakrefs.
            pass

        o = object()
        self.server.add_handler(Handler)
        self.server.start()
        self.assertEqual(len(basket), 0)
        requests.get(
            self.server.get_view_url("Handler", o, __registry=registry))
        self.assertEqual(len(basket), 1)
        self.assertEqual(basket.pop(), o)

        # test weakref reaped
        o = A()
        self.assertEqual(len(basket), 0)
        url = self.server.get_view_url("Handler", o, __registry=registry,
                                       _weakref=True)
        requests.get(url)
        self.assertEqual(len(basket), 1)
        self.assertEqual(basket.pop(), o)
        del o
        r = requests.get(url)
        self.assertEqual(r.status_code, 404)

    def test_add_obj_handler_from_module(self):
        # add_handler() also accepts a module; every handler in it is
        # registered. Also exercises the JSON/CORS/JSONP mixins of the
        # bogus_handlers fixture module.
        import requests
        import bogus_handlers as handlers

        registry = ObjectRegistry()
        self.server.add_handler(handlers)
        self.server.start()

        url = self.server.get_view_url("MyJSONView", __registry=registry)
        result = requests.get(url)
        body = result.content
        self.assertTrue(b"bar" in body)
        headers = result.headers
        # test JSONMixin
        self.assertEqual(headers["Content-Type"], "application/json")
        # test CORSMixin
        self.assertEqual(headers["Access-Control-Allow-Origin"], "CORS")

        #test JSONP, with custom callback param name
        url = self.server.get_view_url("MyJSONView", __registry=registry) + "?cb=?"
        result = requests.get(url)
        headers = result.headers
        self.assertEqual(headers["Content-Type"], 'text/javascript')

    def test_can_only_add_providers_before_server_start(self):
        # Registering a provider after start must raise ExhibitionistError.
        self.server.start()._ensure_up()
        try:
            self.server.register_provider(object())
        except ExhibitionistError:
            pass
        else:
            self.fail("Didn't catch attempt to register provider after start")

    def test_can_only_add_handlers_before_server_start(self):
        # Adding a handler after start must raise.
        self.server.start()._ensure_up()
        try:
            self.server.add_handler(object())
        except Exception:
            pass
        else:
            self.fail("Didn't catch attempt to add handlers after server start")

    def test_pure_evil_at_bay(self):
        # make sure requests to a given port are always handled
        # by the server thread that is listening to that port,
        # even having identical routes on different servers.
        # Tornado 2.4.1 doesn't do that if server threads share the IOLoop
        from tornado.web import RequestHandler
        import threading
        import requests
        from exhibitionist.decorators import http_handler

        # Collects the thread idents that served requests; exactly one
        # server thread should ever appear here.
        bucket = set()

        @http_handler("/a",__test=True)
        class H(RequestHandler):
            def __init__(self, *args, **kwds):
                super(H, self).__init__(*args, **kwds)
                self.bucket = bucket

            def get(self):
                self.bucket.add(threading.current_thread().ident)

        servers = []
        try:
            for i in range(10):
                servers.append(get_server().add_handler(H).start())
            for i in range(100):
                requests.get(servers[0].get_view_url("H"))

            self.assertEqual(len(bucket), 1)
        finally:
            # Best-effort teardown of all ten servers.
            for s in servers:
                try:
                    s.stop()
                except:
                    pass

    def test_notify_channel_takes_strings_only(self):
        # Non-string channel names must be rejected with ValueError.
        try:
            self.server.notify_channel(object(), "")
        except ValueError:
            pass
        else:
            self.fail("failed to raise exception")

    def test_kwds_in_decorator(self):
        # Decorator kwargs reach initialize() as named parameters.
        from exhibitionist.decorators import http_handler
        import tornado.web
        import requests

        registry = ObjectRegistry()
        basket = []

        @http_handler(r'/{{objid}}', __registry=registry, myArg='abc')
        class Handler(ExhibitionistRequestHandler):
            def initialize(self, myArg):
                basket.append(myArg)

            def get(self, *args, **kwds):
                self.write("")

        o = object()
        self.server.add_handler(Handler)
        self.server.start()
        self.assertEqual(len(basket), 0)
        requests.get(
            self.server.get_view_url('Handler', o, __registry=registry))
        self.assertEqual(len(basket), 1)
        self.assertEqual(basket.pop(), 'abc')
"""Incremental JSON pull-parser reading from a binary file object."""
from .array import Array
from .obj import Object


class FileReader:
    """Reads JSON values incrementally from a binary (bytes-mode) file.

    Values are parsed on demand; arrays and objects are returned as lazy
    ``Array``/``Object`` wrappers unless ``to_python`` is requested.
    """

    # JSON insignificant whitespace bytes.
    _WHITESPACE = b'\t\n\r '
    # Refill granularity for the internal read buffer.
    _READBUF_CHUNK_SIZE = 1024*1024

    def __init__(self, file, encoding):
        self.file = file
        # TODO: Support encodings where basic shortest characters are longer than one byte (e.g. UTF-16 and UTF-32)!
        self.encoding = encoding

        # This buffer is for reading and peeking.
        # readbuf_read: offset of the next unread byte inside readbuf.
        # readbuf_pos:  absolute file position of readbuf[0].
        self.readbuf = b''
        self.readbuf_read = 0
        self.readbuf_pos = 0

    def read(self, read_all=False, to_python=False):
        """Parse and return the next JSON value from the stream.

        read_all  -- when True, container values are fully consumed from the
                     stream instead of being read lazily.
        to_python -- when True (requires read_all), containers are converted
                     to plain Python lists/dicts.
        Raises Exception on malformed input or unexpected end of file.
        """
        # "to_python" cannot be set without "read_all"
        assert read_all or not to_python

        self._skip_whitespace()

        # None
        if self._skip_if_next('null'):
            return None

        # False
        if self._skip_if_next('false'):
            return False

        # True
        if self._skip_if_next('true'):
            return True

        # Number
        if self._peek_in(b'-0123456789'):
            num = self._get()
            # Check sign
            if num == b'-':
                num += self._get()
            # Read integer part. Per the JSON grammar a leading zero cannot
            # be followed by more digits. BUGFIX: compare a one-byte slice,
            # not ``num[-1]`` — indexing bytes yields an int on Python 3, so
            # ``num[-1] != b'0'`` was always true and '0123' was accepted.
            if num[-1:] != b'0':
                while self._peek_in(b'0123456789'):
                    num += self._get()
            # Read possible decimal part and convert to float
            if self._peek() == b'.':
                self._get()
                num += b'.' + self._get()
                # Membership of an int in bytes checks the byte value, so
                # this works on Python 3.
                if num[-1] not in b'01234567890':
                    raise Exception(u'Expected digit after dot! Position {}'.format(self.readbuf_read))
                while self._peek_in(b'0123456789'):
                    num += self._get()
                num = float(num)
            else:
                num = int(num)
            # Read possible exponent
            if self._peek_in(b'eE'):
                self._get()
                exp = self._get()
                exp_neg = False
                if exp == b'-':
                    exp_neg = True
                    exp = self._get()
                elif exp == b'+':
                    exp = self._get()
                while self._peek_in(b'0123456789'):
                    exp += self._get()
                exp = int(exp)
                exp = 10 ** exp
                # Negative exponents divide (forcing float); positive ones
                # multiply, keeping ints exact.
                if exp_neg:
                    num = float(num) / exp
                else:
                    num *= exp
            return num

        # String
        if self._skip_if_next('"'):
            string = b''
            while True:
                c = self._get()
                if c == b'"':
                    break
                if c == b'\\':
                    c = self._get()
                    if c == b'"':
                        string += b'"'
                    elif c == b'\\':
                        string += b'\\'
                    elif c == b'/':
                        string += b'/'
                    elif c == b'b':
                        string += b'\b'
                    elif c == b'f':
                        string += b'\f'
                    elif c == b'n':
                        string += b'\n'
                    elif c == b'r':
                        string += b'\r'
                    elif c == b't':
                        string += b'\t'
                    elif c == b'u':
                        # Deal with these later. They are hard to process now, since
                        # they can have unicode surrogates and other tricky stuff.
                        string += b'\\u'
                    else:
                        raise Exception(u'Unexpected \\{} in backslash encoding! Position {}'.format(c.decode('utf-8'), self.readbuf_read - 1))
                else:
                    string += c
            # TODO: self.encoding should be used here...
            # Convert from bytes to string. Handle unicode surrogates with UTF-16 trick
            string = string.decode('unicode_escape').encode('utf-16', 'surrogatepass').decode('utf-16')
            return string

        # Array
        if self._peek() == b'[':
            if to_python:
                array = Array(self, False)
                return array.to_python()
            else:
                return Array(self, read_all)

        # Object
        if self._peek() == b'{':
            if to_python:
                obj = Object(self, False)
                return obj.to_python()
            else:
                return Object(self, read_all)

        c = self._peek()
        if c is None:
            # BUGFIX: previously fell through to ``c.decode`` and crashed
            # with AttributeError when the stream ended where a value was
            # expected.
            raise Exception(u'Unexpected end of file! Position {}'.format(self._tell_read_pos()))
        raise Exception(u'Unexpected bytes! Value \'{}\' Position {}'.format(c.decode('utf-8'), self.readbuf_read))

    def _skip_whitespace(self):
        """Advance past any JSON whitespace; stop at EOF."""
        while True:
            next_char = self._peek()
            if next_char is None:
                break
            if next_char not in FileReader._WHITESPACE:
                break
            self._get()

    def _get(self):
        """Consume and return the next byte; raise on end of file."""
        self._ensure_readbuf_left(1)
        if len(self.readbuf) - self.readbuf_read < 1:
            raise Exception(u'Unexpected end of file when getting next byte!')
        result = self.readbuf[self.readbuf_read:self.readbuf_read + 1]
        self.readbuf_read += 1
        return result

    def _read(self, amount):
        """Consume and return exactly ``amount`` bytes; raise on EOF."""
        self._ensure_readbuf_left(amount)
        if len(self.readbuf) - self.readbuf_read < amount:
            raise Exception(u'Unexpected end of file reading a chunk!')
        result = self.readbuf[self.readbuf_read:self.readbuf_read + amount]
        self.readbuf_read += amount
        return result

    def _peek(self):
        """Return the next byte without consuming it, or None at EOF."""
        self._ensure_readbuf_left(1)
        if len(self.readbuf) - self.readbuf_read < 1:
            return None
        return self.readbuf[self.readbuf_read:self.readbuf_read + 1]

    def _peek_in(self, charset):
        """Return True if a next byte exists and is one of ``charset``.

        BUGFIX helper: ``self._peek() in charset`` raised TypeError on
        Python 3 when ``_peek()`` returned None at end of file.
        """
        c = self._peek()
        return c is not None and c in charset

    def _is_next(self, s):
        """Return True if the unconsumed stream starts with ``s``."""
        if not isinstance(s, bytes):
            s = s.encode(self.encoding)
        s_len = len(s)
        self._ensure_readbuf_left(s_len)
        if len(self.readbuf) - self.readbuf_read < s_len:
            return False
        return self.readbuf[self.readbuf_read:self.readbuf_read + s_len] == s

    def _skip_if_next(self, s):
        """
        If next bytes are same as in 's', then skip them and return True.
        """
        if not isinstance(s, bytes):
            s = s.encode(self.encoding)
        if self._is_next(s):
            self.readbuf_read += len(s)
            return True
        return False

    def _ensure_readbuf_left(self, minimum_left):
        """Refill the buffer so at least ``minimum_left`` unread bytes are
        available, if the file has that many left (best effort at EOF)."""
        if len(self.readbuf) - self.readbuf_read >= minimum_left:
            return
        read_amount = max(minimum_left, FileReader._READBUF_CHUNK_SIZE) - (len(self.readbuf) - self.readbuf_read)
        # Drop already-consumed bytes and account for them in readbuf_pos.
        self.readbuf_pos += self.readbuf_read
        self.readbuf = self.readbuf[self.readbuf_read:] + self.file.read(read_amount)
        self.readbuf_read = 0

    def _tell_read_pos(self):
        """Absolute file position of the next unread byte."""
        return self.readbuf_pos + self.readbuf_read

    def _seek(self, pos):
        """Reposition the reader at absolute position ``pos``."""
        # If position is at the area of
        # readbuffer, then just rewind it.
        if pos >= self.readbuf_pos and pos < self.readbuf_pos + len(self.readbuf):
            self.readbuf_read = pos - self.readbuf_pos
        # If position is outside the readbuffer,
        # then read buffer from scratch
        else:
            self.readbuf = b''
            self.readbuf_read = 0
            self.readbuf_pos = pos
            self.file.seek(pos)
# mbed SDK # Copyright (c) 2011-2013 ARM Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ # The scanning rules and Resources object. A project in Mbed OS contains metadata in the file system as directory names. These directory names adhere to a set of rules referred to as scanning rules. The following are the English version of the scanning rules: Directory names starting with "TEST_", "TARGET_", "TOOLCHAIN_" and "FEATURE_" are excluded from a build unless one of the following is true: * The suffix after "TARGET_" is a target label (see target.labels). * The suffix after "TOOLCHAIN_" is a toolchain label, defined by the inheritance hierarchy of the toolchain class. * The suffix after "FEATURE_" is a member of `target.features`. """ from __future__ import print_function, division, absolute_import import fnmatch import re from collections import namedtuple, defaultdict from copy import copy from itertools import chain from os import walk, sep from os.path import (join, splitext, dirname, relpath, basename, split, normpath, abspath, exists) from tools.settings import ROOT from .ignore import MbedIgnoreSet, IGNORE_FILENAME # Support legacy build conventions: the original mbed build system did not have # standard labels for the "TARGET_" and "TOOLCHAIN_" specific directories, but # had the knowledge of a list of these directories to be ignored. 
LEGACY_IGNORE_DIRS = set([ # Legacy Targets 'LPC11U24', 'LPC1768', 'LPC2368', 'LPC4088', 'LPC812', 'KL25Z', # Legacy Toolchains 'ARM', 'uARM', 'IAR', 'GCC_ARM', 'GCC_CS', 'GCC_CR', 'GCC_CW', 'GCC_CW_EWL', 'GCC_CW_NEWLIB', 'ARMC6', # Tests, here for simplicity 'TESTS', 'TEST_APPS', ]) LEGACY_TOOLCHAIN_NAMES = { 'ARM_STD':'ARM', 'ARM_MICRO': 'uARM', 'GCC_ARM': 'GCC_ARM', 'GCC_CR': 'GCC_CR', 'IAR': 'IAR', 'ARMC6': 'ARMC6', } MBED_LIB_FILENAME = 'mbed_lib.json' MBED_APP_FILENAME = 'mbed_app.json' CONFIG_FILES = set([ MBED_LIB_FILENAME, MBED_APP_FILENAME ]) FileRef = namedtuple("FileRef", "name path") class FileType(object): C_SRC = "c" CPP_SRC = "c++" ASM_SRC = "s" HEADER = "header" INC_DIR = "inc" LIB_DIR = "libdir" LIB = "lib" OBJECT = "o" HEX = "hex" BIN = "bin" JSON = "json" LD_SCRIPT = "ld" LIB_REF = "libref" BLD_REF = "bldref" REPO_DIR = "repodir" def __init__(self): raise NotImplemented class Resources(object): ALL_FILE_TYPES = [ FileType.C_SRC, FileType.CPP_SRC, FileType.ASM_SRC, FileType.HEADER, FileType.INC_DIR, FileType.LIB_DIR, FileType.LIB, FileType.OBJECT, FileType.HEX, FileType.BIN, FileType.JSON, FileType.LD_SCRIPT, FileType.LIB_REF, FileType.BLD_REF, FileType.REPO_DIR, ] def __init__(self, notify, collect_ignores=False): # publicly accessible things self.ignored_dirs = [] # library requirements self._libs_filtered = None # Pre-mbed 2.0 ignore dirs self._legacy_ignore_dirs = (LEGACY_IGNORE_DIRS) # Primate parameters self._notify = notify self._collect_ignores = collect_ignores # Storage for file references, indexed by file type self._file_refs = defaultdict(set) # Incremental scan related self._label_paths = [] self._labels = { "TARGET": [], "TOOLCHAIN": [], "FEATURE": [], "COMPONENT": [] } self._prefixed_labels = set() # Path seperator style (defaults to OS-specific seperator) self._sep = sep self._ignoreset = MbedIgnoreSet() # make sure mbed-os root is added as include directory script_dir = dirname(abspath(__file__)) mbed_os_root_dir = 
# NOTE(review): the next two statements are the tail of a method whose `def`
# line falls before this chunk; left in place unmodified.
normpath(join(script_dir, '..', '..'))
self.add_file_ref(FileType.INC_DIR, mbed_os_root_dir, mbed_os_root_dir)

def ignore_dir(self, directory):
    """Record *directory* as ignored (only when ignore collection is on)."""
    if self._collect_ignores:
        self.ignored_dirs.append(directory)

def _collect_duplicates(self, dupe_dict, dupe_headers):
    """Accumulate object-name and header-name collisions.

    dupe_dict maps object base name -> set of source files producing it;
    dupe_headers maps header base name -> set of header paths with that name.
    Returns the two (mutated) dicts.
    """
    for filename in self.s_sources + self.c_sources + self.cpp_sources:
        objname, _ = splitext(basename(filename))
        dupe_dict.setdefault(objname, set())
        dupe_dict[objname] |= {filename}
    for filename in self.headers:
        headername = basename(filename)
        dupe_headers.setdefault(headername, set())
        # BUG FIX: record the full path of each colliding header, not its
        # own basename — previously every "locations" set collapsed to one
        # entry equal to its key, making the report below meaningless.
        dupe_headers[headername] |= {filename}
    return dupe_dict, dupe_headers

def detect_duplicates(self):
    """Detect all potential ambiguities in filenames and report them with
    a toolchain notification.  Returns the number of ambiguities found.
    """
    count = 0
    dupe_dict, dupe_headers = self._collect_duplicates(dict(), dict())
    for objname, filenames in dupe_dict.items():
        if len(filenames) > 1:
            count += 1
            self._notify.tool_error(
                "Object file %s.o is not unique! It could be made from: %s"
                % (objname, " ".join(filenames)))
    for headername, locations in dupe_headers.items():
        if len(locations) > 1:
            count += 1
            self._notify.tool_error(
                "Header file %s is not unique! It could be: %s"
                % (headername, " ".join(locations)))
    return count

def win_to_unix(self):
    """Rewrite every stored file name to use '/' as the path separator."""
    self._sep = "/"
    if self._sep != sep:
        for file_type in self.ALL_FILE_TYPES:
            v = [f._replace(name=f.name.replace(sep, self._sep))
                 for f in self.get_file_refs(file_type)]
            self._file_refs[file_type] = set(v)

def __str__(self):
    """Human-readable dump of every non-empty file category."""
    s = []
    for (label, file_type) in (
            ('Include Directories', FileType.INC_DIR),
            ('Headers', FileType.HEADER),
            ('Assembly sources', FileType.ASM_SRC),
            ('C sources', FileType.C_SRC),
            ('C++ sources', FileType.CPP_SRC),
            ('Library directories', FileType.LIB_DIR),
            ('Objects', FileType.OBJECT),
            ('Libraries', FileType.LIB),
            ('Hex files', FileType.HEX),
            ('Bin files', FileType.BIN),
            ('Linker script', FileType.LD_SCRIPT)):
        resources = self.get_file_refs(file_type)
        if resources:
            s.append('%s:\n ' % label + '\n '.join(
                "%s -> %s" % (name, path) for name, path in resources))
    return '\n'.join(s)

def _add_labels(self, prefix, labels):
    """Activate *labels* under *prefix* and rescan any deferred label dirs
    that have now become active.
    """
    self._labels[prefix].extend(labels)
    self._prefixed_labels |= set(
        "%s_%s" % (prefix, label) for label in labels)
    for path, base_path, into_path in self._label_paths:
        if basename(path) in self._prefixed_labels:
            self.add_directory(path, base_path, into_path)
    # Drop the deferred entries that were just scanned.
    self._label_paths = [(p, b, i) for p, b, i in self._label_paths
                         if basename(p) not in self._prefixed_labels]

def add_target_labels(self, target):
    """Activate the TARGET_/COMPONENT_/FEATURE_ labels of *target*."""
    self._add_labels("TARGET", target.labels)
    self._add_labels("COMPONENT", target.components)
    self.add_features(target.features)

def add_features(self, features):
    self._add_labels("FEATURE", features)

def add_toolchain_labels(self, toolchain):
    """Activate the toolchain's labels and stop legacy-ignoring its dirs."""
    for prefix, value in toolchain.get_labels().items():
        self._add_labels(prefix, value)
    self._legacy_ignore_dirs -= set(
        [toolchain.target.name, LEGACY_TOOLCHAIN_NAMES[toolchain.name]])

def add_ignore_patterns(self, root, base_path, patterns):
    real_base = relpath(root, base_path)
    self._ignoreset.add_ignore_patterns(real_base, patterns)

def _not_current_label(self, dirname, label_type):
    """True when *dirname* is a label dir of *label_type* that is inactive."""
    return (dirname.startswith(label_type + "_") and
            dirname[len(label_type) + 1:] not in self._labels[label_type])

def add_file_ref(self, file_type, file_name, file_path):
    """Register a (name, path) pair under *file_type* (no-op if falsy)."""
    if file_type:
        if sep != self._sep:
            file_name = file_name.replace(sep, self._sep)
        # Mbed OS projects only use one linker script at a time, so remove
        # any existing linker script when adding a new one
        if file_type == FileType.LD_SCRIPT:
            self._file_refs[file_type].clear()
        self._file_refs[file_type].add(FileRef(file_name, file_path))

def _include_file(self, ref):
    """Determine if a given file ref should be included in the build

    Files may be part of a library if a parent directory contains an
    mbed_lib.json. If a file is part of a library, include or exclude
    it based on the library it's part of.
    If a file is not part of a library, it's included.
    """
    _, path = ref
    cur_dir = dirname(path)
    included_lib_paths = [dirname(e.path) for e in self._libs_filtered]
    excluded_lib_paths = [dirname(e.path) for e in self._excluded_libs]
    # Walk up towards the filesystem root, stopping at the first library
    # directory encountered.
    while dirname(cur_dir) != cur_dir:
        if cur_dir in included_lib_paths:
            return True
        elif cur_dir in excluded_lib_paths:
            return False
        cur_dir = dirname(cur_dir)
    return True

def get_file_refs(self, file_type):
    """Return a list of FileRef for every file of the given type"""
    if self._libs_filtered is None:
        return list(self._file_refs[file_type])
    else:
        return [ref for ref in self._file_refs[file_type]
                if self._include_file(ref)]

def filter_by_libraries(self, libraries_included):
    """Call after completely done scanning to filter resources based on
    the set of included libraries.
    """
    self._libs_filtered = set(libraries_included)
    all_library_refs = set(
        ref for ref in self._file_refs[FileType.JSON]
        if ref.name.endswith(MBED_LIB_FILENAME)
    )
    self._excluded_libs = all_library_refs - self._libs_filtered
    if self._collect_ignores:
        self.ignored_dirs += [
            dirname(n) or "." for n, _ in self._excluded_libs
        ]

def _get_from_refs(self, file_type, key):
    return sorted([key(f) for f in self.get_file_refs(file_type)])

def get_file_names(self, file_type):
    return self._get_from_refs(file_type, lambda f: f.name)

def get_file_paths(self, file_type):
    return self._get_from_refs(file_type, lambda f: f.path)

def add_files_to_type(self, file_type, files):
    """Register each file in *files* as both the name and the path."""
    for f in files:
        self.add_file_ref(file_type, f, f)

@property
def inc_dirs(self):
    return self.get_file_names(FileType.INC_DIR)

@property
def headers(self):
    return self.get_file_names(FileType.HEADER)

@property
def s_sources(self):
    return self.get_file_names(FileType.ASM_SRC)

@property
def c_sources(self):
    return self.get_file_names(FileType.C_SRC)

@property
def cpp_sources(self):
    return self.get_file_names(FileType.CPP_SRC)

@property
def lib_dirs(self):
    return self.get_file_names(FileType.LIB_DIR)

@property
def objects(self):
    return self.get_file_names(FileType.OBJECT)

@property
def libraries(self):
    return self.get_file_names(FileType.LIB)

@property
def lib_builds(self):
    return self.get_file_names(FileType.BLD_REF)

@property
def lib_refs(self):
    return self.get_file_names(FileType.LIB_REF)

@property
def linker_script(self):
    """The first linker script found, or None."""
    options = self.get_file_names(FileType.LD_SCRIPT)
    if options:
        return options[0]
    else:
        return None

@property
def hex_files(self):
    return self.get_file_names(FileType.HEX)

@property
def bin_files(self):
    return self.get_file_names(FileType.BIN)

@property
def json_files(self):
    return self.get_file_names(FileType.JSON)

def add_directory(self, path, base_path=None, into_path=None,
                  exclude_paths=None):
    """ Scan a directory and include its resources in this resources object

    Positional arguments:
    path - the path to search for resources

    Keyword arguments
    base_path - If this is part of an incremental scan, include the origin
                directory root of the scan here
    into_path - Pretend that scanned files are within the specified
                directory within a project instead of using their actual
                path
    exclude_paths - A list of paths that are to be excluded from a build
    """
    self._notify.progress("scan", abspath(path))
    if base_path is None:
        base_path = path
    if into_path is None:
        into_path = path
    if self._collect_ignores and relpath(path, base_path) in self.ignored_dirs:
        self.ignored_dirs.remove(relpath(path, base_path))
    if exclude_paths:
        self.add_ignore_patterns(
            path, base_path, [join(e, "*") for e in exclude_paths])
    for root, dirs, files in walk(path, followlinks=True):
        # Check if folder contains .mbedignore
        if IGNORE_FILENAME in files:
            real_base = relpath(root, base_path)
            self._ignoreset.add_mbedignore(
                real_base, join(root, IGNORE_FILENAME))
        root_path = join(relpath(root, base_path))
        if self._ignoreset.is_ignored(join(root_path, "")):
            self.ignore_dir(join(into_path, root_path))
            dirs[:] = []
            continue
        for d in copy(dirs):
            dir_path = join(root, d)
            if d == '.hg' or d == '.git':
                fake_path = join(into_path, relpath(dir_path, base_path))
                self.add_file_ref(FileType.REPO_DIR, fake_path, dir_path)
            if (any(self._not_current_label(d, t)
                    for t in self._labels.keys())):
                # Inactive label dir: defer it for a possible later rescan.
                self._label_paths.append((dir_path, base_path, into_path))
                self.ignore_dir(join(
                    into_path, relpath(dir_path, base_path)))
                dirs.remove(d)
            elif (d.startswith('.') or
                  d in self._legacy_ignore_dirs or
                  self._ignoreset.is_ignored(join(root_path, d, ""))):
                self.ignore_dir(join(
                    into_path, relpath(dir_path, base_path)))
                dirs.remove(d)
        # Add root to include paths
        root = root.rstrip("/")
        for file in files:
            file_path = join(root, file)
            self._add_file(file_path, base_path, into_path)

# Extension -> file type for individual files.
_EXT = {
    ".c": FileType.C_SRC,
    ".cc": FileType.CPP_SRC,
    ".cpp": FileType.CPP_SRC,
    ".s": FileType.ASM_SRC,
    ".h": FileType.HEADER,
    ".hh": FileType.HEADER,
    ".hpp": FileType.HEADER,
    ".inc": FileType.HEADER,
    ".tpp": FileType.HEADER,
    ".o": FileType.OBJECT,
    ".hex": FileType.HEX,
    ".bin": FileType.BIN,
    ".json": FileType.JSON,
    ".a": FileType.LIB,
    ".ar": FileType.LIB,
    ".sct": FileType.LD_SCRIPT,
    ".ld": FileType.LD_SCRIPT,
    ".icf": FileType.LD_SCRIPT,
    ".lib": FileType.LIB_REF,
    ".bld": FileType.BLD_REF,
}

# Extension -> file type for the *containing directory* of such files.
_DIR_EXT = {
    ".a": FileType.LIB_DIR,
    ".ar": FileType.LIB_DIR,
}

def _all_parents(self, file_path, base_path, into_path):
    """Yield a FileRef for every ancestor directory of *file_path*, from
    the innermost active label directory (exclusive) down to the file's
    own directory.
    """
    suffix = relpath(file_path, base_path)
    components = suffix.split(self._sep)
    start_at = 0
    for index, directory in reversed(list(enumerate(components))):
        if directory in self._prefixed_labels:
            start_at = index + 1
            break
    for n in range(start_at, len(components)):
        parent_name_parts = components[:n]
        if into_path:
            parent_name_parts.insert(0, into_path)
        parent_name = self._sep.join(parent_name_parts)
        parent_path = join(base_path, *components[:n])
        yield FileRef(parent_name, parent_path)

def _add_file(self, file_path, base_path, into_path):
    """ Add a single file into the resources object that was found by
    scanning starting as base_path
    """
    fake_path = join(into_path, relpath(file_path, base_path))
    if (self._ignoreset.is_ignored(relpath(file_path, base_path)) or
            basename(file_path).startswith(".")):
        self.ignore_dir(fake_path)
        return
    _, ext = splitext(file_path)
    # Special case: extension-less cxxsupport files are headers.
    if ext == "" and "cxxsupport" in fake_path:
        file_type = FileType.HEADER
    else:
        file_type = self._EXT.get(ext.lower())
    self.add_file_ref(file_type, fake_path, file_path)
    if file_type == FileType.HEADER:
        for name, path in self._all_parents(file_path, base_path, into_path):
            self.add_file_ref(FileType.INC_DIR, name, path)
    dir_type = self._DIR_EXT.get(ext.lower())
    self.add_file_ref(dir_type, dirname(fake_path), dirname(file_path))

def scan_with_toolchain(self, src_paths, toolchain, dependencies_paths=None,
                        inc_dirs=None, exclude=True):
    """ Scan resources using initialized toolcain

    Positional arguments
    src_paths - the paths to source directories
    toolchain - valid toolchain object

    Keyword arguments
    dependencies_paths - dependency paths that we should scan for include
                         dirs
    inc_dirs - additional include directories which should be added to
               the scanner resources
    exclude - Exclude the toolchain's build directory from the resources

    Returns self (for chaining).
    """
    self.add_toolchain_labels(toolchain)
    for path in src_paths:
        if exists(path):
            into_path = relpath(path).strip(".\\/")
            if exclude:
                self.add_directory(
                    path,
                    into_path=into_path,
                    exclude_paths=[toolchain.build_dir]
                )
            else:
                self.add_directory(path, into_path=into_path)

    # Scan dependency paths for include dirs
    if dependencies_paths is not None:
        toolchain.progress("dep", dependencies_paths)
        for dep in dependencies_paths:
            lib_self = self.__class__(self._notify, self._collect_ignores)\
                .scan_with_toolchain([dep], toolchain)
            # BUG FIX: `self.inc_dirs` is a property returning a fresh
            # list, so extending it was a silent no-op; register the
            # include directories for real.
            self.add_files_to_type(FileType.INC_DIR, lib_self.inc_dirs)

    # Add additional include directories if passed
    if inc_dirs:
        # BUG FIX: same silent no-op as above — append/extend on the
        # `inc_dirs` property never modified this object.
        if isinstance(inc_dirs, list):
            self.add_files_to_type(FileType.INC_DIR, inc_dirs)
        else:
            self.add_file_ref(FileType.INC_DIR, inc_dirs, inc_dirs)

    # Load self into the config system which might expand/modify self
    # based on config data
    toolchain.config.load_resources(self)

    # Set the toolchain's configuration data
    toolchain.set_config_data(toolchain.config.get_config_data())

    return self

def scan_with_config(self, src_paths, config):
    """Scan *src_paths* using an already-constructed Config object."""
    if config.target:
        self.add_target_labels(config.target)
    for path in src_paths:
        if exists(path):
            self.add_directory(path)
    config.load_resources(self)
    return self

def filter(self, res_filter):
    """Keep only the file refs accepted by *res_filter* (None = keep all)."""
    if res_filter is None:
        return
    for t in res_filter.file_types:
        self._file_refs[t] = set(filter(
            res_filter.predicate, self._file_refs[t]))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import unittest
from collections import namedtuple

import mxnet as mx
import numpy as np

from converter._mxnet_converter import convert
from converter import utils


def _mxnet_remove_batch(input_data):
    """Strip the leading batch dimension from every blob in *input_data*."""
    for blob in input_data:
        input_data[blob] = np.reshape(input_data[blob],
                                      input_data[blob].shape[1:])
    return input_data


def _get_mxnet_module(net, data_shapes, mode, label_names, input_names=None):
    """Given a symbolic graph, input shape and the initialization mode,
    returns an MXNet module with its parameters initialized.

    mode must be one of 'random', 'zeros' or 'ones'; any other value
    raises KeyError.
    """
    mx.random.seed(1993)
    # NOTE(review): `input_names` is forwarded as `label_shapes` — looks
    # like a naming slip in the original API; preserved as-is.
    mod = utils.create_module(sym=net, data_shapes=data_shapes,
                              label_shapes=input_names,
                              label_names=label_names)
    initializers = {
        'random': mx.init.Uniform(scale=.1),
        'zeros': mx.init.Zero(),
        'ones': mx.init.One(),
    }
    try:
        mod.init_params(initializer=initializers[mode])
    except KeyError:
        # BUG FIX: the original built `Exception(KeyError(...))` without
        # raising it, silently returning an uninitialized module for an
        # unknown mode.
        raise KeyError("%s is not a valid initialization mode" % mode)
    return mod


class SingleLayerTest(unittest.TestCase):
    """
    Unit test class for testing where converter is able to convert
    individual layers or not. In order to do so, it converts model and
    generates preds on both CoreML and MXNet and check they are the same.
    """

    # ---- shared net builders (test helpers; not part of the public API) --

    @staticmethod
    def _fc_net(num_hidden, input_name='data', **kwargs):
        """A Variable -> FullyConnected('fc1') graph."""
        net = mx.sym.Variable(input_name)
        return mx.sym.FullyConnected(data=net, name='fc1',
                                     num_hidden=num_hidden, **kwargs)

    @staticmethod
    def _conv_net(num_filter, kernel, stride=(1, 1), pad=(0, 0), **kwargs):
        """A Variable -> Convolution('conv_1') graph."""
        net = mx.sym.Variable('data')
        return mx.symbol.Convolution(data=net, num_filter=num_filter,
                                     kernel=kernel, stride=stride, pad=pad,
                                     name='conv_1', **kwargs)

    @staticmethod
    def _deconv_net(num_filter, kernel, stride=(1, 1), pad=(0, 0), **kwargs):
        """A Variable -> Deconvolution('deconv_1') graph."""
        net = mx.sym.Variable('data')
        return mx.symbol.Deconvolution(data=net, num_filter=num_filter,
                                       kernel=kernel, stride=stride, pad=pad,
                                       name='deconv_1', **kwargs)

    def _test_mxnet_model(self, net, input_shape, mode, class_labels=None,
                          coreml_mode=None, label_names=None, delta=1e-2,
                          pre_processing_args=None, input_name='data'):
        """ Helper method that converts the MXNet model into CoreML and
        compares the predictions over random data.

        Parameters
        ----------
        net: MXNet Symbol Graph
            The graph that we'll be converting into CoreML.

        input_shape: tuple of ints
            The shape of input data. Generally of the format
            (batch-size, channels, height, width)

        mode: (random|zeros|ones)
            The mode to use in order to set the parameters (weights and
            biases).

        label_names: list of strings
            The names of the output labels. Default: None

        delta: float
            The maximum difference b/w predictions of MXNet and CoreML
            that is tolerable.

        input_name: str
            The name of the input variable to the symbolic graph.
        """
        data_shapes = [(input_name, input_shape)]
        mod = _get_mxnet_module(net, data_shapes, mode, label_names)

        # Generate some dummy data
        input_data = {input_name: np.random.uniform(-10., 10., input_shape)}
        Batch = namedtuple('Batch', ['data'])
        mod.forward(Batch([mx.nd.array(input_data[input_name])]))
        mxnet_preds = mod.get_outputs()[0].asnumpy().flatten()

        # Get predictions from coreml
        coreml_model = convert(
            model=mod,
            class_labels=class_labels,
            mode=coreml_mode,
            input_shape={input_name: input_shape},
            preprocessor_args=pre_processing_args
        )
        # BUG FIX: dict views are not subscriptable on Python 3;
        # materialize the view before indexing.
        coreml_preds = list(
            coreml_model.predict(_mxnet_remove_batch(input_data)).values()
        )[0].flatten()

        # Check prediction accuracy
        # BUG FIX: assertEquals/assertAlmostEquals are deprecated aliases.
        self.assertEqual(len(mxnet_preds), len(coreml_preds))
        for i in range(len(mxnet_preds)):
            self.assertAlmostEqual(mxnet_preds[i], coreml_preds[i],
                                   delta=delta)

    # ------------------------- inner product ------------------------------

    def test_tiny_inner_product_zero_input(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(5), input_shape=(1, 10),
                               mode='zeros')

    def test_really_tiny_inner_product_ones_input(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(1), input_shape=(1, 1),
                               mode='ones')

    def test_really_tiny_2_inner_product_ones_input(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(5), input_shape=(1, 1),
                               mode='ones')

    def test_tiny_inner_product_ones_input(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(5), input_shape=(1, 10),
                               mode='ones', delta=0.05)

    def test_tiny_inner_product_random_input(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(5), input_shape=(1, 10),
                               mode='random')

    def test_tiny_inner_product_no_bias(self):
        np.random.seed(1988)
        self._test_mxnet_model(self._fc_net(5, no_bias=True),
                               input_shape=(1, 10), mode='random')

    # -------------------------- activations -------------------------------

    def test_tiny_softmax_random_input(self):
        np.random.seed(1988)
        net = mx.sym.SoftmaxOutput(self._fc_net(5), name='softmax')
        self._test_mxnet_model(net, input_shape=(1, 10), mode='random',
                               label_names=['softmax_label'])

    def test_tiny_relu_activation_random_input(self):
        np.random.seed(1988)
        net = mx.sym.Activation(self._fc_net(5), name='relu1',
                                act_type="relu")
        self._test_mxnet_model(net, input_shape=(1, 10), mode='random')

    def test_tiny_sigmoid_activation_random_input(self):
        np.random.seed(1988)
        net = mx.sym.Activation(self._fc_net(5), name='sigmoid1',
                                act_type="sigmoid")
        self._test_mxnet_model(net, input_shape=(1, 10), mode='random')

    def test_tiny_tanh_activation_random_input(self):
        np.random.seed(1988)
        net = mx.sym.Activation(self._fc_net(5), name='tanh1',
                                act_type="tanh")
        self._test_mxnet_model(net, input_shape=(1, 10), mode='random')

    # -------------------------- convolution -------------------------------

    def test_really_tiny_conv_random_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_conv_ones_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10), mode='ones',
                               delta=0.05)

    def test_tiny_conv_random_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_asym_conv_random_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(5, 3))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_asym_conv_random_asym_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=16, kernel=(5, 3), dilate=(1, 1))
        net = mx.sym.Activation(net, name='tanh', act_type="tanh")
        self._test_mxnet_model(net, input_shape=(1, 1, 28, 18),
                               mode='random')

    def test_tiny_conv_valid_pooling_random_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(2, 2), stride=(2, 2))
        net = mx.symbol.Pooling(data=net, kernel=(2, 2), stride=(2, 2),
                                pad=(0, 0), name='pool_1', pool_type='avg',
                                pooling_convention='valid')
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_conv_pooling_full_random_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(2, 2), stride=(2, 2))
        net = mx.symbol.Pooling(data=net, kernel=(2, 2), stride=(2, 2),
                                pad=(0, 0), name='pool_1', pool_type='avg',
                                pooling_convention='full')
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_conv_pooling_full_random_input_with_padding(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=2, kernel=(2, 2), stride=(2, 2),
                             pad=(1, 1))
        net = mx.symbol.Pooling(data=net, kernel=(2, 2), stride=(2, 2),
                                pad=(1, 1), name='pool_1', pool_type='avg',
                                pooling_convention='full')
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_really_tiny_conv_random_3d_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_really_tiny_conv_random_input_multi_filter(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=64, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_conv_random_3d_input(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_tiny_conv_random_input_multi_filter(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=64, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_conv_random(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=64, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_flatten(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=64, kernel=(5, 5))
        net = mx.sym.Flatten(data=net, name='flatten1')
        net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
        net = mx.sym.SoftmaxOutput(net, name='softmax')
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random',
                               label_names=['softmax_label'])

    def test_transpose(self):
        np.random.seed(1988)
        net = mx.sym.Variable('data')
        net = mx.sym.transpose(data=net, name='transpose', axes=(0, 1, 2, 3))
        net = mx.symbol.Convolution(data=net, num_filter=64, kernel=(5, 5),
                                    stride=(1, 1), pad=(0, 0), name='conv_1')
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_reshape(self):
        np.random.seed(1988)
        net = mx.sym.Variable('data')
        net = mx.sym.reshape(data=net, shape=(1, 2, 2, 2))
        self._test_mxnet_model(net, input_shape=(1, 8), mode='random')

    def test_tiny_synset_random_input(self):
        np.random.seed(1989)
        input_shape = (1, 10)
        net = mx.sym.SoftmaxOutput(self._fc_net(5), name='softmax')
        mod = _get_mxnet_module(net,
                                data_shapes=[('data', input_shape)],
                                mode='random',
                                label_names=['softmax_label'])

        # Generate some dummy data
        input_data = np.random.uniform(-0.1, 0.1, input_shape)
        Batch = namedtuple('Batch', ['data'])
        mod.forward(Batch([mx.nd.array(input_data)]))

        kwargs = {'input_shape': {'data': input_shape}}
        # Get predictions from coreml
        coreml_model = convert(
            model=mod,
            class_labels=['Category1', 'Category2', 'Category3',
                          'Category4', 'Category5'],
            mode='classifier',
            **kwargs
        )

        prediction = coreml_model.predict(
            _mxnet_remove_batch({'data': input_data}))
        self.assertEqual(prediction['classLabel'], 'Category3')

    # ------------------------- deconvolution ------------------------------

    def test_really_tiny_deconv_random_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_deconv_ones_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10), mode='ones',
                               delta=0.05)

    def test_tiny_deconv_random_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_asym_deconv_random_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(5, 3))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_asym_deconv_random_asym_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=16, kernel=(5, 3), dilate=(1, 1))
        net = mx.sym.Activation(net, name='tanh', act_type="tanh")
        self._test_mxnet_model(net, input_shape=(1, 1, 28, 18),
                               mode='random')

    def test_tiny_deconv_pooling_random_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(5, 5))
        net = mx.symbol.Pooling(data=net, kernel=(5, 5), stride=(1, 1),
                                pad=(0, 0), name='pool_1', pool_type='max')
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_really_tiny_deconv_random_3d_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_really_tiny_deconv_random_input_multi_filter(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=64, kernel=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_tiny_deconv_random_3d_input(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=1, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 3, 10, 10),
                               mode='random')

    def test_tiny_deconv_random_input_multi_filter(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=64, kernel=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    def test_deconv_random(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=3, kernel=(2, 2), no_bias=False)
        self._test_mxnet_model(net, input_shape=(1, 10, 4, 4),
                               mode='random')

    def test_deconv_random_output_shape(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=3, kernel=(2, 2), no_bias=False,
                               target_shape=(5, 5))
        self._test_mxnet_model(net, input_shape=(1, 10, 4, 4),
                               mode='random')

    def test_deconv_random_padding(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=3, kernel=(3, 3), stride=(3, 3),
                               pad=(2, 2), no_bias=False)
        self._test_mxnet_model(net, input_shape=(1, 10, 9, 9),
                               mode='random')

    def test_conv_random_padding_odd(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=3, kernel=(5, 5), pad=(3, 3),
                             no_bias=False)
        self._test_mxnet_model(net, input_shape=(1, 10, 6, 6),
                               mode='random')

    def test_conv_random_padding_even(self):
        np.random.seed(1988)
        net = self._conv_net(num_filter=3, kernel=(5, 5), pad=(2, 2),
                             no_bias=False)
        self._test_mxnet_model(net, input_shape=(1, 10, 6, 6),
                               mode='random')

    def test_deconv_random_all_inputs(self):
        np.random.seed(1988)
        net = self._deconv_net(num_filter=3, kernel=(3, 3), stride=(2, 2),
                               pad=(1, 1), no_bias=False,
                               target_shape=(11, 11), dilate=(1, 1))
        self._test_mxnet_model(net, input_shape=(1, 10, 5, 5),
                               mode='random')

    # --------------------------- batch norm -------------------------------

    @staticmethod
    def _batch_norm_net(name, **bn_args):
        """A Variable -> BatchNorm graph with explicit stat variables."""
        net = mx.sym.Variable('data')
        gamma = mx.sym.Variable('gamma')
        beta = mx.sym.Variable('beta')
        moving_mean = mx.sym.Variable('moving_mean')
        moving_var = mx.sym.Variable('moving_var')
        return mx.symbol.BatchNorm(
            data=net, gamma=gamma, beta=beta, moving_mean=moving_mean,
            moving_var=moving_var, name=name, **bn_args)

    def test_batch_norm(self):
        np.random.seed(1988)
        net = self._batch_norm_net('batch_norm_1', use_global_stats=True)
        self._test_mxnet_model(net, input_shape=(1, 1, 2, 3), mode='random',
                               delta=1e-2)

    def test_batch_norm_no_global_stats(self):
        """ This test should throw an exception since converter doesn't
        support use_global_stats=False. The reason for this is CoreML
        doesn't support local batch stats.
        """
        np.random.seed(1988)
        net = self._batch_norm_net('batch_norm_1', use_global_stats=False)
        self._test_mxnet_model(net, input_shape=(1, 1, 2, 3), mode='random',
                               delta=1e-2)

    def test_batch_norm_with_fix_gamma(self):
        """ The gamma will always be an array of ones when fix_gamma=True.
        The values of gamma may be changed accidentally if there have been
        fix_gamma=False before the final trained model.
        """
        np.random.seed(1988)
        input_shape = (1, 1, 2, 3)
        net = self._batch_norm_net('batch_norm_1', fix_gamma=True)
        self._test_mxnet_model(net, input_shape=input_shape, mode='random',
                               delta=1e-2)

        np.random.seed(1988)
        # Stack a second, non-fixed-gamma batch norm on top, as in the
        # original test.
        gamma = mx.sym.Variable('gamma')
        beta = mx.sym.Variable('beta')
        moving_mean = mx.sym.Variable('moving_mean')
        moving_var = mx.sym.Variable('moving_var')
        net = mx.symbol.BatchNorm(
            data=net, gamma=gamma, beta=beta, moving_mean=moving_mean,
            moving_var=moving_var, fix_gamma=False, name='batch_norm_2')
        self._test_mxnet_model(net, input_shape=input_shape, mode='random',
                               delta=1e-2)

    # ----------------------------- misc -----------------------------------

    def test_pre_processing_args(self):
        np.random.seed(1988)
        net = mx.sym.SoftmaxOutput(self._fc_net(5), name='softmax')
        self._test_mxnet_model(net, input_shape=(1, 10), mode='random',
                               label_names=['softmax_label'],
                               pre_processing_args={'red_bias': 0,
                                                    'blue_bias': 0,
                                                    'green_bias': 0,
                                                    'image_scale': 1})

    def test_different_input_variables(self):
        """ Verifying the behavior when input variable name is different
        than the standard name - 'data'.
        """
        np.random.seed(1988)
        net = self._fc_net(5, input_name='data1')
        self._test_mxnet_model(net, input_shape=(1, 10), mode='zeros',
                               input_name='data1')

    def test_really_tiny_conv_optional_params(self):
        """ Verifying the behavior of a convolutional layer when stride and
        pad are not provided.
        """
        np.random.seed(1988)
        net = mx.sym.Variable('data')
        net = mx.symbol.Convolution(data=net, num_filter=1, kernel=(1, 1),
                                    name='conv_1')
        self._test_mxnet_model(net, input_shape=(1, 1, 10, 10),
                               mode='random')

    # TODO test_concat


if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(SingleLayerTest)
    unittest.TextTestRunner(verbosity=2).run(suite)
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Shared fakes for glance unit tests.

Provides an in-memory database (``FakeDB``), an in-memory image store
(``FakeStoreAPI``), a configurable policy enforcer and a recording
notifier, plus a helper to build authenticated WSGI requests.
"""

from oslo.config import cfg

from glance.common import exception
from glance.common import wsgi
import glance.context
import glance.db.simple.api as simple_db
import glance.openstack.common.log as logging
import glance.store

CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Well-known fixture identifiers shared by the tests.
UUID1 = 'c80a1a6c-bd1f-41c5-90ee-81afedb1d58d'
UUID2 = '971ec09a-8067-4bc8-a91f-ae3557f1c4c7'

TENANT1 = '6838eb7b-6ded-434a-882c-b344c77fe8df'
TENANT2 = '2c014f32-55eb-467d-8fcb-4bd706012f81'

USER1 = '54492ba0-f4df-4e4e-be62-27f4d76b29cf'
USER2 = '0b3b3006-cb76-4517-ae32-51397e22c754'
USER3 = '2hss8dkl-d8jh-88yd-uhs9-879sdjsd8skd'

BASE_URI = 'swift+http://storeurl.com/container'


def get_fake_request(path='', method='POST', is_admin=False, user=USER1,
                     roles=['member'], tenant=TENANT1):
    """Return a blank WSGI request carrying a glance RequestContext.

    NOTE(review): ``roles`` uses a mutable default list; harmless here as
    callers never mutate it, but flagged for awareness.
    """
    req = wsgi.Request.blank(path)
    req.method = method

    kwargs = {
        'user': user,
        'tenant': tenant,
        'roles': roles,
        'is_admin': is_admin,
    }

    req.context = glance.context.RequestContext(**kwargs)

    return req


class FakeDB(object):
    """Facade over glance.db.simple.api pre-seeded with fixture data.

    Attribute access falls through to the simple in-memory DB API, so this
    object can stand in for a real DB API module.
    """

    def __init__(self):
        # Wipe any state left over from previous tests, then re-seed.
        self.reset()
        self.init_db()

    @staticmethod
    def init_db():
        """Seed the in-memory DB with two images, two members and two tags."""
        images = [
            {'id': UUID1, 'owner': TENANT1, 'status': 'queued',
             'locations': [{'url': '%s/%s' % (BASE_URI, UUID1),
                            'metadata': {}}]},
            {'id': UUID2, 'owner': TENANT1, 'status': 'queued'},
        ]
        # NOTE: comprehension used for its side effect (row creation).
        [simple_db.image_create(None, image) for image in images]

        members = [
            {'image_id': UUID1, 'member': TENANT1, 'can_share': True},
            {'image_id': UUID1, 'member': TENANT2, 'can_share': False},
        ]
        [simple_db.image_member_create(None, member) for member in members]

        simple_db.image_tag_set_all(None, UUID1, ['ping', 'pong'])

    @staticmethod
    def reset():
        """Replace the simple DB's backing store with empty collections."""
        simple_db.DATA = {
            'images': {},
            'members': [],
            'tags': {},
            'locations': [],
        }

    def __getattr__(self, key):
        # Delegate any unknown attribute to the simple DB API module.
        return getattr(simple_db, key)


class FakeStoreAPI(object):
    """In-memory stand-in for glance.store with canned failure triggers."""

    def __init__(self, store_metadata=None):
        # Map of location URI -> (data, size); pre-seeded with two entries.
        self.data = {
            '%s/%s' % (BASE_URI, UUID1): ('XXX', 3),
            '%s/fake_location' % (BASE_URI): ('YYY', 3)
        }
        self.acls = {}
        if store_metadata is None:
            self.store_metadata = {}
        else:
            self.store_metadata = store_metadata

    def create_stores(self):
        pass

    def set_acls(self, context, uri, public=False, read_tenants=[],
                 write_tenants=[]):
        """Record the ACLs applied to a location URI for later inspection."""
        self.acls[uri] = {
            'public': public,
            'read': read_tenants,
            'write': write_tenants,
        }

    def get_from_backend(self, context, location):
        """Return (data, size) for a location; NotFound if unknown.

        A location with an 'unknown' scheme raises UnknownScheme, mimicking
        the real store's scheme validation.
        """
        try:
            # Slice off everything from the ':' of '://' onward, leaving
            # just the scheme portion (find('/') hits the first slash of
            # '://'; -1 lands on the ':').
            scheme = location[:location.find('/') - 1]
            if scheme == 'unknown':
                raise exception.UnknownScheme(scheme=scheme)
            return self.data[location]
        except KeyError:
            raise exception.NotFound()

    def safe_delete_from_backend(self, context, uri, id, **kwargs):
        # Best-effort delete: a missing entry is silently ignored.
        try:
            del self.data[uri]
        except KeyError:
            pass

    def schedule_delayed_delete_from_backend(self, context, uri, id,
                                             **kwargs):
        pass

    def delete_image_from_backend(self, context, store_api, image_id, uri):
        # Mirror the real store's delayed-delete config switch.
        if CONF.delayed_delete:
            self.schedule_delayed_delete_from_backend(context, uri, image_id)
        else:
            self.safe_delete_from_backend(context, uri, image_id)

    def get_size_from_backend(self, context, location):
        # Size is the second element of the stored (data, size) tuple.
        return self.get_from_backend(context, location)[1]

    def add_to_backend(self, context, scheme, image_id, data, size):
        """Store data under image_id, with canned failure triggers.

        Raises Duplicate if the id already appears in a stored location,
        StorageFull when the tiny fake capacity (7) would be exceeded,
        Forbidden for USER2 and StorageWriteDenied for USER3.
        """
        store_max_size = 7
        current_store_size = 2
        for location in self.data.keys():
            if image_id in location:
                raise exception.Duplicate()
        if not size:
            # presumably ``data`` wraps a file-like object in ``fd`` —
            # TODO(review): confirm against callers.
            size = len(data.fd)
        if (current_store_size + size) > store_max_size:
            raise exception.StorageFull()
        if context.user == USER2:
            raise exception.Forbidden()
        if context.user == USER3:
            raise exception.StorageWriteDenied()
        self.data[image_id] = (data, size)
        checksum = 'Z'
        return (image_id, size, checksum, self.store_metadata)

    def check_location_metadata(self, val, key=''):
        # Delegate to the real validator so bad metadata still fails tests.
        glance.store.check_location_metadata(val)


class FakePolicyEnforcer(object):
    """Policy enforcer whose rules are set directly by the test."""

    def __init__(self, *_args, **kwargs):
        self.rules = {}

    def enforce(self, _ctxt, action, target=None, **kwargs):
        """Raise Forbidden if a rule for given action is set to false."""
        if self.rules.get(action) is False:
            raise exception.Forbidden()

    def set_rules(self, rules):
        self.rules = rules


class FakeNotifier(object):
    """Notifier that records every notification for later assertions."""

    def __init__(self, *_args, **kwargs):
        self.log = []

    def warn(self, event_type, payload):
        log = {}
        log['notification_type'] = "WARN"
        log['event_type'] = event_type
        log['payload'] = payload
        self.log.append(log)

    def info(self, event_type, payload):
        log = {}
        log['notification_type'] = "INFO"
        log['event_type'] = event_type
        log['payload'] = payload
        self.log.append(log)

    def error(self, event_type, payload):
        log = {}
        log['notification_type'] = "ERROR"
        log['event_type'] = event_type
        log['payload'] = payload
        self.log.append(log)

    def get_logs(self):
        return self.log
"""Helpers for running subprocesses with pip-style logging and spinners."""

import logging
import os
import shlex
import subprocess
from typing import Any, Callable, Iterable, List, Mapping, Optional, Union

from pip._internal.cli.spinners import SpinnerInterface, open_spinner
from pip._internal.exceptions import InstallationSubprocessError
from pip._internal.utils.logging import subprocess_logger
from pip._internal.utils.misc import HiddenText

# A command is a list of plain strings plus HiddenText wrappers for
# arguments that must be redacted in logs (passwords, tokens).
CommandArgs = List[Union[str, HiddenText]]


LOG_DIVIDER = "----------------------------------------"


def make_command(*args):
    # type: (Union[str, HiddenText, CommandArgs]) -> CommandArgs
    """
    Create a CommandArgs object, flattening any list arguments.
    """
    command_args = []  # type: CommandArgs
    for arg in args:
        # Check for list instead of CommandArgs since CommandArgs is
        # only known during type-checking.
        if isinstance(arg, list):
            command_args.extend(arg)
        else:
            # Otherwise, arg is str or HiddenText.
            command_args.append(arg)

    return command_args


def format_command_args(args):
    # type: (Union[List[str], CommandArgs]) -> str
    """
    Format command arguments for display, redacting HiddenText values.
    """
    # For HiddenText arguments, display the redacted form by calling str().
    # Also, we don't apply str() to arguments that aren't HiddenText since
    # this can trigger a UnicodeDecodeError in Python 2 if the argument
    # has type unicode and includes a non-ascii character.  (The type
    # checker doesn't ensure the annotations are correct in all cases.)
    return " ".join(
        shlex.quote(str(arg)) if isinstance(arg, HiddenText) else shlex.quote(arg)
        for arg in args
    )


def reveal_command_args(args):
    # type: (Union[List[str], CommandArgs]) -> List[str]
    """
    Return the arguments in their raw, unredacted form.
    """
    return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args]


def make_subprocess_output_error(
    cmd_args,  # type: Union[List[str], CommandArgs]
    cwd,  # type: Optional[str]
    lines,  # type: List[str]
    exit_status,  # type: int
):
    # type: (...) -> str
    """
    Create and return the error message to use to log a subprocess
    error with command output.

    :param lines: A list of lines, each ending with a newline.
    """
    command = format_command_args(cmd_args)

    # We know the joined output value ends in a newline.
    output = "".join(lines)
    msg = (
        # Use a unicode string to avoid "UnicodeEncodeError: 'ascii'
        # codec can't encode character ..." in Python 2 when a format
        # argument (e.g. `output`) has a non-ascii character.
        "Command errored out with exit status {exit_status}:\n"
        " command: {command_display}\n"
        "     cwd: {cwd_display}\n"
        "Complete output ({line_count} lines):\n{output}{divider}"
    ).format(
        exit_status=exit_status,
        command_display=command,
        cwd_display=cwd,
        line_count=len(lines),
        output=output,
        divider=LOG_DIVIDER,
    )
    return msg


def call_subprocess(
    cmd,  # type: Union[List[str], CommandArgs]
    show_stdout=False,  # type: bool
    cwd=None,  # type: Optional[str]
    on_returncode="raise",  # type: str
    extra_ok_returncodes=None,  # type: Optional[Iterable[int]]
    command_desc=None,  # type: Optional[str]
    extra_environ=None,  # type: Optional[Mapping[str, Any]]
    unset_environ=None,  # type: Optional[Iterable[str]]
    spinner=None,  # type: Optional[SpinnerInterface]
    log_failed_cmd=True,  # type: Optional[bool]
    stdout_only=False,  # type: Optional[bool]
):
    # type: (...) -> str
    """
    Run a command, log its output and return what it wrote to stdout.

    Args:
      show_stdout: if true, use INFO to log the subprocess's stderr and
        stdout streams.  Otherwise, use DEBUG.  Defaults to False.
      extra_ok_returncodes: an iterable of integer return codes that are
        acceptable, in addition to 0. Defaults to None, which means [].
      unset_environ: an iterable of environment variable names to unset
        prior to calling subprocess.Popen().
      log_failed_cmd: if false, failed commands are not logged, only raised.
      stdout_only: if true, return only stdout, else return both. When true,
        logging of both stdout and stderr occurs when the subprocess has
        terminated, else logging occurs as subprocess output is produced.
    """
    if extra_ok_returncodes is None:
        extra_ok_returncodes = []
    if unset_environ is None:
        unset_environ = []
    # Most places in pip use show_stdout=False. What this means is--
    #
    # - We connect the child's output (combined stderr and stdout) to a
    #   single pipe, which we read.
    # - We log this output to stderr at DEBUG level as it is received.
    # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
    #   requested), then we show a spinner so the user can still see the
    #   subprocess is in progress.
    # - If the subprocess exits with an error, we log the output to stderr
    #   at ERROR level if it hasn't already been displayed to the console
    #   (e.g. if --verbose logging wasn't enabled).  This way we don't log
    #   the output to the console twice.
    #
    # If show_stdout=True, then the above is still done, but with DEBUG
    # replaced by INFO.
    if show_stdout:
        # Then log the subprocess output at INFO level.
        log_subprocess = subprocess_logger.info
        used_level = logging.INFO
    else:
        # Then log the subprocess output using DEBUG.  This also ensures
        # it will be logged to the log file (aka user_log), if enabled.
        log_subprocess = subprocess_logger.debug
        used_level = logging.DEBUG

    # Whether the subprocess will be visible in the console.
    showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level

    # Only use the spinner if we're not showing the subprocess output
    # and we have a spinner.
    use_spinner = not showing_subprocess and spinner is not None

    if command_desc is None:
        command_desc = format_command_args(cmd)

    log_subprocess("Running command %s", command_desc)
    env = os.environ.copy()
    if extra_environ:
        env.update(extra_environ)
    for name in unset_environ:
        env.pop(name, None)
    try:
        proc = subprocess.Popen(
            # Convert HiddenText objects to the underlying str.
            reveal_command_args(cmd),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT if not stdout_only else subprocess.PIPE,
            cwd=cwd,
            env=env,
            errors="backslashreplace",
        )
    except Exception as exc:
        if log_failed_cmd:
            subprocess_logger.critical(
                "Error %s while executing command %s",
                exc,
                command_desc,
            )
        raise
    all_output = []
    if not stdout_only:
        assert proc.stdout
        assert proc.stdin
        proc.stdin.close()
        # In this mode, stdout and stderr are in the same pipe.
        while True:
            line = proc.stdout.readline()  # type: str
            if not line:
                break
            line = line.rstrip()
            all_output.append(line + "\n")

            # Show the line immediately.
            log_subprocess(line)
            # Update the spinner.
            if use_spinner:
                assert spinner
                spinner.spin()
        try:
            proc.wait()
        finally:
            # Close the pipe even if wait() raised (e.g. KeyboardInterrupt).
            if proc.stdout:
                proc.stdout.close()
        output = "".join(all_output)
    else:
        # In this mode, stdout and stderr are in different pipes.
        # We must use communicate() which is the only safe way to read both.
        out, err = proc.communicate()
        # log line by line to preserve pip log indenting
        for out_line in out.splitlines():
            log_subprocess(out_line)
        all_output.append(out)
        for err_line in err.splitlines():
            log_subprocess(err_line)
        all_output.append(err)
        output = out
    proc_had_error = proc.returncode and proc.returncode not in extra_ok_returncodes
    if use_spinner:
        assert spinner
        if proc_had_error:
            spinner.finish("error")
        else:
            spinner.finish("done")
    if proc_had_error:
        if on_returncode == "raise":
            if not showing_subprocess and log_failed_cmd:
                # Then the subprocess streams haven't been logged to the
                # console yet.
                msg = make_subprocess_output_error(
                    cmd_args=cmd,
                    cwd=cwd,
                    lines=all_output,
                    exit_status=proc.returncode,
                )
                subprocess_logger.error(msg)
            raise InstallationSubprocessError(proc.returncode, command_desc)
        elif on_returncode == "warn":
            subprocess_logger.warning(
                'Command "%s" had error code %s in %s',
                command_desc,
                proc.returncode,
                cwd,
            )
        elif on_returncode == "ignore":
            pass
        else:
            raise ValueError(f"Invalid value: on_returncode={on_returncode!r}")
    return output


def runner_with_spinner_message(message):
    # type: (str) -> Callable[..., None]
    """Provide a subprocess_runner that shows a spinner message.

    Intended for use with for pep517's Pep517HookCaller. Thus, the runner has
    an API that matches what's expected by Pep517HookCaller.subprocess_runner.
    """

    def runner(
        cmd,  # type: List[str]
        cwd=None,  # type: Optional[str]
        extra_environ=None,  # type: Optional[Mapping[str, Any]]
    ):
        # type: (...) -> None
        with open_spinner(message) as spinner:
            call_subprocess(
                cmd,
                cwd=cwd,
                extra_environ=extra_environ,
                spinner=spinner,
            )

    return runner
#!/usr/bin/env python

import re
import os


class PgCatalogHeader(object):
    """Base class for PostgreSQL catalog header parsers.

    Provides basic methods to parse catalog header files with regular
    expressions.  The parse result is stored in ``self.tuplist`` as a list
    of dictionaries, one per DATA() macro line, keyed by attribute name.
    All values are kept as strings regardless of the column type.

    To extend this class, set these three class values:

    - header  -- file name of the catalog header (e.g. 'pg_type.h')
    - hasoid  -- True if the catalog rows carry an explicit OID
    - prefix  -- the Anum_ attribute-name prefix (e.g. 'pg_type')

    and call self.initialize() in __init__().
    """

    # Location of the catalog headers, relative to this script's directory.
    catalogdir = '../../../include/catalog'

    def initialize(self):
        """Parse self.header and populate self.tuplist.

        :raises Exception: if no DATA() rows were parsed, indicating a
            missing or malformed header file.
        """
        path = self.fullpath(self.header)
        self.tuplist = self.readheader(path, self.hasoid, self.prefix)
        if not self.tuplist:
            raise Exception("no content")

    def fullpath(self, filename):
        """Return the full path name of the catalog file.

        The path is resolved relative to this script's own directory rather
        than the current working directory, so parsing works no matter where
        the script is invoked from.
        """
        thisdir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(thisdir, self.catalogdir)
        return os.path.join(path, filename)

    def readheader(self, header, hasoid, tableprefix):
        """Return a list of dictionaries as the result of parse.

        It finds lines starting with "#define Anum_<tableprefix>_" and
        collects the attribute names, then parses the DATA() macros.

        :param header: full path of the catalog header file
        :param hasoid: if True, also capture the "OID = n" clause as 'oid'
        :param tableprefix: the Anum_ prefix identifying this catalog
        """
        anum = re.compile(r'#define Anum_' + tableprefix + r'_(\w+)')
        rebuf = list()
        attlist = list()
        # First pass: build one named capture group per attribute, in the
        # order the Anum_ defines appear (which is the DATA() column order).
        # (Fixed: use a context manager so the file handle is closed.)
        with open(header) as f:
            for line in f:
                m = anum.match(line)
                if m:
                    # Capture the group by name to look it up later; a value
                    # is either a bare token or a double-quoted string.
                    rebuf.append(r'(?P<' + m.group(1) + r'>\S+|"[^"]+")')
                    attlist.append(m.group(1))

        oidpattern = ''
        if hasoid:
            oidpattern = r'OID\s*=\s*(?P<oid>\w+)\s*'
            attlist.append('oid')

        # Fixed: raw string for the regex separator (was '\s+').
        insert = re.compile(r'DATA\(insert\s+' + oidpattern + r'\(\s*' +
                            r'\s+'.join(rebuf) + r'\s*\)\);')

        # Second pass: collect all the DATA() lines and put them into a list.
        tuplist = list()
        with open(header) as f:
            for line in f:
                m = insert.match(line)
                if m:
                    tup = dict()
                    for att in attlist:
                        tup[att] = m.group(att)
                    tuplist.append(tup)

        return tuplist


class PgAmop(PgCatalogHeader):
    """Parser for pg_amop.h (access-method operator strategies)."""

    header = 'pg_amop.h'
    hasoid = False
    prefix = 'pg_amop'

    def __init__(self):
        self.initialize()

    def find_amopopr(self, amopclaid, amopstrategy):
        """Returns the operator oid that matches opclass and strategy."""
        for tup in self.tuplist:
            if (tup['amopclaid'] == str(amopclaid) and
                    tup['amopstrategy'] == str(amopstrategy)):
                return tup['amopopr']


class PgOpclass(PgCatalogHeader):
    """Parser for pg_opclass.h (operator classes)."""

    header = 'pg_opclass.h'
    hasoid = True
    prefix = 'pg_opclass'

    def __init__(self):
        self.initialize()

    def find_btree_oid_by_opcintype(self, opcintype):
        """Returns the opclass oid whose input type is opcintype if it is
        a btree opclass and default for the type.
        """
        for tup in self.tuplist:
            # 403 is the btree access method id
            if (tup['opcintype'] == str(opcintype) and
                    tup['opcamid'] == '403' and
                    tup['opcdefault'] == 't'):
                return tup['oid']


class PgOperator(PgCatalogHeader):
    """Parser for pg_operator.h (operators)."""

    header = 'pg_operator.h'
    hasoid = True
    prefix = 'pg_operator'

    def __init__(self):
        self.initialize()

    def find_oprcode(self, oid):
        """Returns the procedure oid of the operator."""
        for tup in self.tuplist:
            if tup['oid'] == str(oid):
                return tup['oprcode']


class PgType(PgCatalogHeader):
    """Parser for pg_type.h (types), plus the OID macro map from pg_type.sql."""

    header = 'pg_type.h'
    hasoid = True
    prefix = 'pg_type'

    def __init__(self):
        self.initialize()
        self.oid_defs = self._read_oid_defs()

    def findtup_by_typname(self, typname):
        """Returns a tuple that matches typname.

        The input typname is normalized if it's any of quoted_char, boolean,
        smallint, integer, bigint, real, or timestamp_with_time_zone.  Also,
        if typname looks like an array type with '[]', it is normalized to
        an array type name with underscore prefix.
        """
        # rstrip removes any trailing '[' / ']' characters; a change here
        # means the name carried an array suffix.
        basename = typname.rstrip('[]')
        isarray = False
        if basename != typname:
            isarray = True
            typname = basename
        if typname == 'quoted_char':
            typname = 'char'
        elif typname == 'boolean':
            typname = 'bool'
        elif typname == 'smallint':
            typname = 'int2'
        elif typname == 'integer':
            typname = 'int4'
        elif typname == 'bigint':
            typname = 'int8'
        elif typname == 'real':
            typname = 'float4'
        elif typname == 'timestamp_with_time_zone':
            typname = 'timestamptz'
        if isarray:
            typname = '_' + typname
        for tup in self.tuplist:
            if tup['typname'] == str(typname):
                return tup

    def findtup_by_typid(self, typid):
        """Returns the tuple whose oid matches typid, or None."""
        for tup in self.tuplist:
            if tup['oid'] == str(typid):
                return tup

    def oid_to_def(self, oid):
        """Returns the OID macro name for oid, or the oid as a string."""
        return self.oid_defs.get(int(oid), str(oid))

    def _read_oid_defs(self):
        """Reads #define lines in pg_type.sql and builds up a map from
        oid(int) to macro string.
        """
        # Fixed: resolve via fullpath() like every other catalog file.
        # The previous os.path.join(self.catalogdir, ...) only worked when
        # the script was run from its own directory.
        filename = self.fullpath('pg_type.sql')
        pat = re.compile(r'^.*#define\s+\S*OID\s+\d+')
        oidmap = dict()
        with open(filename) as f:
            for line in f:
                m = pat.match(line)
                if m:
                    tup = line.split()
                    # Last token is the numeric oid, the one before it is
                    # the macro name (e.g. "#define INT4OID 23").
                    oid = int(tup[-1])
                    oidname = tup[-2]
                    oidmap[oid] = oidname
        return oidmap


class PgProc(PgCatalogHeader):
    """Parser for pg_proc.h (procedures)."""

    header = 'pg_proc.h'
    hasoid = True
    prefix = 'pg_proc'

    def __init__(self):
        self.initialize()

    def find_prosrc_by_proname(self, proname):
        """Returns the prosrc of the first procedure named proname."""
        for tup in self.tuplist:
            if tup['proname'] == str(proname):
                return tup['prosrc']
# Copyright (c) 2014 EMC Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import xml.dom.minidom from oslo_log import log import six from manila.i18n import _LW LOG = log.getLogger(__name__) def name(tt): return tt[0] def attrs(tt): return tt[1] def kids(tt): return filter_tuples(tt[2]) def filter_tuples(l): """Return only the tuples in a list. In a tupletree, tuples correspond to XML elements. Useful for stripping out whitespace data in a child list. """ if l is None: return [] else: return [x for x in l if type(x) == tuple] def parse_xml_api(tt): check_node(tt, 'ResponsePacket', ['xmlns']) child = optional_child(tt, ['Response', 'PacketFault']) return child def parse_response(tt): check_node(tt, 'Response') list_child = [ 'QueryStatus', 'FileSystem', 'FileSystemCapabilities', 'FileSystemCapacityInfo', 'Mount', 'CifsShare', 'CifsServer', 'Volume', 'StoragePool', 'Fault', 'TaskResponse', 'Checkpoint', 'NfsExport', 'Mover', 'MoverStatus', 'MoverDnsDomain', 'MoverInterface', 'MoverRoute', 'LogicalNetworkDevice', 'MoverDeduplicationSettings', 'Vdm', ] return list_of_various(tt, list_child) def parse_querystatus(tt): check_node(tt, 'QueryStatus', ['maxSeverity']) child = list_of_various(tt, ['Problem']) if child: return name(tt), attrs(tt), child else: return name(tt), attrs(tt) def parse_filesystem(tt): required_attrs = ['fileSystem', 'name', 'type', 'storages', 'volume'] optional_attrs = [ 'containsSlices', 'flrClock', 'internalUse', 
'maxFlrRetentionPeriod', 'storagePools', 'virtualProvisioning', 'dataServicePolicies', ] check_node(tt, 'FileSystem', required_attrs, optional_attrs) list_child = [ 'RwFileSystemHosts', 'RoFileSystemHosts', 'FileSystemAutoExtInfo', 'ProductionFileSystemData', 'MigrationFileSystemData', ] child = list_of_various(tt, list_child) if len(child) > 0: for item in child: if (item[0] == 'RwFileSystemHosts' or item[0] == 'RoFileSystemHosts'): if 'mover' in item[1].keys(): attrs(tt)['mover'] = item[1]['mover'] if 'moverIdIsVdm' in item[1].keys(): attrs(tt)['moverIdIsVdm'] = item[1]['moverIdIsVdm'] elif item[0] == 'FileSystemAutoExtInfo': if 'autoExtEnabled' in item[1].keys(): attrs(tt)['autoExtEnabled'] = item[1]['autoExtEnabled'] if 'autoExtensionMaxSize' in item[1].keys(): attrs(tt)['autoExtensionMaxSize'] = ( item[1]['autoExtensionMaxSize']) if 'highWaterMark' in item[1].keys(): attrs(tt)['highWaterMark'] = item[1]['highWaterMark'] elif item[0] == 'ProductionFileSystemData': if 'cwormState' in item[1].keys(): attrs(tt)['cwormState'] = item[1]['cwormState'] if 'replicationRole' in item[1].keys(): attrs(tt)['replicationRole'] = item[1]['replicationRole'] elif item[0] == 'MigrationFileSystemData': if 'state' in item[1].keys(): attrs(tt)['state'] = item[1]['state'] return name(tt), attrs(tt) def parse_rwfilesystemhosts_filesystem(tt): check_node(tt, 'RwFileSystemHosts', ['mover'], ['moverIdIsVdm']) return name(tt), attrs(tt) def parse_rofilesystemhosts_filesystem(tt): check_node(tt, 'RoFileSystemHosts', ['mover'], ['moverIdIsVdm']) return name(tt), attrs(tt) def parse_rwfilesystemhosts_ckpt(tt): check_node(tt, 'rwFileSystemHosts', ['mover'], ['moverIdIsVdm']) return name(tt), attrs(tt) def parse_rofilesystemhosts_ckpt(tt): check_node(tt, 'roFileSystemHosts', ['mover'], ['moverIdIsVdm']) return name(tt), attrs(tt) def parse_filesystemautoextinfo(tt): required_attrs = [] optional_attrs = [ 'autoExtEnabled', 'autoExtensionMaxSize', 'highWaterMark', ] check_node(tt, 
'FileSystemAutoExtInfo', required_attrs, optional_attrs) return name(tt), attrs(tt) def parse_productionfilesystemdata(tt): required_attrs = [] optional_attrs = ['cwormState', 'replicationRole'] check_node(tt, 'ProductionFileSystemData', required_attrs, optional_attrs) return name(tt), attrs(tt) def parse_migrationfilesystemdata(tt): check_node(tt, 'MigrationFileSystemData', [], ['state']) return name(tt), attrs(tt) def parse_filesystemcapabilities(tt): check_node(tt, 'FileSystemCapabilities', ['fileSystem'], []) child = list_of_various(tt, ['StoragePoolBased', 'DiskVolumeBased']) if len(child) > 0: for item in child: if item[0] == 'StoragePoolBased': if 'recommendedPool' in item[1].keys(): attrs(tt)['recommendedPool'] = item[1]['recommendedPool'] if 'validPools' in item[1].keys(): attrs(tt)['validPools'] = item[1]['validPools'] return name(tt), attrs(tt) def parse_storagepoolbased(tt): check_node(tt, 'StoragePoolBased', [], ['recommendedPool', 'validPools']) return name(tt), attrs(tt) def parse_diskvolumebased(tt): required_attrs = [] optional_attrs = ['recommendedStorage', 'validStorages'] check_node(tt, 'DiskVolumeBased', required_attrs, optional_attrs) return name(tt), attrs(tt) def parse_filesystemcapacityinfo(tt): check_node(tt, 'FileSystemCapacityInfo', ['fileSystem', 'volumeSize'], []) child = optional_child(tt, ['ResourceUsage']) if child is not None: if child[0] == 'ResourceUsage': if 'spaceTotal' in child[1].keys(): attrs(tt)['spaceTotal'] = child[1]['spaceTotal'] if 'filesUsed' in child[1].keys(): attrs(tt)['filesUsed'] = child[1]['filesUsed'] if 'spaceUsed' in child[1].keys(): attrs(tt)['spaceUsed'] = child[1]['spaceUsed'] if 'filesTotal' in child[1].keys(): attrs(tt)['filesTotal'] = child[1]['filesTotal'] return name(tt), attrs(tt) def parse_resourceusage(tt): required_attrs = ['filesTotal', 'filesUsed', 'spaceTotal', 'spaceUsed'] check_node(tt, 'ResourceUsage', required_attrs) return name(tt), attrs(tt) def parse_mount(tt): required_attrs = 
['fileSystem', 'mover', 'path'] optional_attrs = ['disabled', 'ntCredential', 'moverIdIsVdm'] check_node(tt, 'Mount', required_attrs, optional_attrs) child = list_of_various(tt, ['NfsOptions', 'CifsOptions']) if child is not None: for item in child: if item[0] == 'NfsOptions': if 'ro' in item[1].keys(): attrs(tt)['ro'] = item[1]['ro'] if item[0] == 'CifsOptions': if 'cifsSyncwrite' in item[1].keys(): attrs(tt)['cifsSyncwrite'] = item[1]['cifsSyncwrite'] return name(tt), attrs(tt) def parse_nfsoptions(tt): required_attrs = [] optional_attrs = ['ro', 'prefetch', 'uncached', 'virusScan'] check_node(tt, 'NfsOptions', required_attrs, optional_attrs) return name(tt), attrs(tt) def parse_cifsoptions(tt): required_attrs = [] optional_attrs = [ 'cifsSyncwrite', 'accessPolicy', 'lockingPolicy', 'notify', 'notifyOnAccess', 'notifyOnWrite', 'oplock', 'triggerLevel', ] check_node(tt, 'CifsOptions', required_attrs, optional_attrs) return name(tt), attrs(tt) def parse_cifsshare(tt): required_attrs = ['mover', 'name', 'path'] optional_attrs = ['comment', 'fileSystem', 'maxUsers', 'moverIdIsVdm'] check_node(tt, 'CifsShare', required_attrs, optional_attrs) child = one_child(tt, ['CifsServers']) if child is not None: attrs(tt)['CifsServers'] = child[1] return name(tt), attrs(tt) def parse_cifsservers(tt): check_node(tt, 'CifsServers') child = list_of_various(tt, ['li']) if len(child) > 0 and child[0] is not None: return 'CifsServers', child def parse_li(tt): check_node(tt, 'li', [], [], [], True) return ''.join(tt[2]) def parse_cifsserver(tt): required_attrs = ['mover', 'name', 'type'] optional_attrs = ['localUsers', 'interfaces', 'moverIdIsVdm'] check_node(tt, 'CifsServer', required_attrs, optional_attrs) list_child = [ 'Aliases', 'StandaloneServerData', 'NT40ServerData', 'W2KServerData', ] child = list_of_various(tt, list_child) if len(child) > 0: for item in child: if item[0] == 'Aliases': attrs(tt)['aliases'] = item[1] elif item[0] == 'W2KServerData': if 'domain' in 
item[1].keys(): attrs(tt)['domain'] = item[1]['domain'] if 'domainJoined' in item[1].keys(): attrs(tt)['domainJoined'] = item[1]['domainJoined'] if 'compName' in item[1].keys(): attrs(tt)['compName'] = item[1]['compName'] elif item[0] == 'NT40ServerData': if 'domain' in item[1].keys(): attrs(tt)['domain'] = item[1]['domain'] return name(tt), attrs(tt) def parse_aliases(tt): check_node(tt, 'Aliases') child = list_of_various(tt, ['li']) if len(child) > 0: return 'Aliases', child def parse_standaloneserverdata(tt): check_node(tt, 'StandaloneServerData', ['workgroup']) return name(tt), attrs(tt) def parse_nt40serverdata(tt): check_node(tt, 'NT40ServerData', ['domain']) return name(tt), attrs(tt) def parse_w2kserverdata(tt): check_node(tt, 'W2KServerData', ['compName', 'domain'], ['domainJoined']) return name(tt), attrs(tt) def parse_volume(tt): required_attrs = ['name', 'size', 'type', 'virtualProvisioning', 'volume'] optional_attrs = ['clientVolumes'] check_node(tt, 'Volume', required_attrs, optional_attrs) list_child = [ 'MetaVolumeData', 'SliceVolumeData', 'StripeVolumeData', 'DiskVolumeData', 'PoolVolumeData', 'FreeSpace', ] child = list_of_various(tt, list_child) if len(child) > 0: for item in child: if item[0] == 'MetaVolumeData': if 'memberVolumes' in item[1].keys(): attrs(tt)['memberVolumes'] = item[1]['memberVolumes'] if 'clientFileSystems' in item[1].keys(): attrs(tt)['clientFileSystems'] = ( item[1]['clientFileSystems']) return name(tt), attrs(tt) def parse_slicevolumedata(tt): pass def parse_stripevolumedata(tt): pass def parse_diskvolumedata(tt): pass def parse_poolvolumedata(tt): pass def parse_freespace(tt): pass def parse_metavolumedata(tt): check_node(tt, 'MetaVolumeData', ['memberVolumes'], ['clientFileSystems']) return name(tt), attrs(tt) def parse_storagepool(tt): required_attrs = [ 'autoSize', 'diskType', 'memberVolumes', 'movers', 'name', 'pool', 'size', 'storageSystems', 'usedSize', ] optional_attrs = [ 'description', 'mayContainSlicesDefault', 
        'stripeCount',
        'stripeSize',
        'templatePool',
        'virtualProvisioning',
        'dataServicePolicies',
        'isHomogeneous',
    ]
    check_node(tt, 'StoragePool', required_attrs, optional_attrs)
    list_child = ['SystemStoragePoolData', 'UserStoragePoolData']
    child = list_of_various(tt, list_child)
    # Hoist selected child attributes up onto the StoragePool node itself.
    if len(child) > 0:
        for item in child:
            if item[0] == 'SystemStoragePoolData':
                if 'greedy' in item[1].keys():
                    attrs(tt)['greedy'] = item[1]['greedy']
                if 'isBackendPool' in item[1].keys():
                    attrs(tt)['isBackendPool'] = item[1]['isBackendPool']
    return name(tt), attrs(tt)


# Each parse_* function below takes a pyRXP-style tuple tree ``tt``
# (see dom_to_tupletree at the bottom of this module) and returns the
# node name plus a dict of its attributes, optionally followed by the
# parsed children.

def parse_systemstoragepooldata(tt):
    """Parse a <SystemStoragePoolData> node into (name, attrs)."""
    required_attrs = ['potentialAdditionalSize']
    optional_attrs = [
        'greedy',
        'dynamic',
        'isBackendPool',
        'usedSize',
        'size',
    ]
    check_node(tt, 'SystemStoragePoolData', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_userstoragepooldata(tt):
    # Intentionally unparsed; returns None so callers skip this node.
    pass


def parse_fault(tt):
    """Parse a <Fault> node; append parsed <Problem> children if any."""
    check_node(tt, 'Fault', ['maxSeverity'])
    child = list_of_various(tt, ['Problem'])
    if len(child) != 0:
        return name(tt), attrs(tt), child
    else:
        return name(tt), attrs(tt)


def parse_packetfault(tt):
    """Parse a <PacketFault> node; append parsed <Problem> children if any."""
    check_node(tt, 'PacketFault', ['maxSeverity'])
    child = list_of_various(tt, ['Problem'])
    if len(child) != 0:
        return name(tt), attrs(tt), child
    else:
        return name(tt), attrs(tt)


def parse_problem(tt):
    """Parse a <Problem> node, folding child text nodes into attrs."""
    required_attrs = ['component', 'messageCode', 'severity']
    optional_attrs = ['facility', 'message']
    check_node(tt, 'Problem', required_attrs, optional_attrs)
    child = list_of_various(tt, ['Description', 'Action', 'Diagnostics'])
    if 0 != len(child):
        for item in child:
            # Children may be None (e.g. parse_action returns nothing).
            if item is not None:
                if 'Description' in item.keys():
                    attrs(tt)['description'] = item['Description']
                if 'Action' in item.keys():
                    attrs(tt)['action'] = item['Action']
                if 'Diagnostics' in item.keys():
                    attrs(tt)['Diagnostics'] = item['Diagnostics']
    return name(tt), attrs(tt)


def parse_description(tt):
    """Parse a <Description> node; return {'Description': text} or None."""
    check_node(tt, 'Description', [], [], [], True)
    # tt[2] is the CONTENTS slot of the tuple tree (text fragments).
    if tt[2] is not None:
        return {name(tt): ''.join(tt[2])}


def parse_action(tt):
    # Intentionally unparsed; returns None so callers skip this node.
    pass


def parse_diagnostics(tt):
    """Parse a <Diagnostics> node; return {'Diagnostics': text}."""
    # NOTE(review): unlike parse_description, this does not guard
    # against tt[2] being None -- confirm contents are always present.
    check_node(tt, 'Diagnostics', [], [], [], True)
    return {name(tt): ''.join(tt[2])}


def parse_taskresponse(tt):
    """Parse a <TaskResponse> node and its single <Status> child."""
    check_node(tt, 'TaskResponse', ['taskId'])
    child = one_child(tt, ['Status'])
    if 'maxSeverity' in child[1].keys():
        attrs(tt)['maxSeverity'] = child[1]['maxSeverity']
    # A 2-tuple Status has no problems; a 3-tuple carries them along.
    if len(child) == 2:
        return name(tt), attrs(tt)
    else:
        return name(tt), attrs(tt), child[2]


def parse_status(tt):
    """Parse a <Status> node; append parsed <Problem> children if any."""
    check_node(tt, 'Status', ['maxSeverity'])
    child = list_of_various(tt, ['Problem'])
    if child:
        return name(tt), attrs(tt), child
    else:
        return name(tt), attrs(tt)


def parse_checkpoint(tt):
    """Parse a <Checkpoint> node, flattening host children into attrs."""
    required_attrs = ['checkpoint', 'name', 'state', 'time']
    optional_attrs = [
        'baseline',
        'checkpointOf',
        'fileSystemSize',
        'writeable',
    ]
    check_node(tt, 'Checkpoint', required_attrs, optional_attrs)
    child = list_of_various(tt, ['rwFileSystemHosts', 'roFileSystemHosts'])
    for item in child:
        if item[0] == 'rwFileSystemHosts' or item[0] == 'roFileSystemHosts':
            if 'mover' in item[1].keys():
                attrs(tt)['mover'] = item[1]['mover']
            if 'moverIdIsVdm' in item[1].keys():
                attrs(tt)['moverIdIsVdm'] = item[1]['moverIdIsVdm']
            # readOnly reflects which host element carried the mover info.
            if item[0] == 'roFileSystemHosts':
                attrs(tt)['readOnly'] = True
            else:
                attrs(tt)['readOnly'] = False
    return name(tt), attrs(tt)


def parse_nfsexport(tt):
    """Parse an <NfsExport> node, merging host-list children into attrs."""
    required_attrs = ['mover', 'path']
    optional_attrs = ['anonUser', 'fileSystem', 'readOnly']
    check_node(tt, 'NfsExport', required_attrs, optional_attrs)
    list_child = ['AccessHosts', 'RwHosts', 'RoHosts', 'RootHosts']
    child = list_of_various(tt, list_child)
    for item in child:
        if 'AccessHosts' in item.keys():
            attrs(tt)['AccessHosts'] = item['AccessHosts']
        if 'RwHosts' in item.keys():
            attrs(tt)['RwHosts'] = item['RwHosts']
        if 'RoHosts' in item.keys():
            attrs(tt)['RoHosts'] = item['RoHosts']
        if 'RootHosts' in item.keys():
            attrs(tt)['RootHosts'] = item['RootHosts']
    return name(tt), attrs(tt)


def parse_accesshosts(tt):
    """Parse an <AccessHosts> node; collect non-empty <li> entries."""
    check_node(tt, 'AccessHosts')
    access_hosts = []
    child = list_of_various(tt, ['li'])
    for item in child:
        if item != '':
            access_hosts.append(item)
    return {'AccessHosts': access_hosts}


def parse_rwhosts(tt):
    """Parse an <RwHosts> node; collect non-empty <li> entries."""
    check_node(tt, 'RwHosts')
    rw_hosts = []
    child = list_of_various(tt, ['li'])
    for item in child:
        if item != '':
            rw_hosts.append(item)
    return {'RwHosts': rw_hosts}


def parse_rohosts(tt):
    """Parse an <RoHosts> node; collect non-empty <li> entries."""
    check_node(tt, 'RoHosts')
    ro_hosts = []
    child = list_of_various(tt, ['li'])
    for item in child:
        if item != '':
            ro_hosts.append(item)
    return {'RoHosts': ro_hosts}


def parse_roothosts(tt):
    """Parse a <RootHosts> node; collect non-empty <li> entries."""
    check_node(tt, 'RootHosts')
    root_hosts = []
    child = list_of_various(tt, ['li'])
    for item in child:
        if item != '':
            root_hosts.append(item)
    return {'RootHosts': root_hosts}


def parse_mover(tt):
    """Parse a <Mover> node into (name, attrs)."""
    required_attrs = ['host', 'mover', 'name']
    optional_attrs = [
        'failoverPolicy',
        'i18NMode',
        'ntpServers',
        'role',
        'standbyFors',
        'standbys',
        'targetState',
    ]
    check_node(tt, 'Mover', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_moverstatus(tt):
    """Parse a <MoverStatus> node and flatten its <Status> child."""
    required_attrs = ['csTime', 'mover', 'uptime']
    optional_attrs = ['clock', 'timezone', 'version']
    check_node(tt, 'MoverStatus', required_attrs, optional_attrs)
    child = one_child(tt, ['Status'])
    if len(child) >= 2:
        attrs(tt)['Status'] = child[1]['maxSeverity']
    if len(child) >= 3:
        attrs(tt)['Problem'] = child[2]
    return name(tt), attrs(tt)


def parse_moverdnsdomain(tt):
    """Parse a <MoverDnsDomain> node into (name, attrs)."""
    required_attrs = ['mover', 'name', 'servers']
    optional_attrs = ['protocol']
    check_node(tt, 'MoverDnsDomain', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_moverinterface(tt):
    """Parse a <MoverInterface> node into (name, attrs)."""
    required_attrs = ['device', 'ipAddress', 'macAddr', 'mover', 'name']
    optional_attrs = [
        'broadcastAddr',
        'ipVersion',
        'mtu',
        'netMask',
        'up',
        'vlanid',
    ]
    check_node(tt, 'MoverInterface', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_moverroute(tt):
    """Parse a <MoverRoute> node into (name, attrs)."""
    required_attrs = ['mover']
    optional_attrs = [
        'destination',
        'interface',
        'ipVersion',
        'netMask',
        'gateway',
    ]
    check_node(tt, 'MoverRoute', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_logicalnetworkdevice(tt):
    """Parse a <LogicalNetworkDevice> node into (name, attrs)."""
    required_attrs = ['mover', 'name', 'speed', 'type']
    optional_attrs = ['interfaces']
    check_node(tt, 'LogicalNetworkDevice', required_attrs, optional_attrs)
    return name(tt), attrs(tt)


def parse_moverdeduplicationsettings(tt):
    """Parse a <MoverDeduplicationSettings> node into (name, attrs)."""
    required_attrs = ['mover']
    optional_attrs = [
        'accessTime',
        'modificationTime',
        'maximumSize',
        'minimumSize',
        'caseSensitive',
        'duplicateDetectionMethod',
        'minimumScanInterval',
        'fileExtensionExcludeList',
        'savVolHighWatermark',
        'backupDataHighWatermark',
        'CPULowWatermark',
        'CPUHighWatermark',
        'cifsCompressionEnabled',
    ]
    check_node(tt, 'MoverDeduplicationSettings', required_attrs,
               optional_attrs)
    return name(tt), attrs(tt)


def parse_vdm(tt):
    """Parse a <Vdm> node, copying an Interfaces child into attrs."""
    required_attrs = ['name', 'state', 'vdm']
    optional_attrs = ['mover', 'rootFileSystem']
    check_node(tt, 'Vdm', required_attrs, optional_attrs)
    child = list_of_various(tt, ['Status', 'Interfaces'])
    if len(child) > 0:
        for item in child:
            if 'Interfaces' == item[0]:
                attrs(tt)['Interfaces'] = item[1]
    return name(tt), attrs(tt)


def parse_interfaces(tt):
    """Parse an <Interfaces> node; return ('Interfaces', entries) or None."""
    check_node(tt, 'Interfaces')
    interfaces = []
    child = list_of_various(tt, ['li'])
    for item in child:
        if item != '':
            interfaces.append(item)
    if interfaces:
        return 'Interfaces', interfaces


def one_child(tt, acceptable):
    """Parse children of a node with exactly one child node.

    PCData is ignored.
    """
    k = kids(tt)
    # NOTE(review): if the node has zero element children, k[0] below
    # raises IndexError after only logging a warning -- confirm callers
    # never hit that case.
    if len(k) != 1:
        LOG.warn(_LW('Expected just one %(item)s, got %(more)s.'),
                 {'item': acceptable,
                  'more': " ".join([t[0] for t in k])})
    child = k[0]
    if name(child) not in acceptable:
        LOG.warn(_LW('Expected one of %(item)s, got %(child)s '
                     'under %(parent)s.'),
                 {'item': acceptable,
                  'child': name(child),
                  'parent': name(tt)})
    return parse_any(child)


def parse_any(tt):
    """Parse any fragment of XML."""
    node_name = name(tt).replace('.', '_')
    # Special handle for file system and checkpoint: the same element
    # names are parsed differently depending on casing, so route to
    # distinct parser functions.
    if node_name == 'RwFileSystemHosts' or node_name == 'RoFileSystemHosts':
        node_name += '_filesystem'
    elif node_name == 'rwFileSystemHosts' or node_name == 'roFileSystemHosts':
        node_name += '_ckpt'
    # Dispatch by naming convention: parse_<lowercased node name>.
    fn_name = 'parse_' + node_name.lower()
    fn = globals().get(fn_name)
    if fn is None:
        LOG.warn(_LW('No parser for node type %s.'), name(tt))
    else:
        return fn(tt)


def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
               allowed_children=None, allow_pcdata=False):
    """Check static local constraints on a single node.

    The node must have the given name. The required attrs must be
    present, and the optional attrs may be.

    If allowed_children is not None, the node may have children of the
    given types. It can be [] for nodes that may not have any children.
    If it's None, it is assumed the children are validated in some
    other way.

    If allow_pcdata is true, then non-whitespace text children are
    allowed. (Whitespace text nodes are always allowed.)

    All violations are logged as warnings rather than raised, so
    parsing continues on malformed input.
    """
    if not optional_attrs:
        optional_attrs = []
    if not required_attrs:
        required_attrs = []
    if name(tt) != nodename:
        LOG.warn(_LW('Expected node type %(expected)s, not %(actual)s.'),
                 {'expected': nodename,
                  'actual': name(tt)})
    # Check we have all the required attributes, and no unexpected ones
    tt_attrs = {}
    if attrs(tt) is not None:
        tt_attrs = attrs(tt).copy()
    for attr in required_attrs:
        if attr not in tt_attrs:
            LOG.warn(_LW('Expected %(attr)s attribute on %(node)s node,'
                         ' but only have %(attrs)s.'),
                     {'attr': attr,
                      'node': name(tt),
                      'attrs': attrs(tt).keys()})
        else:
            del tt_attrs[attr]
    for attr in optional_attrs:
        if attr in tt_attrs:
            del tt_attrs[attr]
    # Anything left over was neither required nor optional.
    if len(tt_attrs.keys()) > 0:
        LOG.warn(_LW('Invalid extra attributes %s.'), tt_attrs.keys())
    if allowed_children is not None:
        for c in kids(tt):
            if name(c) not in allowed_children:
                LOG.warn(_LW('Unexpected node %(node)s under %(parent)s;'
                             ' wanted %(expected)s.'),
                         {'node': name(c),
                          'parent': name(tt),
                          'expected': allowed_children})
    if not allow_pcdata:
        for c in tt[2]:
            if isinstance(c, six.string_types):
                if c.lstrip(' \t\n') != '':
                    LOG.warn(_LW('Unexpected non-blank pcdata node %(node)s'
                                 ' under %(parent)s.'),
                             {'node': repr(c),
                              'parent': name(tt)})


def optional_child(tt, allowed):
    """Parse zero or one of a list of elements from the child nodes."""
    k = kids(tt)
    if len(k) > 1:
        LOG.warn(_LW('Expected either zero or one of %(node)s '
                     'under %(parent)s.'),
                 {'node': allowed, 'parent': tt})
    elif len(k) == 1:
        return one_child(tt, allowed)
    else:
        return None


def list_of_various(tt, acceptable):
    """Parse zero or more of a list of elements from the child nodes.

    Each element of the list can be any type from the list of the
    acceptable nodes.
    """
    r = []
    for child in kids(tt):
        if name(child) not in acceptable:
            LOG.warn(_LW('Expected one of %(expected)s under'
                         ' %(parent)s, got %(actual)s.'),
                     {'expected': acceptable,
                      'parent': name(tt),
                      'actual': repr(name(child))})
        result = parse_any(child)
        # Parsers that opt out (e.g. parse_action) return None.
        if result is not None:
            r.append(result)
    return r


def dom_to_tupletree(node):
    """Convert a DOM object to a pyRXP-style tuple tree.

    Each element is a 4-tuple of (NAME, ATTRS, CONTENTS, None).

    Very nice for processing complex nested trees.
    """
    if node.nodeType == node.DOCUMENT_NODE:
        # boring; pop down one level
        return dom_to_tupletree(node.firstChild)
    assert node.nodeType == node.ELEMENT_NODE
    node_name = node.nodeName
    attributes = {}
    contents = []
    for child in node.childNodes:
        if child.nodeType == child.ELEMENT_NODE:
            contents.append(dom_to_tupletree(child))
        elif child.nodeType == child.TEXT_NODE:
            msg = "text node %s is not a string" % repr(child)
            assert isinstance(child.nodeValue, six.string_types), msg
            contents.append(child.nodeValue)
        else:
            # Comments, CDATA, processing instructions etc. are not
            # expected in these responses.
            raise RuntimeError("can't handle %s" % child)
    for i in range(node.attributes.length):
        attr_node = node.attributes.item(i)
        attributes[attr_node.nodeName] = attr_node.nodeValue
    return node_name, attributes, contents, None


def xml_to_tupletree(xml_string):
    """Parse XML straight into tupletree."""
    dom_xml = xml.dom.minidom.parseString(xml_string)
    return dom_to_tupletree(dom_xml)
# This is a code for the first lesson. You can execute it by typing
# $ python intro.py
# Or if you prefer python3 (and you should prefer it)
# $ python3 intro.py
# As you see "#" is used for comments.

# ## Print

# ### Python 2 and 3

# Unfortunately today there are two incompatible branches of Python language:
# version 2.7 and versions 3.*. The very first difference that you find is a
# print statement. In Python 2 print is literally a statement and should be
# used without parentheses:
# >>> print 1
# will print "1". Nowadays you should use Python 3 where print is a function
# (for now just believe there is a reason to be so):
# >>> print(1)
# will print "1". If you must use Python 2 or you write a library that should
# work on both 2 and 3 then you can use this "magic" line with __future__
# statement:
from __future__ import print_function, division  # has no effect in Python 3
# We will describe import system later. This line has no effect in Python 3 but
# in Python 2 it sets behaviour of print statement to the same as in Python 3.
# This line should be the first meaningful line in your file.

# Now we are ready to print something:
print(1)  # will print "1" without quotes

# Let's test simple arithmetic operations:
print(1 + 2)  # 3
print(3 - 4)  # -1
print(3 * (-5))  # -15
print(2 ** 10)  # 1024

# ## Numbers

# ### Integers

# Python supports long integers
print(1024 ** 128)
# `20815864389328798163850480654728171077230524494533409610638224700807216119346720596024478883464648369684843227908562015582767132496646929816279813211354641525848259018778440691546366699323167100945918841095379622423387354295096957733925002768876520583464697770622321657076833170056511209332449663781837603694136444406281042053396870977465916057756101739472373801429441421111406337458176`

# ### Floats

# There are built-in floating point numbers (float for short):
print(1.23)  # 1.23
print(1e-3)  # 0.001
# Floats have limits:
print(1e200 * 1e200)  # inf

# ### Division

# Division (operator `/`) works differently in Python 2 and 3. In Python 2
# division of two integers is always integer but in Python 3 it is always
# float. "Magic" statement `from __future__ import division` above works in the
# very same way as for print function described earlier and asks Python 2 to
# work as Python 3. Use it always if your code can be run with Python 2
# interpreter.
# Let's look at how division works
print(1 / 2)  # 0.5
print(4 / 2)  # 2.0
print(1.5 / 0.5)  # 3.0
# Operator `//` returns the result of division rounded down to the smaller
# integer value. It returns integer typed value for a pair of integers and
# float typed value if at least one of the values is float:
print(1 // 2)  # 0
print(4.0 // 2)  # 2.0
print(1.5 // 0.4)  # 3.0
# Operator `%` returns remainder, returned value type is determined from the
# same laws as for `//`.
print(1 % 2)  # 1
print(1.5 % 0.4)  # 0.29999999999999993
# Yes, Python has a common floating point arithmetic accuracy problem, see
# Wikipedia for details
# <https://en.wikipedia.org/wiki/Floating-point_arithmetic#Accuracy_problems>

# ### Complex numbers

# Python has a floating complex type:
print(2 + 3j)  # (2+3j)
# Where j is the imaginary unit modifier

# ## Variables and numerical types

# ### Dynamic type checking

# Python is a dynamic type checking language that means you don't need to
# declare variable before assignment. Also you can change value and type of a
# variable after the first assignment:
a = 10
print(a)  # 10
a = 12.0
print(a)  # 12.0

# ### Type conversion

# You can convert value from one type to another
a = 7
b = complex(7)
print(b)  # (7+0j)
a = 13.2
b = int(a)
print(b)  # 13

# ### Attributes and methods

# Python is an object oriented language and each variable is represented by an
# object of some type (class). We will describe how to create our own classes
# later. Now the only thing that we should know is that classes and therefore
# objects have attributes and methods (functions). Syntax of attribute access
# is the same as in a lot of other languages via `.` separator.
a = (-1 + 0j)**(-0.5)  # "+ 0j" is needed only in Python 2
print(a.imag)  # -1.0
print(a.conjugate().imag)  # 1.0

# ### Type of the variable

# Built-in function `type` returns object type
print(type(1))  # <class 'int'>
# Full list of built-in functions can be found on
# <https://docs.python.org/library/functions.html>

# ## If-else blocks and Boolean variables

# ### If-elif-else

# If statement is as simple as
a = 4
if a > 3:
    print(a)
else:
    print(0)
# `4`
# Pay attention to colon and that blocks inside if-else statement are shifted.
if a < 0:
    print(-a)
else:
    if a > 0:
        print(a)
    else:
        print(0)
# `4`
# You can combine `else:` and `if:` statements into one `elif` statement:
if a < 0:
    print(-a)
elif a > 0:
    print(a)
else:
    print(0)
# `4`

# ## Indentation

# Indentation is a part of the Python language. You should always use only one
# type of indentation in the source file: constant number of spaces or tabs.
# Otherwise interpreter will fail with `IndentationError` error. PEP 8 (style
# guide for Python code, <http://pep8.org/#indentation>) recommends to use 4
# spaces per each indentation level. I will try to follow PEP 8 in this course.
# However sometimes indent means nothing and serves only pretty code
# appearance
a = (1 +
     2 -
     3j)
print(a)  # (3-3j)

# ### Boolean type variables and Boolean value of variables

# In fact we have already met Boolean variables above inside `if` statement.
print(1 > 0)  # True
print(1 == 0)  # False
a = 0.5
print(a > 0 and a < 1)  # True
# Is the same as
print(0 < a < 1)  # True
print(a < 0 or a > 1)  # False
b = a < 0 or a > 1
b = not b
print(b)  # True
# Each variable has Boolean value that can be used by `if` to decide what to
# do. You can obtain this Boolean value by built-in `bool` function:
print(bool(0+1j))  # True
print(bool(0))  # False

# ## While loop

# While statement is as simple as `if`
a = 0
s = 0
while a < 10:
    s += a
    a += 1
print(s)  # 45

# ### Continue and break statements

# You can skip loop step using continue statement:
a = 0
s = 0
while a < 10:
    a += 1
    if a % 2 == 1:
        continue
    s += a
print(a, s)  # 10 30
# You can exit an infinite loop using break statement:
a = 0
s = 0
while True:
    s += a
    a += 1
    if s > 1024:
        break
print(s)  # 1035
# This is an artificial example but pay attention how to make an infinite
# loop.

# ### while and else together

# If you want to catch a break from while (or for) loop use while-else:
a = 0
while a < 10:
    a += 1
    b = a * 2
    if b > 20:
        print(b)
        break
else:
    print(20)
# 20
a = 0
while a < 10:
    a += 1
    b = a * 2
    if b > 10:
        print(b)
        break
else:
    print(10)
# `12`
# This syntax should be preferred to a usage of a boolean flag.
# Auto-generated YDK meta-information for the Cisco IOS-XE 'nvo' YANG model.
# Maps each model class path to its _MetaInfoClass (members, patterns,
# namespaces); parent links are wired up at the bottom of the module.
import re
import collections
from enum import Enum

from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns

_meta_table = {
    'OverlayEncapTypeIdentity' : {
        'meta_info' : _MetaInfoClass('OverlayEncapTypeIdentity',
            False,
            [
            ],
            'nvo',
            'overlay-encap-type',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvoInstances.NvoInstance.VirtualNetwork.Multicast' : {
        'meta_info' : _MetaInfoClass('NvoInstances.NvoInstance.VirtualNetwork.Multicast',
            False,
            [
            _MetaInfoClassMember('multicast-group-max', ATTRIBUTE, 'str' , None, None,
                [],
                [b'(2((2[4-9])|(3[0-9]))\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'],
                ''' End of IPV4 Multicast group address (leave unspecified for single value
                ''',
                'multicast_group_max',
                'nvo', False),
            _MetaInfoClassMember('multicast-group-min', ATTRIBUTE, 'str' , None, None,
                [],
                [b'(2((2[4-9])|(3[0-9]))\\.)(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){2}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])'],
                ''' Single IPV4 Multicast group address or start of range
                ''',
                'multicast_group_min',
                'nvo', False),
            ],
            'nvo',
            'multicast',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvoInstances.NvoInstance.VirtualNetwork.Peers' : {
        'meta_info' : _MetaInfoClass('NvoInstances.NvoInstance.VirtualNetwork.Peers',
            False,
            [
            _MetaInfoClassMember('peer-ip', REFERENCE_UNION, 'str' , None, None,
                [],
                [],
                ''' VTEP peer IP address
                ''',
                'peer_ip',
                'nvo', True, [
                    _MetaInfoClassMember('peer-ip', ATTRIBUTE, 'str' , None, None,
                        [],
                        [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
                        ''' VTEP peer IP address
                        ''',
                        'peer_ip',
                        'nvo', True),
                    _MetaInfoClassMember('peer-ip', ATTRIBUTE, 'str' , None, None,
                        [],
                        [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
                        ''' VTEP peer IP address
                        ''',
                        'peer_ip',
                        'nvo', True),
                ]),
            ],
            'nvo',
            'peers',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvoInstances.NvoInstance.VirtualNetwork.EndHostDiscoveryEnum' : _MetaInfoEnum('EndHostDiscoveryEnum', 'ydk.models.cisco_ios_xe.nvo',
        {
            'flood-and-learn':'flood_and_learn',
            'bgp':'bgp',
        }, 'nvo', _yang_ns._namespaces['nvo']),
    'NvoInstances.NvoInstance.VirtualNetwork' : {
        'meta_info' : _MetaInfoClass('NvoInstances.NvoInstance.VirtualNetwork',
            False,
            [
            _MetaInfoClassMember('vni-start', ATTRIBUTE, 'int' , None, None,
                [('1', '16777214')],
                [],
                ''' Single Virtual Network Identifier or start of range
                ''',
                'vni_start',
                'nvo', True),
            _MetaInfoClassMember('vni-end', ATTRIBUTE, 'int' , None, None,
                [('1', '16777214')],
                [],
                ''' End of Virtual Network Identifier range (make equal to vni-start for single vni
                ''',
                'vni_end',
                'nvo', True),
            _MetaInfoClassMember('bgp', ATTRIBUTE, 'Empty' , None, None,
                [],
                [],
                ''' Use control protocol BGP to discover peers
                ''',
                'bgp',
                'nvo', False),
            _MetaInfoClassMember('end-host-discovery', REFERENCE_ENUM_CLASS, 'EndHostDiscoveryEnum' , 'ydk.models.cisco_ios_xe.nvo', 'NvoInstances.NvoInstance.VirtualNetwork.EndHostDiscoveryEnum',
                [],
                [],
                ''' How to peform endpoint discovery
                ''',
                'end_host_discovery',
                'nvo', False),
            _MetaInfoClassMember('multicast', REFERENCE_CLASS, 'Multicast' , 'ydk.models.cisco_ios_xe.nvo', 'NvoInstances.NvoInstance.VirtualNetwork.Multicast',
                [],
                [],
                ''' Mulitcast group range associated with the VxLAN segment(s)
                ''',
                'multicast',
                'nvo', False),
            _MetaInfoClassMember('peers', REFERENCE_LIST, 'Peers' , 'ydk.models.cisco_ios_xe.nvo', 'NvoInstances.NvoInstance.VirtualNetwork.Peers',
                [],
                [],
                ''' List of VTEP peers
                ''',
                'peers',
                'nvo', False),
            _MetaInfoClassMember('routing-instance', ATTRIBUTE, 'str' , None, None,
                [],
                [],
                ''' VRF Name
                ''',
                'routing_instance',
                'nvo', False),
            _MetaInfoClassMember('suppress-arp', ATTRIBUTE, 'Empty' , None, None,
                [],
                [],
                ''' Enable ARP request suppression for this VNI
                ''',
                'suppress_arp',
                'nvo', False),
            ],
            'nvo',
            'virtual-network',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvoInstances.NvoInstance' : {
        'meta_info' : _MetaInfoClass('NvoInstances.NvoInstance',
            False,
            [
            _MetaInfoClassMember('nvo-id', ATTRIBUTE, 'int' , None, None,
                [('0', '65535')],
                [],
                ''' Network Virtualization Overlay Instance Identifier
                ''',
                'nvo_id',
                'nvo', True),
            _MetaInfoClassMember('overlay-encapsulation', REFERENCE_IDENTITY_CLASS, 'OverlayEncapTypeIdentity' , 'ydk.models.cisco_ios_xe.nvo', 'OverlayEncapTypeIdentity',
                [],
                [],
                ''' Encapsulation type
                ''',
                'overlay_encapsulation',
                'nvo', False),
            _MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
                [],
                [],
                ''' Source interface name
                ''',
                'source_interface',
                'nvo', False),
            _MetaInfoClassMember('virtual-network', REFERENCE_LIST, 'VirtualNetwork' , 'ydk.models.cisco_ios_xe.nvo', 'NvoInstances.NvoInstance.VirtualNetwork',
                [],
                [],
                ''' VNI member attributes
                ''',
                'virtual_network',
                'nvo', False),
            ],
            'nvo',
            'nvo-instance',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvoInstances' : {
        'meta_info' : _MetaInfoClass('NvoInstances',
            False,
            [
            _MetaInfoClassMember('nvo-instance', REFERENCE_LIST, 'NvoInstance' , 'ydk.models.cisco_ios_xe.nvo', 'NvoInstances.NvoInstance',
                [],
                [],
                ''' List of instances
                ''',
                'nvo_instance',
                'nvo', False),
            ],
            'nvo',
            'nvo-instances',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'NvgreTypeIdentity' : {
        'meta_info' : _MetaInfoClass('NvgreTypeIdentity',
            False,
            [
            ],
            'nvo',
            'nvgre-type',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
    'VxlanTypeIdentity' : {
        'meta_info' : _MetaInfoClass('VxlanTypeIdentity',
            False,
            [
            ],
            'nvo',
            'vxlan-type',
            _yang_ns._namespaces['nvo'],
            'ydk.models.cisco_ios_xe.nvo'
        ),
    },
}

# Wire up the containment hierarchy (child meta_info -> parent meta_info).
_meta_table['NvoInstances.NvoInstance.VirtualNetwork.Multicast']['meta_info'].parent =_meta_table['NvoInstances.NvoInstance.VirtualNetwork']['meta_info']
_meta_table['NvoInstances.NvoInstance.VirtualNetwork.Peers']['meta_info'].parent =_meta_table['NvoInstances.NvoInstance.VirtualNetwork']['meta_info']
_meta_table['NvoInstances.NvoInstance.VirtualNetwork']['meta_info'].parent =_meta_table['NvoInstances.NvoInstance']['meta_info']
_meta_table['NvoInstances.NvoInstance']['meta_info'].parent =_meta_table['NvoInstances']['meta_info']
""" Widget for multi-model constraints. """ import os # numpy methods required for the validator! Don't remove. # pylint: disable=unused-import,unused-wildcard-import,redefined-builtin from numpy import * from PyQt5 import QtCore from PyQt5 import QtGui from PyQt5 import QtWidgets import webbrowser from sas.qtgui.Perspectives.Fitting import FittingUtilities import sas.qtgui.Utilities.GuiUtils as GuiUtils from sas.qtgui.Perspectives.Fitting.Constraint import Constraint #ALLOWED_OPERATORS = ['=','<','>','>=','<='] ALLOWED_OPERATORS = ['='] # Local UI from sas.qtgui.Perspectives.Fitting.UI.ComplexConstraintUI import Ui_ComplexConstraintUI class ComplexConstraint(QtWidgets.QDialog, Ui_ComplexConstraintUI): constraintReadySignal = QtCore.pyqtSignal(tuple) def __init__(self, parent=None, tabs=None): super(ComplexConstraint, self).__init__(parent) self.setupUi(self) self.setModal(True) # disable the context help icon windowFlags = self.windowFlags() self.setWindowFlags(windowFlags & ~QtCore.Qt.WindowContextHelpButtonHint) # Useful globals self.tabs = tabs self.params = None self.tab_names = None self.operator = '=' self._constraint = Constraint() self.all_menu = None self.parent = parent self.redefining_warning = "" self.warning = self.lblWarning.text() self.setupData() self.setupSignals() self.setupWidgets() self.setupTooltip() # Default focus is on OK self.cmdOK.setFocus() def setupData(self): """ Digs into self.tabs and pulls out relevant info """ self.tab_names = [tab.kernel_module.name for tab in self.tabs] self.params = [tab.getParamNames() for tab in self.tabs] def setupSignals(self): """ Signals from various elements """ self.cmdOK.clicked.connect(self.onApply) self.cmdHelp.clicked.connect(self.onHelp) self.cmdAddAll.clicked.connect(self.onSetAll) self.txtConstraint.editingFinished.connect(self.validateFormula) self.cbModel1.currentIndexChanged.connect(self.onModelIndexChange) self.cbModel2.currentIndexChanged.connect(self.onModelIndexChange) 
self.cbParam1.currentIndexChanged.connect(self.onParamIndexChange) self.cbParam2.currentIndexChanged.connect(self.onParamIndexChange) self.cbOperator.currentIndexChanged.connect(self.onOperatorChange) def setupWidgets(self): """ Setup widgets based on current parameters """ self.cbModel1.insertItems(0, self.tab_names) # add an `All` option in the lhs if there are more than 3 tabs if len(self.tab_names) > 2: self.cbModel1.addItem("All") self.cbModel2.insertItems(0, self.tab_names) self.setupParamWidgets() self.setupMenu() def setupMenu(self): # Show Add All button, if necessary if self.cbModel1.currentText() ==self.cbModel2.currentText(): self.cmdAddAll.setVisible(False) else: self.cmdAddAll.setVisible(True) return def setupParamWidgets(self): """ Fill out comboboxes and set labels with non-constrained parameters """ # Store previously select parameter previous_param1 = self.cbParam1.currentText() # Clear the combobox self.cbParam1.clear() # Populate the left combobox parameter arbitrarily with the parameters # from the first tab if `All` option is selected if self.cbModel1.currentText() == "All": items1 = self.tabs[1].main_params_to_fit else: tab_index1 = self.cbModel1.currentIndex() items1 = self.tabs[tab_index1].main_params_to_fit self.cbParam1.addItems(items1) # Show the previously selected parameter if available if previous_param1 in items1: index1 = self.cbParam1.findText(previous_param1) self.cbParam1.setCurrentIndex(index1) # Store previously select parameter previous_param2 = self.cbParam2.currentText() # M2 has to be non-constrained self.cbParam2.clear() tab_index2 = self.cbModel2.currentIndex() items2 = [param for param in self.params[tab_index2] if not self.tabs[tab_index2].paramHasConstraint(param)] self.cbParam2.addItems(items2) # Show the previously selected parameter if available if previous_param2 in items2: index2 = self.cbParam2.findText(previous_param2) self.cbParam2.setCurrentIndex(index2) self.txtParam.setText(self.cbModel1.currentText() + ":" 
+ self.cbParam1.currentText()) self.cbOperator.clear() self.cbOperator.addItems(ALLOWED_OPERATORS) self.txtOperator.setText(self.cbOperator.currentText()) self.txtConstraint.setText(self.tab_names[tab_index2]+"."+self.cbParam2.currentText()) # disable Apply if no parameters available if len(items1)==0: self.cmdOK.setEnabled(False) self.cmdAddAll.setEnabled(False) txt = "No parameters in model "+self.tab_names[0] +\ " are available for constraining.\n"+\ "Please select at least one parameter for fitting when adding a constraint." else: txt = self.redefining_warning self.cmdOK.setEnabled(True) self.cmdAddAll.setEnabled(True) self.lblWarning.setText(txt) # disable Aplly all if `All` option on lhs has been selected if self.cbModel1.currentText() == "All": self.cmdAddAll.setEnabled(False) else: self.cmdAddAll.setEnabled(True) def setupTooltip(self): """ Tooltip for txtConstraint """ tooltip = "E.g. M1:scale = 2.0 * M2.scale\n" tooltip += "M1:scale = sqrt(M2.scale) + 5" self.txtConstraint.setToolTip(tooltip) def onParamIndexChange(self, index): """ Respond to parameter combo box changes """ # Find out the signal source source = self.sender().objectName() param1 = self.cbParam1.currentText() param2 = self.cbParam2.currentText() if source == "cbParam1": self.txtParam.setText(self.cbModel1.currentText() + ":" + param1) else: self.txtConstraint.setText(self.cbModel2.currentText() + "." 
+ param2) # Check if any of the parameters are polydisperse params_list = [param1, param2] all_pars = [tab.model_parameters for tab in self.tabs] is2Ds = [tab.is2D for tab in self.tabs] txt = self.redefining_warning for pars, is2D in zip(all_pars, is2Ds): if any([FittingUtilities.isParamPolydisperse(p, pars, is2D) for p in params_list]): # no parameters are pd - reset the text to not show the warning txt = self.warning self.lblWarning.setText(txt) def onModelIndexChange(self, index): """ Respond to mode combo box changes """ # disable/enable Add All self.setupMenu() # Reload parameters self.setupParamWidgets() def onOperatorChange(self, index): """ Respond to operator combo box changes """ self.txtOperator.setText(self.cbOperator.currentText()) def validateFormula(self): """ Add visual cues when formula is incorrect """ # temporarily disable validation return # formula_is_valid = self.validateConstraint(self.txtConstraint.text()) if not formula_is_valid: self.cmdOK.setEnabled(False) self.cmdAddAll.setEnabled(False) self.txtConstraint.setStyleSheet("QLineEdit {background-color: red;}") else: self.cmdOK.setEnabled(True) self.cmdAddAll.setEnabled(True) self.txtConstraint.setStyleSheet("QLineEdit {background-color: white;}") def validateConstraint(self, constraint_text): """ Ensure the constraint has proper form """ # 0. none or empty if not constraint_text or not isinstance(constraint_text, str): return False # M1.scale --> model_str='M1', constraint_text='scale' param_str = self.cbParam2.currentText() constraint_text = constraint_text.strip() model_str = self.cbModel2.currentText() # 0. Has to contain the model name if model_str != model_str: return False # Remove model name from constraint constraint_text = constraint_text.replace(model_str+".",'') # 1. just the parameter if param_str == constraint_text: return True # 2. ensure the text contains parameter name parameter_string_start = constraint_text.find(param_str) if parameter_string_start < 0: return False # 3. 
replace parameter name with "1" and try to evaluate the expression try: expression_to_evaluate = constraint_text.replace(param_str, "1.0") eval(expression_to_evaluate) except Exception: # Too many cases to cover individually, just a blanket # Exception should be sufficient # Note that in current numpy things like sqrt(-1) don't # raise but just return warnings return False return True def constraint(self): """ Return the generated constraint """ param = self.cbParam1.currentText() value = self.cbParam2.currentText() func = self.txtConstraint.text() value_ex = (self.cbModel1.currentText() + "." + self.cbParam1.currentText()) model1 = self.cbModel1.currentText() operator = self.cbOperator.currentText() con = Constraint(self, param=param, value=value, func=func, value_ex=value_ex, operator=operator) return (model1, con) def constraintIsRedefined(self, cons_tuple): """ Warn the user when a constraint is being redefined """ # get the the parameter that is being redefined param = cons_tuple[1].param # get a list of all constrained parameters tab_index1 = self.cbModel1.currentIndex() items = [param for param in self.params[tab_index1] if self.tabs[tab_index1].paramHasConstraint(param)] # loop over the list of constrained parameters to check for redefinition for item in items: if item == param: return True return False def onApply(self): """ Respond to Add constraint action. Send a signal that the constraint is ready to be applied """ # if the combobox is set to `All` just call `applyAcrossTabs` and # return if self.cbModel1.currentText() == "All": # exclude the tab on the lhs tabs = [tab for tab in self.tabs if tab.kernel_module.name != self.cbModel2.currentText()] self.applyAcrossTabs(tabs, self.cbParam1.currentText(), self.txtConstraint.text()) self.setupParamWidgets() return cons_tuple = self.constraint() #check if constraint has been redefined if self.constraintIsRedefined(cons_tuple): txt = "Warning: parameter " + \ cons_tuple[0] + "." 
+ cons_tuple[1].param +\ " has been redefined." self.redefining_warning = txt else: self.redefining_warning = "" self.constraintReadySignal.emit(cons_tuple) # reload the comboboxes if self.parent.constraint_accepted: self.setupParamWidgets() def applyAcrossTabs(self, tabs, param, expr): """ Apply constraints across tabs, e.g. all `scale` parameters constrained to an expression. *tabs* is a list of active fit tabs for which the parameter string *param* will be constrained to the *expr* string. """ for tab in tabs: if hasattr(tab, "kernel_module"): if param in tab.kernel_module.params: value_ex = tab.kernel_module.name + "." +param constraint = Constraint(param=param, value=param, func=expr, value_ex=value_ex, operator="=") self.constraintReadySignal.emit((tab.kernel_module.name, constraint)) def onSetAll(self): """ Set constraints on all identically named parameters between two fitpages """ # loop over parameters in constrained model index1 = self.cbModel1.currentIndex() index2 = self.cbModel2.currentIndex() items1 = self.tabs[index1].kernel_module.params items2 = self.params[index2] # create an empty list to store redefined constraints redefined_constraints = [] for item in items1: if item not in items2: continue param = item value = item func = self.cbModel2.currentText() + "." + param value_ex = self.cbModel1.currentText() + "." + param model1 = self.cbModel1.currentText() operator = self.cbOperator.currentText() con = Constraint(self, param=param, value=value, func=func, value_ex=value_ex, operator=operator) # check for redefined constraints and add them to the list if self.constraintIsRedefined((model1, con)): redefined_constraints.append(model1 + "." + param) # warn the user if constraints have been redefined if redefined_constraints: constraint_txt = "" for redefined_constraint in redefined_constraints: constraint_txt += redefined_constraint + ", " txt = "Warning: parameters " +\ constraint_txt[:-2] +\ " have been redefined." 
if len(redefined_constraints) == 1: txt = txt.replace("parameters", "parameter") txt = txt.replace("has", "been") self.redefining_warning = txt else: self.redefining_warning = "" self.constraintReadySignal.emit((model1, con)) # reload the comboboxes self.setupParamWidgets() def onHelp(self): """ Display related help section """ tree_location = "/user/qtgui/Perspectives/Fitting/" helpfile = "fitting_help.html#simultaneous-fits-with-constraints" help_location = tree_location + helpfile self.parent.parent.parent.showHelp(help_location)
# -*- coding: utf-8 -*- """Helper for interacting with git.""" import os from l2treviewtools.helpers import cli class GitHelper(cli.CLIHelper): """Git command helper.""" def __init__(self, git_repo_url): """Initializes a git helper. Args: git_repo_url (str): git repo URL. """ super(GitHelper, self).__init__() self._git_repo_url = git_repo_url self._remotes = [] def _GetRemotes(self): """Retrieves the git repository remotes. Returns: list[str]: git repository remotes or None. """ if not self._remotes: exit_code, output, _ = self.RunCommand(u'git remote -v') if exit_code == 0: self._remotes = list(filter(None, output.split(b'\n'))) return self._remotes def AddPath(self, path): """Adds a specific path to be managed by git. Args: path (str): path. Returns: bool: True if the path was added. """ command = u'git add -A {0:s}'.format(path) exit_code, _, _ = self.RunCommand(command) return exit_code == 0 def CheckHasBranch(self, branch): """Checks if the git repo has a specific branch. Args: branch (str): name of the feature branch. Returns: bool: True if git repo has the specific branch. """ exit_code, output, _ = self.RunCommand(u'git branch') if exit_code != 0: return False # Check for remote entries starting with upstream. for line in output.split(b'\n'): # Ignore the first 2 characters of the line. if line[2:] == branch: return True return False def CheckHasProjectOrigin(self): """Checks if the git repo has the project remote origin defined. Returns: bool: True if the git repo has the project origin defined. """ origin_git_repo_url = self.GetRemoteOrigin() is_match = origin_git_repo_url == self._git_repo_url if not is_match: is_match = origin_git_repo_url == self._git_repo_url[:-4] return is_match def CheckHasProjectUpstream(self): """Checks if the git repo has the project remote upstream defined. Returns: bool: True if the git repo has the project remote upstream defined. """ # Check for remote entries starting with upstream. 
for remote in self._GetRemotes(): if remote.startswith(b'upstream\t{0:s}'.format(self._git_repo_url)): return True return False def CheckHasUncommittedChanges(self): """Checks if the git repo has uncommitted changes. Returns: bool: True if the git repo has uncommitted changes. """ exit_code, output, _ = self.RunCommand(u'git status -s') if exit_code != 0: return False # Check if 'git status -s' yielded any output. for line in output.split(b'\n'): if line: return True return False def CheckSynchronizedWithUpstream(self): """Checks if the git repo is synchronized with upstream. Returns: bool: True if the git repo is synchronized with upstream. """ # Fetch the entire upstream repo information not only that of # the master branch. Otherwise the information about the current # upstream HEAD is not updated. exit_code, _, _ = self.RunCommand(u'git fetch upstream') if exit_code != 0: return False # The result of "git log HEAD..upstream/master --oneline" should be empty # if the git repo is synchronized with upstream. exit_code, output, _ = self.RunCommand( u'git log HEAD..upstream/master --oneline') return exit_code == 0 and not output def CommitToOriginInNameOf( self, codereview_issue_number, author, description): """Commits changes in name of an author to the master branch of origin. Args: codereview_issue_number (int|str): codereview issue number. author (str): full name and email address of the author, formatted as: "Full Name <email.address@example.com>". description (str): description of the commit. Returns: bool: True if the changes were committed to the git repository. 
""" command = ( u'git commit -a --author="{0:s}" ' u'-m "Code review: {1:s}: {2:s}"').format( author, codereview_issue_number, description) exit_code, _, _ = self.RunCommand(command) if exit_code != 0: return False exit_code, _, _ = self.RunCommand(u'git push origin master') if exit_code != 0: return False return True def DropUncommittedChanges(self): """Drops the uncommitted changes.""" self.RunCommand(u'git stash') self.RunCommand(u'git stash drop') def GetActiveBranch(self): """Retrieves the active branch. Returns: str: name of the active branch or None. """ exit_code, output, _ = self.RunCommand(u'git branch') if exit_code != 0: return False # Check for remote entries starting with upstream. for line in output.split(b'\n'): if line.startswith(b'* '): # Ignore the first 2 characters of the line. return line[2:] return def GetChangedFiles(self, diffbase=None): """Retrieves the changed files. Args: diffbase (Optional[str]): git diffbase, for example "upstream/master". Returns: list[str]: names of the changed files. """ if diffbase: command = u'git diff --name-only {0:s}'.format(diffbase) else: command = u'git ls-files' exit_code, output, _ = self.RunCommand(command) if exit_code != 0: return [] return output.split(b'\n') def GetChangedPythonFiles(self, diffbase=None): """Retrieves the changed Python files. Note that several Python files are excluded: * Python files generated by the protobuf compiler (*_pb2.py) * Python files used as test data (test_data/*.py) * Python files used for sphinx (docs/*.py) * setup.py and l2treviewtools/lib/upload.py Args: diffbase (Optional[str]): git diffbase, for example "upstream/master". Returns: list[str]: names of the changed Python files. 
""" upload_path = os.path.join(u'l2treviewtools', u'lib', u'upload.py') python_files = [] for changed_file in self.GetChangedFiles(diffbase=diffbase): if (not changed_file.endswith(u'.py') or changed_file.endswith(u'_pb2.py') or not os.path.exists(changed_file) or changed_file.startswith(u'data') or changed_file.startswith(u'docs') or changed_file.startswith(u'test_data') or changed_file in (u'setup.py', upload_path)): continue python_files.append(changed_file) return python_files def GetEmailAddress(self): """Retrieves the email address. Returns: str: email address or None. """ exit_code, output, _ = self.RunCommand(u'git config user.email') if exit_code != 0: return output_lines = output.split(b'\n') if not output_lines: return return output_lines[0] def GetLastCommitMessage(self): """Retrieves the last commit message. Returns: str: last commit message or None. """ exit_code, output, _ = self.RunCommand(u'git log -1') if exit_code != 0: return # Expecting 6 lines of output where the 5th line contains # the commit message. output_lines = output.split(b'\n') if len(output_lines) != 6: return return output_lines[4].strip() def GetRemoteOrigin(self): """Retrieves the remote origin. Returns: str: git repository URL or None. """ # Check for remote entries starting with origin. for remote in self._GetRemotes(): if remote.startswith(b'origin\t'): values = remote.split() if len(values) == 3: return values[1] def PullFromFork(self, git_repo_url, branch): """Pulls changes from a feature branch on a fork. Args: git_repo_url (str): git repository URL of the fork. branch (str): name of the feature branch of the fork. Returns: bool: True if the pull was successful. """ command = u'git pull --squash {0:s} {1:s}'.format(git_repo_url, branch) exit_code, _, _ = self.RunCommand(command) return exit_code == 0 def PushToOrigin(self, branch, force=False): """Forces a push of the active branch of the git repo to origin. Args: branch (str): name of the feature branch. 
force (Optional[bool]): True if the push should be forced. Returns: bool: True if the push was successful. """ if force: command = u'git push --set-upstream origin {0:s}'.format(branch) else: command = u'git push -f --set-upstream origin {0:s}'.format(branch) exit_code, _, _ = self.RunCommand(command) return exit_code == 0 def RemoveFeatureBranch(self, branch): """Removes the git feature branch both local and from origin. Args: branch (str): name of the feature branch. """ if branch == u'master': return self.RunCommand(u'git push origin --delete {0:s}'.format(branch)) self.RunCommand(u'git branch -D {0:s}'.format(branch)) def SynchronizeWithOrigin(self): """Synchronizes git with origin. Returns: bool: True if the git repository has synchronized with origin. """ exit_code, _, _ = self.RunCommand(u'git fetch origin') if exit_code != 0: return False exit_code, _, _ = self.RunCommand(u'git pull --no-edit origin master') return exit_code == 0 def SynchronizeWithUpstream(self): """Synchronizes git with upstream. Returns: bool: True if the git repository has synchronized with upstream. """ exit_code, _, _ = self.RunCommand(u'git fetch upstream') if exit_code != 0: return False exit_code, _, _ = self.RunCommand( u'git pull --no-edit --rebase upstream master') if exit_code != 0: return False exit_code, _, _ = self.RunCommand(u'git push') return exit_code == 0 def SwitchToMasterBranch(self): """Switches git to the master branch. Returns: bool: True if the git repository has switched to the master branch. """ exit_code, _, _ = self.RunCommand(u'git checkout master') return exit_code == 0
# coding=utf-8 # Copyright 2022 The TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """WMT: Translate dataset.""" import codecs import functools import gzip import itertools import os import re import xml.etree.cElementTree as ElementTree from absl import logging import six import tensorflow as tf import tensorflow_datasets.public_api as tfds _DESCRIPTION = """\ Translate dataset based on the data from statmt.org. Versions exists for the different years using a combination of multiple data sources. The base `wmt_translate` allows you to create your own config to choose your own data/language pair by creating a custom `tfds.translate.wmt.WmtConfig`. ``` config = tfds.translate.wmt.WmtConfig( version="0.0.1", language_pair=("fr", "de"), subsets={ tfds.Split.TRAIN: ["commoncrawl_frde"], tfds.Split.VALIDATION: ["euelections_dev2019"], }, ) builder = tfds.builder("wmt_translate", config=config) ``` """ CWMT_SUBSET_NAMES = [ "casia2015", "casict2011", "casict2015", "datum2015", "datum2017", "neu2017" ] class SubDataset(object): """Class to keep track of information on a sub-dataset of WMT.""" def __init__(self, name, target, sources, url, path, manual_dl_files=None): """Sub-dataset of WMT. Args: name: `string`, a unique dataset identifier. target: `string`, the target language code. sources: `set<string>`, the set of source language codes. url: `string` or `(string, string)`, URL(s) or URL template(s) specifying where to download the raw data from. 
If two strings are provided, the first is used for the source language and the second for the target. Template strings can either contain '{src}' placeholders that will be filled in with the source language code, '{0}' and '{1}' placeholders that will be filled in with the source and target language codes in alphabetical order, or all 3. path: `string` or `(string, string)`, path(s) or path template(s) specifing the path to the raw data relative to the root of the downloaded archive. If two strings are provided, the dataset is assumed to be made up of parallel text files, the first being the source and the second the target. If one string is provided, both languages are assumed to be stored within the same file and the extension is used to determine how to parse it. Template strings should be formatted the same as in `url`. manual_dl_files: `<list>(string)` (optional), the list of files that must be manually downloaded to the data directory. """ self._paths = (path,) if isinstance(path, six.string_types) else path self._urls = (url,) if isinstance(url, six.string_types) else url self._manual_dl_files = manual_dl_files if manual_dl_files else [] self.name = name self.target = target self.sources = set(sources) def _inject_language(self, src, strings): """Injects languages into (potentially) template strings.""" if src not in self.sources: raise ValueError("Invalid source for '{0}': {1}".format(self.name, src)) def _format_string(s): if "{0}" in s and "{1}" and "{src}" in s: return s.format(*sorted([src, self.target]), src=src) elif "{0}" in s and "{1}" in s: return s.format(*sorted([src, self.target])) elif "{src}" in s: return s.format(src=src) else: return s return [_format_string(s) for s in strings] def get_url(self, src): return self._inject_language(src, self._urls) def get_manual_dl_files(self, src): return self._inject_language(src, self._manual_dl_files) def get_path(self, src): return self._inject_language(src, self._paths) # Subsets used in the training 
sets for various years of WMT. _TRAIN_SUBSETS = [ # pylint:disable=line-too-long SubDataset( name="commoncrawl", target="en", # fr-de pair in commoncrawl_frde sources={"cs", "de", "es", "fr", "ru"}, url="http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", path=("commoncrawl.{src}-en.{src}", "commoncrawl.{src}-en.en")), SubDataset( name="commoncrawl_frde", target="de", sources={"fr"}, url=( "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.fr.gz", "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/commoncrawl.de.gz" ), path=("", "")), SubDataset( name="czeng_10", target="en", sources={"cs"}, url="http://ufal.mff.cuni.cz/czeng/czeng10", manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], # Each tar contains multiple files, which we process specially in # _parse_czeng. path=("data.plaintext-format/??train.gz",) * 10), SubDataset( name="czeng_16pre", target="en", sources={"cs"}, url="http://ufal.mff.cuni.cz/czeng/czeng16pre", manual_dl_files=["czeng16pre.deduped-ignoring-sections.txt.gz"], path=""), SubDataset( name="czeng_16", target="en", sources={"cs"}, url="http://ufal.mff.cuni.cz/czeng", manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], # Each tar contains multiple files, which we process specially in # _parse_czeng. path=("data.plaintext-format/??train.gz",) * 10), SubDataset( # This dataset differs from the above in the filtering that is applied # during parsing. name="czeng_17", target="en", sources={"cs"}, url="http://ufal.mff.cuni.cz/czeng", manual_dl_files=["data-plaintext-format.%d.tar" % i for i in range(10)], # Each tar contains multiple files, which we process specially in # _parse_czeng. 
path=("data.plaintext-format/??train.gz",) * 10), SubDataset( name="dcep_v1", target="en", sources={"lv"}, url="http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz", path=("dcep.en-lv/dcep.lv", "dcep.en-lv/dcep.en")), SubDataset( name="europarl_v7", target="en", sources={"cs", "de", "es", "fr"}, url="http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", path=("training/europarl-v7.{src}-en.{src}", "training/europarl-v7.{src}-en.en")), SubDataset( name="europarl_v7_frde", target="de", sources={"fr"}, url=( "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.fr.gz", "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/europarl-v7.de.gz" ), path=("", "")), SubDataset( name="europarl_v8_18", target="en", sources={"et", "fi"}, url="http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz", path=("training/europarl-v8.{src}-en.{src}", "training/europarl-v8.{src}-en.en")), SubDataset( name="europarl_v8_16", target="en", sources={"fi", "ro"}, url="http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz", path=("training-parallel-ep-v8/europarl-v8.{src}-en.{src}", "training-parallel-ep-v8/europarl-v8.{src}-en.en")), SubDataset( name="europarl_v9", target="en", sources={"cs", "de", "fi", "lt"}, url="http://www.statmt.org/europarl/v9/training/europarl-v9.{src}-en.tsv.gz", path=""), SubDataset( name="gigafren", target="en", sources={"fr"}, url="http://www.statmt.org/wmt10/training-giga-fren.tar", path=("giga-fren.release2.fixed.fr.gz", "giga-fren.release2.fixed.en.gz")), SubDataset( name="hindencorp_01", target="en", sources={"hi"}, url="http://ufallab.ms.mff.cuni.cz/~bojar/hindencorp", manual_dl_files=["hindencorp0.1.gz"], path=""), SubDataset( name="leta_v1", target="en", sources={"lv"}, url="http://data.statmt.org/wmt17/translation-task/leta.v1.tgz", path=("LETA-lv-en/leta.lv", "LETA-lv-en/leta.en")), SubDataset( name="multiun", target="en", sources={"es", "fr"}, 
url="http://www.statmt.org/wmt13/training-parallel-un.tgz", path=("un/undoc.2000.{src}-en.{src}", "un/undoc.2000.{src}-en.en")), SubDataset( name="newscommentary_v8", target="en", sources={"cs", "de", "fr", "es", "ru"}, url="http://www.statmt.org/wmt13/training-parallel-nc-v8.tgz", path=("training/news-commentary-v8.{src}-en.{src}", "training/news-commentary-v8.{src}-en.en")), SubDataset( name="newscommentary_v9", target="en", sources={"cs", "de", "fr", "ru"}, url="http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz", path=("training/news-commentary-v9.{src}-en.{src}", "training/news-commentary-v9.{src}-en.en")), SubDataset( name="newscommentary_v10", target="en", sources={"cs", "de", "fr", "ru"}, url="http://www.statmt.org/wmt15/training-parallel-nc-v10.tgz", path=("news-commentary-v10.{src}-en.{src}", "news-commentary-v10.{src}-en.en")), SubDataset( name="newscommentary_v11", target="en", sources={"cs", "de", "ru"}, url="http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz", path=("training-parallel-nc-v11/news-commentary-v11.{src}-en.{src}", "training-parallel-nc-v11/news-commentary-v11.{src}-en.en")), SubDataset( name="newscommentary_v12", target="en", sources={"cs", "de", "ru", "zh"}, url="http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz", path=("training/news-commentary-v12.{src}-en.{src}", "training/news-commentary-v12.{src}-en.en")), SubDataset( name="newscommentary_v13", target="en", sources={"cs", "de", "ru", "zh"}, url="http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", path=("training-parallel-nc-v13/news-commentary-v13.{src}-en.{src}", "training-parallel-nc-v13/news-commentary-v13.{src}-en.en")), SubDataset( name="newscommentary_v14", target="en", # fr-de pair in newscommentary_v14_frde sources={"cs", "de", "kk", "ru", "zh"}, url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.{0}-{1}.tsv.gz", path=""), SubDataset( name="newscommentary_v14_frde", 
target="de", sources={"fr"}, url="http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.de-fr.tsv.gz", path=""), SubDataset( name="onlinebooks_v1", target="en", sources={"lv"}, url="http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz", path=("farewell/farewell.lv", "farewell/farewell.en")), SubDataset( name="paracrawl_v1", target="en", sources={"cs", "de", "et", "fi", "ru"}, url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-{src}.zipporah0-dedup-clean.tgz", path=("paracrawl-release1.en-{src}.zipporah0-dedup-clean.{src}", "paracrawl-release1.en-{src}.zipporah0-dedup-clean.en")), SubDataset( name="paracrawl_v1_ru", target="en", sources={"ru"}, url="https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz", path=("paracrawl-release1.en-ru.zipporah0-dedup-clean.ru", "paracrawl-release1.en-ru.zipporah0-dedup-clean.en")), SubDataset( name="paracrawl_v3", target="en", # fr-de pair in paracrawl_v3_frde sources={"cs", "de", "fi", "lt"}, url="https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-{src}.bicleaner07.tmx.gz", path=""), SubDataset( name="paracrawl_v3_frde", target="de", sources={"fr"}, url=( "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.de.gz", "http://data.statmt.org/wmt19/translation-task/fr-de/bitexts/de-fr.bicleaner07.fr.gz" ), path=("", "")), SubDataset( name="rapid_2016", target="en", sources={"de", "et", "fi"}, url="http://data.statmt.org/wmt18/translation-task/rapid2016.tgz", path=("rapid2016.{0}-{1}.{src}", "rapid2016.{0}-{1}.en")), SubDataset( name="rapid_2016_ltfi", target="en", sources={"fi", "lt"}, url="https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-{src}.tmx.zip", path="rapid2016.en-{src}.tmx"), SubDataset( name="rapid_2019", target="en", sources={"de"}, url="https://s3-eu-west-1.amazonaws.com/tilde-model/rapid2019.de-en.zip", path=("rapid2019.de-en.de", 
"rapid2019.de-en.en")), SubDataset( name="setimes_2", target="en", sources={"ro", "tr"}, url="http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-{src}.tmx.gz", path=""), SubDataset( name="uncorpus_v1", target="en", sources={"ru", "zh"}, url="https://storage.googleapis.com/tfds-data/downloads/uncorpus/UNv1.0.en-{src}.tar.gz", path=("en-{src}/UNv1.0.en-{src}.{src}", "en-{src}/UNv1.0.en-{src}.en")), SubDataset( name="wikiheadlines_fi", target="en", sources={"fi"}, url="http://www.statmt.org/wmt15/wiki-titles.tgz", path="wiki/fi-en/titles.fi-en"), SubDataset( name="wikiheadlines_hi", target="en", sources={"hi"}, url="http://www.statmt.org/wmt14/wiki-titles.tgz", path="wiki/hi-en/wiki-titles.hi-en"), SubDataset( # Verified that wmt13, wmt14 and wmt15 files are identical. name="wikiheadlines_ru", target="en", sources={"ru"}, url="http://www.statmt.org/wmt15/wiki-titles.tgz", path="wiki/ru-en/wiki.ru-en"), SubDataset( name="wikititles_v1", target="en", sources={"cs", "de", "fi", "gu", "kk", "lt", "ru", "zh"}, url="http://data.statmt.org/wikititles/v1/wikititles-v1.{src}-en.tsv.gz", path=""), SubDataset( name="yandexcorpus", target="en", sources={"ru"}, url="https://translate.yandex.ru/corpus?lang=en", manual_dl_files=["1mcorpus.zip"], path=("corpus.en_ru.1m.ru", "corpus.en_ru.1m.en")), # pylint:enable=line-too-long ] + [ SubDataset( # pylint:disable=g-complex-comprehension name=ss, target="en", sources={"zh"}, url="ftp://cwmt-wmt:cwmt-wmt@nlp.nju.edu.cn/parallel/%s.zip" % ss, path=("%s/*_c[hn].txt" % ss, "%s/*_en.txt" % ss)) for ss in CWMT_SUBSET_NAMES ] _DEV_SUBSETS = [ SubDataset( name="euelections_dev2019", target="de", sources={"fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/euelections_dev2019.fr-de.src.fr", "dev/euelections_dev2019.fr-de.tgt.de")), SubDataset( name="newsdev2014", target="en", sources={"hi"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2014.hi", "dev/newsdev2014.en")), SubDataset( 
name="newsdev2015", target="en", sources={"fi"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2015-fien-src.{src}.sgm", "dev/newsdev2015-fien-ref.en.sgm")), SubDataset( name="newsdiscussdev2015", target="en", sources={"ro", "tr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm")), SubDataset( name="newsdev2016", target="en", sources={"ro", "tr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2016-{src}en-src.{src}.sgm", "dev/newsdev2016-{src}en-ref.en.sgm")), SubDataset( name="newsdev2017", target="en", sources={"lv", "zh"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2017-{src}en-src.{src}.sgm", "dev/newsdev2017-{src}en-ref.en.sgm")), SubDataset( name="newsdev2018", target="en", sources={"et"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2018-{src}en-src.{src}.sgm", "dev/newsdev2018-{src}en-ref.en.sgm")), SubDataset( name="newsdev2019", target="en", sources={"gu", "kk", "lt"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdev2019-{src}en-src.{src}.sgm", "dev/newsdev2019-{src}en-ref.en.sgm")), SubDataset( name="newsdiscussdev2015", target="en", sources={"fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdiscussdev2015-{src}en-src.{src}.sgm", "dev/newsdiscussdev2015-{src}en-ref.en.sgm")), SubDataset( name="newsdiscusstest2015", target="en", sources={"fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm")), SubDataset( name="newssyscomb2009", target="en", sources={"cs", "de", "es", "fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newssyscomb2009.{src}", "dev/newssyscomb2009.en")), SubDataset( name="newstest2008", 
target="en", sources={"cs", "de", "es", "fr", "hu"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/news-test2008.{src}", "dev/news-test2008.en")), SubDataset( name="newstest2009", target="en", sources={"cs", "de", "es", "fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2009.{src}", "dev/newstest2009.en")), SubDataset( name="newstest2010", target="en", sources={"cs", "de", "es", "fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2010.{src}", "dev/newstest2010.en")), SubDataset( name="newstest2011", target="en", sources={"cs", "de", "es", "fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2011.{src}", "dev/newstest2011.en")), SubDataset( name="newstest2012", target="en", sources={"cs", "de", "es", "fr", "ru"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2012.{src}", "dev/newstest2012.en")), SubDataset( name="newstest2013", target="en", sources={"cs", "de", "es", "fr", "ru"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2013.{src}", "dev/newstest2013.en")), SubDataset( name="newstest2014", target="en", sources={"cs", "de", "es", "fr", "hi", "ru"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2014-{src}en-src.{src}.sgm", "dev/newstest2014-{src}en-ref.en.sgm")), SubDataset( name="newstest2015", target="en", sources={"cs", "de", "fi", "ru"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2015-{src}en-src.{src}.sgm", "dev/newstest2015-{src}en-ref.en.sgm")), SubDataset( name="newsdiscusstest2015", target="en", sources={"fr"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newsdiscusstest2015-{src}en-src.{src}.sgm", "dev/newsdiscusstest2015-{src}en-ref.en.sgm")), SubDataset( name="newstest2016", target="en", sources={"cs", "de", "fi", "ro", "ru", "tr"}, 
url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2016-{src}en-src.{src}.sgm", "dev/newstest2016-{src}en-ref.en.sgm")), SubDataset( name="newstestB2016", target="en", sources={"fi"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstestB2016-enfi-ref.{src}.sgm", "dev/newstestB2016-enfi-src.en.sgm")), SubDataset( name="newstest2017", target="en", sources={"cs", "de", "fi", "lv", "ru", "tr", "zh"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2017-{src}en-src.{src}.sgm", "dev/newstest2017-{src}en-ref.en.sgm")), SubDataset( name="newstestB2017", target="en", sources={"fi"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstestB2017-fien-src.fi.sgm", "dev/newstestB2017-fien-ref.en.sgm")), SubDataset( name="newstest2018", target="en", sources={"cs", "de", "et", "fi", "ru", "tr", "zh"}, url="http://data.statmt.org/wmt19/translation-task/dev.tgz", path=("dev/newstest2018-{src}en-src.{src}.sgm", "dev/newstest2018-{src}en-ref.en.sgm")), ] DATASET_MAP = {ds.name: ds for ds in _TRAIN_SUBSETS + _DEV_SUBSETS} _CZENG17_FILTER = SubDataset( name="czeng17_filter", target="en", sources={"cs"}, url="http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip", path="convert_czeng16_to_17.pl") class WmtConfig(tfds.core.BuilderConfig): """BuilderConfig for WMT.""" def __init__(self, *, url=None, citation=None, description=None, language_pair=(None, None), subsets=None, **kwargs): """BuilderConfig for WMT. Args: url: The reference URL for the dataset. citation: The paper citation for the dataset. description: The description of the dataset. language_pair: pair of languages that will be used for translation. Should contain 2 letter coded strings. For example: ("en", "de"). subsets: Dict[split, list[str]]. List of the subset to use for each of the split. Note that WMT subclasses overwrite this parameter. **kwargs: keyword arguments forwarded to super. 
""" name = "%s-%s" % (language_pair[0], language_pair[1]) if "name" in kwargs: # Add name suffix for custom configs name += "." + kwargs.pop("name") super(WmtConfig, self).__init__( name=name, description=description, **kwargs) self.url = url or "http://www.statmt.org" self.citation = citation self.language_pair = language_pair self.subsets = subsets class WmtTranslate(tfds.core.GeneratorBasedBuilder): """WMT translation dataset.""" MANUAL_DOWNLOAD_INSTRUCTIONS = """\ Some of the wmt configs here, require a manual download. Please look into wmt.py to see the exact path (and file name) that has to be downloaded. """ def __init__(self, *args, **kwargs): if type(self) == WmtTranslate and "config" not in kwargs: # pylint: disable=unidiomatic-typecheck raise ValueError( "The raw `wmt_translate` can only be instantiated with the config " "kwargs. You may want to use one of the `wmtYY_translate` " "implementation instead to get the WMT dataset for a specific year.") super(WmtTranslate, self).__init__(*args, **kwargs) @property def _subsets(self): """Subsets that make up each split of the dataset.""" return self.builder_config.subsets @property def subsets(self): """Subsets that make up each split of the dataset for the language pair.""" source, target = self.builder_config.language_pair filtered_subsets = {} for split, ss_names in self._subsets.items(): filtered_subsets[split] = [] for ss_name in ss_names: ds = DATASET_MAP[ss_name] if ds.target != target or source not in ds.sources: logging.info( "Skipping sub-dataset that does not include language pair: %s", ss_name) else: filtered_subsets[split].append(ss_name) logging.info("Using sub-datasets: %s", filtered_subsets) return filtered_subsets def _info(self): src, target = self.builder_config.language_pair return tfds.core.DatasetInfo( builder=self, description=_DESCRIPTION, features=tfds.features.Translation( languages=self.builder_config.language_pair,), supervised_keys=(src, target), homepage=self.builder_config.url, 
citation=self.builder_config.citation, ) def _split_generators(self, dl_manager): source, _ = self.builder_config.language_pair def _check_manual_files(ds): """Verifies the manual files are downloaded for the given sub-dataset.""" manual_dl_files = ds.get_manual_dl_files(source) manual_paths = [] for fname in manual_dl_files: manual_path = os.path.join(dl_manager.manual_dir, fname) if not tf.io.gfile.exists(manual_path): raise AssertionError( "For {0}, you must manually download the following file(s) " "from {1} and place them in {2}: {3}".format( ds.name, ds.get_url(source), dl_manager.manual_dir, ", ".join(manual_dl_files))) manual_paths.append(manual_path) return manual_paths manual_paths = {} urls_to_download = {} for ss_name in itertools.chain.from_iterable(self.subsets.values()): if ss_name == "czeng_17": # CzEng1.7 is CzEng1.6 with some blocks filtered out. We must download # the filtering script so we can parse out which blocks need to be # removed. urls_to_download[_CZENG17_FILTER.name] = _CZENG17_FILTER.get_url(source) ds = DATASET_MAP[ss_name] if ds.get_manual_dl_files(source): manual_paths[ss_name] = _check_manual_files(ds) else: urls_to_download[ss_name] = ds.get_url(source) # Download and extract files from URLs. downloaded_files = dl_manager.download_and_extract(urls_to_download) # Extract manually downloaded files. 
manual_files = dl_manager.extract(manual_paths) manual_files = tf.nest.map_structure(os.fspath, manual_files) downloaded_files = tf.nest.map_structure(os.fspath, downloaded_files) extraction_map = dict(downloaded_files, **manual_files) return [ tfds.core.SplitGenerator( # pylint:disable=g-complex-comprehension name=split, gen_kwargs={ "split_subsets": split_subsets, "extraction_map": extraction_map }) for split, split_subsets in self.subsets.items() ] def _generate_examples(self, split_subsets, extraction_map): """Returns the examples in the raw (text) form.""" source, _ = self.builder_config.language_pair def _get_local_paths(ds, extract_dirs): rel_paths = ds.get_path(source) if len(extract_dirs) == 1: extract_dirs = extract_dirs * len(rel_paths) return [ os.path.join(ex_dir, rel_path) if rel_path else ex_dir for ex_dir, rel_path in zip(extract_dirs, rel_paths) ] for ss_name in split_subsets: logging.info("Generating examples from: %s", ss_name) ds = DATASET_MAP[ss_name] extract_dirs = extraction_map[ss_name] files = _get_local_paths(ds, extract_dirs) if ss_name.startswith("czeng"): if ss_name.endswith("16pre"): sub_generator = functools.partial( _parse_tsv, language_pair=("en", "cs")) elif ss_name.endswith("17"): filter_path = _get_local_paths( _CZENG17_FILTER, extraction_map[_CZENG17_FILTER.name])[0] sub_generator = functools.partial( _parse_czeng, filter_path=filter_path) else: sub_generator = _parse_czeng elif ss_name == "hindencorp_01": sub_generator = _parse_hindencorp elif len(files) == 2: if ss_name.endswith("_frde"): sub_generator = _parse_frde_bitext else: sub_generator = _parse_parallel_sentences elif len(files) == 1: fname = files[0] # Note: Due to formatting used by `download_manager`, the file # extension may not be at the end of the file path. 
if ".tsv" in fname: sub_generator = _parse_tsv elif ss_name.startswith("newscommentary_v14"): sub_generator = functools.partial( _parse_tsv, language_pair=self.builder_config.language_pair) elif "tmx" in fname: sub_generator = _parse_tmx elif ss_name.startswith("wikiheadlines"): sub_generator = _parse_wikiheadlines else: raise ValueError("Unsupported file format: %s" % fname) else: raise ValueError("Invalid number of files: %d" % len(files)) for sub_key, ex in sub_generator(*files): if not all(ex.values()): continue # TODO(adarob): Add subset feature. # ex["subset"] = subset key = "{}/{}".format(ss_name, sub_key) yield key, ex def _parse_parallel_sentences(f1, f2): """Returns examples from parallel SGML or text files, which may be gzipped.""" def _parse_text(path): """Returns the sentences from a single text file, which may be gzipped.""" split_path = path.split(".") if split_path[-1] == "gz": lang = split_path[-2] with tf.io.gfile.GFile(path, "rb") as f, gzip.GzipFile(fileobj=f) as g: return g.read().decode("utf-8").split("\n"), lang if split_path[-1] == "txt": # CWMT lang = split_path[-2].split("_")[-1] lang = "zh" if lang in ("ch", "cn") else lang else: lang = split_path[-1] with tf.io.gfile.GFile(path) as f: return f.read().split("\n"), lang def _parse_sgm(path): """Returns sentences from a single SGML file.""" lang = path.split(".")[-2] sentences = [] # Note: We can't use the XML parser since some of the files are badly # formatted. seg_re = re.compile(r"<seg id=\"\d+\">(.*)</seg>") with tf.io.gfile.GFile(path) as f: for line in f: seg_match = re.match(seg_re, line) if seg_match: assert len(seg_match.groups()) == 1 sentences.append(seg_match.groups()[0]) return sentences, lang parse_file = _parse_sgm if f1.endswith(".sgm") else _parse_text # Some datasets (e.g., CWMT) contain multiple parallel files specified with # a wildcard. We sort both sets to align them and parse them one by one. 
f1_files = tf.io.gfile.glob(f1) f2_files = tf.io.gfile.glob(f2) assert f1_files and f2_files, "No matching files found: %s, %s." % (f1, f2) assert len(f1_files) == len(f2_files), ( "Number of files do not match: %d vs %d for %s vs %s." % (len(f1_files), len(f2_files), f1, f2)) for f_id, (f1_i, f2_i) in enumerate(zip(sorted(f1_files), sorted(f2_files))): l1_sentences, l1 = parse_file(f1_i) l2_sentences, l2 = parse_file(f2_i) assert len(l1_sentences) == len(l2_sentences), ( "Sizes do not match: %d vs %d for %s vs %s." % (len(l1_sentences), len(l2_sentences), f1_i, f2_i)) for line_id, (s1, s2) in enumerate(zip(l1_sentences, l2_sentences)): key = "{}/{}".format(f_id, line_id) yield key, {l1: s1, l2: s2} def _parse_frde_bitext(fr_path, de_path): with tf.io.gfile.GFile(fr_path) as f: fr_sentences = f.read().split("\n") with tf.io.gfile.GFile(de_path) as f: de_sentences = f.read().split("\n") assert len(fr_sentences) == len(de_sentences), ( "Sizes do not match: %d vs %d for %s vs %s." % (len(fr_sentences), len(de_sentences), fr_path, de_path)) for line_id, (s1, s2) in enumerate(zip(fr_sentences, de_sentences)): yield line_id, {"fr": s1, "de": s2} def _parse_tmx(path): """Generates examples from TMX file.""" def _get_tuv_lang(tuv): for k, v in tuv.items(): if k.endswith("}lang"): return v raise AssertionError("Language not found in `tuv` attributes.") def _get_tuv_seg(tuv): segs = tuv.findall("seg") assert len(segs) == 1, "Invalid number of segments: %d" % len(segs) return segs[0].text with tf.io.gfile.GFile(path, "rb") as f: if six.PY3: # Workaround due to: https://github.com/tensorflow/tensorflow/issues/33563 utf_f = codecs.getreader("utf-8")(f) else: utf_f = f for line_id, (_, elem) in enumerate(ElementTree.iterparse(utf_f)): # pytype: disable=wrong-arg-types if elem.tag == "tu": yield line_id, { _get_tuv_lang(tuv): _get_tuv_seg(tuv) for tuv in elem.iterfind("tuv") } elem.clear() def _parse_tsv(path, language_pair=None): """Generates examples from TSV file.""" if 
language_pair is None: lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])\.tsv", path) assert lang_match is not None, "Invalid TSV filename: %s" % path l1, l2 = lang_match.groups() else: l1, l2 = language_pair with tf.io.gfile.GFile(path) as f: for j, line in enumerate(f): cols = line.split("\t") if len(cols) != 2: logging.warning("Skipping line %d in TSV (%s) with %d != 2 columns.", j, path, len(cols)) continue s1, s2 = cols yield j, {l1: s1.strip(), l2: s2.strip()} def _parse_wikiheadlines(path): """Generates examples from Wikiheadlines dataset file.""" lang_match = re.match(r".*\.([a-z][a-z])-([a-z][a-z])$", path) assert lang_match is not None, "Invalid Wikiheadlines filename: %s" % path l1, l2 = lang_match.groups() with tf.io.gfile.GFile(path) as f: for line_id, line in enumerate(f): s1, s2 = line.split("|||") yield line_id, {l1: s1.strip(), l2: s2.strip()} def _parse_czeng(*paths, **kwargs): """Generates examples from CzEng v1.6, with optional filtering for v1.7.""" filter_path = kwargs.get("filter_path", None) if filter_path: re_block = re.compile(r"^[^-]+-b(\d+)-\d\d[tde]") with tf.io.gfile.GFile(filter_path) as f: bad_blocks = set( re.search(r"qw{([\s\d]*)}", f.read()).groups()[0].split()) # pytype: disable=attribute-error logging.info("Loaded %d bad blocks to filter from CzEng v1.6 to make v1.7.", len(bad_blocks)) for path in paths: for gz_path in tf.io.gfile.glob(path): with tf.io.gfile.GFile(gz_path, "rb") as g, gzip.GzipFile(fileobj=g) as f: filename = os.path.basename(gz_path) for line_id, line in enumerate(f): line = line.decode("utf-8") # required for py3 if not line.strip(): continue id_, unused_score, cs, en = line.split("\t") if filter_path: block_match = re.match(re_block, id_) if block_match and block_match.groups()[0] in bad_blocks: continue sub_key = "{}/{}".format(filename, line_id) yield sub_key, { "cs": cs.strip(), "en": en.strip(), } def _parse_hindencorp(path): with tf.io.gfile.GFile(path) as f: for line_id, line in enumerate(f): 
split_line = line.split("\t") if len(split_line) != 5: logging.warning("Skipping invalid HindEnCorp line: %s", line) continue yield line_id, {"en": split_line[3].strip(), "hi": split_line[4].strip()}
''' Shapeless handling of WKB geometries.

Use approximate_wkb() to copy an approximate well-known binary representation
of a geometry. Along the way, reduce precision of double floating point
coordinates by replacing their three least-significant bytes with nulls.
The resulting WKB will match the original at up to 26 bits of precision,
close enough for spherical mercator zoom 18 street scale geography.

Reduced-precision WKB geometries will compress as much as 50% smaller
with zlib.

See also:
    http://edndoc.esri.com/arcsde/9.0/general_topics/wkb_representation.htm
    http://en.wikipedia.org/wiki/Double-precision_floating-point_format
'''
from struct import unpack

# WKB is binary data, so the streams must be byte-oriented. The earlier
# StringIO-based implementation broke under Python 3 (io.StringIO rejects
# bytes); BytesIO behaves correctly on both Python 2 and Python 3.
from io import BytesIO

#
# wkbByteOrder
#
wkbXDR = 0 # Big Endian
wkbNDR = 1 # Little Endian

#
# wkbGeometryType
#
wkbPoint = 1
wkbLineString = 2
wkbPolygon = 3
wkbMultiPoint = 4
wkbMultiLineString = 5
wkbMultiPolygon = 6
wkbGeometryCollection = 7

wkbMultis = wkbMultiPoint, wkbMultiLineString, wkbMultiPolygon, wkbGeometryCollection

# Bytes written in place of the three least-significant bytes of each
# coordinate's significand. Must be bytes, not str, for Python 3 streams.
_NULLS = b'\x00\x00\x00'

def copy_byte(src, dest):
    ''' Copy an unsigned byte between files, and return it.
    '''
    byte = src.read(1)
    dest.write(byte)
    (val, ) = unpack('B', byte)
    return val

def copy_int_little(src, dest):
    ''' Copy a little-endian unsigned 4-byte int between files, and return it.
    '''
    word = src.read(4)
    dest.write(word)
    (val, ) = unpack('<I', word)
    return val

def copy_int_big(src, dest):
    ''' Copy a big-endian unsigned 4-byte int between files, and return it.
    '''
    word = src.read(4)
    dest.write(word)
    (val, ) = unpack('>I', word)
    return val

def approx_point_little(src, dest):
    ''' Copy a pair of little-endian doubles between files, truncating significands.
    '''
    xy = src.read(2 * 8)

    # Little-endian doubles store the least-significant bytes first, so the
    # three truncated bytes lead each 8-byte coordinate.
    dest.write(_NULLS)
    dest.write(xy[-13:-8])
    dest.write(_NULLS)
    dest.write(xy[-5:])

def approx_point_big(src, dest):
    ''' Copy a pair of big-endian doubles between files, truncating significands.
    '''
    xy = src.read(2 * 8)

    # Big-endian doubles store the least-significant bytes last, so the
    # three truncated bytes trail each 8-byte coordinate.
    dest.write(xy[:5])
    dest.write(_NULLS)
    dest.write(xy[8:13])
    dest.write(_NULLS)

def approx_line(src, dest, copy_int, approx_point):
    ''' Copy a linestring/ring: a point count followed by that many points.
    '''
    points = copy_int(src, dest)

    for i in range(points):
        approx_point(src, dest)

def approx_polygon(src, dest, copy_int, approx_point):
    ''' Copy a polygon: a ring count followed by that many linear rings.
    '''
    rings = copy_int(src, dest)

    for i in range(rings):
        approx_line(src, dest, copy_int, approx_point)

def approx_geometry(src, dest):
    ''' Copy a single WKB geometry, recursing into multi-part geometries.

        Raises ValueError on an unknown byte order or geometry type.
    '''
    byte_order = copy_byte(src, dest)

    # The leading byte selects the endianness of everything that follows.
    if byte_order == wkbNDR:
        copy_int = copy_int_little
        approx_point = approx_point_little
    elif byte_order == wkbXDR:
        copy_int = copy_int_big
        approx_point = approx_point_big
    else:
        raise ValueError(byte_order)

    geom_type = copy_int(src, dest)

    if geom_type == wkbPoint:
        approx_point(src, dest)
    elif geom_type == wkbLineString:
        approx_line(src, dest, copy_int, approx_point)
    elif geom_type == wkbPolygon:
        approx_polygon(src, dest, copy_int, approx_point)
    elif geom_type in wkbMultis:
        # Each part of a multi-geometry is a complete WKB geometry with its
        # own byte-order marker, so recurse.
        parts = copy_int(src, dest)
        for i in range(parts):
            approx_geometry(src, dest)
    else:
        raise ValueError(geom_type)

def approximate_wkb(wkb_in):
    ''' Return an approximation of the input WKB with lower-precision geometry.

        Accepts and returns bytes; the output has the same length as the
        input, with the three least-significant bytes of every coordinate
        replaced by nulls.
    '''
    src, dest = BytesIO(wkb_in), BytesIO()
    approx_geometry(src, dest)
    wkb_out = dest.getvalue()

    assert len(wkb_in) == src.tell(), 'The whole WKB was not processed'
    assert len(wkb_in) == len(wkb_out), 'The output WKB is the wrong length'

    return wkb_out

if __name__ == '__main__':

    # Round-trip self-test against shapely (not run on import).
    from random import random
    from math import hypot
    from shapely.wkb import loads
    from shapely.geometry import *

    point1 = Point(random(), random())
    point2 = loads(approximate_wkb(point1.wkb))

    assert hypot(point1.x - point2.x, point1.y - point2.y) < 1e-8

    point1 = Point(random(), random())
    point2 = Point(random(), random())
    point3 = point1.union(point2)
    point4 = loads(approximate_wkb(point3.wkb))

    assert hypot(point3.geoms[0].x - point4.geoms[0].x,
                 point3.geoms[0].y - point4.geoms[0].y) < 1e-8
    assert hypot(point3.geoms[1].x - point4.geoms[1].x,
                 point3.geoms[1].y - point4.geoms[1].y) < 1e-8

    line1 = Point(random(), random()).buffer(1 + random(), 3).exterior
    line2 = loads(approximate_wkb(line1.wkb))

    assert abs(1. - line2.length / line1.length) < 1e-8

    line1 = Point(random(), random()).buffer(1 + random(), 3).exterior
    line2 = Point(random(), random()).buffer(1 + random(), 3).exterior
    line3 = MultiLineString([line1, line2])
    line4 = loads(approximate_wkb(line3.wkb))

    assert abs(1. - line4.length / line3.length) < 1e-8

    poly1 = Point(random(), random()).buffer(1 + random(), 3)
    poly2 = loads(approximate_wkb(poly1.wkb))

    assert abs(1. - poly2.area / poly1.area) < 1e-8

    poly1 = Point(random(), random()).buffer(2 + random(), 3)
    poly2 = Point(random(), random()).buffer(1 + random(), 3)
    poly3 = poly1.difference(poly2)
    poly4 = loads(approximate_wkb(poly3.wkb))

    assert abs(1. - poly4.area / poly3.area) < 1e-8

    poly1 = Point(random(), 2 + random()).buffer(1 + random(), 3)
    poly2 = Point(2 + random(), random()).buffer(1 + random(), 3)
    poly3 = poly1.union(poly2)
    poly4 = loads(approximate_wkb(poly3.wkb))

    assert abs(1. - poly4.area / poly3.area) < 1e-8
# Copyright (c) 2014 Alex Meade.  All rights reserved.
# Copyright (c) 2014 Clinton Knight.  All rights reserved.
# Copyright (c) 2014 Andrew Kerr.  All rights reserved.
# Copyright (c) 2015 Tom Barron.  All rights reserved.
# Copyright (c) 2015 Goutham Pacha Ravi. All rights reserved.
# Copyright (c) 2015 Dustin Schoenbrun. All rights reserved.
# Copyright (c) 2016 Chuck Fouts. All rights reserved.
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
"""
Mock unit tests for the NetApp block storage library
"""

import copy
import uuid

import ddt
import mock
from oslo_log import versionutils
from oslo_utils import units
import six

from cinder import context
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
import cinder.tests.unit.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.utils import loopingcalls
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils


@ddt.ddt
class NetAppBlockStorageLibraryTestCase(test.TestCase):
    """Unit tests for block_base.NetAppBlockStorageLibrary.

    The ZAPI client is replaced with a mock in setUp, so every test
    exercises only the library's own logic.
    """

    def setUp(self):
        super(NetAppBlockStorageLibraryTestCase, self).setUp()

        kwargs = {
            'configuration': self.get_config_base(),
            'host': 'openstack@netappblock',
        }
        self.library = block_base.NetAppBlockStorageLibrary(
            'driver', 'protocol', **kwargs)
        # All backend calls go through this mock; assertions below inspect it.
        self.library.zapi_client = mock.Mock()
        self.zapi_client = self.library.zapi_client
        self.mock_request = mock.Mock()
        self.ctxt = context.RequestContext('fake', 'fake', auth_token=True)

    def get_config_base(self):
        return na_fakes.create_configuration()

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_get_reserved_percentage_default_multipler(self, mock_report):
        default = 1.2
        reserved_percentage = 20.0
        self.library.configuration.netapp_size_multiplier = default
        self.library.configuration.reserved_percentage = reserved_percentage

        result = self.library._get_reserved_percentage()

        self.assertEqual(reserved_percentage, result)
        self.assertFalse(mock_report.called)

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_get_reserved_percentage(self, mock_report):
        # A non-default multiplier should win over reserved_percentage and
        # emit a deprecation warning.
        multiplier = 2.0
        self.library.configuration.netapp_size_multiplier = multiplier

        result = self.library._get_reserved_percentage()

        reserved_ratio = round(1 - (1 / multiplier), 2)
        reserved_percentage = 100 * int(reserved_ratio)

        self.assertEqual(reserved_percentage, result)
        msg = ('The "netapp_size_multiplier" configuration option is '
               'deprecated and will be removed in the Mitaka release. '
               'Please set "reserved_percentage = %d" instead.' % result)
        mock_report.assert_called_once_with(block_base.LOG, msg)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
                       mock.Mock(return_value={'Volume': 'FAKE_CMODE_VOL1'}))
    def test_get_pool(self):
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertEqual('FAKE_CMODE_VOL1', pool)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
                       mock.Mock(return_value=None))
    def test_get_pool_no_metadata(self):
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertIsNone(pool)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr',
                       mock.Mock(return_value=dict()))
    def test_get_pool_volume_unknown(self):
        pool = self.library.get_pool({'name': 'volume-fake-uuid'})
        self.assertIsNone(pool)

    def test_create_volume(self):
        volume_size_in_bytes = int(fake.SIZE) * units.Gi
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(na_utils, 'log_extra_spec_warnings')
        self.mock_object(block_base, 'LOG')
        self.mock_object(volume_utils, 'extract_host',
                         return_value=fake.POOL_NAME)
        self.mock_object(self.library, '_setup_qos_for_volume',
                         return_value=None)
        self.mock_object(self.library, '_create_lun')
        self.mock_object(self.library, '_create_lun_handle')
        self.mock_object(self.library, '_add_lun_to_table')
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')
        self.mock_object(self.library, '_get_volume_model_update')

        self.library.create_volume(fake.VOLUME)

        self.library._create_lun.assert_called_once_with(
            fake.POOL_NAME, fake.LUN_NAME, volume_size_in_bytes,
            fake.LUN_METADATA, None)
        self.library._get_volume_model_update.assert_called_once_with(
            fake.VOLUME)
        self.assertEqual(
            0, self.library.
            _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(0, block_base.LOG.error.call_count)

    def test_create_volume_no_pool(self):
        self.mock_object(volume_utils, 'extract_host', return_value=None)

        self.assertRaises(exception.InvalidHost, self.library.create_volume,
                          fake.VOLUME)

    def test_create_volume_exception_path(self):
        # A failed LUN create must roll back the QoS policy group.
        self.mock_object(block_base, 'LOG')
        self.mock_object(na_utils, 'get_volume_extra_specs')
        self.mock_object(self.library, '_setup_qos_for_volume',
                         return_value=None)
        self.mock_object(self.library, '_create_lun', side_effect=Exception)
        self.mock_object(self.library, '_mark_qos_policy_group_for_deletion')

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.create_volume, fake.VOLUME)

        self.assertEqual(1, self.library.
                         _mark_qos_policy_group_for_deletion.call_count)
        self.assertEqual(1, block_base.LOG.exception.call_count)

    def test_create_volume_no_pool_provided_by_scheduler(self):
        fake_volume = copy.deepcopy(fake.VOLUME)
        # Set up fake volume whose 'host' field is missing pool information.
        fake_volume['host'] = '%s@%s' % (fake.HOST_NAME, fake.BACKEND_NAME)

        self.assertRaises(exception.InvalidHost, self.library.create_volume,
                          fake_volume)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr):
        os = 'linux'
        protocol = 'fcp'
        self.library.host_type = 'linux'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        self.zapi_client.map_lun.return_value = '1'

        lun_id = self.library._map_lun('fake_volume',
                                       fake.FC_FORMATTED_INITIATORS,
                                       protocol, None)

        self.assertEqual('1', lun_id)
        mock_get_or_create_igroup.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS, protocol, os)
        self.zapi_client.map_lun.assert_called_once_with(
            fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_map_lun_mismatch_host_os(
            self, mock_get_or_create_igroup, mock_get_lun_attr):
        # LUN OS differs from host type: mapping proceeds with the host type
        # but logs a warning.
        os = 'windows'
        protocol = 'fcp'
        self.library.host_type = 'linux'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')

        self.library._map_lun('fake_volume',
                              fake.FC_FORMATTED_INITIATORS,
                              protocol, None)

        mock_get_or_create_igroup.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS, protocol,
            self.library.host_type)
        self.zapi_client.map_lun.assert_called_once_with(
            fake.LUN_PATH, fake.IGROUP1_NAME, lun_id=None)
        self.assertEqual(1, block_base.LOG.warning.call_count)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup,
                                 mock_get_or_create_igroup,
                                 mock_get_lun_attr):
        # If the backend reports the LUN already mapped, reuse that mapping.
        os = 'linux'
        protocol = 'fcp'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
        self.zapi_client.map_lun.side_effect = netapp_api.NaApiError

        lun_id = self.library._map_lun(
            'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)

        self.assertEqual('2', lun_id)
        mock_find_mapped_lun_igroup.assert_called_once_with(
            fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_or_create_igroup')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_map_lun_api_error(self, mock_find_mapped_lun_igroup,
                               mock_get_or_create_igroup,
                               mock_get_lun_attr):
        # API error with no pre-existing mapping must propagate.
        os = 'linux'
        protocol = 'fcp'
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH, 'OsType': os}
        mock_get_or_create_igroup.return_value = (fake.IGROUP1_NAME, os,
                                                  'iscsi')
        mock_find_mapped_lun_igroup.return_value = (None, None)
        self.zapi_client.map_lun.side_effect = netapp_api.NaApiError

        self.assertRaises(netapp_api.NaApiError, self.library._map_lun,
                          'fake_volume', fake.FC_FORMATTED_INITIATORS,
                          protocol, None)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_find_mapped_lun_igroup')
    def test_unmap_lun(self, mock_find_mapped_lun_igroup):
        mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)

        self.library._unmap_lun(fake.LUN_PATH, fake.FC_FORMATTED_INITIATORS)

        self.zapi_client.unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                                           fake.IGROUP1_NAME)

    def test_find_mapped_lun_igroup(self):
        # Abstract in the base library; protocol subclasses implement it.
        self.assertRaises(NotImplementedError,
                          self.library._find_mapped_lun_igroup,
                          fake.LUN_PATH,
                          fake.FC_FORMATTED_INITIATORS)

    def test_has_luns_mapped_to_initiators(self):
        self.zapi_client.has_luns_mapped_to_initiators.return_value = True
        self.assertTrue(self.library._has_luns_mapped_to_initiators(
            fake.FC_FORMATTED_INITIATORS))
        self.zapi_client.has_luns_mapped_to_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)

    def test_get_or_create_igroup_preexisting(self):
        self.zapi_client.get_igroup_by_initiators.return_value = [fake.IGROUP1]
        self.library._create_igroup_add_initiators = mock.Mock()
        igroup_name, host_os, ig_type = self.library._get_or_create_igroup(
            fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')

        self.assertEqual(fake.IGROUP1_NAME, igroup_name)
        self.assertEqual('linux', host_os)
        self.assertEqual('fcp', ig_type)
        self.zapi_client.get_igroup_by_initiators.assert_called_once_with(
            fake.FC_FORMATTED_INITIATORS)
        self.assertEqual(
            0, self.library._create_igroup_add_initiators.call_count)

    @mock.patch.object(uuid, 'uuid4', mock.Mock(return_value=fake.UUID1))
    def test_get_or_create_igroup_none_preexisting(self):
        """This method also tests _create_igroup_add_initiators."""
        self.zapi_client.get_igroup_by_initiators.return_value = []

        igroup_name, os, ig_type = self.library._get_or_create_igroup(
            fake.FC_FORMATTED_INITIATORS, 'fcp', 'linux')

        self.assertEqual('openstack-' + fake.UUID1, igroup_name)
        self.zapi_client.create_igroup.assert_called_once_with(
            igroup_name, 'fcp', 'linux')
        self.assertEqual(len(fake.FC_FORMATTED_INITIATORS),
                         self.zapi_client.add_igroup_initiator.call_count)
        self.assertEqual('linux', os)
        self.assertEqual('fcp', ig_type)

    def test_get_fc_target_wwpns(self):
        self.assertRaises(NotImplementedError,
                          self.library._get_fc_target_wwpns)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun')
    def test_initialize_connection_fc(self, mock_map_lun,
                                      mock_build_initiator_target_map):
        self.maxDiff = None
        mock_map_lun.return_value = '1'
        mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
                                                        fake.FC_I_T_MAP, 4)

        target_info = self.library.initialize_connection_fc(fake.FC_VOLUME,
                                                            fake.FC_CONNECTOR)

        self.assertDictEqual(target_info, fake.FC_TARGET_INFO)
        mock_map_lun.assert_called_once_with(
            'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_map_lun')
    def test_initialize_connection_fc_no_wwpns(
            self, mock_map_lun, mock_build_initiator_target_map):
        mock_map_lun.return_value = '1'
        mock_build_initiator_target_map.return_value = (None, None, 0)

        self.assertRaises(exception.VolumeBackendAPIException,
                          self.library.initialize_connection_fc,
                          fake.FC_VOLUME, fake.FC_CONNECTOR)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_has_luns_mapped_to_initiators')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
                                     mock_has_luns_mapped_to_initiators):
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
        mock_unmap_lun.return_value = None
        mock_has_luns_mapped_to_initiators.return_value = True

        target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
                                                           fake.FC_CONNECTOR)

        self.assertDictEqual(target_info, fake.FC_TARGET_INFO_EMPTY)
        mock_unmap_lun.assert_called_once_with(fake.LUN_PATH,
                                               fake.FC_FORMATTED_INITIATORS)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_build_initiator_target_map')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_has_luns_mapped_to_initiators')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_unmap_lun')
    @mock.patch.object(block_base.NetAppBlockStorageLibrary, '_get_lun_attr')
    def test_terminate_connection_fc_no_more_luns(
            self, mock_get_lun_attr, mock_unmap_lun,
            mock_has_luns_mapped_to_initiators,
            mock_build_initiator_target_map):
        # Last LUN for these initiators: the unmap info is returned so the
        # caller can tear down the zoning.
        mock_get_lun_attr.return_value = {'Path': fake.LUN_PATH}
        mock_unmap_lun.return_value = None
        mock_has_luns_mapped_to_initiators.return_value = False
        mock_build_initiator_target_map.return_value = (fake.FC_TARGET_WWPNS,
                                                        fake.FC_I_T_MAP, 4)

        target_info = self.library.terminate_connection_fc(fake.FC_VOLUME,
                                                           fake.FC_CONNECTOR)

        self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_fc_target_wwpns')
    def test_build_initiator_target_map_no_lookup_service(
            self, mock_get_fc_target_wwpns):
        self.library.lookup_service = None
        mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS

        (target_wwpns, init_targ_map, num_paths) = \
            self.library._build_initiator_target_map(fake.FC_CONNECTOR)

        self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
        self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map)
        self.assertEqual(0, num_paths)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_fc_target_wwpns')
    def test_build_initiator_target_map_with_lookup_service(
            self, mock_get_fc_target_wwpns):
        self.library.lookup_service = mock.Mock()
        self.library.lookup_service.get_device_mapping_from_network.\
            return_value = fake.FC_FABRIC_MAP
        mock_get_fc_target_wwpns.return_value = fake.FC_FORMATTED_TARGET_WWPNS

        (target_wwpns, init_targ_map, num_paths) = \
            self.library._build_initiator_target_map(fake.FC_CONNECTOR)

        self.assertSetEqual(set(fake.FC_TARGET_WWPNS), set(target_wwpns))
        self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
        self.assertEqual(4, num_paths)

    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup_san_configured(self, mock_check_flags):
        self.library.configuration.netapp_lun_ostype = 'windows'
        self.library.configuration.netapp_host_type = 'solaris'
        self.library.configuration.netapp_lun_space_reservation = 'disabled'
        self.library.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertEqual('windows', self.library.lun_ostype)
        self.assertEqual('solaris', self.library.host_type)

    @mock.patch.object(na_utils, 'check_flags')
    def test_do_setup_san_unconfigured(self, mock_check_flags):
        # Unset options fall back to 'linux' for both LUN OS and host type.
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.configuration.netapp_lun_space_reservation = 'enabled'
        self.library.do_setup(mock.Mock())
        self.assertTrue(mock_check_flags.called)
        self.assertEqual('linux', self.library.lun_ostype)
        self.assertEqual('linux', self.library.host_type)

    def test_do_setup_space_reservation_disabled(self):
        self.mock_object(na_utils, 'check_flags')
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.configuration.netapp_lun_space_reservation = 'disabled'

        self.library.do_setup(mock.Mock())

        self.assertEqual('false', self.library.lun_space_reservation)

    def test_do_setup_space_reservation_enabled(self):
        self.mock_object(na_utils, 'check_flags')
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.configuration.netapp_lun_space_reservation = 'enabled'

        self.library.do_setup(mock.Mock())

        self.assertEqual('true', self.library.lun_space_reservation)

    def test_get_existing_vol_with_manage_ref_no_source_info(self):

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.library._get_existing_vol_with_manage_ref,
                          {})

    def test_get_existing_vol_manage_not_found(self):
        self.zapi_client.get_lun_by_args.return_value = []

        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.library._get_existing_vol_with_manage_ref,
                          {'source-name': 'lun_path'})
        self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)

    def test_get_existing_vol_manage_lun_by_path(self):
        self.library.vserver = 'fake_vserver'
        self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1']
        mock_lun = block_base.NetAppLun(
            'lun0', 'lun0', '3', {'UUID': 'fake_uuid'})
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_extract_lun_info', return_value=mock_lun)
        existing_ref = {'source-name': 'fake_path'}

        lun = self.library._get_existing_vol_with_manage_ref(existing_ref)

        self.zapi_client.get_lun_by_args.assert_called_once_with(
            path='fake_path')
        self.library._extract_lun_info.assert_called_once_with('lun0')
        self.assertEqual('lun0', lun.name)

    def test_get_existing_vol_manage_lun_by_uuid(self):
        self.library.vserver = 'fake_vserver'
        self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1']
        mock_lun = block_base.NetAppLun(
            'lun0', 'lun0', '3', {'UUID': 'fake_uuid'})
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_extract_lun_info', return_value=mock_lun)
        existing_ref = {'source-id': 'fake_uuid'}

        lun = self.library._get_existing_vol_with_manage_ref(existing_ref)

        self.zapi_client.get_lun_by_args.assert_called_once_with(
            uuid='fake_uuid')
        self.library._extract_lun_info.assert_called_once_with('lun0')
        self.assertEqual('lun0', lun.name)

    def test_get_existing_vol_manage_lun_invalid_mode(self):
        self.assertRaises(exception.ManageExistingInvalidReference,
                          self.library._get_existing_vol_with_manage_ref,
                          {'source-id': 'src_id'})

    def test_get_existing_vol_manage_lun_invalid_lun(self):
        # First candidate LUN is invalid on storage; the second one is used.
        self.zapi_client.get_lun_by_args.return_value = ['lun0', 'lun1']
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_is_lun_valid_on_storage',
                         side_effect=[False, True])
        mock_lun0 = block_base.NetAppLun(
            'lun0', 'lun0', '3', {'UUID': 'src_id_0'})
        mock_lun1 = block_base.NetAppLun(
            'lun1', 'lun1', '5', {'UUID': 'src_id_1'})
        self.mock_object(block_base.NetAppBlockStorageLibrary,
                         '_extract_lun_info',
                         side_effect=[mock_lun0, mock_lun1])

        lun = self.library._get_existing_vol_with_manage_ref(
            {'source-name': 'lun_path'})

        self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
        self.library._extract_lun_info.assert_has_calls([
            mock.call('lun0'),
            mock.call('lun1'),
        ])
        self.assertEqual('lun1', lun.name)

    @mock.patch.object(block_base.NetAppBlockStorageLibrary,
                       '_get_existing_vol_with_manage_ref',
                       mock.Mock(return_value=block_base.NetAppLun(
                           'handle', 'name', '1073742824', {})))
    def test_manage_existing_get_size(self):
        # 1073742824 bytes rounds up to 2 GiB.
        size = self.library.manage_existing_get_size(
            {'id': 'vol_id'}, {'ref': 'ref'})
        self.assertEqual(2, size)
        self.library._get_existing_vol_with_manage_ref.assert_called_once_with(
            {'ref': 'ref'})

    @ddt.data(None,
              {'replication_status': fields.ReplicationStatus.ENABLED})
    def test_manage_existing_lun_name_matches(self, model_update):
        volume = fake_volume.fake_volume_obj(self.ctxt)
        existing_ref = {'source-name': 'fake_path'}
        mock_lun = block_base.NetAppLun(
            volume['name'], volume['name'], '3',
            {'UUID': 'fake_uuid', 'Path': 'p'})
        self.mock_object(self.library, '_get_existing_vol_with_manage_ref',
                         return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs',
                         return_value=fake.EXTRA_SPECS)
        self.mock_object(self.library, '_check_volume_type_for_lun',
                         return_value=True)
        self.mock_object(self.library, '_setup_qos_for_volume')
        self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
                         return_value=None)
        self.mock_object(self.library, '_add_lun_to_table')
        self.mock_object(self.library, '_get_volume_model_update',
                         return_value=model_update)
        mock_info_log = self.mock_object(block_base.LOG, 'info')

        actual_update = self.library.manage_existing(volume, existing_ref)

        self.assertEqual(model_update, actual_update)
        self.assertEqual(2, mock_info_log.call_count)
        self.library._add_lun_to_table.assert_called_once_with(mock_lun)

    @ddt.data(None, 'fake_qos_policy_group_name')
    def test_manage_existing_rename_lun(self, qos_policy_group_name):
        # LUN name differs from the volume name, so manage_existing renames
        # it and (optionally) applies the QoS policy group to the new path.
        expected_update = (
            {'replication_status': fields.ReplicationStatus.ENABLED})
        volume = fake_volume.fake_volume_obj(self.ctxt)
        existing_ref = {'source-name': 'fake_path'}
        mock_lun = block_base.NetAppLun(
            'lun0', 'lun0', '3',
            {'UUID': 'fake_uuid', 'Path': fake.LUN_PATH})
        self.mock_object(self.library,
                         '_get_existing_vol_with_manage_ref',
                         return_value=mock_lun)
        self.mock_object(na_utils, 'get_volume_extra_specs',
                         return_value=fake.EXTRA_SPECS)
        self.mock_object(self.library, '_check_volume_type_for_lun',
                         return_value=True)
        self.mock_object(self.library, '_setup_qos_for_volume')
        self.mock_object(na_utils, 'get_qos_policy_group_name_from_info',
                         return_value=qos_policy_group_name)
        self.mock_object(self.library, '_add_lun_to_table')
        self.mock_object(self.library, '_get_volume_model_update',
                         return_value=expected_update)
        self.mock_object(self.zapi_client, 'set_lun_qos_policy_group')
        mock_info_log = self.mock_object(block_base.LOG, 'info')

        actual_update = self.library.manage_existing(volume, existing_ref)

        expected_new_path = '/vol/vol0/%s' % volume['name']
        self.assertEqual(expected_update, actual_update)
        self.assertEqual(1, mock_info_log.call_count)
        self.library._add_lun_to_table.assert_called_once_with(mock_lun)
        if qos_policy_group_name:
            (self.zapi_client.set_lun_qos_policy_group.
             assert_called_once_with(expected_new_path,
                                     qos_policy_group_name))
        else:
            self.assertFalse(
                self.zapi_client.set_lun_qos_policy_group.called)

    @mock.patch.object(block_base.LOG, 'info')
    def test_unmanage(self, log):
        mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                        {'Path': 'p', 'UUID': 'uuid'})
        self.library._get_lun_from_table = mock.Mock(return_value=mock_lun)

        self.library.unmanage({'name': 'vol'})

        self.library._get_lun_from_table.assert_called_once_with('vol')
        self.assertEqual(1, log.call_count)

    def test_check_vol_type_for_lun(self):
        result = self.library._check_volume_type_for_lun(
            'vol', 'lun', 'existing_ref', {})
        self.assertIsNone(result)

    def test_is_lun_valid_on_storage(self):
        self.assertTrue(self.library._is_lun_valid_on_storage('lun'))

    def test_initialize_connection_iscsi(self):
        target_details_list = fake.ISCSI_TARGET_DETAILS_LIST
        volume = fake.ISCSI_VOLUME
        connector = fake.ISCSI_CONNECTOR
        self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun',
return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', return_value=target_details_list[1]) self.zapi_client.get_iscsi_service_details.return_value = ( fake.ISCSI_SERVICE_IQN) self.mock_object(na_utils, 'get_iscsi_connection_properties', return_value=fake.ISCSI_CONNECTION_PROPERTIES) target_info = self.library.initialize_connection_iscsi(volume, connector) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_method'], target_info['data']['auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['auth_password'], target_info['data']['auth_password']) self.assertIn('auth_password', target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data']['discovery_auth_method'], target_info['data']['discovery_auth_method']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_password'], target_info['data']['discovery_auth_password']) self.assertIn('auth_password', target_info['data']) self.assertEqual( fake.ISCSI_CONNECTION_PROPERTIES['data'] ['discovery_auth_username'], target_info['data']['discovery_auth_username']) self.assertEqual(fake.ISCSI_CONNECTION_PROPERTIES, target_info) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', None) self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ .assert_called_once_with( target_details_list) self.zapi_client.get_iscsi_service_details.assert_called_once_with() def test_initialize_connection_iscsi_no_target_list(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = 
None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list') self.mock_object(na_utils, 'get_iscsi_connection_properties', return_value=fake.ISCSI_CONNECTION_PROPERTIES) self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual( 0, block_base.NetAppBlockStorageLibrary ._get_preferred_target_from_list.call_count) self.assertEqual( 0, self.zapi_client.get_iscsi_service_details.call_count) self.assertEqual( 0, na_utils.get_iscsi_connection_properties.call_count) def test_initialize_connection_iscsi_no_preferred_target(self): volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = None self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', return_value=None) self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, self.library.initialize_connection_iscsi, volume, connector) self.assertEqual(0, self.zapi_client .get_iscsi_service_details.call_count) self.assertEqual(0, na_utils.get_iscsi_connection_properties .call_count) def test_initialize_connection_iscsi_no_iscsi_service_details(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST volume = fake.ISCSI_VOLUME connector = fake.ISCSI_CONNECTOR self.mock_object(block_base.NetAppBlockStorageLibrary, '_map_lun', return_value=fake.ISCSI_LUN['lun_id']) self.zapi_client.get_iscsi_target_details.return_value = ( target_details_list) self.mock_object(block_base.NetAppBlockStorageLibrary, '_get_preferred_target_from_list', return_value=target_details_list[1]) self.zapi_client.get_iscsi_service_details.return_value = None self.mock_object(na_utils, 'get_iscsi_connection_properties') self.assertRaises(exception.VolumeBackendAPIException, 
self.library.initialize_connection_iscsi, volume, connector) block_base.NetAppBlockStorageLibrary._map_lun.assert_called_once_with( fake.ISCSI_VOLUME['name'], [fake.ISCSI_CONNECTOR['initiator']], 'iscsi', None) self.zapi_client.get_iscsi_target_details.assert_called_once_with() block_base.NetAppBlockStorageLibrary._get_preferred_target_from_list\ .assert_called_once_with(target_details_list) def test_get_target_details_list(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[0], result) def test_get_preferred_target_from_empty_list(self): target_details_list = [] result = self.library._get_preferred_target_from_list( target_details_list) self.assertIsNone(result) def test_get_preferred_target_from_list_with_one_interface_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) target_details_list[0]['interface-enabled'] = 'false' result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[1], result) def test_get_preferred_target_from_list_with_all_interfaces_disabled(self): target_details_list = copy.deepcopy(fake.ISCSI_TARGET_DETAILS_LIST) for target in target_details_list: target['interface-enabled'] = 'false' result = self.library._get_preferred_target_from_list( target_details_list) self.assertEqual(target_details_list[0], result) def test_get_preferred_target_from_list_with_filter(self): target_details_list = fake.ISCSI_TARGET_DETAILS_LIST filter = [target_detail['address'] for target_detail in target_details_list[1:]] result = self.library._get_preferred_target_from_list( target_details_list, filter) self.assertEqual(target_details_list[1], result) @mock.patch.object(na_utils, 'check_flags', mock.Mock()) @mock.patch.object(block_base, 'LOG', mock.Mock()) def test_setup_error_invalid_lun_os(self): self.library.configuration.netapp_lun_ostype = 'unknown' 
        self.library.do_setup(mock.Mock())
        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)
        block_base.LOG.error.assert_called_once_with(mock.ANY)

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_setup_error_invalid_host_type(self):
        """An unrecognized netapp_host_type makes check_for_setup_error
        raise and log exactly one error."""
        self.library.configuration.netapp_lun_ostype = 'linux'
        self.library.configuration.netapp_host_type = 'future_os'
        self.library.do_setup(mock.Mock())
        self.assertRaises(exception.NetAppDriverException,
                          self.library.check_for_setup_error)
        block_base.LOG.error.assert_called_once_with(mock.ANY)

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_both_config(self):
        """With valid os/host types configured, setup check populates the
        LUN table and starts the periodic tasks."""
        self.library.configuration.netapp_lun_ostype = 'linux'
        self.library.configuration.netapp_host_type = 'linux'
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()
        mock_looping_start_tasks = self.mock_object(
            self.library.loopingcalls, 'start_tasks')

        self.library.check_for_setup_error()

        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
        mock_looping_start_tasks.assert_called_once_with()

    @mock.patch.object(na_utils, 'check_flags', mock.Mock())
    def test_check_for_setup_error_no_os_host(self):
        """With neither os-type nor host-type configured, the setup check
        still succeeds and starts periodic tasks."""
        mock_start_tasks = self.mock_object(
            self.library.loopingcalls, 'start_tasks')
        self.library.configuration.netapp_lun_ostype = None
        self.library.configuration.netapp_host_type = None
        self.library.do_setup(mock.Mock())
        self.zapi_client.get_lun_list.return_value = ['lun1']
        self.library._extract_and_populate_luns = mock.Mock()

        self.library.check_for_setup_error()

        self.library._extract_and_populate_luns.assert_called_once_with(
            ['lun1'])
        mock_start_tasks.assert_called_once_with()

    def test_delete_volume(self):
        """delete_volume() delegates to _delete_lun with the LUN name."""
        mock_delete_lun = self.mock_object(self.library, '_delete_lun')

        self.library.delete_volume(fake.VOLUME)

        mock_delete_lun.assert_called_once_with(fake.LUN_NAME)

    def test_delete_lun(self):
        """_delete_lun() looks up the LUN metadata and destroys the LUN at
        its backend path."""
        mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
        mock_get_lun_attr.return_value = fake.LUN_METADATA
        self.library.zapi_client = mock.Mock()
        self.library.lun_table = fake.LUN_TABLE

        self.library._delete_lun(fake.LUN_NAME)

        mock_get_lun_attr.assert_called_once_with(
            fake.LUN_NAME, 'metadata')
        self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH)

    def test_delete_lun_no_metadata(self):
        """When no metadata is found for the LUN, nothing is destroyed and
        no QoS policy group is marked for deletion."""
        self.mock_object(self.library, '_get_lun_attr', return_value=None)
        self.library.zapi_client = mock.Mock()
        self.library.lun_table = fake.LUN_TABLE
        self.mock_object(self.library, 'zapi_client')

        self.library._delete_lun(fake.LUN_NAME)

        self.library._get_lun_attr.assert_called_once_with(
            fake.LUN_NAME, 'metadata')
        self.assertEqual(0, self.library.zapi_client.destroy_lun.call_count)
        self.assertEqual(0, self.zapi_client.
                         mark_qos_policy_group_for_deletion.call_count)

    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_delete_lun_missing_lun(self):
        """An EOBJECTNOTFOUND error from destroy_lun is tolerated: it logs
        a warning (not an error) and still drops the table entry."""
        mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
        mock_get_lun_attr.return_value = fake.LUN_METADATA
        self.library.zapi_client = mock.Mock()
        error = netapp_api.NaApiError(code=netapp_api.EOBJECTNOTFOUND)
        self.mock_object(self.library.zapi_client, 'destroy_lun',
                         side_effect=error)
        self.library.lun_table = {fake.LUN_NAME: None}

        self.library._delete_lun(fake.LUN_NAME)

        mock_get_lun_attr.assert_called_once_with(
            fake.LUN_NAME, 'metadata')
        self.library.zapi_client.destroy_lun.assert_called_once_with(fake.PATH)
        block_base.LOG.error.assert_not_called()
        block_base.LOG.warning.assert_called_once()
        # The LUN entry is removed from the table even though the backend
        # reported it as already gone.
        self.assertFalse(self.library.lun_table)

    @mock.patch.object(block_base, 'LOG', mock.Mock())
    def test_delete_lun_client_exception(self):
        """Any other NaApiError from destroy_lun is re-raised as a driver
        exception without logging."""
        mock_get_lun_attr = self.mock_object(self.library, '_get_lun_attr')
        mock_get_lun_attr.return_value = fake.LUN_METADATA
        self.library.zapi_client = mock.Mock()
self.mock_object(self.library.zapi_client, 'destroy_lun', side_effect=netapp_api.NaApiError) self.assertRaises(exception.NetAppDriverException, self.library._delete_lun, fake.LUN_NAME) block_base.LOG.error.assert_not_called() block_base.LOG.warning.assert_not_called() def test_delete_snapshot(self): mock_delete_lun = self.mock_object(self.library, '_delete_lun') self.library.delete_snapshot(fake.SNAPSHOT) mock_delete_lun.assert_called_once_with(fake.SNAPSHOT_NAME) def test_clone_source_to_destination(self): self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume') self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.mock_object(self.library, '_get_volume_model_update', return_value={'key': 'value'}) self.library.lun_space_reservation = 'false' retval = self.library._clone_source_to_destination( fake.CLONE_SOURCE, fake.CLONE_DESTINATION) self.assertEqual({'key': 'value'}, retval) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='false', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(0, self.library.delete_volume.call_count) self.assertEqual(0, self.library. 
_mark_qos_policy_group_for_deletion.call_count) def test_clone_source_to_destination_exception_path(self): self.mock_object(na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) self.mock_object(self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_extend_volume', side_effect=Exception) self.mock_object(self.library, 'delete_volume') self.mock_object(self.library, '_mark_qos_policy_group_for_deletion') self.library.lun_space_reservation = 'true' self.assertRaises(exception.VolumeBackendAPIException, self.library._clone_source_to_destination, fake.CLONE_SOURCE, fake.CLONE_DESTINATION) na_utils.get_volume_extra_specs.assert_called_once_with( fake.CLONE_DESTINATION) self.library._setup_qos_for_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.EXTRA_SPECS) self.library._clone_lun.assert_called_once_with( fake.CLONE_SOURCE_NAME, fake.CLONE_DESTINATION_NAME, space_reserved='true', qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.library._extend_volume.assert_called_once_with( fake.CLONE_DESTINATION, fake.CLONE_DESTINATION_SIZE, fake.QOS_POLICY_GROUP_NAME) self.assertEqual(1, self.library.delete_volume.call_count) self.assertEqual(1, self.library. 
_mark_qos_policy_group_for_deletion.call_count) def test_create_lun(self): self.assertRaises(NotImplementedError, self.library._create_lun, fake.VOLUME_ID, fake.LUN_ID, fake.SIZE, fake.LUN_METADATA) def test_clone_lun(self): self.assertRaises(NotImplementedError, self.library._clone_lun, fake.VOLUME_ID, 'new-' + fake.VOLUME_ID) def test_create_snapshot(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) mock_clone_lun = self.mock_object(self.library, '_clone_lun') self.mock_object(self.library, '_get_lun_from_table', return_value=fake_lun) self.library.create_snapshot(fake.SNAPSHOT) mock_clone_lun.assert_called_once_with( fake_lun.name, fake.SNAPSHOT_NAME, space_reserved='false', is_snapshot=True) def test_create_volume_from_snapshot(self): mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake.SNAPSHOT['name'], 'size': fake.SNAPSHOT['volume_size'] } self.library.create_volume_from_snapshot(fake.VOLUME, fake.SNAPSHOT) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_create_cloned_volume(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object(self.library, '_get_lun_from_table') mock_get_lun_from_table.return_value = fake_lun mock_do_clone = self.mock_object(self.library, '_clone_source_to_destination') source = { 'name': fake_lun.name, 'size': fake.VOLUME_REF['size'] } self.library.create_cloned_volume(fake.VOLUME, fake.VOLUME_REF) mock_do_clone.assert_has_calls([ mock.call(source, fake.VOLUME)]) def test_extend_volume(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_setup_qos_for_volume = self.mock_object( self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) 
mock_extend_volume = self.mock_object(self.library, '_extend_volume') self.library.extend_volume(fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_setup_qos_for_volume.assert_called_once_with(volume_copy, fake.EXTRA_SPECS) mock_extend_volume.assert_called_once_with(fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) def test_extend_volume_api_error(self): new_size = 100 volume_copy = copy.copy(fake.VOLUME) volume_copy['size'] = new_size mock_get_volume_extra_specs = self.mock_object( na_utils, 'get_volume_extra_specs', return_value=fake.EXTRA_SPECS) mock_setup_qos_for_volume = self.mock_object( self.library, '_setup_qos_for_volume', return_value=fake.QOS_POLICY_GROUP_INFO) mock_extend_volume = self.mock_object( self.library, '_extend_volume', side_effect=netapp_api.NaApiError) self.assertRaises(netapp_api.NaApiError, self.library.extend_volume, fake.VOLUME, new_size) mock_get_volume_extra_specs.assert_called_once_with(fake.VOLUME) mock_setup_qos_for_volume.assert_has_calls([ mock.call(volume_copy, fake.EXTRA_SPECS), mock.call(fake.VOLUME, fake.EXTRA_SPECS)]) mock_extend_volume.assert_called_once_with( fake.VOLUME, new_size, fake.QOS_POLICY_GROUP_NAME) def test__extend_volume_direct(self): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 2 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 
'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) mock_do_direct_resize.assert_called_once_with( fake.LUN_METADATA['Path'], six.text_type(new_size_bytes)) self.assertFalse(mock_do_sub_clone_resize.called) self.assertEqual(six.text_type(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) def test__extend_volume_clone(self): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE * 20 new_size_bytes = new_size * units.Gi max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake.VOLUME['name']: fake_lun} self.library._extend_volume(fake.VOLUME, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake.VOLUME['name']) mock_get_lun_geometry.assert_called_once_with( fake.LUN_METADATA['Path']) self.assertFalse(mock_do_direct_resize.called) mock_do_sub_clone_resize.assert_called_once_with( fake.LUN_METADATA['Path'], six.text_type(new_size_bytes), qos_policy_group_name='fake_qos_policy') 
self.assertEqual(six.text_type(new_size_bytes), self.library.lun_table[fake.VOLUME['name']].size) def test__extend_volume_no_change(self): current_size = fake.LUN_SIZE current_size_bytes = current_size * units.Gi new_size = fake.LUN_SIZE max_size = fake.LUN_SIZE * 10 max_size_bytes = max_size * units.Gi fake_volume = copy.copy(fake.VOLUME) fake_volume['size'] = new_size fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, current_size_bytes, fake.LUN_METADATA) mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) fake_lun_geometry = {'max_resize': six.text_type(max_size_bytes)} mock_get_lun_geometry = self.mock_object( self.library.zapi_client, 'get_lun_geometry', return_value=fake_lun_geometry) mock_do_direct_resize = self.mock_object(self.library.zapi_client, 'do_direct_resize') mock_do_sub_clone_resize = self.mock_object(self.library, '_do_sub_clone_resize') self.library.lun_table = {fake_volume['name']: fake_lun} self.library._extend_volume(fake_volume, new_size, 'fake_qos_policy') mock_get_lun_from_table.assert_called_once_with(fake_volume['name']) self.assertFalse(mock_get_lun_geometry.called) self.assertFalse(mock_do_direct_resize.called) self.assertFalse(mock_do_sub_clone_resize.called) def test_do_sub_clone_resize(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = 
self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.library._do_sub_clone_resize(fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) mock_post_sub_clone_resize.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_compression_on(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='on') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') self.assertFalse(mock_get_lun_block_count.called) self.assertFalse(mock_create_lun.called) 
self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_no_blocks(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 block_count = 0 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(exception.VolumeBackendAPIException, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) self.assertFalse(mock_create_lun.called) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_create_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', 
return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun', side_effect=netapp_api.NaApiError) mock_clone_lun = self.mock_object(self.library, '_clone_lun') mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) self.assertFalse(mock_clone_lun.called) self.assertFalse(mock_post_sub_clone_resize.called) self.assertFalse(mock_destroy_lun.called) def test_do_sub_clone_resize_clone_error(self): fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID, fake.LUN_SIZE, fake.LUN_METADATA) new_lun_size = fake.LUN_SIZE * 10 new_lun_name = 'new-%s' % fake.LUN_NAME new_lun_path = '/vol/vol0/%s' % new_lun_name block_count = fake.LUN_SIZE * units.Gi / 512 mock_get_lun_from_table = self.mock_object( self.library, '_get_lun_from_table', return_value=fake_lun) mock_get_vol_option = self.mock_object( self.library, '_get_vol_option', return_value='off') mock_get_lun_block_count = self.mock_object( self.library, '_get_lun_block_count', return_value=block_count) mock_create_lun = self.mock_object( self.library.zapi_client, 'create_lun') mock_clone_lun = self.mock_object( self.library, '_clone_lun', side_effect=netapp_api.NaApiError) mock_post_sub_clone_resize = self.mock_object( self.library, '_post_sub_clone_resize') mock_destroy_lun = self.mock_object( self.library.zapi_client, 'destroy_lun') self.assertRaises(netapp_api.NaApiError, self.library._do_sub_clone_resize, 
fake.LUN_PATH, new_lun_size, fake.QOS_POLICY_GROUP_NAME) mock_get_lun_from_table.assert_called_once_with(fake.LUN_NAME) mock_get_vol_option.assert_called_once_with('vol0', 'compression') mock_get_lun_block_count.assert_called_once_with(fake.LUN_PATH) mock_create_lun.assert_called_once_with( 'vol0', new_lun_name, new_lun_size, fake.LUN_METADATA, qos_policy_group_name=fake.QOS_POLICY_GROUP_NAME) mock_clone_lun.assert_called_once_with( fake.LUN_NAME, new_lun_name, block_count=block_count) self.assertFalse(mock_post_sub_clone_resize.called) mock_destroy_lun.assert_called_once_with(new_lun_path) def test_configure_chap_generate_username_and_password(self): """Ensure that a CHAP username and password are generated.""" initiator_name = fake.ISCSI_CONNECTOR['initiator'] username, password = self.library._configure_chap(initiator_name) self.assertEqual(na_utils.DEFAULT_CHAP_USER_NAME, username) self.assertIsNotNone(password) self.assertEqual(len(password), na_utils.CHAP_SECRET_LENGTH) def test_add_chap_properties(self): """Ensure that CHAP properties are added to the properties dictionary """ properties = {'data': {}} self.library._add_chap_properties(properties, 'user1', 'pass1') data = properties['data'] self.assertEqual('CHAP', data['auth_method']) self.assertEqual('user1', data['auth_username']) self.assertEqual('pass1', data['auth_password']) self.assertEqual('CHAP', data['discovery_auth_method']) self.assertEqual('user1', data['discovery_auth_username']) self.assertEqual('pass1', data['discovery_auth_password']) def test_add_looping_tasks(self): mock_add_task = self.mock_object(self.library.loopingcalls, 'add_task') mock_call_snap_cleanup = self.mock_object( self.library, '_delete_snapshots_marked_for_deletion') mock_call_ems_logging = self.mock_object( self.library, '_handle_ems_logging') self.library._add_looping_tasks() mock_add_task.assert_has_calls([ mock.call(mock_call_snap_cleanup, loopingcalls.ONE_MINUTE, loopingcalls.ONE_MINUTE), 
mock.call(mock_call_ems_logging, loopingcalls.ONE_HOUR)]) def test_delete_snapshots_marked_for_deletion(self): snapshots = [{ 'name': fake.SNAPSHOT_NAME, 'volume_name': fake.VOLUME['name'] }] mock_get_backing_flexvol_names = self.mock_object( self.library, '_get_backing_flexvol_names') mock_get_backing_flexvol_names.return_value = [fake.VOLUME['name']] mock_get_snapshots_marked = self.mock_object( self.zapi_client, 'get_snapshots_marked_for_deletion') mock_get_snapshots_marked.return_value = snapshots mock_delete_snapshot = self.mock_object( self.zapi_client, 'delete_snapshot') self.library._delete_snapshots_marked_for_deletion() mock_get_backing_flexvol_names.assert_called_once_with() mock_get_snapshots_marked.assert_called_once_with( [fake.VOLUME['name']]) mock_delete_snapshot.assert_called_once_with( fake.VOLUME['name'], fake.SNAPSHOT_NAME)
# Tests for pexpect.which(): PATH/defpath resolution, symlink handling,
# folder exclusion, and per-permission-bit matching against system which(1).
import subprocess
import tempfile
import shutil
import errno
import os

import pexpect
from . import PexpectTestCase

import pytest


class TestCaseWhich(PexpectTestCase.PexpectTestCase):
    " Tests for pexpect.which(). "

    def test_which_finds_ls(self):
        " which() can find ls(1). "
        exercise = pexpect.which("ls")
        assert exercise is not None
        assert exercise.startswith('/')

    def test_os_defpath_which(self):
        " which() finds an executable in $PATH and returns its abspath. "
        # NOTE(review): despite the docstring, this test empties $PATH and
        # exercises the os.defpath fallback.
        fname = 'cc'
        bin_dir = tempfile.mkdtemp()
        bin_path = os.path.join(bin_dir, fname)
        save_path = os.environ['PATH']
        save_defpath = os.defpath
        try:
            # setup
            os.environ['PATH'] = ''
            os.defpath = bin_dir
            with open(bin_path, 'w') as fp:
                pass

            # given non-executable,
            os.chmod(bin_path, 0o400)

            # exercise absolute and relative,
            assert pexpect.which(bin_path) is None
            assert pexpect.which(fname) is None

            # given executable,
            os.chmod(bin_path, 0o700)

            # exercise absolute and relative,
            assert pexpect.which(bin_path) == bin_path
            assert pexpect.which(fname) == bin_path

        finally:
            # restore,
            os.environ['PATH'] = save_path
            os.defpath = save_defpath

            # destroy scratch files and folders,
            if os.path.exists(bin_path):
                os.unlink(bin_path)
            if os.path.exists(bin_dir):
                os.rmdir(bin_dir)

    def test_path_search_which(self):
        " which() finds an executable in $PATH and returns its abspath. "
        fname = 'gcc'
        bin_dir = tempfile.mkdtemp()
        bin_path = os.path.join(bin_dir, fname)
        save_path = os.environ['PATH']
        try:
            # setup
            os.environ['PATH'] = bin_dir
            with open(bin_path, 'w') as fp:
                pass

            # given non-executable,
            os.chmod(bin_path, 0o400)

            # exercise absolute and relative,
            assert pexpect.which(bin_path) is None
            assert pexpect.which(fname) is None

            # given executable,
            os.chmod(bin_path, 0o700)

            # exercise absolute and relative,
            assert pexpect.which(bin_path) == bin_path
            assert pexpect.which(fname) == bin_path

        finally:
            # restore,
            os.environ['PATH'] = save_path

            # destroy scratch files and folders,
            if os.path.exists(bin_path):
                os.unlink(bin_path)
            if os.path.exists(bin_dir):
                os.rmdir(bin_dir)

    def test_which_follows_symlink(self):
        " which() follows symlinks and returns its path. "
        fname = 'original'
        symname = 'extra-crispy'
        bin_dir = tempfile.mkdtemp()
        bin_path = os.path.join(bin_dir, fname)
        sym_path = os.path.join(bin_dir, symname)
        save_path = os.environ['PATH']
        try:
            # setup
            os.environ['PATH'] = bin_dir
            with open(bin_path, 'w') as fp:
                pass
            os.chmod(bin_path, 0o400)
            os.symlink(bin_path, sym_path)

            # should not be found because symlink points to non-executable
            assert pexpect.which(symname) is None

            # but now it should -- because it is executable
            os.chmod(bin_path, 0o700)
            assert pexpect.which(symname) == sym_path

        finally:
            # restore,
            os.environ['PATH'] = save_path

            # destroy scratch files, symlinks, and folders,
            if os.path.exists(sym_path):
                os.unlink(sym_path)
            if os.path.exists(bin_path):
                os.unlink(bin_path)
            if os.path.exists(bin_dir):
                os.rmdir(bin_dir)

    def test_which_should_not_match_folders(self):
        " Which does not match folders, even though they are executable. "
        # make up a path and insert a folder that is 'executable', a naive
        # implementation might match (previously pexpect versions 3.2 and
        # sh versions 1.0.8, reported by @lcm337.)
        fname = 'g++'
        bin_dir = tempfile.mkdtemp()
        bin_dir2 = os.path.join(bin_dir, fname)
        save_path = os.environ['PATH']
        try:
            os.environ['PATH'] = bin_dir
            os.mkdir(bin_dir2, 0o755)
            # should not be found because it is not executable *file*,
            # but rather, has the executable bit set, as a good folder
            # should -- it should not be returned because it fails isdir()
            exercise = pexpect.which(fname)
            assert exercise is None

        finally:
            # restore,
            os.environ['PATH'] = save_path
            # destroy scratch folders,
            for _dir in (bin_dir2, bin_dir,):
                if os.path.exists(_dir):
                    os.rmdir(_dir)

    def test_which_should_match_other_group_user(self):
        " which() returns executables by other, group, and user ownership. "
        # create an executable and test that it is found using which() for
        # each of the 'other', 'group', and 'user' permission bits.
        fname = 'g77'
        bin_dir = tempfile.mkdtemp()
        bin_path = os.path.join(bin_dir, fname)
        save_path = os.environ['PATH']
        try:
            # setup
            os.environ['PATH'] = bin_dir

            # an interpreted script requires the ability to read,
            # whereas a binary program requires only to be executable.
            #
            # to gain access to a binary program, we make a copy of
            # the existing system program echo(1).
            bin_echo = None
            for pth in ('/bin/echo', '/usr/bin/echo'):
                if os.path.exists(pth):
                    bin_echo = pth
                    break
            bin_which = None
            for pth in ('/bin/which', '/usr/bin/which'):
                if os.path.exists(pth):
                    bin_which = pth
                    break
            if not bin_echo or not bin_which:
                pytest.skip('needs `echo` and `which` binaries')
            shutil.copy(bin_echo, bin_path)
            isroot = os.getuid() == 0
            # (should_match, mode) pairs: root may execute regardless of
            # owner bits, hence the `isroot` entries.
            for should_match, mode in (
                # note that although the file may have matching 'group' or
                # 'other' executable permissions, it is *not* executable
                # because the current uid is the owner of the file -- which
                # takes precedence
                (False, 0o000),   # ----------, no
                (isroot, 0o001),  # ---------x, no
                (isroot, 0o010),  # ------x---, no
                (True, 0o100),    # ---x------, yes
                (False, 0o002),   # --------w-, no
                (False, 0o020),   # -----w----, no
                (False, 0o200),   # --w-------, no
                (isroot, 0o003),  # --------wx, no
                (isroot, 0o030),  # -----wx---, no
                (True, 0o300),    # --wx------, yes
                (False, 0o004),   # -------r--, no
                (False, 0o040),   # ----r-----, no
                (False, 0o400),   # -r--------, no
                (isroot, 0o005),  # -------r-x, no
                (isroot, 0o050),  # ----r-x---, no
                (True, 0o500),    # -r-x------, yes
                (False, 0o006),   # -------rw-, no
                (False, 0o060),   # ----rw----, no
                (False, 0o600),   # -rw-------, no
                (isroot, 0o007),  # -------rwx, no
                (isroot, 0o070),  # ----rwx---, no
                (True, 0o700),    # -rwx------, yes
                (isroot, 0o4001), # ---S-----x, no
                (isroot, 0o4010), # ---S--x---, no
                (True, 0o4100),   # ---s------, yes
                (isroot, 0o4003), # ---S----wx, no
                (isroot, 0o4030), # ---S-wx---, no
                (True, 0o4300),   # --ws------, yes
                (isroot, 0o2001), # ------S--x, no
                (isroot, 0o2010), # ------s---, no
                (True, 0o2100),   # ---x--S---, yes
            ):
                mode_str = '{0:0>4o}'.format(mode)

                # given file mode,
                os.chmod(bin_path, mode)

                # exercise whether we may execute
                can_execute = True
                try:
                    # NOTE(review): the `== 0` result of this comparison is
                    # discarded; only the OSError path is observed. Confirm
                    # the exit status was meant to be asserted.
                    subprocess.Popen(fname).wait() == 0
                except OSError as err:
                    if err.errno != errno.EACCES:
                        raise
                    # permission denied
                    can_execute = False
                assert should_match == can_execute, (
                    should_match, can_execute, mode_str)

                # exercise whether which(1) would match
                proc = subprocess.Popen((bin_which, fname),
                                        env={'PATH': bin_dir},
                                        stdout=subprocess.PIPE)
                bin_which_match = bool(not proc.wait())
                assert should_match == bin_which_match, (
                    should_match, bin_which_match, mode_str)

                # finally, exercise pexpect's which(1) matches
                # the same.
                pexpect_match = bool(pexpect.which(fname))
                assert should_match == pexpect_match == bin_which_match, (
                    should_match, pexpect_match, bin_which_match, mode_str)

        finally:
            # restore,
            os.environ['PATH'] = save_path

            # destroy scratch files and folders,
            if os.path.exists(bin_path):
                os.unlink(bin_path)
            if os.path.exists(bin_dir):
                os.rmdir(bin_dir)
# html5lib tree builder backed by lxml.etree (Python 2 era code).
import _base
import new
import warnings
from html5lib.constants import DataLossWarning
import etree as etree_builders

try:
    import lxml.etree as etree
except ImportError:
    # NOTE(review): this fallback imports the exact same module as the try
    # branch, so on ImportError it simply re-raises. Presumably a different
    # fallback (or a pass) was intended -- confirm upstream.
    import lxml.etree as etree

fullTree = True

"""Module for supporting the lxml.etree library. The idea here is to use as much
of the native library as possible, without using fragile hacks like custom element
names that break between releases. The downside of this is that we cannot represent
all possible trees; specifically the following are known to cause problems:

Text or comments as siblings of the root element
Doctypes with mixed case names
Docypes with no name

When any of these things occur, we emit a DataLossWarning
"""

class DocumentType(object):
    # Minimal doctype record; lxml lowercases doctype names, so a
    # DataLossWarning is emitted for mixed-case names.
    def __init__(self, name, publicId, systemId):
        self.name = name
        if name != name.lower():
            warnings.warn("lxml does not preserve doctype case",
                          DataLossWarning)
        self.publicId = publicId
        self.systemId = systemId

class Document(object):
    # Wrapper around an lxml ElementTree plus the builder's own child list.
    def __init__(self):
        self._elementTree = None
        self._childNodes = []

    def appendChild(self, element):
        # Appended nodes become siblings *after* the root element, since an
        # lxml tree has exactly one root.
        self._elementTree.getroot().addnext(element._element)

    def _getChildNodes(self):
        return self._childNodes

    childNodes = property(_getChildNodes)

def testSerializer(element):
    """Serialize a tree/fragment into the html5lib test-format string."""
    rv = []
    finalText = None
    def serializeElement(element, indent=0):
        if not hasattr(element, "tag"):
            if hasattr(element, "getroot"):
                #Full tree case
                rv.append("#document")
                if element.docinfo.internalDTD:
                    if not (element.docinfo.public_id or
                            element.docinfo.system_url):
                        dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
                    else:
                        dtd_str = """<!DOCTYPE %s "%s" "%s">"""%(
                            element.docinfo.root_name,
                            element.docinfo.public_id,
                            element.docinfo.system_url)
                    rv.append("|%s%s"%(' '*(indent+2), dtd_str))
                # Walk back to the first sibling of the root (comments may
                # precede it), then serialize every top-level node in order.
                next_element = element.getroot()
                while next_element.getprevious() is not None:
                    next_element = next_element.getprevious()
                while next_element is not None:
                    serializeElement(next_element, indent+2)
                    next_element = next_element.getnext()
            elif isinstance(element, basestring):
                #Text in a fragment
                rv.append("|%s\"%s\""%(' '*indent, element))
            else:
                #Fragment case
                rv.append("#document-fragment")
                for next_element in element:
                    serializeElement(next_element, indent+2)
        elif type(element.tag) == type(etree.Comment):
            rv.append("|%s<!-- %s -->"%(' '*indent, element.text))
        else:
            rv.append("|%s<%s>"%(' '*indent, element.tag))
            if hasattr(element, "attrib"):
                for name, value in element.attrib.iteritems():
                    rv.append('|%s%s="%s"' % (' '*(indent+2), name, value))
            if element.text:
                rv.append("|%s\"%s\"" %(' '*(indent+2), element.text))
            indent += 2
            for child in element.getchildren():
                serializeElement(child, indent)
        # tail text is rendered at the parent's indent level
        if hasattr(element, "tail") and element.tail:
            rv.append("|%s\"%s\"" %(' '*(indent-2), element.tail))
    serializeElement(element, 0)

    if finalText is not None:
        rv.append("|%s\"%s\""%(' '*2, finalText))

    return "\n".join(rv)

def tostring(element):
    """Serialize an element and its child nodes to a string"""
    rv = []
    finalText = None
    def serializeElement(element):
        if not hasattr(element, "tag"):
            # ElementTree case: emit the doctype, then the root element.
            if element.docinfo.internalDTD:
                if element.docinfo.doctype:
                    dtd_str = element.docinfo.doctype
                else:
                    dtd_str = "<!DOCTYPE %s>"%element.docinfo.root_name
                rv.append(dtd_str)
            serializeElement(element.getroot())
        elif type(element.tag) == type(etree.Comment):
            rv.append("<!--%s-->"%(element.text,))
        else:
            #This is assumed to be an ordinary element
            if not element.attrib:
                rv.append("<%s>"%(element.tag,))
            else:
                attr = " ".join(["%s=\"%s\""%(name, value)
                                 for name, value in element.attrib.iteritems()])
                rv.append("<%s %s>"%(element.tag, attr))
            if element.text:
                rv.append(element.text)
            for child in element.getchildren():
                serializeElement(child)
            rv.append("</%s>"%(element.tag,))
        if hasattr(element, "tail") and element.tail:
            rv.append(element.tail)

    serializeElement(element)

    if finalText is not None:
        # NOTE(review): this format string has one %s placeholder but two
        # arguments, so it would raise TypeError if reached. It is currently
        # dead code (finalText is never reassigned), but should be fixed --
        # compare testSerializer's "|%s\"%s\"" form.
        rv.append("%s\""%(' '*2, finalText))

    return "".join(rv)

class TreeBuilder(_base.TreeBuilder):
    documentClass = Document
    doctypeClass = DocumentType
    elementClass = None
    commentClass = None
    fragmentClass = Document

    def __init__(self, fullTree = False):
        builder = etree_builders.getETreeModule(etree, fullTree=fullTree)
        self.elementClass = builder.Element
        self.commentClass = builder.Comment
        #self.fragmentClass = builder.DocumentFragment
        _base.TreeBuilder.__init__(self)

    def reset(self):
        # Comments seen before the root element are buffered (see
        # insertCommentInitial) and attached in insertRoot.
        _base.TreeBuilder.reset(self)
        self.insertComment = self.insertCommentInitial
        self.initial_comments = []
        self.doctype = None

    def testSerializer(self, element):
        return testSerializer(element)

    def getDocument(self):
        # NOTE(review): this checks the *module-level* fullTree flag, not the
        # fullTree argument given to __init__ -- confirm that is intended.
        if fullTree:
            return self.document._elementTree
        else:
            return self.document._elementTree.getroot()

    def getFragment(self):
        # Flatten the root element's text, children, and tail into a list.
        fragment = []
        element = self.openElements[0]._element
        if element.text:
            fragment.append(element.text)
        fragment.extend(element.getchildren())
        if element.tail:
            fragment.append(element.tail)
        return fragment

    def insertDoctype(self, name, publicId, systemId):
        if not name:
            warnings.warn("lxml cannot represent null doctype",
                          DataLossWarning)
        doctype = self.doctypeClass(name, publicId, systemId)
        self.doctype = doctype

    def insertCommentInitial(self, data, parent=None):
        # Buffer pre-root comments; they are re-attached in insertRoot.
        self.initial_comments.append(data)

    def insertRoot(self, name):
        """Create the document root"""
        #Because of the way libxml2 works, it doesn't seem to be possible to
        #alter informatioN like the doctype after the tree has been parsed.
        #Therefore we need to use the built-in parser to create our iniial
        #tree, after which we can add elements like normal
        docStr = ""
        if self.doctype and self.doctype.name:
            docStr += "<!DOCTYPE %s"%self.doctype.name
            if (self.doctype.publicId is not None or
                self.doctype.systemId is not None):
                docStr += ' PUBLIC "%s" "%s"'%(self.doctype.publicId or "",
                                               self.doctype.systemId or "")
            docStr += ">"
        docStr += "<html></html>"

        try:
            root = etree.fromstring(docStr)
        except etree.XMLSyntaxError:
            print docStr
            raise

        #Append the initial comments:
        for comment_data in self.initial_comments:
            root.addprevious(etree.Comment(comment_data))

        #Create the root document and add the ElementTree to it
        self.document = self.documentClass()
        self.document._elementTree = root.getroottree()

        #Add the root element to the internal child/open data structures
        root_element = self.elementClass(name)
        root_element._element = root
        self.document._childNodes.append(root_element)
        self.openElements.append(root_element)

        #Reset to the default insert comment function
        self.insertComment = super(TreeBuilder, self).insertComment
""" Generate samples of synthetic data sets. """ # Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel, # G. Louppe # License: BSD 3 clause from itertools import product import numbers import numpy as np from scipy import linalg from ..utils import array2d, check_random_state from ..utils import shuffle as util_shuffle def make_classification(n_samples=100, n_features=20, n_informative=2, n_redundant=2, n_repeated=0, n_classes=2, n_clusters_per_class=2, weights=None, flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, shuffle=True, random_state=None): """Generate a random n-class classification problem. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=20) The total number of features. These comprise `n_informative` informative features, `n_redundant` redundant features, `n_repeated` dupplicated features and `n_features-n_informative-n_redundant- n_repeated` useless features drawn at random. n_informative : int, optional (default=2) The number of informative features. Each class is composed of a number of gaussian clusters each located around the vertices of a hypercube in a subspace of dimension `n_informative`. For each cluster, informative features are drawn independently from N(0, 1) and then randomly linearly combined in order to add covariance. The clusters are then placed on the vertices of the hypercube. n_redundant : int, optional (default=2) The number of redundant features. These features are generated as random linear combinations of the informative features. n_repeated : int, optional (default=2) The number of dupplicated features, drawn randomly from the informative and the redundant features. n_classes : int, optional (default=2) The number of classes (or labels) of the classification problem. n_clusters_per_class : int, optional (default=2) The number of clusters per class. 
weights : list of floats or None (default=None) The proportions of samples assigned to each class. If None, then classes are balanced. Note that if `len(weights) == n_classes - 1`, then the last class weight is automatically inferred. flip_y : float, optional (default=0.01) The fraction of samples whose class are randomly exchanged. class_sep : float, optional (default=1.0) The factor multiplying the hypercube dimension. hypercube : boolean, optional (default=True) If True, the clusters are put on the vertices of a hypercube. If False, the clusters are put on the vertices of a random polytope. shift : float or None, optional (default=0.0) Shift all features by the specified value. If None, then features are shifted by a random value drawn in [-class_sep, class_sep]. scale : float or None, optional (default=1.0) Multiply all features by the specified value. If None, then features are scaled by a random value drawn in [1, 100]. Note that scaling happens after shifting. shuffle : boolean, optional (default=True) Shuffle the samples and the features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. y : array of shape [n_samples] The integer labels for class membership of each sample. Notes ----- The algorithm is adapted from Guyon [1] and was designed to generate the "Madelon" dataset. References ---------- .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable selection benchmark", 2003. 
""" generator = check_random_state(random_state) # Count features, clusters and samples if n_informative + n_redundant + n_repeated > n_features: raise ValueError("Number of informative, redundant and repeated " "features must sum to less than the number of total features") if 2 ** n_informative < n_classes * n_clusters_per_class: raise ValueError("n_classes * n_clusters_per_class must" "be smaller or equal 2 ** n_informative") if weights and len(weights) not in [n_classes, n_classes - 1]: raise ValueError("Weights specified but incompatible with number " "of classes.") n_useless = n_features - n_informative - n_redundant - n_repeated n_clusters = n_classes * n_clusters_per_class if weights and len(weights) == (n_classes - 1): weights.append(1.0 - sum(weights)) if weights is None: weights = [1.0 / n_classes] * n_classes weights[-1] = 1.0 - sum(weights[:-1]) n_samples_per_cluster = [] for k in xrange(n_clusters): n_samples_per_cluster.append(int(n_samples * weights[k % n_classes] / n_clusters_per_class)) for i in xrange(n_samples - sum(n_samples_per_cluster)): n_samples_per_cluster[i % n_clusters] += 1 # Intialize X and y X = np.zeros((n_samples, n_features)) y = np.zeros(n_samples) # Build the polytope C = np.array(list(product([-class_sep, class_sep], repeat=n_informative))) if not hypercube: for k in xrange(n_clusters): C[k, :] *= generator.rand() for f in xrange(n_informative): C[:, f] *= generator.rand() generator.shuffle(C) # Loop over all clusters pos = 0 pos_end = 0 for k in xrange(n_clusters): # Number of samples in cluster k n_samples_k = n_samples_per_cluster[k] # Define the range of samples pos = pos_end pos_end = pos + n_samples_k # Assign labels y[pos:pos_end] = k % n_classes # Draw features at random X[pos:pos_end, :n_informative] = generator.randn(n_samples_k, n_informative) # Multiply by a random matrix to create co-variance of the features A = 2 * generator.rand(n_informative, n_informative) - 1 X[pos:pos_end, :n_informative] = 
np.dot(X[pos:pos_end, :n_informative], A) # Shift the cluster to a vertice X[pos:pos_end, :n_informative] += np.tile(C[k, :], (n_samples_k, 1)) # Create redundant features if n_redundant > 0: B = 2 * generator.rand(n_informative, n_redundant) - 1 X[:, n_informative:n_informative + n_redundant] = \ np.dot(X[:, :n_informative], B) # Repeat some features if n_repeated > 0: n = n_informative + n_redundant indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.int) X[:, n:n + n_repeated] = X[:, indices] # Fill useless features X[:, n_features - n_useless:] = generator.randn(n_samples, n_useless) # Randomly flip labels if flip_y >= 0.0: for i in xrange(n_samples): if generator.rand() < flip_y: y[i] = generator.randint(n_classes) # Randomly shift and scale constant_shift = shift is not None constant_scale = scale is not None for f in xrange(n_features): if not constant_shift: shift = (2 * generator.rand() - 1) * class_sep if not constant_scale: scale = 1 + 100 * generator.rand() X[:, f] += shift X[:, f] *= scale # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = range(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] return X, y def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, random_state=None): """Generate a random multilabel classification problem. For each sample, the generative process is: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is never zero or more than `n_classes`, and that the document length is never zero. Likewise, we reject classes which have already been chosen. Parameters ---------- n_samples : int, optional (default=100) The number of samples. 
n_features : int, optional (default=20) The total number of features. n_classes : int, optional (default=5) The number of classes of the classification problem. n_labels : int, optional (default=2) The average number of labels per instance. Number of labels follows a Poisson distribution that never takes the value 0. length : int, optional (default=50) Sum of the features (number of words if documents). allow_unlabeled : bool, optional (default=True) If ``True``, some instances might not belong to any class. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. Y : list of tuples The label sets. """ generator = check_random_state(random_state) p_c = generator.rand(n_classes) p_c /= p_c.sum() p_w_c = generator.rand(n_features, n_classes) p_w_c /= np.sum(p_w_c, axis=0) def sample_example(): _, n_classes = p_w_c.shape # pick a nonzero number of labels per document by rejection sampling n = n_classes + 1 while (not allow_unlabeled and n == 0) or n > n_classes: n = generator.poisson(n_labels) # pick n classes y = [] while len(y) != n: # pick a class with probability P(c) c = generator.multinomial(1, p_c).argmax() if not c in y: y.append(c) # pick a non-zero document length by rejection sampling k = 0 while k == 0: k = generator.poisson(length) # generate a document of length k words x = np.zeros(n_features, dtype=int) for i in range(k): if len(y) == 0: # if sample does not belong to any class, generate noise word w = generator.randint(n_features) else: # pick a class and generate an appropriate word c = y[generator.randint(len(y))] w = generator.multinomial(1, p_w_c[:, c]).argmax() x[w] += 1 return x, y X, Y = zip(*[sample_example() for i in 
range(n_samples)]) return np.array(X, dtype=np.float64), Y def make_hastie_10_2(n_samples=12000, random_state=None): """Generates data for binary classification used in Hastie et al. 2009, Example 10.2. The ten features are standard independent Gaussian and the target ``y`` is defined by:: y[i] = 1 if np.sum(X[i] > 9.34 else -1 Parameters ---------- n_samples : int, optional (default=12000) The number of samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 10] The input samples. y : array of shape [n_samples] The output values. **References**: .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical Learning Ed. 2", Springer, 2009. """ rs = check_random_state(random_state) shape = (n_samples, 10) X = rs.normal(size=shape).reshape(shape) y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64) y[y == 0.0] = -1.0 return X, y def make_regression(n_samples=100, n_features=100, n_informative=10, n_targets=1, bias=0.0, effective_rank=None, tail_strength=0.5, noise=0.0, shuffle=True, coef=False, random_state=None): """Generate a random regression problem. The input set can either be well conditioned (by default) or have a low rank-fat tail singular profile. See the `make_low_rank_matrix` for more details. The output is generated by applying a (potentially biased) random linear regression model with `n_informative` nonzero regressors to the previously generated input and some gaussian centered noise with some adjustable scale. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. 
n_informative : int, optional (default=10) The number of informative features, i.e., the number of features used to build the linear model used to generate the output. n_targets : int, optional (default=1) The number of regression targets, i.e., the dimension of the y output vector associated with a sample. By default, the output is a scalar. bias : float, optional (default=0.0) The bias term in the underlying linear model. effective_rank : int or None, optional (default=None) if not None: The approximate number of singular vectors required to explain most of the input data by linear combinations. Using this kind of singular spectrum in the input allows the generator to reproduce the correlations often observed in practice. if None: The input set is well conditioned, centered and gaussian with unit variance. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile if `effective_rank` is not None. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. shuffle : boolean, optional (default=True) Shuffle the samples and the features. coef : boolean, optional (default=False) If True, the coefficients of the underlying linear model are returned. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] or [n_samples, n_targets] The output values. coef : array of shape [n_features] or [n_features, n_targets], optional The coefficient of the underlying linear model. It is returned only if coef is True. 
""" generator = check_random_state(random_state) if effective_rank is None: # Randomly generate a well conditioned input set X = generator.randn(n_samples, n_features) else: # Randomly generate a low rank, fat tail input set X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features, effective_rank=effective_rank, tail_strength=tail_strength, random_state=generator) # Generate a ground truth model with only n_informative features being non # zeros (the other features are not correlated to y and should be ignored # by a sparsifying regularizers such as L1 or elastic net) ground_truth = np.zeros((n_features, n_targets)) ground_truth[:n_informative, :] = 100 * generator.rand(n_informative, n_targets) y = np.dot(X, ground_truth) + bias # Add noise if noise > 0.0: y += generator.normal(scale=noise, size=y.shape) # Randomly permute samples and features if shuffle: X, y = util_shuffle(X, y, random_state=generator) indices = range(n_features) generator.shuffle(indices) X[:, :] = X[:, indices] ground_truth = ground_truth[indices] y = np.squeeze(y) if coef: return X, y, np.squeeze(ground_truth) else: return X, y def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None, factor=.8): """Make a large circle containing a smaller circle in 2d. A simple toy dataset to visualize clustering and classification algorithms. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle: bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. factor : double < 1 (default=.8) Scale factor between inner and outer circle. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" if factor > 1 or factor < 0: raise ValueError("'factor' has to be between 0 and 1.") generator = check_random_state(random_state) # so as not to have the first point = last point, we add one and then # remove it. linspace = np.linspace(0, 2 * np.pi, n_samples / 2 + 1)[:-1] outer_circ_x = np.cos(linspace) outer_circ_y = np.sin(linspace) inner_circ_x = outer_circ_x * factor inner_circ_y = outer_circ_y * factor X = np.vstack((np.append(outer_circ_x, inner_circ_x),\ np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples / 2), np.ones(n_samples / 2)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if not noise is None: X += generator.normal(scale=noise, size=X.shape) return X, y.astype(np.int) def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None): """Make two interleaving half circles A simple toy dataset to visualize clustering and classification algorithms. Parameters ---------- n_samples : int, optional (default=100) The total number of points generated. shuffle : bool, optional (default=True) Whether to shuffle the samples. noise : double or None (default=None) Standard deviation of Gaussian noise added to the data. Returns ------- X : array of shape [n_samples, 2] The generated samples. y : array of shape [n_samples] The integer labels (0 or 1) for class membership of each sample. 
""" n_samples_out = n_samples / 2 n_samples_in = n_samples - n_samples_out generator = check_random_state(random_state) outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out)) outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out)) inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in)) inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5 X = np.vstack((np.append(outer_circ_x, inner_circ_x),\ np.append(outer_circ_y, inner_circ_y))).T y = np.hstack([np.zeros(n_samples_in), np.ones(n_samples_out)]) if shuffle: X, y = util_shuffle(X, y, random_state=generator) if not noise is None: X += generator.normal(scale=noise, size=X.shape) return X, y.astype(np.int) def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0, center_box=(-10.0, 10.0), shuffle=True, random_state=None): """Generate isotropic Gaussian blobs for clustering. Parameters ---------- n_samples : int, optional (default=100) The total number of points equally divided among clusters. n_features : int, optional (default=2) The number of features for each sample. centers : int or array of shape [n_centers, n_features], optional (default=3) The number of centers to generate, or the fixed center locations. cluster_std: float or sequence of floats, optional (default=1.0) The standard deviation of the clusters. center_box: pair of floats (min, max), optional (default=(-10.0, 10.0)) The bounding box for each cluster center when centers are generated at random. shuffle : boolean, optional (default=True) Shuffle the samples. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The generated samples. 
y : array of shape [n_samples] The integer labels for cluster membership of each sample. Examples -------- >>> from sklearn.datasets.samples_generator import make_blobs >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2, ... random_state=0) >>> print X.shape (10, 2) >>> y array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0]) """ generator = check_random_state(random_state) if isinstance(centers, numbers.Integral): centers = generator.uniform(center_box[0], center_box[1], size=(centers, n_features)) else: centers = array2d(centers) n_features = centers.shape[1] X = [] y = [] n_centers = centers.shape[0] n_samples_per_center = [int(n_samples // n_centers)] * n_centers for i in xrange(n_samples % n_centers): n_samples_per_center[i] += 1 for i, n in enumerate(n_samples_per_center): X.append(centers[i] + generator.normal(scale=cluster_std, size=(n, n_features))) y += [i] * n X = np.concatenate(X) y = np.array(y) if shuffle: indices = np.arange(n_samples) generator.shuffle(indices) X = X[indices] y = y[indices] return X, y def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None): """Generate the "Friedman #1" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are independent features uniformly distributed on the interval [0, 1]. The output `y` is created according to the formula:: y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1). Out of the `n_features` features, only 5 are actually used to compute `y`. The remaining features are independent of `y`. The number of features has to be >= 5. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. Should be at least 5. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ if n_features < 5: raise ValueError("n_features must be at least five.") generator = check_random_state(random_state) X = generator.rand(n_samples, n_features) y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \ + 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples) return X, y def make_friedman2(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman #2" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \ - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1). Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. 
Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. """ generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \ + noise * generator.randn(n_samples) return X, y def make_friedman3(n_samples=100, noise=0.0, random_state=None): """Generate the "Friedman #3" regression problem This dataset is described in Friedman [1] and Breiman [2]. Inputs `X` are 4 independent features uniformly distributed on the intervals:: 0 <= X[:, 0] <= 100, 40 * pi <= X[:, 1] <= 560 * pi, 0 <= X[:, 2] <= 1, 1 <= X[:, 3] <= 11. The output `y` is created according to the formula:: y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \ / X[:, 0]) + noise * N(0, 1). Parameters ---------- n_samples : int, optional (default=100) The number of samples. noise : float, optional (default=0.0) The standard deviation of the gaussian noise applied to the output. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 4] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals of Statistics 19 (1), pages 1-67, 1991. .. [2] L. Breiman, "Bagging predictors", Machine Learning 24, pages 123-140, 1996. 
""" generator = check_random_state(random_state) X = generator.rand(n_samples, 4) X[:, 0] *= 100 X[:, 1] *= 520 * np.pi X[:, 1] += 40 * np.pi X[:, 3] *= 10 X[:, 3] += 1 y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \ + noise * generator.randn(n_samples) return X, y def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10, tail_strength=0.5, random_state=None): """Generate a mostly low rank matrix with bell-shaped singular values Most of the variance can be explained by a bell-shaped curve of width effective_rank: the low rank part of the singular values profile is:: (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2) The remaining singular values' tail is fat, decreasing as:: tail_strength * exp(-0.1 * i / effective_rank). The low rank part of the profile can be considered the structured signal part of the data while the tail can be considered the noisy part of the data that cannot be summarized by a low number of linear components (singular vectors). This kind of singular profiles is often seen in practice, for instance: - gray level pictures of faces - TF-IDF vectors of text documents crawled from the web Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=100) The number of features. effective_rank : int, optional (default=10) The approximate number of singular vectors required to explain most of the data by linear combinations. tail_strength : float between 0.0 and 1.0, optional (default=0.5) The relative importance of the fat noisy tail of the singular values profile. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The matrix. 
""" generator = check_random_state(random_state) n = min(n_samples, n_features) # Random (ortho normal) vectors from ..utils.fixes import qr_economic u, _ = qr_economic(generator.randn(n_samples, n)) v, _ = qr_economic(generator.randn(n_features, n)) # Index of the singular values singular_ind = np.arange(n, dtype=np.float64) # Build the singular profile by assembling signal and noise components low_rank = (1 - tail_strength) * \ np.exp(-1.0 * (singular_ind / effective_rank) ** 2) tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank) s = np.identity(n) * (low_rank + tail) return np.dot(np.dot(u, s), v.T) def make_sparse_coded_signal(n_samples, n_components, n_features, n_nonzero_coefs, random_state=None): """Generate a signal as a sparse combination of dictionary elements. Returns a matrix Y = DX, such as D is (n_features, n_components), X is (n_components, n_samples) and each column of X has exactly n_nonzero_coefs non-zero elements. Parameters ---------- n_samples : int number of samples to generate n_components: int, number of components in the dictionary n_features : int number of features of the dataset to generate n_nonzero_coefs : int number of active (non-zero) coefficients in each sample random_state: int or RandomState instance, optional (default=None) seed used by the pseudo random number generator Returns ------- data: array of shape [n_features, n_samples] The encoded signal (Y). dictionary: array of shape [n_features, n_components] The dictionary with normalized components (D). code: array of shape [n_components, n_samples] The sparse code such that each column of this matrix has exactly n_nonzero_coefs non-zero items (X). 
""" generator = check_random_state(random_state) # generate dictionary D = generator.randn(n_features, n_components) D /= np.sqrt(np.sum((D ** 2), axis=0)) # generate code X = np.zeros((n_components, n_samples)) for i in xrange(n_samples): idx = np.arange(n_components) generator.shuffle(idx) idx = idx[:n_nonzero_coefs] X[idx, i] = generator.randn(n_nonzero_coefs) # encode signal Y = np.dot(D, X) return map(np.squeeze, (Y, D, X)) def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None): """Generate a random regression problem with sparse uncorrelated design This dataset is described in Celeux et al [1]. as:: X ~ N(0, 1) y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3] Only the first 4 features are informative. The remaining features are useless. Parameters ---------- n_samples : int, optional (default=100) The number of samples. n_features : int, optional (default=10) The number of features. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, n_features] The input samples. y : array of shape [n_samples] The output values. References ---------- .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert, "Regularization in regression: comparing Bayesian and frequentist methods in a poorly informative situation", 2009. """ generator = check_random_state(random_state) X = generator.normal(loc=0, scale=1, size=(n_samples, n_features)) y = generator.normal(loc=(X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]), scale=np.ones(n_samples)) return X, y def make_spd_matrix(n_dim, random_state=None): """Generate a random symmetric, positive-definite matrix. Parameters ---------- n_dim : int The matrix dimension. 
random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_dim, n_dim] The random symmetric, positive-definite matrix. """ generator = check_random_state(random_state) A = generator.rand(n_dim, n_dim) U, s, V = linalg.svd(np.dot(A.T, A)) X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V) return X def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False, smallest_coef=.1, largest_coef=.9, random_state=None): """Generate a sparse symetric definite positive matrix. Parameters ---------- dim: integer, optional (default=1) The size of the random (matrix to generate. alpha: float between 0 and 1, optional (default=0.95) The probability that a coefficient is non zero (see notes). random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- prec: array of shape = [dim, dim] Notes ----- The sparsity is actually imposed on the cholesky factor of the matrix. Thus alpha does not translate directly into the filling fraction of the matrix itself. 
""" random_state = check_random_state(random_state) chol = -np.eye(dim) aux = random_state.rand(dim, dim) aux[aux < alpha] = 0 aux[aux > alpha] = (smallest_coef + (largest_coef - smallest_coef) * random_state.rand(np.sum(aux > alpha))) aux = np.tril(aux, k=-1) # Permute the lines: we don't want to have assymetries in the final # SPD matrix permutation = random_state.permutation(dim) aux = aux[permutation].T[permutation] chol += aux prec = np.dot(chol.T, chol) if norm_diag: d = np.diag(prec) d = 1. / np.sqrt(d) prec *= d prec *= d[:, np.newaxis] return prec def make_swiss_roll(n_samples=100, noise=0.0, random_state=None): """Generate a swiss roll dataset. Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. Notes ----- The algorithm is from Marsland [1]. References ---------- .. [1] S. Marsland, "Machine Learning: An Algorithmic Perpsective", Chapter 10, 2009. http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py """ generator = check_random_state(random_state) t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples)) x = t * np.cos(t) y = 21 * generator.rand(1, n_samples) z = t * np.sin(t) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t def make_s_curve(n_samples=100, noise=0.0, random_state=None): """Generate an S curve dataset. 
Parameters ---------- n_samples : int, optional (default=100) The number of sample points on the S curve. noise : float, optional (default=0.0) The standard deviation of the gaussian noise. random_state : int, RandomState instance or None, optional (default=None) If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Returns ------- X : array of shape [n_samples, 3] The points. t : array of shape [n_samples] The univariate position of the sample according to the main dimension of the points in the manifold. """ generator = check_random_state(random_state) t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5) x = np.sin(t) y = 2.0 * generator.rand(1, n_samples) z = np.sign(t) * (np.cos(t) - 1) X = np.concatenate((x, y, z)) X += noise * generator.randn(3, n_samples) X = X.T t = np.squeeze(t) return X, t
''' Provide a base class for objects that can have declarative, typed, serializable properties. .. note:: These classes form part of the very low-level machinery that implements the Bokeh model and property system. It is unlikely that any of these classes or their methods will be applicable to any standard usage or to anyone who is not directly developing on Bokeh's own infrastructure. ''' from __future__ import absolute_import import logging log = logging.getLogger(__name__) import difflib import inspect from operator import itemgetter import sys from warnings import warn from six import StringIO from ..util.dependencies import import_optional from ..util.future import with_metaclass from ..util.string import nice_join from .property.containers import PropertyValueContainer from .property.descriptor_factory import PropertyDescriptorFactory from .property.override import Override _ABSTRACT_ADMONITION = ''' .. note:: This is an abstract base class used to help organize the hierarchy of Bokeh model types. **It is not useful to instantiate on its own.** ''' _EXAMPLE_TEMPLATE = ''' Example ------- .. bokeh-plot:: ../%(path)s :source-position: below ''' def abstract(cls): ''' A decorator to mark abstract base classes derived from |HasProps|. ''' if not issubclass(cls, HasProps): raise TypeError("%s is not a subclass of HasProps" % cls.__name__) # running python with -OO will discard docstrings -> __doc__ is None if cls.__doc__ is not None: cls.__doc__ += _ABSTRACT_ADMONITION return cls class MetaHasProps(type): ''' Specialize the construction of |HasProps| classes. This class is a `metaclass`_ for |HasProps| that is responsible for creating and adding the |PropertyDescriptor| instances that delegate validation and serialization to |Property| attributes. .. 
_metaclass: https://docs.python.org/3/reference/datamodel.html#metaclasses ''' def __new__(meta_cls, class_name, bases, class_dict): ''' ''' names_with_refs = set() container_names = set() # Now handle all the Override overridden_defaults = {} for name, prop in class_dict.items(): if not isinstance(prop, Override): continue if prop.default_overridden: overridden_defaults[name] = prop.default for name, default in overridden_defaults.items(): del class_dict[name] generators = dict() for name, generator in class_dict.items(): if isinstance(generator, PropertyDescriptorFactory): generators[name] = generator elif isinstance(generator, type) and issubclass(generator, PropertyDescriptorFactory): # Support the user adding a property without using parens, # i.e. using just the Property subclass instead of an # instance of the subclass generators[name] = generator.autocreate() dataspecs = {} new_class_attrs = {} for name, generator in generators.items(): prop_descriptors = generator.make_descriptors(name) replaced_self = False for prop_descriptor in prop_descriptors: if prop_descriptor.name in generators: if generators[prop_descriptor.name] is generator: # a generator can replace itself, this is the # standard case like `foo = Int()` replaced_self = True prop_descriptor.add_prop_descriptor_to_class(class_name, new_class_attrs, names_with_refs, container_names, dataspecs) else: # if a generator tries to overwrite another # generator that's been explicitly provided, # use the prop that was manually provided # and ignore this one. 
pass else: prop_descriptor.add_prop_descriptor_to_class(class_name, new_class_attrs, names_with_refs, container_names, dataspecs) # if we won't overwrite ourselves anyway, delete the generator if not replaced_self: del class_dict[name] class_dict.update(new_class_attrs) class_dict["__properties__"] = set(new_class_attrs) class_dict["__properties_with_refs__"] = names_with_refs class_dict["__container_props__"] = container_names if len(overridden_defaults) > 0: class_dict["__overridden_defaults__"] = overridden_defaults if dataspecs: class_dict["__dataspecs__"] = dataspecs if "__example__" in class_dict: path = class_dict["__example__"] # running python with -OO will discard docstrings -> __doc__ is None if "__doc__" in class_dict and class_dict["__doc__"] is not None: class_dict["__doc__"] += _EXAMPLE_TEMPLATE % dict(path=path) return super(MetaHasProps, meta_cls).__new__(meta_cls, class_name, bases, class_dict) def __init__(cls, class_name, bases, nmspc): if class_name == 'HasProps': return # Check for improperly overriding a Property attribute. # Overriding makes no sense except through the Override # class which can be used to tweak the default. # Historically code also tried changing the Property's # type or changing from Property to non-Property: these # overrides are bad conceptually because the type of a # read-write property is invariant. cls_attrs = cls.__dict__.keys() # we do NOT want inherited attrs here for attr in cls_attrs: for base in bases: if issubclass(base, HasProps) and attr in base.properties(): warn(('Property "%s" in class %s was overridden by a class attribute ' + \ '"%s" in class %s; it never makes sense to do this. 
' + \ 'Either %s.%s or %s.%s should be removed, or %s.%s should not ' + \ 'be a Property, or use Override(), depending on the intended effect.') % (attr, base.__name__, attr, class_name, base.__name__, attr, class_name, attr, base.__name__, attr), RuntimeWarning, stacklevel=2) if "__overridden_defaults__" in cls.__dict__: our_props = cls.properties() for key in cls.__dict__["__overridden_defaults__"].keys(): if key not in our_props: warn(('Override() of %s in class %s does not override anything.') % (key, class_name), RuntimeWarning, stacklevel=2) def accumulate_from_superclasses(cls, propname): ''' Traverse the class hierarchy and accumulate the special sets of names ``MetaHasProps`` stores on classes: Args: name (str) : name of the special attribute to collect. Typically meaningful values are: ``__container_props__``, ``__properties__``, ``__properties_with_refs__`` ''' cachename = "__cached_all" + propname # we MUST use cls.__dict__ NOT hasattr(). hasattr() would also look at base # classes, and the cache must be separate for each class if cachename not in cls.__dict__: s = set() for c in inspect.getmro(cls): if issubclass(c, HasProps) and hasattr(c, propname): base = getattr(c, propname) s.update(base) setattr(cls, cachename, s) return cls.__dict__[cachename] def accumulate_dict_from_superclasses(cls, propname): ''' Traverse the class hierarchy and accumulate the special dicts ``MetaHasProps`` stores on classes: Args: name (str) : name of the special attribute to collect. Typically meaningful values are: ``__dataspecs__``, ``__overridden_defaults__`` ''' cachename = "__cached_all" + propname # we MUST use cls.__dict__ NOT hasattr(). 
hasattr() would also look at base # classes, and the cache must be separate for each class if cachename not in cls.__dict__: d = dict() for c in inspect.getmro(cls): if issubclass(c, HasProps) and hasattr(c, propname): base = getattr(c, propname) for k,v in base.items(): if k not in d: d[k] = v setattr(cls, cachename, d) return cls.__dict__[cachename] class HasProps(with_metaclass(MetaHasProps, object)): ''' Base class for all class types that have Bokeh properties. ''' def __init__(self, **properties): ''' ''' super(HasProps, self).__init__() self._property_values = dict() self._unstable_default_values = dict() self._unstable_themed_values = dict() for name, value in properties.items(): setattr(self, name, value) def __setattr__(self, name, value): ''' Intercept attribute setting on HasProps in order to special case a few situations: * short circuit all property machinery for ``_private`` attributes * suggest similar attribute names on attribute errors Args: name (str) : the name of the attribute to set on this object value (obj) : the value to set Returns: None ''' # self.properties() below can be expensive so avoid it # if we're just setting a private underscore field if name.startswith("_"): super(HasProps, self).__setattr__(name, value) return props = sorted(self.properties()) descriptor = getattr(self.__class__, name, None) if name in props or (descriptor is not None and descriptor.fset is not None): super(HasProps, self).__setattr__(name, value) else: matches, text = difflib.get_close_matches(name.lower(), props), "similar" if not matches: matches, text = props, "possible" raise AttributeError("unexpected attribute '%s' to %s, %s attributes are %s" % (name, self.__class__.__name__, text, nice_join(matches))) def __str__(self): return "%s(...)" % self.__class__.__name__ __repr__ = __str__ def equals(self, other): ''' Structural equality of models. 
Args: other (HasProps) : the other instance to compare to Returns: True, if properties are structurally equal, otherwise False ''' # NOTE: don't try to use this to implement __eq__. Because then # you will be tempted to implement __hash__, which would interfere # with mutability of models. However, not implementing __hash__ # will make bokeh unusable in Python 3, where proper implementation # of __hash__ is required when implementing __eq__. if not isinstance(other, self.__class__): return False else: return self.properties_with_values() == other.properties_with_values() def set_from_json(self, name, json, models=None, setter=None): ''' Set a property value on this object from JSON. Args: name: (str) : name of the attribute to set json: (JSON-value) : value to set to the attribute to models (dict or None, optional) : Mapping of model ids to models (default: None) This is needed in cases where the attributes to update also have values that have references. setter(ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None ''' if name in self.properties(): log.trace("Patching attribute %r of %r with %r", name, self, json) descriptor = self.lookup(name) descriptor.set_from_json(self, json, models, setter) else: log.warn("JSON had attr %r on obj %r, which is a client-only or invalid attribute that shouldn't have been sent", name, self) def update(self, **kwargs): ''' Updates the object's properties from the given keyword arguments. Returns: None Examples: The following are equivalent: .. 
code-block:: python from bokeh.models import Range1d r = Range1d # set properties individually: r.start = 10 r.end = 20 # update properties together: r.update(start=10, end=20) ''' for k,v in kwargs.items(): setattr(self, k, v) def update_from_json(self, json_attributes, models=None, setter=None): ''' Updates the object's properties from a JSON attributes dictionary. Args: json_attributes: (JSON-dict) : attributes and values to update models (dict or None, optional) : Mapping of model ids to models (default: None) This is needed in cases where the attributes to update also have values that have references. setter(ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None ''' for k, v in json_attributes.items(): self.set_from_json(k, v, models, setter) @classmethod def lookup(cls, name): ''' Find the ``PropertyDescriptor`` for a Bokeh property on a class, given the property name. Args: name (str) : name of the property to search for Returns: PropertyDescriptor : descriptor for property named ``name`` ''' return getattr(cls, name) @classmethod def properties_with_refs(cls): ''' Collect the names of all properties on this class that also have references. This method *always* traverses the class hierarchy and includes properties defined on any parent classes. Returns: set[str] : names of properties that have references ''' return accumulate_from_superclasses(cls, "__properties_with_refs__") @classmethod def properties_containers(cls): ''' Collect the names of all container properties on this class. 
This method *always* traverses the class hierarchy and includes properties defined on any parent classes. Returns: set[str] : names of container properties ''' return accumulate_from_superclasses(cls, "__container_props__") @classmethod def properties(cls, with_bases=True): ''' Collect the names of properties on this class. This method *optionally* traverses the class hierarchy and includes properties defined on any parent classes. Args: with_bases (bool, optional) : Whether to include properties defined on parent classes in the results. (default: True) Returns: set[str] : property names ''' if with_bases: return accumulate_from_superclasses(cls, "__properties__") else: return set(cls.__properties__) @classmethod def dataspecs(cls): ''' Collect the names of all ``DataSpec`` properties on this class. This method *always* traverses the class hierarchy and includes properties defined on any parent classes. Returns: set[str] : names of DataSpec properties ''' return set(cls.dataspecs_with_props().keys()) @classmethod def dataspecs_with_props(cls): ''' Collect a dict mapping the names of all ``DataSpec`` properties on this class to the associated properties. This method *always* traverses the class hierarchy and includes properties defined on any parent classes. Returns: dict[str, DataSpec] : mapping of names and ``DataSpec`` properties ''' return accumulate_dict_from_superclasses(cls, "__dataspecs__") def properties_with_values(self, include_defaults=True): ''' Collect a dict mapping property names to their values. This method *always* traverses the class hierarchy and includes properties defined on any parent classes. Non-serializable properties are skipped and property values are in "serialized" format which may be slightly different from the values you would normally read from the properties; the intent of this method is to return the information needed to losslessly reconstitute the object instance. 
Args: include_defaults (bool, optional) : Whether to include properties that haven't been explicitly set since the object was created. (default: True) Returns: dict : mapping from property names to their values ''' return self.query_properties_with_values(lambda prop: prop.serialized, include_defaults) @classmethod def _overridden_defaults(cls): ''' Returns a dictionary of defaults that have been overridden. This is an implementation detail of Property. ''' return accumulate_dict_from_superclasses(cls, "__overridden_defaults__") def query_properties_with_values(self, query, include_defaults=True): ''' Query the properties values of |HasProps| instances with a predicate. Args: query (callable) : A callable that accepts property descriptors and returns True or False include_defaults (bool, optional) : Whether to include properties that have not been explicitly set by a user (default: True) Returns: dict : mapping of property names and values for matching properties ''' themed_keys = set() result = dict() if include_defaults: keys = self.properties() else: # TODO (bev) For now, include unstable default values. Things rely on Instances # always getting serialized, even defaults, and adding unstable defaults here # accomplishes that. Unmodified defaults for property value containers will be # weeded out below. keys = set(self._property_values.keys()) | set(self._unstable_default_values.keys()) if self.themed_values(): themed_keys = set(self.themed_values().keys()) keys |= themed_keys for key in keys: descriptor = self.lookup(key) if not query(descriptor): continue value = descriptor.serializable_value(self) if not include_defaults and key not in themed_keys: if isinstance(value, PropertyValueContainer) and key in self._unstable_default_values: continue result[key] = value return result def themed_values(self): ''' Get any theme-provided overrides. Results are returned as a dict from property name to value, or ``None`` if no theme overrides any values for this instance. 
Returns: dict or None ''' return getattr(self, '__themed_values__', None) def apply_theme(self, property_values): ''' Apply a set of theme values which will be used rather than defaults, but will not override application-set values. The passed-in dictionary may be kept around as-is and shared with other instances to save memory (so neither the caller nor the |HasProps| instance should modify it). Args: property_values (dict) : theme values to use in place of defaults Returns: None ''' old_dict = self.themed_values() # if the same theme is set again, it should reuse the same dict if old_dict is property_values: return removed = set() # we're doing a little song-and-dance to avoid storing __themed_values__ or # an empty dict, if there's no theme that applies to this HasProps instance. if old_dict is not None: removed.update(set(old_dict.keys())) added = set(property_values.keys()) old_values = dict() for k in added.union(removed): old_values[k] = getattr(self, k) if len(property_values) > 0: setattr(self, '__themed_values__', property_values) elif hasattr(self, '__themed_values__'): delattr(self, '__themed_values__') # Property container values might be cached even if unmodified. Invalidate # any cached values that are not modified at this point. for k, v in old_values.items(): if k in self._unstable_themed_values: del self._unstable_themed_values[k] # Emit any change notifications that result for k, v in old_values.items(): descriptor = self.lookup(k) descriptor.trigger_if_changed(self, v) def unapply_theme(self): ''' Remove any themed values and restore defaults. Returns: None ''' self.apply_theme(property_values=dict()) def pretty(self, verbose=False, max_width=79, newline='\n'): ''' Generate a "pretty" string representation of the object. .. note:: This function only functions in the IPython shell or Jupyter Notebooks. Args: Verbose (bool, optional) : This is a conventional argument for IPython representation printers but is unused by Bokeh. 
(default: False) max_width (int, optional) : Minimum width to start breaking lines when possible. (default: 79) newline (str, optional) : Character to use to separate each line (default: ``\\n``) Returns: str : pretty object representation Raises: ValueError, if ``IPython`` cannot be imported ''' IPython = import_optional('IPython') cls = self.__class__ if IPython: from IPython.lib.pretty import RepresentationPrinter stream = StringIO() printer = RepresentationPrinter(stream, verbose, max_width, newline) printer.pretty(self) printer.flush() return stream.getvalue() else: raise RuntimeError("%s.%s.pretty() requires IPython" % (cls.__module__, cls.__name__)) def pprint(self, verbose=False, max_width=79, newline='\n'): ''' Print a "pretty" string representation of the object to stdout. .. note:: This function only functions in the IPython shell or Jupyter Notebooks. Args: Verbose (bool, optional) : This is a conventional argument for IPython representation printers but is unused by Bokeh. (default: False) max_width (int, optional) : Minimum width to start breaking lines when possible. (default: 79) newline (str, optional) : Character to use to separate each line (default: ``\\n``) Returns: None Raises: ValueError, if ``IPython`` cannot be imported Examples: .. code-block:: python In [1]: from bokeh.models import Range1d In [1]: r = Range1d(start=10, end=20) In [2]: r.pprint() bokeh.models.ranges.Range1d( id='1576d21a-0c74-4214-8d8f-ad415e1e4ed4', bounds=None, callback=None, end=20, js_property_callbacks={}, max_interval=None, min_interval=None, name=None, start=10, tags=[]) ''' sys.stdout.write(self.pretty()) sys.stdout.write(newline) sys.stdout.flush() def _clone(self): ''' Duplicate a HasProps object. Values that are containers are shallow-copied. 
'''
        return self.__class__(**self._property_values)

    def _repr_pretty_(self, p, cycle):
        ''' IPython pretty-printing hook: render this object as
        ``module.Class(prop=value, ...)`` with properties sorted by name. '''
        name = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)

        if cycle:
            # A reference cycle back to self: abbreviate instead of recursing.
            p.text("%s(...)" % name)
        else:
            with p.group(4, '%s(' % name, ')'):
                props = self.properties_with_values().items()
                sorted_props = sorted(props, key=itemgetter(0))
                all_props = sorted_props
                for i, (prop, value) in enumerate(all_props):
                    if i == 0:
                        p.breakable('')
                    else:
                        p.text(',')
                        p.breakable()
                    p.text(prop)
                    p.text('=')
                    p.pretty(value)
#!/usr/bin/env python # ***** BEGIN LICENSE BLOCK ***** # Version: MPL 1.1/GPL 2.0/LGPL 2.1 # # The contents of this file are subject to the Mozilla Public License # Version 1.1 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # http://www.mozilla.org/MPL/ # # Software distributed under the License is distributed on an "AS IS" # basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the # License for the specific language governing rights and limitations # under the License. # # The Original Code is Komodo code. # # The Initial Developer of the Original Code is ActiveState Software Inc. # Portions created by ActiveState Software Inc are Copyright (C) 2000-2007 # ActiveState Software Inc. All Rights Reserved. # # Contributor(s): # ActiveState Software Inc # # Alternatively, the contents of this file may be used under the terms of # either the GNU General Public License Version 2 or later (the "GPL"), or # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), # in which case the provisions of the GPL or the LGPL are applicable instead # of those above. If you wish to allow use of your version of this file only # under the terms of either the GPL or the LGPL, and not to allow others to # use your version of this file under the terms of the MPL, indicate your # decision by deleting the provisions above and replace them with the notice # and other provisions required by the GPL or the LGPL. If you do not delete # the provisions above, a recipient may use your version of this file under # the terms of any one of the MPL, the GPL or the LGPL. 
# # ***** END LICENSE BLOCK ***** # # Contributors: # Eric Promislow (EricP@ActiveState.com) """ rubycile - a Code Intelligence Language Engine for the Ruby language Module Usage: from rubycile import scan_purelang content = open("foo.rb", "r").read() scan_purelang(content, "foo.rb") Command-line Usage: rubycile.py [<options>...] [<Ruby files>...] Options: -h, --help dump this help and exit -V, --version dump this script's version and exit -v, --verbose verbose output, use twice for more verbose output -f, --filename <path> specify the filename of the file content passed in on stdin, this is used for the "path" attribute of the emitted <file> tag. --md5=<string> md5 hash for the input --mtime=<secs> modification time for output info, in #secs since 1/1/70. -L, --language <name> the language of the file being scanned -c, --clock print timing info for scans (CIX is not printed) One or more Ruby files can be specified as arguments or content can be passed in on stdin. A directory can also be specified, in which case all .rb files in that directory are scanned. This is a Language Engine for the Code Intelligence (codeintel) system. Code Intelligence XML format. See: http://specs.tl.activestate.com/kd/kd-0100.html The command-line interface will return non-zero iff the scan failed. 
""" import os from os.path import abspath, basename, dirname, splitext, isfile, isdir, join import sys import getopt import re import logging import glob import time import stat from ciElementTree import Element, SubElement, tostring from SilverCity import ScintillaConstants from codeintel2 import ruby_lexer, ruby_parser, util from codeintel2.common import CILEError from codeintel2 import parser_cix #---- exceptions class RubyCILEError(CILEError): pass #---- global data _version_ = (0, 1, 0) log = logging.getLogger("rubycile") # log.setLevel(logging.DEBUG) dcLog = logging.getLogger("rubycile.dircache") # dcLog.setLevel(logging.DEBUG) _gClockIt = 0 # if true then we are gathering timing data _gClock = None # if gathering timing data this is set to time retrieval fn _gStartTime = None # start time of current file being scanned gProduceOldCIX = False # XXX Temporary -- the old format should be pulled out. # from codeintel2.util import hotshotit class _DirInfo: """ This class stats a directory to determine when files have been added to or removed from it. Update times are platform-dependent. For example, the Python docs state that on Windows update resolution on the st_mtime attribute is 2-seconds, but I've observed it to be closer to 30 seconds. 
""" def __init__(self, ptn): self._data = {} self._ptn = ptn def get_files(self, dirname): if dirname not in self._data: self._create(dirname) else: new_time = self._changed(dirname) if new_time: self._update(dirname, new_time) dcLog.debug("==> " + "\t\n".join(self._data[dirname]['flist'])) return self._data[dirname]['flist'] def _changed(self, dirname): new_time = self._mtime(dirname) if new_time > self._data[dirname]['mtime']: return new_time return 0 def _create(self, dirname): self._data[dirname] = {'mtime': self._mtime(dirname), 'flist': self._files(dirname), } def _files(self, dirname): return glob.glob(join(dirname, self._ptn)) def _mtime(self, dirname): try: return os.stat(dirname)[stat.ST_MTIME] except OSError: return 0 def _update(self, dirname, mtime): self._data[dirname]['mtime'] = mtime self._data[dirname]['flist'] = self._files(dirname) _modelDirInfo = _DirInfo("*.rb") def rails_role_from_path(path): apath = abspath(path) aplist = apath.split(os.path.sep) # Allow for someone to built a rails app at root... 
if len(aplist) < 3: return None elif (aplist[-3] == "app" and (aplist[-2] == "controllers" and aplist[-1].endswith(".rb") or aplist[-2] == "helpers" and aplist[-1].endswith("_helper.rb") or aplist[-2] == "models" and aplist[-1].endswith(".rb"))): role_parts = aplist[-3:] elif (len(aplist) >= 4 and aplist[-4] == "app" and aplist[-3] == "views" and aplist[-1].endswith((".html.erb", ".rhtml"))): role_parts = aplist[-4:] elif (aplist[-3] == "db" and aplist[-2] == "migrate" and aplist[-1].endswith(".rb") and aplist[-1][0].isdigit()): role_parts = aplist[-3:] elif (aplist[-3] == "test" and aplist[-2] in ("functional", "integration", "unit") and aplist[-1].endswith(".rb")): role_parts = aplist[-3:] else: return None return role_parts def check_insert_rails_env(path, blob_scope): role_parts = rails_role_from_path(path) if role_parts is None: return add_models = False if len(role_parts) > 1 and role_parts[0] == "app": if role_parts[1] == "views": # This stuff only works if the evaluator will load class names as well # as namespace names. 
blob_scope.insert(0, Element("import", symbol="ActionView::Base")) elif len(role_parts) > 2: if role_parts[1] in ("controllers", "models"): if role_parts[1] == "controllers": if role_parts[2] != "application.rb": blob_scope.insert(0, Element( "import", module="./application", symbol='*')) # For loading models apath = abspath(path) add_models = True models_dir = join(dirname(dirname(apath)), "models") rel_part = "../" # For loading migrations modelName = "*" else: # add requires for each migration file # Here's how it works: # If the file is app/models/my_thing.rb, # For each file foo in ../../db/migrate/*.rb, # Try to load module=foo, # symbol=inflector.camelcase(drop_ext(basename(filename))) modelName = ruby_parser.get_inflector().camelize( splitext(basename(path))[0]) # Load the migration modules apath = abspath(path) migration_dir = join(dirname(dirname( dirname(apath))), "db", "migrate") migration_files = _modelDirInfo.get_files(migration_dir) idx = 0 for migration_file in migration_files: idx += 1 base_part = "../../db/migrate/" + \ splitext(basename(migration_file))[0] blob_class = blob_scope.find("scope") assert blob_class.get('ilk') == 'class' blob_class.insert(idx, Element( "import", module=base_part, symbol=modelName)) elif (len(role_parts) > 2 and ((role_parts[0] == "db" and role_parts[1] == "migrate" and role_parts[2][0].isdigit()) or role_parts[0] == "test")): apath = abspath(path) add_models = True models_dir = join(dirname(dirname(dirname(apath))), "app", "models") rel_part = "../../app/" if role_parts[0] == "test" and role_parts[1] == 'functional': # Each file functional/foo_controller_test.rb will contain a line reading # require 'foo' # but codeintel won't know where to look for this foo, so we'll tell it explicitly # Use 'index' to throw an exception because # RubyCommonBufferMixin.check_for_rails_app_path specified this # pattern. 
end_part = role_parts[2].index("_test.rb") controller_file = rel_part + \ "controllers/" + role_parts[2][0:end_part] blob_scope.insert(0, Element( "import", module=controller_file, symbol='*')) modelName = '*' # XXX - tests can't see migration dirs yet. # migration_dir = join(dirname(dirname(dirname(apath))), "db", # "migrate") if add_models: model_files = _modelDirInfo.get_files(models_dir) idx = 0 for model_file in model_files: idx += 1 base_part = rel_part + "models/" + \ splitext(basename(model_file))[0] blob_scope.insert(idx, Element( "import", module=base_part, symbol='*')) # @hotshotit def scan_purelang(content, filename): content = content.expandtabs(8) tokenizer = ruby_lexer.RubyLexer(content) parser = ruby_parser.Parser(tokenizer, "Ruby") parse_tree = parser.parse() tree = parser_cix.produce_elementTree_cix(parse_tree, filename, "Ruby", "Ruby") rails_migration_class_nodes = parser.rails_migration_class_tree() if rails_migration_class_nodes: blob_node = tree.getchildren()[0].getchildren()[0] for parse_tree_node in rails_migration_class_nodes: assert parse_tree_node.class_name == "Class" parser_cix.common_module_class_cix( parse_tree_node, blob_node, class_ref_fn=None, attributes="__fabricated__") # parser_cix.class_etree_cix(rails_migration_class_tree, blob_node) return tree def scan_multilang(tokens, module_elem): """Build the Ruby module CIX element tree. "tokens" is a generator of UDL tokens for this UDL-based multi-lang document. "module_elem" is the <module> element of a CIX element tree on which the Ruby module should be built. This should return a tuple of: * the list of the CSL tokens in the token stream, * whether or not the document contains any Ruby tokens (style UDL_SSL...) 
""" tokenizer = ruby_lexer.RubyMultiLangLexer(tokens) parser = ruby_parser.Parser(tokenizer, "RHTML") parse_tree = parser.parse() parser_cix.produce_elementTree_contents_cix(parse_tree, module_elem) csl_tokens = tokenizer.get_csl_tokens() return csl_tokens, tokenizer.has_ruby_code() #---- mainline def main(argv): logging.basicConfig() # Parse options. try: opts, args = getopt.getopt(argv[1:], "Vvhf:cL:", ["version", "verbose", "help", "filename=", "md5=", "mtime=", "clock", "language="]) except getopt.GetoptError, ex: log.error(str(ex)) log.error("Try `rubycile --help'.") return 1 numVerboses = 0 stdinFilename = None md5sum = None mtime = None lang = "Ruby" global _gClockIt for opt, optarg in opts: if opt in ("-h", "--help"): sys.stdout.write(__doc__) return elif opt in ("-V", "--version"): ver = '.'.join([str(part) for part in _version_]) print "rubycile %s" % ver return elif opt in ("-v", "--verbose"): numVerboses += 1 if numVerboses == 1: log.setLevel(logging.INFO) else: log.setLevel(logging.DEBUG) elif opt in ("-f", "--filename"): stdinFilename = optarg elif opt in ("-L", "--language"): lang = optarg elif opt in ("--md5",): md5sum = optarg elif opt in ("--mtime",): mtime = optarg elif opt in ("-c", "--clock"): _gClockIt = 1 global _gClock if sys.platform.startswith("win"): _gClock = time.clock else: _gClock = time.time if len(args) == 0: contentOnStdin = 1 filenames = [stdinFilename or "<stdin>"] else: contentOnStdin = 0 paths = [] for arg in args: paths += glob.glob(arg) filenames = [] for path in paths: if isfile(path): filenames.append(path) elif isdir(path): rbfiles = [join(path, n) for n in os.listdir(path) if splitext(n)[1] == ".rb"] rbfiles = [f for f in rbfiles if isfile(f)] filenames += rbfiles try: for filename in filenames: if contentOnStdin: log.debug("reading content from stdin") content = sys.stdin.read() log.debug("finished reading content from stdin") if mtime is None: mtime = int(time.time()) else: if mtime is None: mtime = 
int(os.stat(filename)[stat.ST_MTIME]) content = open(filename, 'r').read() if _gClockIt: sys.stdout.write("scanning '%s'..." % filename) global _gStartTime _gStartTime = _gClock() data = scan_purelang(content, filename) # data = scan(content, filename, md5sum, mtime, lang=lang) if _gClockIt: sys.stdout.write(" %.3fs\n" % (_gClock()-_gStartTime)) elif data: sys.stdout.write(data) except KeyboardInterrupt: log.debug("user abort") return 1 if 0: # except Exception, ex: log.error(str(ex)) if log.isEnabledFor(logging.DEBUG): print import traceback traceback.print_exception(*sys.exc_info()) return 1 if __name__ == "__main__": sys.exit(main(sys.argv))
import inspect

from pyramid.compat import PY3
from pyramid.static import static_view
from pyramid.interfaces import (
    IRouteRequest,
    IViewClassifier,
    IView,
)

from pyramid.config import not_
from pyramid.compat import string_types
from zope.interface import Interface

ANY_KEY = '*'
UNKNOWN_KEY = '<unknown>'


def _get_pattern(route):
    """Return the route's pattern, normalized to start with '/'."""
    pattern = route.pattern

    if not pattern.startswith('/'):
        pattern = '/%s' % pattern
    return pattern


def _get_request_methods(route_request_methods, view_request_methods):
    """Combine route-level and view-level request method constraints.

    '!METHOD' entries in view_request_methods are treated as exclusions.
    Returns a comma-joined sorted string of methods, ANY_KEY when nothing
    constrains the route, or '<route mismatch>' when the route and view
    constraints cannot intersect.
    """
    excludes = set()

    if route_request_methods:
        route_request_methods = set(route_request_methods)

    if view_request_methods:
        view_request_methods = set(view_request_methods)

        # Pull '!METHOD' negations out into a separate exclusion set.
        for method in view_request_methods.copy():
            if method.startswith('!'):
                view_request_methods.remove(method)
                excludes.add(method[1:])

    has_route_methods = route_request_methods is not None
    has_view_methods = len(view_request_methods) > 0
    has_methods = has_route_methods or has_view_methods

    if has_route_methods is False and has_view_methods is False:
        request_methods = [ANY_KEY]
    elif has_route_methods is False and has_view_methods is True:
        request_methods = view_request_methods
    elif has_route_methods is True and has_view_methods is False:
        request_methods = route_request_methods
    else:
        # Both sides constrain the methods: only the intersection is valid.
        request_methods = route_request_methods.intersection(
            view_request_methods
        )

    request_methods = set(request_methods).difference(excludes)

    if has_methods and not request_methods:
        request_methods = '<route mismatch>'
    elif request_methods:
        # When everything is allowed except some methods, show the
        # exclusions explicitly alongside ANY_KEY.
        if excludes and request_methods == set([ANY_KEY]):
            for exclude in excludes:
                request_methods.add('!%s' % exclude)

        request_methods = ','.join(sorted(request_methods))

    return request_methods


def _get_view_source(view_callable):
    """Best-effort description of where a view callable is defined:
    a dict of module name, callable name and (start, end) source lines,
    or {} when the callable cannot be introspected."""
    if not view_callable or not hasattr(view_callable, '__name__'):
        return {}
    view_module_name = view_callable.__module__
    try:
        if PY3:
            view_class_name = view_callable.__self__.__class__.__name__
        else:
            view_class_name = view_callable.im_class.__name__
        view_callable_name = '%s.%s' % (
view_class_name, view_callable.__name__) except: view_callable_name = view_callable.__name__ try: real_callable = view_callable.__wrapped__ except: real_callable = view_callable try: source_lines = inspect.getsourcelines(real_callable) source_lines = ( source_lines[1], source_lines[1] + len(source_lines[0]) - 1) except: source_lines = None return { 'module_name': view_module_name, 'callable_name': view_callable_name, 'source_lines': source_lines, } def _get_view_module(view_callable): if view_callable is None: return UNKNOWN_KEY, '' if hasattr(view_callable, '__name__'): if hasattr(view_callable, '__original_view__'): original_view = view_callable.__original_view__ else: original_view = None if isinstance(original_view, static_view): raise Exception() # skip static views if original_view.package_name is not None: return '%s:%s' % ( original_view.package_name, original_view.docroot ), '' else: return original_view.docroot else: view_name = view_callable.__name__ else: # Currently only MultiView hits this, # we could just not run _get_view_module # for them and remove this logic view_name = str(view_callable) view_module = '%s.%s' % ( view_callable.__module__, view_name, ) # If pyramid wraps something in wsgiapp or wsgiapp2 decorators # that is currently returned as pyramid.router.decorator, lets # hack a nice name in: if view_module == 'pyramid.router.decorator': view_module = '<wsgiapp>' return view_module, view_callable.__doc__ def get_route_data(route, registry): pattern = _get_pattern(route) request_iface = registry.queryUtility( IRouteRequest, name=route.name ) route_request_methods = None view_request_methods_order = [] view_request_methods = {} view_callable = None route_intr = registry.introspector.get( 'routes', route.name ) if request_iface is None: return [ (route.name, _get_pattern(route), UNKNOWN_KEY, ANY_KEY) ] view_callable = registry.adapters.lookup( (IViewClassifier, request_iface, Interface), IView, name='', default=None ) try: view_module, 
view_docs = _get_view_module(view_callable)
    except:
        return []
    view_source = _get_view_source(view_callable)

    # Introspectables can be turned off, so there could be a chance
    # that we have no `route_intr` but we do have a route + callable
    if route_intr is None:
        view_request_methods[view_module] = []
        view_request_methods_order.append((view_module, view_docs))
    else:
        if route_intr.get('static', False) is True:
            # Static routes point at an external URL, not a view module.
            return [
                (route.name, route_intr['external_url'], UNKNOWN_KEY, ANY_KEY)
            ]

        route_request_methods = route_intr['request_methods']
        view_intr = registry.introspector.related(route_intr)

        if view_intr:
            for view in view_intr:
                request_method = view.get('request_methods')

                # A view with 'attr' is a method on the callable; resolve it
                # so the module/source info points at the actual method.
                if view.get('attr') is not None:
                    view_callable = getattr(view['callable'], view['attr'])
                    view_module, view_docs = _get_view_module(view_callable)
                    view_source = _get_view_source(view_callable)
                else:
                    view_callable = view['callable']
                    view_module, view_docs = _get_view_module(view_callable)
                    view_source = _get_view_source(view_callable)

                if request_method is not None:
                    if view_module not in view_request_methods:
                        view_request_methods[view_module] = []
                        view_request_methods_order.append((view_module, view_docs))

                    # Normalize to a tuple of method names; not_() predicates
                    # become '!METHOD' exclusion markers.
                    if isinstance(request_method, string_types):
                        request_method = (request_method,)
                    elif isinstance(request_method, not_):
                        request_method = ('!%s' % request_method.value,)

                    view_request_methods[view_module].extend(request_method)
                else:
                    if view_module not in view_request_methods:
                        view_request_methods[view_module] = []
                        view_request_methods_order.append((view_module, view_docs))
        else:
            view_request_methods[view_module] = []
            view_request_methods_order.append((view_module, view_docs))

    final_routes = []

    # Emit one row per distinct view module, in first-seen order.
    for view_module, view_docs in view_request_methods_order:
        methods = view_request_methods[view_module]
        request_methods = _get_request_methods(
            route_request_methods,
            methods
        )

        final_routes.append((
            route.name,
            pattern.replace('\\', '\\\\'),
            view_module,
            request_methods,
            view_docs,
            view_source,
        ))

    return final_routes
# ----------------------------------------------------------------------------- # ply: lex.py # # Copyright (C) 2001-2015, # David M. Beazley (Dabeaz LLC) # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the David Beazley or Dabeaz LLC may be used to # endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ----------------------------------------------------------------------------- __version__ = '3.6' __tabversion__ = '3.5' import re import sys import types import copy import os import inspect # This tuple contains known string types try: # Python 2.6 StringTypes = (types.StringType, types.UnicodeType) except AttributeError: # Python 3.0 StringTypes = (str, bytes) # This regular expression is used to match valid token names _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') # Exception thrown when invalid token encountered and no default error # handler is defined. class LexError(Exception): def __init__(self, message, s): self.args = (message,) self.text = s # Token class. This class is used to represent the tokens produced. class LexToken(object): def __str__(self): return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) def __repr__(self): return str(self) # This object is a stand-in for a logging object created by the # logging module. class PlyLogger(object): def __init__(self, f): self.f = f def critical(self, msg, *args, **kwargs): self.f.write((msg % args) + '\n') def warning(self, msg, *args, **kwargs): self.f.write('WARNING: ' + (msg % args) + '\n') def error(self, msg, *args, **kwargs): self.f.write('ERROR: ' + (msg % args) + '\n') info = critical debug = critical # Null logger is used when no output is generated. Does nothing. class NullLogger(object): def __getattribute__(self, name): return self def __call__(self, *args, **kwargs): return self # ----------------------------------------------------------------------------- # === Lexing Engine === # # The following Lexer class implements the lexer runtime. 
There are only # a few public methods and attributes: # # input() - Store a new string in the lexer # token() - Get the next token # clone() - Clone the lexer # # lineno - Current line number # lexpos - Current position in the input string # ----------------------------------------------------------------------------- class Lexer: def __init__(self): self.lexre = None # Master regular expression. This is a list of # tuples (re, findex) where re is a compiled # regular expression and findex is a list # mapping regex group numbers to rules self.lexretext = None # Current regular expression strings self.lexstatere = {} # Dictionary mapping lexer states to master regexs self.lexstateretext = {} # Dictionary mapping lexer states to regex strings self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names self.lexstate = 'INITIAL' # Current lexer state self.lexstatestack = [] # Stack of lexer states self.lexstateinfo = None # State information self.lexstateignore = {} # Dictionary of ignored characters for each state self.lexstateerrorf = {} # Dictionary of error functions for each state self.lexstateeoff = {} # Dictionary of eof functions for each state self.lexreflags = 0 # Optional re compile flags self.lexdata = None # Actual input data (as a string) self.lexpos = 0 # Current position in input text self.lexlen = 0 # Length of the input text self.lexerrorf = None # Error rule (if any) self.lexeoff = None # EOF rule (if any) self.lextokens = None # List of valid tokens self.lexignore = '' # Ignored characters self.lexliterals = '' # Literal characters that can be passed through self.lexmodule = None # Module self.lineno = 1 # Current line number self.lexoptimize = False # Optimized mode def clone(self, object=None): c = copy.copy(self) # If the object parameter has been supplied, it means we are attaching the # lexer to a new object. In this case, we have to rebind all methods in # the lexstatere and lexstateerrorf tables. 
if object: newtab = {} for key, ritem in self.lexstatere.items(): newre = [] for cre, findex in ritem: newfindex = [] for f in findex: if not f or not f[0]: newfindex.append(f) continue newfindex.append((getattr(object, f[0].__name__), f[1])) newre.append((cre, newfindex)) newtab[key] = newre c.lexstatere = newtab c.lexstateerrorf = {} for key, ef in self.lexstateerrorf.items(): c.lexstateerrorf[key] = getattr(object, ef.__name__) c.lexmodule = object return c # ------------------------------------------------------------ # writetab() - Write lexer information to a table file # ------------------------------------------------------------ def writetab(self, basetabmodule, outputdir=''): filename = os.path.join(outputdir, basetabmodule) + '.py' with open(filename, 'w') as tf: tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__)) tf.write('_tabversion = %s\n' % repr(__tabversion__)) tf.write('_lextokens = %s\n' % repr(self.lextokens)) tf.write('_lexreflags = %s\n' % repr(self.lexreflags)) tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) # Rewrite the lexstatere table, replacing function objects with function names tabre = {} for statename, lre in self.lexstatere.items(): titem = [] for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): titem.append((retext, _funcs_to_names(func, renames))) tabre[statename] = titem tf.write('_lexstatere = %s\n' % repr(tabre)) tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) taberr = {} for statename, ef in self.lexstateerrorf.items(): taberr[statename] = ef.__name__ if ef else None tf.write('_lexstateerrorf = %s\n' % repr(taberr)) tabeof = {} for statename, ef in self.lexstateeoff.items(): tabeof[statename] = ef.__name__ if ef else None tf.write('_lexstateeoff = %s\n' % repr(tabeof)) # 
------------------------------------------------------------ # readtab() - Read lexer information from a tab file # ------------------------------------------------------------ def readtab(self, tabfile, fdict): if isinstance(tabfile, types.ModuleType): lextab = tabfile else: exec('import %s' % tabfile) lextab = sys.modules[tabfile] if getattr(lextab, '_tabversion', '0.0') != __tabversion__: raise ImportError('Inconsistent PLY version') self.lextokens = lextab._lextokens self.lexreflags = lextab._lexreflags self.lexliterals = lextab._lexliterals self.lextokens_all = self.lextokens | set(self.lexliterals) self.lexstateinfo = lextab._lexstateinfo self.lexstateignore = lextab._lexstateignore self.lexstatere = {} self.lexstateretext = {} for statename, lre in lextab._lexstatere.items(): titem = [] txtitem = [] for pat, func_name in lre: titem.append((re.compile(pat, lextab._lexreflags | re.VERBOSE), _names_to_funcs(func_name, fdict))) self.lexstatere[statename] = titem self.lexstateretext[statename] = txtitem self.lexstateerrorf = {} for statename, ef in lextab._lexstateerrorf.items(): self.lexstateerrorf[statename] = fdict[ef] self.lexstateeoff = {} for statename, ef in lextab._lexstateeoff.items(): self.lexstateeoff[statename] = fdict[ef] self.begin('INITIAL') # ------------------------------------------------------------ # input() - Push a new string into the lexer # ------------------------------------------------------------ def input(self, s): # Pull off the first character to see if s looks like a string c = s[:1] if not isinstance(c, StringTypes): raise ValueError('Expected a string') self.lexdata = s self.lexpos = 0 self.lexlen = len(s) # ------------------------------------------------------------ # begin() - Changes the lexing state # ------------------------------------------------------------ def begin(self, state): if state not in self.lexstatere: raise ValueError('Undefined state') self.lexre = self.lexstatere[state] self.lexretext = 
self.lexstateretext[state] self.lexignore = self.lexstateignore.get(state, '') self.lexerrorf = self.lexstateerrorf.get(state, None) self.lexeoff = self.lexstateeoff.get(state, None) self.lexstate = state # ------------------------------------------------------------ # push_state() - Changes the lexing state and saves old on stack # ------------------------------------------------------------ def push_state(self, state): self.lexstatestack.append(self.lexstate) self.begin(state) # ------------------------------------------------------------ # pop_state() - Restores the previous state # ------------------------------------------------------------ def pop_state(self): self.begin(self.lexstatestack.pop()) # ------------------------------------------------------------ # current_state() - Returns the current lexing state # ------------------------------------------------------------ def current_state(self): return self.lexstate # ------------------------------------------------------------ # skip() - Skip ahead n characters # ------------------------------------------------------------ def skip(self, n): self.lexpos += n # ------------------------------------------------------------ # opttoken() - Return the next token from the Lexer # # Note: This function has been carefully implemented to be as fast # as possible. 
Don't make changes unless you really know what # you are doing # ------------------------------------------------------------ def token(self): # Make local copies of frequently referenced attributes lexpos = self.lexpos lexlen = self.lexlen lexignore = self.lexignore lexdata = self.lexdata while lexpos < lexlen: # This code provides some short-circuit code for whitespace, tabs, and other ignored characters if lexdata[lexpos] in lexignore: lexpos += 1 continue # Look for a regular expression match for lexre, lexindexfunc in self.lexre: m = lexre.match(lexdata, lexpos) if not m: continue # Create a token for return tok = LexToken() tok.value = m.group() tok.lineno = self.lineno tok.lexpos = lexpos i = m.lastindex func, tok.type = lexindexfunc[i] if not func: # If no token type was set, it's an ignored token if tok.type: self.lexpos = m.end() return tok else: lexpos = m.end() break lexpos = m.end() # If token is processed by a function, call it tok.lexer = self # Set additional attributes useful in token rules self.lexmatch = m self.lexpos = lexpos newtok = func(tok) # Every function must return a token, if nothing, we just move to next token if not newtok: lexpos = self.lexpos # This is here in case user has updated lexpos. lexignore = self.lexignore # This is here in case there was a state change break # Verify type of the token. If not in the token map, raise an error if not self.lexoptimize: if newtok.type not in self.lextokens_all: raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( func.__code__.co_filename, func.__code__.co_firstlineno, func.__name__, newtok.type), lexdata[lexpos:]) return newtok else: # No match, see if in literals if lexdata[lexpos] in self.lexliterals: tok = LexToken() tok.value = lexdata[lexpos] tok.lineno = self.lineno tok.type = tok.value tok.lexpos = lexpos self.lexpos = lexpos + 1 return tok # No match. Call t_error() if defined. 
if self.lexerrorf: tok = LexToken() tok.value = self.lexdata[lexpos:] tok.lineno = self.lineno tok.type = 'error' tok.lexer = self tok.lexpos = lexpos self.lexpos = lexpos newtok = self.lexerrorf(tok) if lexpos == self.lexpos: # Error method didn't change text position at all. This is an error. raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) lexpos = self.lexpos if not newtok: continue return newtok self.lexpos = lexpos raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) if self.lexeoff: tok = LexToken() tok.type = 'eof' tok.value = '' tok.lineno = self.lineno tok.lexpos = lexpos tok.lexer = self self.lexpos = lexpos newtok = self.lexeoff(tok) return newtok self.lexpos = lexpos + 1 if self.lexdata is None: raise RuntimeError('No input string given with input()') return None # Iterator interface def __iter__(self): return self def next(self): t = self.token() if t is None: raise StopIteration return t __next__ = next # ----------------------------------------------------------------------------- # ==== Lex Builder === # # The functions and classes below are used to collect lexing information # and build a Lexer object from it. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # _get_regex(func) # # Returns the regular expression assigned to a function either as a doc string # or as a .regex attribute attached by the @TOKEN decorator. # ----------------------------------------------------------------------------- def _get_regex(func): return getattr(func, 'regex', func.__doc__) # ----------------------------------------------------------------------------- # get_caller_module_dict() # # This function returns a dictionary containing all of the symbols defined within # a caller further down the call stack. 
This is used to get the environment # associated with the yacc() call if none was provided. # ----------------------------------------------------------------------------- def get_caller_module_dict(levels): f = sys._getframe(levels) ldict = f.f_globals.copy() if f.f_globals != f.f_locals: ldict.update(f.f_locals) return ldict # ----------------------------------------------------------------------------- # _funcs_to_names() # # Given a list of regular expression functions, this converts it to a list # suitable for output to a table file # ----------------------------------------------------------------------------- def _funcs_to_names(funclist, namelist): result = [] for f, name in zip(funclist, namelist): if f and f[0]: result.append((name, f[1])) else: result.append(f) return result # ----------------------------------------------------------------------------- # _names_to_funcs() # # Given a list of regular expression function names, this converts it back to # functions. # ----------------------------------------------------------------------------- def _names_to_funcs(namelist, fdict): result = [] for n in namelist: if n and n[0]: result.append((fdict[n[0]], n[1])) else: result.append(n) return result # ----------------------------------------------------------------------------- # _form_master_re() # # This function takes a list of all of the regex components and attempts to # form the master regular expression. Given limitations in the Python re # module, it may be necessary to break the master regex into separate expressions. 
# ----------------------------------------------------------------------------- def _form_master_re(relist, reflags, ldict, toknames): if not relist: return [] regex = '|'.join(relist) try: lexre = re.compile(regex, re.VERBOSE | reflags) # Build the index to function map for the matching engine lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) lexindexnames = lexindexfunc[:] for f, i in lexre.groupindex.items(): handle = ldict.get(f, None) if type(handle) in (types.FunctionType, types.MethodType): lexindexfunc[i] = (handle, toknames[f]) lexindexnames[i] = f elif handle is not None: lexindexnames[i] = f if f.find('ignore_') > 0: lexindexfunc[i] = (None, None) else: lexindexfunc[i] = (None, toknames[f]) return [(lexre, lexindexfunc)], [regex], [lexindexnames] except Exception: m = int(len(relist)/2) if m == 0: m = 1 llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) return (llist+rlist), (lre+rre), (lnames+rnames) # ----------------------------------------------------------------------------- # def _statetoken(s,names) # # Given a declaration name s of the form "t_" and a dictionary whose keys are # state names, this function returns a tuple (states,tokenname) where states # is a tuple of state names and tokenname is the name of the token. 
For example, # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') # ----------------------------------------------------------------------------- def _statetoken(s, names): nonstate = 1 parts = s.split('_') for i, part in enumerate(parts[1:], 1): if part not in names and part != 'ANY': break if i > 1: states = tuple(parts[1:i]) else: states = ('INITIAL',) if 'ANY' in states: states = tuple(names) tokenname = '_'.join(parts[i:]) return (states, tokenname) # ----------------------------------------------------------------------------- # LexerReflect() # # This class represents information needed to build a lexer as extracted from a # user's input file. # ----------------------------------------------------------------------------- class LexerReflect(object): def __init__(self, ldict, log=None, reflags=0): self.ldict = ldict self.error_func = None self.tokens = [] self.reflags = reflags self.stateinfo = {'INITIAL': 'inclusive'} self.modules = set() self.error = False self.log = PlyLogger(sys.stderr) if log is None else log # Get all of the basic information def get_all(self): self.get_tokens() self.get_literals() self.get_states() self.get_rules() # Validate all of the information def validate_all(self): self.validate_tokens() self.validate_literals() self.validate_rules() return self.error # Get the tokens map def get_tokens(self): tokens = self.ldict.get('tokens', None) if not tokens: self.log.error('No token list is defined') self.error = True return if not isinstance(tokens, (list, tuple)): self.log.error('tokens must be a list or tuple') self.error = True return if not tokens: self.log.error('tokens is empty') self.error = True return self.tokens = tokens # Validate the tokens def validate_tokens(self): terminals = {} for n in self.tokens: if not _is_identifier.match(n): self.log.error("Bad token name '%s'", n) self.error = True if n in terminals: self.log.warning("Token '%s' multiply defined", n) terminals[n] = 1 # Get the literals 
specifier def get_literals(self): self.literals = self.ldict.get('literals', '') if not self.literals: self.literals = '' # Validate literals def validate_literals(self): try: for c in self.literals: if not isinstance(c, StringTypes) or len(c) > 1: self.log.error('Invalid literal %s. Must be a single character', repr(c)) self.error = True except TypeError: self.log.error('Invalid literals specification. literals must be a sequence of characters') self.error = True def get_states(self): self.states = self.ldict.get('states', None) # Build statemap if self.states: if not isinstance(self.states, (tuple, list)): self.log.error('states must be defined as a tuple or list') self.error = True else: for s in self.states: if not isinstance(s, tuple) or len(s) != 2: self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s)) self.error = True continue name, statetype = s if not isinstance(name, StringTypes): self.log.error('State name %s must be a string', repr(name)) self.error = True continue if not (statetype == 'inclusive' or statetype == 'exclusive'): self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) self.error = True continue if name in self.stateinfo: self.log.error("State '%s' already defined", name) self.error = True continue self.stateinfo[name] = statetype # Get all of the symbols with a t_ prefix and sort them into various # categories (functions, strings, error functions, and ignore characters) def get_rules(self): tsymbols = [f for f in self.ldict if f[:2] == 't_'] # Now build up a list of functions and a list of strings self.toknames = {} # Mapping of symbols to token names self.funcsym = {} # Symbols defined as functions self.strsym = {} # Symbols defined as strings self.ignore = {} # Ignore strings by state self.errorf = {} # Error functions by state self.eoff = {} # EOF functions by state for s in self.stateinfo: self.funcsym[s] = [] self.strsym[s] = [] if len(tsymbols) == 0: 
self.log.error('No rules of the form t_rulename are defined') self.error = True return for f in tsymbols: t = self.ldict[f] states, tokname = _statetoken(f, self.stateinfo) self.toknames[f] = tokname if hasattr(t, '__call__'): if tokname == 'error': for s in states: self.errorf[s] = t elif tokname == 'eof': for s in states: self.eoff[s] = t elif tokname == 'ignore': line = t.__code__.co_firstlineno file = t.__code__.co_filename self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) self.error = True else: for s in states: self.funcsym[s].append((f, t)) elif isinstance(t, StringTypes): if tokname == 'ignore': for s in states: self.ignore[s] = t if '\\' in t: self.log.warning("%s contains a literal backslash '\\'", f) elif tokname == 'error': self.log.error("Rule '%s' must be defined as a function", f) self.error = True else: for s in states: self.strsym[s].append((f, t)) else: self.log.error('%s not defined as a function or string', f) self.error = True # Sort the functions by line number for f in self.funcsym.values(): f.sort(key=lambda x: x[1].__code__.co_firstlineno) # Sort the strings by regular expression length for s in self.strsym.values(): s.sort(key=lambda x: len(x[1]), reverse=True) # Validate all of the t_rules collected def validate_rules(self): for state in self.stateinfo: # Validate all rules defined by functions for fname, f in self.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) tokname = self.toknames[fname] if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True continue if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True continue if not _get_regex(f): self.log.error("%s:%d: No regular expression defined for 
rule '%s'", file, line, f.__name__) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), re.VERBOSE | self.reflags) if c.match(''): self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) self.error = True except re.error as e: self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) if '#' in _get_regex(f): self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) self.error = True # Validate all rules defined by strings for name, r in self.strsym[state]: tokname = self.toknames[name] if tokname == 'error': self.log.error("Rule '%s' must be defined as a function", name) self.error = True continue if tokname not in self.tokens and tokname.find('ignore_') < 0: self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) self.error = True continue try: c = re.compile('(?P<%s>%s)' % (name, r), re.VERBOSE | self.reflags) if (c.match('')): self.log.error("Regular expression for rule '%s' matches empty string", name) self.error = True except re.error as e: self.log.error("Invalid regular expression for rule '%s'. 
%s", name, e) if '#' in r: self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) self.error = True if not self.funcsym[state] and not self.strsym[state]: self.log.error("No rules defined for state '%s'", state) self.error = True # Validate the error function efunc = self.errorf.get(state, None) if efunc: f = efunc line = f.__code__.co_firstlineno file = f.__code__.co_filename module = inspect.getmodule(f) self.modules.add(module) if isinstance(f, types.MethodType): reqargs = 2 else: reqargs = 1 nargs = f.__code__.co_argcount if nargs > reqargs: self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) self.error = True if nargs < reqargs: self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) self.error = True for module in self.modules: self.validate_module(module) # ----------------------------------------------------------------------------- # validate_module() # # This checks to see if there are duplicated t_rulename() functions or strings # in the parser input file. This is done using a simple regular expression # match on each line in the source code of the given module. # ----------------------------------------------------------------------------- def validate_module(self, module): lines, linen = inspect.getsourcelines(module) fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') counthash = {} linen += 1 for line in lines: m = fre.match(line) if not m: m = sre.match(line) if m: name = m.group(1) prev = counthash.get(name) if not prev: counthash[name] = linen else: filename = inspect.getsourcefile(module) self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) self.error = True linen += 1 # ----------------------------------------------------------------------------- # lex(module) # # Build all of the regular expression rules from definitions in the supplied module # ----------------------------------------------------------------------------- def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', reflags=0, nowarn=False, outputdir=None, debuglog=None, errorlog=None): global lexer ldict = None stateinfo = {'INITIAL': 'inclusive'} lexobj = Lexer() lexobj.lexoptimize = optimize global token, input if errorlog is None: errorlog = PlyLogger(sys.stderr) if debug: if debuglog is None: debuglog = PlyLogger(sys.stderr) # Get the module dictionary used for the lexer if object: module = object # Get the module dictionary used for the parser if module: _items = [(k, getattr(module, k)) for k in dir(module)] ldict = dict(_items) # If no __file__ attribute is available, try to obtain it from the __module__ instead if '__file__' not in ldict: ldict['__file__'] = sys.modules[ldict['__module__']].__file__ else: ldict = get_caller_module_dict(2) if outputdir is None: # If no output directory is set, the location of the output files # is determined according to the following rules: # - If lextab specifies a package, files go into that package directory # - Otherwise, files go in the same directory as the specifying module if '.' not in lextab: srcfile = ldict['__file__'] else: parts = lextab.split('.') pkgname = '.'.join(parts[:-1]) exec('import %s' % pkgname) srcfile = getattr(sys.modules[pkgname], '__file__', '') outputdir = os.path.dirname(srcfile) # Determine if the module is package of a package or not. # If so, fix the tabmodule setting so that tables load correctly pkg = ldict.get('__package__') if pkg: if '.' not in lextab: lextab = pkg + '.' 
+ lextab baselextab = lextab.split('.')[-1] # Collect parser information from the dictionary linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) linfo.get_all() if not optimize: if linfo.validate_all(): raise SyntaxError("Can't build lexer") if optimize and lextab: try: lexobj.readtab(lextab, ldict) token = lexobj.token input = lexobj.input lexer = lexobj return lexobj except ImportError: pass # Dump some basic debugging information if debug: debuglog.info('lex: tokens = %r', linfo.tokens) debuglog.info('lex: literals = %r', linfo.literals) debuglog.info('lex: states = %r', linfo.stateinfo) # Build a dictionary of valid token names lexobj.lextokens = set() for n in linfo.tokens: lexobj.lextokens.add(n) # Get literals specification if isinstance(linfo.literals, (list, tuple)): lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) else: lexobj.lexliterals = linfo.literals lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) # Get the stateinfo dictionary stateinfo = linfo.stateinfo regexs = {} # Build the master regular expressions for state in stateinfo: regex_list = [] # Add rules defined by functions first for fname, f in linfo.funcsym[state]: line = f.__code__.co_firstlineno file = f.__code__.co_filename regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) # Now add all of the simple rules for name, r in linfo.strsym[state]: regex_list.append('(?P<%s>%s)' % (name, r)) if debug: debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) regexs[state] = regex_list # Build the master regular expressions if debug: debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') for state in regexs: lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) lexobj.lexstatere[state] = lexre lexobj.lexstateretext[state] = re_text lexobj.lexstaterenames[state] = re_names if debug: for i, text in 
enumerate(re_text): debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) # For inclusive states, we need to add the regular expressions from the INITIAL state for state, stype in stateinfo.items(): if state != 'INITIAL' and stype == 'inclusive': lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) lexobj.lexstateinfo = stateinfo lexobj.lexre = lexobj.lexstatere['INITIAL'] lexobj.lexretext = lexobj.lexstateretext['INITIAL'] lexobj.lexreflags = reflags # Set up ignore variables lexobj.lexstateignore = linfo.ignore lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') # Set up error functions lexobj.lexstateerrorf = linfo.errorf lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) if not lexobj.lexerrorf: errorlog.warning('No t_error rule is defined') # Set up eof functions lexobj.lexstateeoff = linfo.eoff lexobj.lexeoff = linfo.eoff.get('INITIAL', None) # Check state information for ignore and error rules for s, stype in stateinfo.items(): if stype == 'exclusive': if s not in linfo.errorf: errorlog.warning("No error rule is defined for exclusive state '%s'", s) if s not in linfo.ignore and lexobj.lexignore: errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) elif stype == 'inclusive': if s not in linfo.errorf: linfo.errorf[s] = linfo.errorf.get('INITIAL', None) if s not in linfo.ignore: linfo.ignore[s] = linfo.ignore.get('INITIAL', '') # Create global versions of the token() and input() functions token = lexobj.token input = lexobj.input lexer = lexobj # If in optimize mode, we write the lextab if lextab and optimize: try: lexobj.writetab(baselextab, outputdir) except IOError as e: errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) return lexobj # ----------------------------------------------------------------------------- # runmain() # # This runs the lexer as a main program # ----------------------------------------------------------------------------- def runmain(lexer=None, data=None): if not data: try: filename = sys.argv[1] f = open(filename) data = f.read() f.close() except IndexError: sys.stdout.write('Reading from standard input (type EOF to end):\n') data = sys.stdin.read() if lexer: _input = lexer.input else: _input = input _input(data) if lexer: _token = lexer.token else: _token = token while True: tok = _token() if not tok: break sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) # ----------------------------------------------------------------------------- # @TOKEN(regex) # # This decorator function can be used to set the regex expression on a function # when its docstring might need to be set in an alternative way # ----------------------------------------------------------------------------- def TOKEN(r): def set_regex(f): if hasattr(r, '__call__'): f.regex = _get_regex(r) else: f.regex = r return f return set_regex # Alternative spelling of the TOKEN decorator Token = TOKEN
import functools, hashlib

import numpy as np
import pandas as pd  # moved to module level: pd is used by both functions below
from scipy.stats import norm as normal_dbn

import regreg.api as rr

from selection.algorithms.debiased_lasso import pseudoinverse_debiasing_matrix

# load in the X matrix
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(hdi); data(riboflavin); X = riboflavin$x')
numpy2ri.activate()
X_full = np.asarray(rpy.r('X'))
numpy2ri.deactivate()

from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit, repeat_selection, infer_set_target
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
from selection.learning.learners import mixture_learner


def highdim_model_inference(X,
                            y,
                            truth,
                            selection_algorithm,
                            sampler,
                            lam_min,
                            dispersion,
                            success_params=(1, 1),
                            fit_probability=keras_fit,
                            fit_args=None,
                            alpha=0.1,
                            B=2000,
                            naive=True,
                            learner_klass=mixture_learner,
                            how_many=None):
    """Run selective inference for a high-dimensional model.

    Repeats `selection_algorithm` via `repeat_selection`, forms a debiased
    LASSO estimate at the selected coordinates, and computes selective
    pivots/intervals/p-values with `infer_set_target`.  Optionally appends
    naive (non-selective) normal-theory inference for comparison.

    Args:
        X, y: design matrix (n x p) and response (length n).
        truth: length-p vector of true coefficients (used for coverage/pivots).
        selection_algorithm: callable mapping a sampler to a set of selected indices.
        sampler: randomization sampler passed through to selection/inference.
        lam_min: LASSO lagrange parameter used for the debiased estimate.
        dispersion: noise variance used to scale covariances.
        success_params: (k, m) success rule for `repeat_selection`.
        fit_probability, fit_args: probability-fitting model and its kwargs.
        alpha: nominal level; intervals have coverage 1 - alpha.
        B: number of learning samples.
        naive: if True, also compute naive normal-theory columns.
        learner_klass: learner class used by `infer_set_target`.
        how_many: if not None, restrict inference to the first `how_many`
            selected coordinates.

    Returns:
        A pandas DataFrame of results, or None when nothing was selected
        (or no p-values were produced).
    """
    # default mutable kwargs created per-call, not shared across calls
    if fit_args is None:
        fit_args = {'epochs': 10, 'sizes': [100]*5, 'dropout': 0., 'activation': 'relu'}

    n, p = X.shape
    XTX = X.T.dot(X)

    # Stable identifier for this (X, y, truth) instance so repeated runs
    # can be grouped in the output CSV.
    instance_hash = hashlib.md5()
    instance_hash.update(X.tobytes())
    instance_hash.update(y.tobytes())
    instance_hash.update(truth.tobytes())
    instance_id = instance_hash.hexdigest()

    # run selection algorithm
    observed_set = repeat_selection(selection_algorithm, sampler, *success_params)
    observed_list = sorted(observed_set)

    # observed debiased LASSO estimate
    loss = rr.squared_error(X, y)
    pen = rr.l1norm(p, lagrange=lam_min)
    problem = rr.simple_problem(loss, pen)
    soln = problem.solve()
    grad = X.T.dot(X.dot(soln) - y)  # gradient at beta_hat

    M = pseudoinverse_debiasing_matrix(X, observed_list)

    observed_target = soln[observed_list] - M.dot(grad)
    tmp = X.dot(M.T)
    target_cov = tmp.T.dot(tmp) * dispersion
    cross_cov = np.identity(p)[:, observed_list] * dispersion

    if len(observed_list) > 0:

        if how_many is None:
            how_many = len(observed_list)
        observed_list = observed_list[:how_many]

        # find the target, based on the observed outcome

        (pivots,
         covered,
         lengths,
         pvalues,
         lower,
         upper) = [], [], [], [], [], []

        targets = []
        true_target = truth[observed_list]

        results = infer_set_target(selection_algorithm,
                                   observed_set,
                                   observed_list,
                                   sampler,
                                   observed_target,
                                   target_cov,
                                   cross_cov,
                                   hypothesis=true_target,
                                   fit_probability=fit_probability,
                                   fit_args=fit_args,
                                   success_params=success_params,
                                   alpha=alpha,
                                   B=B,
                                   learner_klass=learner_klass)

        for i, result in enumerate(results):
            (pivot, interval, pvalue, _) = result
            pvalues.append(pvalue)
            pivots.append(pivot)
            covered.append((interval[0] < true_target[i]) * (interval[1] > true_target[i]))
            lengths.append(interval[1] - interval[0])
            lower.append(interval[0])
            upper.append(interval[1])

        if len(pvalues) > 0:
            df = pd.DataFrame({'pivot': pivots,
                               'pvalue': pvalues,
                               'coverage': covered,
                               'length': lengths,
                               'upper': upper,
                               'lower': lower,
                               'id': [instance_id]*len(pvalues),
                               'target': true_target,
                               'variable': observed_list,
                               'B': [B]*len(pvalues)})

            if naive:
                # Naive (non-selective) two-sided normal-theory inference for
                # each selected coordinate, for comparison with the selective
                # columns above.
                (naive_pvalues,
                 naive_pivots,
                 naive_covered,
                 naive_lengths,
                 naive_upper,
                 naive_lower) = [], [], [], [], [], []

                for j, idx in enumerate(observed_list):
                    true_target = truth[idx]
                    target_sd = np.sqrt(target_cov[j, j])
                    observed_target_j = observed_target[j]
                    quantile = normal_dbn.ppf(1 - 0.5 * alpha)
                    naive_interval = (observed_target_j - quantile * target_sd,
                                      observed_target_j + quantile * target_sd)
                    naive_upper.append(naive_interval[1])
                    naive_lower.append(naive_interval[0])

                    naive_pivot = (1 - normal_dbn.cdf((observed_target_j - true_target) / target_sd))
                    naive_pivot = 2 * min(naive_pivot, 1 - naive_pivot)
                    naive_pivots.append(naive_pivot)

                    naive_pvalue = (1 - normal_dbn.cdf(observed_target_j / target_sd))
                    naive_pvalue = 2 * min(naive_pvalue, 1 - naive_pvalue)
                    naive_pvalues.append(naive_pvalue)

                    naive_covered.append((naive_interval[0] < true_target) * (naive_interval[1] > true_target))
                    naive_lengths.append(naive_interval[1] - naive_interval[0])

                naive_df = pd.DataFrame({'naive_pivot': naive_pivots,
                                         'naive_pvalue': naive_pvalues,
                                         'naive_coverage': naive_covered,
                                         'naive_length': naive_lengths,
                                         'naive_upper': naive_upper,
                                         'naive_lower': naive_lower,
                                         'variable': observed_list,
                                         })
                df = pd.merge(df, naive_df, on='variable')
            return df


boot_design = False


def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):
    """Simulate one instance on the riboflavin design and run inference.

    Draws a sparse truth, simulates a Gaussian response on the (optionally
    bootstrapped) riboflavin X, selects with cross-validated glmnet LASSO,
    and runs `highdim_model_inference` plus Liu et al. inference.

    Returns:
        A merged pandas DataFrame of results, or None when selection failed.
    """
    # description of statistical problem
    n, p = X_full.shape

    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx]  # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape)  # to make non-degenerate
    else:
        X = X_full.copy()

    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]

    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth *= sigma / np.sqrt(n)

    y = X.dot(truth) + sigma * np.random.standard_normal(n)

    lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
    lam_min, lam_1se = n * lam_min, n * lam_1se

    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = sigma**2

    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)

    def meta_algorithm(X, XTXi, resid, sampler):
        # One selection step: reconstruct a response from the sampled score,
        # then run cross-validated glmnet LASSO selection.
        S = sampler.center.copy()
        ynew = X.dot(XTXi).dot(S) + resid  # will be ok for n>p and non-degen X
        G = lasso_glmnet(X, ynew, *[None]*4)
        select = G.select(seed=seed)
        return set(list(select[0]))

    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)

    # run selection algorithm
    df = highdim_model_inference(X,
                                 y,
                                 truth,
                                 selection_algorithm,
                                 splitting_sampler,
                                 lam_min,
                                 sigma**2,  # dispersion assumed known for now
                                 success_params=(1, 1),
                                 B=B,
                                 fit_probability=keras_fit,
                                 fit_args={'epochs': 10, 'sizes': [100]*5, 'dropout': 0., 'activation': 'relu'})

    if df is not None:
        liu_df = liu_inference(X,
                               y,
                               1.00001 * lam_min,
                               dispersion,
                               truth,
                               alpha=alpha,
                               approximate_inverse='BN')
        return pd.merge(df, liu_df, on='variable')


if __name__ == "__main__":
    import statsmodels.api as sm
    import matplotlib.pyplot as plt

    U = np.linspace(0, 1, 101)
    plt.clf()

    init_seed = np.fabs(np.random.standard_normal() * 500)
    for i in range(500):
        df = simulate(seed=init_seed + i)
        csvfile = 'riboflavin_CV.csv'
        outbase = csvfile[:-4]

        if df is not None and i > 0:

            # accumulate results across runs in a single CSV
            try:
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
            df.to_csv(csvfile, index=False)

            if len(df['pivot']) > 0:
                pivot_ax, lengths_ax = pivot_plot(df, outbase)
                liu_pivot = df['liu_pivot']
                liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
                pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray',
                              label='Liu CV', linewidth=3)
                pivot_ax.legend()
                fig = pivot_ax.figure
                fig.savefig(csvfile[:-4] + '.pdf')
# Copyright 2019 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """For component execution, includes driver, executor and publisher.""" import abc import copy from typing import Any, Dict, List, Optional import absl from tfx import types from tfx.dsl.components.base import base_node from tfx.dsl.components.base import executor_spec from tfx.orchestration import data_types from tfx.orchestration import metadata from tfx.orchestration import publisher from tfx.orchestration.config import base_component_config class BaseComponentLauncher(abc.ABC): """Responsible for launching driver, executor and publisher of component.""" def __init__( self, component: base_node.BaseNode, pipeline_info: data_types.PipelineInfo, driver_args: data_types.DriverArgs, metadata_connection: metadata.Metadata, beam_pipeline_args: List[str], additional_pipeline_args: Dict[str, Any], component_config: Optional[ base_component_config.BaseComponentConfig] = None, ): """Initialize a BaseComponentLauncher. Args: component: The Tfx node to launch. pipeline_info: An instance of data_types.PipelineInfo that holds pipeline properties. driver_args: An instance of data_types.DriverArgs that holds component specific driver args. metadata_connection: ML metadata connection. The connection is expected to not be opened when given to this object. beam_pipeline_args: Pipeline arguments for Beam powered Components. additional_pipeline_args: Additional pipeline args. 
      component_config: Optional component specific config to instrument
        launcher on how to launch a component.

    Raises:
      ValueError: when component and component_config are not launchable by
        the launcher.
    """
    self._pipeline_info = pipeline_info
    # Component identity is derived entirely from the component being
    # launched plus the owning pipeline.
    self._component_info = data_types.ComponentInfo(
        component_type=component.type,
        component_id=component.id,
        pipeline_info=self._pipeline_info)
    self._driver_args = driver_args
    self._driver_class = component.driver_class
    self._component_executor_spec = component.executor_spec
    self._input_dict = component.inputs
    self._output_dict = component.outputs
    self._exec_properties = component.exec_properties
    self._metadata_connection = metadata_connection
    self._beam_pipeline_args = beam_pipeline_args
    self._additional_pipeline_args = additional_pipeline_args
    self._component_config = component_config
    # Fail fast: each concrete launcher declares via can_launch() which
    # executor specs / component configs it supports.
    if not self.can_launch(self._component_executor_spec,
                           self._component_config):
      raise ValueError(
          'component.executor_spec with type "%s" and component config with'
          ' type "%s" are not launchable by "%s".' % (
              type(self._component_executor_spec).__name__,
              type(self._component_config).__name__,
              type(self).__name__,
          ))

  @classmethod
  def create(
      cls,
      component: base_node.BaseNode,
      pipeline_info: data_types.PipelineInfo,
      driver_args: data_types.DriverArgs,
      metadata_connection: metadata.Metadata,
      beam_pipeline_args: List[str],
      additional_pipeline_args: Dict[str, Any],
      component_config: Optional[
          base_component_config.BaseComponentConfig] = None,
  ) -> 'BaseComponentLauncher':
    """Initialize a ComponentLauncher directly from a BaseComponent instance.

    This class method is the contract between `TfxRunner` and
    `BaseComponentLauncher` to support launcher polymorphism. Sublcass of
    this class must make sure it can be initialized by the method.

    Args:
      component: The component to launch.
      pipeline_info: An instance of data_types.PipelineInfo that holds
        pipeline properties.
      driver_args: An instance of data_types.DriverArgs that holds component
        specific driver args.
      metadata_connection: ML metadata connection. The connection is expected
        to not be opened when given to this object.
      beam_pipeline_args: Pipeline arguments for Beam powered Components.
      additional_pipeline_args: Additional pipeline args.
      component_config: Optional component specific config to instrument
        launcher on how to launch a component.

    Returns:
      A new instance of component launcher.
    """
    return cls(
        component=component,
        pipeline_info=pipeline_info,
        driver_args=driver_args,
        metadata_connection=metadata_connection,
        beam_pipeline_args=beam_pipeline_args,
        additional_pipeline_args=additional_pipeline_args,
        component_config=component_config)  # pytype: disable=not-instantiable

  @classmethod
  @abc.abstractmethod
  def can_launch(
      cls, component_executor_spec: executor_spec.ExecutorSpec,
      component_config: base_component_config.BaseComponentConfig) -> bool:
    """Checks if the launcher can launch the executor spec with an optional component config."""
    raise NotImplementedError

  def _run_driver(
      self, input_dict: Dict[str, types.Channel],
      output_dict: Dict[str, types.Channel],
      exec_properties: Dict[str, Any]) -> data_types.ExecutionDecision:
    """Prepare inputs, outputs and execution properties for actual execution."""
    # NOTE(review): the metadata connection is entered here and again in
    # _run_publisher — presumably the handle supports repeated use as a
    # context manager; confirm against metadata.Metadata.
    with self._metadata_connection as m:
      driver = self._driver_class(metadata_handler=m)

      # The driver resolves artifacts, decides on caching, and registers
      # the execution; its decision drives launch() below.
      execution_decision = driver.pre_execution(
          input_dict=input_dict,
          output_dict=output_dict,
          exec_properties=exec_properties,
          driver_args=self._driver_args,
          pipeline_info=self._pipeline_info,
          component_info=self._component_info)

      return execution_decision

  @abc.abstractmethod
  # TODO(jyzhao): consider returning an execution result.
  def _run_executor(self, execution_id: int,
                    input_dict: Dict[str, List[types.Artifact]],
                    output_dict: Dict[str, List[types.Artifact]],
                    exec_properties: Dict[str, Any]) -> None:
    """Execute underlying component implementation."""
    raise NotImplementedError

  def _run_publisher(self,
                     output_dict: Dict[str, List[types.Artifact]]) -> None:
    """Publish execution result to ml metadata."""
    with self._metadata_connection as m:
      p = publisher.Publisher(metadata_handler=m)
      p.publish_execution(
          component_info=self._component_info, output_artifacts=output_dict)

  def launch(self) -> data_types.ExecutionInfo:
    """Execute the component, includes driver, executor and publisher.

    Returns:
      The execution decision of the launch.
    """
    absl.logging.info('Running driver for %s',
                      self._component_info.component_id)
    execution_decision = self._run_driver(self._input_dict, self._output_dict,
                                          self._exec_properties)

    # The executor is skipped entirely when the driver found cached results.
    if not execution_decision.use_cached_results:
      absl.logging.info('Running executor for %s',
                        self._component_info.component_id)
      # Make a deep copy for input_dict and exec_properties, because they
      # should be immutable in this context.
      # output_dict can still be changed, specifically properties.
      self._run_executor(execution_decision.execution_id,
                         copy.deepcopy(execution_decision.input_dict),
                         execution_decision.output_dict,
                         copy.deepcopy(execution_decision.exec_properties))

    # Publishing happens even for cached results so ML metadata records the
    # execution's outputs.
    absl.logging.info('Running publisher for %s',
                      self._component_info.component_id)
    self._run_publisher(output_dict=execution_decision.output_dict)

    return data_types.ExecutionInfo(
        input_dict=execution_decision.input_dict,
        output_dict=execution_decision.output_dict,
        exec_properties=execution_decision.exec_properties,
        execution_id=execution_decision.execution_id)
from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import gym import numpy as np import pickle import os import tensorflow as tf from ray.rllib.common import Agent, TrainingResult from ray.rllib.dqn import logger, models from ray.rllib.dqn.common.atari_wrappers_deprecated \ import wrap_dqn, ScaledFloatFrame from ray.rllib.dqn.common.schedules import LinearSchedule from ray.rllib.dqn.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer """The default configuration dict for the DQN algorithm. dueling: bool whether to use dueling dqn double_q: bool whether to use double dqn hiddens: array<int> hidden layer sizes of the state and action value networks model: dict config options to pass to the model constructor lr: float learning rate for adam optimizer schedule_max_timesteps: int max num timesteps for annealing schedules timesteps_per_iteration: int number of env steps to optimize for before returning buffer_size: int size of the replay buffer exploration_fraction: float fraction of entire training period over which the exploration rate is annealed exploration_final_eps: float final value of random action probability train_freq: int update the model every `train_freq` steps. batch_size: int size of a batched sampled from replay buffer for training print_freq: int how often to print out training progress set to None to disable printing checkpoint_freq: int how often to save the model. This is so that the best version is restored at the end of the training. If you do not wish to restore the best version at the end of the training set this variable to None. learning_starts: int how many steps of the model to collect transitions for before learning starts gamma: float discount factor grad_norm_clipping: int or None if not None, clip gradients during optimization at this value target_network_update_freq: int update the target network every `target_network_update_freq` steps. 
prioritized_replay: True if True prioritized replay buffer will be used. prioritized_replay_alpha: float alpha parameter for prioritized replay buffer prioritized_replay_beta0: float initial value of beta for prioritized replay buffer prioritized_replay_beta_iters: int number of iterations over which beta will be annealed from initial value to 1.0. If set to None equals to schedule_max_timesteps prioritized_replay_eps: float epsilon to add to the TD errors when updating priorities. num_cpu: int number of cpus to use for training """ DEFAULT_CONFIG = dict( dueling=True, double_q=True, hiddens=[256], model={}, lr=5e-4, schedule_max_timesteps=100000, timesteps_per_iteration=1000, buffer_size=50000, exploration_fraction=0.1, exploration_final_eps=0.02, train_freq=1, batch_size=32, print_freq=1, checkpoint_freq=10000, learning_starts=1000, gamma=1.0, grad_norm_clipping=10, target_network_update_freq=500, prioritized_replay=False, prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=None, prioritized_replay_eps=1e-6, num_cpu=16) class DQNAgent(Agent): def __init__(self, env_name, config, upload_dir=None): config.update({"alg": "DQN"}) Agent.__init__(self, env_name, config, upload_dir=upload_dir) with tf.Graph().as_default(): self._init() def _init(self): config = self.config env = gym.make(self.env_name) # TODO(ekl): replace this with RLlib preprocessors if "NoFrameskip" in self.env_name: env = ScaledFloatFrame(wrap_dqn(env)) self.env = env num_cpu = config["num_cpu"] tf_config = tf.ConfigProto( inter_op_parallelism_threads=num_cpu, intra_op_parallelism_threads=num_cpu) self.sess = tf.Session(config=tf_config) self.dqn_graph = models.DQNGraph(env, config) # Create the replay buffer if config["prioritized_replay"]: self.replay_buffer = PrioritizedReplayBuffer( config["buffer_size"], alpha=config["prioritized_replay_alpha"]) prioritized_replay_beta_iters = ( config["prioritized_replay_beta_iters"]) if prioritized_replay_beta_iters is 
None: prioritized_replay_beta_iters = ( config["schedule_max_timesteps"]) self.beta_schedule = LinearSchedule( prioritized_replay_beta_iters, initial_p=config["prioritized_replay_beta0"], final_p=1.0) else: self.replay_buffer = ReplayBuffer(config["buffer_size"]) self.beta_schedule = None # Create the schedule for exploration starting from 1. self.exploration = LinearSchedule( schedule_timesteps=int( config["exploration_fraction"] * config["schedule_max_timesteps"]), initial_p=1.0, final_p=config["exploration_final_eps"]) # Initialize the parameters and copy them to the target network. self.sess.run(tf.global_variables_initializer()) self.dqn_graph.update_target(self.sess) self.episode_rewards = [0.0] self.episode_lengths = [0.0] self.saved_mean_reward = None self.obs = self.env.reset() self.num_timesteps = 0 self.num_iterations = 0 self.file_writer = tf.summary.FileWriter(self.logdir, self.sess.graph) self.saver = tf.train.Saver(max_to_keep=None) def train(self): config = self.config sample_time, learn_time = 0, 0 for _ in range(config["timesteps_per_iteration"]): self.num_timesteps += 1 dt = time.time() # Take action and update exploration to the newest value action = self.dqn_graph.act( self.sess, np.array(self.obs)[None], self.exploration.value(self.num_timesteps))[0] new_obs, rew, done, _ = self.env.step(action) # Store transition in the replay buffer. self.replay_buffer.add(self.obs, action, rew, new_obs, float(done)) self.obs = new_obs self.episode_rewards[-1] += rew self.episode_lengths[-1] += 1 if done: self.obs = self.env.reset() self.episode_rewards.append(0.0) self.episode_lengths.append(0.0) sample_time += time.time() - dt if self.num_timesteps > config["learning_starts"] and \ self.num_timesteps % config["train_freq"] == 0: dt = time.time() # Minimize the error in Bellman's equation on a batch sampled # from replay buffer. 
if config["prioritized_replay"]: experience = self.replay_buffer.sample( config["batch_size"], beta=self.beta_schedule.value(self.num_timesteps)) (obses_t, actions, rewards, obses_tp1, dones, _, batch_idxes) = experience else: obses_t, actions, rewards, obses_tp1, dones = ( self.replay_buffer.sample(config["batch_size"])) batch_idxes = None td_errors = self.dqn_graph.train( self.sess, obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards)) if config["prioritized_replay"]: new_priorities = np.abs(td_errors) + ( config["prioritized_replay_eps"]) self.replay_buffer.update_priorities( batch_idxes, new_priorities) learn_time += (time.time() - dt) if self.num_timesteps > config["learning_starts"] and ( self.num_timesteps % config["target_network_update_freq"] == 0): # Update target network periodically. self.dqn_graph.update_target(self.sess) mean_100ep_reward = round(np.mean(self.episode_rewards[-101:-1]), 1) mean_100ep_length = round(np.mean(self.episode_lengths[-101:-1]), 1) num_episodes = len(self.episode_rewards) info = { "sample_time": sample_time, "learn_time": learn_time, "steps": self.num_timesteps, "episodes": num_episodes, "exploration": int( 100 * self.exploration.value(self.num_timesteps)) } logger.record_tabular("sample_time", sample_time) logger.record_tabular("learn_time", learn_time) logger.record_tabular("steps", self.num_timesteps) logger.record_tabular("buffer_size", len(self.replay_buffer)) logger.record_tabular("episodes", num_episodes) logger.record_tabular("mean 100 episode reward", mean_100ep_reward) logger.record_tabular( "% time spent exploring", int(100 * self.exploration.value(self.num_timesteps))) logger.dump_tabular() res = TrainingResult( self.experiment_id.hex, self.num_iterations, mean_100ep_reward, mean_100ep_length, info) self.num_iterations += 1 return res def save(self): checkpoint_path = self.saver.save( self.sess, os.path.join(self.logdir, "checkpoint"), global_step=self.num_iterations) extra_data = [ 
self.replay_buffer, self.beta_schedule, self.exploration, self.episode_rewards, self.episode_lengths, self.saved_mean_reward, self.obs, self.num_timesteps, self.num_iterations] pickle.dump(extra_data, open(checkpoint_path + ".extra_data", "wb")) return checkpoint_path def restore(self, checkpoint_path): self.saver.restore(self.sess, checkpoint_path) extra_data = pickle.load(open(checkpoint_path + ".extra_data", "rb")) self.replay_buffer = extra_data[0] self.beta_schedule = extra_data[1] self.exploration = extra_data[2] self.episode_rewards = extra_data[3] self.episode_lengths = extra_data[4] self.saved_mean_reward = extra_data[5] self.obs = extra_data[6] self.num_timesteps = extra_data[7] self.num_iterations = extra_data[8] def compute_action(self, observation): return self.dqn_graph.act( self.sess, np.array(observation)[None], 0.0)[0]
# coding=utf-8
#
# Copyright 2012-2016 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import hashlib
import re
import hmac

from datetime import datetime

from django import forms
from django.utils.six import text_type

from payu.models import PayUIPN
from payu.conf import (PAYU_MERCHANT, PAYU_MERCHANT_KEY,
                       PAYU_TEST_TRANSACTION, PAYU_ORDER_DETAILS,
                       PAYU_ORDER_DETAILS_DEFAULTS, PAYU_DATE_FORMATS,
                       PAYU_CURRENCIES, PAYU_PAYMENT_METHODS, PAYU_LANGUAGES,
                       PAYU_LU_CALLBACK)


class ValueHiddenInput(forms.HiddenInput):
    """
    Widget that renders only if it has a value.

    Used to remove unused fields from PayU buttons.
    """
    template_name = 'custom_hidden.html'

    def _get_name(self, name):
        """Map internal 'ORDER_<row>_<col>' names to PayU's
        'ORDER_<DETAIL>[]' array-style field names."""
        detail = re.match(r'^ORDER_(\d+)_(\d+)$', name)
        if detail and int(detail.group(2)) < len(PAYU_ORDER_DETAILS):
            name = 'ORDER_%s[]' % PAYU_ORDER_DETAILS[int(detail.group(2))]

        return name

    def get_context(self, name, value, attrs):
        context = super(ValueHiddenInput, self).get_context(name, value,
                                                            attrs)
        context['widget']['name'] = self._get_name(context['widget']['name'])

        return context

    def render(self, name, value, attrs=None):
        # Suppress the input entirely when there is no value.
        if value is None:
            return text_type()

        name = self._get_name(name)

        return super(ValueHiddenInput, self).render(name, value or "", attrs)


class OrderWidget(forms.MultiWidget):
    """One hidden input per PayU order detail (PNAME, PCODE, ...)."""

    # NOTE: attrs defaults to None instead of a mutable {} default, which
    # would be shared between every OrderWidget instance.
    def __init__(self, attrs=None, *args, **kwargs):
        all_widgets = [ValueHiddenInput(attrs) for _ in PAYU_ORDER_DETAILS]

        super(OrderWidget, self).__init__(all_widgets, *args, **kwargs)

    def decompress(self, value):
        return [value.get(detail, '') for detail in PAYU_ORDER_DETAILS]


class OrderField(forms.MultiValueField):
    """One order line: a CharField per PayU order detail."""
    widget = OrderWidget

    def __init__(self, *args, **kwargs):
        all_fields = tuple(forms.CharField() for _ in PAYU_ORDER_DETAILS)

        super(OrderField, self).__init__(all_fields, *args, **kwargs)


class OrdersWidget(forms.MultiWidget):
    """One OrderWidget per order line; rendered as hidden inputs only."""
    is_hidden = True

    def __init__(self, count, *args, **kwargs):
        all_widgets = tuple((OrderWidget()) for _ in range(count))

        super(OrdersWidget, self).__init__(all_widgets, *args, **kwargs)


class OrdersField(forms.MultiValueField):
    """The full ORDER payload: one OrderField per product in `initial`."""

    def __init__(self, *args, **kwargs):
        products = kwargs.get('initial', [])
        kwargs['label'] = ''

        all_fields = tuple()
        if products:
            self.widget = OrdersWidget(len(products))
            all_fields = tuple((OrderField()) for _ in products)

        super(OrdersField, self).__init__(all_fields, *args, **kwargs)


class PayULiveUpdateForm(forms.Form):
    """Hidden form that posts an order to PayU's Live Update endpoint,
    including the MD5-HMAC ORDER_HASH signature PayU requires."""

    MERCHANT = forms.CharField(widget=ValueHiddenInput,
                               initial=PAYU_MERCHANT)
    LU_ENABLE_TOKEN = forms.CharField(widget=ValueHiddenInput, initial='')
    LU_TOKEN_TYPE = forms.CharField(widget=ValueHiddenInput, initial='')

    ORDER_REF = forms.CharField(widget=ValueHiddenInput, initial='')
    # Callable initial so the timestamp is evaluated each time a form is
    # rendered, not once at import time (a long-running process would
    # otherwise send a stale ORDER_DATE).
    ORDER_DATE = forms.CharField(
        widget=ValueHiddenInput,
        initial=lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

    ORDER = OrdersField()
    ORDER_SHIPPING = forms.CharField(widget=ValueHiddenInput)
    PRICES_CURRENCY = forms.ChoiceField(widget=ValueHiddenInput,
                                        choices=PAYU_CURRENCIES,
                                        initial='USD')
    DISCOUNT = forms.CharField(widget=ValueHiddenInput)

    DESTINATION_CITY = forms.CharField(widget=ValueHiddenInput)
    DESTINATION_STATE = forms.CharField(widget=ValueHiddenInput)
    DESTINATION_COUNTRY = forms.CharField(widget=ValueHiddenInput)

    PAY_METHOD = forms.ChoiceField(widget=ValueHiddenInput,
                                   choices=PAYU_PAYMENT_METHODS)
    ORDER_HASH = forms.CharField(widget=ValueHiddenInput, initial='')

    BILL_FNAME = forms.CharField(widget=ValueHiddenInput)
    BILL_LNAME = forms.CharField(widget=ValueHiddenInput)
    BILL_COUNTRYCODE = forms.CharField(widget=ValueHiddenInput)
    BILL_CITY = forms.CharField(widget=ValueHiddenInput)
    BILL_PHONE = forms.CharField(widget=ValueHiddenInput)
    BILL_EMAIL = forms.CharField(widget=ValueHiddenInput)
    BILL_COMPANY = forms.CharField(widget=ValueHiddenInput)
    BILL_FISCALCODE = forms.CharField(widget=ValueHiddenInput)

    CURRENCY = forms.ChoiceField(widget=ValueHiddenInput,
                                 choices=PAYU_CURRENCIES,
                                 initial='USD')

    AUTOMODE = forms.CharField(widget=ValueHiddenInput, initial='1')

    LANGUAGE = forms.ChoiceField(widget=ValueHiddenInput,
                                 choices=PAYU_LANGUAGES,
                                 initial='EN')

    SELECTED_INSTALLMENTS_NO = forms.CharField(widget=ValueHiddenInput)

    BACK_REF = forms.CharField(widget=ValueHiddenInput,
                               initial=PAYU_LU_CALLBACK)

    TESTORDER = forms.CharField(widget=ValueHiddenInput,
                                initial=str(PAYU_TEST_TRANSACTION).upper())

    @property
    def signature(self):
        """
        Compute the ORDER_HASH of the request.

        The hashable string is composed by getting the values from:
            MERCHANT ORDER_REF ORDER_DATE ORDER_PNAME[] ORDER_PCODE[]
            ORDER_PINFO[] ORDER_PRICE[] ORDER_QTY[] ORDER_VAT[]
            ORDER_SHIPPING PRICES_CURRENCY DISCOUNT DESTINATION_CITY
            DESTINATION_STATE DESTINATION_COUNTRY PAY_METHOD
            ORDER_PRICE_TYPE[] SELECTED_INSTALLMENTS_NO TESTORDER
        in this exact order.

        Next, we need to concatenate their lengths with their values,
        resulting in a string like:
            8PAYUDEMO9789456123192016-10-05 11:12:279CD Player12MobilePhone
            6Laptop10PROD_0489110PROD_0740910PROD_0496527Extended Warranty
            - 5 Years8Dual SIM1117"Display482.371945.754523017131122022022
            0103RON2559Bucuresti9Bucuresti2RO8CCVISAMC5GROSS5GROSS5GROSS4TRUE

        Using this string and the MERCHANT_KEY, we compute the HMAC.
        """
        hashable_fields = ['MERCHANT', 'ORDER_REF', 'ORDER_DATE',
                           'ORDER_SHIPPING', 'PRICES_CURRENCY', 'DISCOUNT',
                           'DESTINATION_CITY', 'DESTINATION_STATE',
                           'DESTINATION_COUNTRY', 'PAY_METHOD',
                           'SELECTED_INSTALLMENTS_NO', 'TESTORDER']
        result = text_type()

        # We need this hack since payU is not consistent
        # with the order of fields in hash string
        suffix = text_type()

        for field in self:
            # ORDER_HASH holds the signature itself and is never hashed.
            if field.name == 'ORDER_HASH':
                continue

            field_value = field.value()
            if field.name in hashable_fields and field_value:
                # PayU's scheme: "<utf8 byte length><value>" per field.
                encoded_value = text_type('{length}{value}').format(
                    length=len(text_type(field_value).encode('utf-8')),
                    value=field_value
                )
                if field.name == 'TESTORDER' or \
                   field.name == 'SELECTED_INSTALLMENTS_NO':
                    suffix += encoded_value
                else:
                    result += encoded_value

            if field.name == 'ORDER':
                # Details are hashed column-wise (all PNAMEs, then all
                # PCODEs, ...), skipping columns that are empty everywhere.
                for detail in PAYU_ORDER_DETAILS:
                    if any([detail in order and order[detail]
                            for order in field_value]):
                        for order in field_value:
                            value = order.get(detail, '')
                            item = text_type('{length}{value}').format(
                                length=len(text_type(value).encode('utf-8')),
                                value=value
                            )
                            if detail == 'PRICE_TYPE':
                                suffix += item
                            else:
                                result += item

        result += suffix
        result = result.encode('utf-8')

        # PayU's Live Update signature is an MD5 HMAC. The digestmod is now
        # explicit: hmac.new() has required it since Python 3.8 (MD5 was the
        # implicit default before, so the digest is unchanged).
        return hmac.new(PAYU_MERCHANT_KEY, result,
                        digestmod=hashlib.md5).hexdigest()

    def _prepare_orders(self, orders):
        """
        Each order needs to have all it's details filled with default value,
        or None, in case those are not already filled.
        """
        for detail in PAYU_ORDER_DETAILS:
            if not any([detail in order for order in orders]):
                for order in orders:
                    order[detail] = PAYU_ORDER_DETAILS_DEFAULTS.get(detail,
                                                                    None)

        return orders

    def __init__(self, **kwargs):
        initial = kwargs.get('initial', {})
        orders = self._prepare_orders(initial.get('ORDER', []))

        super(PayULiveUpdateForm, self).__init__(**kwargs)

        # ORDER must be rebuilt per-instance so its widget count matches
        # the number of products, and the signature depends on the final
        # field values.
        self.fields['ORDER'] = OrdersField(initial=orders)
        self.fields['ORDER_HASH'].initial = self.signature


class PayUIPNForm(forms.ModelForm):
    """ModelForm that normalizes PayU's IPN POST ('FIELD[]' arrays and the
    compact IPN_DATE format) before standard ModelForm validation."""

    class Meta:
        exclude = []
        model = PayUIPN

    # NOTE(review): *args/**kwargs are accepted but not forwarded to the
    # ModelForm constructor — presumably intentional since only POST data is
    # ever passed; confirm before relying on extra arguments.
    def __init__(self, data, *args, **kwargs):
        form_data = data.copy()

        for field in data:
            if field.endswith("[]"):
                # Collapse repeated 'FIELD[]' values into one CSV string.
                form_data[field[:-2]] = ",".join(
                    [value
                     for value in data.getlist(field)
                     if value.strip()])

            if field == 'IPN_DATE':
                form_data[field] = datetime.strptime(data[field],
                                                     "%Y%m%d%H%M%S")

        super(PayUIPNForm, self).__init__(form_data)
'''
Created on December 25, 2016

This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.

@author: David Moss
'''

import bot

from locations.location import Location


class Intelligence:
    """
    Base class / interface for intelligence modules.

    Every event hook below is a no-op, so a subclass only needs to override
    the handlers it actually cares about. The timer and alarm helpers at the
    bottom dispatch through the `bot` module based on whether the parent is
    a Location or a device, and prefix every reference with this module's
    unique intelligence_id so modules never collide with each other.
    """

    def __init__(self, botengine, parent):
        """
        Instantiate this object
        :param parent: Parent object, either a location or a device object.
        """
        import uuid

        # Unique ID used to namespace this module's timer/alarm references.
        self.intelligence_id = str(uuid.uuid4())

        self.parent = parent

    def initialize(self, botengine):
        """
        Initialize on each bot execution.
        :param botengine: BotEngine environment
        """
        return

    def destroy(self, botengine):
        """
        This device or object is getting permanently deleted - it is no
        longer in the user's account.
        :param botengine: BotEngine environment
        """
        return

    def new_version(self, botengine):
        """
        Upgraded to a new bot version.
        :param botengine: BotEngine environment
        """
        return

    def mode_updated(self, botengine, current_mode):
        """
        The location's mode was updated.
        :param botengine: BotEngine environment
        :param current_mode: Current mode
        """
        return

    def device_measurements_updated(self, botengine, device_object):
        """
        A device's measurements were updated.
        :param botengine: BotEngine environment
        :param device_object: Device object that was updated
        """
        return

    def device_metadata_updated(self, botengine, device_object):
        """
        Evaluate a device that is new or whose goal/scenario was recently
        updated.
        :param botengine: BotEngine environment
        :param device_object: Device object that was updated
        """
        return

    def device_alert(self, botengine, device_object, alert_type,
                     alert_params):
        """
        A device sent an alert.

        When a device disconnects, it will send an alert like this:
        [{u'alertType': u'status', u'params': [{u'name': u'deviceStatus',
          u'value': u'2'}], u'deviceId': u'eb10e80a006f0d00'}]

        When a device reconnects, it will send an alert like this:
        [{u'alertType': u'on', u'deviceId': u'eb10e80a006f0d00'}]

        :param botengine: BotEngine environment
        :param device_object: Device object that sent the alert
        :param alert_type: Type of alert
        """
        return

    def device_added(self, botengine, device_object):
        """
        A new device was added to this location.
        :param botengine: BotEngine environment
        :param device_object: Device object that is getting added
        """
        return

    def device_deleted(self, botengine, device_object):
        """
        A device is getting deleted.
        :param botengine: BotEngine environment
        :param device_object: Device object that is getting deleted
        """
        return

    def question_answered(self, botengine, question_object):
        """
        The user answered a question.
        :param botengine: BotEngine environment
        :param question_object: Question object
        """
        return

    def datastream_updated(self, botengine, address, content):
        """
        A data stream message was received; dispatch it to a method of the
        same name as the address, if one exists on this module.
        :param botengine: BotEngine environment
        :param address: Data Stream address
        :param content: Content of the message
        """
        try:
            handler = getattr(self, address)
        except AttributeError:
            # No handler defined for this address on this module.
            return
        handler(botengine, content)

    def schedule_fired(self, botengine, schedule_id):
        """
        The bot executed on a hard coded schedule specified by our
        runtime.json file.
        :param botengine: BotEngine environment
        :param schedule_id: Schedule ID that is executing from our list of
            runtime schedules
        """
        return

    def timer_fired(self, botengine, argument):
        """
        The bot's intelligence timer fired.
        :param botengine: Current botengine environment
        :param argument: Argument applied when setting the timer
        """
        return

    def file_uploaded(self, botengine, device_object, file_id,
                      filesize_bytes, content_type, file_extension):
        """
        A device file has been uploaded.
        :param botengine: BotEngine environment
        :param device_object: Device object that uploaded the file
        :param file_id: File ID to reference this file at the server
        :param filesize_bytes: The file size in bytes
        :param content_type: The content type, for example 'video/mp4'
        :param file_extension: The file extension, for example 'mp4'
        """
        return

    def coordinates_updated(self, botengine, latitude, longitude):
        """
        Approximate coordinates of the parent proxy device object have been
        updated.
        :param latitude: Latitude
        :param longitude: Longitude
        """
        return

    def user_role_updated(self, botengine, user_id, role, alert_category,
                          location_access, previous_alert_category,
                          previous_location_access):
        """
        A user changed roles.
        :param botengine: BotEngine environment
        :param user_id: User ID that changed roles
        :param role: Application-layer agreed upon role integer which may
            auto-configure location_access and alert category
        :param alert_category: User's current alert/communications category
            (1=resident; 2=supporter)
        :param location_access: User's access to the location and devices.
            (0=None; 10=read location/device data; 20=control devices and
            modes; 30=update location info and manage devices)
        :param previous_alert_category: User's previous category, if any
        :param previous_location_access: User's previous access to the
            location, if any
        """
        return

    def call_center_updated(self, botengine, user_id, status):
        """
        Emergency call center status has changed.
            0 = Unavailable
            1 = Available, but the user does not have enough information to
                activate
            2 = Registration pending
            3 = Registered and activated
            4 = Cancellation pending
            5 = Cancelled
        :param botengine: BotEngine environment
        :param user_id: User ID that made the change
        :param status: Current call center status
        """
        return

    def data_request_ready(self, botengine, reference, csv_dict):
        """
        A botengine.request_data() asynchronous request for CSV data is
        ready.

        This is part of a very scalable method to extract large amounts of
        data from the server for the purpose of machine learning services.
        If a service needs to extract a large amount of data for one or
        multiple devices, the developer should call
        botengine.request_data(..) and also allow the bot to trigger off of
        trigger type 2048. The bot can exit its current execution. The
        server will independently gather all the necessary data and capture
        it into a LZ4-compressed CSV file on the server which is available
        for one day and accessible only by the bot through a public HTTPS
        URL identified by a cryptographic token. The bot then gets triggered
        and downloads the CSV data, passing the data throughout the
        environment with this data_request_ready() event-driven method.

        Developers are encouraged to use the 'reference' argument inside
        calls to botengine.request_data(..). The reference is passed back
        out at the completion of the request, allowing the developer to
        ensure the data request that is now available was truly destined
        for their microservice.

        Your bots will need to include the following configuration for data
        requests to operate:
        * runtime.json should include trigger 2048
        * structure.json should include inside 'pip_install_remotely' a
          reference to the "lz4" Python package

        :param botengine: BotEngine environment
        :param reference: Optional reference passed into
            botengine.request_data(..)
        :param csv_dict: { device_object: 'csv data string' }
        """
        return

    #===========================================================================
    # Built-in Timer and Alarm methods.
    #===========================================================================
    def start_timer_ms(self, botengine, milliseconds, argument=None,
                       reference=""):
        """
        Start a relative timer in milliseconds.
        :param botengine: BotEngine environment
        :param milliseconds: Time in milliseconds for the timer to fire
        :param argument: Optional argument to provide when the timer fires.
        :param reference: Optional reference to use to manage this timer.
        """
        # Prefix the reference with this intelligence ID so it is unique
        # against all other intelligence modules.
        namespaced_reference = self.intelligence_id + str(reference)

        if isinstance(self.parent, Location):
            # Location intelligence
            bot.start_location_intelligence_timer_ms(
                botengine, milliseconds, self.intelligence_id, argument,
                namespaced_reference)
        else:
            # Device intelligence
            bot.start_device_intelligence_timer_ms(
                botengine, milliseconds, self.intelligence_id, argument,
                namespaced_reference)

    def start_timer_s(self, botengine, seconds, argument=None, reference=""):
        """
        Helper function with an explicit "_s" at the end, to start a timer
        in seconds.
        :param botengine: BotEngine environment
        :param seconds: Time in seconds for the timer to fire
        :param argument: Optional argument to provide when the timer fires.
        :param reference: Optional reference to use to manage this timer.
        """
        self.start_timer(botengine, seconds, argument, str(reference))

    def start_timer(self, botengine, seconds, argument=None, reference=""):
        """
        Start a relative timer in seconds.
        :param botengine: BotEngine environment
        :param seconds: Time in seconds for the timer to fire
        :param argument: Optional argument to provide when the timer fires.
        :param reference: Optional reference to use to manage this timer.
        """
        # Prefix the reference with this intelligence ID so it is unique
        # against all other intelligence modules.
        namespaced_reference = self.intelligence_id + str(reference)

        if isinstance(self.parent, Location):
            # Location intelligence
            bot.start_location_intelligence_timer(
                botengine, seconds, self.intelligence_id, argument,
                namespaced_reference)
        else:
            # Device intelligence
            bot.start_device_intelligence_timer(
                botengine, seconds, self.intelligence_id, argument,
                namespaced_reference)

    def is_timer_running(self, botengine, reference=""):
        """
        Check if a timer or alarm with the given reference is running.
        :param botengine: BotEngine environment
        :param reference: Reference
        :return: True if timers or alarms with the given reference are
            running.
        """
        return botengine.is_timer_running(
            self.intelligence_id + str(reference))

    def cancel_timers(self, botengine, reference=""):
        """
        Cancel timers with the given reference.
        :param botengine: BotEngine environment
        :param reference: Cancel all timers with the given reference
        """
        botengine.cancel_timers(self.intelligence_id + str(reference))

    def set_alarm(self, botengine, timestamp_ms, argument=None,
                  reference=""):
        """
        Set an absolute alarm.
        :param botengine: BotEngine environment
        :param timestamp_ms: Absolute time in milliseconds for the timer to
            fire
        :param argument: Optional argument to provide when the timer fires.
        :param reference: Optional reference to use to manage this timer.
        """
        # Prefix the reference with this intelligence ID so it is unique
        # against all other intelligence modules.
        namespaced_reference = self.intelligence_id + str(reference)

        if isinstance(self.parent, Location):
            # Location intelligence
            bot.set_location_intelligence_alarm(
                botengine, timestamp_ms, self.intelligence_id, argument,
                namespaced_reference)
        else:
            # Device intelligence
            bot.set_device_intelligence_alarm(
                botengine, timestamp_ms, self.intelligence_id, argument,
                namespaced_reference)

    def is_alarm_running(self, botengine, reference=""):
        """
        Check if a timer or alarm with the given reference is running.
        :param botengine: BotEngine environment
        :param reference: Reference
        :return: True if timers or alarms with the given reference are
            running.
        """
        return botengine.is_timer_running(
            self.intelligence_id + str(reference))

    def cancel_alarms(self, botengine, reference=""):
        """
        Cancel alarms with the given reference.
        :param botengine: BotEngine environment
        :param reference: Cancel all alarms with the given reference
        """
        # It's not a mistake that this forwards to cancel_timers(). Timers
        # and alarms are the same thing underneath; this method only exists
        # as a convenience to avoid confusion.
        botengine.cancel_timers(self.intelligence_id + str(reference))
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from kmip.core import enums
from kmip.core.enums import KeyFormatType as KeyFormatTypeEnum
from kmip.core.enums import Tags
from kmip.core.enums import QueryFunction as QueryFunctionEnum

from kmip.core.primitives import ByteString
from kmip.core.primitives import Enumeration
from kmip.core.primitives import Interval
from kmip.core.primitives import Struct
from kmip.core.primitives import TextString

from kmip.core.utils import BytearrayStream


class CertificateValue(ByteString):
    """
    The bytes of a DER-encoded X.509 public key certificate.

    Used by the Certificate Managed Object to store the bytes of the
    certificate. See Section 2.2.1 of the KMIP 1.1. specification for more
    information.
    """

    def __init__(self, value=b''):
        """
        Construct a CertificateValue byte string.

        Args:
            value (bytes): A byte string (e.g., b'\x00\x01...') containing
                the certificate bytes to store. Optional, defaults to the
                empty byte string.
        """
        super(CertificateValue, self).__init__(value, Tags.CERTIFICATE_VALUE)


class Offset(Interval):
    """
    An integer representing a positive change in time.

    Used by Rekey and Recertify requests to indicate the time difference
    between the InitializationDate and the ActivationDate of the
    replacement item to be created. See Sections 4.4, 4.5, and 4.8 of the
    KMIP 1.1 specification for more information.
    """

    def __init__(self, value=None):
        """
        Construct an Offset object.

        Args:
            value (int): An integer representing a positive change in time.
                Optional, defaults to None.
        """
        super(Offset, self).__init__(value, Tags.OFFSET)


class QueryFunction(Enumeration):
    """
    An encodeable wrapper for the QueryFunction enumeration.

    Used by Query requests to specify the information to retrieve from the
    KMIP server. See Sections 4.25 and 9.1.3.2.24 of the KMIP 1.1
    specification for more information.
    """

    def __init__(self, value=None):
        """
        Construct a QueryFunction object.

        Args:
            value (QueryFunction enum): A QueryFunction enumeration value,
                (e.g., QueryFunction.QUERY_OPERATIONS). Optional, default
                to None.
        """
        super(QueryFunction, self).__init__(
            QueryFunctionEnum, value, Tags.QUERY_FUNCTION)


class VendorIdentification(TextString):
    """
    A text string uniquely identifying a KMIP vendor.

    Returned by KMIP servers upon receipt of a Query request for server
    information. See Section 4.25 of the KMIP 1.1. specification for more
    information.
    """

    def __init__(self, value=None):
        """
        Construct a VendorIdentification object.

        Args:
            value (str): A string describing a KMIP vendor. Optional,
                defaults to None.
        """
        super(VendorIdentification, self).__init__(
            value, Tags.VENDOR_IDENTIFICATION)


class ServerInformation(Struct):
    """
    A structure containing vendor-specific fields and/or substructures.

    Returned by KMIP servers upon receipt of a Query request for server
    information. See Section 4.25 of the KMIP 1.1 specification for more
    information.

    Note:
    There are no example structures nor data encodings in the KMIP
    documentation of this object. Therefore this class handles encoding and
    decoding its data in a generic way, using a BytearrayStream for primary
    storage. The intent is for vendor-specific subclasses to decide how to
    decode this data from the stream attribute. Likewise, these subclasses
    must decide how to encode their data into the stream attribute.

    There are no arguments to the constructor and therefore no means by
    which to validate the object's contents.
    """

    def __init__(self):
        """
        Construct a ServerInformation object.
        """
        super(ServerInformation, self).__init__(Tags.SERVER_INFORMATION)

        # Raw, undecoded payload bytes; vendor-specific subclasses decide
        # how to interpret this stream.
        self.data = BytearrayStream()

        self.validate()

    def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Read the data encoding the ServerInformation object and decode it
        into its constituent parts.

        Args:
            istream (Stream): A data stream containing encoded object data,
                supporting a read method; usually a BytearrayStream object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.0.
        """
        super(ServerInformation, self).read(istream,
                                            kmip_version=kmip_version)
        # Capture exactly self.length bytes (set by the superclass read of
        # the TTLV header) as the opaque payload.
        tstream = BytearrayStream(istream.read(self.length))

        self.data = BytearrayStream(tstream.read())

        self.is_oversized(tstream)
        self.validate()

    def write(self, ostream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Write the data encoding the ServerInformation object to a stream.

        Args:
            ostream (Stream): A data stream in which to encode object data,
                supporting a write method; usually a BytearrayStream object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be encoded. Optional,
                defaults to KMIP 1.0.
        """
        tstream = BytearrayStream()
        tstream.write(self.data.buffer)

        # Set the payload length before the superclass writes the TTLV
        # header, then append the payload itself.
        self.length = tstream.length()
        super(ServerInformation, self).write(
            ostream,
            kmip_version=kmip_version
        )
        ostream.write(tstream.buffer)

    def validate(self):
        """
        Error check the types of the different parts of the
        ServerInformation object.
        """
        self.__validate()

    def __validate(self):
        # NOTE (peter-hamilton): Intentional pass, no way to validate data.
        pass

    def __eq__(self, other):
        if isinstance(other, ServerInformation):
            # Length is compared first as a cheap inequality check before
            # comparing the stream contents.
            if len(self.data) != len(other.data):
                return False
            elif self.data != other.data:
                return False
            else:
                return True
        else:
            return NotImplemented

    def __ne__(self, other):
        if isinstance(other, ServerInformation):
            return not (self == other)
        else:
            return NotImplemented

    def __repr__(self):
        return "ServerInformation()"

    def __str__(self):
        return str(self.data)


class KeyFormatType(Enumeration):
    """
    An encodeable wrapper for the KeyFormatType enumeration.

    Used to identify the format of different types of keys in KeyBlock and
    Digest objects, it can also be used to specify the format in which a
    key is returned when using the Get operation. See Sections 2.1.3,
    2.1.7, 3.17, 4.11, and 9.1.3.2.3 of the KMIP 1.1 specification for more
    information.
    """

    def __init__(self, value=KeyFormatTypeEnum.RAW):
        """
        Construct a KeyFormatType object.

        Args:
            value (KeyFormatType): A KeyFormatType enumeration value,
                (e.g., KeyFormatType.PKCS_1). Optional, default to
                KeyFormatType.RAW.
        """
        super(KeyFormatType, self).__init__(
            KeyFormatTypeEnum, value, Tags.KEY_FORMAT_TYPE)
from itertools import product import numpy as np from scipy.sparse import (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix) from sklearn import metrics from sklearn.model_selection import train_test_split from sklearn.model_selection import cross_val_score from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_warns from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_greater from sklearn.utils.validation import check_random_state from sklearn.metrics.pairwise import pairwise_distances from sklearn import neighbors, datasets from sklearn.exceptions import DataConversionWarning rng = np.random.RandomState(0) # load and shuffle iris dataset iris = datasets.load_iris() perm = rng.permutation(iris.target.size) iris.data = iris.data[perm] iris.target = iris.target[perm] # load and shuffle digits digits = datasets.load_digits() perm = rng.permutation(digits.target.size) digits.data = digits.data[perm] digits.target = digits.target[perm] SPARSE_TYPES = (bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dok_matrix, lil_matrix) SPARSE_OR_DENSE = SPARSE_TYPES + (np.asarray,) ALGORITHMS = ('ball_tree', 'brute', 'kd_tree', 'auto') P = (1, 2, 3, 4, np.inf) # Filter deprecation warnings. neighbors.kneighbors_graph = ignore_warnings(neighbors.kneighbors_graph) neighbors.radius_neighbors_graph = ignore_warnings( neighbors.radius_neighbors_graph) def _weight_func(dist): """ Weight function to replace lambda d: d ** -2. The lambda function is not valid because: if d==0 then 0^-2 is not valid. """ # Dist could be multidimensional, flatten it so all values # can be looped with np.errstate(divide='ignore'): retval = 1. 
/ dist return retval ** 2 def test_unsupervised_kneighbors(n_samples=20, n_features=5, n_query_pts=2, n_neighbors=5): # Test unsupervised neighbors methods X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for p in P: results_nodist = [] results = [] for algorithm in ALGORITHMS: neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, p=p) neigh.fit(X) results_nodist.append(neigh.kneighbors(test, return_distance=False)) results.append(neigh.kneighbors(test, return_distance=True)) for i in range(len(results) - 1): assert_array_almost_equal(results_nodist[i], results[i][1]) assert_array_almost_equal(results[i][0], results[i + 1][0]) assert_array_almost_equal(results[i][1], results[i + 1][1]) def test_unsupervised_inputs(): # test the types of valid input into NearestNeighbors X = rng.random_sample((10, 3)) nbrs_fid = neighbors.NearestNeighbors(n_neighbors=1) nbrs_fid.fit(X) dist1, ind1 = nbrs_fid.kneighbors(X) nbrs = neighbors.NearestNeighbors(n_neighbors=1) for input in (nbrs_fid, neighbors.BallTree(X), neighbors.KDTree(X)): nbrs.fit(input) dist2, ind2 = nbrs.kneighbors(X) assert_array_almost_equal(dist1, dist2) assert_array_almost_equal(ind1, ind2) def test_precomputed(random_state=42): """Tests unsupervised NearestNeighbors with a distance matrix.""" # Note: smaller samples may result in spurious test success rng = np.random.RandomState(random_state) X = rng.random_sample((10, 4)) Y = rng.random_sample((3, 4)) DXX = metrics.pairwise_distances(X, metric='euclidean') DYX = metrics.pairwise_distances(Y, X, metric='euclidean') for method in ['kneighbors']: # TODO: also test radius_neighbors, but requires different assertion # As a feature matrix (n_samples by n_features) nbrs_X = neighbors.NearestNeighbors(n_neighbors=3) nbrs_X.fit(X) dist_X, ind_X = getattr(nbrs_X, method)(Y) # As a dense distance matrix (n_samples by n_samples) nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='brute', 
metric='precomputed') nbrs_D.fit(DXX) dist_D, ind_D = getattr(nbrs_D, method)(DYX) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Check auto works too nbrs_D = neighbors.NearestNeighbors(n_neighbors=3, algorithm='auto', metric='precomputed') nbrs_D.fit(DXX) dist_D, ind_D = getattr(nbrs_D, method)(DYX) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Check X=None in prediction dist_X, ind_X = getattr(nbrs_X, method)(None) dist_D, ind_D = getattr(nbrs_D, method)(None) assert_array_almost_equal(dist_X, dist_D) assert_array_almost_equal(ind_X, ind_D) # Must raise a ValueError if the matrix is not of correct shape assert_raises(ValueError, getattr(nbrs_D, method), X) target = np.arange(X.shape[0]) for Est in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): print(Est) est = Est(metric='euclidean') est.radius = est.n_neighbors = 1 pred_X = est.fit(X, target).predict(Y) est.metric = 'precomputed' pred_D = est.fit(DXX, target).predict(DYX) assert_array_almost_equal(pred_X, pred_D) def test_precomputed_cross_validation(): # Ensure array is split correctly rng = np.random.RandomState(0) X = rng.rand(20, 2) D = pairwise_distances(X, metric='euclidean') y = rng.randint(3, size=20) for Est in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): metric_score = cross_val_score(Est(), X, y) precomp_score = cross_val_score(Est(metric='precomputed'), D, y) assert_array_equal(metric_score, precomp_score) def test_unsupervised_radius_neighbors(n_samples=20, n_features=5, n_query_pts=2, radius=0.5, random_state=0): # Test unsupervised radius-based query rng = np.random.RandomState(random_state) X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for p in P: results = [] for algorithm in ALGORITHMS: neigh = 
neighbors.NearestNeighbors(radius=radius, algorithm=algorithm, p=p) neigh.fit(X) ind1 = neigh.radius_neighbors(test, return_distance=False) # sort the results: this is not done automatically for # radius searches dist, ind = neigh.radius_neighbors(test, return_distance=True) for (d, i, i1) in zip(dist, ind, ind1): j = d.argsort() d[:] = d[j] i[:] = i[j] i1[:] = i1[j] results.append((dist, ind)) assert_array_almost_equal(np.concatenate(list(ind)), np.concatenate(list(ind1))) for i in range(len(results) - 1): assert_array_almost_equal(np.concatenate(list(results[i][0])), np.concatenate(list(results[i + 1][0]))), assert_array_almost_equal(np.concatenate(list(results[i][1])), np.concatenate(list(results[i + 1][1]))) def test_kneighbors_classifier(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-neighbors classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) y_str = y.astype(str) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y[:n_test_pts]) # Test prediction with y_str knn.fit(X, y_str) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y_str[:n_test_pts]) def test_kneighbors_classifier_float_labels(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-neighbors classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors) knn.fit(X, y.astype(np.float)) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) 
assert_array_equal(y_pred, y[:n_test_pts]) def test_kneighbors_classifier_predict_proba(): # Test KNeighborsClassifier.predict_proba() method X = np.array([[0, 2, 0], [0, 2, 1], [2, 0, 0], [2, 2, 0], [0, 0, 2], [0, 0, 1]]) y = np.array([4, 4, 5, 5, 1, 1]) cls = neighbors.KNeighborsClassifier(n_neighbors=3, p=1) # cityblock dist cls.fit(X, y) y_prob = cls.predict_proba(X) real_prob = np.array([[0, 2. / 3, 1. / 3], [1. / 3, 2. / 3, 0], [1. / 3, 0, 2. / 3], [0, 1. / 3, 2. / 3], [2. / 3, 1. / 3, 0], [2. / 3, 1. / 3, 0]]) assert_array_equal(real_prob, y_prob) # Check that it also works with non integer labels cls.fit(X, y.astype(str)) y_prob = cls.predict_proba(X) assert_array_equal(real_prob, y_prob) # Check that it works with weights='distance' cls = neighbors.KNeighborsClassifier( n_neighbors=2, p=1, weights='distance') cls.fit(X, y) y_prob = cls.predict_proba(np.array([[0, 2, 0], [2, 2, 2]])) real_prob = np.array([[0, 1, 0], [0, 0.4, 0.6]]) assert_array_almost_equal(real_prob, y_prob) def test_radius_neighbors_classifier(n_samples=40, n_features=5, n_test_pts=10, radius=0.5, random_state=0): # Test radius-based classification rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) y_str = y.astype(str) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: neigh = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm) neigh.fit(X, y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y[:n_test_pts]) neigh.fit(X, y_str) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_array_equal(y_pred, y_str[:n_test_pts]) def test_radius_neighbors_classifier_when_no_neighbors(): # Test radius-based classifier when no neighbors found. 
# In this case it should rise an informative exception X = np.array([[1.0, 1.0], [2.0, 2.0]]) y = np.array([1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers z2 = np.array([[1.01, 1.01], [1.4, 1.4]]) # one outlier weight_func = _weight_func for outlier_label in [0, -1, None]: for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: rnc = neighbors.RadiusNeighborsClassifier clf = rnc(radius=radius, weights=weights, algorithm=algorithm, outlier_label=outlier_label) clf.fit(X, y) assert_array_equal(np.array([1, 2]), clf.predict(z1)) if outlier_label is None: assert_raises(ValueError, clf.predict, z2) elif False: assert_array_equal(np.array([1, outlier_label]), clf.predict(z2)) def test_radius_neighbors_classifier_outlier_labeling(): # Test radius-based classifier when no neighbors found and outliers # are labeled. X = np.array([[1.0, 1.0], [2.0, 2.0], [0.99, 0.99], [0.98, 0.98], [2.01, 2.01]]) y = np.array([1, 2, 1, 1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.01, 2.01]]) # no outliers z2 = np.array([[1.4, 1.4], [1.01, 1.01], [2.01, 2.01]]) # one outlier correct_labels1 = np.array([1, 2]) correct_labels2 = np.array([-1, 1, 2]) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: clf = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm, outlier_label=-1) clf.fit(X, y) assert_array_equal(correct_labels1, clf.predict(z1)) assert_array_equal(correct_labels2, clf.predict(z2)) def test_radius_neighbors_classifier_zero_distance(): # Test radius-based classifier, when distance to a sample is zero. 
X = np.array([[1.0, 1.0], [2.0, 2.0]]) y = np.array([1, 2]) radius = 0.1 z1 = np.array([[1.01, 1.01], [2.0, 2.0]]) correct_labels1 = np.array([1, 2]) weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: clf = neighbors.RadiusNeighborsClassifier(radius=radius, weights=weights, algorithm=algorithm) clf.fit(X, y) assert_array_equal(correct_labels1, clf.predict(z1)) def test_neighbors_regressors_zero_distance(): # Test radius-based regressor, when distance to a sample is zero. X = np.array([[1.0, 1.0], [1.0, 1.0], [2.0, 2.0], [2.5, 2.5]]) y = np.array([1.0, 1.5, 2.0, 0.0]) radius = 0.2 z = np.array([[1.1, 1.1], [2.0, 2.0]]) rnn_correct_labels = np.array([1.25, 2.0]) knn_correct_unif = np.array([1.25, 1.0]) knn_correct_dist = np.array([1.25, 2.0]) for algorithm in ALGORITHMS: # we don't test for weights=_weight_func since user will be expected # to handle zero distances themselves in the function. for weights in ['uniform', 'distance']: rnn = neighbors.RadiusNeighborsRegressor(radius=radius, weights=weights, algorithm=algorithm) rnn.fit(X, y) assert_array_almost_equal(rnn_correct_labels, rnn.predict(z)) for weights, corr_labels in zip(['uniform', 'distance'], [knn_correct_unif, knn_correct_dist]): knn = neighbors.KNeighborsRegressor(n_neighbors=2, weights=weights, algorithm=algorithm) knn.fit(X, y) assert_array_almost_equal(corr_labels, knn.predict(z)) def test_radius_neighbors_boundary_handling(): """Test whether points lying on boundary are handled consistently Also ensures that even with only one query point, an object array is returned rather than a 2d array. 
""" X = np.array([[1.5], [3.0], [3.01]]) radius = 3.0 for algorithm in ALGORITHMS: nbrs = neighbors.NearestNeighbors(radius=radius, algorithm=algorithm).fit(X) results = nbrs.radius_neighbors([[0.0]], return_distance=False) assert_equal(results.shape, (1,)) assert_equal(results.dtype, object) assert_array_equal(results[0], [0, 1]) def test_RadiusNeighborsClassifier_multioutput(): # Test k-NN classifier on multioutput data rng = check_random_state(0) n_features = 2 n_samples = 40 n_output = 3 X = rng.rand(n_samples, n_features) y = rng.randint(0, 3, (n_samples, n_output)) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) weights = [None, 'uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): # Stack single output prediction y_pred_so = [] for o in range(n_output): rnn = neighbors.RadiusNeighborsClassifier(weights=weights, algorithm=algorithm) rnn.fit(X_train, y_train[:, o]) y_pred_so.append(rnn.predict(X_test)) y_pred_so = np.vstack(y_pred_so).T assert_equal(y_pred_so.shape, y_test.shape) # Multioutput prediction rnn_mo = neighbors.RadiusNeighborsClassifier(weights=weights, algorithm=algorithm) rnn_mo.fit(X_train, y_train) y_pred_mo = rnn_mo.predict(X_test) assert_equal(y_pred_mo.shape, y_test.shape) assert_array_almost_equal(y_pred_mo, y_pred_so) def test_kneighbors_classifier_sparse(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, random_state=0): # Test k-NN classifier on sparse matrices # Like the above, but with various types of sparse matrices rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 X *= X > .2 y = ((X ** 2).sum(axis=1) < .5).astype(np.int) for sparsemat in SPARSE_TYPES: knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, algorithm='auto') knn.fit(sparsemat(X), y) epsilon = 1e-5 * (2 * rng.rand(1, n_features) - 1) for sparsev in SPARSE_TYPES + (np.asarray,): X_eps = sparsev(X[:n_test_pts] + epsilon) y_pred = knn.predict(X_eps) 
assert_array_equal(y_pred, y[:n_test_pts]) def test_KNeighborsClassifier_multioutput(): # Test k-NN classifier on multioutput data rng = check_random_state(0) n_features = 5 n_samples = 50 n_output = 3 X = rng.rand(n_samples, n_features) y = rng.randint(0, 3, (n_samples, n_output)) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) weights = [None, 'uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): # Stack single output prediction y_pred_so = [] y_pred_proba_so = [] for o in range(n_output): knn = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) knn.fit(X_train, y_train[:, o]) y_pred_so.append(knn.predict(X_test)) y_pred_proba_so.append(knn.predict_proba(X_test)) y_pred_so = np.vstack(y_pred_so).T assert_equal(y_pred_so.shape, y_test.shape) assert_equal(len(y_pred_proba_so), n_output) # Multioutput prediction knn_mo = neighbors.KNeighborsClassifier(weights=weights, algorithm=algorithm) knn_mo.fit(X_train, y_train) y_pred_mo = knn_mo.predict(X_test) assert_equal(y_pred_mo.shape, y_test.shape) assert_array_almost_equal(y_pred_mo, y_pred_so) # Check proba y_pred_proba_mo = knn_mo.predict_proba(X_test) assert_equal(len(y_pred_proba_mo), n_output) for proba_mo, proba_so in zip(y_pred_proba_mo, y_pred_proba_so): assert_array_almost_equal(proba_mo, proba_so) def test_kneighbors_regressor(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y_target = y[:n_test_pts] weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_true(np.all(abs(y_pred - 
y_target) < 0.3)) def test_KNeighborsRegressor_multioutput_uniform_weight(): # Test k-neighbors in multi-output regression with uniform weight rng = check_random_state(0) n_features = 5 n_samples = 40 n_output = 4 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_output) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for algorithm, weights in product(ALGORITHMS, [None, 'uniform']): knn = neighbors.KNeighborsRegressor(weights=weights, algorithm=algorithm) knn.fit(X_train, y_train) neigh_idx = knn.kneighbors(X_test, return_distance=False) y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) y_pred = knn.predict(X_test) assert_equal(y_pred.shape, y_test.shape) assert_equal(y_pred_idx.shape, y_test.shape) assert_array_almost_equal(y_pred, y_pred_idx) def test_kneighbors_regressor_multioutput(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors in multi-output regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y = np.vstack([y, y]).T y_target = y[:n_test_pts] weights = ['uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) knn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = knn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) def test_radius_neighbors_regressor(n_samples=40, n_features=3, n_test_pts=10, radius=0.5, random_state=0): # Test radius-based neighbors regression rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y_target = y[:n_test_pts] weight_func = _weight_func for algorithm in ALGORITHMS: for weights in ['uniform', 'distance', weight_func]: neigh = 
neighbors.RadiusNeighborsRegressor(radius=radius, weights=weights, algorithm=algorithm) neigh.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = neigh.predict(X[:n_test_pts] + epsilon) assert_true(np.all(abs(y_pred - y_target) < radius / 2)) def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight(): # Test radius neighbors in multi-output regression (uniform weight) rng = check_random_state(0) n_features = 5 n_samples = 40 n_output = 4 X = rng.rand(n_samples, n_features) y = rng.rand(n_samples, n_output) X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) for algorithm, weights in product(ALGORITHMS, [None, 'uniform']): rnn = neighbors. RadiusNeighborsRegressor(weights=weights, algorithm=algorithm) rnn.fit(X_train, y_train) neigh_idx = rnn.radius_neighbors(X_test, return_distance=False) y_pred_idx = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx]) y_pred_idx = np.array(y_pred_idx) y_pred = rnn.predict(X_test) assert_equal(y_pred_idx.shape, y_test.shape) assert_equal(y_pred.shape, y_test.shape) assert_array_almost_equal(y_pred, y_pred_idx) def test_RadiusNeighborsRegressor_multioutput(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=3, random_state=0): # Test k-neighbors in multi-output regression with various weight rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = np.sqrt((X ** 2).sum(1)) y /= y.max() y = np.vstack([y, y]).T y_target = y[:n_test_pts] weights = ['uniform', 'distance', _weight_func] for algorithm, weights in product(ALGORITHMS, weights): rnn = neighbors.RadiusNeighborsRegressor(n_neighbors=n_neighbors, weights=weights, algorithm=algorithm) rnn.fit(X, y) epsilon = 1E-5 * (2 * rng.rand(1, n_features) - 1) y_pred = rnn.predict(X[:n_test_pts] + epsilon) assert_equal(y_pred.shape, y_target.shape) assert_true(np.all(np.abs(y_pred - y_target) < 0.3)) def test_kneighbors_regressor_sparse(n_samples=40, n_features=5, n_test_pts=10, n_neighbors=5, 
random_state=0): # Test radius-based regression on sparse matrices # Like the above, but with various types of sparse matrices rng = np.random.RandomState(random_state) X = 2 * rng.rand(n_samples, n_features) - 1 y = ((X ** 2).sum(axis=1) < .25).astype(np.int) for sparsemat in SPARSE_TYPES: knn = neighbors.KNeighborsRegressor(n_neighbors=n_neighbors, algorithm='auto') knn.fit(sparsemat(X), y) for sparsev in SPARSE_OR_DENSE: X2 = sparsev(X) assert_true(np.mean(knn.predict(X2).round() == y) > 0.95) def test_neighbors_iris(): # Sanity checks on the iris dataset # Puts three points of each label in the plane and performs a # nearest neighbor query on points near the decision boundary. for algorithm in ALGORITHMS: clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm=algorithm) clf.fit(iris.data, iris.target) assert_array_equal(clf.predict(iris.data), iris.target) clf.set_params(n_neighbors=9, algorithm=algorithm) clf.fit(iris.data, iris.target) assert_true(np.mean(clf.predict(iris.data) == iris.target) > 0.95) rgs = neighbors.KNeighborsRegressor(n_neighbors=5, algorithm=algorithm) rgs.fit(iris.data, iris.target) assert_greater(np.mean(rgs.predict(iris.data).round() == iris.target), 0.95) def test_neighbors_digits(): # Sanity check on the digits dataset # the 'brute' algorithm has been observed to fail if the input # dtype is uint8 due to overflow in distance calculations. 
X = digits.data.astype('uint8') Y = digits.target (n_samples, n_features) = X.shape train_test_boundary = int(n_samples * 0.8) train = np.arange(0, train_test_boundary) test = np.arange(train_test_boundary, n_samples) (X_train, Y_train, X_test, Y_test) = X[train], Y[train], X[test], Y[test] clf = neighbors.KNeighborsClassifier(n_neighbors=1, algorithm='brute') score_uint8 = clf.fit(X_train, Y_train).score(X_test, Y_test) score_float = clf.fit(X_train.astype(float), Y_train).score( X_test.astype(float), Y_test) assert_equal(score_uint8, score_float) def test_kneighbors_graph(): # Test kneighbors_graph to build the k-Nearest Neighbor graph. X = np.array([[0, 1], [1.01, 1.], [2, 0]]) # n_neighbors = 1 A = neighbors.kneighbors_graph(X, 1, mode='connectivity', include_self=True) assert_array_equal(A.toarray(), np.eye(A.shape[0])) A = neighbors.kneighbors_graph(X, 1, mode='distance') assert_array_almost_equal( A.toarray(), [[0.00, 1.01, 0.], [1.01, 0., 0.], [0.00, 1.40716026, 0.]]) # n_neighbors = 2 A = neighbors.kneighbors_graph(X, 2, mode='connectivity', include_self=True) assert_array_equal( A.toarray(), [[1., 1., 0.], [1., 1., 0.], [0., 1., 1.]]) A = neighbors.kneighbors_graph(X, 2, mode='distance') assert_array_almost_equal( A.toarray(), [[0., 1.01, 2.23606798], [1.01, 0., 1.40716026], [2.23606798, 1.40716026, 0.]]) # n_neighbors = 3 A = neighbors.kneighbors_graph(X, 3, mode='connectivity', include_self=True) assert_array_almost_equal( A.toarray(), [[1, 1, 1], [1, 1, 1], [1, 1, 1]]) def test_kneighbors_graph_sparse(seed=36): # Test kneighbors_graph to build the k-Nearest Neighbor graph # for sparse input. 
rng = np.random.RandomState(seed) X = rng.randn(10, 10) Xcsr = csr_matrix(X) for n_neighbors in [1, 2, 3]: for mode in ["connectivity", "distance"]: assert_array_almost_equal( neighbors.kneighbors_graph(X, n_neighbors, mode=mode).toarray(), neighbors.kneighbors_graph(Xcsr, n_neighbors, mode=mode).toarray()) def test_radius_neighbors_graph(): # Test radius_neighbors_graph to build the Nearest Neighbor graph. X = np.array([[0, 1], [1.01, 1.], [2, 0]]) A = neighbors.radius_neighbors_graph(X, 1.5, mode='connectivity', include_self=True) assert_array_equal( A.toarray(), [[1., 1., 0.], [1., 1., 1.], [0., 1., 1.]]) A = neighbors.radius_neighbors_graph(X, 1.5, mode='distance') assert_array_almost_equal( A.toarray(), [[0., 1.01, 0.], [1.01, 0., 1.40716026], [0., 1.40716026, 0.]]) def test_radius_neighbors_graph_sparse(seed=36): # Test radius_neighbors_graph to build the Nearest Neighbor graph # for sparse input. rng = np.random.RandomState(seed) X = rng.randn(10, 10) Xcsr = csr_matrix(X) for n_neighbors in [1, 2, 3]: for mode in ["connectivity", "distance"]: assert_array_almost_equal( neighbors.radius_neighbors_graph(X, n_neighbors, mode=mode).toarray(), neighbors.radius_neighbors_graph(Xcsr, n_neighbors, mode=mode).toarray()) def test_neighbors_badargs(): # Test bad argument values: these should all raise ValueErrors assert_raises(ValueError, neighbors.NearestNeighbors, algorithm='blah') X = rng.random_sample((10, 2)) Xsparse = csr_matrix(X) y = np.ones(10) for cls in (neighbors.KNeighborsClassifier, neighbors.RadiusNeighborsClassifier, neighbors.KNeighborsRegressor, neighbors.RadiusNeighborsRegressor): assert_raises(ValueError, cls, weights='blah') assert_raises(ValueError, cls, p=-1) assert_raises(ValueError, cls, algorithm='blah') nbrs = cls(algorithm='ball_tree', metric='haversine') assert_raises(ValueError, nbrs.predict, X) assert_raises(ValueError, ignore_warnings(nbrs.fit), Xsparse, y) nbrs = cls() assert_raises(ValueError, nbrs.fit, np.ones((0, 2)), np.ones(0)) 
assert_raises(ValueError, nbrs.fit, X[:, :, None], y) nbrs.fit(X, y) assert_raises(ValueError, nbrs.predict, [[]]) if (isinstance(cls, neighbors.KNeighborsClassifier) or isinstance(cls, neighbors.KNeighborsRegressor)): nbrs = cls(n_neighbors=-1) assert_raises(ValueError, nbrs.fit, X, y) nbrs = neighbors.NearestNeighbors().fit(X) assert_raises(ValueError, nbrs.kneighbors_graph, X, mode='blah') assert_raises(ValueError, nbrs.radius_neighbors_graph, X, mode='blah') def test_neighbors_metrics(n_samples=20, n_features=3, n_query_pts=2, n_neighbors=5): # Test computing the neighbors for various metrics # create a symmetric matrix V = rng.rand(n_features, n_features) VI = np.dot(V, V.T) metrics = [('euclidean', {}), ('manhattan', {}), ('minkowski', dict(p=1)), ('minkowski', dict(p=2)), ('minkowski', dict(p=3)), ('minkowski', dict(p=np.inf)), ('chebyshev', {}), ('seuclidean', dict(V=rng.rand(n_features))), ('wminkowski', dict(p=3, w=rng.rand(n_features))), ('mahalanobis', dict(VI=VI))] algorithms = ['brute', 'ball_tree', 'kd_tree'] X = rng.rand(n_samples, n_features) test = rng.rand(n_query_pts, n_features) for metric, metric_params in metrics: results = [] p = metric_params.pop('p', 2) for algorithm in algorithms: # KD tree doesn't support all metrics if (algorithm == 'kd_tree' and metric not in neighbors.KDTree.valid_metrics): assert_raises(ValueError, neighbors.NearestNeighbors, algorithm=algorithm, metric=metric, metric_params=metric_params) continue neigh = neighbors.NearestNeighbors(n_neighbors=n_neighbors, algorithm=algorithm, metric=metric, p=p, metric_params=metric_params) neigh.fit(X) results.append(neigh.kneighbors(test, return_distance=True)) assert_array_almost_equal(results[0][0], results[1][0]) assert_array_almost_equal(results[0][1], results[1][1]) def test_callable_metric(): def custom_metric(x1, x2): return np.sqrt(np.sum(x1 ** 2 + x2 ** 2)) X = np.random.RandomState(42).rand(20, 2) nbrs1 = neighbors.NearestNeighbors(3, algorithm='auto', 
metric=custom_metric) nbrs2 = neighbors.NearestNeighbors(3, algorithm='brute', metric=custom_metric) nbrs1.fit(X) nbrs2.fit(X) dist1, ind1 = nbrs1.kneighbors(X) dist2, ind2 = nbrs2.kneighbors(X) assert_array_almost_equal(dist1, dist2) def test_metric_params_interface(): assert_warns(SyntaxWarning, neighbors.KNeighborsClassifier, metric_params={'p': 3}) def test_predict_sparse_ball_kd_tree(): rng = np.random.RandomState(0) X = rng.rand(5, 5) y = rng.randint(0, 2, 5) nbrs1 = neighbors.KNeighborsClassifier(1, algorithm='kd_tree') nbrs2 = neighbors.KNeighborsRegressor(1, algorithm='ball_tree') for model in [nbrs1, nbrs2]: model.fit(X, y) assert_raises(ValueError, model.predict, csr_matrix(X)) def test_non_euclidean_kneighbors(): rng = np.random.RandomState(0) X = rng.rand(5, 5) # Find a reasonable radius. dist_array = pairwise_distances(X).flatten() np.sort(dist_array) radius = dist_array[15] # Test kneighbors_graph for metric in ['manhattan', 'chebyshev']: nbrs_graph = neighbors.kneighbors_graph( X, 3, metric=metric, mode='connectivity', include_self=True).toarray() nbrs1 = neighbors.NearestNeighbors(3, metric=metric).fit(X) assert_array_equal(nbrs_graph, nbrs1.kneighbors_graph(X).toarray()) # Test radiusneighbors_graph for metric in ['manhattan', 'chebyshev']: nbrs_graph = neighbors.radius_neighbors_graph( X, radius, metric=metric, mode='connectivity', include_self=True).toarray() nbrs1 = neighbors.NearestNeighbors(metric=metric, radius=radius).fit(X) assert_array_equal(nbrs_graph, nbrs1.radius_neighbors_graph(X).A) # Raise error when wrong parameters are supplied, X_nbrs = neighbors.NearestNeighbors(3, metric='manhattan') X_nbrs.fit(X) assert_raises(ValueError, neighbors.kneighbors_graph, X_nbrs, 3, metric='euclidean') X_nbrs = neighbors.NearestNeighbors(radius=radius, metric='manhattan') X_nbrs.fit(X) assert_raises(ValueError, neighbors.radius_neighbors_graph, X_nbrs, radius, metric='euclidean') def check_object_arrays(nparray, list_check): for ind, ele in 
enumerate(nparray):
        assert_array_equal(ele, list_check[ind])


def test_k_and_radius_neighbors_train_is_not_query():
    # Test kneighbors et.al when query is not training data.  Expected
    # distances/indices below are hand-computed for the 1-D training set
    # X = [[0], [1]] and query points [[2], [1]].
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)

        X = [[0], [1]]
        nn.fit(X)
        test_data = [[2], [1]]

        # Test neighbors.
        dist, ind = nn.kneighbors(test_data)
        assert_array_equal(dist, [[1], [0]])
        assert_array_equal(ind, [[1], [1]])
        dist, ind = nn.radius_neighbors([[2], [1]], radius=1.5)
        # radius_neighbors returns ragged object arrays, hence the helper.
        check_object_arrays(dist, [[1], [1, 0]])
        check_object_arrays(ind, [[1], [0, 1]])

        # Test the graph variants.
        assert_array_equal(
            nn.kneighbors_graph(test_data).A, [[0., 1.], [0., 1.]])
        assert_array_equal(
            nn.kneighbors_graph([[2], [1]], mode='distance').A,
            np.array([[0., 1.], [0., 0.]]))
        # NOTE: 'rng' here is a sparse radius-neighbors graph, not a
        # random state (it shadows the module-level rng).
        rng = nn.radius_neighbors_graph([[2], [1]], radius=1.5)
        assert_array_equal(rng.A, [[0, 1], [1, 1]])


def test_k_and_radius_neighbors_X_None():
    # Test kneighbors et.al when query is None, i.e. querying the training
    # set itself; the asserted results show each point's own entry is
    # excluded from its neighborhood.
    for algorithm in ALGORITHMS:
        nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm)

        X = [[0], [1]]
        nn.fit(X)

        dist, ind = nn.kneighbors()
        assert_array_equal(dist, [[1], [1]])
        assert_array_equal(ind, [[1], [0]])
        dist, ind = nn.radius_neighbors(None, radius=1.5)
        check_object_arrays(dist, [[1], [1]])
        check_object_arrays(ind, [[1], [0]])

        # Test the graph variants.
rng = nn.radius_neighbors_graph(None, radius=1.5) kng = nn.kneighbors_graph(None) for graph in [rng, kng]: assert_array_equal(rng.A, [[0, 1], [1, 0]]) assert_array_equal(rng.data, [1, 1]) assert_array_equal(rng.indices, [1, 0]) X = [[0, 1], [0, 1], [1, 1]] nn = neighbors.NearestNeighbors(n_neighbors=2, algorithm=algorithm) nn.fit(X) assert_array_equal( nn.kneighbors_graph().A, np.array([[0., 1., 1.], [1., 0., 1.], [1., 1., 0]])) def test_k_and_radius_neighbors_duplicates(): # Test behavior of kneighbors when duplicates are present in query for algorithm in ALGORITHMS: nn = neighbors.NearestNeighbors(n_neighbors=1, algorithm=algorithm) nn.fit([[0], [1]]) # Do not do anything special to duplicates. kng = nn.kneighbors_graph([[0], [1]], mode='distance') assert_array_equal( kng.A, np.array([[0., 0.], [0., 0.]])) assert_array_equal(kng.data, [0., 0.]) assert_array_equal(kng.indices, [0, 1]) dist, ind = nn.radius_neighbors([[0], [1]], radius=1.5) check_object_arrays(dist, [[0, 1], [1, 0]]) check_object_arrays(ind, [[0, 1], [0, 1]]) rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5) assert_array_equal(rng.A, np.ones((2, 2))) rng = nn.radius_neighbors_graph([[0], [1]], radius=1.5, mode='distance') assert_array_equal(rng.A, [[0, 1], [1, 0]]) assert_array_equal(rng.indices, [0, 1, 0, 1]) assert_array_equal(rng.data, [0, 1, 1, 0]) # Mask the first duplicates when n_duplicates > n_neighbors. X = np.ones((3, 1)) nn = neighbors.NearestNeighbors(n_neighbors=1) nn.fit(X) dist, ind = nn.kneighbors() assert_array_equal(dist, np.zeros((3, 1))) assert_array_equal(ind, [[1], [0], [1]]) # Test that zeros are explicitly marked in kneighbors_graph. 
kng = nn.kneighbors_graph(mode='distance') assert_array_equal( kng.A, np.zeros((3, 3))) assert_array_equal(kng.data, np.zeros(3)) assert_array_equal(kng.indices, [1., 0., 1.]) assert_array_equal( nn.kneighbors_graph().A, np.array([[0., 1., 0.], [1., 0., 0.], [0., 1., 0.]])) def test_include_self_neighbors_graph(): # Test include_self parameter in neighbors_graph X = [[2, 3], [4, 5]] kng = neighbors.kneighbors_graph(X, 1, include_self=True).A kng_not_self = neighbors.kneighbors_graph(X, 1, include_self=False).A assert_array_equal(kng, [[1., 0.], [0., 1.]]) assert_array_equal(kng_not_self, [[0., 1.], [1., 0.]]) rng = neighbors.radius_neighbors_graph(X, 5.0, include_self=True).A rng_not_self = neighbors.radius_neighbors_graph( X, 5.0, include_self=False).A assert_array_equal(rng, [[1., 1.], [1., 1.]]) assert_array_equal(rng_not_self, [[0., 1.], [1., 0.]]) def test_same_knn_parallel(): X, y = datasets.make_classification(n_samples=30, n_features=5, n_redundant=0, random_state=0) X_train, X_test, y_train, y_test = train_test_split(X, y) def check_same_knn_parallel(algorithm): clf = neighbors.KNeighborsClassifier(n_neighbors=3, algorithm=algorithm) clf.fit(X_train, y_train) y = clf.predict(X_test) dist, ind = clf.kneighbors(X_test) graph = clf.kneighbors_graph(X_test, mode='distance').toarray() clf.set_params(n_jobs=3) clf.fit(X_train, y_train) y_parallel = clf.predict(X_test) dist_parallel, ind_parallel = clf.kneighbors(X_test) graph_parallel = \ clf.kneighbors_graph(X_test, mode='distance').toarray() assert_array_equal(y, y_parallel) assert_array_almost_equal(dist, dist_parallel) assert_array_equal(ind, ind_parallel) assert_array_almost_equal(graph, graph_parallel) for algorithm in ALGORITHMS: yield check_same_knn_parallel, algorithm def test_dtype_convert(): classifier = neighbors.KNeighborsClassifier(n_neighbors=1) CLASSES = 15 X = np.eye(CLASSES) y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]] result = classifier.fit(X, y).predict(X) assert_array_equal(result, 
y)


# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
    # Non-regression test for #4523: a boolean metric must give the same
    # neighbor distances regardless of the backend that computes it.
    # 'brute': uses scipy.spatial.distance through pairwise_distances
    # 'ball_tree': uses sklearn.neighbors.dist_metrics
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(6, 5))
    NN = neighbors.NearestNeighbors
    nn1 = NN(metric="jaccard", algorithm='brute').fit(X)
    nn2 = NN(metric="jaccard", algorithm='ball_tree').fit(X)
    # Only distances ([0]) are compared; index order may legitimately
    # differ between backends when distances tie.
    assert_array_equal(nn1.kneighbors(X)[0], nn2.kneighbors(X)[0])
# The contents of this file are subject to the BitTorrent Open Source License # Version 1.1 (the License). You may not copy or use this file, in either # source code or executable form, except in compliance with the License. You # may obtain a copy of the License at http://www.bittorrent.com/license/. # # Software distributed under the License is distributed on an AS IS basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. # written by Matt Chisholm, Steven Hazel, and Greg Hazel import locale import traceback import wx from UserDict import IterableUserDict from wx.lib.mixins.listctrl import ColumnSorterMixin from wx.lib.mixins.listctrl import getListCtrlSelection import os import sys if os.name == 'nt': LVM_FIRST = 0x1000 LVM_SETSELECTEDCOLUMN = (LVM_FIRST + 140) import win32gui def highlight_color(c): if c > 240: c *= 0.97 else: c = min(c * 1.10, 255) return int(c) SEL_FOC = wx.LIST_STATE_SELECTED | wx.LIST_STATE_FOCUSED def selectBeforePopup(ctrl, pos): """Ensures the item the mouse is pointing at is selected before a popup. 
Works with both single-select and multi-select lists.""" if not isinstance(ctrl, wx.ListCtrl): return n, flags = ctrl.HitTest(pos) if n < 0: return if not ctrl.GetItemState(n, wx.LIST_STATE_SELECTED): for i in xrange(ctrl.GetItemCount()): ctrl.SetItemState(i, 0, SEL_FOC) ctrl.SetItemState(n, SEL_FOC, SEL_FOC) class ContextMenuMixin(object): def __init__(self): self.context_menu = None self.column_context_menu = None self.Bind(wx.EVT_CONTEXT_MENU, self.OnContextMenu) self.Bind(wx.EVT_LIST_COL_RIGHT_CLICK, self.OnColumnContextMenu) def SetContextMenu(self, menu): self.context_menu = menu def SetColumnContextMenu(self, menu): self.column_context_menu = menu def OnColumnContextMenu(self, event): if self.column_context_menu: self.PopupMenu(self.column_context_menu) def OnContextMenu(self, event): pos = self.ScreenToClient(event.GetPosition()) top = self.GetItemRect(self.GetTopItem()) if pos[1] < top.y: event.Skip() return pos -= self._get_origin_offset() self.DoPopup(pos) def DoPopup(self, pos): """ pos should be in client coords """ if self.context_menu: selectBeforePopup(self, pos) selection = getListCtrlSelection(self) if len(selection) > 0: self.PopupMenu(self.context_menu) return class BTListColumn(wx.ListItem): def __init__(self, text, sample_data, renderer=None, comparator=None, enabled=True, width=50): wx.ListItem.__init__(self) self.SetText(text) self.renderer = renderer self.comparator = comparator self.enabled = enabled self.sample_data = sample_data self.width = width class BTListRow(IterableUserDict): __slots__ = ['data', 'index'] def __init__(self, index, data): self.data = data self.index = index def __getitem__(self, i): return self.data[i] class BTListCtrl(wx.ListCtrl, ColumnSorterMixin, ContextMenuMixin): # Part of this class based on: # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/426407 icon_size = 16 def __init__(self, parent): wx.ListCtrl.__init__(self, parent, wx.ID_ANY, style=wx.LC_REPORT) ContextMenuMixin.__init__(self) self.il = 
wx.ImageList(self.icon_size, self.icon_size) # TODO: use a real icon self.il.Add(self.draw_blank()) self.il.Add(self.draw_sort_arrow('up')) self.il.Add(self.draw_sort_arrow('down')) self.SetImageList(self.il, wx.IMAGE_LIST_SMALL) self.update_enabled_columns() for i, name in enumerate(self.enabled_columns): column = self.columns[name] column.SetColumn(i) self.InsertColumnItem(i, column) self.itemData_to_row = {} self.index_to_itemData = {} self.selected_column = None self.SelectColumn(self.enabled_columns[0]) cmenu = wx.Menu() for name in self.column_order: column = self.columns[name] id = wx.NewId() cmenu.AppendCheckItem(id, column.GetText()) cmenu.Check(id, column.enabled) self.Bind(wx.EVT_MENU, lambda e, c=column, id=id: self.toggle_column(c, id, e), id=id) self.SetColumnContextMenu(cmenu) ColumnSorterMixin.__init__(self, len(self.enabled_columns)) self._last_scrollpos = 0 if sys.platform != "darwin": self.Bind(wx.EVT_ERASE_BACKGROUND, self.OnEraseBackground) self.default_rect = wx.Rect(0,0) def OnEraseBackground(self, event=None): nsp = self.GetScrollPos(wx.VERTICAL) if self._last_scrollpos != nsp: self._last_scrollpos = nsp # should only refresh visible items, hmm wx.CallAfter(self.Refresh) dc = wx.ClientDC(self) # erase the section of the background which is not covered by the # items or the selected column highlighting dc.SetBackground(wx.Brush(self.GetBackgroundColour())) f = self.GetRect() r = wx.Region(0, 0, f.width, f.height) x = self.GetVisibleViewRect() offset = self._get_origin_offset(include_header=True) x.Offset(offset) r.SubtractRect(x) if '__WXMSW__' in wx.PlatformInfo: c = self.GetColumnRect(self.enabled_columns.index(self.selected_column)) r.SubtractRect(c) dc.SetClippingRegionAsRegion(r) dc.Clear() if '__WXMSW__' in wx.PlatformInfo: # draw the selected column highlighting under the items dc.DestroyClippingRegion() r = wx.Region(0, 0, f.width, f.height) r.SubtractRect(x) dc.SetClippingRegionAsRegion(r) dc.SetPen(wx.TRANSPARENT_PEN) hc = 
wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW) r = highlight_color(hc.Red()) g = highlight_color(hc.Green()) b = highlight_color(hc.Blue()) hc.Set(r, g, b) dc.SetBrush(wx.Brush(hc)) dc.DrawRectangle(c.x, c.y, c.width, c.height) def update_enabled_columns(self): self.enabled_columns = [name for name in self.column_order if self.columns[name].enabled] def toggle_column(self, tcolumn, id, event): self.update_column_widths() sort_col = self.get_sort_column() tcolumn.enabled = not tcolumn.enabled self.column_context_menu.Check(id, tcolumn.enabled) self.update_enabled_columns() if not tcolumn.enabled: self.DeleteColumn(tcolumn.GetColumn()) new_col_names = [] for i, name in enumerate(self.enabled_columns): column = self.columns[name] column.SetColumn(i) if column == tcolumn: self.InsertColumnItem(i, column) new_col_names.append(name) self.SetColumnWidth(column.GetColumn(), column.width) self.SetColumnCount(len(self.enabled_columns)) self.SortListItems(col=sort_col) for itemData in self.itemData_to_row.iterkeys(): self.InsertRow(itemData, self.itemData_to_row[itemData], sort=True, force_update_columns=new_col_names) #self.SortItems() def set_default_widths(self): # must be called before *any* data is put into the control. 
sample_data = {} for name in self.column_order: sample_data[name] = self.columns[name].sample_data sample_row = BTListRow(None, sample_data) self.InsertRow(-1, sample_row) for name in self.column_order: column = self.columns[name] if name in self.enabled_columns: self.SetColumnWidth(column.GetColumn(), wx.LIST_AUTOSIZE) column.width = self.GetColumnWidth(column.GetColumn()) dc = wx.ClientDC(self) header_width = dc.GetTextExtent(column.GetText())[0] header_width += 4 # arbitrary allowance for header decorations column.width = max(column.width, header_width) if name in self.enabled_columns: self.SetColumnWidth(column.GetColumn(), column.width) self.default_rect = self.GetItemRect(0) self.DeleteRow(-1) def _get_origin_offset(self, include_header=None): if include_header is None: # Hm, I think this is a legit bug in wxGTK if '__WXGTK__' in wx.PlatformInfo: include_header = True else: include_header = False if include_header: i = self.GetTopItem() try: r = self.GetItemRect(i) except wx._core.PyAssertionError: r = self.default_rect return (r.x, r.y) return (0, 0) def add_image(self, image): b = wx.BitmapFromImage(image) if not b.Ok(): raise Exception("The image (%s) is not valid." 
% image) if (sys.platform == "darwin" and (b.GetWidth(), b.GetHeight()) == (self.icon_size, self.icon_size)): return self.il.Add(b) b2 = wx.EmptyBitmap(self.icon_size, self.icon_size) dc = wx.MemoryDC() dc.SelectObject(b2) dc.SetBackgroundMode(wx.TRANSPARENT) dc.Clear() x = (b2.GetWidth() - b.GetWidth()) / 2 y = (b2.GetHeight() - b.GetHeight()) / 2 dc.DrawBitmap(b, x, y, True) dc.SelectObject(wx.NullBitmap) b2.SetMask(wx.Mask(b2, (255, 255, 255))) return self.il.Add(b2) # Arrow drawing def draw_blank(self): b = wx.EmptyBitmap(self.icon_size, self.icon_size) dc = wx.MemoryDC() dc.SelectObject(b) dc.SetBackgroundMode(wx.TRANSPARENT) dc.Clear() dc.SelectObject(wx.NullBitmap) b.SetMask(wx.Mask(b, (255, 255, 255))) return b # this builds an identical arrow to the windows listctrl arrows, in themed # and non-themed mode. def draw_sort_arrow(self, direction): b = wx.EmptyBitmap(self.icon_size, self.icon_size) w, h = b.GetSize() ho = (h - 5) / 2 dc = wx.MemoryDC() dc.SelectObject(b) colour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_GRAYTEXT) dc.SetBackgroundMode(wx.TRANSPARENT) dc.Clear() dc.SetPen(wx.Pen(colour)) for i in xrange(5): if direction == 'down': j = 4 - i else: j = i dc.DrawLine(i,j+ho,9-i,j+ho) dc.SelectObject(wx.NullBitmap) b.SetMask(wx.Mask(b, (255, 255, 255))) return b def GetBottomItem(self): total = self.GetItemCount() top = self.GetTopItem() pp = self.GetCountPerPage() # I purposefully do not subtract 1 from pp, because pp is whole items bottom = min(top + pp, total - 1) return bottom def SelectColumn(self, col): """Color the selected column (MSW only)""" if self.selected_column == col: return col_num = self.enabled_columns.index(col) if os.name == 'nt': win32gui.PostMessage(self.GetHandle(), LVM_SETSELECTEDCOLUMN, col_num, 0) if self.selected_column is not None: self.RefreshCol(self.selected_column) self.RefreshCol(col) self.selected_column = col def render_column_text(self, row, name): """Renders the column value into a string""" item = 
self.columns[name] value = row[name] if value is None: text = '?' elif item.renderer is not None: try: text = item.renderer(value) except: text = '?' # BUG: for debugging only traceback.print_exc() else: text = unicode(value) return text def get_column_image(self, row): return None def _update_indexes(self, start = 0): for i in xrange(start, self.GetItemCount()): itemData = self.GetItemData(i) self.itemData_to_row[itemData].index = i self.index_to_itemData[i] = itemData # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py def SortItems(self, sorter=None): if sorter is None: sorter = self.GetColumnSorter() # TODO: # this step is to see if the list needs resorted. # improve this by stopping the first time the order would be changed. d = [None,] * self.GetItemCount() for i in xrange(len(d)): # use real GetItemData, so the sorter can translate d[i] = wx.ListCtrl.GetItemData(self, i) n = list(d) n.sort(sorter) if n != d: wx.ListCtrl.SortItems(self, sorter) self._update_indexes() self.SelectColumn(self.enabled_columns[self._col]) def SortListItems(self, col=-1, ascending=1): if col in self.enabled_columns: col = self.enabled_columns.index(col) else: col = 0 ColumnSorterMixin.SortListItems(self, col=col, ascending=ascending) def GetSelection(self): return getListCtrlSelection(self) def GetSelectionData(self): indexes = self.GetSelection() data = [] for i in indexes: data.append(self.GetItemData(i)) return data # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py def GetListCtrl(self): return self # Used by the ColumnSorterMixin, see wx/lib/mixins/listctrl.py def GetSortImages(self): return (1, 2) def GetColumnSorter(self): """Returns a callable object to be used for comparing column values when sorting.""" return self.__ColumnSorter def TranslateItemData(self, itemData): return itemData def __ColumnSorter(self, itemData1, itemData2): """Allows custom compare functions, in self.colcmps.""" col = self._col ascending = self._colSortFlag[col] if col < 
len(self.enabled_columns): name = self.enabled_columns[col] else: name = self.column_order[0] itemData1 = self.TranslateItemData(itemData1) itemData2 = self.TranslateItemData(itemData2) item1 = self.itemData_to_row[itemData1][name] item2 = self.itemData_to_row[itemData2][name] column = self.columns[name] if column.comparator != None: # use custom cmp method cmpVal = column.comparator(item1, item2) elif isinstance(item1, str) or isinstance(item2, str): # Internationalization of string sorting with locale module cmpVal = locale.strcoll(unicode(item1), unicode(item2)) else: cmpVal = cmp(item1, item2) # If the items are equal then pick something else to make the sort value unique if cmpVal == 0: cmpVal = apply(cmp, self.GetSecondarySortValues(col, itemData1, itemData2)) if ascending: return cmpVal else: return -cmpVal def RefreshCol(self, col): if col in self.enabled_columns: self.RefreshRect(self.GetColumnRect(self.enabled_columns.index(col))) def HitTestColumn(self, pos): """ pos should be in client coords """ i = self.GetTopItem() r = self.GetItemRect(i) x, y = self._get_origin_offset() if pos[1] >= (r.y - y): return None loc = 0 for n in xrange(self.GetColumnCount()): loc += self.GetColumnWidth(n) if pos[0] < loc: return n def GetVisibleViewRect(self): width = 0 for n in xrange(self.GetColumnCount()): width += self.GetColumnWidth(n) height = 0 if self.GetItemCount() > 0: i = self.GetTopItem() r1 = self.GetItemRect(i) last = min(i + self.GetCountPerPage(), self.GetItemCount() - 1) r2 = self.GetItemRect(last) height = r2.y + r2.height - r1.y x, y = self._get_origin_offset() # there is a 2 pixel strip on either side which is not part of the item if '__WXMSW__' in wx.PlatformInfo: x += 2 width -= 4 return wx.Rect(x, y, x+width, y+height) def GetViewRect(self): width = 0 for n in xrange(self.GetColumnCount()): width += self.GetColumnWidth(n) height = 0 if self.GetItemCount() > 0: r1 = self.GetItemRect(0) r2 = self.GetItemRect(self.GetItemCount() - 1) height = r2.y + 
r2.height - r1.y x, y = self._get_origin_offset() return wx.Rect(x, y, x+width, y+height) def _GetColumnWidthExtent(self, col): col_locs = [0] loc = 0 num_cols = min(col+1, self.GetColumnCount()) for n in xrange(num_cols): loc += self.GetColumnWidth(n) col_locs.append(loc) x0 = col_locs[col] x1 = col_locs[col+1] - 1 return x0, x1 def GetColumnRect(self, col): x0, x1 = self._GetColumnWidthExtent(col) r = self.GetItemRect(0) y0 = r.y y1 = self.GetClientSize()[1] x_scroll = self.GetScrollPos(wx.HORIZONTAL) return wx.RectPP(wx.Point(x0 - x_scroll, y0), wx.Point(x1 - x_scroll, y1)) def GetCellRect(self, row, col): x0, x1 = self._GetColumnWidthExtent(col) r = self.GetItemRect(row) y0 = r.y y1 = r.GetBottom() x_scroll = self.GetScrollPos(wx.HORIZONTAL) return wx.RectPP(wx.Point(x0 - x_scroll, y0), wx.Point(x1 - x_scroll, y1)) def DeselectAll(self): self.SetItemState(-1, 0, wx.LIST_STATE_SELECTED) # fallback. for extremely long lists a generator should be used #for i in xrange(self.GetItemCount()): # self.SetItemState(i, 0, wx.LIST_STATE_SELECTED) def InsertRow(self, itemData, lr, sort=True, colour=None, force_update_columns=[]): # pre-render all data image_id = self.get_column_image(lr) row_text = {} for i, name in enumerate(self.enabled_columns): row_text[i] = self.render_column_text(lr, name) if itemData not in self.itemData_to_row: # this is Add # TODO: insert in sorted order instead of sorting i = self.InsertStringItem(self.GetItemCount(), '') lr.index = i self.SetItemData(i, itemData) for col in xrange(len(self.enabled_columns)): self.SetStringItem(index=lr.index, col=col, label=row_text[col]) else: # this is Update old_lr = self.itemData_to_row[itemData] lr.index = old_lr.index for col, colname in enumerate(self.enabled_columns): if lr[colname] != old_lr[colname] or \ colname in force_update_columns: self.SetStringItem(index=lr.index, col=col, label=row_text[col]) self.itemData_to_row[itemData] = lr self.index_to_itemData[i] = itemData if colour is not None: if 
self.GetItemTextColour(lr.index) != colour: self.SetItemTextColour(lr.index, colour) self.SetItemImage(lr.index, image_id) if sort: # TODO: move to update-only once things are inserted in sorted order self.SortItems() def SetItemImage(self, index, image_id): item = self.GetItem(index) if item.GetImage() != image_id: wx.ListCtrl.SetItemImage(self, index, image_id) def DeleteRow(self, itemData): lr = self.itemData_to_row.pop(itemData) self.DeleteItem(lr.index) self._update_indexes(lr.index) def GetRow(self, index): itemData = self.index_to_itemData[index] return self.itemData_to_row[itemData] def HasRow(self, itemData): return itemData in self.itemData_to_row # Persistence methods def get_column_widths(self): widths = {} for name in self.column_order: column = self.columns[name] if column.enabled: column.width = self.GetColumnWidth(column.GetColumn()) widths[name] = column.width return widths def set_column_widths(self, widths): # backward compatibility with development versions if isinstance(widths, list): return for name, width in widths.iteritems(): column = self.columns[name] column.width = width if column.enabled: self.SetColumnWidth(column.GetColumn(), column.width) def update_column_widths(self): for name in self.enabled_columns: column = self.columns[name] column.width = self.GetColumnWidth(column.GetColumn()) def get_sort_column(self): if self._col < len(self.enabled_columns): sort_col = self.enabled_columns[self._col] else: sort_col = None return sort_col def get_sort_order(self): return self._colSortFlag[self._col] class HashableListView(BTListCtrl): """wx.ListCtrl expects integer identifiers for each row. 
This subclass lets you use any hashable as the identifier instead."""

    def __init__(self, *a, **k):
        BTListCtrl.__init__(self, *a, **k)
        # Bidirectional mapping between the integer ids wx requires and
        # the caller-supplied hashable keys.
        self.itemData_to_hashable = {}
        self.hashable_to_itemData = {}
        # Monotonic counter used to mint fresh integer ids.
        self.unique_index = 0

    def GetNewItemData(self):
        # Hand out a fresh, never-reused integer id for the underlying
        # wx.ListCtrl item data.
        self.unique_index += 1
        return self.unique_index

    def GetItemData(self, index):
        # Translate the stored integer id back into the caller's hashable.
        itemData = BTListCtrl.GetItemData(self, index)
        return self.itemData_to_hashable[itemData]

    def SetItemData(self, index, hashable):
        # Look up the integer id for this hashable; the hashable must
        # already have been registered via InsertRow.
        itemData = self.hashable_to_itemData[hashable]
        BTListCtrl.SetItemData(self, index, itemData)

    def InsertRow(self, hashable, row, sort=True, colour=None,
                  force_update_columns=[]):
        # NOTE(review): mutable default for force_update_columns; it is
        # only read (never mutated) here and in BTListCtrl.InsertRow, but
        # a None default would be safer -- confirm before changing.
        if hashable not in self.hashable_to_itemData:
            itemData = self.GetNewItemData()
            self.hashable_to_itemData[hashable] = itemData
            self.itemData_to_hashable[itemData] = hashable
        b = BTListCtrl.InsertRow(self, hashable, row, sort=sort,
                                 colour=colour,
                                 force_update_columns=force_update_columns)
        return b

    def DeleteRow(self, hashable):
        # Retire both directions of the id mapping before removing the row.
        itemData = self.hashable_to_itemData.pop(hashable)
        del self.itemData_to_hashable[itemData]
        return BTListCtrl.DeleteRow(self, hashable)

    def TranslateItemData(self, itemData):
        # Called by the base class's column sorter to map raw wx item data
        # back to the key used in itemData_to_row.
        return self.itemData_to_hashable[itemData]

    def GetRowFromKey(self, hashable):
        # itemData_to_row is keyed by whatever was passed to
        # BTListCtrl.InsertRow, which in this subclass is the hashable
        # itself (see InsertRow above).
        return self.itemData_to_row[hashable]
# -*- coding: utf-8 -*- from __future__ import unicode_literals import logging from django.conf import settings from django.http import HttpRequest, HttpResponse from django.middleware.csrf import ( CSRF_KEY_LENGTH, CsrfViewMiddleware, get_token, ) from django.template import RequestContext, Template from django.template.context_processors import csrf from django.test import SimpleTestCase, override_settings from django.views.decorators.csrf import ( csrf_exempt, ensure_csrf_cookie, requires_csrf_token, ) # Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests def post_form_response(): resp = HttpResponse(content=""" <html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html> """, mimetype="text/html") return resp def post_form_view(request): """A view that returns a POST form (without a token)""" return post_form_response() # Response/views used for template tag tests def token_view(request): """A view that uses {% csrf_token %}""" context = RequestContext(request, processors=[csrf]) template = Template("{% csrf_token %}") return HttpResponse(template.render(context)) def non_token_view_using_request_processor(request): """ A view that doesn't use the token, but does use the csrf view processor. """ context = RequestContext(request, processors=[csrf]) template = Template("") return HttpResponse(template.render(context)) class TestingHttpRequest(HttpRequest): """ A version of HttpRequest that allows us to change some things more easily """ def is_secure(self): return getattr(self, '_is_secure_override', False) class CsrfViewMiddlewareTest(SimpleTestCase): # The csrf token is potentially from an untrusted source, so could have # characters that need dealing with. 
_csrf_id_cookie = b"<1>\xc2\xa1" _csrf_id = "1" def _get_GET_no_csrf_cookie_request(self): return TestingHttpRequest() def _get_GET_csrf_cookie_request(self): req = TestingHttpRequest() req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie return req def _get_POST_csrf_cookie_request(self): req = self._get_GET_csrf_cookie_request() req.method = "POST" return req def _get_POST_no_csrf_cookie_request(self): req = self._get_GET_no_csrf_cookie_request() req.method = "POST" return req def _get_POST_request_with_token(self): req = self._get_POST_csrf_cookie_request() req.POST['csrfmiddlewaretoken'] = self._csrf_id return req def _check_token_present(self, response, csrf_id=None): self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id)) def test_process_view_token_too_long(self): """ If the token is longer than expected, it is ignored and a new token is created. """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000 CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH) def test_process_response_get_token_used(self): """ When get_token is used, check that the cookie is created and headers patched. 
""" req = self._get_GET_no_csrf_cookie_request() # Put tests for CSRF_COOKIE_* settings here with self.settings(CSRF_COOKIE_NAME='myname', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get('myname', False) self.assertNotEqual(csrf_cookie, False) self.assertEqual(csrf_cookie['domain'], '.example.com') self.assertEqual(csrf_cookie['secure'], True) self.assertEqual(csrf_cookie['httponly'], True) self.assertEqual(csrf_cookie['path'], '/test/') self.assertIn('Cookie', resp2.get('Vary', '')) def test_process_response_get_token_not_used(self): """ Check that if get_token() is not called, the view middleware does not add a cookie. """ # This is important to make pages cacheable. Pages which do call # get_token(), assuming they use the token, are not cacheable because # the token is specific to the user req = self._get_GET_no_csrf_cookie_request() # non_token_view_using_request_processor does not call get_token(), but # does use the csrf request processor. By using this, we are testing # that the view processor is properly lazy and doesn't call get_token() # until needed. CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {}) resp = non_token_view_using_request_processor(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False) self.assertEqual(csrf_cookie, False) # Check the request processing def test_process_request_no_csrf_cookie(self): """ Check that if no CSRF cookies is present, the middleware rejects the incoming request. This will stop login CSRF. 
""" req = self._get_POST_no_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_no_token(self): """ Check that if a CSRF cookie is present but no token, the middleware rejects the incoming request. """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_process_request_csrf_cookie_and_token(self): """ Check that if both a cookie and a token is present, the middleware lets it through. """ req = self._get_POST_request_with_token() req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_process_request_csrf_cookie_no_token_exempt_view(self): """ Check that if a CSRF cookie is present and no token, but the csrf_exempt decorator has been applied to the view, the middleware lets it through """ req = self._get_POST_csrf_cookie_request() req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {}) self.assertIsNone(req2) def test_csrf_token_in_header(self): """ Check that we can pass in the token in a header instead of in the form """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(CSRF_HEADER_NAME='HTTP_X_CSRFTOKEN_CUSTOMIZED') def test_csrf_token_in_header_with_customized_name(self): """ settings.CSRF_HEADER_NAME can be used to customize the CSRF header name """ req = self._get_POST_csrf_cookie_request() req.META['HTTP_X_CSRFTOKEN_CUSTOMIZED'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) def test_put_and_delete_rejected(self): """ Tests that HTTP PUT and DELETE methods have protection """ req = TestingHttpRequest() req.method = 'PUT' req2 = 
CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) req = TestingHttpRequest() req.method = 'DELETE' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(403, req2.status_code) def test_put_and_delete_allowed(self): """ Tests that HTTP PUT and DELETE methods can get through with X-CSRFToken and a cookie """ req = self._get_GET_csrf_cookie_request() req.method = 'PUT' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) req = self._get_GET_csrf_cookie_request() req.method = 'DELETE' req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) # Tests for the template tag method def test_token_node_no_csrf_cookie(self): """ Check that CsrfTokenNode works when no CSRF cookie is set """ req = self._get_GET_no_csrf_cookie_request() resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_empty_csrf_cookie(self): """ Check that we get a new token if the csrf_cookie is the empty string """ req = self._get_GET_no_csrf_cookie_request() req.COOKIES[settings.CSRF_COOKIE_NAME] = b"" CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) token = get_token(req) self.assertIsNotNone(token) self._check_token_present(resp, token) def test_token_node_with_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is set """ req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_exempt_view(self): """ Check that get_token still works for a view decorated with 'csrf_exempt'. 
""" req = self._get_GET_csrf_cookie_request() CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {}) resp = token_view(req) self._check_token_present(resp) def test_get_token_for_requires_csrf_token_view(self): """ Check that get_token works for a view decorated solely with requires_csrf_token """ req = self._get_GET_csrf_cookie_request() resp = requires_csrf_token(token_view)(req) self._check_token_present(resp) def test_token_node_with_new_csrf_cookie(self): """ Check that CsrfTokenNode works when a CSRF cookie is created by the middleware (when one was not already present) """ req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME] self._check_token_present(resp, csrf_id=csrf_cookie.value) @override_settings(DEBUG=True) def test_https_bad_referer(self): """ Test that a POST HTTPS request with a bad referer is rejected """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - https://www.evil.org/somepage does not ' 'match any trusted origins.', status_code=403, ) @override_settings(DEBUG=True) def test_https_malformed_referer(self): """ A POST HTTPS request with a bad referer is rejected. """ malformed_referer_msg = 'Referer checking failed - Referer is malformed.' 
req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'http://http://www.example.com/' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - Referer is insecure while host is secure.', status_code=403, ) # Empty req.META['HTTP_REFERER'] = '' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # Non-ASCII req.META['HTTP_REFERER'] = b'\xd8B\xf6I\xdf' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing scheme # >>> urlparse('//example.com/') # ParseResult(scheme='', netloc='example.com', path='/', params='', query='', fragment='') req.META['HTTP_REFERER'] = '//example.com/' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) # missing netloc # >>> urlparse('https://') # ParseResult(scheme='https', netloc='', path='', params='', query='', fragment='') req.META['HTTP_REFERER'] = 'https://' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains(response, malformed_referer_msg, status_code=403) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer(self): """ A POST HTTPS request with a good referer is accepted. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com/somepage' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com']) def test_https_good_referer_2(self): """ A POST HTTPS request with a good referer is accepted where the referer contains no trailing slash. 
""" # See ticket #15617 req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://www.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com', USE_X_FORWARDED_PORT=True) def test_https_good_referer_behind_proxy(self): """ A POST HTTPS request is accepted when USE_X_FORWARDED_PORT=True. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META.update({ 'HTTP_HOST': '10.0.0.2', 'HTTP_REFERER': 'https://www.example.com/somepage', 'SERVER_PORT': '8080', 'HTTP_X_FORWARDED_HOST': 'www.example.com', 'HTTP_X_FORWARDED_PORT': '443', }) req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['dashboard.example.com']) def test_https_csrf_trusted_origin_allowed(self): """ A POST HTTPS request with a referer added to the CSRF_TRUSTED_ORIGINS setting is accepted. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://dashboard.example.com' req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(req2) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_TRUSTED_ORIGINS=['.example.com']) def test_https_csrf_wildcard_trusted_origin_allowed(self): """ A POST HTTPS request with a referer that matches a CSRF_TRUSTED_ORIGINS wildcard is accepted. 
""" req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://dashboard.example.com' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by CSRF_COOKIE_DOMAIN. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'https://foo.example.com/' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) @override_settings(ALLOWED_HOSTS=['www.example.com'], CSRF_COOKIE_DOMAIN='.example.com') def test_https_good_referer_matches_cookie_domain_with_different_port(self): """ A POST HTTPS request with a good referer should be accepted from a subdomain that's allowed by CSRF_COOKIE_DOMAIN and a non-443 port. """ req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_HOST'] = 'www.example.com' req.META['HTTP_REFERER'] = 'https://foo.example.com:4443/' req.META['SERVER_PORT'] = '4443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(response) @override_settings(CSRF_COOKIE_DOMAIN='.example.com', DEBUG=True) def test_https_reject_insecure_referer(self): """ A POST HTTPS request from an insecure referer should be rejected. 
""" req = self._get_POST_request_with_token() req._is_secure_override = True req.META['HTTP_REFERER'] = 'http://example.com/' req.META['SERVER_PORT'] = '443' response = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertContains( response, 'Referer checking failed - Referer is insecure while host is secure.', status_code=403, ) def test_ensures_csrf_cookie_no_middleware(self): """ The ensure_csrf_cookie() decorator works without middleware. """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() resp = view(req) self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp.get('Vary', '')) def test_ensures_csrf_cookie_with_middleware(self): """ The ensure_csrf_cookie() decorator works with the CsrfViewMiddleware enabled. """ @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") req = self._get_GET_no_csrf_cookie_request() CsrfViewMiddleware().process_view(req, view, (), {}) resp = view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)) self.assertIn('Cookie', resp2.get('Vary', '')) def test_ensures_csrf_cookie_no_logging(self): """ ensure_csrf_cookie() doesn't log warnings (#19436). 
""" @ensure_csrf_cookie def view(request): # Doesn't insert a token or anything return HttpResponse(content="") class TestHandler(logging.Handler): def emit(self, record): raise Exception("This shouldn't have happened!") logger = logging.getLogger('django.request') test_handler = TestHandler() old_log_level = logger.level try: logger.addHandler(test_handler) logger.setLevel(logging.WARNING) req = self._get_GET_no_csrf_cookie_request() view(req) finally: logger.removeHandler(test_handler) logger.setLevel(old_log_level) def test_csrf_cookie_age(self): """ CSRF cookie age can be set using settings.CSRF_COOKIE_AGE. """ req = self._get_GET_no_csrf_cookie_request() MAX_AGE = 123 with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, MAX_AGE) def test_csrf_cookie_age_none(self): """ CSRF cookie age does not have max age set and therefore uses session-based cookies. """ req = self._get_GET_no_csrf_cookie_request() MAX_AGE = None with self.settings(CSRF_COOKIE_NAME='csrfcookie', CSRF_COOKIE_DOMAIN='.example.com', CSRF_COOKIE_AGE=MAX_AGE, CSRF_COOKIE_PATH='/test/', CSRF_COOKIE_SECURE=True, CSRF_COOKIE_HTTPONLY=True): # token_view calls get_token() indirectly CsrfViewMiddleware().process_view(req, token_view, (), {}) resp = token_view(req) resp2 = CsrfViewMiddleware().process_response(req, resp) max_age = resp2.cookies.get('csrfcookie').get('max-age') self.assertEqual(max_age, '') def test_post_data_read_failure(self): """ #20128 -- IOErrors during POST data reading should be caught and treated as if the POST data wasn't there. 
""" class CsrfPostRequest(HttpRequest): """ HttpRequest that can raise an IOError when accessing POST data """ def __init__(self, token, raise_error): super(CsrfPostRequest, self).__init__() self.method = 'POST' self.raise_error = False self.COOKIES[settings.CSRF_COOKIE_NAME] = token self.POST['csrfmiddlewaretoken'] = token self.raise_error = raise_error def _load_post_and_files(self): raise IOError('error reading input data') def _get_post(self): if self.raise_error: self._load_post_and_files() return self._post def _set_post(self, post): self._post = post POST = property(_get_post, _set_post) token = 'ABC' req = CsrfPostRequest(token, raise_error=False) resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertIsNone(resp) req = CsrfPostRequest(token, raise_error=True) resp = CsrfViewMiddleware().process_view(req, post_form_view, (), {}) self.assertEqual(resp.status_code, 403)
"""Check NetBackup connectivity (PBX/BPCD ports, bpgetconfig) for a list of
hosts, optionally pushing an EMMSERVER entry with bpsetconfig, using a pool
of worker threads."""
import logging
import os.path
import platform
import socket
import subprocess
import threading
from threading import Timer
from math import ceil
from optparse import OptionParser
from sys import exit, stdout

# NetBackup installs its admin commands in different locations (and with
# .exe suffixes) on Windows vs. Unix.
is_win = platform.system() == 'Windows'

if is_win:
    bin_admin_path = r'C:\Program Files\Veritas\NetBackup\bin\admincmd'
    BPGETCONFIG = r'bpgetconfig.exe'
    BPSETCONFIG = r'bpsetconfig.exe'
else:
    bin_admin_path = r'/usr/openv/netbackup/bin/admincmd'
    BPGETCONFIG = r'bpgetconfig'
    BPSETCONFIG = r'bpsetconfig'

FORMAT = 'thread %(thread)d: %(message)s '
PBX_PORT = 1556    # NetBackup PBX service port
BPCD_PORT = 13724  # NetBackup client daemon (vnetd) port

usage = "usage: %prog [options] host1 host2 host3 ..."
parser = OptionParser(usage)
parser.add_option("-f", "--file", dest="filename",
                  help="read hosts from file, one per line;")
parser.add_option("-b", "--bin_admin", dest="bin_admin",
                  default=bin_admin_path,
                  help="path to .../netbackup/bin/admincmd")
parser.add_option("-s", "--skip_bpgetconfig", action="store_true",
                  dest="skip_bpgetconfig", default=False,
                  help="Don't run bpgetconfig to confirm connection")
parser.add_option("-n", "--num_threads", dest="num_threads", default=100,
                  type=int,
                  help="number of threads to run simultaneously")
parser.add_option("-e", "--emm", dest="emm", type=str,
                  help="EMMSERVER entry")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                  default=False, help="print status messages to stdout")
parser.add_option("-d", "--debug", action="store_true", dest="debug",
                  default=False, help="print debug messages to stdout")
(options, args) = parser.parse_args()

hosts = args
# Host objects appended by the worker threads; list.append is atomic in
# CPython, so no explicit lock is needed.
result = []

# Log level: --debug wins over --verbose, default is warnings only.
if options.debug:
    _log_level = logging.DEBUG
elif options.verbose:
    _log_level = logging.INFO
else:
    _log_level = logging.WARN
logging.basicConfig(stream=stdout, format=FORMAT, level=_log_level)

if options.filename:
    if os.path.isfile(options.filename):
        # Merge hosts from the file with any given on the command line.
        with open(options.filename) as f:
            hosts = hosts + [x.rstrip() for x in f.read().splitlines()]

if len(hosts) == 0:
    logging.critical('No hosts were provided for a check')
    exit(1)

if not options.skip_bpgetconfig:
    if not os.path.isfile(os.path.join(options.bin_admin, BPGETCONFIG)):
        logging.critical("Can't find bpgetconfig in %s" % options.bin_admin)
        exit(1)


def split(arr, size):
    """Split ``arr`` into a list of chunks of at most ``size`` elements."""
    chunks = []
    while len(arr) > size:
        chunks.append(arr[:size])
        arr = arr[size:]
    chunks.append(arr)
    return chunks


class Host(object):
    """Connectivity-check results for one host.

    Every flag starts optimistic (True) and is cleared by check_nbu_port()
    when the corresponding test fails.
    """

    def __init__(self, host):
        self.name = host
        self.pbx = True          # TCP connect to PBX_PORT succeeded
        self.bpcd = True         # TCP connect to BPCD_PORT succeeded
        self.cert = True         # no vnetd certificate error seen
        self.bpgetconfig = True  # bpgetconfig produced output
        # bpsetconfig (EMM update) produced output.  This attribute was
        # previously only assigned in an except handler and never
        # initialised here.
        self.bpsetconfig = True

    @property
    def partial(self):
        # Some, but not all, of the checks failed.
        return not self.complete and self.failed

    @property
    def failed(self):
        return not self.pbx or not self.bpcd or not self.bpgetconfig

    @property
    def complete(self):
        # Every check failed -- host looks completely unreachable.
        return not self.pbx and not self.bpcd and not self.bpgetconfig

    def report(self):
        """Print a one-line human-readable summary for this host."""
        if not self.failed:
            print('host %s was reachable' % self.name)
        elif self.complete:
            print('host %s was completely unreachable' % self.name)
        else:
            print('host %s was partially unreachable '
                  'bpcd: %s, pbx %s, bpgetconfig %s, certificate %s'
                  % (self.name, self.bpcd, self.pbx,
                     self.bpgetconfig, self.cert))


def test_soc(host, port):
    """Return True if a TCP connection to host:port succeeds within 5s."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(5.0)
    logging.info("testing connection to %s port %s" % (host, port))
    try:
        # connect_ex returns 0 on success instead of raising.
        return sock.connect_ex((host, port)) == 0
    except Exception:
        return False
    finally:
        # Close the socket on every path (the original leaked it when
        # connect_ex raised).
        sock.close()


def check_nbu_port(task_list):
    """Worker: run all configured checks for each host in ``task_list``.

    Appends a populated Host object to the module-level ``result`` list
    for every host checked.
    """
    for h in task_list:
        host = Host(h)
        host.pbx = test_soc(host.name, PBX_PORT)
        host.bpcd = test_soc(host.name, BPCD_PORT)

        if options.emm:
            try:
                logging.info("updating EMM for %s" % (host.name))
                proc = subprocess.Popen(
                    [os.path.join(options.bin_admin, BPSETCONFIG),
                     "-h", host.name],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                # Kill bpsetconfig if it hangs for more than 5 seconds.
                timer = Timer(5, proc.kill)
                try:
                    timer.start()
                    out, err = proc.communicate(
                        "EMMSERVER = %s" % (options.emm))
                    logging.info(err)
                    out = out.strip()
                finally:
                    timer.cancel()
                # Copy-paste fix: this branch runs bpsetconfig, not
                # bpgetconfig, so log and flag it accordingly.
                logging.debug("bpsetconfig for %s returned >>%s%s%s<<"
                              % (host.name, os.linesep, out, os.linesep))
                if len(out) == 0:
                    host.bpsetconfig = False
                    # Keep clearing bpgetconfig too so the host is still
                    # reported as failed (preserves previous behaviour).
                    host.bpgetconfig = False
                    logging.info(err)
                    if err.startswith("the vnetd"):
                        host.cert = False
            except subprocess.CalledProcessError:
                host.bpsetconfig = False
                host.bpgetconfig = False

        if not options.skip_bpgetconfig:
            try:
                # Was logging the Host object repr; log the hostname.
                logging.info("testing connection via bpgetconfig for %s"
                             % (host.name))
                proc = subprocess.Popen(
                    [os.path.join(options.bin_admin, BPGETCONFIG),
                     "-M", host.name],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                # Kill bpgetconfig if it hangs for more than 5 seconds.
                timer = Timer(5, proc.kill)
                try:
                    timer.start()
                    out, err = proc.communicate()
                    logging.info(err)
                    out = out.strip()
                finally:
                    timer.cancel()
                logging.debug("bpgetconfig from %s returned >>%s%s%s<<"
                              % (host.name, os.linesep, out, os.linesep))
                if len(out) == 0:
                    host.bpgetconfig = False
                    logging.info(err)
                    if err.startswith("the vnetd"):
                        host.cert = False
            except subprocess.CalledProcessError:
                host.bpgetconfig = False
        else:
            logging.info('bpgetconfig test was skipped for %s' % host.name)

        result.append(host)
    return


threads = []

if __name__ == '__main__':
    # Spread the hosts evenly over at most options.num_threads workers.
    part_hosts = split(hosts,
                       int(ceil(float(len(hosts)) / options.num_threads)))
    for task_list in part_hosts:
        t = threading.Thread(target=check_nbu_port, args=(task_list,))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    for host in result:
        host.report()
import const
import base_player
from random import randint, shuffle


class Player(base_player.BasePlayer):
    """Basic hunt/kill battleship AI.

    Hunts on a checkerboard pattern until a shot lands, then switches to
    "kill" mode and shoots the squares adjacent to known hits.
    """

    def __init__(self):
        base_player.BasePlayer.__init__(self)

        self._playerName = "Beornwulf"
        self._playerYear = "1"  # year of study
        self._version = "1.1"  # version of AI
        self._playerDescription = "Basic Hunter-Killer algorithm."

        self._mode = "hunt"           # current phase: "hunt" or "kill"
        self._allSquares = self.initialiseBoard()
        self._targets = self.initialiseHunt()
        self._shots = []              # every square fired at so far
        self._hits = []               # squares that scored a hit
        self._priorityTargets = []    # kill-mode queue of hit neighbours
        self._killCount = 0           # hits scored since entering kill mode

    def resetGame(self):
        """Reset internal variables and lists between games."""
        self._mode = "hunt"
        self._allSquares = self.initialiseBoard()
        self._targets = self.initialiseHunt()
        self._shots = []
        self._hits = []
        self._priorityTargets = []
        self._killCount = 0

    def initialiseBoard(self):
        """Return a list of all valid board squares.

        The board is a 6x6 block plus a 6x12 block.
        """
        board = [[x, y] for x in range(6) for y in range(6)]
        board.extend([x, y] for x in range(6, 12) for y in range(12))
        return board

    def initialiseHunt(self):
        """Return a shuffled checkerboard of initial hunt targets.

        Squares where (x + y) is even form a checkerboard; every ship spans
        at least two adjacent squares, so this pattern cannot miss a ship.
        """
        hunt_targets = [square for square in self._allSquares
                        if (square[0] + square[1]) % 2 == 0]
        shuffle(hunt_targets)
        return hunt_targets

    def deployFleet(self):
        """Place the fleet at random valid positions; return the board."""
        self.resetGame()
        self._initBoards()

        # Ship shapes expressed as offsets from a randomly chosen anchor.
        destroyer = [[0, 0], [0, 1]]
        cruiser = [[0, 0], [0, 1], [0, 2]]
        battleship = [[0, 0], [0, 1], [0, 2], [0, 3]]
        hovercraft = [[0, 0], [0, 1], [1, 1], [-1, 1], [1, 2], [-1, 2]]
        aircraftcarrier = [[0, 0], [-1, 0], [1, 0], [0, 1], [0, 2], [0, 3]]
        fleet = [destroyer, cruiser, battleship, hovercraft, aircraftcarrier]

        # Same L-shaped square list as initialiseBoard() builds.
        boardSquares = self.initialiseBoard()

        for shape in fleet:
            isValid = False
            while not isValid:
                # Pick a random anchor square and lay the shape from it.
                keyPoint = boardSquares[randint(0, len(boardSquares) - 1)]
                ship = [[dx + keyPoint[0], dy + keyPoint[1]]
                        for dx, dy in shape]

                # Placement is valid only if every square is on the board
                # and not already occupied.  (The original wrapped this in
                # a while-loop that always ran exactly once.)
                isValid = True
                for square in ship:
                    x, y = square
                    if square not in boardSquares:
                        isValid = False
                    elif self._playerBoard[x][y] == const.OCCUPIED:
                        isValid = False

            for square in ship:
                x, y = square
                self._playerBoard[x][y] = const.OCCUPIED

        return self._playerBoard

    def chooseMove(self):
        """Determine the attack phase and return the target (row, col)."""
        if self._mode == "hunt":
            target = self.huntMode()
        elif self._mode == "kill":
            target = self.killMode()
        row, col = target
        return row, col

    def huntMode(self):
        """Pop a random checkerboard target.

        When the initial pattern is exhausted, refill the target list with
        every square not yet shot at.
        """
        if len(self._targets) == 0:
            for square in self._allSquares:
                if square not in self._shots:
                    self._targets.append(square)
        target = self._targets.pop()
        self._shots.append(target)
        return target

    def killMode(self):
        """Focus shots on squares adjacent to known hits.

        Falls back to hunt mode when no priority targets remain or after
        6 hits (the largest ship size).
        """
        # Collect the four orthogonal neighbours of every hit so far...
        candidates = []
        for a, b in self._hits:
            candidates.extend([[a, b + 1], [a, b - 1],
                               [a + 1, b], [a - 1, b]])

        # ...keeping only new, on-board squares we have not shot at yet.
        for square in candidates:
            if (square not in self._priorityTargets
                    and square not in self._shots
                    and square in self._allSquares):
                self._priorityTargets.append(square)

        if len(self._priorityTargets) == 0 or self._killCount > 5:
            self._mode = "hunt"
            target = self.huntMode()
            self._killCount = 0
            return target

        target = self._priorityTargets.pop()
        self._shots.append(target)
        return target

    def setOutcome(self, entry, row, col):
        """Record the outcome of our own shot.

        entry: the outcome of your shot onto your opponent, expected value
            is const.HIT for hit and const.MISSED for missed.
        row: (int) the board row number (e.g. row A is 0)
        col: (int) the board column (e.g. col 2 is represented by value 3)
            so A3 case is (0,2)

        Raises Exception for any other entry value.
        """
        if entry == const.HIT:
            outcome = const.HIT
            self._hits.append([row, col])
            self._mode = "kill"
            self._killCount += 1
        elif entry == const.MISSED:
            outcome = const.MISSED
        else:
            raise Exception("Invalid input!")
        # NOTE: the attribute name (missing 't') comes from the base
        # class -- do not "fix" the spelling here.
        self._opponenBoard[row][col] = outcome

    def getOpponentMove(self, row, col):
        """Record the opponent's shot at (row, col) and return the result.

        Note case A3 is represented as row = 0, col = 2.
        """
        if ((self._playerBoard[row][col] == const.OCCUPIED)
                or (self._playerBoard[row][col] == const.HIT)):
            # They may (stupidly) hit the same square twice so we check
            # for occupied or hit.
            self._playerBoard[row][col] = const.HIT
            result = const.HIT
        else:
            # You might like to keep track of where your opponent has
            # missed, but here we just acknowledge it.
            result = const.MISSED
        return result


def getPlayer():
    """ MUST NOT be changed, used to get a instance of your class."""
    return Player()
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Continuous-computation jobs for exploration feedback-thread analytics."""

from core import jobs
from core.domain import feedback_domain
from core.platform import models
import feconf

from google.appengine.ext import ndb

(base_models, feedback_models, exp_models,) = models.Registry.import_models([
    models.NAMES.base_model, models.NAMES.feedback, models.NAMES.exploration
])
transaction_services = models.Registry.import_transaction_services()


class FeedbackAnalyticsRealtimeModel(
        jobs.BaseRealtimeDatastoreClassForContinuousComputations):
    # Realtime-layer delta of open threads.  Can go negative: see
    # _decrement_open_threads_count, which creates a model with
    # num_open_threads=-1 when a thread closes before the batch job has
    # counted it.
    num_open_threads = ndb.IntegerProperty(default=0)
    # Realtime-layer delta of all threads (open or closed).
    num_total_threads = ndb.IntegerProperty(default=0)


class FeedbackAnalyticsAggregator(jobs.BaseContinuousComputationManager):
    """A continuous-computation job that computes analytics for feedback
    threads of explorations.
    """

    @classmethod
    def get_event_types_listened_to(cls):
        # React to thread creation and open/closed status changes only.
        return [feconf.EVENT_TYPE_NEW_THREAD_CREATED,
                feconf.EVENT_TYPE_THREAD_STATUS_CHANGED]

    @classmethod
    def _get_realtime_datastore_class(cls):
        return FeedbackAnalyticsRealtimeModel

    @classmethod
    def _get_batch_job_manager_class(cls):
        return FeedbackAnalyticsMRJobManager

    @classmethod
    def _handle_incoming_event(cls, active_realtime_layer, event_type, *args):
        """Records thread analytics in the given realtime layer.

        Args:
            active_realtime_layer: int. The currently active realtime
                datastore layer.
            event_type: str. The event triggered by the student.
            *args: Variable length argument list. The first element of
                *args corresponds to the id of the exploration currently
                being played.
        """
        exp_id = args[0]

        def _increment_open_threads_count():
            # Creates the layer's counter model on first use; otherwise
            # bumps the existing counter.
            realtime_class = cls._get_realtime_datastore_class()
            realtime_model_id = realtime_class.get_realtime_id(
                active_realtime_layer, exp_id)

            model = realtime_class.get(realtime_model_id, strict=False)
            if model is None:
                realtime_class(
                    id=realtime_model_id, num_open_threads=1,
                    realtime_layer=active_realtime_layer).put()
            else:
                model.num_open_threads += 1
                model.put()

        def _increment_total_threads_count():
            realtime_class = cls._get_realtime_datastore_class()
            realtime_model_id = realtime_class.get_realtime_id(
                active_realtime_layer, exp_id)

            model = realtime_class.get(realtime_model_id, strict=False)
            if model is None:
                realtime_class(
                    id=realtime_model_id, num_total_threads=1,
                    realtime_layer=active_realtime_layer).put()
            else:
                model.num_total_threads += 1
                model.put()

        def _decrement_open_threads_count():
            # May initialise the counter to -1: the batch layer will still
            # hold the thread as open, so the sum stays correct.
            realtime_class = cls._get_realtime_datastore_class()
            realtime_model_id = realtime_class.get_realtime_id(
                active_realtime_layer, exp_id)

            model = realtime_class.get(realtime_model_id, strict=False)
            if model is None:
                realtime_class(
                    id=realtime_model_id, num_open_threads=-1,
                    realtime_layer=active_realtime_layer).put()
            else:
                model.num_open_threads -= 1
                model.put()

        if event_type == feconf.EVENT_TYPE_NEW_THREAD_CREATED:
            transaction_services.run_in_transaction(
                _increment_total_threads_count)
            transaction_services.run_in_transaction(
                _increment_open_threads_count)
        elif event_type == feconf.EVENT_TYPE_THREAD_STATUS_CHANGED:
            old_status = args[1]
            updated_status = args[2]
            # Status changed from closed to open.
            if (old_status != feedback_models.STATUS_CHOICES_OPEN
                    and updated_status == feedback_models.STATUS_CHOICES_OPEN):
                transaction_services.run_in_transaction(
                    _increment_open_threads_count)
            # Status changed from open to closed.
            elif (old_status == feedback_models.STATUS_CHOICES_OPEN
                  and updated_status != feedback_models.STATUS_CHOICES_OPEN):
                transaction_services.run_in_transaction(
                    _decrement_open_threads_count)

    # Public query methods.
    @classmethod
    def get_thread_analytics_multi(cls, exploration_ids):
        """Gets the thread analytics for the explorations specified by the
        exploration_ids.

        Args:
            exploration_ids: list(str). IDs of the explorations to get
                analytics for.

        Returns:
            list(FeedbackAnalytics). One domain object per input
            exploration ID, in the same order. Each combines the realtime
            layer's counts with the batch-computed
            FeedbackAnalyticsModel's counts (missing models count as 0).
        """
        realtime_model_ids = cls.get_multi_active_realtime_layer_ids(
            exploration_ids)
        realtime_models = cls._get_realtime_datastore_class().get_multi(
            realtime_model_ids)

        feedback_thread_analytics_models = (
            feedback_models.FeedbackAnalyticsModel.get_multi(
                exploration_ids))
        return [feedback_domain.FeedbackAnalytics(
            exploration_ids[i],
            (realtime_models[i].num_open_threads
             if realtime_models[i] is not None else 0) +
            (feedback_thread_analytics_models[i].num_open_threads
             if feedback_thread_analytics_models[i] is not None else 0),
            (realtime_models[i].num_total_threads
             if realtime_models[i] is not None else 0) +
            (feedback_thread_analytics_models[i].num_total_threads
             if feedback_thread_analytics_models[i] is not None else 0)
        ) for i in range(len(exploration_ids))]

    @classmethod
    def get_thread_analytics(cls, exploration_id):
        """Retrieves the analytics for feedback threads.

        Args:
            exploration_id: str. ID of exploration to get statistics for.

        Returns:
            FeedbackAnalytics. Domain object holding num_open_threads and
            num_total_threads for the given exploration (the single-id
            convenience wrapper around get_thread_analytics_multi).
        """
        return FeedbackAnalyticsAggregator.get_thread_analytics_multi(
            [exploration_id])[0]


class FeedbackAnalyticsMRJobManager(
        jobs.BaseMapReduceJobManagerForContinuousComputations):
    """Job that creates FeedbackAnalyticsModels for explorations by calculating
    various analytics for feedback threads corresponding to an exploration.

    Currently, this job calculates the number of open feedback threads, as
    well as the total feedback thread count for each exploration.
    """

    @classmethod
    def _get_continuous_computation_class(cls):
        return FeedbackAnalyticsAggregator

    @classmethod
    def entity_classes_to_map_over(cls):
        return [feedback_models.FeedbackThreadModel]

    @staticmethod
    def map(item):
        """Map function.

        Args:
            item: FeedbackThreadModel. A feedback thread model instance.

        Yields:
            A tuple of two elements:
              - str. The exploration id associated to the feedback thread.
              - str. The feedback thread's status.
        """
        yield (item.exploration_id, item.status)

    @staticmethod
    def reduce(key, stringified_values):
        """Reduce function.

        Args:
            key: str. The exploration ID.
            stringified_values: list(str). List of all statuses from all
                mappers tagged with the given key.
        """
        num_open_threads = stringified_values.count(
            feedback_models.STATUS_CHOICES_OPEN)
        num_total_threads = len(stringified_values)
        feedback_models.FeedbackAnalyticsModel.create(
            key, num_open_threads, num_total_threads)
########################################################################## # # Copyright (c) 2018, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
##########################################################################

import unittest

import six
import imath

import IECore
import IECoreScene

import Gaffer
import GafferScene
import GafferSceneTest

class TweakPlugTest( GafferSceneTest.SceneTestCase ) :

	def testConstructor( self ) :

		p = GafferScene.TweakPlug( "test", 10.0, GafferScene.TweakPlug.Mode.Multiply, enabled = False )

		# The constructor sets current values, not defaults.
		self.assertEqual( p["name"].defaultValue(), "" )
		self.assertEqual( p["name"].getValue(), "test" )

		# The value plug's type is deduced from the Python value passed.
		self.assertIsInstance( p["value"], Gaffer.FloatPlug )
		self.assertEqual( p["value"].defaultValue(), 10 )
		self.assertEqual( p["value"].getValue(), 10 )

		self.assertEqual( p["mode"].defaultValue(), p.Mode.Replace )
		self.assertEqual( p["mode"].getValue(), p.Mode.Multiply )

		self.assertEqual( p["enabled"].defaultValue(), True )
		self.assertEqual( p["enabled"].getValue(), False )

	def testCreateCounterpart( self ) :

		p = GafferScene.TweakPlug( "test", 10.0, GafferScene.TweakPlug.Mode.Multiply )
		p2 = p.createCounterpart( "p2", Gaffer.Plug.Direction.In )

		# The counterpart mirrors type, direction and child layout.
		self.assertIsInstance( p2, GafferScene.TweakPlug )
		self.assertEqual( p2.getName(), "p2" )
		self.assertEqual( p2.direction(), Gaffer.Plug.Direction.In )
		self.assertEqual( p2.keys(), p.keys() )
		for n in p2.keys() :
			self.assertIsInstance( p2[n], p[n].__class__ )

	def testTweakParameters( self ) :

		tweaks = GafferScene.TweaksPlug()

		tweaks.addChild( GafferScene.TweakPlug( "a", 1.0, GafferScene.TweakPlug.Mode.Replace ) )
		tweaks.addChild( GafferScene.TweakPlug( "b", 10.0, GafferScene.TweakPlug.Mode.Multiply ) )

		parameters = IECore.CompoundData( { "a" : 0.0, "b" : 2.0 } )
		self.assertTrue( tweaks.applyTweaks( parameters ) )
		self.assertEqual( parameters, IECore.CompoundData( { "a" : 1.0, "b" : 20.0 } ) )

	def testTweakNetwork( self ) :

		network = IECoreScene.ShaderNetwork(
			shaders = {
				"texture" : IECoreScene.Shader(
					"image", "ai:shader",
					{ "filename" : "test.tx", "sscale" : 1.0 }
				),
				"surface" : IECoreScene.Shader(
					"lambert", "ai:surface",
					{ "Kd" : 1.0 }
				)
			},
			output = "surface"
		)

		tweaks = GafferScene.TweaksPlug()
		# "shader.parameter" names address a specific shader in the network.
		tweaks.addChild( GafferScene.TweakPlug( "texture.sscale", 10.0, GafferScene.TweakPlug.Mode.Multiply ) )
		tweaks.addChild( GafferScene.TweakPlug( "texture.sscale", 1.0, GafferScene.TweakPlug.Mode.Add ) )
		tweaks.addChild( GafferScene.TweakPlug( "surface.Kd", 0.5, GafferScene.TweakPlug.Mode.Multiply ) )
		tweaks.addChild( GafferScene.TweakPlug( "Kd", 0.25, GafferScene.TweakPlug.Mode.Add ) )

		self.assertTrue( tweaks.applyTweaks( network ) )

		# Tweaks apply in order : ( 1.0 * 10.0 ) + 1.0 and ( 1.0 * 0.5 ) + 0.25.
		self.assertEqual( network.getShader( "texture" ).parameters["sscale"].value, 11.0 )
		self.assertEqual( network.getShader( "surface" ).parameters["Kd"].value, 0.75 )

	def testThrowOnMissingShader( self ) :

		network = IECoreScene.ShaderNetwork(
			shaders = {
				"surface" : IECoreScene.Shader( "lambert", "ai:surface" )
			},
		)

		tweaks = Gaffer.Plug()
		tweaks.addChild( GafferScene.TweakPlug( "missingShader", 0.5 ) )

		with six.assertRaisesRegex( self, RuntimeError, "" ) :
			GafferScene.TweakPlug.applyTweaks( tweaks, network )

	def testWrongDataType( self ) :

		p = GafferScene.TweakPlug( "test", imath.Color3f( 1 ) )
		p["mode"].setValue( p.Mode.Multiply )
		self.assertIsInstance( p["value"], Gaffer.Color3fPlug )

		d = IECore.CompoundData( { "test" : 1 } )

		with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak to \"test\" : Value of type \"IntData\" does not match parameter of type \"Color3fData\"" ) :
			p.applyTweak( d )

	def testMissingMode( self ) :

		p = GafferScene.TweaksPlug()
		p["t"] = GafferScene.TweakPlug( "test", 0.5, GafferScene.TweakPlug.Mode.Replace )

		d = IECore.CompoundData()
		with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak with mode Replace to \"test\" : This parameter does not exist" ) :
			p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.Error )
		self.assertEqual( d, IECore.CompoundData() )

		d = IECore.CompoundData()
		self.assertFalse( p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.Ignore ) )
		self.assertEqual( d, IECore.CompoundData() )

		# IgnoreOrReplace lets a Replace-mode tweak create the missing parameter.
		d = IECore.CompoundData()
		self.assertTrue( p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.IgnoreOrReplace ) )
		self.assertEqual( d, IECore.CompoundData( { "test" : 0.5 } ) )

		# Other modes (e.g. Add) still error even with IgnoreOrReplace.
		d = IECore.CompoundData()
		p["t"]["mode"].setValue( GafferScene.TweakPlug.Mode.Add )
		with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak with mode Add to \"test\" : This parameter does not exist" ) :
			p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.Error )
		self.assertEqual( d, IECore.CompoundData() )
		with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak with mode Add to \"test\" : This parameter does not exist" ) :
			p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.IgnoreOrReplace )
		self.assertEqual( d, IECore.CompoundData() )
		self.assertFalse( p.applyTweaks( d, missingMode = GafferScene.TweakPlug.MissingMode.Ignore ) )
		self.assertEqual( d, IECore.CompoundData() )

	def testTweaksPlug( self ) :

		p = GafferScene.TweaksPlug()
		# Only TweakPlug children and TweaksPlug inputs are accepted.
		self.assertFalse( p.acceptsChild( Gaffer.Plug() ) )
		self.assertFalse( p.acceptsInput( Gaffer.Plug() ) )

		p.addChild( GafferScene.TweakPlug( "x", 10.0, GafferScene.TweakPlug.Mode.Replace ) )

		p2 = p.createCounterpart( "p2", Gaffer.Plug.Direction.In )
		self.assertIsInstance( p2, GafferScene.TweaksPlug )
		self.assertEqual( p2.getName(), "p2" )
		self.assertEqual( p2.direction(), Gaffer.Plug.Direction.In )
		self.assertEqual( p2.keys(), p.keys() )

	def testOldSerialisation( self ) :

		# Old scripts call a constructor with an outdated signature as below.
plug = GafferScene.TweakPlug( "exposure", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) def testMissingModeForShaderNetwork( self ) : network = IECoreScene.ShaderNetwork( shaders = { "surface" : IECoreScene.Shader( "lambert", "ai:surface", { "Kd" : 0.25 } ) }, output = "surface" ) p = GafferScene.TweaksPlug() p["t"] = GafferScene.TweakPlug( "Ks", 0.5, GafferScene.TweakPlug.Mode.Replace ) networkCopy = network.copy() with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak with mode Replace to \"Ks\" : This parameter does not exist" ) : p.applyTweaks( networkCopy ) with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak with mode Replace to \"Ks\" : This parameter does not exist" ) : p.applyTweaks( networkCopy, missingMode = GafferScene.TweakPlug.MissingMode.Error ) self.assertEqual( networkCopy, network ) self.assertFalse( p.applyTweaks( networkCopy, missingMode = GafferScene.TweakPlug.MissingMode.Ignore ) ) self.assertEqual( networkCopy, network ) p["t"]["name"].setValue( "missingShader.parameterName" ) with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak \"missingShader.parameterName\" because shader \"missingShader\" does not exist" ) : p.applyTweaks( networkCopy ) with six.assertRaisesRegex( self, RuntimeError, "Cannot apply tweak \"missingShader.parameterName\" because shader \"missingShader\" does not exist" ) : p.applyTweaks( networkCopy, missingMode = GafferScene.TweakPlug.MissingMode.Error ) self.assertEqual( networkCopy, network ) self.assertFalse( p.applyTweaks( networkCopy, missingMode = GafferScene.TweakPlug.MissingMode.Ignore ) ) self.assertEqual( networkCopy, network ) def testApplyReturnValues( self ) : parameters = IECore.CompoundData( { "a" : 0.0, "b" : 2.0 } ) tweaks = GafferScene.TweaksPlug() # Test none to apply self.assertFalse( tweaks.applyTweaks( parameters ) ) # Test none enabled tweaks.addChild( GafferScene.TweakPlug( "a", 1.0, GafferScene.TweakPlug.Mode.Replace, False ) ) 
tweaks.addChild( GafferScene.TweakPlug( "b", 10.0, GafferScene.TweakPlug.Mode.Multiply, False ) ) tweakedParameters = parameters.copy() self.assertFalse( tweaks.applyTweaks( parameters ) ) self.assertEqual( tweakedParameters, parameters ) # Test enabled tweaks[0]["enabled"].setValue( True ) tweaks[1]["enabled"].setValue( True ) self.assertTrue( tweaks.applyTweaks( parameters ) ) # Test non-matching altParameters = IECore.CompoundData( { "c" : 0.0, "d" : 2.0 } ) tweakedAltParameters = altParameters.copy() self.assertFalse( tweaks.applyTweaks( tweakedAltParameters, missingMode = GafferScene.TweakPlug.MissingMode.Ignore ) ) self.assertEqual( tweakedAltParameters, altParameters ) # Test empty names tweaks[0]["name"].setValue( "" ) tweaks[1]["name"].setValue( "" ) tweakedParameters = parameters.copy() self.assertFalse( tweaks.applyTweaks( parameters ) ) self.assertEqual( tweakedParameters, parameters ) def testApplyReturnValuesNetworkEdits( self ) : network = IECoreScene.ShaderNetwork( shaders = { "surface" : IECoreScene.Shader( "lambert", "surface", { "c" : imath.Color3f( 1.0 ) } ) }, output = "surface" ) textureShader = GafferSceneTest.TestShader( "texture" ) tweaks = GafferScene.TweaksPlug() tweaks.addChild( GafferScene.TweakPlug( "c", Gaffer.Color3fPlug(), GafferScene.TweakPlug.Mode.Replace, False ) ) tweaks[0]["value"].setInput( textureShader["out"] ) # Test none to apply tweakedNetwork = network.copy() self.assertFalse( tweaks.applyTweaks( tweakedNetwork ) ) self.assertEqual( tweakedNetwork, network ) # Test enabled tweaks[0]["enabled"].setValue( True ) tweakedNetwork = network.copy() self.assertTrue( tweaks.applyTweaks( tweakedNetwork ) ) self.assertEqual( tweakedNetwork.inputConnections( "surface" ), [ ( ( "texture", "" ), ( "surface", "c" ) ) ] ) def testCreateOutputCounterpart( self ) : p = GafferScene.TweakPlug( "test", 10.0, GafferScene.TweakPlug.Mode.Multiply ) p2 = p.createCounterpart( "p2", Gaffer.Plug.Direction.Out ) self.assertIsInstance( p2, 
GafferScene.TweakPlug ) self.assertEqual( p2.getName(), "p2" ) self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out ) self.assertEqual( p2.keys(), p.keys() ) for n in p2.keys() : self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out ) self.assertIsInstance( p2[n], p[n].__class__ ) if __name__ == "__main__": unittest.main()
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import collections
import multiprocessing

import jsonschema
import mock

from rally.plugins.common.runners import serial
from rally.task import runner
from rally.task import scenario
from tests.unit import fakes
from tests.unit import test

# Common prefix for mock.patch targets inside the module under test.
BASE = "rally.task.runner."


class ScenarioRunnerHelpersTestCase(test.TestCase):

    """Tests for the module-level helpers of rally.task.runner."""

    @mock.patch(BASE + "utils.format_exc")
    def test_format_result_on_timeout(self, mock_format_exc):
        mock_exc = mock.MagicMock()

        # A timed-out iteration is reported as a full-duration result whose
        # error is the formatted exception.
        expected = {
            "duration": 100,
            "idle_duration": 0,
            "output": {"additive": [], "complete": []},
            "atomic_actions": {},
            "error": mock_format_exc.return_value
        }

        self.assertEqual(runner.format_result_on_timeout(mock_exc, 100),
                         expected)
        mock_format_exc.assert_called_once_with(mock_exc)

    @mock.patch(BASE + "context.ContextManager")
    def test_get_scenario_context(self, mock_context_manager):
        mock_context_obj = mock.MagicMock()
        mock_map_for_scenario = (
            mock_context_manager.return_value.map_for_scenario)

        # _get_scenario_context is a thin wrapper over
        # ContextManager.map_for_scenario.
        self.assertEqual(mock_map_for_scenario.return_value,
                         runner._get_scenario_context(mock_context_obj))
        mock_context_manager.assert_called_once_with(mock_context_obj)
        mock_map_for_scenario.assert_called_once_with()

    def test_run_scenario_once_internal_logic(self):
        context = runner._get_scenario_context(
            fakes.FakeContext({}).context)
        scenario_cls = mock.MagicMock()
        args = (2, scenario_cls, "test", context, {})
        runner._run_scenario_once(args)

        # The scenario class must be instantiated with the context, the
        # method invoked, and timing/atomic data collected afterwards.
        expected_calls = [
            mock.call(context),
            mock.call().test(),
            mock.call().idle_duration(),
            mock.call().idle_duration(),
            mock.call().atomic_actions()
        ]
        scenario_cls.assert_has_calls(expected_calls, any_order=True)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_without_scenario_output(self, mock_timer):
        args = (1, fakes.FakeScenario, "do_it", mock.MagicMock(), {})
        result = runner._run_scenario_once(args)

        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "error": [],
            "output": {"additive": [], "complete": []},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_with_added_scenario_output(self, mock_timer):
        # Output registered via Scenario.add_output shows up under "output".
        args = (1, fakes.FakeScenario, "with_add_output", mock.MagicMock(), {})
        result = runner._run_scenario_once(args)

        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "error": [],
            "output": {"additive": [{"chart_plugin": "FooPlugin",
                                     "description": "Additive description",
                                     "data": [["a", 1]],
                                     "title": "Additive"}],
                       "complete": [{"data": [["a", [[1, 2], [2, 3]]]],
                                     "description": "Complete description",
                                     "title": "Complete",
                                     "chart_plugin": "BarPlugin"}]},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_with_returned_scenario_output(self,
                                                             mock_timer):
        # A legacy dict returned from the scenario is converted to a single
        # additive "Scenario output" chart.
        args = (1, fakes.FakeScenario, "with_output", mock.MagicMock(), {})
        result = runner._run_scenario_once(args)

        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "error": [],
            "output": {"additive": [{"chart_plugin": "StackedArea",
                                     "description": "",
                                     "data": [["a", 1]],
                                     "title": "Scenario output"}],
                       "complete": []},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)

    @mock.patch(BASE + "rutils.Timer", side_effect=fakes.FakeTimer)
    def test_run_scenario_once_exception(self, mock_timer):
        args = (1, fakes.FakeScenario, "something_went_wrong",
                mock.MagicMock(), {})
        result = runner._run_scenario_once(args)

        # The error is [type, message, traceback]; only the first two are
        # deterministic, so pop it and compare separately.
        expected_error = result.pop("error")
        expected_result = {
            "duration": fakes.FakeTimer().duration(),
            "timestamp": fakes.FakeTimer().timestamp(),
            "idle_duration": 0,
            "output": {"additive": [], "complete": []},
            "atomic_actions": {}
        }
        self.assertEqual(expected_result, result)
        self.assertEqual(expected_error[:2],
                         ["Exception", "Something went wrong"])


class ScenarioRunnerResultTestCase(test.TestCase):

    """Tests for the jsonschema validation in ScenarioRunnerResult."""

    def test_validate(self):
        config = [
            {
                "duration": 1.0,
                "idle_duration": 1.0,
                "output": {"additive": [], "complete": []},
                "atomic_actions": {"test1": 1.0},
                "error": []
            },
            {
                "duration": 2.0,
                "idle_duration": 2.0,
                "output": {"additive": [{"chart_plugin": "StackedArea",
                                         "data": [["a", 1]],
                                         "title": "Scenario output",
                                         "description": ""}],
                           "complete": []},
                "atomic_actions": {"test2": 2.0},
                "error": ["a", "b", "c"]
            }
        ]

        # Valid results pass through unchanged.
        self.assertEqual(config[0], runner.ScenarioRunnerResult(config[0]))
        self.assertEqual(config[1], runner.ScenarioRunnerResult(config[1]))

    def test_validate_failed(self):
        config = {"a": 10}
        self.assertRaises(jsonschema.ValidationError,
                          runner.ScenarioRunnerResult, config)


class ScenarioRunnerTestCase(test.TestCase):

    """Tests for the ScenarioRunner base class (via SerialScenarioRunner)."""

    @mock.patch(BASE + "rutils.Timer.duration", return_value=10)
    def test_run(self, mock_timer_duration):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        runner_obj._run_scenario = mock.MagicMock()

        scenario_name = "NovaServers.boot_server_from_volume_and_delete"
        config_kwargs = {"image": {"id": 1}, "flavor": {"id": 1}}

        context_obj = {
            "task": runner_obj.task,
            "scenario_name": scenario_name,
            "admin": {"credential": mock.MagicMock()},
            "config": {
                "cleanup": ["nova", "cinder"],
                "some_ctx": 2, "users": {}
            }
        }

        result = runner_obj.run(scenario_name, context_obj, config_kwargs)

        # run() returns the measured duration and drains the result queue.
        self.assertEqual(result, mock_timer_duration.return_value)
        self.assertEqual(list(runner_obj.result_queue), [])

        cls_name, method_name = scenario_name.split(".", 1)
        cls = scenario.Scenario.get(scenario_name)._meta_get("cls_ref")

        # The {"id": ...} wrappers are flattened before the scenario runs.
        expected_config_kwargs = {"image": 1, "flavor": 1}
        runner_obj._run_scenario.assert_called_once_with(
            cls, method_name, context_obj, expected_config_kwargs)

    def test_runner_send_result_exception(self):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        # _send_result validates against the result schema; a MagicMock is
        # not a valid result.
        self.assertRaises(
            jsonschema.ValidationError,
            lambda: runner_obj._send_result(mock.MagicMock()))

    def test_abort(self):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        self.assertFalse(runner_obj.aborted.is_set())
        runner_obj.abort()
        self.assertTrue(runner_obj.aborted.is_set())

    def test__create_process_pool(self):
        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())

        processes_to_start = 10

        def worker_process(i):
            pass

        counter = ((i,) for i in range(100))

        process_pool = runner_obj._create_process_pool(
            processes_to_start, worker_process, counter)
        self.assertEqual(processes_to_start, len(process_pool))
        for process in process_pool:
            self.assertIsInstance(process, multiprocessing.Process)

    @mock.patch(BASE + "ScenarioRunner._send_result")
    def test__join_processes(self, mock_scenario_runner__send_result):
        process = mock.MagicMock(is_alive=mock.MagicMock(return_value=False))
        processes = 10
        process_pool = collections.deque([process] * processes)
        mock_result_queue = mock.MagicMock(
            empty=mock.MagicMock(return_value=True))

        runner_obj = serial.SerialScenarioRunner(
            mock.MagicMock(),
            mock.MagicMock())
        runner_obj._join_processes(process_pool, mock_result_queue)

        # Every process must be joined and the queue closed exactly once.
        self.assertEqual(processes, process.join.call_count)
        mock_result_queue.close.assert_called_once_with()
# VapURL - Expiring URL service.
# Copyright (c) 2009 Aaron McBride and John Lawlor
# MIT License (see LICENSE.txt)
# https://github.com/nogwater/vapurl

import os
import re
import cgi
import random
import datetime
import wsgiref.handlers

from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext import db
from django.utils import simplejson

from models import VapUrl
from models import ErrorMessage
from models import Counter
import config


class MainHandler(webapp.RequestHandler):

    """Serves the index page, creates VapURLs and performs redirects."""

    def post(self):
        """Supports the creation of new VapURLs"""
        template_data = {
            # e.g. "1.2" derived from the App Engine deploy id "1-2.3456".
            'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.'),
            'url': 'http://'
        }
        self.createVapUrl(template_data)
        path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
        self.response.out.write(template.render(path, template_data))

    def get(self):
        """Show main page, create a new VapURL, or redirect."""
        template_data = {
            'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.'),
            'url': 'http://'
        }
        # Any non-empty path is treated as a short-name lookup.
        name = self.request.path[1:]
        if len(name) > 0:
            # redirect if needed
            self.redirectByName(name)
        else:
            # create if needed
            self.createVapUrl(template_data)
            # Opportunistically mark expired entries on home-page views.
            vapurlController = VapurlController()
            vapurlController.cleanup()
            path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
            self.response.out.write(template.render(path, template_data))

    def createVapUrl(self, template_data):
        """Creates a VapURL based on the URL passed in as an argument.

        Updates template_data."""
        url = self.request.get('url')
        if url != None and url != "":
            vapurlController = VapurlController()
            url = vapurlController.sanitize_url(url)
            template_data['url'] = url

            # max_time is minutes until expiry; -1 selects the custom field
            # (expressed in days, hence the 1440 multiplier).
            max_time = self.request.get('max_time')
            custom_time = self.request.get('custom_time')
            try:
                max_time = int(max_time)
            except:
                # NOTE(review): bare except also hides unexpected errors;
                # logged to the datastore instead of raised.
                max_time = 60
                self.logError("max_time not an int")
            if max_time == -1:
                # Custom Max
                try:
                    max_time = 1440 * int(custom_time.replace(',', ''))
                    # Cap at 1000 days; negative values also fall back.
                    if max_time < 0 or int(custom_time.replace(',', '')) > 1000:
                        max_time = 60
                except:
                    max_time = 60
                    self.logError("custom max_time not an int")
            else:
                if max_time not in [1, 60, 1440, 10080]:
                    # keep approved list? or make range?
                    max_time = 60

            # max_visits mirrors max_time: -1 selects the custom field.
            max_visits = self.request.get('max_visits')
            custom_visits = self.request.get('custom_visits')
            try:
                max_visits = int(max_visits)
            except:
                max_visits = 1
                self.logError("max_visits not an int")
            if max_visits == -1:
                # Custom Max
                try:
                    max_visits = int(custom_visits.replace(',', ''))
                    if max_visits < 0 or int(custom_visits.replace(',', '')) > 1000000:
                        max_visits = 60
                except:
                    max_visits = 60
                    self.logError("custom max_visits not an int")
            else:
                if max_visits not in [1, 5, 10, 25, 50, 100]:
                    # keep approved list? or make range?
                    # NOTE(review): 60 is not one of the preset visit counts;
                    # this fallback looks copied from the max_time handling —
                    # confirm whether 1 was intended.
                    max_visits = 60

            vapUrl = vapurlController.create(url, max_time, max_visits)
            if vapUrl != None:
                template_data['vapurl'] = config.baseURL + vapUrl.name
                template_data['name'] = vapUrl.name
                template_data['visits_remaining'] = vapUrl.visits_remaining
                template_data['exp_datetime'] = vapUrl.exp_datetime
            else:
                self.logError("error creating VapUrl. probably a bad URL:" + url)
                # probably BadValueError: Invalid URL: url

    def redirectByName(self, name):
        """Resolves name to its target URL (consuming a visit) and redirects.

        Falls back to '/' when the name is unknown, vaporized or used up.
        """
        destination = None
        vapUrls = VapUrl.all()
        vapUrls.filter('name =', name)
        vapUrls.filter('vaporized =', False)
        vapUrls.filter('exp_datetime >=', datetime.datetime.now())
        # can't use more than one inequality filter :(
        # vapUrls.filter('visits_remaining >', 0)
        if vapUrls.count() > 0:
            vapUrl = vapUrls[0]
            if vapUrl.visits_remaining > 0:
                destination = vapUrl.link
                vapUrl.visits_remaining -= 1
                vapUrl.put()
        if destination == None:
            destination = '/'  # we didn't go anywhere, so let's go home
        else:
            # Bump the global all-time visit counter.
            counters = Counter.all()
            counters.filter('type = ', 'visits/alltime')
            if counters.count() > 0:
                counter = counters[0]  # TODO: make transactional
                counter.count += 1
                counter.put()
            else:
                counter = Counter()
                counter.type = 'visits/alltime'
                counter.count = 1
                counter.put()
        self.redirect(destination)

    def logError(self, error_text):
        """Persists error_text as an ErrorMessage datastore entity."""
        errorMsg = ErrorMessage()
        errorMsg.text = error_text
        errorMsg.put()


class VapurlController:

    """Datastore-facing operations shared by the request handlers."""

    def sanitize_url(self, url):
        """Normalizes a user-supplied URL: strips a doubled scheme prefix,
        prepends http:// when no scheme is present, and trims to 1024 chars."""
        if url.find('http://http://') == 0 or url.find('http://https://') == 0:
            url = url[7:]  # remove 'http://'
        if url.find('://') == -1:
            url = 'http://' + url
        url = url[:1024]  # trim if long
        return url

    def cleanup(self):
        """Marks dead entries as vaporized"""
        # update data model
        # todo: remove this after the data model has been upgraded
        # vapUrls = VapUrl.all()
        # for vapUrl in vapUrls:
        #     if vapUrl.vaporized == None:
        #         vapUrl.vaporized = False
        #         vapUrl.put()

        # timed out
        vapUrls = VapUrl.all()
        vapUrls.filter('vaporized =', False)
        vapUrls.order('exp_datetime')
        vapUrls.filter('exp_datetime <', datetime.datetime.now())
        for vapUrl in vapUrls:
            vapUrl.vaporized = True
            vapUrl.put()

        # visited out
        vapUrls = VapUrl.all()
        vapUrls.filter('vaporized =', False)
        vapUrls.order('visits_remaining')
        vapUrls.filter('visits_remaining <', 1)
        for vapUrl in vapUrls:
            vapUrl.vaporized = True
            vapUrl.put()

    def create(self, url, max_time, max_visits):
        """Creates a new VapUrl and returns it if successful"""
        # Random 10-char name; seeding with the url adds request entropy.
        random.seed(str(random.random()) + url)
        name = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz-0123456789') for i in range(10)])
        vapUrl = None
        try:
            vapUrl = VapUrl()
            vapUrl.name = name
            vapUrl.link = db.Link(url)  # raises BadValueError on invalid URL
            vapUrl.vaporized = False
            vapUrl.exp_datetime = datetime.datetime.now() + datetime.timedelta(minutes=max_time)
            vapUrl.visits_max = max_visits
            vapUrl.visits_remaining = max_visits
            vapUrl.put()
        except:
            # NOTE(review): swallows all errors; caller treats None as failure.
            vapUrl = None
        if vapUrl != None:
            # Bump the global all-time create counter.
            counters = Counter.all()
            counters.filter('type = ', 'creates/alltime')
            if counters.count() > 0:
                counter = counters[0]  # TODO: make transactional
                counter.count += 1
                counter.put()
            else:
                counter = Counter()
                counter.type = 'creates/alltime'
                counter.count = 1
                counter.put()
        return vapUrl;


class HelpHandler(webapp.RequestHandler):

    """Renders the static help page."""

    def get(self):
        template_data = {
            'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.')
        }
        path = os.path.join(os.path.dirname(__file__), 'templates/help.html')
        self.response.out.write(template.render(path, template_data))


class AboutHandler(webapp.RequestHandler):

    """Renders the static about page."""

    def get(self):
        template_data = {
            'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.')
        }
        path = os.path.join(os.path.dirname(__file__), 'templates/about.html')
        self.response.out.write(template.render(path, template_data))


class InfoHandler(webapp.RequestHandler):

    """Shows creation/expiry/visit statistics for a VapURL (?id=name)."""

    def get(self):
        template_data = {
            'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.')
        }
        vapurlController = VapurlController()
        vapurlController.cleanup()
        name = self.request.get('id')
        vapUrls = VapUrl.all()
        vapUrls.filter('name =', name)
        if vapUrls.count() > 0:
            vapUrl = vapUrls[0]
            template_data['have_info'] = True
            template_data['name'] = vapUrl.name
            template_data['create_datetime'] = vapUrl.create_datetime
            template_data['exp_datetime'] = vapUrl.exp_datetime
            # Older entities may lack visits_max; report 'unknown' then.
            if vapUrl.visits_max:
                template_data['visits_used'] = (vapUrl.visits_max - vapUrl.visits_remaining)
            else:
                template_data['visits_used'] = 'unknown'
            template_data['visits_remaining'] = vapUrl.visits_remaining
        path = os.path.join(os.path.dirname(__file__), 'templates/info.html')
        self.response.out.write(template.render(path, template_data))


class ApiHandler(webapp.RequestHandler):

    """JSON API: /api/create creates a VapURL; /api shows the docs page."""

    def get(self):
        request_path = self.request.path.lower()
        if request_path == '/api/create' or request_path == '/api/create/':
            self.create()
        else:
            template_data = {
                'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.')
            }
            path = os.path.join(os.path.dirname(__file__), 'templates/api.html')
            self.response.out.write(template.render(path, template_data))

    def post(self):
        request_path = self.request.path.lower()
        if request_path == '/api/create' or request_path == '/api/create/':
            self.create()
        else:
            template_data = {
                'version': os.environ['CURRENT_VERSION_ID'][:os.environ['CURRENT_VERSION_ID'].rfind('.')].replace('-', '.')
            }
            path = os.path.join(os.path.dirname(__file__), 'templates/api.html')
            self.response.out.write(template.render(path, template_data))

    def create(self):
        """Parses input params, creates a vapurl, and prints the result as JSON."""
        result = {}
        try:
            url = self.request.get('url')
            minutes = int(self.request.get('minutes'))
            visits = int(self.request.get('visits'))
            if url == None or url == "":
                result['error'] = "url is missing"
            elif minutes < 1 or minutes > 1440000:
                result['error'] = "minutes is out of range"
            elif visits < 1 or visits > 1000000:
                result['error'] = "visits is out of range"
            else:
                vapurlController = VapurlController()
                url = vapurlController.sanitize_url(url)
                vapUrl = vapurlController.create(url, minutes, visits)
                if vapUrl != None:
                    result['id'] = vapUrl.name
                else:
                    result['error'] = "problem creating vapurl"
        except:
            result['error'] = "problem parsing parameters"
        self.response.out.write(simplejson.dumps(result))


def main():
    """Wires up the WSGI routes and runs the CGI handler."""
    application = webapp.WSGIApplication([('/help', HelpHandler),
                                          ('/api', ApiHandler),
                                          ('/api/create', ApiHandler),
                                          ('/about', AboutHandler),
                                          ('/info.*', InfoHandler),
                                          ('/.*', MainHandler)],
                                         debug=True)
    wsgiref.handlers.CGIHandler().run(application)


if __name__ == '__main__':
    main()
#!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
Bot to upload pages from a file.

This bot takes its input from a file that contains a number of
pages to be put on the wiki. The pages should all have the same
begin and end text (which may not overlap). By default the text should
have the intended title of the page as the first text in bold (that is,
between ''' and '''), you can modify this behavior with command line options.

The default is not to include the begin and end text in the page, if you
want to include that text, use the -include option.

Specific arguments:

-begin:xxx      Specify the text that marks the beginning of a page
-end:xxx        Specify the text that marks the end of a page
-file:xxx       Give the filename we are getting our material from
                (default: dict.txt)
-include        The beginning and end markers should be included
                in the page.
-titlestart:xxx Use xxx in place of ''' for identifying the
                beginning of page title
-titleend:xxx   Use xxx in place of ''' for identifying the
                end of page title
-notitle        do not include the title, including titlestart, and
                titleend, in the page
-nocontent      If page has this statement it doesn't append
                (example: -nocontent:"{{infobox")
-noredirect     if you don't want to upload on redirect page
                it is True by default and bot adds pages to redirected pages
-summary:xxx    Use xxx as the edit summary for the upload - if
                a page exists, standard messages are appended
                after xxx for appending, prepending, or replacement
-autosummary    Use MediaWikis autosummary when creating a new page,
                overrides -summary in this case
-minor          set minor edit flag on page edits
-showdiff       show difference between page and page to upload; it forces
                -always=False; default to False.

If the page to be uploaded already exists:

-safe           do nothing (default)
-appendtop      add the text to the top of it
-appendbottom   add the text to the bottom of it
-force          overwrite the existing page

It's possible to define a separator after the 'append' modes which is added
between the existing and new text. For example -appendtop:foo would add 'foo'
between the parts. The \n (two separate characters) is replaced by the
newline character.
"""
#
# (C) Andre Engels, 2004
# (C) Pywikibot team, 2005-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals

import codecs
import os
import re

from warnings import warn

import pywikibot

from pywikibot import config, i18n
from pywikibot.bot import SingleSiteBot, CurrentPageBot
from pywikibot.bot import OptionHandler
from pywikibot.exceptions import ArgumentDeprecationWarning


class NoTitle(Exception):

    """No title found."""

    def __init__(self, offset):
        """Constructor."""
        # Offset into the text at which scanning should resume.
        self.offset = offset


class PageFromFileRobot(SingleSiteBot, CurrentPageBot):

    """
    Responsible for writing pages to the wiki.

    Titles and contents are given by a PageFromFileReader.

    """

    def __init__(self, **kwargs):
        """Constructor."""
        self.availableOptions.update({
            'always': True,
            'force': False,
            'append': None,
            'summary': None,
            'minor': False,
            'autosummary': False,
            'nocontent': '',
            'redirect': True,
            'showdiff': False,
        })

        super(PageFromFileRobot, self).__init__(**kwargs)
        # -showdiff implies prompting, so it forces always=False.
        self.availableOptions.update(
            {'always': False if self.getOption('showdiff') else True})

    def init_page(self, page):
        """Do not try to update site before calling treat."""
        pass

    def treat(self, page_tuple):
        """Process page tuple, set page to current page and treat it."""
        title, content = page_tuple
        page = pywikibot.Page(self.site, title)
        page.text = content.strip()
        super(PageFromFileRobot, self).treat(page)

    def treat_page(self):
        """Upload page content."""
        page = self.current_page
        title = page.title()
        # save the content retrieved from generator
        contents = page.text
        # delete page's text to get it from live wiki
        del page.text

        if self.getOption('summary'):
            comment = self.getOption('summary')
        else:
            comment = i18n.twtranslate(self.site, 'pagefromfile-msg')

        # Pre-build the summary variants for append-top/bottom/overwrite.
        comment_top = comment + " - " + i18n.twtranslate(
            self.site, 'pagefromfile-msg_top')
        comment_bottom = comment + " - " + i18n.twtranslate(
            self.site, 'pagefromfile-msg_bottom')
        comment_force = "%s *** %s ***" % (
            comment, i18n.twtranslate(self.site, 'pagefromfile-msg_force'))

        if page.exists():
            if not self.getOption('redirect') and page.isRedirectPage():
                pywikibot.output(u"Page %s is redirect, skipping!" % title)
                return
            pagecontents = page.text
            nocontent = self.getOption('nocontent')
            # Skip pages already containing the -nocontent marker.
            if nocontent and (
                    nocontent in pagecontents or
                    nocontent.lower() in pagecontents):
                pywikibot.output('Page has %s so it is skipped' % nocontent)
                return
            if self.getOption('append'):
                separator = self.getOption('append')[1]
                if separator == r'\n':
                    separator = '\n'
                if self.getOption('append')[0] == 'top':
                    above, below = contents, pagecontents
                    comment = comment_top
                else:
                    above, below = pagecontents, contents
                    comment = comment_bottom
                pywikibot.output('Page {0} already exists, appending on {1}!'.format(
                    title, self.getOption('append')[0]))
                contents = above + separator + below
            elif self.getOption('force'):
                pywikibot.output(u"Page %s already exists, ***overwriting!"
                                 % title)
                comment = comment_force
            else:
                pywikibot.output(u"Page %s already exists, not adding!"
                                 % title)
                return
        else:
            if self.getOption('autosummary'):
                # Empty summary lets MediaWiki generate its autosummary.
                comment = config.default_edit_summary = ''

        self.put_current(contents, summary=comment,
                         minor=self.getOption('minor'),
                         show_diff=self.getOption('showdiff'))


class PageFromFileReader(OptionHandler):

    """Generator class, responsible for reading the file."""

    # Adapt these to the file you are using. 'begin' and
    # 'end' are the beginning and end of each entry. Take text that
    # should be included and does not occur elsewhere in the text.

    # TODO: make config variables for these.
    availableOptions = {
        'begin': '{{-start-}}',
        'end': '{{-stop-}}',
        'titlestart': "'''",
        'titleend': "'''",
        'include': False,
        'notitle': False,
    }

    def __init__(self, filename, **kwargs):
        """Constructor.

        Check if self.file name exists. If not, ask for a new filename.
        User can quit.

        """
        super(PageFromFileReader, self).__init__(**kwargs)
        self.filename = filename
        self.pageStartMarker = self.getOption('begin')
        self.pageEndMarker = self.getOption('end')
        self.titleStartMarker = self.getOption('titlestart')
        self.titleEndMarker = self.getOption('titleend')
        self.include = self.getOption('include')
        self.notitle = self.getOption('notitle')

    def __iter__(self):
        """Read file and yield a tuple of page title and content."""
        pywikibot.output('\n\nReading \'%s\'...' % self.filename)
        try:
            with codecs.open(self.filename, 'r',
                             encoding=config.textfile_encoding) as f:
                text = f.read()
        except IOError as err:
            pywikibot.output(str(err))
            # NOTE(review): re-raises a fresh, argument-less IOError,
            # discarding err's details — confirm this is intended.
            raise IOError

        position = 0
        length = 0
        while True:
            # findpage raises AttributeError (from a failed regex search)
            # when no further page markers are found.
            try:
                length, title, contents = self.findpage(text[position:])
            except AttributeError:
                if not length:
                    pywikibot.output(u'\nStart or end marker not found.')
                else:
                    pywikibot.output(u'End of file.')
                break
            except NoTitle as err:
                pywikibot.output(u'\nNo title found - skipping a page.')
                position += err.offset
                continue

            position += length
            yield title, contents

    def findpage(self, text):
        """Find page to work on."""
        pageR = re.compile(re.escape(self.pageStartMarker) + "(.*?)" +
                           re.escape(self.pageEndMarker), re.DOTALL)
        titleR = re.compile(re.escape(self.titleStartMarker) + "(.*?)" +
                            re.escape(self.titleEndMarker))

        location = pageR.search(text)
        if self.include:
            contents = location.group()
        else:
            contents = location.group(1)
        try:
            title = titleR.search(contents).group(1)
            if self.notitle:
                # Remove title (to allow creation of redirects)
                contents = titleR.sub('', contents, count=1)
        except AttributeError:
            raise NoTitle(location.end())
        else:
            return location.end(), title, contents


def main(*args):
    """
    Process command line arguments and invoke bot.

    If args is an empty list, sys.argv is used.

    @param args: command line arguments
    @type args: list of unicode
    """
    filename = "dict.txt"
    options = {}
    r_options = {}

    for arg in pywikibot.handle_args(args):
        arg, sep, value = arg.partition(':')
        option = arg.partition('-')[2]
        # reader options
        if option == 'start':
            r_options['begin'] = value
            warn('-start param (text that marks the beginning) of a page has '
                 'been deprecated in favor of begin; make sure to use the '
                 'updated param.', ArgumentDeprecationWarning)
        elif option in ('begin', 'end', 'titlestart', 'titleend'):
            r_options[option] = value
        elif option == 'file':
            filename = value
        elif option in ('include', 'notitle'):
            r_options[option] = True
        # bot options
        elif option == 'appendbottom':
            options['append'] = ('bottom', value)
        elif option == 'appendtop':
            options['append'] = ('top', value)
        elif option in ('force', 'minor', 'autosummary', 'showdiff'):
            options[option] = True
        elif option == 'safe':
            options['force'] = False
            options['append'] = None
        elif option == 'noredirect':
            options['redirect'] = False
        elif option in ('nocontent', 'summary'):
            options[option] = value
        else:
            pywikibot.output(u"Disregarding unknown argument %s." % arg)

    failed_filename = False
    while not os.path.isfile(filename):
        pywikibot.output('\nFile \'%s\' does not exist. ' % filename)
        _input = pywikibot.input(
            'Please enter the file name [q to quit]:')
        if _input == 'q':
            failed_filename = True
            break
        else:
            filename = _input

    # show help text from the top of this file if reader failed
    # or User quit.
    if failed_filename:
        pywikibot.bot.suggest_help(missing_parameters=['-file'])
        return False
    else:
        reader = PageFromFileReader(filename, **r_options)
        bot = PageFromFileRobot(generator=reader, **options)
        bot.run()


if __name__ == "__main__":
    main()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import os
import datetime
import argparse

from prjxray.db import Database
from fasm2bels.lib import progressbar_utils
from fasm2bels.database.connection_database_cache import DatabaseCache
"""
This file is used to create a basic form of the device database imported
from the prjxray database.

The connection database will hold information on nodes, sites and tiles
that are required when reconstructing the routing tree of a given FASM
file.
"""


def create_tables(conn):
    """ Create connection database scheme. """
    # The schema lives in a .sql file next to this module; executescript
    # runs all of its statements in one go.
    connection_database_sql_file = os.path.join(
        os.path.dirname(__file__), "connection_database.sql")
    with open(connection_database_sql_file, 'r') as f:
        c = conn.cursor()
        c.executescript(f.read())
        conn.commit()


def import_site_type(db, write_cur, site_types, site_type_name):
    """Insert one site type and all of its site pins.

    site_types is a name -> pkey cache updated in place.
    """
    assert site_type_name not in site_types
    site_type = db.get_site_type(site_type_name)

    # NOTE(review): unreachable — the assert above already guarantees the
    # name is not present; this early return is dead code.
    if site_type_name in site_types:
        return

    write_cur.execute("INSERT INTO site_type(name) VALUES (?)",
                      (site_type_name, ))
    site_types[site_type_name] = write_cur.lastrowid

    for site_pin in site_type.get_site_pins():
        pin_info = site_type.get_site_pin(site_pin)

        write_cur.execute(
            """
INSERT INTO site_pin(name, site_type_pkey, direction)
VALUES (?, ?, ?)""",
            (pin_info.name, site_types[site_type_name],
             pin_info.direction.value))


def build_pss_object_mask(db, tile_type_name):
    """ Looks for objects present in PSS* tiles of Zynq7 and masks out
    those that are purely PS related and not configued by the PL.
    """
    tile_type = db.get_tile_type(tile_type_name)
    sites = tile_type.get_sites()

    masked_wires = []
    masked_pips = []

    # Get all IOPADS for MIO and DDR signals
    iopad_sites = [s for s in sites if s.type == "IOPAD"]
    for site in iopad_sites:

        # Get pins/wires
        site_pins = [p for p in site.site_pins if p.name == "IO"]
        for site_pin in site_pins:

            # Mask the wire
            masked_wires.append(site_pin.wire)

            # Find a PIP(s) for this wire, mask them as well as wires on
            # their other sides.
            for p in tile_type.get_pips():
                if p.net_from == site_pin.wire:
                    masked_pips.append(p.name)
                    masked_wires.append(p.net_to)
                if p.net_to == site_pin.wire:
                    masked_pips.append(p.name)
                    masked_wires.append(p.net_from)

    # Masked sites names
    masked_sites = [(s.prefix, s.name) for s in iopad_sites]

    return masked_sites, masked_wires, masked_pips


def import_tile_type(db, write_cur, tile_types, site_types, tile_type_name):
    """Insert one tile type, its (unmasked) wires, and any new site types.

    tile_types / site_types are name -> pkey caches updated in place.
    """
    assert tile_type_name not in tile_types
    tile_type = db.get_tile_type(tile_type_name)

    # For Zynq7 PSS* tiles build a list of sites, wires and PIPs to ignore
    if tile_type_name.startswith("PSS"):
        masked_sites, masked_wires, masked_pips = build_pss_object_mask(
            db, tile_type_name)
    else:
        # NOTE(review): masked_pips is not assigned on this branch; it is
        # currently unused below, but adding a use would NameError here.
        masked_sites = []
        masked_wires = []

    write_cur.execute("INSERT INTO tile_type(name) VALUES (?)",
                      (tile_type_name, ))
    tile_types[tile_type_name] = write_cur.lastrowid

    wires = {}
    for wire, wire_rc_element in tile_type.get_wires().items():
        if wire in masked_wires:
            continue
        # Physical and logical tile type are the same at this stage, so the
        # same pkey is stored in both columns.
        write_cur.execute(
            """
INSERT INTO wire_in_tile(name, phy_tile_type_pkey, tile_type_pkey)
VALUES (?, ?, ?)""", (
                wire,
                tile_types[tile_type_name],
                tile_types[tile_type_name],
            ))
        wires[wire] = write_cur.lastrowid

    for site in tile_type.get_sites():
        if (site.prefix, site.name) in masked_sites:
            continue
        if site.type not in site_types:
            import_site_type(db, write_cur, site_types, site.type)


def add_wire_to_site_relation(db, write_cur, tile_types, site_types,
                              tile_type_name):
    """Insert sites of a tile type and link wires to their site pins."""
    tile_type = db.get_tile_type(tile_type_name)
    for site in tile_type.get_sites():
        # Masked (PSS) sites were never imported; skip them here too.
        if site.type not in site_types:
            continue

        write_cur.execute(
            """
INSERT INTO site(name, x_coord, y_coord, site_type_pkey, tile_type_pkey)
VALUES (?, ?, ?, ?, ?)""",
            (site.name, site.x, site.y, site_types[site.type],
             tile_types[tile_type_name]))
        site_pkey = write_cur.lastrowid

        for site_pin in site.site_pins:
            write_cur.execute(
                """
SELECT pkey FROM site_pin WHERE name = ? AND site_type_pkey = ?""",
                (site_pin.name, site_types[site.type]))
            result = write_cur.fetchone()
            site_pin_pkey = result[0]

            # Attach the site/site-pin pkeys to the matching tile wire.
            write_cur.execute(
                """
UPDATE wire_in_tile
SET site_pkey = ?, site_pin_pkey = ?
WHERE name = ? and tile_type_pkey = ?;""",
                (site_pkey, site_pin_pkey, site_pin.wire,
                 tile_types[tile_type_name]))


def build_tile_type_indicies(write_cur):
    """Create indices needed while the per-tile-type data is populated."""
    write_cur.execute(
        "CREATE INDEX site_pin_index ON site_pin(name, site_type_pkey);")
    write_cur.execute(
        "CREATE INDEX wire_name_index ON wire_in_tile(name, tile_type_pkey);")
    write_cur.execute(
        "CREATE INDEX wire_tile_site_index ON wire_in_tile(tile_type_pkey, site_pkey);"
    )
    write_cur.execute(
        "CREATE INDEX wire_site_pin_index ON wire_in_tile(site_pin_pkey);")
    write_cur.execute(
        "CREATE INDEX tile_type_index ON phy_tile(tile_type_pkey);")


def build_other_indicies(write_cur):
    """Create the remaining lookup indices on the grid tables."""
    write_cur.execute("CREATE INDEX phy_tile_name_index ON phy_tile(name);")
    write_cur.execute(
        "CREATE INDEX phy_tile_location_index ON phy_tile(grid_x, grid_y);")
    write_cur.execute(
        "CREATE INDEX site_instance_index on site_instance(name);")


def import_phy_grid(db, grid, conn):
    """Import the physical tile grid: tile types, tiles and site instances."""
    write_cur = conn.cursor()

    tile_types = {}
    site_types = {}

    # First pass: import each distinct tile type exactly once.
    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)

        if gridinfo.tile_type not in tile_types:
            # NOTE(review): this nested re-check can never be true inside
            # the `not in` branch above — dead code.
            if gridinfo.tile_type in tile_types:
                continue
            import_tile_type(db, write_cur, tile_types, site_types,
                             gridinfo.tile_type)
    write_cur.connection.commit()

    build_tile_type_indicies(write_cur)
    write_cur.connection.commit()

    for tile_type in tile_types:
        add_wire_to_site_relation(db, write_cur, tile_types, site_types,
                                  tile_type)

    # Second pass: one phy_tile row per grid location, plus its site
    # instances resolved back to the per-tile-type site rows.
    for tile in grid.tiles():
        gridinfo = grid.gridinfo_at_tilename(tile)
        loc = grid.loc_of_tilename(tile)

        # tile: pkey name tile_type_pkey grid_x grid_y
        write_cur.execute(
            """
INSERT INTO phy_tile(name, tile_type_pkey, grid_x, grid_y)
VALUES (?, ?, ?, ?)""", (
                tile,
                tile_types[gridinfo.tile_type],
                loc.grid_x,
                loc.grid_y,
            ))
        phy_tile_pkey = write_cur.lastrowid

        tile_type = db.get_tile_type(gridinfo.tile_type)
        for site, instance_site in zip(
                tile_type.sites, tile_type.get_instance_sites(gridinfo)):
            write_cur.execute(
                """
INSERT INTO site_instance(name, x_coord, y_coord, site_pkey, phy_tile_pkey, prohibited)
SELECT ?, ?, ?, site.pkey, ?, ?
FROM site
WHERE
  site.name = ?
  AND site.x_coord = ?
  AND site.y_coord = ?
  AND site.site_type_pkey = (SELECT pkey FROM site_type WHERE name = ?)
  AND tile_type_pkey = ?;
                """, (
                    instance_site.name,
                    instance_site.x,
                    instance_site.y,
                    phy_tile_pkey,
                    instance_site.name in gridinfo.prohibited_sites,
                    site.name,
                    site.x,
                    site.y,
                    site.type,
                    tile_types[gridinfo.tile_type],
                ))

    build_other_indicies(write_cur)
    write_cur.connection.commit()


def import_nodes(db, grid, conn):
    """Create wire rows, then merge wires into nodes via the connection list.

    Wires that appear in the same connection end up sharing one node row;
    the grouping is done with in-memory set unions keyed by wire pkey.
    """
    # Some nodes are just 1 wire, so start by enumerating all wires.
    cur = conn.cursor()
    write_cur = conn.cursor()
    write_cur.execute("""BEGIN EXCLUSIVE TRANSACTION;""")

    tile_wire_map = {}
    wires = {}
    for tile in progressbar_utils.progressbar(grid.tiles()):
        gridinfo = grid.gridinfo_at_tilename(tile)
        tile_type = db.get_tile_type(gridinfo.tile_type)

        cur.execute(
            """SELECT pkey, tile_type_pkey FROM phy_tile WHERE name = ?;""",
            (tile, ))
        phy_tile_pkey, tile_type_pkey = cur.fetchone()

        for wire in tile_type.get_wires():
            # pkey node_pkey tile_pkey wire_in_tile_pkey
            cur.execute(
                """
SELECT pkey FROM wire_in_tile WHERE name = ? and tile_type_pkey = ?;""",
                (wire, tile_type_pkey))
            wire_in_tile_pkey = cur.fetchone()
            # Masked (e.g. PSS) wires were never imported; skip them.
            if wire_in_tile_pkey is None:
                continue
            wire_in_tile_pkey = wire_in_tile_pkey[0]

            write_cur.execute(
                """
INSERT INTO wire(phy_tile_pkey, wire_in_tile_pkey)
VALUES (?, ?);""", (phy_tile_pkey, wire_in_tile_pkey))
            assert (tile, wire) not in tile_wire_map
            wire_pkey = write_cur.lastrowid
            tile_wire_map[(tile, wire)] = wire_pkey
            # None means "not yet part of any multi-wire node".
            wires[wire_pkey] = None

    write_cur.execute("""COMMIT TRANSACTION;""")

    connections = db.connections()

    # Union the two endpoint wires of every connection into a shared set.
    for connection in progressbar_utils.progressbar(
            connections.get_connections()):
        a_pkey = tile_wire_map[(connection.wire_a.tile,
                                connection.wire_a.wire)]
        b_pkey = tile_wire_map[(connection.wire_b.tile,
                                connection.wire_b.wire)]

        a_node = wires[a_pkey]
        b_node = wires[b_pkey]

        if a_node is None:
            a_node = set((a_pkey, ))

        if b_node is None:
            b_node = set((b_pkey, ))

        if a_node is not b_node:
            a_node |= b_node
            # Re-point every member of the merged set at the same object.
            for wire in a_node:
                wires[wire] = a_node

    nodes = {}
    for wire_pkey, node in wires.items():
        if node is None:
            node = set((wire_pkey, ))

        assert wire_pkey in node

        # Deduplicate shared sets by identity.
        nodes[id(node)] = node

    wires_assigned = set()
    for node in progressbar_utils.progressbar(nodes.values()):
        write_cur.execute("""INSERT INTO node(number_pips) VALUES (0);""")
        node_pkey = write_cur.lastrowid

        for wire_pkey in node:
            wires_assigned.add(wire_pkey)
            write_cur.execute(
                """
UPDATE wire SET node_pkey = ? WHERE pkey = ?;""", (node_pkey, wire_pkey))

    # Every wire must have been assigned to exactly one node.
    assert len(set(wires.keys()) ^ wires_assigned) == 0

    del tile_wire_map
    del nodes
    del wires

    write_cur.execute(
        "CREATE INDEX wire_in_tile_index ON wire(wire_in_tile_pkey);")
    write_cur.execute(
        "CREATE INDEX wire_index ON wire(phy_tile_pkey, wire_in_tile_pkey);")
    write_cur.execute("CREATE INDEX wire_node_index ON wire(node_pkey);")

    write_cur.connection.commit()


def count_sites_on_nodes(conn):
    """Sanity-check node/site-pin cardinality and set node.site_wire_pkey."""
    cur = conn.cursor()

    print("{}: Counting sites on nodes".format(datetime.datetime.now()))
    cur.execute("""
WITH node_sites(node_pkey, number_site_pins) AS (
  SELECT wire.node_pkey, count(wire_in_tile.site_pin_pkey)
  FROM wire_in_tile
  INNER JOIN wire ON wire.wire_in_tile_pkey = wire_in_tile.pkey
  WHERE wire_in_tile.site_pin_pkey IS NOT NULL
  GROUP BY wire.node_pkey
)
SELECT max(node_sites.number_site_pins) FROM node_sites;
        """)

    # Nodes are only expected to have 1 site
    assert cur.fetchone()[0] == 1

    print("{}: Assigning site wires for nodes".format(
        datetime.datetime.now()))
    cur.execute("""
WITH site_wires(wire_pkey, node_pkey) AS (
  SELECT wire.pkey, wire.node_pkey
  FROM wire_in_tile
  INNER JOIN wire ON wire.wire_in_tile_pkey = wire_in_tile.pkey
  WHERE wire_in_tile.site_pin_pkey IS NOT NULL
)
UPDATE node SET site_wire_pkey = (
  SELECT site_wires.wire_pkey
  FROM site_wires
  WHERE site_wires.node_pkey = node.pkey
);
        """)

    cur.connection.commit()


def create_channels(db_root, part, connection_database):
    """Build the connection database for `part` at `connection_database`.

    No-op if the database file already exists.
    """
    db = Database(db_root, part)
    grid = db.grid()

    # Existing database is assumed complete; nothing to do.
    if os.path.exists(connection_database):
        return

    with DatabaseCache(connection_database) as conn:
        create_tables(conn)

        print("{}: About to load database".format(datetime.datetime.now()))
        import_phy_grid(db, grid, conn)

        print("{}: Initial database formed".format(datetime.datetime.now()))
        import_nodes(db, grid, conn)

        print("{}: Connections made".format(datetime.datetime.now()))
        count_sites_on_nodes(conn)

        print("{}: Counted sites".format(datetime.datetime.now()))


def main():
    """CLI entry point: parse arguments and build the connection database."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '--db-root',
        required=True,
        help="Path to prjxray database for given FASM file part.")
    parser.add_argument(
        '--part', required=True, help="Name of part being targeted.")
    parser.add_argument(
        '--connection-database-output',
        required=True,
        help="Path to SQLite3 database for the given part.")

    args = parser.parse_args()

    create_channels(args.db_root, args.part, args.connection_database_output)


if __name__ == "__main__":
    main()
"""Copyright (c) 2010-2012 David Rio Vierra Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.""" #-# Modified by D.C.-G. for translation purpose import os import traceback from OpenGL import GL import numpy from albow import Widget, IntField, Column, Row, Label, Button, CheckBox, AttrRef, FloatField, alert, CheckBoxLabel, IntInputRow, \ showProgress from albow.translate import _ from depths import DepthOffset from editortools.editortool import EditorTool from editortools.nudgebutton import NudgeButton from editortools.tooloptions import ToolOptions from glbackground import Panel from glutils import gl from mceutils import setWindowCaption, alertException, drawFace import mcplatform from operation import Operation import pymclevel from pymclevel.box import Vector from renderer import PreviewRenderer import pygame from select import SelectionOperation from pymclevel.pocket import PocketWorld from pymclevel.leveldbpocket import PocketLeveldbWorld from pymclevel import block_copy, BoundingBox, BOParser import logging log = logging.getLogger(__name__) from config import config from albow.root import get_root class CoordsInput(Widget): is_gl_container = True def __init__(self, editor): Widget.__init__(self) self.nudgeButton = NudgeButton(editor) self.nudgeButton.nudge = self._nudge self.xField = IntField(value=0) self.yField = 
IntField(value=0) self.zField = IntField(value=0) for field in (self.xField, self.yField, self.zField): field.change_action = self._coordsChanged field.enter_passes = False offsetCol = Column((Row((Label('X'), self.xField)), Row((Label('Y'), self.yField)), Row((Label('Z'), self.zField)))) nudgeOffsetRow = Row((offsetCol, self.nudgeButton)) self.add(nudgeOffsetRow) self.shrink_wrap() def getCoords(self): return self.xField.value, self.yField.value, self.zField.value def setCoords(self, coords): x, y, z = coords self.xField.text = str(x) self.yField.text = str(y) self.zField.text = str(z) coords = property(getCoords, setCoords, None) def _coordsChanged(self): self.coordsChanged() def coordsChanged(self): # called when the inputs change. override or replace pass def _nudge(self, nudge): self.nudge(nudge) def nudge(self, nudge): # nudge is a 3-tuple where one of the elements is -1 or 1, and the others are 0. pass class BlockCopyOperation(Operation): def __init__(self, editor, sourceLevel, sourceBox, destLevel, destPoint, copyAir, copyWater, copyBiomes, staticCommands, moveSpawnerPos, regenerateUUID): super(BlockCopyOperation, self).__init__(editor, destLevel) self.sourceLevel = sourceLevel self.sourceBox = sourceBox self.destPoint = Vector(*destPoint) self.copyAir = copyAir self.copyWater = copyWater self.copyBiomes = copyBiomes self.staticCommands = staticCommands self.moveSpawnerPos = moveSpawnerPos self.regenerateUUID = regenerateUUID self.sourceBox, self.destPoint = block_copy.adjustCopyParameters(self.level, self.sourceLevel, self.sourceBox, self.destPoint) self.canUndo = False def dirtyBox(self): return BoundingBox(self.destPoint, self.sourceBox.size) def name(self): return _("Copy {0} blocks").format(self.sourceBox.volume) def perform(self, recordUndo=True): if self.level.saving: alert(_("Cannot perform action while saving is taking place")) return if recordUndo: self.canUndo = True self.undoLevel = self.extractUndo(self.level, BoundingBox(self.destPoint, 
self.sourceBox.size)) blocksToCopy = None if not (self.copyAir and self.copyWater): blocksToCopy = range(pymclevel.materials.id_limit) if not self.copyAir: blocksToCopy.remove(0) if not self.copyWater: blocksToCopy.remove(8) if not self.copyWater: blocksToCopy.remove(9) with setWindowCaption("Copying - "): i = self.level.copyBlocksFromIter(self.sourceLevel, self.sourceBox, self.destPoint, blocksToCopy, create=True, biomes=self.copyBiomes, staticCommands=self.staticCommands, moveSpawnerPos=self.moveSpawnerPos, regenerateUUID=self.regenerateUUID, first=False) showProgress(_("Copying {0:n} blocks...").format(self.sourceBox.volume), i) @staticmethod def bufferSize(): return 123456 class CloneOperation(Operation): def __init__(self, editor, sourceLevel, sourceBox, originSourceBox, destLevel, destPoint, copyAir, copyWater, copyBiomes, staticCommands, moveSpawnerPos, regenerateUUID, repeatCount): super(CloneOperation, self).__init__(editor, destLevel) self.blockCopyOps = [] dirtyBoxes = [] if repeatCount > 1: # clone tool only delta = destPoint - editor.toolbar.tools[0].selectionBox().origin else: delta = (0, 0, 0) for i in xrange(repeatCount): op = BlockCopyOperation(editor, sourceLevel, sourceBox, destLevel, destPoint, copyAir, copyWater, copyBiomes, staticCommands, moveSpawnerPos, regenerateUUID) dirty = op.dirtyBox() # bounds check - xxx move to BoundingBox if dirty.miny >= destLevel.Height or dirty.maxy < 0: continue if destLevel.Width != 0: if dirty.minx >= destLevel.Width or dirty.maxx < 0: continue if dirty.minz >= destLevel.Length or dirty.maxz < 0: continue dirtyBoxes.append(dirty) self.blockCopyOps.append(op) destPoint += delta if len(dirtyBoxes): def enclosingBox(dirtyBoxes): return reduce(lambda a, b: a.union(b), dirtyBoxes) self._dirtyBox = enclosingBox(dirtyBoxes) if repeatCount > 1 and self.selectOriginalAfterRepeat: dirtyBoxes.append(originSourceBox) dirty = enclosingBox(dirtyBoxes) points = (dirty.origin, dirty.maximum - (1, 1, 1)) self.selectionOps = 
[SelectionOperation(editor.selectionTool, points)] else: self._dirtyBox = None self.selectionOps = [] self.canUndo = False selectOriginalAfterRepeat = True def dirtyBox(self): return self._dirtyBox def perform(self, recordUndo=True): if self.level.saving: alert(_("Cannot perform action while saving is taking place")) return with setWindowCaption("COPYING - "): self.editor.freezeStatus(_("Copying %0.1f million blocks") % (float(self._dirtyBox.volume) / 1048576.,)) if recordUndo: chunks = set() for op in self.blockCopyOps: chunks.update(op.dirtyBox().chunkPositions) self.undoLevel = self.extractUndoChunks(self.level, chunks) [i.perform(False) for i in self.blockCopyOps] [i.perform(recordUndo) for i in self.selectionOps] self.canUndo = True def undo(self): super(CloneOperation, self).undo() [i.undo() for i in self.selectionOps] def redo(self): super(CloneOperation, self).redo() [i.redo() for i in self.selectionOps] class CloneToolPanel(Panel): useOffsetInput = True def transformEnable(self): return not isinstance(self.tool.level, pymclevel.MCInfdevOldLevel) def __init__(self, tool, editor, _parent=None): Panel.__init__(self, name='Panel.CloneToolPanel') self.tool = tool rotaterollRow = Row(( Label(config.keys.rotateClone.get()), Button("Rotate", width=80, action=tool.rotate, enable=self.transformEnable), Label(config.keys.rollClone.get()), Button("Roll", width=80, action=tool.roll, enable=self.transformEnable), )) flipmirrorRow = Row(( Label(config.keys.flip.get()), Button("Flip", width=80, action=tool.flip, enable=self.transformEnable), Label(config.keys.mirror.get()), Button("Mirror", width=80, action=tool.mirror, enable=self.transformEnable), )) self.alignCheckBox = CheckBox(ref=AttrRef(self.tool, 'chunkAlign')) self.alignLabel = Label("Chunk Align") self.alignLabel.mouse_down = self.alignCheckBox.mouse_down alignRow = Row(( self.alignCheckBox, self.alignLabel )) # headerLabel = Label("Clone Offset") if self.useOffsetInput: self.offsetInput = CoordsInput(editor) 
self.offsetInput.coordsChanged = tool.offsetChanged self.offsetInput.nudgeButton.bg_color = tool.color self.offsetInput.nudge = tool.nudge else: self.nudgeButton = NudgeButton(editor) self.nudgeButton.bg_color = tool.color self.nudgeButton.nudge = tool.nudge repeatField = IntField(ref=AttrRef(tool, 'repeatCount')) repeatField.min = 1 repeatField.max = 1000 repeatRow = Row(( Label("Repeat"), repeatField )) self.repeatField = repeatField scaleField = FloatField(ref=AttrRef(tool, 'scaleFactor')) scaleField.min = 0.1 scaleField.max = 8 if self.transformEnable(): scaleRow = Row(( Label("Scale Factor"), scaleField )) else: scaleRow = Row(( Label("Scale Factor: 1.0"), )) self.scaleField = scaleField self.copyAirCheckBox = CheckBox(ref=AttrRef(self.tool, "copyAir")) self.copyAirLabel = Label("Copy Air") self.copyAirLabel.mouse_down = self.copyAirCheckBox.mouse_down self.copyAirLabel.tooltipText = "Shortcut: Alt-1" self.copyAirCheckBox.tooltipText = self.copyAirLabel.tooltipText copyAirRow = Row((self.copyAirCheckBox, self.copyAirLabel)) self.copyWaterCheckBox = CheckBox(ref=AttrRef(self.tool, "copyWater")) self.copyWaterLabel = Label("Copy Water") self.copyWaterLabel.mouse_down = self.copyWaterCheckBox.mouse_down self.copyWaterLabel.tooltipText = "Shortcut: Alt-2" self.copyWaterCheckBox.tooltipText = self.copyWaterLabel.tooltipText copyWaterRow = Row((self.copyWaterCheckBox, self.copyWaterLabel)) self.copyBiomesCheckBox = CheckBox(ref=AttrRef(self.tool, "copyBiomes")) self.copyBiomesLabel = Label("Copy Biome(s)") self.copyBiomesLabel.mouse_down = self.copyBiomesCheckBox.mouse_down self.copyBiomesLabel.tooltipText = "Shortcut: Alt-3" self.copyBiomesCheckBox.tooltipText = self.copyBiomesLabel.tooltipText copyBiomesRow = Row((self.copyBiomesCheckBox, self.copyBiomesLabel)) self.staticCommandsCheckBox = CheckBox(ref=AttrRef(self.tool, "staticCommands")) self.staticCommandsLabel = Label("Update Command Block Coords") self.staticCommandsLabel.mouse_down = 
self.staticCommandsCheckBox.mouse_down self.staticCommandsLabel.tooltipText = "When a command block is moved, and it contains a command, automatically update static coordinates (x y z) within that command.\nShortcut: Alt-4" self.staticCommandsCheckBox.tooltipText = self.staticCommandsLabel.tooltipText staticCommandsRow = Row((self.staticCommandsCheckBox, self.staticCommandsLabel)) self.moveSpawnerPosCheckBox = CheckBox(ref=AttrRef(self.tool, "moveSpawnerPos")) self.moveSpawnerPosLabel = Label("Update Spawner Coords") self.moveSpawnerPosLabel.mouse_down = self.moveSpawnerPosCheckBox.mouse_down self.moveSpawnerPosLabel.tooltipText = "When a spawner is moved, automatically update its spawning coordinates.\nShortcut: Alt-5" self.moveSpawnerPosCheckBox.tooltipText = self.moveSpawnerPosLabel.tooltipText moveSpawnerPosRow = Row((self.moveSpawnerPosCheckBox, self.moveSpawnerPosLabel)) self.regenerateUUIDCheckBox = CheckBox(ref=AttrRef(self.tool, "regenerateUUID")) self.regenerateUUIDLabel = Label("Regenerate Entity UUID") self.regenerateUUIDLabel.mouse_down = self.regenerateUUIDCheckBox.mouse_down self.regenerateUUIDLabel.tooltipText = "Automatically generate new UUIDs for every entity copied. 
[RECOMMENDED]\nShortcut: Alt-6" self.regenerateUUIDCheckBox.tooltipText = self.regenerateUUIDLabel.tooltipText regenerateUUIDRow = Row((self.regenerateUUIDCheckBox, self.regenerateUUIDLabel)) self.performButton = Button("Clone", width=100, align="c") self.performButton.tooltipText = "Shortcut: Enter" self.performButton.action = tool.confirm self.performButton.enable = lambda: (tool.destPoint is not None) max_height = self.tool.editor.mainViewport.height - self.tool.editor.toolbar.height - self.tool.editor.subwidgets[0].height # - self.performButton.height - 2 def buildPage(*items): height = 0 cls = [] idx = 0 for i, r in enumerate(items): r.margin=0 r.shrink_wrap() height += r.height if height > max_height: cls.append(Column(items[idx:i], spacing=2, margin=0)) idx = i height = 0 cls.append(Column(items[idx:], spacing=2, margin=0)) return cls if self.useOffsetInput: cols = buildPage(rotaterollRow, flipmirrorRow, alignRow, self.offsetInput, repeatRow, scaleRow, copyAirRow, copyWaterRow, copyBiomesRow, staticCommandsRow, moveSpawnerPosRow, regenerateUUIDRow) else: cols = buildPage(rotaterollRow, flipmirrorRow, alignRow, self.nudgeButton, scaleRow, copyAirRow, copyWaterRow, copyBiomesRow, staticCommandsRow, moveSpawnerPosRow, regenerateUUIDRow) row = Row(cols, spacing=0, margin=2) row.shrink_wrap() col = Column((row, self.performButton), spacing=2) self.add(col) self.anchor = "lwh" self.shrink_wrap() class CloneToolOptions(ToolOptions): def __init__(self, tool): ToolOptions.__init__(self, name='Panel.CloneToolOptions') self.tool = tool self.autoPlaceCheckBox = CheckBox(ref=AttrRef(tool, "placeImmediately")) self.autoPlaceLabel = Label("Place Immediately") self.autoPlaceLabel.mouse_down = self.autoPlaceCheckBox.mouse_down tooltipText = "When the clone tool is chosen, place the clone at the selection right away." 
self.autoPlaceLabel.tooltipText = self.autoPlaceCheckBox.tooltipText = tooltipText spaceLabel = Label("") cloneNudgeLabel = Label("Clone Fast Nudge Settings") cloneNudgeCheckBox = CheckBoxLabel("Move by the width of selection ", ref=config.fastNudgeSettings.cloneWidth, tooltipText="Moves clone by his width") cloneNudgeNumber = IntInputRow("Width of clone movement: ", ref=config.fastNudgeSettings.cloneWidthNumber, width=100, min=2, max=50) row = Row((self.autoPlaceCheckBox, self.autoPlaceLabel)) col = Column((Label("Clone Options"), row, spaceLabel, cloneNudgeLabel, cloneNudgeCheckBox, cloneNudgeNumber, Button("OK", action=self.dismiss))) self.add(col) self.shrink_wrap() class CloneTool(EditorTool): surfaceBuild = True toolIconName = "clone" tooltipText = "Clone\nRight-click for options" level = None repeatCount = 1 _scaleFactor = 1.0 _chunkAlign = False @property def scaleFactor(self): return self._scaleFactor @scaleFactor.setter def scaleFactor(self, val): self.rescaleLevel(val) self._scaleFactor = val @property def chunkAlign(self): return self._chunkAlign @chunkAlign.setter def chunkAlign(self, value): self._chunkAlign = value self.alignDestPoint() def alignDestPoint(self): if self.destPoint is not None: x, y, z = self.destPoint self.destPoint = Vector((x >> 4) << 4, y, (z >> 4) << 4) placeImmediately = config.clone.placeImmediately.property() panelClass = CloneToolPanel color = (0.3, 1.0, 0.3, 0.19) def __init__(self, *args): self.rotation = 0 EditorTool.__init__(self, *args) self.previewRenderer = None self.panel = None self.optionsPanel = CloneToolOptions(self) self.destPoint = None self.snapCloneKey = 0 self.root = get_root() @property def statusText(self): if self.destPoint is None: return "Click to set this item down." if self.draggingFace is not None: return _("Mousewheel to move along the third axis. Hold {0} to only move along one axis.").format(_(config.keys.snapCloneToAxis.get())) return "Click and drag to reposition the item. 
Double-click to pick it up. Click Clone or press Enter to confirm." def quickNudge(self, nudge): if config.fastNudgeSettings.cloneWidth.get(): return map(int.__mul__, nudge, self.selectionBox().size) nudgeWidth = config.fastNudgeSettings.cloneWidthNumber.get() return map(lambda x: x * nudgeWidth, nudge) copyAir = config.clone.copyAir.property() copyWater = config.clone.copyWater.property() copyBiomes = config.clone.copyBiomes.property() staticCommands = config.clone.staticCommands.property() moveSpawnerPos = config.clone.moveSpawnerPos.property() regenerateUUID = config.clone.regenerateUUID.property() def nudge(self, nudge): if self.destPoint is None: if self.selectionBox() is None: return self.destPoint = self.selectionBox().origin if self.chunkAlign: x, y, z = nudge nudge = x << 4, y, z << 4 if self.editor.rightClickNudge: nudge = self.quickNudge(nudge) # self.panel.performButton.enabled = True self.destPoint = self.destPoint + nudge self.updateOffsets() def selectionChanged(self): if self.selectionBox() is not None and "CloneToolPanel" in str(self.panel): self.updateSchematic() self.updateOffsets() def updateOffsets(self): if self.panel and self.panel.useOffsetInput and self.destPoint is not None: self.panel.offsetInput.setCoords(self.destPoint - self.selectionBox().origin) def offsetChanged(self): if self.panel: if not self.panel.useOffsetInput: return box = self.selectionBox() if box is None: return delta = self.panel.offsetInput.coords self.destPoint = box.origin + delta def toolEnabled(self): return not (self.selectionBox() is None) def cancel(self): self.discardPreviewer() if self.panel: self.panel.parent.remove(self.panel) self.panel = None self.destPoint = None self.level = None self.originalLevel = None def toolReselected(self): self.pickUp() def safeToolDistance(self): return numpy.sqrt(sum([self.level.Width ** 2, self.level.Height ** 2, self.level.Length ** 2])) def toolSelected(self): box = self.selectionBox() if box is None: 
self.editor.toolbar.selectTool(-1) return if box.volume > self.maxBlocks: self.editor.mouseLookOff() alert(_("Selection exceeds {0:n} blocks. Increase the block buffer setting and try again.").format( self.maxBlocks)) self.editor.toolbar.selectTool(-1) return self.rotation = 0 self.repeatCount = 1 self._scaleFactor = 1.0 if self.placeImmediately: self.destPoint = box.origin else: self.destPoint = None self.updateSchematic() self.cloneCameraDistance = max(self.cloneCameraDistance, self.safeToolDistance()) self.showPanel() cloneCameraDistance = 0 @property def cameraDistance(self): return self.cloneCameraDistance @alertException def rescaleLevel(self, factor): if factor == 1: self.level = self.originalLevel self.setupPreview() return oldshape = self.originalLevel.Blocks.shape blocks = self.originalLevel.Blocks data = self.originalLevel.Data roundedShape = oldshape newshape = map(lambda x: int(x * factor), oldshape) for i, part in enumerate(newshape): if part == 0: newshape[i] = 1 xyzshape = newshape[0], newshape[2], newshape[1] newlevel = pymclevel.MCSchematic(xyzshape, mats=self.editor.level.materials) srcgrid = numpy.mgrid[0:roundedShape[0]:1.0 / factor, 0:roundedShape[1]:1.0 / factor, 0:roundedShape[2]:1.0 / factor].astype('uint') dstgrid = numpy.mgrid[0:newshape[0], 0:newshape[1], 0:newshape[2]].astype('uint') srcgrid = srcgrid[map(slice, dstgrid.shape)] dstgrid = dstgrid[map(slice, srcgrid.shape)] def copyArray(dest, src): dest[dstgrid[0], dstgrid[1], dstgrid[2]] = src[srcgrid[0], srcgrid[1], srcgrid[2]] copyArray(newlevel.Blocks, blocks) copyArray(newlevel.Data, data) self.level = newlevel self.setupPreview() @alertException def updateSchematic(self): # extract blocks with setWindowCaption("COPYING - "): self.editor.freezeStatus("Copying to clone buffer...") box = self.selectionBox() self.level = self.editor.level.extractSchematic(box) self.originalLevel = self.level # self.level.cloneToolScaleFactor = 1.0 self.rescaleLevel(self.scaleFactor) self.setupPreview() 
    def showPanel(self):
        """Create (or re-create) the clone tool's options panel and attach it to the editor."""
        if self.panel:
            self.panel.set_parent(None)
        self.panel = self.panelClass(self, self.editor)

        # Center vertically between the top subwidget and the toolbar.
        self.panel.centery = (self.editor.mainViewport.height - self.editor.toolbar.height) / 2 + self.editor.subwidgets[0].height
        self.panel.left = self.editor.left
        self.editor.add(self.panel)

    def setupPreview(self, alpha=1.0):
        """Start a PreviewRenderer for the current clone buffer, or deselect the tool if there is none."""
        self.discardPreviewer()
        if self.level:
            self.previewRenderer = PreviewRenderer(self.level, alpha)
            self.previewRenderer.position = self.editor.renderer.position
            self.editor.addWorker(self.previewRenderer)
        else:
            # No buffer to preview: drop back out of the tool.
            self.editor.toolbar.selectTool(-1)

    @property
    def canRotateLevel(self):
        # Infinite/pocket world formats cannot be rotated in place; only schematic-like buffers can.
        return not isinstance(self.level, (pymclevel.MCInfdevOldLevel, PocketWorld, PocketLeveldbWorld))

    def rotatedSelectionSize(self):
        """Return the buffer size as (x, y, z), undoing the (x, z, y) storage order of Blocks."""
        if self.canRotateLevel:
            sizes = self.level.Blocks.shape
            return sizes[0], sizes[2], sizes[1]
        else:
            return self.level.size

    # ===========================================================================
    # def getSelectionRanges(self):
    #    return self.editor.selectionTool.selectionBox()
    #
    # ===========================================================================

    @staticmethod
    def getBlockAt():
        return None  # use level's blockAt

    def getReticleOrigin(self):
        # returns a new origin for the current selection, where the old origin is at the new selection's center.
        pos, direction = self.editor.blockFaceUnderCursor
        lev = self.editor.level

        size = self.rotatedSelectionSize()
        if not size:
            return
        if size[1] >= self.editor.level.Height:
            direction = (
                0, 1, 0)  # always use the upward face whenever we're splicing full-height pieces, to avoid "jitter"

        # print size; raise SystemExit
        if any(direction) and pos[1] >= 0:
            # Offset by half the buffer size along the hovered face so the buffer sits flush against it.
            x, y, z = map(lambda p, s, d: p - s / 2 + s * d / 2 + (d > 0), pos, size, direction)
        else:
            x, y, z = map(lambda p, s: p - s / 2, pos, size)

        if self.chunkAlign:
            # Snap to the 16-block chunk grid.
            x &= ~0xf
            z &= ~0xf

        sy = size[1]
        if sy > lev.Height:  # don't snap really tall stuff to the height
            return Vector(x, y, z)

        # Clamp vertically so the buffer stays inside the world's height range.
        if y + sy > lev.Height:
            y = lev.Height - sy
        if y < 0:
            y = 0

        # NOTE(review): this parses as `(not (lev.Width == 0)) and (lev.Length == 0)`;
        # `not (lev.Width == 0 and lev.Length == 0)` looks intended -- confirm.
        if not lev.Width == 0 and lev.Length == 0:
            sx = size[0]
            if x + sx > lev.Width:
                x = lev.Width - sx
            if x < 0:
                x = 0

            sz = size[2]
            if z + sz > lev.Length:
                z = lev.Length - sz
            if z < 0:
                z = 0

        return Vector(x, y, z)

    def getReticleBox(self):
        """Bounding box for the floating reticle under the cursor, or None when there is no buffer."""
        pos = self.getReticleOrigin()
        sizes = self.rotatedSelectionSize()

        if sizes is None:
            return
        return BoundingBox(pos, sizes)

    def getDestBox(self):
        """Bounding box of the clone buffer at its chosen destination point."""
        selectionSize = self.rotatedSelectionSize()
        return BoundingBox(self.destPoint, selectionSize)

    def drawTerrainReticle(self):
        """Draw the block preview, either at the placed destination (tracking a drag) or under the cursor."""
        if self.level is None:
            return

        if self.destPoint is not None:
            destPoint = self.destPoint
            if self.draggingFace is not None:
                # debugDrawPoint()
                destPoint = self.draggingOrigin()
            self.drawTerrainPreview(destPoint)
        else:
            self.drawTerrainPreview(self.getReticleBox().origin)

    draggingColor = (0.77, 1.0, 0.55, 0.05)

    def drawToolReticle(self):
        """Draw the wireframe cube(s) and, while dragging, the drag-plane guide."""
        if self.level is None:
            return

        GL.glPolygonOffset(DepthOffset.CloneMarkers, DepthOffset.CloneMarkers)

        color = self.color
        if self.destPoint is not None:
            color = (self.color[0], self.color[1], self.color[2], 0.06)
            box = self.getDestBox()
            if self.draggingFace is not None:
                # Stretch a "guide" slab far along the two non-dragged axes so the
                # user can see the plane the buffer is being dragged within.
                o = list(self.draggingOrigin())

                s = list(box.size)
                for i in xrange(3):
                    if i == self.draggingFace >> 1:
                        continue
                    o[i] -= 1000
                    s[i] += 2000

                guideBox = BoundingBox(o, s)

                color = self.draggingColor
                GL.glColor(1.0, 1.0, 1.0, 0.33)
                with gl.glEnable(GL.GL_BLEND, GL.GL_TEXTURE_2D, GL.GL_DEPTH_TEST):
                    self.editor.sixteenBlockTex.bind()
                    drawFace(guideBox, self.draggingFace ^ 1)
        else:
            box = self.getReticleBox()
            if box is None:
                return
        self.drawRepeatedCube(box, color)

        GL.glPolygonOffset(DepthOffset.CloneReticle, DepthOffset.CloneReticle)
        if self.destPoint:
            box = self.getDestBox()
            if self.draggingFace is not None:
                box = BoundingBox(self.draggingOrigin(), box.size)
            face, point = self.boxFaceUnderCursor(box)
            if face is not None:
                # Highlight the face the cursor is over.
                GL.glEnable(GL.GL_BLEND)
                GL.glDisable(GL.GL_DEPTH_TEST)
                GL.glColor(*self.color)
                drawFace(box, face)
                GL.glDisable(GL.GL_BLEND)
                GL.glEnable(GL.GL_DEPTH_TEST)

    def drawRepeatedCube(self, box, color):
        # draw several cubes according to the repeat count
        # it's not really sensible to repeat a crane because the origin point is literally out of this world.
        delta = box.origin - self.selectionBox().origin
        for i in xrange(self.repeatCount):
            self.editor.drawConstructionCube(box, color)
            box = BoundingBox(box.origin + delta, box.size)

    def drawToolMarkers(self):
        """Draw the destination cube marker, colored when the cursor hovers a nudge button."""
        selectionBox = self.selectionBox()
        if selectionBox:
            widg = self.editor.find_widget(pygame.mouse.get_pos())
            try:
                if self.panel and (widg is self.panel.nudgeButton or widg.parent is self.panel.nudgeButton):
                    color = self.color
            except:
                try:
                    if self.panel and (widg is self.panel.offsetInput.nudgeButton or widg.parent is self.panel.offsetInput.nudgeButton):
                        color = self.color
                except:
                    pass
            finally:
                # NOTE(review): `color` is only bound when a nudge button is hovered;
                # the bare except below silently swallows the resulting NameError
                # (and any draw error) on every other frame -- confirm intent.
                try:
                    self.editor.drawConstructionCube(self.getDestBox(), color)
                except:
                    pass

    def sourceLevel(self):
        return self.level

    @alertException
    def rotate(self, amount=1, blocksOnly=False):
        """Rotate the buffer left `amount` quarter-turns (rotation state kept modulo 4)."""
        if self.canRotateLevel:
            self.rotation += amount
            self.rotation &= 0x3
            for i in xrange(amount & 0x3):
                if blocksOnly:
                    self.level.rotateLeftBlocks()
                else:
                    self.level.rotateLeft()

            self.previewRenderer.level = self.level

    @alertException
    def roll(self, amount=1, blocksOnly=False):
        """Roll the buffer `amount` quarter-turns around the horizontal axis."""
        if self.canRotateLevel:
            for i in xrange(amount & 0x3):
                if blocksOnly:
                    self.level.rollBlocks()
                else:
                    self.level.roll()

            self.previewRenderer.level = self.level

    @alertException
    def flip(self, amount=1, blocksOnly=False):
        """Flip the buffer vertically (amount is effectively 0 or 1)."""
        if self.canRotateLevel:
            for i in xrange(amount & 0x1):
                if blocksOnly:
                    self.level.flipVerticalBlocks()
                else:
                    self.level.flipVertical()

            self.previewRenderer.level = self.level

    @alertException
    def mirror(self, blocksOnly=False):
        """Mirror the buffer across the axis facing the camera, chosen from the viewport yaw."""
        if self.canRotateLevel:
            yaw = int(self.editor.mainViewport.yaw) % 360
            if (45 <= yaw < 135) or (225 < yaw <= 315):
                if blocksOnly:
                    self.level.flipEastWestBlocks()
                else:
                    self.level.flipEastWest()
            else:
                if blocksOnly:
                    self.level.flipNorthSouthBlocks()
                else:
                    self.level.flipNorthSouth()

            self.previewRenderer.level = self.level

    # Panel option toggles (wired to the tool options checkboxes).
    def option1(self):
        self.copyAir = not self.copyAir

    def option2(self):
        self.copyWater = not self.copyWater

    def option3(self):
        self.copyBiomes = not self.copyBiomes

    def option4(self):
        self.staticCommands = not self.staticCommands

    def option5(self):
        self.moveSpawnerPos = not self.moveSpawnerPos

    draggingFace = None
    draggingStartPoint = None

    def draggingOrigin(self):
        p = self._draggingOrigin()
        return p

    def _draggingOrigin(self):
        """Destination origin for the in-progress drag, derived from the cursor's plane position."""
        dragPos = map(int, map(numpy.floor, self.positionOnDraggingPlane()))
        delta = map(lambda s, e: e - int(numpy.floor(s)), self.draggingStartPoint, dragPos)

        if self.snapCloneKey == 1:
            # Axis-snap: keep only the dominant component (plus the dragged axis).
            ad = map(abs, delta)
            midx = ad.index(max(ad))
            d = [0, 0, 0]
            d[midx] = delta[midx]
            dragY = self.draggingFace >> 1
            d[dragY] = delta[dragY]
            delta = d

        p = self.destPoint + delta
        if self.chunkAlign:
            p = [i // 16 * 16 for i in p]
        return Vector(*p)

    def positionOnDraggingPlane(self):
        """Project the mouse ray onto the plane of the face being dragged."""
        pos = self.editor.mainViewport.cameraPosition
        dim = self.draggingFace >> 1
        # if key.get_mods() & KMOD_SHIFT:
        #     dim = self.findBestTrackingPlane(self.draggingFace)
        #
        distance = self.draggingStartPoint[dim] - pos[dim]
        distance += self.draggingY

        mouseVector = self.editor.mainViewport.mouseVector
        # `or 1` avoids a zero-division when the ray is parallel to the plane.
        scale = distance / (mouseVector[dim] or 1)
        point = map(lambda a, b: a * scale + b, mouseVector, pos)
        return point

    draggingY = 0

    @alertException
    def mouseDown(self, evt, pos, direction):
        """First click places the buffer; on a placed buffer, start a face drag or pick it up (double-click)."""
        box = self.selectionBox()
        if not box:
            return
        self.draggingY = 0

        if self.destPoint is not None:
            if evt.num_clicks == 2:
                self.pickUp()
                return
            face, point = self.boxFaceUnderCursor(self.getDestBox())
            if face is not None:
                self.draggingFace = face
                self.draggingStartPoint = point
        else:
            self.destPoint = self.getReticleOrigin()
            if self.panel and self.panel.useOffsetInput:
                self.panel.offsetInput.setCoords(self.destPoint - box.origin)
            print "Destination: ", self.destPoint

    @alertException
    def mouseUp(self, evt, pos, direction):
        """Commit the drag position (if any) and clear the drag state."""
        if self.draggingFace is not None:
            self.destPoint = self.draggingOrigin()
            self.updateOffsets()
        self.draggingFace = None
        self.draggingStartPoint = None

    def keyDown(self, evt):
        keyname = evt.dict.get('keyname', None) or self.root.getKey(evt)
        if keyname == config.keys.snapCloneToAxis.get():
            self.snapCloneKey = 1

    def keyUp(self, evt):
        keyname = evt.dict.get('keyname', None) or self.root.getKey(evt)
        if keyname == config.keys.snapCloneToAxis.get():
            self.snapCloneKey = 0

    def increaseToolReach(self):
        """Scroll the drag away from the camera (or increase camera reach when not dragging)."""
        if self.draggingFace is not None:
            d = (1, -1)[self.draggingFace & 1]
            if self.draggingFace >> 1 != 1:  # xxxxx y
                d = -d
            self.draggingY += d
            x, y, z = self.editor.mainViewport.cameraPosition
            pos = [x, y, z]
            pos[self.draggingFace >> 1] += d
            self.editor.mainViewport.cameraPosition = tuple(pos)
        else:
            self.cloneCameraDistance = self.editor._incrementReach(self.cloneCameraDistance)
        return True

    def decreaseToolReach(self):
        """Scroll the drag toward the camera (or decrease camera reach when not dragging)."""
        if self.draggingFace is not None:
            d = (1, -1)[self.draggingFace & 1]
            if self.draggingFace >> 1 != 1:  # xxxxx y
                d = -d
            self.draggingY -= d
            x, y, z = self.editor.mainViewport.cameraPosition
            pos = [x, y, z]
            pos[self.draggingFace >> 1] -= d
            self.editor.mainViewport.cameraPosition = tuple(pos)
        else:
            self.cloneCameraDistance = self.editor._decrementReach(self.cloneCameraDistance)
        return True

    def resetToolReach(self):
        """Undo any drag-scroll offset, or reset the camera reach to its default."""
        if self.draggingFace is not None:
            x, y, z = self.editor.mainViewport.cameraPosition
            pos = [x, y, z]
            pos[self.draggingFace >> 1] += (1, -1)[self.draggingFace & 1] * -self.draggingY
            self.editor.mainViewport.cameraPosition = tuple(pos)
            self.draggingY = 0
        else:
            self.cloneCameraDistance = max(self.editor.defaultCameraToolDistance, self.safeToolDistance())
        return True

    def pickUp(self):
        """Lift a placed buffer back onto the cursor, restoring a sensible camera distance."""
        if self.destPoint is None:
            return

        box = self.selectionBox()

        # pick up the object. reset the tool distance to the object's distance from the camera
        d = map(lambda a, b, c: abs(a - b - c / 2), self.editor.mainViewport.cameraPosition, self.destPoint, box.size)
        self.cloneCameraDistance = numpy.sqrt(d[0] * d[0] + d[1] * d[1] + d[2] * d[2])
        self.destPoint = None
        # self.panel.performButton.enabled = False
        print "Picked up"

    @alertException
    def confirm(self):
        """Build and run the CloneOperation for the placed buffer, then reset the tool."""
        destPoint = self.destPoint
        if destPoint is None:
            return

        sourceLevel = self.sourceLevel()
        sourceBox = sourceLevel.bounds

        destLevel = self.editor.level
        op = CloneOperation(editor=self.editor,
                            sourceLevel=sourceLevel,
                            sourceBox=sourceBox,
                            originSourceBox=self.selectionBox(),
                            destLevel=destLevel,
                            destPoint=self.destPoint,
                            copyAir=self.copyAir,
                            copyWater=self.copyWater,
                            copyBiomes=self.copyBiomes,
                            staticCommands=self.staticCommands,
                            moveSpawnerPos=self.moveSpawnerPos,
                            regenerateUUID=self.regenerateUUID,
                            repeatCount=self.repeatCount)

        self.editor.toolbar.selectTool(
            -1)  # deselect tool so that the clone tool's selection change doesn't update its schematic

        self.editor.addOperation(op)
        if op.canUndo:
            self.editor.addUnsavedEdit()

        dirtyBox = op.dirtyBox()
        if dirtyBox:
            self.editor.invalidateBox(dirtyBox)
        self.editor.renderer.invalidateChunkMarkers()

        self.editor.currentOperation = None

        self.destPoint = None
        self.level = None

    def discardPreviewer(self):
        """Stop and detach the preview renderer, if one is running."""
        if self.previewRenderer is None:
            return
        self.previewRenderer.stopWork()
        self.previewRenderer.discardAllChunks()
        self.editor.removeWorker(self.previewRenderer)
        self.previewRenderer = None


class ConstructionToolPanel(CloneToolPanel):
    # The import tool places at an absolute point; no offset-from-selection input.
    useOffsetInput = False


class ConstructionToolOptions(ToolOptions):
    """Right-click options panel for the import (crane) tool."""
    def __init__(self, tool):
        ToolOptions.__init__(self, name='Panel.ConstructionToolOptions')
        self.tool = tool
        importNudgeLabel = Label("Import Fast Nudge Settings:")
        importNudgeCheckBox = CheckBoxLabel("Move by the width of schematic ",
                                            ref=config.fastNudgeSettings.importWidth,
                                            tooltipText="Moves selection by his width")
        importNudgeNumber = IntInputRow("Width of import movement: ",
                                        ref=config.fastNudgeSettings.importWidthNumber,
                                        width=100, min=2, max=50)

        col = Column((Label("Import Options"), importNudgeLabel, importNudgeCheckBox, importNudgeNumber,
                      Button("OK", action=self.dismiss)))
        self.add(col)
        self.shrink_wrap()


class ConstructionTool(CloneTool):
    """Clone-tool subclass that imports schematics/levels from disk instead of the selection."""
    surfaceBuild = True
    toolIconName = "crane"
    tooltipText = "Import\nRight-click for options"

    panelClass = ConstructionToolPanel

    def toolEnabled(self):
        return True

    def selectionChanged(self):
        pass

    def updateSchematic(self):
        # Imported levels are used as-is; no re-extraction from a selection.
        self.originalLevel = self.level
        self.scaleFactor = 1.0

    def quickNudge(self, nudge):
        """Scale a nudge vector by the schematic width or the configured fast-nudge width."""
        if config.fastNudgeSettings.importWidth.get():
            return map(int.__mul__, nudge, self.selectionBox().size)
        nudgeWidth = config.fastNudgeSettings.importWidthNumber.get()
        return map(lambda x: x * nudgeWidth, nudge)

    def __init__(self, *args):
        CloneTool.__init__(self, *args)
        self.level = None
        self.optionsPanel = ConstructionToolOptions(self)
        self.testBoardKey = 0

    @property
    def statusText(self):
        if self.destPoint is None:
            return "Click to set this item down."
        return "Click and drag to reposition the item. Double-click to pick it up. Click Import or press Enter to confirm."
    def showPanel(self):
        """Show the clone panel, relabelled for importing."""
        CloneTool.showPanel(self)
        self.panel.performButton.text = "Import"
        self.updateSchematic()

    def toolReselected(self):
        self.toolSelected()

    # def cancel(self):
    #     print "Cancelled Clone"
    #     self.level = None
    #     super(ConstructionTool, self).cancel(self)
    #
    def createTestBoard(self, anyBlock=True):
        """Build a debug schematic laying out every known block (or every ID/data pair) in a grid."""
        if anyBlock:
            allBlocks = [self.editor.level.materials[a, b] for a in xrange(256) for b in xrange(16)]
            blockWidth = 64
        else:
            allBlocks = self.editor.level.materials.allBlocks
            blockWidth = 16
        blockCount = len(allBlocks)

        # Each block occupies a 2x2 footprint with 1-block gutters.
        width = blockWidth * 3 + 1
        rows = blockCount // blockWidth + 1
        length = rows * 3 + 1
        height = 3

        schematic = pymclevel.MCSchematic((width, height, length), mats=self.editor.level.materials)
        schematic.Blocks[:, :, 0] = 1  # stone floor

        for i, block in enumerate(allBlocks):
            col = (i % blockWidth) * 3 + 1
            row = (i // blockWidth) * 3
            schematic.Blocks[col:col + 2, row:row + 2, 2] = block.ID
            schematic.Data[col:col + 2, row:row + 2, 2] = block.blockData

        return schematic

    def toolSelected(self):
        """Prompt for a file to import and load it into the clone buffer (format chosen by extension)."""
        self.editor.mouseLookOff()

        if self.editor.testBoardKey == 1:
            self.editor.testBoardKey = 0
            self.loadLevel(self.createTestBoard())
            return

        self.editor.mouseLookOff()

        clipFilename = mcplatform.askOpenFile(title='Import a schematic or level...', schematics=True)  # xxx mouthful
        if clipFilename:
            if unicode(clipFilename).split(".")[-1] in ("schematic", "schematic.gz", "zip", "inv"):
                self.loadSchematic(clipFilename)
            elif unicode(clipFilename).split(".")[-1].lower() == "nbt":
                structure = pymclevel.schematic.StructureNBT(filename=clipFilename)
                self.loadLevel(structure.toSchematic())
            elif unicode(clipFilename).split(".")[-1].lower() == "bo2":
                self.loadLevel(BOParser.BO2(clipFilename).getSchematic())
            elif unicode(clipFilename).split(".")[-1].lower() == "bo3":
                self.loadLevel(BOParser.BO3(clipFilename).getSchematic())
                # alert("BO3 support is currently not available")
            else:
                self.loadSchematic(clipFilename)
        # NOTE(review): prints even when a file was chosen and loaded --
        # presumably meant to be the `else:` branch of the check above; confirm.
        print "Canceled"

        if self.level is None:
            print "No level selected."
self.editor.toolbar.selectTool(-1) # CloneTool.toolSelected(self) originalLevelSize = (0, 0, 0) def loadSchematic(self, filename): """ actually loads a schematic or a level """ try: level = pymclevel.fromFile(filename, readonly=True) self.loadLevel(level) except Exception as e: logging.warn(u"Unable to import file %s : %s", filename, e) traceback.print_exc() if filename: # self.editor.toolbar.selectTool(-1) alert( _(u"I don't know how to import this file: {0}.\n\nError: {1!r}").format(os.path.basename(filename), e)) return @alertException def loadLevel(self, level): if level: self.level = level self.repeatCount = 1 self.destPoint = None self.editor.currentTool = self # because save window triggers loseFocus, which triggers tool.cancel... hmmmmmm self.cloneCameraDistance = self.safeToolDistance() self.chunkAlign = isinstance(self.level, pymclevel.MCInfdevOldLevel) and all( b % 16 == 0 for b in self.level.bounds.size) self.setupPreview() self.originalLevelSize = (self.level.Width, self.level.Height, self.level.Length) self.showPanel() return def selectionSize(self): if not self.level: return None return self.originalLevelSize def selectionBox(self): if not self.level: return None return BoundingBox((0, 0, 0), self.selectionSize()) def sourceLevel(self): return self.level def mouseDown(self, evt, pos, direction): # x,y,z = pos box = self.selectionBox() if not box: return CloneTool.mouseDown(self, evt, pos, direction)
""" The Client class, handling direct communication with the API """ import base64 import os import time import uuid from collections import OrderedDict from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes, serialization from cryptography.hazmat.primitives.asymmetric import padding from suds.client import Client as SudsClient from suds.plugin import DocumentPlugin from suds.sudsobject import Object as SudsObject from suds.xsd.doctor import Import, ImportDoctor from . import __version__ try: from urllib.parse import urlencode, quote_plus except ImportError: from urllib import urlencode, quote_plus try: import suds_requests except ImportError: suds_requests = None URI_TEMPLATE = 'https://{}/wsdl/?service={}' MODE_RO = 'readonly' MODE_RW = 'readwrite' def convert_value(value): """ None and boolean values are not accepted by the Transip API. This method converts - None and False to an empty string, - True to 1 """ if isinstance(value, bool): return 1 if value else '' if not value: return '' return value class WSDLFixPlugin(DocumentPlugin): # pylint: disable=W0232 """ A SudsFilter to fix wsdl document before it is parsed. """ def loaded(self, context): # pylint: disable=R0201 """ Replaces an invalid type in the wsdl document with a validy type. """ context.document = context.document.replace(b'xsd:array', b'soapenc:Array') class Client: """ A client-base class, for other classes to base their service implementation on. Contains methods to set and sign cookie and to retrieve the correct WSDL for specific parts of the TransIP API. Note: You either need to supply a private_key or a private_key_file. Args: service_name (str): Name of the service. login (str): The TransIP username. private_key (str, optional): The content of the private key for accessing the TransIP API. private_key_file (str, optional): Path the the private key for accessing the TransIP API. Defaults to 'decrypted_key'. 
endpoint (str): The TransIP API endpoint. Defaults to 'api.transip.nl'. """ def __init__(self, service_name, login, private_key=None, private_key_file='decrypted_key', endpoint='api.transip.nl'): self.service_name = service_name self.login = login self.private_key = private_key self.private_key_file = private_key_file self.endpoint = endpoint self.url = URI_TEMPLATE.format(endpoint, service_name) imp = Import('http://schemas.xmlsoap.org/soap/encoding/') doc = ImportDoctor(imp) suds_kwargs = dict() if suds_requests: suds_kwargs['transport'] = suds_requests.RequestsTransport() self.soap_client = SudsClient(self.url, doctor=doc, plugins=[WSDLFixPlugin()], **suds_kwargs) def _sign(self, message): """ Uses the decrypted private key to sign the message. """ if self.private_key: keydata = self.private_key elif os.path.exists(self.private_key_file): with open(self.private_key_file) as private_key: keydata = private_key.read() else: raise RuntimeError('The private key does not exist.') private_key = serialization.load_pem_private_key( str.encode(keydata), password=None, backend=default_backend() ) signature = private_key.sign( str.encode(message), padding.PKCS1v15(), hashes.SHA512(), ) signature = base64.b64encode(signature) signature = quote_plus(signature) return signature def _build_signature_message(self, service_name, method_name, timestamp, nonce, additional=None): """ Builds the message that should be signed. This message contains specific information about the request in a specific order. 
""" if additional is None: additional = [] sign = OrderedDict() # Add all additional parameters first for index, value in enumerate(additional): if isinstance(value, list): for entryindex, entryvalue in enumerate(value): if isinstance(entryvalue, SudsObject): for objectkey, objectvalue in entryvalue: objectvalue = convert_value(objectvalue) sign[str(index) + '[' + str(entryindex) + '][' + objectkey + ']'] = objectvalue elif isinstance(value, SudsObject): for entryindex, entryvalue in value: key = str(index) + '[' + str(entryindex) + ']' sign[key] = convert_value(entryvalue) else: sign[index] = convert_value(value) sign['__method'] = method_name sign['__service'] = service_name sign['__hostname'] = self.endpoint sign['__timestamp'] = timestamp sign['__nonce'] = nonce return urlencode(sign) \ .replace('%5B', '[') \ .replace('%5D', ']') \ .replace('+', '%20') \ .replace('%7E', '~') # Comply with RFC3989. This replacement is also in TransIP's sample PHP library. def update_cookie(self, cookies): """ Updates the cookie for the upcoming call to the API. """ temp = [] for k, val in cookies.items(): temp.append("%s=%s" % (k, val)) cookiestring = ';'.join(temp) self.soap_client.set_options(headers={'Cookie': cookiestring}) def build_cookie(self, method, mode, parameters=None): """ Build a cookie for the request. Keyword arguments: method -- the method to be called on the service. 
mode -- Read-only (MODE_RO) or read-write (MODE_RW) """ timestamp = int(time.time()) nonce = str(uuid.uuid4())[:32] message_to_sign = self._build_signature_message( service_name=self.service_name, method_name=method, timestamp=timestamp, nonce=nonce, additional=parameters ) signature = self._sign(message_to_sign) cookies = { "nonce": nonce, "timestamp": timestamp, "mode": mode, "clientVersion": __version__, "login": self.login, "signature": signature } return cookies def _simple_request(self, method, *args, **kwargs): """ Helper method to create a request in a DRY way """ cookie = self.build_cookie(mode=kwargs.get('mode', MODE_RO), method=method, parameters=args) self.update_cookie(cookie) return getattr(self.soap_client.service, method)(*args)
""" Map Interface Module Copyright 2013 Rob "N3X15" Nelson <nexis@7chan.org> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" import os, itertools, sys, numpy, logging, hashlib from byond.map.format import GetMapFormat, Load as LoadMapFormats from byond.DMI import DMI from byond.directions import SOUTH, IMAGE_INDICES from byond.basetypes import Atom, BYONDString, BYONDValue, BYONDFileRef, BYOND2RGBA # from byond.objtree import ObjectTree from PIL import Image, ImageChops # Cache _icons = {} _dmis = {} LoadMapFormats() # From StackOverflow def trim(im): bg = Image.new(im.mode, im.size, im.getpixel((0, 0))) diff = ImageChops.difference(im, bg) diff = ImageChops.add(diff, diff, 2.0, -100) bbox = diff.getbbox() if bbox: return im.crop(bbox) # Bytes def tint_image(image, tint_color): return ImageChops.multiply(image, Image.new('RGBA', image.size, tint_color)) class LocationIterator: def __init__(self, _map): self.map = _map self.x = -1 self.y = 0 self.z = 0 self.max_z = len(self.map.zLevels) def __iter__(self): return self def __next__(self): return self.next() def next(self): self.x += 1 zLev = self.map.zLevels[self.z] if self.x >= zLev.width: self.y += 1 self.x = 0 if self.y >= zLev.height: self.z += 1 self.y = 0 if self.z >= self.max_z: raise StopIteration t = self.map.GetTileAt(self.x, self.y, self.z) # print('{} = {}'.format((self.x,self.y,self.z),str(t))) return t class TileIterator: def __init__(self, _map): self.map = _map self.pos = -1 self.max = len(self.map.tiles) def __iter__(self): return self def __next__(self): return self.next() def next(self): self.pos += 1 if self.pos >= self.max: raise StopIteration t = self.map.tiles[self.pos] #print('#{} = {}'.format(self.pos,str(t))) return t class AtomIterator: def __init__(self, _map): self.map = _map self.pos = -1 self.max = len(self.map.instances) def __iter__(self): return self def __next__(self): return self.next() def next(self): self.pos += 1 if self.pos >= len(self.max): raise StopIteration t = self.map.instances[self.pos] # print('#{} = {}'.format(self.pos,str(t))) return t class Tile(object): def __init__(self, _map, 
master=False): # : Map's copy of the tile, used for tracking. self.master = master self.coords = (0, 0, 0) self.origID = '' self.ID = -1 self.instances = [] self.locations = [] self.frame = None self.unselected_frame = None self.areaSelected = True self.log = logging.getLogger(__name__ + '.Tile') self.map = _map self._hash = None self.orig_hash = None def UpdateHash(self, no_map_update=False): if self._hash is None: # Why MD5? Because the shorter the string, the faster the comparison. self._hash = hashlib.md5(str(self)).hexdigest() if not no_map_update: self.ID=self.map.UpdateTile(self) if self.ID==-1: raise Error('self.ID == -1') def InvalidateHash(self): if self._hash is not None: self.orig_hash = self._hash self._hash = None def GetHash(self): self.UpdateHash() return self._hash def RemoveAtom(self, atom, hash=True): ''' :param Atom atom: Atom to remove. Raises ValueError if not found. ''' if atom is None: return self.instances.remove(atom.ID) self.InvalidateHash() if hash: self.UpdateHash() def AppendAtom(self, atom, hash=True): ''' :param Atom atom: Atom to add. ''' if atom is None: return atom.UpdateMap(self.map) self.instances.append(atom.ID) self.InvalidateHash() if hash: self.UpdateHash() def CountAtom(self, atom): ''' :param Atom atom: Atom to count. 
        :return int:
            Count of atoms
        '''
        return self.instances.count(atom.ID)

    def copy(self, origID=False):
        """Return a detached copy of this tile (optionally carrying the original map ID string)."""
        tile = self.map.CreateTile()
        tile.ID = self.ID
        tile.instances = [x for x in self.instances]
        if origID:
            tile.origID = self.origID
        if not self._hash:
            self.UpdateHash(no_map_update=True)
        tile._hash = self._hash
        return tile

    def GetAtoms(self):
        """Resolve this tile's instance IDs to Atom copies, skipping unknown IDs."""
        atoms = []
        for id in self.instances:
            if id is None:
                continue
            a = self.map.GetInstance(id)
            if a is None:
                self.log.debug('Unknown instance ID {}!'.format(id))
                continue
            atoms += [a]
        return atoms

    def SortAtoms(self):
        return sorted(self.GetAtoms(), reverse=True)

    def GetAtom(self, idx):
        return self.map.GetInstance(self.instances[idx])

    def GetInstances(self):
        return self.instances

    def rmLocation(self, coord, autoclean=True):
        """Forget that this tile is used at `coord`; with autoclean, free the tile when unused."""
        if coord in self.locations:
            self.locations.remove(coord)
        if autoclean and len(self.locations) == 0:
            self.map.tiles[self.ID] = None  # Mark ready for recovery
            self.map._tile_idmap.pop(self.GetHash(), None)

    def addLocation(self, coord):
        if coord not in self.locations:
            self.locations.append(coord)

    def __str__(self):
        return self._serialize()

    def __ne__(self, tile):
        return not self.__eq__(tile)

    def __eq__(self, other):
        # Equal when hashes match, or (hash unavailable) when the instance stacks match exactly.
        return other and ((other._hash and self._hash and self._hash == other._hash)
                          or (len(self.instances) == len(other.instances) and self.instances == other.instances))
        # else:
        #     return all(self.instances[i] == other.instances[i] for i in xrange(len(self.instances)))

    def _serialize(self):
        return ','.join([str(i) for i in self.GetAtoms()])

    def RenderToMapTile(self, passnum, basedir, renderflags, **kwargs):
        """
        Render this tile's atom stack onto a 96x96 RGBA image centered at (32, 32).

        Two-pass: pass 0 skips tiles containing pixel-offset atoms (returns early);
        pass 1 renders only those tiles. Returns the image, or None when the pass
        does not apply.
        """
        img = Image.new('RGBA', (96, 96))

        self.offset = (32, 32)

        foundAPixelOffset = False
        render_types = kwargs.get('render_types', ())
        skip_alpha = kwargs.get('skip_alpha', False)

        # for atom in sorted(self.GetAtoms(), reverse=True):
        for atom in self.SortAtoms():
            if len(render_types) > 0:
                found = False
                for path in render_types:
                    if atom.path.startswith(path):
                        found = True
                if not found:
                    continue
            aid = atom.ID

            # Ignore /areas.  They look like ass.
            if atom.path.startswith('/area'):
                if not (renderflags & MapRenderFlags.RENDER_AREAS):
                    continue

            # We're going to turn space black for smaller images.
            if atom.path == '/turf/space':
                if not (renderflags & MapRenderFlags.RENDER_STARS):
                    continue

            if 'icon' not in atom.properties:
                logging.critical('UNKNOWN ICON IN {0} (atom #{1})'.format(self.origID, aid))
                logging.info(atom.MapSerialize())
                logging.info(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES))
                continue

            dmi_file = atom.properties['icon'].value

            if 'icon_state' not in atom.properties:
                # Grab default icon_state ('') if we can't find the one defined.
                atom.properties['icon_state'] = BYONDString("")

            state = atom.properties['icon_state'].value

            direction = SOUTH
            if 'dir' in atom.properties:
                try:
                    direction = int(atom.properties['dir'].value)
                except ValueError:
                    logging.critical('FAILED TO READ dir = ' + repr(atom.properties['dir'].value))
                    continue

            icon_key = '{0}|{1}|{2}'.format(dmi_file, state, direction)
            frame = None
            pixel_x = 0
            pixel_y = 0

            if icon_key in _icons:
                frame, pixel_x, pixel_y = _icons[icon_key]
            else:
                dmi_path = os.path.join(basedir, dmi_file)
                dmi = None
                if dmi_path in _dmis:
                    dmi = _dmis[dmi_path]
                else:
                    try:
                        dmi = DMI(dmi_path)
                        dmi.loadAll()
                        _dmis[dmi_path] = dmi
                    except Exception as e:
                        print(str(e))
                        for prop in ['icon', 'icon_state', 'dir']:
                            print('\t{0}'.format(atom.dumpPropInfo(prop)))
                        pass
                if dmi.img is None:
                    logging.warning('Unable to open {0}!'.format(dmi_path))
                    continue

                if dmi.img.mode not in ('RGBA', 'P'):
                    logging.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode))

                if direction not in IMAGE_INDICES:
                    logging.warn('Unrecognized direction {} on atom {} in tile {}!'.format(direction, atom.MapSerialize(), self.origID))
                    direction = SOUTH  # DreamMaker property editor shows dir = 2.  WTF?

                frame = dmi.getFrame(state, direction, 0)
                if frame == None:
                    # Get the error/default state.
                    frame = dmi.getFrame("", direction, 0)

                if frame == None:
                    continue

                if frame.mode != 'RGBA':
                    frame = frame.convert("RGBA")

                pixel_x = 0
                if 'pixel_x' in atom.properties:
                    pixel_x = int(atom.properties['pixel_x'].value)

                pixel_y = 0
                if 'pixel_y' in atom.properties:
                    pixel_y = int(atom.properties['pixel_y'].value)

                _icons[icon_key] = (frame, pixel_x, pixel_y)

            # Handle BYOND alpha and coloring
            c_frame = frame
            alpha = int(atom.getProperty('alpha', 255))
            if skip_alpha:
                alpha = 255
            color = atom.getProperty('color', '#FFFFFF')
            if alpha != 255 or color != '#FFFFFF':
                c_frame = tint_image(frame, BYOND2RGBA(color, alpha))

            img.paste(c_frame, (32 + pixel_x, 32 - pixel_y), c_frame)  # Add to the top of the stack.
            if pixel_x != 0 or pixel_y != 0:
                if passnum == 0:
                    return  # Wait for next pass
                foundAPixelOffset = True

        if passnum == 1 and not foundAPixelOffset:
            return None

        if not self.areaSelected:
            # Fade out unselected tiles.
            bands = list(img.split())
            # Excluding alpha band
            for i in range(3):
                bands[i] = bands[i].point(lambda x: x * 0.4)
            img = Image.merge(img.mode, bands)

        return img


class MapLayer:
    """One z-level of the map: a 2D grid of tile IDs into the map's tile registry."""
    def __init__(self, z, _map, height=255, width=255):
        self.initial_load = False
        self.map = _map
        self.min = (0, 0)
        self.max = (height - 1, width - 1)
        self.tiles = None
        self.Resize(height, width)
        self.z = z

    def GetTile(self, x, y):
        """Return a detached copy of the tile at (x, y), tagged with its coordinates."""
        # return self.tiles[y][x]
        t = self.map.GetTileByID(self.tiles[x, y])
        t.coords = (x, y, self.z)
        return t

    def SetTile(self, x, y, tile):
        '''
        :param x int:
        :param y int:
        :param tile Tile:
        '''
        '''
        if not self.initial_load:
            # Remove old tile.
            oldid = self.tiles[x, y]
            if oldid < len(self.map.instances):
                t = self.map.tiles[oldid]
                if t:
                    t.rmLocation((x, y, self.z))
        '''
        # Set new tile.
        if not self.initial_load:
            # Register the tile (deduplicated by hash) and store its ID in the grid.
            tile.ID = self.map.UpdateTile(tile)
        self.tiles[x, y] = tile.ID
        #self.map.tiles[tile.ID].addLocation((x, y, self.z))

    def SetTileID(self, x, y, newID):
        '''
        :param x int:
        :param y int:
        :param newID int:
        '''
        if newID is None:
            raise Exception('newID cannot be None')
        t = self.map.tiles[newID]
        if t is None:
            raise KeyError('Unknown tile #{}'.format(newID))
        #self.SetTile(x, y, t)
        '''
        if not self.initial_load:
            # Remove old tile.
            oldid = self.tiles[x, y]
            if oldid < len(self.map.instances):
                t = self.map.tiles[oldid]
                if t:
                    t.rmLocation((x, y, self.z))
        '''
        self.tiles[x, y] = newID
        #self.map.tiles[newID].addLocation((x, y, self.z))

    def Resize(self, height, width):
        """(Re)allocate the tile-ID grid, filling a fresh grid with the map's base tile."""
        self.height = height
        self.width = width
        basetile = self.map.basetile;
        if self.tiles is None:
            self.tiles = numpy.empty((height, width), int)  # object)
            for y in xrange(height):
                for x in xrange(width):
                    self.SetTile(x, y, basetile)
        else:
            self.tiles.resize(height, width)
        # self.tiles = [[Tile(self.map) for _ in xrange(width)] for _ in xrange(height)]


class MapRenderFlags:
    # Bit flags controlling what RenderToMapTile draws.
    RENDER_STARS = 1
    RENDER_AREAS = 2


class Map:
    """In-memory BYOND map: z-levels of tile IDs plus deduplicated tile and atom registries."""
    def __init__(self, tree=None, **kwargs):
        self.zLevels = []
        self._instance_idmap = {}  # md5 -> id
        self._tile_idmap = {}  # md5 -> id
        self.basetile = Tile(self)
        self.instances = []  # Atom
        self.tiles = []  # Tile
        self.DMIs = {}
        self.tree = tree
        self.generatedTexAtlas = False
        self.selectedAreas = ()
        self.whitelistTypes = None
        self.forgiving_atom_lookups = kwargs.get('forgiving_atom_lookups', False)
        self.log = logging.getLogger(__name__ + '.Map')
        self.missing_atoms = set()

        self.basetile.UpdateHash();

    def ResetTilestore(self):
        '''For loading maps.
Resets tile data to a pristine state.''' self.instances = [] # Atom self.tiles = [] # Tile self.basetile = None def GetTileByID(self, tileID): t = self.tiles[tileID] if t is None: return None t = t.copy() t.master = False return t def GetInstance(self, atomID): a=None try: a = self.instances[atomID] except IndexError as e: self.log.critical('Unable to find instance {}!') raise e if a is None: # print('WARNING: #{0} not found'.format(atomID)) return None a = a.copy() # a.master = False return a def UpdateTile(self, t): ''' Update tile registry. :param t Tile: Tile to update. :return Tile ID: ''' thash = t.GetHash() # if t.ID >= 0 and t.ID < len(self.tiles) and self.tiles[t.ID] is not None: # self.tiles[t.ID].rmLocation(t.coords) tiles_action = "-" ''' if t in self.tiles: t.ID = self.tiles.index(t) else: ''' idmap_action = "-" if thash not in self._tile_idmap: idmap_action = "Added" t.ID = len(self.tiles) self.tiles += [t.copy()] self._tile_idmap[thash] = t.ID tiles_action = "Added" #print('Assigned ID #{} to tile {}'.format(t.ID,thash)) elif self._tile_idmap[thash] != t.ID: t.ID = self._tile_idmap[thash] idmap_action = "Updated" #print('Updated tile {1} to ID #{0}'.format(t.ID,thash)) #print('Updated #{} - Tiles: {}, idmap: {}'.format(t.ID, thash, tiles_action, idmap_action)) self.tiles[t.ID].addLocation(t.coords) return t.ID def UpdateAtom(self, a): ''' Update tile registry. :param a Atom: Tile to update. 
''' thash = a.GetHash() if a.ID and len(self.instances) < a.ID and self.instances[a.ID] is not None: self.instances[a.ID].rmLocation(self, a.coords) if thash not in self._instance_idmap: a.ID = len(self.instances) self.instances += [a.copy()] self._instance_idmap[thash] = a.ID #print('Assigned ID #{} to atom {}'.format(a.ID,thash)) else: a.ID = self._instance_idmap[thash] if a.coords is not None: self.instances[a.ID].addLocation(a.coords) return a.ID def CreateZLevel(self, height, width, z= -1): zLevel = MapLayer(z if z >= 0 else len(self.zLevels), self, height, width) if z >= 0: self.zLevels[z] = zLevel else: self.zLevels.append(zLevel) return zLevel def Atoms(self): '''Iterates over all instances in the map. ''' return AtomIterator(self) def Tiles(self): '''Iterates over all tiles of the map. ''' return TileIterator(self) def Locations(self): return LocationIterator(self) def Load(self, filename, **kwargs): _, ext = os.path.splitext(filename) fmt = kwargs.get('format', 'dmm2' if ext == 'dmm2' else 'dmm') reader = GetMapFormat(self, fmt) reader.Load(filename, **kwargs) def Save(self, filename, **kwargs): _, ext = os.path.splitext(filename) fmt = kwargs.get('format', 'dmm2' if ext == 'dmm2' else 'dmm') reader = GetMapFormat(self, fmt) reader.Save(filename, **kwargs) def writeMap2(self, filename, flags=0): self.filename = filename tileFlags = 0 atomFlags = 0 if flags & Map.WRITE_OLD_IDS: tileFlags |= Tile.FLAG_USE_OLD_ID atomFlags |= Atom.FLAG_USE_OLD_ID padding = len(self.tileTypes[-1].ID2String()) with open(filename, 'w') as f: f.write('// Atom Instances\n') for atom in self.instances: f.write('{0} = {1}\n'.format(atom.ID, atom.MapSerialize(atomFlags))) f.write('// Tiles\n') for tile in self.tileTypes: f.write('{0}\n'.format(tile.MapSerialize2(tileFlags, padding))) f.write('// Layout\n') for z in self.zLevels.keys(): f.write('\n(1,1,{0}) = {{"\n'.format(z)) zlevel = self.zLevels[z] for y in xrange(zlevel.height): for x in xrange(zlevel.width): tile = 
zlevel.GetTileAt(x, y) if flags & Map.WRITE_OLD_IDS: f.write(tile.origID) else: f.write(tile.ID2String(padding)) f.write("\n") f.write('"}\n') def GetTileAt(self, x, y, z): ''' :param int x: :param int y: :param int z: :rtype Tile: ''' if z < len(self.zLevels): return self.zLevels[z].GetTile(x, y) def CopyTileAt(self, x, y, z): ''' :param int x: :param int y: :param int z: :rtype Tile: ''' return self.GetTileAt(x, y, z).copy() def SetTileAt(self, x, y, z, tile): ''' :param int x: :param int y: :param int z: ''' if z < len(self.zLevels): self.zLevels[z].SetTile(x, y, tile) def CreateTile(self): ''' :rtype Tile: ''' return Tile(self) def generateTexAtlas(self, basedir, renderflags=0): if self.generatedTexAtlas: return print('--- Generating texture atlas...') self._icons = {} self._dmis = {} self.generatedTexAtlas = True for tid in xrange(len(self.tileTypes)): tile = self.tileTypes[tid] img = Image.new('RGBA', (96, 96)) tile.offset = (32, 32) tile.areaSelected = True tile.render_deferred = False for atom in sorted(tile.GetAtoms(), reverse=True): aid = atom.id # Ignore /areas. They look like ass. if atom.path.startswith('/area'): if not (renderflags & MapRenderFlags.RENDER_AREAS): continue # We're going to turn space black for smaller images. if atom.path == '/turf/space': if not (renderflags & MapRenderFlags.RENDER_STARS): continue if 'icon' not in atom.properties: print('CRITICAL: UNKNOWN ICON IN {0} (atom #{1})'.format(tile.origID, aid)) print(atom.MapSerialize()) print(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES)) continue dmi_file = atom.properties['icon'].value if 'icon_state' not in atom.properties: # Grab default icon_state ('') if we can't find the one defined. 
atom.properties['icon_state'] = BYONDString("") state = atom.properties['icon_state'].value direction = SOUTH if 'dir' in atom.properties: try: direction = int(atom.properties['dir'].value) except ValueError: print('FAILED TO READ dir = ' + repr(atom.properties['dir'].value)) continue icon_key = '{0}:{1}[{2}]'.format(dmi_file, state, direction) frame = None pixel_x = 0 pixel_y = 0 if icon_key in self._icons: frame, pixel_x, pixel_y = self._icons[icon_key] else: dmi_path = os.path.join(basedir, dmi_file) dmi = None if dmi_path in self._dmis: dmi = self._dmis[dmi_path] else: try: dmi = self.loadDMI(dmi_path) self._dmis[dmi_path] = dmi except Exception as e: print(str(e)) for prop in ['icon', 'icon_state', 'dir']: print('\t{0}'.format(atom.dumpPropInfo(prop))) pass if dmi.img is None: self.log.warn('Unable to open {0}!'.format(dmi_path)) continue if dmi.img.mode not in ('RGBA', 'P'): self.log.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode)) if direction not in IMAGE_INDICES: self.log.warn('Unrecognized direction {} on atom {} in tile {}!'.format(direction, atom.MapSerialize(), tile.origID)) direction = SOUTH # DreamMaker property editor shows dir = 2. WTF? frame = dmi.getFrame(state, direction, 0) if frame == None: # Get the error/default state. frame = dmi.getFrame("", direction, 0) if frame == None: continue if frame.mode != 'RGBA': frame = frame.convert("RGBA") pixel_x = 0 if 'pixel_x' in atom.properties: pixel_x = int(atom.properties['pixel_x'].value) pixel_y = 0 if 'pixel_y' in atom.properties: pixel_y = int(atom.properties['pixel_y'].value) self._icons[icon_key] = (frame, pixel_x, pixel_y) img.paste(frame, (32 + pixel_x, 32 - pixel_y), frame) # Add to the top of the stack. if pixel_x != 0 or pixel_y != 0: tile.render_deferred = True tile.frame = img # Fade out unselected tiles. 
bands = list(img.split()) # Excluding alpha band for i in range(3): bands[i] = bands[i].point(lambda x: x * 0.4) tile.unselected_frame = Image.merge(img.mode, bands) self.tileTypes[tid] = tile def renderAtom(self, atom, basedir, skip_alpha=False): if 'icon' not in atom.properties: logging.critical('UNKNOWN ICON IN ATOM #{0} ({1})'.format(atom.ID, atom.path)) logging.info(atom.MapSerialize()) logging.info(atom.MapSerialize(Atom.FLAG_INHERITED_PROPERTIES)) return None # else: # logging.info('Icon found for #{}.'.format(atom.ID)) dmi_file = atom.properties['icon'].value if dmi_file is None: return None # Grab default icon_state ('') if we can't find the one defined. state = atom.getProperty('icon_state', '') direction = SOUTH if 'dir' in atom.properties: try: direction = int(atom.properties['dir'].value) except ValueError: logging.critical('FAILED TO READ dir = ' + repr(atom.properties['dir'].value)) return None icon_key = '{0}|{1}|{2}'.format(dmi_file, state, direction) frame = None pixel_x = 0 pixel_y = 0 if icon_key in _icons: frame, pixel_x, pixel_y = _icons[icon_key] else: dmi_path = os.path.join(basedir, dmi_file) dmi = None if dmi_path in _dmis: dmi = _dmis[dmi_path] else: try: dmi = DMI(dmi_path) dmi.loadAll() _dmis[dmi_path] = dmi except Exception as e: print(str(e)) for prop in ['icon', 'icon_state', 'dir']: print('\t{0}'.format(atom.dumpPropInfo(prop))) pass if dmi.img is None: logging.warning('Unable to open {0}!'.format(dmi_path)) return None if dmi.img.mode not in ('RGBA', 'P'): logging.warn('{} is mode {}!'.format(dmi_file, dmi.img.mode)) if direction not in IMAGE_INDICES: logging.warn('Unrecognized direction {} on atom {}!'.format(direction, str(atom))) direction = SOUTH # DreamMaker property editor shows dir = 2. WTF? frame = dmi.getFrame(state, direction, 0) if frame == None: # Get the error/default state. 
frame = dmi.getFrame("", direction, 0) if frame == None: return None if frame.mode != 'RGBA': frame = frame.convert("RGBA") pixel_x = 0 if 'pixel_x' in atom.properties: pixel_x = int(atom.properties['pixel_x'].value) pixel_y = 0 if 'pixel_y' in atom.properties: pixel_y = int(atom.properties['pixel_y'].value) _icons[icon_key] = (frame, pixel_x, pixel_y) # Handle BYOND alpha and coloring c_frame = frame alpha = int(atom.getProperty('alpha', 255)) if skip_alpha: alpha = 255 color = atom.getProperty('color', '#FFFFFF') if alpha != 255 or color != '#FFFFFF': c_frame = tint_image(frame, BYOND2RGBA(color, alpha)) return c_frame def generateImage(self, filename_tpl, basedir='.', renderflags=0, z=None, **kwargs): ''' Instead of generating on a tile-by-tile basis, this creates a large canvas and places each atom on it after sorting layers. This resolves the pixel_(x,y) problem. ''' if z is None: for z in range(len(self.zLevels)): self.generateImage(filename_tpl, basedir, renderflags, z, **kwargs) return self.selectedAreas = () skip_alpha = False render_types = () if 'area' in kwargs: self.selectedAreas = kwargs['area'] if 'render_types' in kwargs: render_types = kwargs['render_types'] if 'skip_alpha' in kwargs: skip_alpha = kwargs['skip_alpha'] print('Checking z-level {0}...'.format(z)) instancePositions = {} for y in range(self.zLevels[z].height): for x in range(self.zLevels[z].width): t = self.zLevels[z].GetTile(x, y) # print('*** {},{}'.format(x,y)) if t is None: continue if len(self.selectedAreas) > 0: renderThis = True for atom in t.GetAtoms(): if atom.path.startswith('/area'): if atom.path not in self.selectedAreas: renderThis = False if not renderThis: continue for atom in t.GetAtoms(): if atom is None: continue iid = atom.ID if atom.path.startswith('/area'): if atom.path not in self.selectedAreas: continue # Check for render restrictions if len(render_types) > 0: found = False for path in render_types: if atom.path.startswith(path): found = True if not found: 
continue # Ignore /areas. They look like ass. if atom.path.startswith('/area'): if not (renderflags & MapRenderFlags.RENDER_AREAS): continue # We're going to turn space black for smaller images. if atom.path == '/turf/space': if not (renderflags & MapRenderFlags.RENDER_STARS): continue if iid not in instancePositions: instancePositions[iid] = [] # pixel offsets ''' pixel_x = int(atom.getProperty('pixel_x', 0)) pixel_y = int(atom.getProperty('pixel_y', 0)) t_o_x = int(round(pixel_x / 32)) t_o_y = int(round(pixel_y / 32)) pos = (x + t_o_x, y + t_o_y) ''' pos = (x, y) instancePositions[iid].append(pos) t=None if len(instancePositions) == 0: return print(' Rendering...') levelAtoms = [] for iid in instancePositions: levelAtoms += [self.GetInstance(iid)] pic = Image.new('RGBA', ((self.zLevels[z].width + 2) * 32, (self.zLevels[z].height + 2) * 32), "black") # Bounding box, used for cropping. bbox = [99999, 99999, 0, 0] # Replace {z} with current z-level. filename = filename_tpl.replace('{z}', str(z)) pastes = 0 for atom in sorted(levelAtoms, reverse=True): if atom.ID not in instancePositions: levelAtoms.remove(atom) continue icon = self.renderAtom(atom, basedir, skip_alpha) if icon is None: levelAtoms.remove(atom) continue for x, y in instancePositions[atom.ID]: new_bb = self.getBBoxForAtom(x, y, atom, icon) # print('{0},{1} = {2}'.format(x, y, new_bb)) # Adjust cropping bounds if new_bb[0] < bbox[0]: bbox[0] = new_bb[0] if new_bb[1] < bbox[1]: bbox[1] = new_bb[1] if new_bb[2] > bbox[2]: bbox[2] = new_bb[2] if new_bb[3] > bbox[3]: bbox[3] = new_bb[3] pic.paste(icon, new_bb, icon) pastes += 1 icon=None # Cleanup levelAtoms.remove(atom) levelAtoms = None instancePositions = None if len(self.selectedAreas) == 0: # Autocrop (only works if NOT rendering stars or areas) #pic = trim(pic) # FIXME: MemoryError on /vg/. 
pic=pic # Hack else: # if nSelAreas == 0: # continue pic = pic.crop(bbox) if pic is not None: # Saev filedir = os.path.dirname(os.path.abspath(filename)) if not os.path.isdir(filedir): os.makedirs(filedir) print(' -> {} ({}x{}) - {} objects'.format(filename, pic.size[0], pic.size[1], pastes)) pic.save(filename, 'PNG') def getBBoxForAtom(self, x, y, atom, icon): icon_width, icon_height = icon.size pixel_x = int(atom.getProperty('pixel_x', 0)) pixel_y = int(atom.getProperty('pixel_y', 0)) return self.tilePosToBBox(x, y, pixel_x, pixel_y, icon_height, icon_width) def tilePosToBBox(self, tile_x, tile_y, pixel_x, pixel_y, icon_height, icon_width): # Tile Pos X = tile_x * 32 Y = tile_y * 32 # pixel offsets X += pixel_x Y -= pixel_y # BYOND coordinates -> PIL coords. # BYOND uses LOWER left. # PIL uses UPPER left X += 0 Y += 32 - icon_height return ( X, Y, X + icon_width, Y + icon_height ) # So we can read a map without parsing the tree. def GetAtom(self, path): if self.tree is not None: atom = self.tree.GetAtom(path) if atom is None and self.forgiving_atom_lookups: self.missing_atoms.add(path) return Atom(path, '(map)', missing=True) return atom return Atom(path)
#!/usr/bin/env python import os import sys import numpy as np import warnings from astropy.io import fits from astropy.utils.exceptions import AstropyWarning from astropy.table import Table, vstack, Column from astropy.time import Time import healpy as hp from dlnpyutils import utils as dln, coords #import subprocess import time from argparse import ArgumentParser import socket #from dustmaps.sfd import SFDQuery from astropy.coordinates import SkyCoord #from sklearn.cluster import DBSCAN #from scipy.optimize import least_squares #from scipy.interpolate import interp1d # Combine data for one NSC healpix region if __name__ == "__main__": parser = ArgumentParser(description='Combine NSC Instcal Catalogs.') parser.add_argument('version', type=str, nargs=1, help='Version number') parser.add_argument('--makelist', action='store_true', help='Make healpix list') parser.add_argument('-r','--redo', action='store_true', help='Redo this HEALPIX') parser.add_argument('--nmulti', type=int, default=20, help='Number of jobs to run') parser.add_argument('--nocuts', action='store_true', help='Do not apply any quality cuts') args = parser.parse_args() t0 = time.time() hostname = socket.gethostname() host = hostname.split('.')[0] # Inputs version = args.version redo = args.redo makelist = args.makelist nmulti = args.nmulti nocuts = args.nocuts nside = 128 radeg = 180 / np.pi # on thing/hulk use if (host == "thing") or (host == "hulk"): basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/" mssdir = "/mss1/" localdir = "/d0/" tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/" # on gp09 use if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"): basedir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/" mssdir = "/net/mss1/" localdir = "/data0/" tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/" t0 = time.time() # Combine all of the data if ~os.path.exists(basedir+'combine'): os.mkdir(basedir+'combine/') if 
~os.path.exists(basedir+'combine/logs/'): os.mkdir(basedir+'combine/logs/') if ~os.path.exists(localdir+'dnidever/nsc/instcal/'+version+'/'): os.mkdir(localdir+'dnidever/nsc/instcal/'+version+'/') plotsdir = basedir+'plots/' if ~os.path.exists(plotsdir): os.mkdir(plotsdir) # Log file #------------------ # format is nsc_combine_main.DATETIME.log ltime = time.localtime() # time.struct_time(tm_year=2019, tm_mon=7, tm_mday=22, tm_hour=0, tm_min=30, tm_sec=20, tm_wday=0, tm_yday=203, tm_isdst=1) smonth = str(ltime[1]) if ltime[1]<10: smonth = '0'+smonth sday = str(ltime[2]) if ltime[2]<10: sday = '0'+sday syear = str(ltime[0])[2:] shour = str(ltime[3]) if ltime[3]<10: shour='0'+shour sminute = str(ltime[4]) if ltime[4]<10: sminute='0'+sminute ssecond = str(int(ltime[5])) if ltime[5]<10: ssecond='0'+ssecond logtime = smonth+sday+syear+shour+sminute+ssecond logfile = basedir+'combine/logs/nsc_instcal_combine_main.'+logtime+'.log' #JOURNAL,logfile print("Combining NOAO InstCal catalogs") #goto,STARTRUNNING # Restore the calibration summary file temp = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',1) schema = dict(temp.dtype.fields) schema['chipindx'] = (int,0) schema['ngoodchipwcs'] = (int,0) schema['wcscal'] = (np.str,50) schema['telstat'] = (np.str,50) dt = np.dtype(schema) calstr = np.zeros(len(temp),dtype=dt) calstr['chipindx'] = -1 for n in temp.dtype.names: calstr[n]=temp[n] # Add WCSCAL and TELSTAT information coords = fits.getdata(basedir+'lists/allcoords.fits',1) fluxfile = calstr['file'] fluxfile = fluxfile.replace('/net','') ind1,ind2 = dln.match(fluxfile,coords['file']) calstr['wcscal'][ind1] = coords['wcscal'][ind2] # Failed (3153), Poor (14), Successful (308190) calstr['telstat'][ind1] = coords['telstat'][ind2] # NAN (68188), Not (1222), Track (241826), UNKNOWN (116), Unknown (5) # the 2054 failed exposures did not match b/c no fluxfile info # Only want exposures with successful SE processing gd,ncalstr = dln.where(calstr['success']==1) calstr = 
calstr[gd] si = np.argsort(calstr['expdir']) calstr = calstr[si] chstr = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',2) nchstr = len(chstr) # Get indices for CHSTR siexp = np.argsort(chstr['expdir']) chstr = chstr[siexp] expdir = chstr['expdir'] brklo,nbrk = dln.where(expdir != np.roll(expdir,1)) brkhi = [brklo[1:nbrk]-1,len(expdir)-1] nchexp = brkhi-brklo+1 if ncalstr==len(brklo): Exception('number of exposures in CALSTR and CHSTR do not match') calstr['chipindx'] = brklo calstr['nchips'] = nchexp # Getting number of good chip WCS for each exposures for i in range(len(calstr): calstr['ngoodchipwcs'][i] = np.sum(chstr['ngaiamatch']][brklo[i]:brkhi[i]+1]>0) # Fixing absolute paths of flux filename cfile = calstr['file'] cfile = cfile.replace('/net/mss1/','') cfile = cfile.replace('/mss1/','') # Fixing very negative RAs print('FIXING NEGATIVE RAs in CALSTR and CHSTR') #bdra, = np.where(chstr.cenra lt -180,nbdra) bdra,nbdra = dln.where(chstr['cenra']<0) dum,uibd = np.unique(chstr['expdir'][bdra],return_indices=True) ind1,ind2 = dln.match(calstr['expdir'],chstr['expdir'][bdra[uibd]]) nmatch = len(ind1) for i in range(nmatch): ind3,ind4 = dln.match(chstr['expdir'][bdra],calstr['expdir'][ind1[i]]) # Fix CALSTR RA chra = chstr['cenra'][bdra[ind3]] bd1,nbd1 = dln.where(chra < -180) if nbd1>0: chra[bd1]+=360 cenra = np.mean(dln.minmax(chra)) if cenra<0: cenra+=360 calstr['ra'][ind1[i]] = cenra # Fix CHSTR CENRA bd2,nbd2 = dln.where(chra<0) if nbd2>0: chra[bd2]+=360 chstr['cenra']][bdra[ind3]] = chra # Fix CHSTR VRA vra = chstr['vra'][bdra[ind3]] bd3,nbd3 = dln.where(vra<0) if nbd3>0: vra[bd3]+=360 chstr['vra'][bdra[ind3]] = vra # Fix instrument in STR and CHSTR print('FIXING INSTRUMENT IN STR AND CHSTR') type = ['c4d','k4m','ksb'] for i=0,len(type)-1: gd,ngd = dln.where(stregex(calstr.expdir,'/'+type[i]+'/',/boolean)==1) if ngd>0: calstr[gd].instrument=type[i] gd,ngd = dln.where(stregex(chstr.expdir,'/'+type[i]+'/',/boolean)==1) if ngd>0: 
chstr[gd].instrument=type[i] ## Fix missing AIRMASS #bdam, = np.where(str.airmass lt 0.9,nbdam) #for i=0,nbdam-1 do begin # type = ['c4d','k4m','ksb'] # obs = ['ctio','kpno','kpno'] # MATCH,str[bdam[i]].instrument,type,ind1,ind2,/sort # obsname = obs[ind2] # OBSERVATORY,obsname,obstr # lat = obstr.latitude # lon = obstr.longitude # jd = date2jd(str[bdam[i]].dateobs) # ra = str[bdam[i]].ra # dec = str[bdam[i]].dec # str[bdam[i]].airmass = AIRMASS(jd,ra,dec,lat,lon) #endfor # THIS IS STILL RETURNING -1, IS ONE OF THE VALUES WRONG?? # APPLY RELEASE-DATE CUTS list1 = fits.getdata(basedir+'lists/decam_instcal_list.fits',1) list2 = fits.getdata(basedir+'lists/mosaic3_instcal_list.fits',1) list3 = fits.getdata(basedir+'lists/bok90prime_instcal_list.fits',1) elist = np.hstack((list1,list2,list3)) fluxfile = [f[10:] for f in elist['fluxfile']] ind1,ind2 = dln.match(fluxfile,cfile) # some don't match because they were from a previous version # of the input list release_date = np.zeros(len(calstr),dtype=(np.str,100))+'2020-01-01 00:00:00' release_date[ind2] = elist['release_date'][ind1] release_date = release_date.strip().replace(' ','T') trelease = Time(release_date, format='isot', scale='utc') #release_cutoff = [2017,4,24] # v1 - April 24, 2017 #release_cutoff = [2017,10,11] # v2 - Oct 11, 2017 release_cutoff = [2019,7,9] # v3 - July 9, 2019 release_date_cutoff = ('%04d-%02d-%02d' % (release_cutoff[0],release_cutoff[1],release_cutoff[2]))+'T00:00:00' tcutoff = Time(release_date_cutoff, format='isot', scale='utc') gdrelease,ngdrelease,bdrelease,nbdrelease = dln.where(trelease.mjd <= tcutoff.mjd,comp=True) print(str(ngdrelease)+' exposures are PUBLIC') calstr = calstr[gdrelease] # impose the public data cut # Zero-point structure dt_zpstr = np.dtype([('instrument',np.str,10),('filter',np.str,10),('amcoef',float,2),('thresh',0)]) zpstr = np.zeros(10,dtype=dtype_zpstr) zpstr['thresh'] = 0.5 zpstr['instrument'][0:7] = 'c4d' zpstr['filter'][0:7] = ['u','g','r','i','z','Y','VR'] 
zpstr['amcoef'][0] = [-1.60273, -0.375253] # c4d-u zpstr['amcoef'][1] = [0.277124, -0.198037] # c4d-g zpstr['amcoef'][2] = [0.516382, -0.115443] # c4d-r zpstr['amcoef'][3] = [0.380338, -0.067439] # c4d-i zpstr['amcoef'][4] = [0.123924, -0.096877] # c4d-z zpstr['amcoef'][5] = [-1.06529, -0.051967] # c4d-Y zpstr['amcoef'][6] = [1.004357, -0.081105] # c4d-VR # Mosiac3 z-band zpstr['instrument'][7] = 'k4m' zpstr['filter'][7] = 'z' zpstr['amcoef'][7] = [-2.687201, -0.73573] # k4m-z # Bok 90Prime, g and r zpstr['instrument'][8] = 'ksb' zpstr['filter'][8] = 'g' zpstr['amcoef'][8] = [-2.859646, -1.40837] # ksb-g zpstr['instrument'][9] = 'ksb' zpstr['filter'][9] = 'r' zpstr['amcoef'][9] = [-4.008771, -0.25718] # ksb-r nzpstr = len(zpstr) #STOP,'DOUBLE-CHECK THESE ZERO-POINTS!!!' # APPLY QA CUTS IN ZEROPOINT AND SEEING if ~nocuts: print('APPLYING QA CUTS') #fwhmthresh = 3.0 # arcsec, v1 fwhmthresh = 2.0 # arcsec, v2 #filters = ['u','g','r','i','z','Y','VR'] #nfilters = len(filters) #zpthresh = [2.0,2.0,2.0,2.0,2.0,2.0,2.0] #zpthresh = [0.5,0.5,0.5,0.5,0.5,0.5,0.5] badzpmask = np.zeros(len(calstr),bool)+True for i in range(nzpstr): ind,nind = dln.where((calstr['instrument']==zpstr['instrument']][i]) & (calstr['filter']==zpstr['filter'][i]) & (calstr['success']==1)) print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(nind)+' exposures') if nind>0: calstr1 = calstr[ind] zpterm = calstr1['zpterm'] bdzp,nbdzp = dln.where(~np.isfinite(zpterm)) # fix Infinity/NAN if nbdzp>0:zpterm[bdzp] = 999999.9 am = calstr1['airmass'] mjd = calstr1['mjd'] bdam,nbdam = dln.where(am < 0.9) if nbdam>0: am[bdam] = np.median(am) # I GOT TO HERE IN THE TRANSLATING!!! 
glactc,calstr1.ra,calstr1.dec,2000.0,glon,glat,1,/deg # Measure airmass dependence gg0,ngg0 = dln.where((np.abs(zpterm)<50) & (am<2.0)) coef0 = dln.poly_fit(am[gg0],zpterm[gg0],1,robust=True) zpf = dln.poly(am,coef0) sig0 = np.mad(zpterm[gg0]-zpf[gg0]) gg,ngg = dln.where(np.abs(zpterm-zpf) < (np.maximum(3.5*sig0,0.2))) coef = dln.poly_fit(am[gg],zpterm[gg],1,robust=True) print(zpstr['instrument'][i]+'-'+zpstr['filter'][i]+' '+str(coef)) # Trim out bad exposures to determine the correlations and make figures gg,ngg = dln.where(np.abs(zpterm-zpf) lt (3.5*sig0 > 0.2) and calstr1.airmass lt 2.0 and calstr1.fwhm lt 2.0 and calstr1.rarms lt 0.15 & calstr1.decrms lt 0.15 and calstr1.success eq 1 and calstr1.wcscal eq 'Successful' and calstr1.zptermerr lt 0.05 & calstr1.zptermsig lt 0.08 and (calstr1.ngoodchipwcs eq calstr1.nchips) & (calstr1.instrument ne 'c4d' or calstr1.zpspatialvar_nccd le 5 or (calstr1.instrument eq 'c4d' and calstr1.zpspatialvar_nccd gt 5 and calstr1.zpspatialvar_rms lt 0.1)) and $ np.abs(glat) gt 10 and calstr1.nrefmatch gt 100 and calstr1.exptime ge 30) # Zpterm with airmass dependence removed relzpterm = zpterm + 25 # 25 to get "absolute" zpterm relzpterm -= (zpstr['amcoef'][i])[1]*(am-1) # CURRENTLY K4M/KSB HAVE EXPTIME-DEPENDENCE IN THE ZEROPOINTS!! if (zpstr['instrument'][i]=='k4m') | (zpstr['instrument'][i]=='ksb'): print('REMOVING EXPTIME-DEPENDENCE IN K4M/KSB ZEROPOINTS!!!') relzpterm += 2.5*np.log10(calstr1['exptime']) # Fit temporal variation in zpterm mjd0 = 56200 xx = calstr1['mjd'][gg]-mjd0 yy = relzpterm[gg] invvar = 1.0/calstr1['zptermerr'][gg]**2 nord = 3 bkspace = 200 sset1 = bspline_iterfit(xx,yy,invvar=invvar,nord=nord,bkspace=bkspace,yfit=yfit1) sig1 = mad(yy-yfit1) gd,ngd = dln.where(yy-yfit1 > -3*sig1) # refit sset = bspline_iterfit(xx[gd],yy[gd],invvar=invvar[gd],nord=nord,bkspace=bkspace) yfit = bspline_valu(xx,sset) allzpfit = bspline_valu(calstr1.mjd-mjd0,sset) # Make some figures # ZPterm vs. 
airmass pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_airmass' ps_open,pfile,/color,thick=4,/encap hess,am[gg],relzpterm[gg],dx=0.01,dy=0.02,xr=[0.9,2.5],yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Airmass',ytit='Zero-point',$ tit=zpstr[i].instrument+'-'+zpstr[i].filter x = scale_vector(findgen(100),0.5,2.0) oplot,x,poly(x,coef),co=250 ps_close ps2png,pfile+'.eps',/eps # ZPterm vs. time (density) pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time_density' ps_open,pfile,/color,thick=4,/encap hess,calstr1[gg].mjd-mjd0,relzpterm[gg],dx=2,dy=0.02,yr=[-0.5,0.5]+median(relzpterm[gg]),xtit='Time (days)',ytit='Zero-point',$ tit=zpstr[i].instrument+'-'+zpstr[i].filter oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250 xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2 ps_close ps2png,pfile+'.eps',/eps # ZPterm vs. time (points) pfile = plotsdir+zpstr[i].instrument+'-'+zpstr[i].filter+'_zpterm_time' ps_open,pfile,/color,thick=4,/encap plot,calstr1[gg].mjd-mjd0,relzpterm[gg],ps=1,sym=0.5,yr=[-0.5,0.5]+median(relzpterm[gg]),xs=1,ys=1,xtit='Time (days)',ytit='Zero-point',$ tit=zpstr[i].instrument+'-'+zpstr[i].filter,thick=1 oplot,calstr1[gg].mjd-mjd0,allzpfit[gg],ps=1,sym=0.3,co=250 xyouts,50,-0.45+median(relzpterm[gg]),'MJD!d0!n = '+str(mjd0,2),align=0,charsize=1.2 ps_close ps2png,pfile+'.eps',/eps # Remove temporal variations to get residual values relzpterm -= allzpfit # Find the GOOD exposures #------------------------ # We are using ADDITIVE zpterm # calmag = instmag + zpterm # if there are clouds then instmag is larger/fainter # and zpterm is smaller (more negative) #bdind, = np.where(calstr[ind].zpterm-medzp lt -zpthresh[i],nbdind) gdmask = (relzpterm >= -zpstr['thresh'][i]) & (relzpterm <= zpstr['thresh'][i]) gdind,ngdind,bdind,nbdind = dln.where(gdmask,comp=True) print(' '+str(nbdind)+' exposures with ZPTERM below the threshold') if ngdind>0: badzpmask[ind[gdind]] = 0 # Get bad DECaLS and 
SMASH exposures badexp = np.zeros(len(calstr),bool) READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/smash_badexposures.txt',smashexpnum,format='A',comment='#',/silent MATCH,int(calstr.expnum),int(smashexpnum),ind1,ind2,/sort,count=nmatch if nmatch>0: badexp[ind1] = 1 badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/decals_bad_expid.txt',decalsexpnum,format='A',comment='#',/silent MATCH,int(calstr.expnum),int(decalsexpnum),ind1,ind2,/sort,count=nmatch if nmatch>0: badexp[ind1] = 1 badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='c4d') # make sure they are DECam exposures READCOL,'/home/dnidever/projects/noaosourcecatalog/obslog/mzls_bad_expid.txt',mzlsexpnum,format='A',comment='#',/silent MATCH,int(calstr.expnum),int(mzlsexpnum),ind1,ind2,/sort,count=nmatch if nmatch>0: badexp[ind1] = 1 badexp[ind1] = badexp[ind1] & (calstr['instrument'][ind1]=='k4m') # make sure they are Mosaic3 exposures # Final QA cuts # Many of the short u-band exposures have weird ZPTERMs, not sure why # There are a few exposures with BAD WCS, RA>360! 
bdexp,nbdexp = dln.where((calstr['success']==0) | # SE failure (calstr['wcscal']!='Successful') | # CP WCS failure (calstr['fwhm']>fwhmthresh) | # bad seeing (calstr['ra']>360) | # bad WCS/coords (calstr['rarms']>0.15) | (calstr['decrms']>0.15) | # bad WCS (badzpmask==1) | # bad ZPTERM (calstr['zptermerr']>0.05) | # bad ZPTERMERR (calstr['nrefmatch']<5) | # few phot ref match (badexp==1) | # bad SMASH/LS exposure #(calstr['ngoodchipwcs']<calstr['nchips'] | # not all chips astrom calibrated ((calstr['instrument']=='c4d') & (calstr['zpspatialvar_nccd']>5) & (calstr['zpspatialvar_rms']>0.1)))) # bad spatial zpterm # rarms/decrms, nrefmatch print('QA cuts remove '+str(nbdexp)+' exposures') # Remove torem = np.zeros(nchstr,bool) for i in range(nbdexp): torem[calstr[bdexp[i]].chipindx:calstr[bdexp[i]].chipindx+calstr[bdexp[i]].nchips-1]=1 bdchstr,nbdchstr = dln.where(torem==1) REMOVE,bdchstr,chstr REMOVE,bdexp,calstr # Get new CHIPINDEX values # make two arrays of old and new indices to transfer # the new index values into an array with the size of # the old CHSTR trimoldindex = lindgen(nchstr) # index into original array, but "bad" ones removed/trimed remove,bdchstr,trimoldindex trimnewindex = lindgen(len(trimoldindex)) # new index of trimmed array newindex = lonarr(nchstr)-1 newindex[trimoldindex] = trimnewindex # new index in original array newchipindex = newindex[calstr.chipindx] str.chipindx = newchipindex ncalstr = len(calstr) # SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH #STOP,'SHOULD INCLUDE CUTS ON ZTERMERR OR NPHOTMATCH' #STARTRUNNING: # CREATE LIST OF HEALPIX AND OVERLAPPING EXPOSURES # Which healpix pixels have data listfile = basedir+'lists/nsc_instcal_combine_healpix_list.fits' if makelist | ~os.path.exists(listfile): print('Finding the Healpix pixels with data') radius = 1.1 dtype_healstr = np.dtype([('file',np.str,200),('base',np.str,200),('pix',int)]) healstr = np.zeros(100000,dtype=dtype_healstr) nhealstr = len(healstr) cnt = 0 for i in 
range(ncalstr): if i % 1e3 == 0: print(str(i)) theta = (90-calstr[i].dec)/radeg phi = calstr[i].ra/radeg ANG2VEC,theta,phi,vec QUERY_DISC,nside,vec,radius,listpix,nlistpix,/deg,/inclusive # Use the chip corners to figure out which ones actually overlap chstr1 = chstr[calstr['chipindx']][i]:calstr['chipindx'][i]+calstr['nchips'][i].nchips] # rotate to tangent plane so it can handle RA=0/360 and poles properly ROTSPHCEN,chstr1.vra,chstr1.vdec,calstr[i].ra,calstr[i].dec,vlon,vlat,/gnomic # loop over healpix overlap = np.zeros(nlistpix,bool) for j in range(nlistpix): PIX2VEC_RING,nside,listpix[j],vec,vertex vertex = transpose(reform(vertex)) # [1,3,4] -> [4,3] VEC2ANG,vertex,hdec,hra,/astro ROTSPHCEN,hra,hdec,calstr[i].ra,calstr[i].dec,hlon,hlat,/gnomic # loop over chips for k in range(calstr['nchips'][i]): overlap[j] >= coords.doPolygonsOverlap(hlon,hlat,vlon[*,k],vlat[*,k]) # Only keep the healpix with real overlaps gdlistpix,ngdlistpix = dln.where(overlap==1) if ngdlistpix>0: listpix = listpix[gdlistpix] nlistpix = ngdlistpix else: del(listpix) nlistpix = 0 if nlistpix==0: Exception('No healpix for this exposure. 
Something is wrong!') # Add new elements to array if (cnt+nlistpix)>nhealstr: old = healstr healstr = np.zeros(nhealstr+10000,dtype=dtype_healstr) healstr[0:nhealstr] = old nhealstr += 1e4 del(old) # Add to the structure healstr['file'][cnt:cnt+nlistpix] = calstr['expdir'][i]+'/'+calstr['base'][i]+'_cat.fits' healstr['base'][cnt:cnt+nlistpix] = calstr['base'][i] healstr['pix'][cnt:cnt+nlistpix] = listpix cnt += nlistpix # Trim extra elements healstr = healstr[0:cnt] nhealstr = len(healstr) # Get uniq pixels ui = uniq(healstr.pix,sort(healstr.pix)) upix = healstr[ui].pix nupix = len(upix) print(calstr(nupix)+' Healpix pixels have overlapping data') # Get start/stop indices for each pixel idx = sort(healstr.pix) healstr = healstr[idx] q = healstr.pix lo,nlo = dln.where(q != np.roll(q,1)) #hi, = np.where(q ne shift(q,-1)) hi = [lo[1:nlo-1]-1,nhealstr-1] nexp = hi-lo+1 dtype_index = np.dtype([('pix',int),('lo',int),('hi',int),('nexp',int)]) index = np.zeros(nupix,dtype=dtype_index) index['pix'] = upix index['lo'] = lo index['hi'] = hi index['nexp'] = nexp npix = len(index) # Replace /net/dl1/ with /dl1/ so it will work on all machines healstr['file'] = healstr['file'].replace('/net/dl1/','/dl1/') # Write the full list plus an index print('Writing list to '+listfile) Table(healstr).write(listfile) # append other fits binary tables hdulist = fits.open(listfile) hdu = fits.table_to_hdu(Table(indexj)) # second, catalog hdulist.append(hdu) hdulist.writeto(listfile,overwrite=True) hdulist.close() if os.path.exists(listfile+'.gz'): os.remove(listfile+'.gz') ret = subprocess.call(['gzip',listfile]) # compress final catalog # Copy to local directory for faster reading speed if os.path.exists(localdir+'dnidever/nsc/instcal/'+version+'/'): os.delete(localdir+'dnidever/nsc/instcal/'+version+'/') os.copy(listfile+'.gz',localdir+'dnidever/nsc/instcal/'+version+'/') # PUT NSIDE IN HEADER!! 
# Using existing list else: print('Reading list from '+listfile) healstr = fits.getdata(listfile,1) index = fits.getdata(listfile,2) upix = index['pix'] npix = len(index) # Copy to local directory for faster reading speed file_copy,listfile,localdir+'dnidever/nsc/instcal/'+version+'/',/over # Load the list of healpix pixels for this server to be run LOCALLY pixfile = basedir+'lists/combine_pix_'+host+'.txt' READLINE,pixfile,pixlist,count=npixlist rnd = sort(randomu(1,npixlist)) # RANDOMIZE!! pixlist = int(pixlist[rnd]) print('Running '+str(npixlist)+' jobs on '+host+' with nmult='+str(nmulti)) cmd = "nsc_instcal_combine,"+str(pixlist,2)+",nside="+str(nside,2)+",version='"+version+"',/local,/filesexist" if keyword_set(redo) then cmd+=',/redo' cmddir = strarr(npixlist)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/' # Now run the combination program on each healpix pixel a = '' & read,a,prompt='Press RETURN to start' PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1 ## Make the commands #cmd = "nsc_instcal_combine,"+str(index.pix,2)+",nside="+str(nside,2)+",version='"+version+"'" #if keyword_set(redo) then cmd+=',/redo' #cmddir = strarr(npix)+localdir+'dnidever/nsc/instcal/'+version+'/tmp/' ## Check if the output file exists #if not keyword_set(redo) then begin # outfiles = dir+'combine/'+str(upix/1000,2)+'/'+str(upix,2)+'.fits.gz' # test = file_test(outfiles) # gd, = np.where(test eq 0,ngd,comp=bd,ncomp=nbd) # if nbd gt 0 then begin # print,str(nbd,2),' files already exist and /redo not set.' 
# endif # if ngd eq 0 then begin # print,'No files to process' # return # endif # print,str(ngd,2),' files left to process' # cmd = cmd[gd] # cmddir = cmddir[gd] #endif ## Prioritize longest-running jobs FIRST ## Use prediction program #PIX2ANG_RING,nside,index.pix,theta,phi #ra = phi*radeg #dec = 90-theta*radeg #glactc,ra,dec,2000.0,glon,glat,1,/deg #dt = predictcombtime(glon,glat,index.nexp) ## Do the sorting #hsi = reverse(sort(dt)) #cmd = cmd[hsi] #cmddir = cmddir[hsi] #dt = dt[hsi] #index = index[hsi] # Divide into three using total times #tot = total(dt>10) #totcum = total(dt>10,/cum) #print,min(where(totcum ge tot/3)) #print,min(where(totcum ge 2*tot/3)) #ncmd = len(cmd) #nhalf = ncmd/2 ## Randomize 1st half for hulk/thing/gp09 #cmd1 = cmd[0:(nhalf-1)] #cmdadir1 = cmddir[0:(nhalf-1)] #pix1 = index[0:(nhalf-1)].pix #index1 = index[0:(nhalf-1)] ## now randomize #rnd = sort(randomu(1,len(cmd1))) #cmd1 = cmd1[rnd] #cmddir1 = cmddir1[rnd] #pix1 = pix1[rnd] #index1 = index1[rnd] # Slice it up ## hulk, 1st ##cmd = cmd[0:(nhalf-1):3] ##cmddir = cmddir[0:(nhalf-1):3] ##pix = index[0:(nhalf-1):3].pix #cmd = cmd1[0:(nhalf/3)-1] #cmddir = cmddir1[0:(nhalf/3)-1] #pix = pix1[0:(nhalf/3)-1] # thing, 2nd ##cmd = cmd[1:(nhalf-1):3] ##cmddir = cmddir[1:(nhalf-1):3] ##pix = index[1:(nhalf-1):3].pix #cmd = cmd1[(nhalf/3):(2*nhalf/3)-1] #cmddir = cmddir1[(nhalf/3):(2*nhalf/3)-1] #pix = pix1[(nhalf/3):(2*nhalf/3)-1] # gp09, 3rd ##cmd = cmd[2:(nhalf-1):3] ##cmddir = cmddir[2:(nhalf-1):3] ##pix = index[2:(nhalf-1):3].pix #cmd = cmd1[(2*nhalf/3):*] #cmddir = cmddir1[(2*nhalf/3):*] #pix = pix1[(2*nhalf/3):*] # gp05 #cmd = cmd[nhalf:*:4] #cmddir = cmddir[nhalf:*:4] #pix = index[nhalf:*:4].pix # gp06 #cmd = cmd[nhalf+1:*:4] #cmddir = cmddir[nhalf+1:*:4] #pix = index[nhalf+1:*:4].pix # gp07 #cmd = cmd[nhalf+2:*:4] #cmddir = cmddir[nhalf+2:*:4] #pix = index[nhalf+2:*:4].pix # gp08 #cmd = cmd[nhalf+3:*:4] #cmddir = cmddir[nhalf+3:*:4] #pix = index[nhalf+3:*:4].pix ## Prioritize 
longest-running jobs FIRST ## Load the DECam run times #sum1 = mrdfits(dir+'nsccmb_summary_hulk.fits',1) #sum2 = mrdfits(dir+'nsccmb_summary_thing.fits',1) #sum3 = mrdfits(dir+'nsccmb_summary_gp09.fits',1) #sum = [sum1,sum2,sum3] #si = sort(sum.mtime) #sum = sum[si] ## only keep fairly recent ones #gd, = np.where(sum.mtime gt 1.4897704e+09,ngd) #sum = sum[gd] ## Deal with duplicates #dbl = doubles(sum.pix,count=ndbl) #alldbl = doubles(sum.pix,/all,count=nalldbl) #torem = bytarr(nalldbl) #for i=0,ndbl-1 do begin # MATCH,sum[alldbl].pix,sum[dbl[i]].pix,ind1,ind2,/sort,count=nmatch # torem[ind1[0:nmatch-2]] = 1 #endfor #bd=where(torem eq 1,nbd) #remove,alldbl[bd],sum #dt = lonarr(len(index))-1 #MATCH,index.pix,sum.pix,ind1,ind2,/sort,count=nmatch #dt[ind1] = sum[ind2].dt ## Do the sorting #hsi = reverse(sort(dt)) #cmd = cmd[hsi] #cmddir = cmddir[hsi] #dt = dt[hsi] # ## Divide into three using total times #tot = total(dt>10) #totcum = total(dt>10,/cum) #print,min(where(totcum ge tot/3)) #print,min(where(totcum ge 2*tot/3)) ## Start with healpix with low NEXP and far from MW midplane, LMC/SMC #pix2ang_ring,nside,index.pix,theta,phi #pixra = phi*radeg #pixdec = 90-theta*radeg #glactc,pixra,pixdec,2000.0,pixgl,pixgb,1,/deg #cel2lmc,pixra,pixdec,palmc,radlmc #cel2smc,pixra,pixdec,rasmc,radsmc #gdpix, = np.where(index.nexp lt 50 and np.abs(pixgb) gt 10 and radlmc gt 5 and radsmc gt 5,ngdpix) # #outfile = dldir+'users/dnidever/nsc/instcal/combine/'+str(index.pix,2)+'.fits' # Now run the combination program on each healpix pixel PBS_DAEMON,cmd,cmddir,jobs=jobs,/hyperthread,/idle,prefix='nsccmb',nmulti=nmulti,wait=1 # RUN NSC_COMBINE_SUMMARY WHEN IT'S DONE!!! 
## Load all the summary/metadata files #print,'Creating Healpix summary file' #sumstr = replicate({pix:0L,nexposures:0L,nobjects:0L,success:0},nupix) #sumstr.pix = upix #for i=0,nupix-1 do begin # if (i+1) mod 5000 eq 0 then print,i+1 # file = dir+'combine/'+str(upix[i],2)+'.fits' # if file_test(file) eq 1 then begin # meta = MRDFITS(file,1,/silent) # sumstr[i].nexposures = len(meta) # hd = headfits(file,exten=2) # sumstr[i].nobjects = sxpar(hd,'naxis2') # sumstr[i].success = 1 # endif else begin # sumstr[i].success = 0 # endelse #endfor #gd, = np.where(sumstr.success eq 1,ngd) #print,str(ngd,2),' Healpix successfully processed' #print,'Writing summary file to ',dir+'combine/nsc_instcal_combine.fits' #MWRFITS,sumstr,dir+'combine/nsc_instcal_combine.fits',/create # End logfile #------------ #JOURNAL
import unittest

import numpy

import chainer
from chainer import cuda
from chainer.testing import attr


class TestLink(unittest.TestCase):
    """Tests for a single ``chainer.Link``: parameter initialization,
    persistent values, copying, CPU/GPU transfer, and gradient helpers.
    """

    def setUp(self):
        # A link with two parameters ('x' of shape (2, 3), 'y' of shape (2,))
        # plus one persistent (non-parameter) array 'p'.
        self.link = chainer.Link(x=(2, 3), y=2)
        self.p = numpy.array([1, 2, 3], dtype='f')
        self.link.add_persistent('p', self.p)
        self.link.name = 'a'

    def check_param_init(self, name, shape, dtype):
        """Assert that parameter *name* exists with the given shape/dtype and
        that both its data and grad arrays are NaN-initialized."""
        self.assertTrue(hasattr(self.link, name))
        var = getattr(self.link, name)
        self.assertEqual(var.name, name)
        self.assertIsInstance(var, chainer.Variable)
        self.assertEqual(var.data.shape, shape)
        self.assertEqual(var.data.dtype, dtype)
        # Uninitialized parameters are filled with NaN, not zeros.
        self.assertTrue(numpy.all(numpy.isnan(var.data)))
        self.assertEqual(var.grad.shape, shape)
        self.assertEqual(var.grad.dtype, dtype)
        self.assertTrue(numpy.all(numpy.isnan(var.grad)))

    def test_init(self):
        self.check_param_init('x', (2, 3), 'f')
        # A scalar shape argument (y=2) is interpreted as a 1-d shape (2,).
        self.check_param_init('y', (2,), 'f')

    def test_add_param(self):
        self.link.add_param('z', (2, 3))
        self.check_param_init('z', (2, 3), 'f')
        self.link.add_param('w', (2, 3), dtype='d')
        self.check_param_init('w', (2, 3), 'd')

    def test_add_persistent(self):
        self.assertTrue(hasattr(self.link, 'p'))
        self.assertIs(self.link.p, self.p)
        # Persistent values need not be arrays.
        self.link.add_persistent('q', 'abc')
        self.assertTrue(hasattr(self.link, 'q'))
        self.assertEqual(self.link.q, 'abc')

    def test_copy(self):
        link = self.link.copy()
        self.assertTrue(hasattr(link, 'x'))
        self.assertTrue(hasattr(link, 'y'))
        self.assertTrue(hasattr(link, 'p'))
        # copy() creates new Variable wrappers but shares the data arrays.
        self.assertIsNot(link.x, self.link.x)
        self.assertIs(link.x.data, self.link.x.data)
        self.assertIsNot(link.y, self.link.y)
        self.assertIs(link.y.data, self.link.y.data)
        self.assertIs(link.p, self.link.p)
        # The copy is unnamed even though the original was named 'a'.
        self.assertIs(link.name, None)

    def test_to_cpu_on_cpu(self):
        # to_cpu() on a link already on CPU must be a no-op (same objects).
        x = self.link.x.data
        gx = self.link.x.grad
        y = self.link.y.data
        gy = self.link.y.grad
        p = self.link.p
        self.link.to_cpu()
        self.assertIs(self.link.x.data, x)
        self.assertIs(self.link.x.grad, gx)
        self.assertIs(self.link.y.data, y)
        self.assertIs(self.link.y.grad, gy)
        self.assertIs(self.link.p, p)

    @attr.gpu
    def test_to_cpu(self):
        # Round trip GPU -> CPU lands every array back in numpy.
        self.link.to_gpu()
        self.link.to_cpu()
        self.assertIs(self.link.xp, numpy)
        self.assertIsInstance(self.link.x.data, numpy.ndarray)
        self.assertIsInstance(self.link.x.grad, numpy.ndarray)
        self.assertIsInstance(self.link.y.data, numpy.ndarray)
        self.assertIsInstance(self.link.y.grad, numpy.ndarray)
        self.assertIsInstance(self.link.p, numpy.ndarray)

    @attr.gpu
    def test_to_gpu(self):
        cupy = cuda.cupy
        self.link.to_gpu()
        self.assertIs(self.link.xp, cupy)
        self.assertIsInstance(self.link.x.data, cupy.ndarray)
        self.assertIsInstance(self.link.x.grad, cupy.ndarray)
        self.assertIsInstance(self.link.y.data, cupy.ndarray)
        self.assertIsInstance(self.link.y.grad, cupy.ndarray)
        # Persistent arrays are transferred too.
        self.assertIsInstance(self.link.p, cupy.ndarray)

    def test_params(self):
        # Only parameters are yielded; the persistent 'p' is excluded.
        params = list(self.link.params())
        self.assertEqual({id(p) for p in params},
                         {id(self.link.x), id(self.link.y)})

    def test_namedparams(self):
        namedparams = list(self.link.namedparams())
        self.assertEqual({(name, id(p)) for name, p in namedparams},
                         {('/x', id(self.link.x)), ('/y', id(self.link.y))})

    def test_links(self):
        links = list(self.link.links())
        self.assertIs(links[0], self.link)

    def test_links_skipself(self):
        links = list(self.link.links(skipself=True))
        self.assertFalse(links)  # empty

    def test_namedlinks(self):
        pl = list(self.link.namedlinks())
        self.assertEqual(len(pl), 1)
        self.assertEqual(pl[0][0], '/')
        self.assertIs(pl[0][1], self.link)

    def test_copyparams(self):
        # copyparams() copies data arrays only; gradients stay untouched.
        self.link.x.grad.fill(0)
        self.link.y.grad.fill(1)
        gx = self.link.x.grad.copy()
        gy = self.link.y.grad.copy()
        l = chainer.Link(x=(2, 3), y=2)
        l.x.data.fill(2)
        l.x.grad.fill(3)
        l.y.data.fill(4)
        l.y.grad.fill(5)
        self.link.copyparams(l)
        numpy.testing.assert_array_equal(self.link.x.data, l.x.data)
        numpy.testing.assert_array_equal(self.link.x.grad, gx)
        numpy.testing.assert_array_equal(self.link.y.data, l.y.data)
        numpy.testing.assert_array_equal(self.link.y.grad, gy)

    def test_zerograds(self):
        gx_expect = numpy.zeros_like(self.link.x.data)
        gy_expect = numpy.zeros_like(self.link.y.data)
        self.link.zerograds()
        numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
        numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)

    def test_addgrads(self):
        # (+1) added to (-1) and (+2) added to (-2) must cancel to zero.
        l = chainer.Link(x=(2, 3), y=2)
        l.x.grad.fill(1)
        l.y.grad.fill(2)
        self.link.x.grad.fill(-1)
        self.link.y.grad.fill(-2)
        self.link.addgrads(l)
        gx_expect = numpy.zeros_like(l.x.grad)
        gy_expect = numpy.zeros_like(l.y.grad)
        numpy.testing.assert_array_equal(self.link.x.grad, gx_expect)
        numpy.testing.assert_array_equal(self.link.y.grad, gy_expect)


class TestChain(unittest.TestCase):
    """Tests for ``chainer.Chain`` (named child links), including nested
    chains: naming, traversal, copying, device transfer, gradient helpers.
    """

    def setUp(self):
        # Hierarchy: c2 -> {c1 -> {l1, l2}, l3}.
        self.l1 = chainer.Link(x=(2, 3))
        self.l2 = chainer.Link(x=2)
        self.l3 = chainer.Link(x=3)
        self.c1 = chainer.Chain(l1=self.l1)
        self.c1.add_link('l2', self.l2)
        self.c2 = chainer.Chain(c1=self.c1, l3=self.l3)

    def test_init(self):
        # Children are reachable both as attributes and by item access,
        # and registration renames each child to its key.
        self.assertIs(self.c1.l1, self.l1)
        self.assertIs(self.c1['l1'], self.l1)
        self.assertEqual(self.l1.name, 'l1')
        self.assertIs(self.c2.c1, self.c1)
        self.assertIs(self.c2['c1'], self.c1)
        self.assertEqual(self.c1.name, 'c1')
        self.assertIs(self.c2.l3, self.l3)
        self.assertIs(self.c2['l3'], self.l3)
        self.assertEqual(self.l3.name, 'l3')

    def test_add_link(self):
        self.assertIs(self.c1.l2, self.l2)
        self.assertEqual(self.l2.name, 'l2')

    def test_copy(self):
        # Deep structural copy: new link/variable objects, shared data
        # arrays, dropped gradients, child names preserved, own name reset.
        c2 = self.c2.copy()
        self.assertIs(c2.name, None)
        self.assertTrue(hasattr(c2, 'c1'))
        self.assertEqual(c2.c1.name, 'c1')
        self.assertIsNot(c2.c1, self.c1)
        self.assertEqual(c2.c1.l1.name, 'l1')
        self.assertIsNot(c2.c1.l1, self.l1)
        self.assertIsNot(c2.c1.l1.x, self.l1.x)
        self.assertIs(c2.c1.l1.x.data, self.l1.x.data)
        self.assertIs(c2.c1.l1.x.grad, None)
        self.assertIs(c2.name, None)  # NOTE: duplicate of the check above
        self.assertTrue(hasattr(c2.c1, 'l2'))
        self.assertEqual(c2.c1.l2.name, 'l2')
        self.assertIsNot(c2.c1.l2, self.l2)
        self.assertIsNot(c2.c1.l2.x, self.l2.x)
        self.assertIs(c2.c1.l2.x.data, self.l2.x.data)
        self.assertIs(c2.c1.l2.x.grad, None)
        self.assertTrue(hasattr(c2, 'l3'))
        self.assertEqual(c2.l3.name, 'l3')
        self.assertIsNot(c2.l3, self.l3)
        self.assertIsNot(c2.l3.x, self.l3.x)
        self.assertIs(c2.l3.x.data, self.l3.x.data)
        self.assertIs(c2.l3.x.grad, None)

    def test_to_cpu_on_cpu(self):
        # No-op when already on CPU: identical array objects afterwards.
        x1 = self.l1.x.data
        gx1 = self.l1.x.grad
        x2 = self.l2.x.data
        gx2 = self.l2.x.grad
        x3 = self.l3.x.data
        gx3 = self.l3.x.grad
        self.c2.to_cpu()
        self.assertIs(self.l1.x.data, x1)
        self.assertIs(self.l1.x.grad, gx1)
        self.assertIs(self.l2.x.data, x2)
        self.assertIs(self.l2.x.grad, gx2)
        self.assertIs(self.l3.x.data, x3)
        self.assertIs(self.l3.x.grad, gx3)

    @attr.gpu
    def test_to_cpu(self):
        # Transfer propagates through the whole hierarchy.
        self.c2.to_gpu()
        self.c2.to_cpu()
        self.assertIs(self.c2.xp, numpy)
        self.assertIs(self.c1.xp, numpy)
        self.assertIs(self.l1.xp, numpy)
        self.assertIs(self.l2.xp, numpy)
        self.assertIs(self.l3.xp, numpy)
        self.assertIsInstance(self.l1.x.data, numpy.ndarray)
        self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
        self.assertIsInstance(self.l2.x.data, numpy.ndarray)
        self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
        self.assertIsInstance(self.l3.x.data, numpy.ndarray)
        self.assertIsInstance(self.l3.x.grad, numpy.ndarray)

    @attr.gpu
    def test_to_gpu(self):
        cupy = cuda.cupy
        self.c2.to_gpu()
        self.assertIs(self.c2.xp, cupy)
        self.assertIs(self.c1.xp, cupy)
        self.assertIs(self.l1.xp, cupy)
        self.assertIs(self.l2.xp, cupy)
        self.assertIs(self.l3.xp, cupy)
        self.assertIsInstance(self.l1.x.data, cupy.ndarray)
        self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
        self.assertIsInstance(self.l2.x.data, cupy.ndarray)
        self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
        self.assertIsInstance(self.l3.x.data, cupy.ndarray)
        self.assertIsInstance(self.l3.x.grad, cupy.ndarray)

    def test_params(self):
        params = list(self.c2.params())
        self.assertEqual({id(p) for p in params},
                         {id(self.l1.x), id(self.l2.x), id(self.l3.x)})

    def test_namedparams(self):
        # Paths reflect the nesting: /c1/l1/x, /c1/l2/x, /l3/x.
        namedparams = list(self.c2.namedparams())
        self.assertEqual({(name, id(p)) for name, p in namedparams},
                         {('/c1/l1/x', id(self.l1.x)),
                          ('/c1/l2/x', id(self.l2.x)),
                          ('/l3/x', id(self.l3.x))})

    def test_links(self):
        links = list(self.c2.links())
        self.assertEqual({id(l) for l in links},
                         {id(l) for l in [self.l1, self.l2, self.l3,
                                          self.c1, self.c2]})

    def test_links_skipself(self):
        links = list(self.c2.links(skipself=True))
        self.assertEqual({id(l) for l in links},
                         {id(l) for l in [self.l1, self.l2, self.l3,
                                          self.c1]})

    def test_namedlinks(self):
        namedlinks = list(self.c2.namedlinks())
        self.assertEqual({(name, id(l)) for name, l in namedlinks},
                         {('/', id(self.c2)),
                          ('/c1', id(self.c1)),
                          ('/c1/l1', id(self.l1)),
                          ('/c1/l2', id(self.l2)),
                          ('/l3', id(self.l3))})

    def test_namedlinks_skipself(self):
        namedlinks = list(self.c2.namedlinks(skipself=True))
        self.assertEqual({(name, id(l)) for name, l in namedlinks},
                         {('/c1', id(self.c1)),
                          ('/c1/l1', id(self.l1)),
                          ('/c1/l2', id(self.l2)),
                          ('/l3', id(self.l3))})

    def test_children(self):
        # children() yields only direct children, not grandchildren.
        children = list(self.c2.children())
        self.assertEqual({id(c) for c in children},
                         {id(self.c1), id(self.l3)})

    def test_copyparams(self):
        l1 = chainer.Link(x=(2, 3))
        l2 = chainer.Link(x=2)
        l3 = chainer.Link(x=3)
        c1 = chainer.Chain(l1=l1, l2=l2)
        c2 = chainer.Chain(c1=c1, l3=l3)
        l1.x.data.fill(0)
        l2.x.data.fill(1)
        l3.x.data.fill(2)
        self.c2.copyparams(c2)
        numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
        numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
        numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)

    def test_zerograds(self):
        self.c2.zerograds()
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))

    def test_addgrads(self):
        # Opposite-signed grads must sum to zero across the hierarchy.
        l1 = chainer.Link(x=(2, 3))
        l2 = chainer.Link(x=2)
        l3 = chainer.Link(x=3)
        c1 = chainer.Chain(l1=l1, l2=l2)
        c2 = chainer.Chain(c1=c1, l3=l3)
        l1.x.grad.fill(1)
        l2.x.grad.fill(2)
        l3.x.grad.fill(3)
        self.l1.x.grad.fill(-1)
        self.l2.x.grad.fill(-2)
        self.l3.x.grad.fill(-3)
        self.c2.addgrads(c2)
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))


class TestChainList(unittest.TestCase):
    """Tests for ``chainer.ChainList`` (index-addressed child links),
    mirroring TestChain but with positional access and stringified-index
    names ('0', '1', ...).
    """

    def setUp(self):
        # Hierarchy: c2 -> [c1 -> [l1, l2], l3].
        self.l1 = chainer.Link(x=(2, 3))
        self.l2 = chainer.Link(x=2)
        self.l3 = chainer.Link(x=3)
        self.c1 = chainer.ChainList(self.l1)
        self.c1.add_link(self.l2)
        self.c2 = chainer.ChainList(self.c1, self.l3)

    def test_init(self):
        # Children are named after their list index.
        self.assertIs(self.c1[0], self.l1)
        self.assertEqual(self.l1.name, '0')
        self.assertIs(self.c2[0], self.c1)
        self.assertEqual(self.c1.name, '0')
        self.assertIs(self.c2[1], self.l3)
        self.assertEqual(self.l3.name, '1')

    def test_add_link(self):
        self.assertIs(self.c1[1], self.l2)
        self.assertEqual(self.l2.name, '1')

    def test_iter(self):
        links = list(self.c2)
        self.assertEqual(2, len(links))
        self.assertIs(links[0], self.c1)
        self.assertIs(links[1], self.l3)

    def test_len(self):
        self.assertEqual(len(self.c1), 2)
        self.assertEqual(len(self.c2), 2)

    def test_copy(self):
        # Same copy semantics as Chain: fresh wrappers, shared data,
        # gradients dropped, index names preserved, own name reset.
        c2 = self.c2.copy()
        self.assertIs(c2.name, None)
        self.assertIsNot(c2[0], self.c1)
        self.assertEqual(c2[0].name, '0')
        self.assertIsNot(c2[0][0], self.l1)
        self.assertEqual(c2[0][0].name, '0')
        self.assertIsNot(c2[0][0].x, self.l1.x)
        self.assertIs(c2[0][0].x.data, self.l1.x.data)
        self.assertIs(c2[0][0].x.grad, None)
        self.assertIsNot(c2[0][1], self.l2)
        self.assertEqual(c2[0][1].name, '1')
        self.assertIsNot(c2[0][1].x, self.l2.x)
        self.assertIs(c2[0][1].x.data, self.l2.x.data)
        self.assertIs(c2[0][1].x.grad, None)
        self.assertIsNot(c2[1], self.l3)
        self.assertEqual(c2[1].name, '1')
        self.assertIsNot(c2[1].x, self.l3.x)
        self.assertIs(c2[1].x.data, self.l3.x.data)
        self.assertIs(c2[1].x.grad, None)

    def test_to_cpu_on_cpu(self):
        x1 = self.l1.x.data
        gx1 = self.l1.x.grad
        x2 = self.l2.x.data
        gx2 = self.l2.x.grad
        x3 = self.l3.x.data
        gx3 = self.l3.x.grad
        self.c2.to_cpu()
        self.assertIs(self.l1.x.data, x1)
        self.assertIs(self.l1.x.grad, gx1)
        self.assertIs(self.l2.x.data, x2)
        self.assertIs(self.l2.x.grad, gx2)
        self.assertIs(self.l3.x.data, x3)
        self.assertIs(self.l3.x.grad, gx3)

    @attr.gpu
    def test_to_cpu(self):
        self.c2.to_gpu()
        self.c2.to_cpu()
        self.assertIs(self.c2.xp, numpy)
        self.assertIs(self.c1.xp, numpy)
        self.assertIs(self.l1.xp, numpy)
        self.assertIs(self.l2.xp, numpy)
        self.assertIs(self.l3.xp, numpy)
        self.assertIsInstance(self.l1.x.data, numpy.ndarray)
        self.assertIsInstance(self.l1.x.grad, numpy.ndarray)
        self.assertIsInstance(self.l2.x.data, numpy.ndarray)
        self.assertIsInstance(self.l2.x.grad, numpy.ndarray)
        self.assertIsInstance(self.l3.x.data, numpy.ndarray)
        self.assertIsInstance(self.l3.x.grad, numpy.ndarray)

    @attr.gpu
    def test_to_gpu(self):
        cupy = cuda.cupy
        self.c2.to_gpu()
        self.assertIs(self.c2.xp, cupy)
        self.assertIs(self.c1.xp, cupy)
        self.assertIs(self.l1.xp, cupy)
        self.assertIs(self.l2.xp, cupy)
        self.assertIs(self.l3.xp, cupy)
        self.assertIsInstance(self.l1.x.data, cupy.ndarray)
        self.assertIsInstance(self.l1.x.grad, cupy.ndarray)
        self.assertIsInstance(self.l2.x.data, cupy.ndarray)
        self.assertIsInstance(self.l2.x.grad, cupy.ndarray)
        self.assertIsInstance(self.l3.x.data, cupy.ndarray)
        self.assertIsInstance(self.l3.x.grad, cupy.ndarray)

    def test_params(self):
        params = list(self.c2.params())
        self.assertEqual({id(p) for p in params},
                         {id(self.l1.x), id(self.l2.x), id(self.l3.x)})

    def test_namedparams(self):
        # Paths use list indices: /0/0/x, /0/1/x, /1/x.
        namedparams = list(self.c2.namedparams())
        self.assertEqual({(name, id(p)) for name, p in namedparams},
                         {('/0/0/x', id(self.l1.x)),
                          ('/0/1/x', id(self.l2.x)),
                          ('/1/x', id(self.l3.x))})

    def test_links(self):
        links = list(self.c2.links())
        self.assertEqual({id(l) for l in links},
                         {id(l) for l in [self.l1, self.l2, self.l3,
                                          self.c1, self.c2]})

    def test_links_skipself(self):
        links = list(self.c2.links(skipself=True))
        self.assertEqual({id(l) for l in links},
                         {id(l) for l in [self.l1, self.l2, self.l3,
                                          self.c1]})

    def test_namedlinks(self):
        namedlinks = list(self.c2.namedlinks())
        self.assertEqual({(name, id(l)) for name, l in namedlinks},
                         {('/', id(self.c2)),
                          ('/0', id(self.c1)),
                          ('/0/0', id(self.l1)),
                          ('/0/1', id(self.l2)),
                          ('/1', id(self.l3))})

    def test_namedlinks_skipself(self):
        namedlinks = list(self.c2.namedlinks(skipself=True))
        self.assertEqual({(name, id(l)) for name, l in namedlinks},
                         {('/0', id(self.c1)),
                          ('/0/0', id(self.l1)),
                          ('/0/1', id(self.l2)),
                          ('/1', id(self.l3))})

    def test_children(self):
        # Unlike TestChain.test_children, order is asserted here (tuples):
        # ChainList children come back in insertion order.
        self.assertEqual(tuple(id(c) for c in self.c2.children()),
                         (id(self.c1), id(self.l3)))
        self.assertEqual(tuple(id(c) for c in self.c1.children()),
                         (id(self.l1), id(self.l2)))

    def test_copyparams(self):
        l1 = chainer.Link(x=(2, 3))
        l2 = chainer.Link(x=2)
        l3 = chainer.Link(x=3)
        c1 = chainer.ChainList(l1, l2)
        c2 = chainer.ChainList(c1, l3)
        l1.x.data.fill(0)
        l2.x.data.fill(1)
        l3.x.data.fill(2)
        self.c2.copyparams(c2)
        numpy.testing.assert_array_equal(self.l1.x.data, l1.x.data)
        numpy.testing.assert_array_equal(self.l2.x.data, l2.x.data)
        numpy.testing.assert_array_equal(self.l3.x.data, l3.x.data)

    def test_zerograds(self):
        self.c2.zerograds()
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))

    def test_addgrads(self):
        l1 = chainer.Link(x=(2, 3))
        l2 = chainer.Link(x=2)
        l3 = chainer.Link(x=3)
        c1 = chainer.ChainList(l1, l2)
        c2 = chainer.ChainList(c1, l3)
        l1.x.grad.fill(1)
        l2.x.grad.fill(2)
        l3.x.grad.fill(3)
        self.l1.x.grad.fill(-1)
        self.l2.x.grad.fill(-2)
        self.l3.x.grad.fill(-3)
        self.c2.addgrads(c2)
        numpy.testing.assert_array_equal(self.l1.x.grad, numpy.zeros((2, 3)))
        numpy.testing.assert_array_equal(self.l2.x.grad, numpy.zeros(2))
        numpy.testing.assert_array_equal(self.l3.x.grad, numpy.zeros(3))
# pylint: disable=W0611
#
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""some functions that may be useful for various checkers
"""
# NOTE(review): this module is Python 2 code (iterator .next(), __builtins__
# handling below); do not run it under Python 3 without porting.

import string
from logilab import astng
from logilab.common.compat import builtins
BUILTINS_NAME = builtins.__name__
# AST node types introducing a comprehension/generator scope.
COMP_NODE_TYPES = astng.ListComp, astng.SetComp, astng.DictComp, astng.GenExpr

def safe_infer(node):
    """return the inferred value for the given node.
    Return None if inference failed or if there is some ambiguity (more than
    one node has been inferred)
    """
    try:
        inferit = node.infer()
        value = inferit.next()  # Python 2 iterator protocol
    except astng.InferenceError:
        return
    try:
        # A second inference result means the value is ambiguous.
        inferit.next()
        return # None if there is ambiguity on the inferred node
    except StopIteration:
        return value

def is_super(node):
    """return True if the node is referencing the "super" builtin function
    """
    if getattr(node, 'name', None) == 'super' and \
           node.root().name == BUILTINS_NAME:
        return True
    return False

def is_error(node):
    """return true if the function does nothing but raising an exception"""
    # NOTE(review): the 'return False' sits inside the loop, so only the
    # FIRST child is inspected; this matches the docstring's "does nothing
    # but raise" intent but looks fragile — confirm before relying on it.
    for child_node in node.get_children():
        if isinstance(child_node, astng.Raise):
            return True
        return False

def is_raising(body):
    """return true if the given statement node raise an exception"""
    for node in body:
        if isinstance(node, astng.Raise):
            return True
    return False

def is_empty(body):
    """return true if the given node does nothing but 'pass'"""
    return len(body) == 1 and isinstance(body[0], astng.Pass)

# Rebinds the module-level name 'builtins' (previously the compat module
# imported above) to a copy of __builtins__. NOTE(review): presumably
# __builtins__ is a dict here (non-__main__ module in CPython) so .copy()
# yields a name->value dict — verify if this module can be run as __main__.
builtins = __builtins__.copy()
SPECIAL_BUILTINS = ('__builtins__',) # '__path__', '__file__')

def is_builtin(name): # was is_native_builtin
    """return true if <name> could be considered as a builtin defined by python
    """
    if name in builtins:
        return True
    if name in SPECIAL_BUILTINS:
        return True
    return False

def is_defined_before(var_node):
    """return True if the variable node is defined by a parent node (list,
    set, dict, or generator comprehension, lambda) or in a previous sibling
    node on the same line (statement_defining ; statement_using)
    """
    varname = var_node.name
    _node = var_node.parent
    # Walk up the parents looking for a scope that binds varname.
    while _node:
        if isinstance(_node, COMP_NODE_TYPES):
            for ass_node in _node.nodes_of_class(astng.AssName):
                if ass_node.name == varname:
                    return True
        elif isinstance(_node, astng.For):
            for ass_node in _node.target.nodes_of_class(astng.AssName):
                if ass_node.name == varname:
                    return True
        elif isinstance(_node, astng.With):
            if _node.vars is None:
                # quickfix : case in which 'with' is used without 'as'
                return False
            if _node.vars.name == varname:
                return True
        elif isinstance(_node, (astng.Lambda, astng.Function)):
            if _node.args.is_argument(varname):
                return True
            if getattr(_node, 'name', None) == varname:
                return True
            # Stop at the first enclosing function/lambda scope.
            break
        _node = _node.parent
    # possibly multiple statements on the same line using semi colon separator
    stmt = var_node.statement()
    _node = stmt.previous_sibling()
    lineno = stmt.fromlineno
    while _node and _node.fromlineno == lineno:
        for ass_node in _node.nodes_of_class(astng.AssName):
            if ass_node.name == varname:
                return True
        for imp_node in _node.nodes_of_class( (astng.From, astng.Import)):
            # names are (name, alias) pairs; the alias wins when present.
            if varname in [name[1] or name[0] for name in imp_node.names]:
                return True
        _node = _node.previous_sibling()
    return False

def is_func_default(node):
    """return true if the given Name node is used in function default
    argument's value
    """
    parent = node.scope()
    if isinstance(parent, astng.Function):
        for default_node in parent.args.defaults:
            for default_name_node in default_node.nodes_of_class(astng.Name):
                # Identity check: is this exact Name node part of a default?
                if default_name_node is node:
                    return True
    return False

def is_func_decorator(node):
    """return true if the name is used in function decorator"""
    parent = node.parent
    while parent is not None:
        if isinstance(parent, astng.Decorators):
            return True
        if parent.is_statement or isinstance(parent, astng.Lambda):
            break
        parent = parent.parent
    return False

def is_ancestor_name(frame, node):
    """return True if `frame` is a astng.Class node with `node` in the
    subtree of its bases attribute
    """
    try:
        bases = frame.bases
    except AttributeError:
        # frame is not a Class node (no 'bases' attribute).
        return False
    for base in bases:
        if node in base.nodes_of_class(astng.Name):
            return True
    return False

def assign_parent(node):
    """return the higher parent which is not an AssName, Tuple or List node
    """
    while node and isinstance(node, (astng.AssName, astng.Tuple, astng.List)):
        node = node.parent
    return node

def overrides_an_abstract_method(class_node, name):
    """return True if pnode is a parent of node"""
    # NOTE(review): docstring looks copy-pasted/wrong — this actually reports
    # whether <name> overrides an abstract method defined in an ancestor.
    for ancestor in class_node.ancestors():
        if name in ancestor and isinstance(ancestor[name], astng.Function) and \
               ancestor[name].is_abstract(pass_is_abstract=False):
            return True
    return False

def overrides_a_method(class_node, name):
    """return True if <name> is a method overridden from an ancestor"""
    for ancestor in class_node.ancestors():
        if name in ancestor and isinstance(ancestor[name], astng.Function):
            return True
    return False

# Known special methods checkers may want to treat specially. The list is
# explicitly incomplete (see the XXX marker below).
PYMETHODS = set(('__new__', '__init__', '__del__', '__hash__',
                 '__str__', '__repr__',
                 '__len__', '__iter__',
                 '__delete__', '__get__', '__set__',
                 '__getitem__', '__setitem__', '__delitem__', '__contains__',
                 '__getattribute__', '__getattr__', '__setattr__',
                 '__delattr__',
                 '__call__',
                 '__enter__', '__exit__',
                 '__cmp__', '__ge__', '__gt__', '__le__', '__lt__', '__eq__',
                 '__nonzero__', '__neg__', '__invert__',
                 '__mul__', '__imul__', '__rmul__',
                 '__div__', '__idiv__', '__rdiv__',
                 '__add__', '__iadd__', '__radd__',
                 '__sub__', '__isub__', '__rsub__',
                 '__pow__', '__ipow__', '__rpow__',
                 '__mod__', '__imod__', '__rmod__',
                 '__and__', '__iand__', '__rand__',
                 '__or__', '__ior__', '__ror__',
                 '__xor__', '__ixor__', '__rxor__',
                 # XXX To be continued
                 ))

def check_messages(*messages):
    """decorator to store messages that are handled by a checker method"""
    def store_messages(func):
        # Message ids are attached to the function for the checker framework.
        func.checks_msgs = messages
        return func
    return store_messages

class IncompleteFormatString(Exception):
    """A format string ended in the middle of a format specifier."""
    pass

class UnsupportedFormatCharacter(Exception):
    """A format character in a format string is not one of the supported
    format characters."""
    def __init__(self, index):
        Exception.__init__(self, index)
        # Index of the offending character within the format string.
        self.index = index

def parse_format_string(format_string):
    """Parses a format string, returning a tuple of (keys, num_args), where
    keys is the set of mapping keys in the format string, and num_args is the
    number of arguments required by the format string.  Raises
    IncompleteFormatString or UnsupportedFormatCharacter if a parse error
    occurs."""
    keys = set()
    num_args = 0
    def next_char(i):
        # Advance one character; running off the end means the specifier
        # was cut short.
        i += 1
        if i == len(format_string):
            raise IncompleteFormatString
        return (i, format_string[i])
    i = 0
    while i < len(format_string):
        c = format_string[i]
        if c == '%':
            i, c = next_char(i)
            # Parse the mapping key (optional).
            key = None
            if c == '(':
                # Track nested parentheses inside the mapping key.
                depth = 1
                i, c = next_char(i)
                key_start = i
                while depth != 0:
                    if c == '(':
                        depth += 1
                    elif c == ')':
                        depth -= 1
                    i, c = next_char(i)
                key_end = i - 1
                key = format_string[key_start:key_end]
            # Parse the conversion flags (optional).
            while c in '#0- +':
                i, c = next_char(i)
            # Parse the minimum field width (optional).
            if c == '*':
                # '*' consumes one positional argument for the width.
                num_args += 1
                i, c = next_char(i)
            else:
                while c in string.digits:
                    i, c = next_char(i)
            # Parse the precision (optional).
            if c == '.':
                i, c = next_char(i)
                if c == '*':
                    # '*' consumes one positional argument for the precision.
                    num_args += 1
                    i, c = next_char(i)
                else:
                    while c in string.digits:
                        i, c = next_char(i)
            # Parse the length modifier (optional).
            if c in 'hlL':
                i, c = next_char(i)
            # Parse the conversion type (mandatory).
            if c not in 'diouxXeEfFgGcrs%':
                raise UnsupportedFormatCharacter(i)
            if key:
                keys.add(key)
            elif c != '%':
                # '%%' is a literal percent and takes no argument.
                num_args += 1
        i += 1
    return keys, num_args
"""Tests for the Tornado-based HTTP server in mopidy.http (actor + handlers).

Fixes over the previous revision:
- Import ``urllib.parse``, ``tornado.web`` and ``tornado.escape`` explicitly.
  ``import urllib`` alone does not bind the ``parse`` submodule, and
  ``tornado.web``/``tornado.escape`` were only reachable through transitive
  imports of other tornado modules.
- The temporary data dir in ``HttpServerTestLoginWithSecureCookie`` is now
  removed in ``tearDown`` so every test in the class cleans up, not just
  ``test_login``.
- Consistent zero-argument ``super()`` usage.
"""

import os
import shutil
import tempfile
import urllib.parse
from unittest import mock

import tornado.escape
import tornado.testing
import tornado.web
import tornado.wsgi

import mopidy
from mopidy.http import actor, handlers


class HttpServerTest(tornado.testing.AsyncHTTPTestCase):
    """Base case: serves the stock "mopidy" app plus a test app and static."""

    def get_config(self):
        return {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "allowed_origins": [],
                "csrf_protection": True,
                "default_app": "mopidy",
            }
        }

    def get_app(self):
        # Mock the core actor; only get_version() is exercised by handlers.
        core = mock.Mock()
        core.get_version = mock.MagicMock(name="get_version")
        core.get_version.return_value = mopidy.__version__

        testapps = [dict(name="testapp")]
        teststatics = [dict(name="teststatic")]

        apps = [
            {
                "name": "mopidy",
                "factory": handlers.make_mopidy_app_factory(
                    testapps, teststatics
                ),
            }
        ]

        http_server = actor.HttpServer(
            config=self.get_config(),
            core=core,
            sockets=[],
            apps=apps,
            statics=[],
        )

        return tornado.web.Application(http_server._get_request_handlers())


class RootRedirectTest(HttpServerTest):
    def test_should_redirect_to_mopidy_app(self):
        response = self.fetch("/", method="GET", follow_redirects=False)

        assert response.code == 302
        assert response.headers["Location"] == "/mopidy/"


class MopidyAppTest(HttpServerTest):
    def test_should_return_index(self):
        response = self.fetch("/mopidy/", method="GET")
        body = response.body.decode()

        assert "This web server is a part of the Mopidy music server." in body
        assert "testapp" in body
        assert "teststatic" in body
        assert response.headers["X-Mopidy-Version"] == mopidy.__version__
        assert response.headers["Cache-Control"] == "no-cache"

    def test_without_slash_should_redirect(self):
        response = self.fetch("/mopidy", method="GET", follow_redirects=False)

        assert response.code == 301
        assert response.headers["Location"] == "/mopidy/"

    def test_should_return_static_files(self):
        response = self.fetch("/mopidy/mopidy.css", method="GET")

        assert "html {" in response.body.decode()
        assert response.headers["X-Mopidy-Version"] == mopidy.__version__
        assert response.headers["Cache-Control"] == "no-cache"


class MopidyWebSocketHandlerTest(HttpServerTest):
    def test_should_return_ws(self):
        response = self.fetch("/mopidy/ws", method="GET")

        assert 'Can "Upgrade" only to "WebSocket".' == response.body.decode()

    def test_should_return_ws_old(self):
        response = self.fetch("/mopidy/ws/", method="GET")

        assert 'Can "Upgrade" only to "WebSocket".' == response.body.decode()


class MopidyRPCHandlerTest(HttpServerTest):
    def test_should_return_rpc_error(self):
        # Payload lacks the mandatory "jsonrpc" member.
        cmd = tornado.escape.json_encode({"action": "get_version"})

        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=cmd,
            headers={"Content-Type": "application/json"},
        )

        assert {
            "jsonrpc": "2.0",
            "id": None,
            "error": {
                "message": "Invalid Request",
                "code": (-32600),
                "data": "'jsonrpc' member must be included",
            },
        } == tornado.escape.json_decode(response.body)

    def test_should_return_parse_error(self):
        cmd = "{[[[]}"  # deliberately malformed JSON

        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=cmd,
            headers={"Content-Type": "application/json"},
        )

        assert {
            "jsonrpc": "2.0",
            "id": None,
            "error": {"message": "Parse error", "code": (-32700)},
        } == tornado.escape.json_decode(response.body)

    def test_should_return_mopidy_version(self):
        cmd = tornado.escape.json_encode(
            {
                "method": "core.get_version",
                "params": [],
                "jsonrpc": "2.0",
                "id": 1,
            }
        )

        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=cmd,
            headers={"Content-Type": "application/json"},
        )

        assert {
            "jsonrpc": "2.0",
            "id": 1,
            "result": mopidy.__version__,
        } == tornado.escape.json_decode(response.body)

    def test_should_return_extra_headers(self):
        response = self.fetch("/mopidy/rpc", method="HEAD")

        assert "Accept" in response.headers
        assert "X-Mopidy-Version" in response.headers
        assert "Cache-Control" in response.headers
        assert "Content-Type" in response.headers

    def test_should_require_correct_content_type(self):
        cmd = tornado.escape.json_encode(
            {
                "method": "core.get_version",
                "params": [],
                "jsonrpc": "2.0",
                "id": 1,
            }
        )

        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=cmd,
            headers={"Content-Type": "text/plain"},
        )

        assert response.code == 415
        assert response.reason == "Content-Type must be application/json"

    def test_different_origin_returns_access_denied(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="OPTIONS",
            headers={"Host": "me:6680", "Origin": "http://evil:666"},
        )

        assert response.code == 403
        assert response.reason == "Access denied for origin http://evil:666"

    def test_same_origin_returns_cors_headers(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="OPTIONS",
            headers={"Host": "me:6680", "Origin": "http://me:6680"},
        )

        assert (
            response.headers["Access-Control-Allow-Origin"] == "http://me:6680"
        )
        assert (
            response.headers["Access-Control-Allow-Headers"] == "Content-Type"
        )


class MopidyRPCHandlerNoCSRFProtectionTest(HttpServerTest):
    """Same RPC endpoint with CSRF protection disabled in config."""

    def get_config(self):
        config = super().get_config()
        config["http"]["csrf_protection"] = False
        return config

    def get_cmd(self):
        return tornado.escape.json_encode(
            {
                "method": "core.get_version",
                "params": [],
                "jsonrpc": "2.0",
                "id": 1,
            }
        )

    def test_should_ignore_incorrect_content_type(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=self.get_cmd(),
            headers={"Content-Type": "text/plain"},
        )

        assert response.code == 200

    def test_should_ignore_missing_content_type(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="POST",
            body=self.get_cmd(),
            headers={},
        )

        assert response.code == 200

    def test_different_origin_returns_allowed(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="OPTIONS",
            headers={"Host": "me:6680", "Origin": "http://evil:666"},
        )

        assert response.code == 204

    def test_should_not_return_cors_headers(self):
        response = self.fetch(
            "/mopidy/rpc",
            method="OPTIONS",
            headers={"Host": "me:6680", "Origin": "http://me:6680"},
        )

        assert "Access-Control-Allow-Origin" not in response.headers
        assert "Access-Control-Allow-Headers" not in response.headers


class HttpServerWithStaticFilesTest(tornado.testing.AsyncHTTPTestCase):
    def get_app(self):
        config = {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "default_app": "static",
            }
        }
        core = mock.Mock()
        # Serve this test module's own directory as the static root.
        statics = [dict(name="static", path=os.path.dirname(__file__))]

        http_server = actor.HttpServer(
            config=config, core=core, sockets=[], apps=[], statics=statics
        )

        return tornado.web.Application(http_server._get_request_handlers())

    def test_without_slash_should_redirect(self):
        response = self.fetch("/static", method="GET", follow_redirects=False)

        assert response.code == 301
        assert response.headers["Location"] == "/static/"

    def test_can_serve_static_files(self):
        response = self.fetch("/static/test_server.py", method="GET")

        assert 200 == response.code
        assert response.headers["X-Mopidy-Version"] == mopidy.__version__
        assert response.headers["Cache-Control"] == "no-cache"


def wsgi_app_factory(config, core):
    """App factory wrapping a trivial WSGI application in Tornado."""

    def wsgi_app(environ, start_response):
        status = "200 OK"
        response_headers = [("Content-type", "text/plain")]
        start_response(status, response_headers)
        return [b"Hello, world!\n"]

    return [
        (
            "(.*)",
            tornado.web.FallbackHandler,
            {"fallback": tornado.wsgi.WSGIContainer(wsgi_app)},
        ),
    ]


class HttpServerWithWsgiAppTest(tornado.testing.AsyncHTTPTestCase):
    def get_app(self):
        config = {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "default_app": "wsgi",
            }
        }
        core = mock.Mock()
        apps = [{"name": "wsgi", "factory": wsgi_app_factory}]

        http_server = actor.HttpServer(
            config=config, core=core, sockets=[], apps=apps, statics=[]
        )

        return tornado.web.Application(http_server._get_request_handlers())

    def test_without_slash_should_redirect(self):
        response = self.fetch("/wsgi", method="GET", follow_redirects=False)

        assert response.code == 301
        assert response.headers["Location"] == "/wsgi/"

    def test_can_wrap_wsgi_apps(self):
        response = self.fetch("/wsgi/", method="GET")

        assert response.code == 200
        assert "Hello, world!" in response.body.decode()


def default_webapp_factory(config, core):
    """App factory for a minimal Tornado app used as the default app."""

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("Hello from default webapp")

    return [("/", MainHandler, {})]


class HttpServerWithAppDefaultApp(tornado.testing.AsyncHTTPTestCase):
    def get_app(self):
        config = {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "default_app": "default_app",
            }
        }
        core = mock.Mock()
        apps = [dict(name="default_app", factory=default_webapp_factory)]

        http_server = actor.HttpServer(
            config=config, core=core, sockets=[], apps=apps, statics=[]
        )

        return tornado.web.Application(http_server._get_request_handlers())

    def test_should_redirect_to_default_app(self):
        response = self.fetch("/", method="GET", follow_redirects=False)

        assert response.code == 302
        assert response.headers["Location"] == "/default_app/"

        response = self.fetch(
            "/default_app/", method="GET", follow_redirects=True
        )

        assert response.code == 200
        assert "Hello from default webapp" in response.body.decode()


class HttpServerWithStaticDefaultApp(tornado.testing.AsyncHTTPTestCase):
    def get_app(self):
        config = {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "default_app": "default_app",
            }
        }
        core = mock.Mock()
        statics = [dict(name="default_app", path=os.path.dirname(__file__))]

        http_server = actor.HttpServer(
            config=config, core=core, sockets=[], apps=[], statics=statics
        )

        return tornado.web.Application(http_server._get_request_handlers())

    def test_should_redirect_to_default_app(self):
        response = self.fetch("/", method="GET", follow_redirects=False)

        assert response.code == 302
        assert response.headers["Location"] == "/default_app/"


class HttpServerWithInvalidDefaultApp(HttpServerTest):
    def get_config(self):
        config = super().get_config()
        config["http"]["default_app"] = "invalid_webclient"
        return config

    def test_should_redirect_to_clients_list(self):
        # An unknown default_app falls back to the "mopidy" client list.
        response = self.fetch("/", method="GET", follow_redirects=False)

        assert response.code == 302
        assert response.headers["Location"] == "/mopidy/"

        response = self.fetch("/", method="GET")
        body = response.body.decode()

        assert "This web server is a part of the Mopidy music server." in body
        assert "testapp" in body
        assert "teststatic" in body
        assert response.headers["X-Mopidy-Version"] == mopidy.__version__
        assert response.headers["Cache-Control"] == "no-cache"


def cookie_secret_app_factory(config, core):
    """App factory exercising secure-cookie login via the server's secret."""

    class BaseHandler(tornado.web.RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("user")

    class LoginHandler(BaseHandler):
        def get(self):
            self.write("This is a login form")

        def post(self):
            self.set_secure_cookie("user", self.get_argument("name"))
            self.write("Logged in")

    class MainHandler(BaseHandler):
        def get(self):
            if not self.current_user:
                self.write("Unknown user...")
                return
            name = tornado.escape.xhtml_escape(self.current_user)
            self.write("Hello, " + name)

    return [("/", MainHandler, {}), ("/login", LoginHandler, {})]


class HttpServerTestLoginWithSecureCookie(tornado.testing.AsyncHTTPTestCase):
    def get_app(self):
        # Per-test data dir; removed in tearDown so no test leaks it.
        self._dirpath = tempfile.mkdtemp()

        config = {
            "http": {
                "hostname": "127.0.0.1",
                "port": 6680,
                "zeroconf": "",
                "default_app": "mopidy",
            },
            "core": {"data_dir": self._dirpath},
        }
        core = mock.Mock()
        apps = [{"name": "cookie_secret", "factory": cookie_secret_app_factory}]

        http_server = actor.HttpServer(
            config=config, core=core, sockets=[], apps=apps, statics=[]
        )
        return tornado.web.Application(
            http_server._get_request_handlers(),
            cookie_secret=http_server._get_cookie_secret(),
        )

    def tearDown(self):
        shutil.rmtree(self._dirpath, ignore_errors=True)
        super().tearDown()

    def test_main_access_without_login(self):
        response = self.fetch("/cookie_secret", method="GET")

        assert 200 == response.code
        assert "Unknown user..." in response.body.decode()

    def test_accessing_login_form_get(self):
        response = self.fetch("/cookie_secret/login", method="GET")

        assert 200 == response.code
        assert "This is a login form" in response.body.decode()

    def test_login(self):
        post_data = {"name": "theuser"}
        body = urllib.parse.urlencode(post_data)
        response = self.fetch("/cookie_secret/login", method="POST", body=body)

        assert 200 == response.code
        assert "Logged in" in response.body.decode()


def test_get_secure_cookie(tmp_path):
    config = {
        "http": {
            "hostname": "127.0.0.1",
            "port": 6680,
            "zeroconf": "",
            "default_app": "mopidy",
        },
        "core": {"data_dir": tmp_path},
    }
    core = mock.Mock()
    http_server = actor.HttpServer(
        config=config, core=core, sockets=[], apps=[], statics=[]
    )

    # First call generates and persists the secret ...
    secret_1 = http_server._get_cookie_secret()
    assert isinstance(secret_1, str)
    assert secret_1 != ""
    assert len(secret_1) == 64

    # ... the second call must read the same secret back from file.
    secret_2 = http_server._get_cookie_secret()
    assert secret_1 == secret_2
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Py2neo documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 17 16:03:15 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# The parent dir makes the py2neo package importable; _ext holds the local
# 'ogm' Sphinx extension listed in `extensions` below.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath("./_ext"))


# -- Project metadata -----------------------------------------------------

from py2neo import __copyright__
from py2neo.meta import get_metadata, get_version_data


# -- General configuration ------------------------------------------------

metadata = get_metadata()
version_data = get_version_data()

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
    'ogm',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
# NOTE: `copyright` intentionally shadows the builtin; it is the name Sphinx
# reads for the copyright notice.
project = 'Py2neo'
copyright = __copyright__

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# version: The short X.Y version.
# release: The full version, including alpha/beta/rc tags.
# On Read the Docs "latest" builds, both are shown as "(master)"; otherwise
# they are derived from the release tuple, with a "(pre-release)" suffix for
# pre-release versions.
if version_data["rtd"] == "latest":
    release = version = "(master)"
elif version_data["pre"]:
    version = ".".join(map(str, version_data["release"][:2]))
    release = version + " (pre-release)"
else:
    release = version = ".".join(map(str, version_data["release"][:2]))

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# Mappings to third-party docs
intersphinx_mapping = {
    "packaging": ("https://packaging.pypa.io/en/latest/", None),
    "python": ("https://docs.python.org/3", None),
}


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'pydoctheme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'sidebarwidth': 330,
    'collapsiblesidebar': True,
    #'logo': 'py2neo-v4.280x336.png',
    #'logo_align': 'left',
    #'github_user': 'technige',
    #'github_repo': 'py2neo',
    #'github_branch': 'v3',
    #'travis_button': True,
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes"]

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
html_title = "py2neo %s" % release

# A shorter title for the navigation bar.  Default is the same as html_title.
html_short_title = html_title

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../art/py2neo-2018.291x50.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "../art/py2neo.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {
#    '**': [
#        'about.html', 'navigation.html', 'searchbox.html', 'donate.html',
#    ]
#}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Py2neodoc'

# Extra context made available to every HTML template (used by the local
# theme to show a version switcher).
html_context = {
    "version_data": version_data,
}


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Py2neo.tex', 'Py2neo Documentation',
     metadata["author"], 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'py2neo', 'Py2neo Documentation',
     [metadata["author"]], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Py2neo', 'Py2neo Documentation',
     metadata["author"], 'Py2neo', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = 'Py2neo'
epub_author = metadata["author"]
epub_publisher = metadata["author"]
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
#epub_basename = 'Py2neo'

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
#epub_fix_images = False

# Scale large images.
#epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fused_batch_norm related functionality in tensorflow.ops.nn."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test


class BatchNormalizationTest(test.TestCase):
  """Compares nn_impl.fused_batch_norm against manually computed references,
  for inference, training, first gradients and second-order gradients."""

  def _batch_norm(self, x, mean, var, offset, scale, epsilon):
    # We compute the batch norm manually in this function because
    # nn_impl.batch_normalization does not support float16 yet.
    # TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
    inv = math_ops.rsqrt(var + epsilon) * scale
    # Compute in scale's dtype, then cast back to x's dtype (handles float16 x).
    y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
    return math_ops.cast(y, x.dtype)

  def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
    # Reference inference result: normalize in NHWC, transposing NCHW inputs
    # to NHWC and back so _batch_norm's broadcasting works either way.
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    y = self._batch_norm(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return self.evaluate(y)

  def _test_inference(self,
                      x_shape,
                      x_dtype,
                      scale_shape,
                      scale_dtype,
                      use_gpu=True,
                      data_format='NHWC'):
    # Runs fused_batch_norm in inference mode (is_training=False) and checks
    # the output against _inference_ref on identical random inputs.
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    var_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.cached_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      mean = constant_op.constant(mean_val, name='mean')
      var = constant_op.constant(var_val, name='variance')
      epsilon = 0.001
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=mean,
          variance=var,
          epsilon=epsilon,
          data_format=data_format,
          is_training=False)
      y_val = self.evaluate(y)
      y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
                                  data_format)
    # An atol value of 1e-3 is too small for float16's, because some adjacent
    # float16 values that y_val can take are greater than 1e-3 apart, e.g.
    # 2.16602 and 2.16797.
    atol = 2e-3 if x_dtype == np.float16 else 1e-3
    self.assertAllClose(y_ref, y_val, atol=atol)

  def _training_ref(self, x, scale, offset, epsilon, data_format):
    # Reference training result: batch statistics via nn_impl.moments over
    # N, H, W, then the same manual normalization as _batch_norm.
    if data_format not in ['NHWC', 'NCHW']:
      raise ValueError('data_format must be NCHW or NHWC, '
                       'got %s.' % data_format)
    if data_format == 'NCHW':
      x = array_ops.transpose(x, [0, 2, 3, 1])
    mean, var = nn_impl.moments(
        math_ops.cast(x, scale.dtype), [0, 1, 2], keep_dims=False)
    y = self._batch_norm(x, mean, var, offset, scale, epsilon)
    if data_format == 'NCHW':
      y = array_ops.transpose(y, [0, 3, 1, 2])
    return self.evaluate(y), self.evaluate(mean), self.evaluate(var)

  def _test_training(self,
                     x_shape,
                     x_dtype,
                     scale_shape,
                     scale_dtype,
                     use_gpu=True,
                     data_format='NHWC'):
    # Runs fused_batch_norm in training mode and checks output, batch mean
    # and batch variance against _training_ref.
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    with self.cached_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      epsilon = 0.001
      y, mean, var = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          epsilon=epsilon,
          data_format=data_format,
          is_training=True)
      y_val, mean_val, var_val = self.evaluate([y, mean, var])
      y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset, epsilon,
                                                    data_format)
    y_atol = 2e-3 if x_dtype == np.float16 else 1e-3
    self.assertAllClose(y_ref, y_val, atol=y_atol)
    self.assertAllClose(mean_ref, mean_val, atol=1e-3)
    # This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
    # the denominator in the formula to calculate variance, while
    # tf.nn.fused_batch_norm has Bessel's correction built in.
    sample_size = x_val.size / scale_val.size
    var_ref = var_ref * sample_size / (max(sample_size - 1.0, 1.0))
    self.assertAllClose(var_ref, var_val, atol=1e-3)

  def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape):
    """Computes the gradient error for float16 inputs and/or outputs.

    This returns the same value as gradient_checker.compute_gradient_error. The
    difference is that gradient_checker.compute_gradient_error does not
    numerically compute the gradients in a numerically stable way for float16
    tensors. To fix this, this function requires float32 versions of x and y to
    numerically compute the gradients, to compare with the float16 symbolically
    computed gradients.

    Args:
      x: The input tensor.
      x32: A float32 version of x.
      x_shape: The shape of x.
      y: The output tensor.
      y32: A float32 version of y. Must be calculated based on x32, not x.
      y_shape: The shape of y.

    Returns:
      The maximum error in between the two Jacobians, as in
      gradient_checker.compute_gradient_error.
    """
    x_init_val = np.random.random_sample(x_shape).astype(np.float16)
    x32_init_val = x_init_val.astype(np.float32)

    # TODO(reedwm): Do not perform the unnecessary computations in
    # compute_gradient, since they double the computation time of this function.
    # Symbolic (theoretical) Jacobian from the float16 graph...
    theoretical_grad, _ = gradient_checker.compute_gradient(
        x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
    # ...numerical Jacobian from the float32 graph for stability.
    _, numerical_grad = gradient_checker.compute_gradient(
        x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)

    # If grad is empty, no error.
    if theoretical_grad.size == 0 and numerical_grad.size == 0:
      return 0
    return np.fabs(theoretical_grad - numerical_grad).max()

  def _test_gradient(self,
                     x_shape,
                     x_dtype,
                     scale_shape,
                     scale_dtype,
                     use_gpu=True,
                     data_format='NHWC',
                     is_training=True):
    # Gradient-checks d(y)/d(x), d(y)/d(scale) and d(y)/d(offset), using the
    # float16-stable helper above when x is float16.
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.cached_session(use_gpu=use_gpu):
      x = constant_op.constant(x_val, name='x')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      if is_training:
        # Training mode derives statistics from the batch itself.
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
        pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      if x_dtype != np.float16:
        err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
        err_scale = gradient_checker.compute_gradient_error(
            scale, scale_shape, y, x_shape)
        err_offset = gradient_checker.compute_gradient_error(
            offset, scale_shape, y, x_shape)
      else:
        # Build a parallel float32 graph so numeric differentiation is stable.
        x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
        y32, _, _ = nn_impl.fused_batch_norm(
            x32,
            scale,
            offset,
            mean=pop_mean,
            variance=pop_var,
            data_format=data_format,
            is_training=is_training)
        err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
                                                     x_shape)
        err_scale = self._compute_gradient_error_float16(
            scale, scale, scale_shape, y, y32, x_shape)
        err_offset = self._compute_gradient_error_float16(
            offset, offset, scale_shape, y, y32, x_shape)

    x_err_tolerance = 2e-3 if x_dtype == np.float16 else 1e-3
    scale_err_tolerance = 1e-3
    self.assertLess(err_x, x_err_tolerance)
    self.assertLess(err_scale, scale_err_tolerance)
    self.assertLess(err_offset, scale_err_tolerance)

  def _test_grad_grad(self,
                      x_shape,
                      x_dtype,
                      scale_shape,
                      scale_dtype,
                      use_gpu=True,
                      data_format='NHWC',
                      is_training=True,
                      err_tolerance=1e-3):
    # Second-order gradient checks: differentiates the first gradients
    # (grad_x, grad_scale, grad_offset) w.r.t. grad_y and x.
    np.random.seed(1)
    x_val = np.random.random_sample(x_shape).astype(x_dtype)
    grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)
    scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
    offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)

    with self.cached_session(use_gpu=use_gpu) as sess:
      x = constant_op.constant(x_val, name='x')
      grad_y = constant_op.constant(grad_y_val, name='grad_y')
      scale = constant_op.constant(scale_val, name='scale')
      offset = constant_op.constant(offset_val, name='offset')
      if is_training:
        pop_mean = None
        pop_var = None
      else:
        pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
        pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
      y, _, _ = nn_impl.fused_batch_norm(
          x,
          scale,
          offset,
          mean=pop_mean,
          variance=pop_var,
          data_format=data_format,
          is_training=is_training)
      grad_x, grad_scale, grad_offset = gradients_impl.gradients(
          y, [x, scale, offset], grad_y)

      if is_training:
        epsilon = y.op.get_attr('epsilon')
        data_format = y.op.get_attr('data_format')
        grad_vals = self.evaluate([grad_x, grad_scale, grad_offset])
        # Cross-check the registered gradient against the internal
        # _BatchNormGrad implementation.
        grad_internal = nn_grad._BatchNormGrad(grad_y, x, scale, pop_mean,
                                               pop_var, epsilon, data_format)
        grad_internal_vals = self.evaluate(list(grad_internal))
        for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
          self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)

      if x_dtype != np.float16:
        err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_x, x_shape)
        err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_scale, scale_shape)
        err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
            grad_y, x_shape, grad_offset, scale_shape)
        # In freeze mode, grad_x is not a function of x.
        if is_training:
          err_grad_x_1 = gradient_checker.compute_gradient_error(
              x, x_shape, grad_x, x_shape)
        err_grad_x_2 = gradient_checker.compute_gradient_error(
            x, x_shape, grad_scale, scale_shape)
        err_grad_scale = gradient_checker.compute_gradient_error(
            scale, scale_shape, grad_x, x_shape)
      else:
        # Parallel float32 graph for numerically stable float16 checking.
        x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')
        grad_y32 = constant_op.constant(
            grad_y_val, dtype=dtypes.float32, name='grad_y32')
        y32, _, _ = nn_impl.fused_batch_norm(
            x32,
            scale,
            offset,
            mean=pop_mean,
            variance=pop_var,
            data_format=data_format,
            is_training=is_training)
        grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(
            y32, [x32, scale, offset], grad_y32)
        err_grad_grad_y_1 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape)
        err_grad_grad_y_2 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape)
        err_grad_grad_y_3 = self._compute_gradient_error_float16(
            grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape)
        # In freeze mode, grad_x is not a function of x.
if is_training: err_grad_x_1 = self._compute_gradient_error_float16( x, x32, x_shape, grad_x, grad_x32, x_shape) err_grad_x_2 = self._compute_gradient_error_float16( x, x32, x_shape, grad_scale, grad_scale32, scale_shape) err_grad_scale = self._compute_gradient_error_float16( scale, scale, scale_shape, grad_x, grad_x32, x_shape) self.assertLess(err_grad_grad_y_1, err_tolerance) self.assertLess(err_grad_grad_y_2, err_tolerance) self.assertLess(err_grad_grad_y_3, err_tolerance) if is_training: self.assertLess(err_grad_x_1, err_tolerance) self.assertLess(err_grad_x_2, err_tolerance) self.assertLess(err_grad_scale, err_tolerance) def testInferenceShape1(self): x_shape = [1, 1, 6, 1] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_inference( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC') self._test_inference( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW') self._test_inference( x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC') def testInferenceShape2(self): x_shape = [1, 1, 6, 2] if test.is_gpu_available(cuda_only=True): for dtype in [np.float16, np.float32]: self._test_inference( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC') self._test_inference( x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC') def testInferenceShape3(self): x_shape = [1, 2, 1, 6] if test.is_gpu_available(cuda_only=True): for dtype in [np.float16, np.float32]: self._test_inference( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW') def testInferenceShape4(self): x_shape = [27, 131, 127, 6] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_inference( x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW') self._test_inference( x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC') self._test_inference( x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC') def 
testInferenceShape5(self): x_shape = [0, 131, 127, 6] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_inference( x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW') self._test_inference( x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC') self._test_inference( x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC') def testTrainingShape1(self): x_shape = [1, 1, 6, 1] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_training( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC') self._test_training( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW') self._test_training( x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC') def testTrainingShape2(self): x_shape = [1, 1, 6, 2] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_training( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC') self._test_training( x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC') def testTrainingShape3(self): x_shape = [1, 2, 1, 6] if test.is_gpu_available(cuda_only=True): for dtype in [np.float16, np.float32]: self._test_training( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW') def testTrainingShape4(self): x_shape = [27, 131, 127, 6] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_training( x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW') self._test_training( x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC') self._test_training( x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC') def testTrainingShape5(self): x_shape = [0, 131, 127, 6] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_training( x_shape, dtype, [131], np.float32, use_gpu=True, data_format='NCHW') self._test_training( 
x_shape, dtype, [6], np.float32, use_gpu=True, data_format='NHWC') self._test_training( x_shape, dtype, [6], np.float32, use_gpu=False, data_format='NHWC') def testBatchNormGradShape1(self): for is_training in [True, False]: x_shape = [1, 1, 6, 1] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_gradient( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NHWC', is_training=is_training) self._test_gradient( x_shape, dtype, [1], np.float32, use_gpu=True, data_format='NCHW', is_training=is_training) self._test_gradient( x_shape, dtype, [1], np.float32, use_gpu=False, data_format='NHWC', is_training=is_training) def testBatchNormGradShape2(self): for is_training in [True, False]: x_shape = [1, 1, 6, 2] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_gradient( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NHWC', is_training=is_training) self._test_gradient( x_shape, dtype, [2], np.float32, use_gpu=False, data_format='NHWC', is_training=is_training) def testBatchNormGradShape3(self): for is_training in [True, False]: x_shape = [1, 2, 1, 6] if test.is_gpu_available(cuda_only=True): for dtype in [np.float16, np.float32]: self._test_gradient( x_shape, dtype, [2], np.float32, use_gpu=True, data_format='NCHW', is_training=is_training) def testBatchNormGradShape4(self): for is_training in [True, False]: x_shape = [5, 7, 11, 4] for dtype in [np.float16, np.float32]: if test.is_gpu_available(cuda_only=True): self._test_gradient( x_shape, dtype, [7], np.float32, use_gpu=True, data_format='NCHW', is_training=is_training) self._test_gradient( x_shape, dtype, [4], np.float32, use_gpu=True, data_format='NHWC', is_training=is_training) self._test_gradient( x_shape, dtype, [4], np.float32, use_gpu=False, data_format='NHWC', is_training=is_training) def testBatchNormGradShape5(self): for is_training in [True, False]: x_shape = [0, 7, 11, 4] for dtype in [np.float16, 
np.float32]: if test.is_gpu_available(cuda_only=True): self._test_gradient( x_shape, dtype, [7], np.float32, use_gpu=True, data_format='NCHW', is_training=is_training) self._test_gradient( x_shape, dtype, [4], np.float32, use_gpu=True, data_format='NHWC', is_training=is_training) self._test_gradient( x_shape, dtype, [4], np.float32, use_gpu=False, data_format='NHWC', is_training=is_training) def _testBatchNormGradGrad(self, config): shape = config['shape'] err_tolerance = config['err_tolerance'] dtype = config['dtype'] for is_training in [True, False]: if test.is_gpu_available(cuda_only=True): self._test_grad_grad( shape, dtype, [shape[3]], np.float32, use_gpu=True, data_format='NHWC', is_training=is_training, err_tolerance=err_tolerance) self._test_grad_grad( shape, dtype, [shape[1]], np.float32, use_gpu=True, data_format='NCHW', is_training=is_training, err_tolerance=err_tolerance) self._test_grad_grad( shape, dtype, [shape[3]], np.float32, use_gpu=False, data_format='NHWC', is_training=is_training, err_tolerance=err_tolerance) def testBatchNormGradGradConfig1(self): config = { 'shape': [2, 3, 4, 5], 'err_tolerance': 1e-2, 'dtype': np.float32, } self._testBatchNormGradGrad(config) def testBatchNormGradGradConfig2(self): config = { 'shape': [2, 3, 2, 2], 'err_tolerance': 1e-3, 'dtype': np.float32, } self._testBatchNormGradGrad(config) def testBatchNormGradGradConfig3(self): config = { 'shape': [2, 3, 4, 5], 'err_tolerance': 1e-2, 'dtype': np.float16, } self._testBatchNormGradGrad(config) def testBatchNormGradGradConfig4(self): config = { 'shape': [2, 3, 2, 2], 'err_tolerance': 2e-3, 'dtype': np.float16, } self._testBatchNormGradGrad(config) if __name__ == '__main__': test.main()
#
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division

from collections import namedtuple, OrderedDict
from functools import partial
from math import isnan

import logbook
import numpy as np
import pandas as pd
from six import iteritems, itervalues, PY2

from zipline.assets import Future
from zipline.finance.transaction import Transaction
import zipline.protocol as zp
from zipline.utils.sentinel import sentinel

from .position import Position
from ._finance_ext import (
    PositionStats,
    calculate_position_tracker_stats,
    update_position_last_sale_prices,
)

log = logbook.Logger('Performance')


class PositionTracker(object):
    """The current state of the positions held.

    Parameters
    ----------
    data_frequency : {'daily', 'minute'}
        The data frequency of the simulation.
    """
    def __init__(self, data_frequency):
        self.positions = OrderedDict()

        self._unpaid_dividends = {}
        self._unpaid_stock_dividends = {}
        self._positions_store = zp.Positions()

        self.data_frequency = data_frequency

        # cache the stats until something alters our positions
        self._dirty_stats = True
        self._stats = PositionStats.new()

    def update_position(self,
                        asset,
                        amount=None,
                        last_sale_price=None,
                        last_sale_date=None,
                        cost_basis=None):
        """Update fields of the position for ``asset``, creating the
        position if it does not yet exist. Only the fields passed as
        non-None are modified.
        """
        self._dirty_stats = True

        if asset not in self.positions:
            position = Position(asset)
            self.positions[asset] = position
        else:
            position = self.positions[asset]

        if amount is not None:
            position.amount = amount
        if last_sale_price is not None:
            position.last_sale_price = last_sale_price
        if last_sale_date is not None:
            position.last_sale_date = last_sale_date
        if cost_basis is not None:
            position.cost_basis = cost_basis

    def execute_transaction(self, txn):
        """Apply a transaction to the position for its asset, creating the
        position if needed and dropping it once its amount reaches zero.
        """
        self._dirty_stats = True

        asset = txn.asset

        if asset not in self.positions:
            position = Position(asset)
            self.positions[asset] = position
        else:
            position = self.positions[asset]

        position.update(txn)

        if position.amount == 0:
            del self.positions[asset]

            try:
                # if this position exists in our user-facing dictionary,
                # remove it as well.
                del self._positions_store[asset]
            except KeyError:
                pass

    def handle_commission(self, asset, cost):
        # Adjust the cost basis of the stock if we own it
        if asset in self.positions:
            self._dirty_stats = True
            self.positions[asset].adjust_commission_cost_basis(asset, cost)

    def handle_splits(self, splits):
        """Processes a list of splits by modifying any positions as needed.

        Parameters
        ----------
        splits: list
            A list of splits.  Each split is a tuple of (asset, ratio).

        Returns
        -------
        int: The leftover cash from fractional shares after modifying each
            position.
        """
        total_leftover_cash = 0

        for asset, ratio in splits:
            if asset in self.positions:
                self._dirty_stats = True

                # Make the position object handle the split. It returns the
                # leftover cash from a fractional share, if there is any.
                position = self.positions[asset]
                leftover_cash = position.handle_split(asset, ratio)
                total_leftover_cash += leftover_cash

        return total_leftover_cash

    def earn_dividends(self, cash_dividends, stock_dividends):
        """Given a list of dividends whose ex_dates are all the next trading
        day, calculate and store the cash and/or stock payments to be paid on
        each dividend's pay date.

        Parameters
        ----------
        cash_dividends : iterable of (asset, amount, pay_date) namedtuples

        stock_dividends: iterable of (asset, payment_asset, ratio, pay_date)
            namedtuples.
        """
        for cash_dividend in cash_dividends:
            self._dirty_stats = True  # only mark dirty if we pay a dividend

            # Store the earned dividends so that they can be paid on the
            # dividends' pay_dates.
            div_owed = self.positions[cash_dividend.asset].earn_dividend(
                cash_dividend,
            )
            try:
                self._unpaid_dividends[cash_dividend.pay_date].append(
                    div_owed)
            except KeyError:
                self._unpaid_dividends[cash_dividend.pay_date] = [div_owed]

        for stock_dividend in stock_dividends:
            self._dirty_stats = True  # only mark dirty if we pay a dividend

            div_owed = self.positions[
                stock_dividend.asset
            ].earn_stock_dividend(stock_dividend)
            try:
                self._unpaid_stock_dividends[stock_dividend.pay_date].append(
                    div_owed,
                )
            except KeyError:
                self._unpaid_stock_dividends[stock_dividend.pay_date] = [
                    div_owed,
                ]

    def pay_dividends(self, next_trading_day):
        """
        Returns a cash payment based on the dividends that should be paid out
        according to the accumulated bookkeeping of earned, unpaid, and stock
        dividends.
        """
        net_cash_payment = 0.0

        try:
            payments = self._unpaid_dividends[next_trading_day]
            # Mark these dividends as paid by dropping them from our unpaid
            del self._unpaid_dividends[next_trading_day]
        except KeyError:
            payments = []

        # representing the fact that we're required to reimburse the owner of
        # the stock for any dividends paid while borrowing.
        for payment in payments:
            net_cash_payment += payment['amount']

        # Add stock for any stock dividends paid.  Again, the values here may
        # be negative in the case of short positions.
        try:
            stock_payments = self._unpaid_stock_dividends[next_trading_day]
        except KeyError:
            stock_payments = []

        for stock_payment in stock_payments:
            payment_asset = stock_payment['payment_asset']
            share_count = stock_payment['share_count']
            # note we create a Position for stock dividend if we don't
            # already own the asset
            if payment_asset in self.positions:
                position = self.positions[payment_asset]
            else:
                position = self.positions[payment_asset] = Position(
                    payment_asset,
                )

            position.amount += share_count

        return net_cash_payment

    def maybe_create_close_position_transaction(self, asset, dt, data_portal):
        """Return a Transaction that closes out the position in ``asset``
        at dt, or None if there is no open position. The close price is the
        spot price at dt, falling back to the position's last sale price
        when the spot price is NaN.
        """
        if not self.positions.get(asset):
            return None

        amount = self.positions.get(asset).amount
        price = data_portal.get_spot_value(
            asset, 'price', dt, self.data_frequency)

        # Get the last traded price if price is no longer available
        if isnan(price):
            price = self.positions.get(asset).last_sale_price

        return Transaction(
            asset=asset,
            amount=-amount,
            dt=dt,
            price=price,
            order_id=None,
        )

    def get_positions(self):
        """Return the user-facing Positions mapping, synced with the
        internal positions dict.
        """
        positions = self._positions_store

        for asset, pos in iteritems(self.positions):
            # Adds the new position if we didn't have one before, or overwrite
            # one we have currently
            positions[asset] = pos.protocol_position

        return positions

    def get_position_list(self):
        """Return the non-empty positions as a list of dicts."""
        return [
            pos.to_dict()
            for asset, pos in iteritems(self.positions)
            if pos.amount != 0
        ]

    def sync_last_sale_prices(self,
                              dt,
                              data_portal,
                              handle_non_market_minutes=False):
        """Refresh every position's last_sale_price from ``data_portal`` as
        of dt (or the previous market minute when
        handle_non_market_minutes is True), and invalidate cached stats.
        """
        self._dirty_stats = True

        if handle_non_market_minutes:
            previous_minute = data_portal.trading_calendar.previous_minute(dt)
            get_price = partial(
                data_portal.get_adjusted_value,
                field='price',
                dt=previous_minute,
                perspective_dt=dt,
                data_frequency=self.data_frequency,
            )

        else:
            get_price = partial(
                data_portal.get_scalar_asset_spot_value,
                field='price',
                dt=dt,
                data_frequency=self.data_frequency,
            )

        update_position_last_sale_prices(self.positions, get_price, dt)

    @property
    def stats(self):
        """The current status of the positions.

        Returns
        -------
        stats : PositionStats
            The current stats position stats.

        Notes
        -----
        This is cached, repeated access will not recompute the stats until
        the stats may have changed.
        """
        if self._dirty_stats:
            calculate_position_tracker_stats(self.positions, self._stats)
            self._dirty_stats = False

        return self._stats


if PY2:
    # OrderedDict.move_to_end does not exist on Python 2; emulate it.
    def move_to_end(ordered_dict, key, last=False):
        if last:
            ordered_dict[key] = ordered_dict.pop(key)
        else:
            # please don't do this in python 2 ;_;
            new_first_element = ordered_dict.pop(key)

            # the items (without the given key) in the order they were
            # inserted
            items = ordered_dict.items()

            # reset the ordered_dict to re-insert in the new order
            ordered_dict.clear()

            ordered_dict[key] = new_first_element

            # add the items back in their original order
            ordered_dict.update(items)
else:
    move_to_end = OrderedDict.move_to_end


PeriodStats = namedtuple(
    'PeriodStats',
    'net_liquidation gross_leverage net_leverage',
)


not_overridden = sentinel(
    'not_overridden',
    'Mark that an account field has not been overridden',
)


class Ledger(object):
    """The ledger tracks all orders and transactions as well as the current
    state of the portfolio and positions.

    Attributes
    ----------
    portfolio : zipline.protocol.Portfolio
        The updated portfolio being managed.
    account : zipline.protocol.Account
        The updated account being managed.
    position_tracker : PositionTracker
        The current set of positions.
    todays_returns : float
        The current day's returns. In minute emission mode, this is the
        partial day's returns. In daily emission mode, this is
        ``daily_returns[session]``.
    daily_returns_series : pd.Series
        The daily returns series. Days that have not yet finished will hold
        a value of ``np.nan``.
    daily_returns_array : np.ndarray
        The daily returns as an ndarray. Days that have not yet finished will
        hold a value of ``np.nan``.
    """
    def __init__(self, trading_sessions, capital_base, data_frequency):
        if len(trading_sessions):
            start = trading_sessions[0]
        else:
            start = None

        # Have some fields of the portfolio changed? This should be accessed
        # through ``self._dirty_portfolio``
        self.__dirty_portfolio = False
        self._immutable_portfolio = zp.Portfolio(start, capital_base)
        self._portfolio = zp.MutableView(self._immutable_portfolio)

        self.daily_returns_series = pd.Series(
            np.nan,
            index=trading_sessions,
        )
        # Get a view into the storage of the returns series. Metrics
        # can access this directly in minute mode for performance reasons.
        self.daily_returns_array = self.daily_returns_series.values

        self._previous_total_returns = 0

        # this is a component of the cache key for the account
        self._position_stats = None

        # Have some fields of the account changed?
        self._dirty_account = True
        self._immutable_account = zp.Account()
        self._account = zp.MutableView(self._immutable_account)

        # The broker blotter can override some fields on the account. This is
        # way to tangled up at the moment but we aren't fixing it today.
        self._account_overrides = {}

        self.position_tracker = PositionTracker(data_frequency)

        self._processed_transactions = {}

        self._orders_by_modified = {}
        self._orders_by_id = OrderedDict()

        # Keyed by asset, the previous last sale price of positions with
        # payouts on price differences, e.g. Futures.
        #
        # This dt is not the previous minute to the minute for which the
        # calculation is done, but the last sale price either before the
        # period start, or when the price at execution.
        self._payout_last_sale_prices = {}

    @property
    def todays_returns(self):
        # compute today's returns in returns space instead of portfolio-value
        # space to work even when we have capital changes
        return (
            (self.portfolio.returns + 1) /
            (self._previous_total_returns + 1) -
            1
        )

    @property
    def _dirty_portfolio(self):
        return self.__dirty_portfolio

    @_dirty_portfolio.setter
    def _dirty_portfolio(self, value):
        if value:
            # marking the portfolio as dirty also marks the account as dirty
            self.__dirty_portfolio = self._dirty_account = value
        else:
            self.__dirty_portfolio = value

    def start_of_session(self, session_label):
        """Reset per-session bookkeeping and snapshot cumulative returns."""
        self._processed_transactions.clear()
        self._orders_by_modified.clear()
        self._orders_by_id.clear()

        # Save the previous day's total returns so that ``todays_returns``
        # produces returns since yesterday. This does not happen in
        # ``end_of_session`` because we want ``todays_returns`` to produce the
        # correct value in metric ``end_of_session`` handlers.
        self._previous_total_returns = self.portfolio.returns

    def end_of_bar(self, session_ix):
        # make daily_returns hold the partial returns, this saves many
        # metrics from doing a concat and copying all of the previous
        # returns
        self.daily_returns_array[session_ix] = self.todays_returns

    def end_of_session(self, session_ix):
        # save the daily returns time-series
        self.daily_returns_series[session_ix] = self.todays_returns

    def sync_last_sale_prices(self,
                              dt,
                              data_portal,
                              handle_non_market_minutes=False):
        """Delegate price syncing to the position tracker and mark the
        portfolio dirty so derived values are recomputed on next access.
        """
        self.position_tracker.sync_last_sale_prices(
            dt,
            data_portal,
            handle_non_market_minutes=handle_non_market_minutes,
        )
        self._dirty_portfolio = True

    @staticmethod
    def _calculate_payout(multiplier, amount, old_price, price):
        # mark-to-market payout for price-difference-settled instruments
        return (price - old_price) * multiplier * amount

    def _cash_flow(self, amount):
        # record a cash movement; any cash change dirties the portfolio
        self._dirty_portfolio = True
        p = self._portfolio
        p.cash_flow += amount
        p.cash += amount

    def process_transaction(self, transaction):
        """Add a transaction to ledger, updating the current state as needed.

        Parameters
        ----------
        transaction : zp.Transaction
            The transaction to execute.
        """
        asset = transaction.asset
        if isinstance(asset, Future):
            try:
                old_price = self._payout_last_sale_prices[asset]
            except KeyError:
                self._payout_last_sale_prices[asset] = transaction.price
            else:
                position = self.position_tracker.positions[asset]
                amount = position.amount
                price = transaction.price

                self._cash_flow(
                    self._calculate_payout(
                        asset.price_multiplier,
                        amount,
                        old_price,
                        price,
                    ),
                )

                if amount + transaction.amount == 0:
                    del self._payout_last_sale_prices[asset]
                else:
                    self._payout_last_sale_prices[asset] = price
        else:
            self._cash_flow(-(transaction.price * transaction.amount))

        self.position_tracker.execute_transaction(transaction)

        # we only ever want the dict form from now on
        transaction_dict = transaction.to_dict()
        try:
            self._processed_transactions[transaction.dt].append(
                transaction_dict,
            )
        except KeyError:
            self._processed_transactions[transaction.dt] = [transaction_dict]

    def process_splits(self, splits):
        """Processes a list of splits by modifying any positions as needed.

        Parameters
        ----------
        splits: list[(Asset, float)]
            A list of splits. Each split is a tuple of (asset, ratio).
        """
        leftover_cash = self.position_tracker.handle_splits(splits)
        if leftover_cash > 0:
            self._cash_flow(leftover_cash)

    def process_order(self, order):
        """Keep track of an order that was placed.

        Parameters
        ----------
        order : zp.Order
            The order to record.
        """
        try:
            dt_orders = self._orders_by_modified[order.dt]
        except KeyError:
            self._orders_by_modified[order.dt] = OrderedDict([
                (order.id, order),
            ])
            self._orders_by_id[order.id] = order
        else:
            self._orders_by_id[order.id] = dt_orders[order.id] = order
            # to preserve the order of the orders by modified date
            move_to_end(dt_orders, order.id, last=True)

        move_to_end(self._orders_by_id, order.id, last=True)

    def process_commission(self, commission):
        """Process the commission.

        Parameters
        ----------
        commission : zp.Event
            The commission being paid.
        """
        asset = commission['asset']
        cost = commission['cost']

        self.position_tracker.handle_commission(asset, cost)
        self._cash_flow(-cost)

    def close_position(self, asset, dt, data_portal):
        """Close any open position in ``asset`` by generating and processing
        a closing transaction (no-op when no position is held).
        """
        txn = self.position_tracker.maybe_create_close_position_transaction(
            asset,
            dt,
            data_portal,
        )
        if txn is not None:
            self.process_transaction(txn)

    def process_dividends(self, next_session, asset_finder, adjustment_reader):
        """Process dividends for the next session.

        This will earn us any dividends whose ex-date is the next session as
        well as paying out any dividends whose pay-date is the next session
        """
        position_tracker = self.position_tracker

        # Earn dividends whose ex_date is the next trading day. We need to
        # check if we own any of these stocks so we know to pay them out when
        # the pay date comes.
        held_sids = set(position_tracker.positions)
        if held_sids:
            cash_dividends = adjustment_reader.get_dividends_with_ex_date(
                held_sids,
                next_session,
                asset_finder
            )
            stock_dividends = (
                adjustment_reader.get_stock_dividends_with_ex_date(
                    held_sids,
                    next_session,
                    asset_finder
                )
            )

            # Earning a dividend just marks that we need to get paid out on
            # the dividend's pay-date. This does not affect our cash yet.
            position_tracker.earn_dividends(
                cash_dividends,
                stock_dividends,
            )

        # Pay out the dividends whose pay-date is the next session. This does
        # affect out cash.
        self._cash_flow(
            position_tracker.pay_dividends(
                next_session,
            ),
        )

    def capital_change(self, change_amount):
        """Apply a capital change by adjusting cash and portfolio value in
        lockstep (so the portfolio does not become dirty).
        """
        self.update_portfolio()
        portfolio = self._portfolio

        # we update the cash and total value so this is not dirty
        portfolio.portfolio_value += change_amount
        portfolio.cash += change_amount

    def transactions(self, dt=None):
        """Retrieve the dict-form of all of the transactions in a given bar
        or for the whole simulation.

        Parameters
        ----------
        dt : pd.Timestamp or None, optional
            The particular datetime to look up transactions for. If not
            passed, or None is explicitly passed, all of the transactions
            will be returned.

        Returns
        -------
        transactions : list[dict]
            The transaction information.
        """
        if dt is None:
            # flatten the by-day transactions
            return [
                txn
                for by_day in itervalues(self._processed_transactions)
                for txn in by_day
            ]

        return self._processed_transactions.get(dt, [])

    def orders(self, dt=None):
        """Retrieve the dict-form of all of the orders in a given bar or for
        the whole simulation.

        Parameters
        ----------
        dt : pd.Timestamp or None, optional
            The particular datetime to look up order for. If not passed, or
            None is explicitly passed, all of the orders will be returned.

        Returns
        -------
        orders : list[dict]
            The order information.
        """
        if dt is None:
            # orders by id is already flattened
            return [o.to_dict() for o in itervalues(self._orders_by_id)]

        return [
            o.to_dict()
            for o in itervalues(self._orders_by_modified.get(dt, {}))
        ]

    @property
    def positions(self):
        return self.position_tracker.get_position_list()

    def _get_payout_total(self, positions):
        # Sum mark-to-market payouts since the last recorded payout price,
        # advancing the recorded price to each position's last sale price.
        calculate_payout = self._calculate_payout
        payout_last_sale_prices = self._payout_last_sale_prices

        total = 0
        for asset, old_price in iteritems(payout_last_sale_prices):
            position = positions[asset]
            payout_last_sale_prices[asset] = price = position.last_sale_price
            amount = position.amount
            total += calculate_payout(
                asset.price_multiplier,
                amount,
                old_price,
                price,
            )

        return total

    def update_portfolio(self):
        """Force a computation of the current portfolio state.
        """
        if not self._dirty_portfolio:
            return

        portfolio = self._portfolio
        pt = self.position_tracker

        portfolio.positions = pt.get_positions()
        position_stats = pt.stats

        portfolio.positions_value = position_value = (
            position_stats.net_value
        )
        portfolio.positions_exposure = position_stats.net_exposure
        self._cash_flow(self._get_payout_total(pt.positions))

        start_value = portfolio.portfolio_value

        # update the new starting value
        portfolio.portfolio_value = end_value = portfolio.cash + \
            position_value

        pnl = end_value - start_value
        if start_value != 0:
            returns = pnl / start_value
        else:
            returns = 0.0

        portfolio.pnl += pnl
        portfolio.returns = (
            (1 + portfolio.returns) *
            (1 + returns) -
            1
        )

        # the portfolio has been fully synced
        self._dirty_portfolio = False

    @property
    def portfolio(self):
        """Compute the current portfolio.

        Notes
        -----
        This is cached, repeated access will not recompute the portfolio
        until the portfolio may have changed.
        """
        self.update_portfolio()
        return self._immutable_portfolio

    def calculate_period_stats(self):
        """Return (net_liquidation, gross_leverage, net_leverage) for the
        current portfolio; leverages are inf for a zero-value portfolio.
        """
        position_stats = self.position_tracker.stats
        portfolio_value = self.portfolio.portfolio_value

        if portfolio_value == 0:
            gross_leverage = net_leverage = np.inf
        else:
            gross_leverage = position_stats.gross_exposure / portfolio_value
            net_leverage = position_stats.net_exposure / portfolio_value

        return portfolio_value, gross_leverage, net_leverage

    def override_account_fields(self,
                                settled_cash=not_overridden,
                                accrued_interest=not_overridden,
                                buying_power=not_overridden,
                                equity_with_loan=not_overridden,
                                total_positions_value=not_overridden,
                                total_positions_exposure=not_overridden,
                                regt_equity=not_overridden,
                                regt_margin=not_overridden,
                                initial_margin_requirement=not_overridden,
                                maintenance_margin_requirement=not_overridden,
                                available_funds=not_overridden,
                                excess_liquidity=not_overridden,
                                cushion=not_overridden,
                                day_trades_remaining=not_overridden,
                                leverage=not_overridden,
                                net_leverage=not_overridden,
                                net_liquidation=not_overridden):
        """Override fields on ``self.account``.
        """
        # mark that the portfolio is dirty to override the fields again
        self._dirty_account = True
        # collect the explicitly-passed keyword arguments via locals();
        # anything left at the ``not_overridden`` sentinel is skipped
        self._account_overrides = kwargs = {
            k: v for k, v in locals().items() if v is not not_overridden
        }
        del kwargs['self']

    @property
    def account(self):
        if self._dirty_account:
            portfolio = self.portfolio

            account = self._account

            # If no attribute is found in the ``_account_overrides`` resort
            # to the following default values. If an attribute is found use
            # the existing value. For instance, a broker may provide updates
            # to these attributes. In this case we do not want to over write
            # the broker values with the default values.
            account.settled_cash = portfolio.cash
            account.accrued_interest = 0.0
            account.buying_power = np.inf
            account.equity_with_loan = portfolio.portfolio_value
            account.total_positions_value = (
                portfolio.portfolio_value - portfolio.cash
            )
            account.total_positions_exposure = (
                portfolio.positions_exposure
            )
            account.regt_equity = portfolio.cash
            account.regt_margin = np.inf
            account.initial_margin_requirement = 0.0
            account.maintenance_margin_requirement = 0.0
            account.available_funds = portfolio.cash
            account.excess_liquidity = portfolio.cash
            account.cushion = (
                (portfolio.cash / portfolio.portfolio_value)
                if portfolio.portfolio_value else
                np.nan
            )
            account.day_trades_remaining = np.inf
            (account.net_liquidation,
             account.gross_leverage,
             account.net_leverage) = self.calculate_period_stats()

            account.leverage = account.gross_leverage

            # apply the overrides
            for k, v in iteritems(self._account_overrides):
                setattr(account, k, v)

            # the account has been fully synced
            self._dirty_account = False

        return self._immutable_account
import builtins
from io import StringIO

import numpy as np
import pytest

from pandas._libs.tslibs import iNaT
from pandas.errors import UnsupportedFunctionCall

import pandas as pd
from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Series,
    Timestamp,
    date_range,
)
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td


@pytest.fixture(
    params=[np.int32, np.int64, np.float32, np.float64, "Int64", "Float64"],
    ids=["np.int32", "np.int64", "np.float32", "np.float64", "Int64", "Float64"],
)
def dtypes_for_minmax(request):
    """
    Fixture of dtypes with min and max values used for testing cummin and cummax
    """
    dtype = request.param

    # Nullable dtypes ("Int64"/"Float64") are backed by the corresponding
    # numpy dtype; use that to look up the representable min/max.
    np_type = dtype
    if dtype == "Int64":
        np_type = np.int64
    elif dtype == "Float64":
        np_type = np.float64

    min_val = (
        np.iinfo(np_type).min
        if np.dtype(np_type).kind == "i"
        else np.finfo(np_type).min
    )
    max_val = (
        np.iinfo(np_type).max
        if np.dtype(np_type).kind == "i"
        else np.finfo(np_type).max
    )

    return (dtype, min_val, max_val)


def test_max_min_non_numeric():
    # #2700 - groupby max/min should retain object (string) columns
    aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})

    result = aa.groupby("nn").max()
    assert "ss" in result

    result = aa.groupby("nn").max(numeric_only=False)
    assert "ss" in result

    result = aa.groupby("nn").min()
    assert "ss" in result

    result = aa.groupby("nn").min(numeric_only=False)
    assert "ss" in result


def test_max_min_object_multiple_columns(using_array_manager):
    # GH#41111 case where the aggregation is valid for some columns but not
    # others; we split object blocks column-wise, consistent with
    # DataFrame._reduce
    df = DataFrame(
        {
            "A": [1, 1, 2, 2, 3],
            "B": [1, "foo", 2, "bar", False],
            "C": ["a", "b", "c", "d", "e"],
        }
    )
    df._consolidate_inplace()  # should already be consolidated, but double-check
    if not using_array_manager:
        assert len(df._mgr.blocks) == 2

    gb = df.groupby("A")

    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
        result = gb.max(numeric_only=False)
    # "max" is valid for column "C" but not for "B"
    ei = Index([1, 2, 3], name="A")
    expected = DataFrame({"C": ["b", "d", "e"]}, index=ei)
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match="Dropping invalid"):
        result = gb.min(numeric_only=False)
    # "min" is valid for column "C" but not for "B"
    ei = Index([1, 2, 3], name="A")
    expected = DataFrame({"C": ["a", "c", "e"]}, index=ei)
    tm.assert_frame_equal(result, expected)


def test_min_date_with_nans():
    # GH26321 - min over datetime.date objects with NaN in a sibling column
    dates = pd.to_datetime(
        Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
    ).dt.date
    df = DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})

    result = df.groupby("b", as_index=False)["c"].min()["c"]
    expected = pd.to_datetime(
        Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
    ).dt.date
    tm.assert_series_equal(result, expected)

    result = df.groupby("b")["c"].min()
    expected.index.name = "b"
    tm.assert_series_equal(result, expected)


def test_intercept_builtin_sum():
    # Passing the builtin ``sum`` to agg/apply should match GroupBy.sum().
    s = Series([1.0, 2.0, np.nan, 3.0])
    grouped = s.groupby([0, 1, 2, 2])

    result = grouped.agg(builtins.sum)
    result2 = grouped.apply(builtins.sum)
    expected = grouped.sum()
    tm.assert_series_equal(result, expected)
    tm.assert_series_equal(result2, expected)


@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]])  # Single key  # Multi-key
def test_builtins_apply(keys, f):
    # see gh-8155: applying a Python builtin should dispatch to the
    # equivalent groupby reduction.
    df = DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
    df["jolie"] = np.random.randn(1000)

    fname = f.__name__
    result = df.groupby(keys).apply(f)
    ngroups = len(df.drop_duplicates(subset=keys))

    assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
    assert result.shape == (ngroups, 3), assert_msg

    tm.assert_frame_equal(
        result,  # numpy's equivalent function
        df.groupby(keys).apply(getattr(np, fname)),
    )

    if f != sum:
        expected = df.groupby(keys).agg(fname).reset_index()
        expected.set_index(keys, inplace=True, drop=False)
        tm.assert_frame_equal(result, expected, check_dtype=False)

    tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())


class TestNumericOnly:
    # make sure that we are passing thru kwargs to our agg functions

    @pytest.fixture
    def df(self):
        # GH3668
        # GH5724
        # One column of each dtype family so each reduction's dtype-dropping
        # behavior can be checked.
        df = DataFrame(
            {
                "group": [1, 1, 2],
                "int": [1, 2, 3],
                "float": [4.0, 5.0, 6.0],
                "string": list("abc"),
                "category_string": Series(list("abc")).astype("category"),
                "category_int": [7, 8, 9],
                "datetime": date_range("20130101", periods=3),
                "datetimetz": date_range("20130101", periods=3, tz="US/Eastern"),
                "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
            },
            columns=[
                "group",
                "int",
                "float",
                "string",
                "category_string",
                "category_int",
                "datetime",
                "datetimetz",
                "timedelta",
            ],
        )
        return df

    @pytest.mark.parametrize("method", ["mean", "median"])
    def test_averages(self, df, method):
        # mean / median
        expected_columns_numeric = Index(["int", "float", "category_int"])

        gb = df.groupby("group")
        expected = DataFrame(
            {
                "category_int": [7.5, 9],
                "float": [4.5, 6.0],
                "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
                "int": [1.5, 3],
                "datetime": [
                    Timestamp("2013-01-01 12:00:00"),
                    Timestamp("2013-01-03 00:00:00"),
                ],
                "datetimetz": [
                    Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
                    Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
                ],
            },
            index=Index([1, 2], name="group"),
            columns=[
                "int",
                "float",
                "category_int",
                "datetime",
                "datetimetz",
                "timedelta",
            ],
        )

        with tm.assert_produces_warning(
            FutureWarning, match="Dropping invalid", check_stacklevel=False
        ):
            result = getattr(gb, method)(numeric_only=False)

        tm.assert_frame_equal(result.reindex_like(expected), expected)

        expected_columns = expected.columns

        self._check(df, method, expected_columns, expected_columns_numeric)

    @pytest.mark.parametrize("method", ["min", "max"])
    def test_extrema(self, df, method):
        # TODO: min, max *should* handle
        # categorical (ordered) dtype
        expected_columns = Index(
            [
                "int",
                "float",
                "string",
                "category_int",
                "datetime",
                "datetimetz",
                "timedelta",
            ]
        )
        expected_columns_numeric = expected_columns

        self._check(df, method, expected_columns, expected_columns_numeric)

    @pytest.mark.parametrize("method", ["first", "last"])
    def test_first_last(self, df, method):
        # first/last keep every column, including unordered categoricals.
        expected_columns = Index(
            [
                "int",
                "float",
                "string",
                "category_string",
                "category_int",
                "datetime",
                "datetimetz",
                "timedelta",
            ]
        )
        expected_columns_numeric = expected_columns

        self._check(df, method, expected_columns, expected_columns_numeric)

    @pytest.mark.parametrize("method", ["sum", "cumsum"])
    def test_sum_cumsum(self, df, method):
        expected_columns_numeric = Index(["int", "float", "category_int"])
        expected_columns = Index(
            ["int", "float", "string", "category_int", "timedelta"]
        )
        if method == "cumsum":
            # cumsum loses string
            expected_columns = Index(["int", "float", "category_int", "timedelta"])

        self._check(df, method, expected_columns, expected_columns_numeric)

    @pytest.mark.parametrize("method", ["prod", "cumprod"])
    def test_prod_cumprod(self, df, method):
        # Products are only defined for the numeric columns.
        expected_columns = Index(["int", "float", "category_int"])
        expected_columns_numeric = expected_columns

        self._check(df, method, expected_columns, expected_columns_numeric)

    @pytest.mark.parametrize("method", ["cummin", "cummax"])
    def test_cummin_cummax(self, df, method):
        # like min, max, but don't include strings
        expected_columns = Index(
            ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
        )

        # GH#15561: numeric_only=False set by default like min/max
        expected_columns_numeric = expected_columns

        self._check(df, method, expected_columns, expected_columns_numeric)

    def _check(self, df, method, expected_columns, expected_columns_numeric):
        # Shared assertion helper: run ``method`` with the default and with
        # numeric_only=False and compare the surviving column sets.
        gb = df.groupby("group")

        # cummin, cummax dont have numeric_only kwarg, always use False
        warn = None
        if method in ["cummin", "cummax"]:
            # these dont have numeric_only kwarg, always use False
            warn = FutureWarning
        elif method in ["min", "max"]:
            # these have numeric_only kwarg, but default to False
            warn = FutureWarning

        with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
            result = getattr(gb, method)()

        tm.assert_index_equal(result.columns, expected_columns_numeric)

        # GH#41475 deprecated silently ignoring nuisance columns
        warn = None
        if len(expected_columns) < len(gb._obj_with_exclusions.columns):
            warn = FutureWarning
        with tm.assert_produces_warning(warn, match="Dropping invalid columns"):
            result = getattr(gb, method)(numeric_only=False)

        tm.assert_index_equal(result.columns, expected_columns)


class TestGroupByNonCythonPaths:
    # GH#5610 non-cython calls should not include the grouper
    # Tests for code not expected to go through cython paths.

    @pytest.fixture
    def df(self):
        df = DataFrame(
            [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]],
            columns=["A", "B", "C"],
        )
        return df

    @pytest.fixture
    def gb(self, df):
        # Default groupby: the key becomes the index.
        gb = df.groupby("A")
        return gb

    @pytest.fixture
    def gni(self, df):
        # "gni" = group, no index (as_index=False).
        gni = df.groupby("A", as_index=False)
        return gni

    # TODO: non-unique columns, as_index=False

    @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
    def test_idxmax(self, gb):
        # object dtype so idxmax goes through _aggregate_item_by_item
        # GH#5610
        # non-cython calls should not include the grouper
        expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
        expected.index.name = "A"
        result = gb.idxmax()
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
    def test_idxmin(self, gb):
        # object dtype so idxmax goes through _aggregate_item_by_item
        # GH#5610
        # non-cython calls should not include the grouper
        expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
        expected.index.name = "A"
        result = gb.idxmin()
        tm.assert_frame_equal(result, expected)

    def test_mad(self, gb, gni):
        # mad
        expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
        expected.index.name = "A"
        result = gb.mad()
        tm.assert_frame_equal(result, expected)

        # With as_index=False the key comes back as a regular column.
        expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
        result = gni.mad()
        tm.assert_frame_equal(result, expected)

    def test_describe(self, df, gb, gni):
        # describe
        expected_index = Index([1, 3], name="A")
        expected_col = MultiIndex(
            levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
            codes=[[0] * 8, list(range(8))],
        )
        expected = DataFrame(
            [
                [1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
                [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
            ],
            index=expected_index,
            columns=expected_col,
        )
        result = gb.describe()
        tm.assert_frame_equal(result, expected)

        # as_index=False: compare against per-group describes stacked by hand.
        expected = pd.concat(
            [
                df[df.A == 1].describe().unstack().to_frame().T,
                df[df.A == 3].describe().unstack().to_frame().T,
            ]
        )
        expected.index = Index([0, 1])
        result = gni.describe()
        tm.assert_frame_equal(result, expected)


def test_cython_api2():
    # this takes the fast apply path

    # cumsum (GH5614)
    df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
    expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
    result = df.groupby("A").cumsum()
    tm.assert_frame_equal(result, expected)

    # GH 5755 - cumsum is a transformer and should ignore as_index
    result = df.groupby("A", as_index=False).cumsum()
    tm.assert_frame_equal(result, expected)

    # GH 13994
    result = df.groupby("A").cumsum(axis=1)
    expected = df.cumsum(axis=1)
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").cumprod(axis=1)
    expected = df.cumprod(axis=1)
    tm.assert_frame_equal(result, expected)


def test_cython_median():
    # Cython median must match the generic nanmedian aggregation, with NaNs
    # both in the values and in the grouping labels.
    df = DataFrame(np.random.randn(1000))
    df.values[::2] = np.nan

    labels = np.random.randint(0, 50, size=1000).astype(float)
    labels[::17] = np.nan

    result = df.groupby(labels).median()
    exp = df.groupby(labels).agg(nanops.nanmedian)
    tm.assert_frame_equal(result, exp)

    df = DataFrame(np.random.randn(1000, 5))
    rs = df.groupby(labels).agg(np.median)
    xp = df.groupby(labels).median()
    tm.assert_frame_equal(rs, xp)


def test_median_empty_bins(observed):
    # Median over categorical bins, some of which may be empty.
    df = DataFrame(np.random.randint(0, 44, 500))

    grps = range(0, 55, 5)
    bins = pd.cut(df[0], grps)

    result = df.groupby(bins, observed=observed).median()
    expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
    # TODO: GH 41137
    tm.assert_frame_equal(result, expected, check_dtype=False)


@pytest.mark.parametrize(
    "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
    "method,data",
    [
        ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
        ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
    ],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
    # GH9311, GH6620 - non-arithmetic aggregations should preserve the input
    # dtype (except count, which always yields int64).
    df = DataFrame(
        [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
    )

    df["b"] = df.b.astype(dtype)

    if "args" not in data:
        data["args"] = []

    if "out_type" in data:
        out_type = data["out_type"]
    else:
        out_type = dtype

    exp = data["df"]
    df_out = DataFrame(exp)

    df_out["b"] = df_out.b.astype(out_type)
    df_out.set_index("a", inplace=True)

    grpd = df.groupby("a")
    t = getattr(grpd, method)(*data["args"])
    tm.assert_frame_equal(t, df_out)


@pytest.mark.parametrize(
    "i",
    [
        (
            Timestamp("2011-01-15 12:50:28.502376"),
            Timestamp("2011-01-20 12:50:28.593448"),
        ),
        (24650000000000001, 24650000000000002),
    ],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
    # see gh-6620, gh-9311: int64-magnitude values must not lose precision
    # through a float round-trip.
    df = DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])

    grp_exp = {
        "first": {"expected": i[0]},
        "last": {"expected": i[1]},
        "min": {"expected": i[0]},
        "max": {"expected": i[1]},
        "nth": {"expected": i[1], "args": [1]},
        "count": {"expected": 2},
    }

    for method, data in grp_exp.items():
        if "args" not in data:
            data["args"] = []

        grouped = df.groupby("a")
        res = getattr(grouped, method)(*data["args"])

        assert res.iloc[0].b == data["expected"]


@pytest.mark.parametrize(
    "func, values",
    [
        ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
        ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
    ],
)
@pytest.mark.filterwarnings("ignore:.*Select only valid:FutureWarning")
def test_idxmin_idxmax_returns_int_types(func, values):
    # GH 25444 - idxmin/idxmax should work across many column dtypes and
    # return positional integer labels.
    df = DataFrame(
        {
            "name": ["A", "A", "B", "B"],
            "c_int": [1, 2, 3, 4],
            "c_float": [4.02, 3.03, 2.04, 1.05],
            "c_date": ["2019", "2018", "2016", "2017"],
        }
    )
    df["c_date"] = pd.to_datetime(df["c_date"])
    df["c_date_tz"] = df["c_date"].dt.tz_localize("US/Pacific")
    df["c_timedelta"] = df["c_date"] - df["c_date"].iloc[0]
    df["c_period"] = df["c_date"].dt.to_period("W")
    df["c_Integer"] = df["c_int"].astype("Int64")
    df["c_Floating"] = df["c_float"].astype("Float64")

    result = getattr(df.groupby("name"), func)()

    expected = DataFrame(values, index=Index(["A", "B"], name="name"))
    # Derived columns are monotone transforms of the base columns, so they
    # share the same argmin/argmax positions.
    expected["c_date_tz"] = expected["c_date"]
    expected["c_timedelta"] = expected["c_date"]
    expected["c_period"] = expected["c_date"]
    expected["c_Integer"] = expected["c_int"]
    expected["c_Floating"] = expected["c_float"]

    tm.assert_frame_equal(result, expected)


def test_idxmin_idxmax_axis1():
    df = DataFrame(np.random.randn(10, 4), columns=["A", "B", "C", "D"])
    df["A"] = [1, 2, 3, 1, 2, 3, 1, 2, 3, 4]

    gb = df.groupby("A")

    res = gb.idxmax(axis=1)

    # Compare against a row-wise idxmax over the non-grouper columns.
    alt = df.iloc[:, 1:].idxmax(axis=1)
    indexer = res.index.get_level_values(1)

    tm.assert_series_equal(alt[indexer], res.droplevel("A"))

    # Mixed dtypes across a row make axis=1 argmax undefined.
    df["E"] = date_range("2016-01-01", periods=10)
    gb2 = df.groupby("A")

    msg = "reduction operation 'argmax' not allowed for this dtype"
    with pytest.raises(TypeError, match=msg):
        gb2.idxmax(axis=1)


def test_groupby_cumprod():
    # GH 4095
    df = DataFrame({"key": ["b"] * 10, "value": 2})

    actual = df.groupby("key")["value"].cumprod()
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)

    df = DataFrame({"key": ["b"] * 100, "value": 2})
    actual = df.groupby("key")["value"].cumprod()
    # if overflows, groupby product casts to float
    # while numpy passes back invalid values
    df["value"] = df["value"].astype(float)
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)


def scipy_sem(*args, **kwargs):
    # Wrapper to align scipy's sem ddof default (0) with pandas' (1).
    from scipy.stats import sem

    return sem(*args, ddof=1, **kwargs)


@pytest.mark.parametrize(
    "op,targop",
    [
        ("mean", np.mean),
        ("median", np.median),
        ("std", np.std),
        ("var", np.var),
        ("sum", np.sum),
        ("prod", np.prod),
        ("min", np.min),
        ("max", np.max),
        ("first", lambda x: x.iloc[0]),
        ("last", lambda x: x.iloc[-1]),
        ("count", np.size),
        pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
    ],
)
def test_ops_general(op, targop):
    # Each cythonized groupby op must agree with its reference implementation
    # applied through agg.
    df = DataFrame(np.random.randn(1000))
    labels = np.random.randint(0, 50, size=1000).astype(float)

    result = getattr(df.groupby(labels), op)()
    expected = df.groupby(labels).agg(targop)
    tm.assert_frame_equal(result, expected)


def test_max_nan_bug():
    raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""

    df = pd.read_csv(StringIO(raw), parse_dates=[0])
    gb = df.groupby("Date")
    r = gb[["File"]].max()
    e = gb["File"].max().to_frame()
    tm.assert_frame_equal(r, e)
    assert not r["File"].isna().any()


def test_max_inat():
    # GH#40767 dont interpret iNaT as NaN
    ser = Series([1, iNaT])
    gb = ser.groupby([1, 1])

    result = gb.max(min_count=2)
    expected = Series({1: 1}, dtype=np.int64)
    tm.assert_series_equal(result, expected, check_exact=True)

    result = gb.min(min_count=2)
    expected = Series({1: iNaT}, dtype=np.int64)
    tm.assert_series_equal(result, expected, check_exact=True)

    # not enough entries -> gets masked to NaN
    result = gb.min(min_count=3)
    expected = Series({1: np.nan})
    tm.assert_series_equal(result, expected, check_exact=True)


def test_max_inat_not_all_na():
    # GH#40767 dont interpret iNaT as NaN
    # make sure we dont round iNaT+1 to iNaT
    ser = Series([1, iNaT, 2, iNaT + 1])
    gb = ser.groupby([1, 2, 3, 3])
    result = gb.min(min_count=2)

    # Note: in converting to float64, the iNaT + 1 maps to iNaT, i.e. is lossy
    expected = Series({1: np.nan, 2: np.nan, 3: iNaT + 1})
    tm.assert_series_equal(result, expected, check_exact=True)


def test_nlargest():
    a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    b = Series(list("a" * 5 + "b" * 5))
    gb = a.groupby(b)
    r = gb.nlargest(3)
    e = Series(
        [7, 5, 3, 10, 9, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
    )
    tm.assert_series_equal(r, e)

    # With ties, keep="last" prefers the later occurrence.
    a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    gb = a.groupby(b)
    e = Series(
        [3, 2, 1, 3, 3, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
    )
    tm.assert_series_equal(gb.nlargest(3, keep="last"), e)


def test_nlargest_mi_grouper():
    # see gh-21411
    npr = np.random.RandomState(123456789)

    dts = date_range("20180101", periods=10)
    iterables = [dts, ["one", "two"]]

    idx = MultiIndex.from_product(iterables, names=["first", "second"])
    s = Series(npr.randn(20), index=idx)

    result = s.groupby("first").nlargest(1)

    exp_idx = MultiIndex.from_tuples(
        [
            (dts[0], dts[0], "one"),
            (dts[1], dts[1], "one"),
            (dts[2], dts[2], "one"),
            (dts[3], dts[3], "two"),
            (dts[4], dts[4], "one"),
            (dts[5], dts[5], "one"),
            (dts[6], dts[6], "one"),
            (dts[7], dts[7], "one"),
            (dts[8], dts[8], "two"),
            (dts[9], dts[9], "one"),
        ],
        names=["first", "first", "second"],
    )
    # Values are deterministic because the RandomState above is seeded.
    exp_values = [
        2.2129019979039612,
        1.8417114045748335,
        0.858963679564603,
        1.3759151378258088,
        0.9430284594687134,
        0.5296914208183142,
        0.8318045593815487,
        -0.8476703342910327,
        0.3804446884133735,
        -0.8028845810770998,
    ]

    expected = Series(exp_values, index=exp_idx)
    tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)


def test_nsmallest():
    a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    b = Series(list("a" * 5 + "b" * 5))
    gb = a.groupby(b)
    r = gb.nsmallest(3)
    e = Series(
        [1, 2, 3, 0, 4, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
    )
    tm.assert_series_equal(r, e)

    # With ties, keep="last" prefers the later occurrence.
    a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    gb = a.groupby(b)
    e = Series(
        [0, 1, 1, 0, 1, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
    )
    tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)


@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
    # see gh-12811: numpy-style positional/keyword args are rejected.
    df = DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
    g = df.groupby("A")

    msg = "numpy operations are not valid with groupby"

    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(g, func)(1, 2, 3)
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        getattr(g, func)(foo=1)


def test_cummin(dtypes_for_minmax):
    dtype = dtypes_for_minmax[0]
    min_val = dtypes_for_minmax[1]

    # GH 15048
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
    expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]

    df = base_df.astype(dtype)

    expected = DataFrame({"B": expected_mins}).astype(dtype)
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)

    # Test w/ min value for dtype
    df.loc[[2, 6], "B"] = min_val
    df.loc[[1, 5], "B"] = min_val + 1
    expected.loc[[2, 3, 6, 7], "B"] = min_val
    expected.loc[[1, 5], "B"] = min_val + 1  # should not be rounded to min_val
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected, check_exact=True)
    expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected, check_exact=True)

    # Test nan in some values
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
    result = base_df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)

    # GH 15561
    df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
    expected = Series(pd.to_datetime("2001"), index=[0], name="b")

    result = df.groupby("a")["b"].cummin()
    tm.assert_series_equal(expected, result)

    # GH 15635
    df = DataFrame({"a": [1, 2, 1], "b": [1, 2, 2]})
    result = df.groupby("a").b.cummin()
    expected = Series([1, 2, 1], name="b")
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize("dtype", ["UInt64", "Int64", "Float64", "float", "boolean"])
def test_cummin_max_all_nan_column(method, dtype):
    # An all-NA column stays all-NA with its dtype preserved.
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
    base_df["B"] = base_df["B"].astype(dtype)
    grouped = base_df.groupby("A")

    expected = DataFrame({"B": [np.nan] * 8}, dtype=dtype)
    result = getattr(grouped, method)()
    tm.assert_frame_equal(expected, result)

    result = getattr(grouped["B"], method)().to_frame()
    tm.assert_frame_equal(expected, result)


def test_cummax(dtypes_for_minmax):
    dtype = dtypes_for_minmax[0]
    max_val = dtypes_for_minmax[2]

    # GH 15048
    base_df = DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]})
    expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]

    df = base_df.astype(dtype)

    expected = DataFrame({"B": expected_maxs}).astype(dtype)
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)

    # Test w/ max value for dtype
    df.loc[[2, 6], "B"] = max_val
    expected.loc[[2, 3, 6, 7], "B"] = max_val
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)

    # Test nan in some values
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
    result = base_df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)

    # GH 15561
    df = DataFrame({"a": [1], "b": pd.to_datetime(["2001"])})
    expected = Series(pd.to_datetime("2001"), index=[0], name="b")

    result = df.groupby("a")["b"].cummax()
    tm.assert_series_equal(expected, result)

    # GH 15635
    df = DataFrame({"a": [1, 2, 1], "b": [2, 1, 1]})
    result = df.groupby("a").b.cummax()
    expected = Series([2, 1, 2], name="b")
    tm.assert_series_equal(result, expected)


@td.skip_if_32bit
@pytest.mark.parametrize("method", ["cummin", "cummax"])
@pytest.mark.parametrize(
    "dtype,val", [("UInt64", np.iinfo("uint64").max), ("Int64", 2 ** 53 + 1)]
)
def test_nullable_int_not_cast_as_float(method, dtype, val):
    # Values outside float64's exact-integer range must not round-trip
    # through float.
    data = [val, pd.NA]
    df = DataFrame({"grp": [1, 1], "b": data}, dtype=dtype)
    grouped = df.groupby("grp")

    result = grouped.transform(method)
    expected = DataFrame({"b": data}, dtype=dtype)

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "in_vals, out_vals",
    [
        # Basics: strictly increasing (T), strictly decreasing (F),
        # abs val increasing (F), non-strictly increasing (T)
        ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
        # Test with inf vals
        (
            [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
            [True, False, True, False],
        ),
        # Test with nan vals; should always be False
        (
            [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
            [False, False, False, False],
        ),
    ],
)
def test_is_monotonic_increasing(in_vals, out_vals):
    # GH 17015
    source_dict = {
        "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
        "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
        "C": in_vals,
    }
    df = DataFrame(source_dict)
    result = df.groupby("B").C.is_monotonic_increasing
    index = Index(list("abcd"), name="B")
    expected = Series(index=index, data=out_vals, name="C")
    tm.assert_series_equal(result, expected)

    # Also check result equal to manually taking x.is_monotonic_increasing.
    expected = df.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "in_vals, out_vals",
    [
        # Basics: strictly decreasing (T), strictly increasing (F),
        # abs val decreasing (F), non-strictly increasing (T)
        ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
        # Test with inf vals
        (
            [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
            [True, True, False, True],
        ),
        # Test with nan vals; should always be False
        (
            [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
            [False, False, False, False],
        ),
    ],
)
def test_is_monotonic_decreasing(in_vals, out_vals):
    # GH 17015
    source_dict = {
        "A": ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11"],
        "B": ["a", "a", "a", "b", "b", "b", "c", "c", "c", "d", "d"],
        "C": in_vals,
    }

    df = DataFrame(source_dict)
    result = df.groupby("B").C.is_monotonic_decreasing
    index = Index(list("abcd"), name="B")
    expected = Series(index=index, data=out_vals, name="C")
    tm.assert_series_equal(result, expected)


# describe
# --------------------------------


def test_apply_describe_bug(mframe):
    grouped = mframe.groupby(level="first")
    grouped.describe()  # it works!
def test_series_describe_multikey(): ts = tm.makeTimeSeries() grouped = ts.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.describe() tm.assert_series_equal(result["mean"], grouped.mean(), check_names=False) tm.assert_series_equal(result["std"], grouped.std(), check_names=False) tm.assert_series_equal(result["min"], grouped.min(), check_names=False) def test_series_describe_single(): ts = tm.makeTimeSeries() grouped = ts.groupby(lambda x: x.month) result = grouped.apply(lambda x: x.describe()) expected = grouped.describe().stack() tm.assert_series_equal(result, expected) def test_series_index_name(df): grouped = df.loc[:, ["C"]].groupby(df["A"]) result = grouped.agg(lambda x: x.mean()) assert result.index.name == "A" def test_frame_describe_multikey(tsframe): grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month]) result = grouped.describe() desc_groups = [] for col in tsframe: group = grouped[col].describe() # GH 17464 - Remove duplicate MultiIndex levels group_col = MultiIndex( levels=[[col], group.columns], codes=[[0] * len(group.columns), range(len(group.columns))], ) group = DataFrame(group.values, columns=group_col, index=group.index) desc_groups.append(group) expected = pd.concat(desc_groups, axis=1) tm.assert_frame_equal(result, expected) groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1) result = groupedT.describe() expected = tsframe.describe().T tm.assert_frame_equal(result, expected) def test_frame_describe_tupleindex(): # GH 14848 - regression from 0.19.0 to 0.19.1 df1 = DataFrame( { "x": [1, 2, 3, 4, 5] * 3, "y": [10, 20, 30, 40, 50] * 3, "z": [100, 200, 300, 400, 500] * 3, } ) df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5 df2 = df1.rename(columns={"k": "key"}) msg = "Names should be list-like for a MultiIndex" with pytest.raises(ValueError, match=msg): df1.groupby("k").describe() with pytest.raises(ValueError, match=msg): df2.groupby("key").describe() def test_frame_describe_unstacked_format(): # GH 4792 
prices = { Timestamp("2011-01-06 10:59:05", tz=None): 24990, Timestamp("2011-01-06 12:43:33", tz=None): 25499, Timestamp("2011-01-06 12:54:09", tz=None): 25499, } volumes = { Timestamp("2011-01-06 10:59:05", tz=None): 1500000000, Timestamp("2011-01-06 12:43:33", tz=None): 5000000000, Timestamp("2011-01-06 12:54:09", tz=None): 100000000, } df = DataFrame({"PRICE": prices, "VOLUME": volumes}) result = df.groupby("PRICE").VOLUME.describe() data = [ df[df.PRICE == 24990].VOLUME.describe().values.tolist(), df[df.PRICE == 25499].VOLUME.describe().values.tolist(), ] expected = DataFrame( data, index=Index([24990, 25499], name="PRICE"), columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], ) tm.assert_frame_equal(result, expected) @pytest.mark.filterwarnings( "ignore:" "indexing past lexsort depth may impact performance:" "pandas.errors.PerformanceWarning" ) @pytest.mark.parametrize("as_index", [True, False]) def test_describe_with_duplicate_output_column_names(as_index): # GH 35314 df = DataFrame( { "a": [99, 99, 99, 88, 88, 88], "b": [1, 2, 3, 4, 5, 6], "c": [10, 20, 30, 40, 50, 60], }, columns=["a", "b", "b"], copy=False, ) expected = ( DataFrame.from_records( [ ("a", "count", 3.0, 3.0), ("a", "mean", 88.0, 99.0), ("a", "std", 0.0, 0.0), ("a", "min", 88.0, 99.0), ("a", "25%", 88.0, 99.0), ("a", "50%", 88.0, 99.0), ("a", "75%", 88.0, 99.0), ("a", "max", 88.0, 99.0), ("b", "count", 3.0, 3.0), ("b", "mean", 5.0, 2.0), ("b", "std", 1.0, 1.0), ("b", "min", 4.0, 1.0), ("b", "25%", 4.5, 1.5), ("b", "50%", 5.0, 2.0), ("b", "75%", 5.5, 2.5), ("b", "max", 6.0, 3.0), ("b", "count", 3.0, 3.0), ("b", "mean", 5.0, 2.0), ("b", "std", 1.0, 1.0), ("b", "min", 4.0, 1.0), ("b", "25%", 4.5, 1.5), ("b", "50%", 5.0, 2.0), ("b", "75%", 5.5, 2.5), ("b", "max", 6.0, 3.0), ], ) .set_index([0, 1]) .T ) expected.columns.names = [None, None] expected.index = Index([88, 99], name="a") if as_index: expected = expected.drop(columns=["a"], level=0) else: expected = 
expected.reset_index(drop=True) result = df.groupby("a", as_index=as_index).describe() tm.assert_frame_equal(result, expected) def test_groupby_mean_no_overflow(): # Regression test for (#22487) df = DataFrame( { "user": ["A", "A", "A", "A", "A"], "connections": [4970, 4749, 4719, 4704, 18446744073699999744], } ) assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840 @pytest.mark.parametrize( "values", [ { "a": [1, 1, 1, 2, 2, 2, 3, 3, 3], "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2], }, {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]}, ], ) @pytest.mark.parametrize("function", ["mean", "median", "var"]) def test_apply_to_nullable_integer_returns_float(values, function): # https://github.com/pandas-dev/pandas/issues/32219 output = 0.5 if function == "var" else 1.5 arr = np.array([output] * 3, dtype=float) idx = Index([1, 2, 3], dtype=object, name="a") expected = DataFrame({"b": arr}, index=idx).astype("Float64") groups = DataFrame(values, dtype="Int64").groupby("a") result = getattr(groups, function)() tm.assert_frame_equal(result, expected) result = groups.agg(function) tm.assert_frame_equal(result, expected) result = groups.agg([function]) expected.columns = MultiIndex.from_tuples([("b", function)]) tm.assert_frame_equal(result, expected) def test_groupby_sum_below_mincount_nullable_integer(): # https://github.com/pandas-dev/pandas/issues/32861 df = DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64") grouped = df.groupby("a") idx = Index([0, 1, 2], dtype=object, name="a") result = grouped["b"].sum(min_count=2) expected = Series([pd.NA] * 3, dtype="Int64", index=idx, name="b") tm.assert_series_equal(result, expected) result = grouped.sum(min_count=2) expected = DataFrame({"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx) tm.assert_frame_equal(result, expected)
# Copyright 2016 James Hensman # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from gpflow import settings from functools import reduce float_type = settings.dtypes.float_type import numpy as np class BlockDiagMat_many: def __init__(self, mats): self.mats = mats @property def shape(self): return (sum([m.shape[0] for m in mats]), sum([m.shape[1] for m in mats])) @property def sqrt_dims(self): return sum([m.sqrt_dims for m in mats]) def _get_rhs_slices(self, X): ret = [] start = 0 for m in self.mats: ret.append(tf.slice(X, begin=tf.stack([start, 0]), size=tf.stack([m.shape[1], -1]))) start = start + m.shape[1] return ret def _get_rhs_blocks(self, X): """ X is a solid matrix, same size as this one. 
Get the blocks of X that correspond to the structure of this matrix """ ret = [] start1 = 0 start2 = 0 for m in self.mats: ret.append(tf.slice(X, begin=tf.stack([start1, start2]), size=m.shape)) start1 = start1 + m.shape[0] start2 = start2 + m.shape[1] return ret def get(self): ret = self.mats[0].get() for m in self.mats[1:]: tr_shape = tf.stack([tf.shape(ret)[0], m.shape[1]]) bl_shape = tf.stack([m.shape[0], tf.shape(ret)[1]]) top = tf.concat([ret, tf.zeros(tr_shape, float_type)], axis=1) bottom = tf.concat([tf.zeros(bl_shape, float_type), m.get()], axis=1) ret = tf.concat([top, bottom], axis=0) return ret def logdet(self): return reduce(tf.add, [m.logdet() for m in self.mats]) def matmul(self, X): return tf.concat([m.matmul(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0) def solve(self, X): return tf.concat([m.solve(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0) def inv(self): return BlockDiagMat_many([mat.inv() for mat in self.mats]) def trace_KiX(self, X): """ X is a square matrix of the same size as this one. 
if self is K, compute tr(K^{-1} X) """ return reduce(tf.add, [m.trace_KiX(Xi) for m, Xi in zip(self.mats, self._get_rhs_blocks(X))]) def get_diag(self): return tf.concat([m.get_diag() for m in self.mats], axis=0) def inv_diag(self): return tf.concat([m.inv_diag() for m in self.mats], axis=0) def matmul_sqrt(self, X): return tf.concat([m.matmul_sqrt(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0) def matmul_sqrt_transpose(self, X): ret = [] start = np.zeros((2, np.int32)) for m in self.mats: ret.append(m.matmul_sqrt_transpose(tf.slice(X, begin=start, size=tf.stack([m.sqrt_dims, -1])))) start[0] += m.sqrt_dims return tf.concat(ret, axis=0) class BlockDiagMat: def __init__(self, A, B): self.A, self.B = A, B @property def shape(self): mats = [self.A, self.B] return (sum([m.shape[0] for m in mats]), sum([m.shape[1] for m in mats])) @property def sqrt_dims(self): mats = [self.A, self.B] return sum([m.sqrt_dims for m in mats]) def _get_rhs_slices(self, X): # X1 = X[:self.A.shape[1], :] X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.shape[1], -1])) # X2 = X[self.A.shape[1]:, :] X2 = tf.slice(X, begin=tf.stack([self.A.shape[1], 0]), size=-tf.ones((2,), tf.int32)) return X1, X2 def get(self): tl_shape = tf.stack([self.A.shape[0], self.B.shape[1]]) br_shape = tf.stack([self.B.shape[0], self.A.shape[1]]) top = tf.concat([self.A.get(), tf.zeros(tl_shape, float_type)], axis=1) bottom = tf.concat([tf.zeros(br_shape, float_type), self.B.get()], axis=1) return tf.concat([top, bottom], axis=0) def logdet(self): return self.A.logdet() + self.B.logdet() def matmul(self, X): X1, X2 = self._get_rhs_slices(X) top = self.A.matmul(X1) bottom = self.B.matmul(X2) return tf.concat([top, bottom], axis=0) def solve(self, X): X1, X2 = self._get_rhs_slices(X) top = self.A.solve(X1) bottom = self.B.solve(X2) return tf.concat([top, bottom], axis=0) def inv(self): return BlockDiagMat(self.A.inv(), self.B.inv()) def trace_KiX(self, X): """ X is a square 
matrix of the same size as this one. if self is K, compute tr(K^{-1} X) """ X1, X2 = tf.slice(X, [0, 0], self.A.shape), tf.slice(X, self.A.shape, [-1, -1]) top = self.A.trace_KiX(X1) bottom = self.B.trace_KiX(X2) return top + bottom def get_diag(self): return tf.concat([self.A.get_diag(), self.B.get_diag()], axis=0) def inv_diag(self): return tf.concat([self.A.inv_diag(), self.B.inv_diag()], axis=0) def matmul_sqrt(self, X): X1, X2 = self._get_rhs_slices(X) top = self.A.matmul_sqrt(X1) bottom = self.B.matmul_sqrt(X2) return tf.concat([top, bottom], axis=0) def matmul_sqrt_transpose(self, X): X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.sqrt_dims, -1])) X2 = tf.slice(X, begin=tf.stack([self.A.sqrt_dims, 0]), size=-tf.ones((2,), tf.int32)) top = self.A.matmul_sqrt_transpose(X1) bottom = self.B.matmul_sqrt_transpose(X2) return tf.concat([top, bottom], axis=0) class LowRankMat: def __init__(self, d, W): """ A matrix of the form diag(d) + W W^T """ self.d = d self.W = W @property def shape(self): return (tf.size(self.d), tf.size(self.d)) @property def sqrt_dims(self): return tf.size(self.d) + tf.shape(W)[1] def get(self): return tf.diag(self.d) + tf.matmul(self.W, tf.transpose(self.W)) def logdet(self): part1 = tf.reduce_sum(tf.log(self.d)) I = tf.eye(tf.shape(self.W)[1], float_type) M = I + tf.matmul(tf.transpose(self.W) / self.d, self.W) part2 = 2*tf.reduce_sum(tf.log(tf.diag_part(tf.cholesky(M)))) return part1 + part2 def matmul(self, B): WTB = tf.matmul(tf.transpose(self.W), B) WWTB = tf.matmul(self.W, WTB) DB = tf.reshape(self.d, [-1, 1]) * B return DB + WWTB def get_diag(self): return self.d + tf.reduce_sum(tf.square(self.W), 1) def solve(self, B): d_col = tf.expand_dims(self.d, 1) DiB = B / d_col DiW = self.W / d_col WTDiB = tf.matmul(tf.transpose(DiW), B) M = tf.eye(tf.shape(self.W)[1], float_type) + tf.matmul(tf.transpose(DiW), self.W) L = tf.cholesky(M) tmp1 = tf.matrix_triangular_solve(L, WTDiB, lower=True) tmp2 = 
tf.matrix_triangular_solve(tf.transpose(L), tmp1, lower=False) return DiB - tf.matmul(DiW, tmp2) def inv(self): di = tf.reciprocal(self.d) d_col = tf.expand_dims(self.d, 1) DiW = self.W / d_col M = tf.eye(tf.shape(self.W)[1], float_type) + tf.matmul(tf.transpose(DiW), self.W) L = tf.cholesky(M) v = tf.transpose(tf.matrix_triangular_solve(L, tf.transpose(DiW), lower=True)) return LowRankMatNeg(di, V) def trace_KiX(self, X): """ X is a square matrix of the same size as this one. if self is K, compute tr(K^{-1} X) """ d_col = tf.expand_dims(self.d, 1) R = self.W / d_col RTX = tf.matmul(tf.transpose(R), X) RTXR = tf.matmul(RTX, R) M = tf.eye(tf.shape(self.W)[1], float_type) + tf.matmul(tf.transpose(R), self.W) Mi = tf.matrix_inverse(M) return tf.reduce_sum(tf.diag_part(X) * 1./self.d) - tf.reduce_sum(RTXR * Mi) def inv_diag(self): d_col = tf.expand_dims(self.d, 1) WTDi = tf.transpose(self.W / d_col) M = tf.eye(tf.shape(self.W)[1], float_type) + tf.matmul(WTDi, self.W) L = tf.cholesky(M) tmp1 = tf.matrix_triangular_solve(L, WTDi, lower=True) return 1./self.d - tf.reduce_sum(tf.square(tmp1), 0) def matmul_sqrt(self, B): """ There's a non-square sqrt of this matrix given by [ D^{1/2}] [ W^T ] This method right-multiplies the sqrt by the matrix B """ DB = tf.expand_dims(tf.sqrt(self.d), 1) * B VTB = tf.matmul(tf.transpose(self.W), B) return tf.concat([DB, VTB], axis=0) def matmul_sqrt_transpose(self, B): """ There's a non-square sqrt of this matrix given by [ D^{1/2}] [ W^T ] This method right-multiplies the transposed-sqrt by the matrix B """ B1 = tf.slice(B, tf.zeros((2,), tf.int32), tf.stack([tf.size(self.d), -1])) B2 = tf.slice(B, tf.stack([tf.size(self.d), 0]), -tf.ones((2,), tf.int32)) return tf.expand_dims(tf.sqrt(self.d), 1) * B1 + tf.matmul(self.W, B2) class LowRankMatNeg: def __init__(self, d, W): """ A matrix of the form diag(d) - W W^T (note the minus sign) """ self.d = d self.W = W @property def shape(self): return (tf.size(self.d), tf.size(self.d)) def 
get(self): return tf.diag(self.d) - tf.matmul(self.W, tf.transpose(self.W)) class Rank1Mat: def __init__(self, d, v): """ A matrix of the form diag(d) + v v^T """ self.d = d self.v = v @property def shape(self): return (tf.size(self.d), tf.size(self.d)) @property def sqrt_dims(self): return tf.size(self.d) + 1 def get(self): V = tf.expand_dims(self.v, 1) return tf.diag(self.d) + tf.matmul(V, tf.transpose(V)) def logdet(self): return tf.reduce_sum(tf.log(self.d)) +\ tf.log(1. + tf.reduce_sum(tf.square(self.v) / self.d)) def matmul(self, B): V = tf.expand_dims(self.v, 1) return tf.expand_dims(self.d, 1) * B +\ tf.matmul(V, tf.matmul(tf.transpose(V), B)) def solve(self, B): div = self.v / self.d c = 1. + tf.reduce_sum(div * self.v) div = tf.expand_dims(div, 1) return B / tf.expand_dims(self.d, 1) -\ tf.matmul(div/c, tf.matmul(tf.transpose(div), B)) def inv(self): di = tf.reciprocal(self.d) Div = self.v * di M = 1. + tf.reduce_sum(Div * self.v) v_new = Div / tf.sqrt(M) return Rank1MatNeg(di, v_new) def trace_KiX(self, X): """ X is a square matrix of the same size as this one. if self is K, compute tr(K^{-1} X) """ R = tf.expand_dims(self.v / self.d, 1) RTX = tf.matmul(tf.transpose(R), X) RTXR = tf.matmul(RTX, R) M = 1 + tf.reduce_sum(tf.square(self.v) / self.d) return tf.reduce_sum(tf.diag_part(X) / self.d) - RTXR / M def get_diag(self): return self.d + tf.square(self.v) def inv_diag(self): div = self.v / self.d c = 1. 
+ tf.reduce_sum(div * self.v) return 1./self.d - tf.square(div) / c def matmul_sqrt(self, B): """ There's a non-square sqrt of this matrix given by [ D^{1/2}] [ V^T ] This method right-multiplies the sqrt by the matrix B """ DB = tf.expand_dims(tf.sqrt(self.d), 1) * B VTB = tf.matmul(tf.expand_dims(self.v, 0), B) return tf.concat([DB, VTB], axis=0) def matmul_sqrt_transpose(self, B): """ There's a non-square sqrt of this matrix given by [ D^{1/2}] [ W^T ] This method right-multiplies the transposed-sqrt by the matrix B """ B1 = tf.slice(B, tf.zeros((2,), tf.int32), tf.stack([tf.size(self.d), -1])) B2 = tf.slice(B, tf.stack([tf.size(self.d), 0]), -tf.ones((2,), tf.int32)) return tf.expand_dims(tf.sqrt(self.d), 1) * B1 + tf.matmul(tf.expand_dims(self.v, 1), B2) class Rank1MatNeg: def __init__(self, d, v): """ A matrix of the form diag(d) - v v^T (note the minus sign) """ self.d = d self.v = v @property def shape(self): return (tf.size(self.d), tf.size(self.d)) def get(self): W = tf.expand_dims(self.v, 1) return tf.diag(self.d) - tf.matmul(W, tf.transpose(W)) class DiagMat: def __init__(self, d): self.d = d @property def shape(self): return (tf.size(self.d), tf.size(self.d)) @property def sqrt_dims(self): return tf.size(self.d) def get(self): return tf.diag(self.d) def logdet(self): return tf.reduce_sum(tf.log(self.d)) def matmul(self, B): return tf.expand_dims(self.d, 1) * B def solve(self, B): return B / tf.expand_dims(self.d, 1) def inv(self): return DiagMat(tf.reciprocal(self.d)) def trace_KiX(self, X): """ X is a square matrix of the same size as this one. if self is K, compute tr(K^{-1} X) """ return tf.reduce_sum(tf.diag_part(X) / self.d) def get_diag(self): return self.d def inv_diag(self): return 1. / self.d def matmul_sqrt(self, B): return tf.expand_dims(tf.sqrt(self.d), 1) * B def matmul_sqrt_transpose(self, B): return tf.expand_dims(tf.sqrt(self.d), 1) * B
# This file is part of gorm, an object relational mapper for versioned graphs. # Copyright (C) 2014 Zachary Spector. from collections import defaultdict, deque from .graph import ( Graph, DiGraph, MultiGraph, MultiDiGraph, ) from .query import QueryEngine from .reify import reify class GraphNameError(KeyError): pass class ORM(object): """Instantiate this with the same string argument you'd use for a SQLAlchemy ``create_engine`` call. This will be your interface to gorm. """ @reify def _graph_val_cache(self): assert(self.caching) from .window import WindowDict r = defaultdict( # graph: lambda: defaultdict( # key: lambda: defaultdict( # branch: WindowDict # rev: value ) ) ) for (graph, key, branch, rev, value) in self.db.graph_val_dump(): r[graph][key][branch][rev] = value return r @reify def _node_val_cache(self): assert(self.caching) from .window import WindowDict r = defaultdict( # graph: lambda: defaultdict( # node: lambda: defaultdict( # key: lambda: defaultdict( # branch: WindowDict # rev: value ) ) ) ) for (graph, node, key, branch, rev, value) in self.db.node_val_dump(): r[graph][node][key][branch][rev] = value return r @reify def _nodes_cache(self): assert(self.caching) from .window import WindowDict r = defaultdict( # graph: lambda: defaultdict( # node: lambda: defaultdict( # branch: WindowDict # rev: extant ) ) ) for (graph, node, branch, rev, extant) in self.db.nodes_dump(): r[graph][node][branch][rev] = extant return r @reify def _edge_val_cache(self): assert(self.caching) from .window import WindowDict r = defaultdict( # graph: lambda: defaultdict( # nodeA: lambda: defaultdict( # nodeB: lambda: defaultdict( # idx: lambda: defaultdict( # key: lambda: defaultdict( # branch: WindowDict # rev: value ) ) ) ) ) ) for ( graph, nodeA, nodeB, idx, key, branch, rev, value ) in self.db.edge_val_dump(): r[graph][nodeA][nodeB][idx][key][branch][rev] = value return r @reify def _edges_cache(self): assert self.caching from .window import WindowDict r = defaultdict( # 
graph: lambda: defaultdict( # nodeA: lambda: defaultdict( # nodeB: lambda: defaultdict( # idx: lambda: defaultdict( # branch: WindowDict # rev: extant ) ) ) ) ) for ( graph, nodeA, nodeB, idx, branch, rev, extant ) in self.db.edges_dump(): r[graph][nodeA][nodeB][idx][branch][rev] = extant return r def __init__( self, dbstring, alchemy=True, connect_args={}, query_engine_class=QueryEngine, json_dump=None, json_load=None, caching=True ): """Make a SQLAlchemy engine if possible, else a sqlite3 connection. In either case, begin a transaction. """ self.db = query_engine_class(dbstring, connect_args, alchemy, json_dump, json_load) self._branches = {} self._obranch = None self._orev = None self.db.initdb() if caching: self.caching = True self._obranch = self.branch self._orev = self.rev self._timestream = {'master': {}} self._branch_start = {} self._branches = {'master': self._timestream['master']} self._branch_parents = {} self._active_branches_cache = [] self.db.active_branches = self._active_branches todo = deque(self.db.timestream_data()) while todo: (branch, parent, parent_tick) = working = todo.popleft() if branch == 'master': continue if parent in self._branches: assert(branch not in self._branches) self._branches[parent][branch] = {} self._branches[branch] = self._branches[parent][branch] self._branch_parents['branch'] = parent self._branch_start[branch] = parent_tick else: todo.append(working) def __enter__(self): """Enable the use of the ``with`` keyword""" return self def __exit__(self, *args): """Alias for ``close``""" self.close() def _havebranch(self, b): """Private use. Checks that the branch is known about.""" if self.caching and b in self._branches: return True return self.db.have_branch(b) def is_parent_of(self, parent, child): """Return whether ``child`` is a branch descended from ``parent`` at any remove. 
""" # trivial cases if child in self._branches and self._branches[child][0] == parent: return True elif child == parent: return False # I will be recursing a lot so just cache all the branch info self._childbranch = {} self._ancestry = {} for (branch, parent, parent_rev) in self.db.all_branches(): self._branches[branch] = (parent, parent_rev) self._childbranch[parent] = branch self._ancestry[child] = set([parent]) lineage = self._ancestry[child] def recurse(oneparent): if oneparent in lineage: return True if oneparent not in self._branches: return False if self._branches[oneparent][0] in lineage: return True lineage.add(oneparent) return recurse(self._branches[oneparent][0]) return recurse(child) @property def branch(self): """Return the global value ``branch``, or ``self._obranch`` if it's set """ if self._obranch is not None: return self._obranch return self.db.globl['branch'] @branch.setter def branch(self, v): """Set the global value ``branch`` and note that the branch's (parent, parent_rev) are the (branch, tick) set previously """ curbranch = self.branch currev = self.rev if not self._havebranch(v): # assumes the present revision in the parent branch has # been finalized. self.db.new_branch(v, curbranch, currev) if v == 'master': return # make sure I'll end up within the revision range of the # destination branch if self.caching: if v not in self._branch_parents: self._branch_parents[v] = curbranch self._branch_start[v] = currev parrev = self._branch_start[v] else: parrev = self.db.parrev(v) if currev < parrev: raise ValueError( "Tried to jump to branch {br}, which starts at revision {rv}. 
" "Go to rev {rv} or later to use this branch.".format( br=v, rv=currev ) ) self.db.globl['branch'] = v if self.engine.caching: self._obranch = v @property def rev(self): """Return the global value ``rev``, or ``self._orev`` if that's set""" if self._orev is not None: return self._orev return self.db.globl['rev'] @rev.setter def rev(self, v): """Set the global value ``rev``, first checking that it's not before the start of this branch. If it is, also go to the parent branch. """ # first make sure the cursor is not before the start of this branch branch = self.branch if branch != 'master': if self.caching: parent = self._branch_parents[branch] parent_rev = self._branch_start[branch] else: (parent, parent_rev) = self.db.parparrev(branch) if v < int(parent_rev): raise ValueError( "The revision number {revn} " "occurs before the start of " "the branch {brnch}".format(revn=v, brnch=branch) ) self.db.globl['rev'] = v assert(self.rev == v) if self.caching: self._orev = v def commit(self): """Alias of ``self.db.commit``""" self.db.commit() def close(self): """Alias of ``self.db.close``""" self.db.close() def initdb(self): """Alias of ``self.db.initdb``""" self.db.initdb() def _init_graph(self, name, type_s='Graph'): if self.db.have_graph(name): raise GraphNameError("Already have a graph by that name") self.db.new_graph(name, type_s) def new_graph(self, name, data=None, **attr): """Return a new instance of type Graph, initialized with the given data if provided. """ self._init_graph(name, 'Graph') return Graph(self, name, data, **attr) def new_digraph(self, name, data=None, **attr): """Return a new instance of type DiGraph, initialized with the given data if provided. """ self._init_graph(name, 'DiGraph') return DiGraph(self, name, data, **attr) def new_multigraph(self, name, data=None, **attr): """Return a new instance of type MultiGraph, initialized with the given data if provided. 
""" self._init_graph(name, 'MultiGraph') return MultiGraph(self, name, data, **attr) def new_multidigraph(self, name, data=None, **attr): """Return a new instance of type MultiDiGraph, initialized with the given data if provided. """ self._init_graph(name, 'MultiDiGraph') return MultiDiGraph(self, name, data, **attr) def get_graph(self, name): """Return a graph previously created with ``new_graph``, ``new_digraph``, ``new_multigraph``, or ``new_multidigraph`` """ graphtypes = { 'Graph': Graph, 'DiGraph': DiGraph, 'MultiGraph': MultiGraph, 'MultiDiGraph': MultiDiGraph } type_s = self.db.graph_type(name) if type_s not in graphtypes: raise GraphNameError("I don't know of a graph named {}".format(name)) return graphtypes[type_s](self, name) def del_graph(self, name): """Remove all traces of a graph's existence from the database""" # make sure the graph exists before deleting anything self.get_graph(name) self.db.del_graph(name) def _active_branches(self, branch=None, rev=None): """Private use. Iterate over (branch, rev) pairs, where the branch is a descendant of the previous (starting with whatever branch is presently active and ending at 'master'), and the rev is the latest revision in the branch that matters. """ b = branch or self.branch r = rev or self.rev if self.caching: yield b, r while b in self._branch_parents: r = self._branch_start[b] b = self._branch_parents[b] yield b, r return for pair in self.db.active_branches(b, r): yield pair def _branch_descendants(self, branch=None): """Iterate over all branches immediately descended from the current one (or the given one, if available). """ branch = branch or self.branch if not self.caching: for desc in self.db.branch_descendants(branch): yield desc return for b in self._branches[branch].keys(): yield b for child in self._branches[branch].keys(): for b in self._branch_descendants(child): yield b __all__ = [ORM, 'alchemy', 'graph', 'query', 'reify', 'window', 'xjson']
""" Model classes """ import numpy as np import sys import inspect # Used for storing the input from .aquifer import Aquifer from .aquifer_parameters import param_maq, param_3d from .constant import ConstantStar from .util import PlotTim import multiprocessing as mp __all__ = ['Model', 'ModelMaq', 'Model3D'] class Model(PlotTim): """ Model Class to create a model object consisting of an arbitrary sequence of aquifer layers and leaky layers. Use ModelMaq for regular sequence of aquifer and leaky layers. Use Model3D for multi-layer model of single aquifer Parameters ---------- kaq : array hydraulic conductivity of each aquifer from the top down z : array elevation tops and bottoms of all layers layers may have zero thickness c : array resistance between two consecutive aquifer layers if ltype[0]='a': length is number of aquifers - 1 if ltype[0]='l': length is number of aquifers npor : array porosity of all layers from the top down ltype : array of characters array indicating for each layer whether it is 'a' aquifer layer 'l' leaky layer """ def __init__(self, kaq, c, z, npor, ltype, f2py=False): # All input variables are numpy arrays # That should be checked outside this function self.elementlist = [] self.elementdict = {} # only elements that have a label self.aq = Aquifer(self, kaq, c, z, npor, ltype) self.modelname = 'ml' # Used for writing out input self.f2py = False if f2py: try: from .src import besselaesnew self.f2py = True except: print('FORTRAN extension not found while f2py=True. 
Using Numba instead') def initialize(self): # remove inhomogeneity elements (they are added again) self.elementlist = [e for e in self.elementlist if not e.inhomelement] self.aq.initialize() for e in self.elementlist: e.initialize() def add_element(self, e): self.elementlist.append(e) if e.label is not None: self.elementdict[e.label] = e def remove_element(self, e): """Remove element `e` from model """ if e.label is not None: self.elementdict.pop(e.label) self.elementlist.remove(e) def storeinput(self, frame): self.inputargs, _, _, self.inputvalues = inspect.getargvalues(frame) def potential(self, x, y, aq=None): if aq is None: aq = self.aq.find_aquifer_data(x, y) pot = np.zeros(aq.naq) for e in aq.elementlist: pot += e.potential(x, y, aq) rv = np.sum(pot * aq.eigvec, 1) if aq.ltype[0] == 'l': # potential for head above leaky layer rv += aq.constantstar.potstar return rv def disvec(self, x, y, aq=None): """Discharge vector at `x`, `y` Returns ------- qxqy : array size (2, naq) first row is Qx in each aquifer layer, second row is Qy """ if aq is None: aq = self.aq.find_aquifer_data(x, y) rv = np.zeros((2, aq.naq)) for e in aq.elementlist: rv += e.disvec(x, y, aq) rv = np.sum(rv[:, np.newaxis, :] * aq.eigvec, 2) return rv def qztop(self, x, y, aq=None): if aq is None: aq = self.aq.find_aquifer_data(x, y) rv = 0.0 if aq.ltype[0] == 'a': # otherwise recharge cannot be added for e in aq.elementlist: rv += e.qztop(x, y) return rv def head(self, x, y, layers=None, aq=None): """Head at `x`, `y` Returns ------- h : array length `naq` or `len(layers)` head in all `layers` (if not `None`), or all layers of aquifer (otherwise) """ if aq is None: aq = self.aq.find_aquifer_data(x, y) rv = self.potential(x, y, aq) / aq.T if layers is None: return rv else: return rv[layers] def headgrid(self, xg, yg, layers=None, printrow=False): """Grid of heads Parameters ---------- xg : array x values of grid yg : array y values of grid layers : integer, list or array, optional layers for which 
grid is returned printrow : boolean, optional prints dot to screen for each row of grid if set to `True` Returns ------- h : array size `nlayers, ny, nx` See also -------- :func:`~timml.model.Model.headgrid2` """ nx, ny = len(xg), len(yg) if layers is None: Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq else: Nlayers = len(np.atleast_1d(layers)) h = np.empty((Nlayers, ny, nx)) for j in range(ny): if printrow: print('.', end='', flush=True) for i in range(nx): h[:, j, i] = self.head(xg[i], yg[j], layers) if printrow: print('', flush=True) return h def headgrid2(self, x1, x2, nx, y1, y2, ny, layers=None, printrow=False): """Grid of heads Parameters ---------- x1, x2, nx : x values are generated as linspace(x1, x2, nx) y1, y2, ny : y values are generated as linspace(y1, y2, ny) layers : integer, list or array, optional layers for which grid is returned printrow : boolean, optional prints dot to screen for each row of grid if set to `True` Returns ------- h : array size `nlayers, ny, nx` See also -------- :func:`~timml.model.Model.headgrid` """ xg, yg = np.linspace(x1, x2, nx), np.linspace(y1, y2, ny) return self.headgrid(xg, yg, layers=layers, printrow=printrow) def headalongline(self, x, y, layers=None): """Head along line or curve Parameters ---------- x : array x values of line y : array y values of line layers : integer, list or array, optional layers for which grid is returned Returns ------- h : array size `nlayers, nx` """ xg, yg = np.atleast_1d(x), np.atleast_1d(y) if layers is None: Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq else: Nlayers = len(np.atleast_1d(layers)) nx = len(xg) if len(yg) == 1: yg = yg * np.ones(nx) h = np.zeros((Nlayers, nx)) for i in range(nx): h[:, i] = self.head(xg[i], yg[i], layers) return h def disvecalongline(self, x, y, layers=None): '''Returns Qx[Nlayers,len(x)], Qy[Nlayers,len(x)] Assumes same number of layers for each x and y layers may be None or list of layers for which head is computed''' xg, yg = 
np.atleast_1d(x), np.atleast_1d(y) if layers is None: Nlayers = self.aq.find_aquifer_data(xg[0], yg[0]).naq else: Nlayers = len(np.atleast_1d(layers)) nx = len(xg) if len(yg) == 1: yg = yg * np.ones(nx) Qx = np.zeros((Nlayers, nx)) Qy = np.zeros((Nlayers, nx)) for i in range(nx): Qx[:, i], Qy[:, 1] = self.disvec(xg[i], yg[i], layers) return Qx, Qy def velocity(self, x, y, z): return self.velocomp(x, y, z) def velocomp(self, x, y, z, aq=None, layer_ltype=None): if aq is None: aq = self.aq.find_aquifer_data(x, y) assert z <= aq.z[0] and z >= aq.z[-1], "z value not inside aquifer" if layer_ltype is None: layer, ltype, dummy = aq.findlayer(z) else: layer, ltype = layer_ltype h = self.head(x, y, aq=aq) # qz between aquifer layers qzlayer = np.zeros(aq.naq + 1) qzlayer[1:-1] = (h[1:] - h[:-1]) / aq.c[1:] if aq.ltype[0] == 'l': qzlayer[0] = (h[0] - aq.hstar) / aq.c[0] if ltype == 'l': vz = qzlayer[layer] / aq.nporll[layer] vx = 0 vy = 0 else: qzbot = qzlayer[layer + 1] qztop = qzlayer[layer] if layer == 0: qztop += self.qztop(x, y) vz = (qzbot + (z - aq.zaqbot[layer]) / aq.Haq[layer] * \ (qztop - qzbot)) / aq.nporaq[layer] qx, qy = self.disvec(x, y, aq=aq) vx = qx[layer] / (aq.Haq[layer] * aq.nporaq[layer]) vy = qy[layer] / (aq.Haq[layer] * aq.nporaq[layer]) return np.array([vx, vy, vz]) def solve(self, printmat=0, sendback=0, silent=False): '''Compute solution''' # Initialize elements self.initialize() # Compute number of equations self.neq = np.sum([e.nunknowns for e in self.elementlist]) if self.neq == 0: return if silent is False: print('Number of elements, Number of equations:', len( self.elementlist), ',', self.neq) if self.neq == 0: if silent is False: print('No unknowns. 
Solution complete') return mat = np.empty((self.neq, self.neq)) rhs = np.empty(self.neq) ieq = 0 for e in self.elementlist: if e.nunknowns > 0: mat[ieq:ieq + e.nunknowns, :], rhs[ieq:ieq + e.nunknowns] = \ e.equation() ieq += e.nunknowns if silent is False: print('.', end='', flush=True) if printmat: return mat, rhs sol = np.linalg.solve(mat, rhs) icount = 0 for e in self.elementlist: if e.nunknowns > 0: e.setparams(sol[icount:icount + e.nunknowns]) icount += e.nunknowns if silent is False: print() # needed cause the dots are printed print('solution complete') elif (silent == 'dot') or (silent == '.'): print('.', end='', flush=True) if sendback: return sol return def solve_mp(self, nproc=4, printmat=0, sendback=0, silent=False): '''Compute solution, multiprocessing implementation. Note: estimated speedup approximately by factor of number of physical cores. Virtual cores do not improve calculation time.''' # Initialize elements self.initialize() # Compute number of equations self.neq = np.sum([e.nunknowns for e in self.elementlist]) if self.neq == 0: return if silent is False: print('Number of elements, Number of equations:', len( self.elementlist), ',', self.neq) if self.neq == 0: if silent is False: print('No unknowns. Solution complete') return mat = np.empty((self.neq, self.neq)) rhs = np.empty(self.neq) # start multiprocessing if nproc is None: nproc = mp.cpu_count() - 1 # make no. of processes equal to 1 less than no. of cores elif nproc > mp.cpu_count(): print("Given 'nproc' larger than no. of cores on machine. 
Setting 'nproc' to {}.".format(mp.cpu_count())) nproc = mp.cpu_count() pool = mp.Pool(processes=nproc) results = [] for e in self.elementlist: if e.nunknowns > 0: results.append(pool.apply_async(e.equation)) if silent is False: print('.', end='', flush=True) pool.close() pool.join() mat = np.empty((self.neq, self.neq)) rhs = np.zeros(self.neq) ieq = 0 for p in results: imat, irhs = p.get() mat[ieq:ieq + imat.shape[0], :] = imat rhs[ieq:ieq + irhs.shape[0]] = irhs ieq += imat.shape[0] # end multiprocessing if printmat: return mat, rhs sol = np.linalg.solve(mat, rhs) icount = 0 for e in self.elementlist: if e.nunknowns > 0: e.setparams(sol[icount:icount + e.nunknowns]) icount += e.nunknowns if silent is False: print() # needed cause the dots are printed print('solution complete') elif (silent == 'dot') or (silent == '.'): print('.', end='', flush=True) if sendback: return sol return class ModelMaq(Model): """ Create a Model object by specifying a mult-aquifer sequence of aquifer-leakylayer-aquifer-leakylayer-aquifer etc Parameters ---------- kaq : float, array or list Hydraulic conductivity of each aquifer from the top down. If float, hydraulic conductivity is the same in all aquifers. z : array or list Elevation of tops and bottoms of the aquifers from the top down. Leaky layers may have zero thickness. * if topboundary='conf': length is 2 * number of aquifers * if topboundary='semi': length is 2 * number of aquifers + 1 as top of leaky layer on top of systems needs to be specified c : float, array or list Resistance of leaky layers from the top down. * if float, resistance is the same for all leaky layers * if topboundary='conf': length is number of aquifers - 1 * if topboundary='semi': length is number of aquifers npor : float, array or list Porosity of all aquifers and leaky layers from the top down. 
* if float, porosity is the same for all layers * if topboundary='conf': length is 2 * number of aquifers - 1 * if topboundary='semi': length is 2 * number of aquifers topboundary : string, 'conf' or 'semi' (default is 'conf') Indicates whether the topboundary is confined ('conf') or semi-confined ('semi'). hstar : float or None (default is None) Head value above semi-confining top, only read if topboundary='semi'. Examples -------- >>> ml = ModelMaq(kaq=[10, 20], z=[20, 12, 10, 0], c=1000) """ def __init__(self, kaq=1, z=[1, 0], c=[], npor=0.3, topboundary='conf', hstar=None, f2py=False): self.storeinput(inspect.currentframe()) kaq, c, npor, ltype = param_maq(kaq, z, c, npor, topboundary) Model.__init__(self, kaq, c, z, npor, ltype, f2py) self.name = 'ModelMaq' if self.aq.ltype[0] == 'l': ConstantStar(self, hstar, aq=self.aq) class Model3D(Model): """ Model3D Class to create a multi-layer model object consisting of many aquifer layers. The resistance between the layers is computed from the vertical hydraulic conductivity of the layers. 
Parameters ---------- kaq : float, array or list hydraulic conductivity of each layer from the top down if float, hydraulic conductivity is the same in all aquifers z : array or list elevation of top of system followed by bottoms of all layers from the top down bottom of layer is automatically equal to top of layer below it if topboundary='conf': length is number of layers + 1 if topboundary='semi': length is number of layers + 2 as top of leaky layer on top of systems needs to be specified kzoverkh : float vertical anisotropy ratio vertical k divided by horizontal k if float, value is the same for all layers length is number of layers npor : float, array or list porosity of all aquifer layers from the top down if float, porosity is the same for all layers if topboundary='conf': length is number of layers if topboundary='semi': length is number of layers + 1 topboundary : string, 'conf' or 'semi' (default is 'conf') indicating whether the top is confined ('conf') or semi-confined ('semi') topres : float resistance of top semi-confining layer, only read if topboundary='semi' topthick: float thickness of top semi-confining layer, only read if topboundary='semi' hstar : float or None (default is None) head value above semi-confining top, only read if topboundary='semi' Examples -------- >>> ml = Model3D(kaq=10, z=np.arange(20, -1, -2), kzoverkh=0.1) """ def __init__(self, kaq=1, z=[1, 0], kzoverkh=1, npor=0.3, topboundary='conf', topres=0, topthick=0, hstar=0, f2py=False): '''Model3D for semi-confined aquifers, set top equal to 'semi' and provide topres: resistance of top tophick: thickness of top hstar: head above top''' self.storeinput(inspect.currentframe()) kaq, c, npor, ltype = param_3d(kaq, z, kzoverkh, npor, topboundary, topres) if topboundary == 'semi': z = np.hstack((z[0] + topthick, z)) Model.__init__(self, kaq, c, z, npor, ltype, f2py) self.name = 'Model3D' if self.aq.ltype[0] == 'l': ConstantStar(self, hstar, aq=self.aq)
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@

# Service API surface for messaging: @service_api functions are callable by
# services, @service_api_callback functions are callback stubs that services
# implement (their bodies here are intentionally `pass`).

import base64
import logging
import re
from types import NoneType

from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.properties import object_factory
from mcfw.rpc import arguments, returns
from rogerthat.bizz.messaging import ChatFlags as ChatFlagsBizz, InvalidURLException
from rogerthat.dal.mfd import get_message_flow_run_record
from rogerthat.dal.service import get_service_identity, get_service_interaction_def
from rogerthat.models import Message, ServiceProfile, ServiceIdentity, ProfilePointer
from rogerthat.models.properties.forms import FormResult
from rogerthat.rpc import users
from rogerthat.rpc.service import service_api, service_api_callback
from rogerthat.rpc.users import get_current_user
from rogerthat.to.messaging import AnswerTO, MemberTO, AttachmentTO, BroadcastTargetAudienceTO, KeyValueTO, \
    BroadcastResultTO, ChatMessageListResultTO, PokeInformationTO
from rogerthat.to.messaging.flow import FLOW_STEP_MAPPING, FormFlowStepTO
from rogerthat.to.messaging.forms import FormTO
from rogerthat.to.messaging.service_callback_results import PokeCallbackResultTO, MessageAcknowledgedCallbackResultTO, \
    FormAcknowledgedCallbackResultTO, FlowMemberResultCallbackResultTO
from rogerthat.to.service import UserDetailsTO
from rogerthat.utils import try_or_defer
from rogerthat.utils.app import create_app_user, create_app_user_by_email
from rogerthat.utils.service import create_service_identity_user

ChatFlags = ChatFlagsBizz  # Prevent unused import warning


@service_api(function=u"messaging.send")
@returns(unicode)
@arguments(parent_key=unicode, parent_message_key=unicode, message=unicode, answers=[AnswerTO], flags=int,
           members=[(unicode, MemberTO)], branding=unicode, tag=unicode, service_identity=unicode, alert_flags=int,
           dismiss_button_ui_flags=int, context=unicode, attachments=[AttachmentTO], broadcast_guid=unicode,
           step_id=unicode)
def send(parent_key, parent_message_key, message, answers, flags, members, branding, tag, service_identity=None,
         alert_flags=Message.ALERT_FLAG_VIBRATE, dismiss_button_ui_flags=0, context=None, attachments=None,
         broadcast_guid=None, step_id=None):
    """Send a message to the given members; returns the message key."""
    from rogerthat.bizz.messaging import sendMessage, member_list_to_usermember_list
    from rogerthat.bizz.service import get_and_validate_service_identity_user
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    mm = member_list_to_usermember_list(service_identity_user, members, alert_flags)
    # Chat-related flags may not be set through this API; mask them all off.
    flags &= ~Message.FLAG_SENT_BY_JS_MFR
    flags &= ~Message.FLAG_DYNAMIC_CHAT
    flags &= ~Message.FLAG_NOT_REMOVABLE
    flags &= ~Message.FLAG_CHAT_STICKY
    flags &= ~Message.FLAG_ALLOW_CHAT_PICTURE
    flags &= ~Message.FLAG_ALLOW_CHAT_VIDEO
    flags &= ~Message.FLAG_ALLOW_CHAT_PRIORITY
    flags &= ~Message.FLAG_ALLOW_CHAT_STICKY
    # parent_key is the preferred argument; parent_message_key is the legacy fallback.
    message = sendMessage(service_identity_user, mm, flags, 0,
                          parent_key if parent_key != MISSING else parent_message_key,
                          message, answers, None, branding, tag, dismiss_button_ui_flags, context=context,
                          attachments=attachments, is_mfr=users.get_current_user().is_mfr,
                          broadcast_guid=broadcast_guid, step_id=step_id)
    return message.mkey


@service_api(function=u"messaging.start_chat")
@returns(unicode)
@arguments(members=[(unicode, MemberTO)], topic=unicode, description=unicode, alert_flags=int,
           service_identity=unicode, tag=unicode, context=unicode, reader_members=[(unicode, MemberTO)], flags=int,
           metadata=[KeyValueTO], avatar=unicode, background_color=unicode, text_color=unicode,
           default_priority=(int, long), default_sticky=bool)
def start_chat(members, topic, description, alert_flags=Message.ALERT_FLAG_VIBRATE, service_identity=None, tag=None,
               context=None, reader_members=None, flags=0, metadata=None, avatar=None, background_color=None,
               text_color=None, default_priority=Message.PRIORITY_NORMAL, default_sticky=False):
    """Start a chat with writer members and optional read-only members;
    returns the parent message key of the chat."""
    from rogerthat.bizz.service import get_and_validate_service_identity_user
    from rogerthat.bizz.messaging import start_chat as start_chat_bizz, member_list_to_usermember_list
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    writers = member_list_to_usermember_list(service_identity_user, members, alert_flags)
    readers = member_list_to_usermember_list(service_identity_user, reader_members, alert_flags,
                                             read_only=True) if reader_members else list()
    # avatar comes in base64-encoded over the API
    avatar = base64.b64decode(avatar) if avatar else None
    message = start_chat_bizz(service_identity_user, topic, description, writers, readers, tag, context, flags,
                              metadata, avatar, background_color, text_color, default_priority, default_sticky)
    return message.mkey


@service_api(function=u"messaging.update_chat")
@returns(bool)
@arguments(parent_message_key=unicode, topic=unicode, description=unicode, flags=int, metadata=[KeyValueTO],
           avatar=unicode, background_color=unicode, text_color=unicode)
def update_chat(parent_message_key, topic=None, description=None, flags=-1, metadata=None, avatar=None,
                background_color=None, text_color=None):
    """Update properties of an existing chat; a negative `flags` means
    'leave flags unchanged'."""
    from rogerthat.bizz.messaging import update_chat as bizz_update_chat
    flags = None if flags < 0 else flags
    avatar = base64.b64decode(avatar) if avatar else None
    return bizz_update_chat(users.get_current_user(), parent_message_key, topic, description, flags, metadata, avatar,
                            background_color, text_color)


@service_api(function=u"messaging.send_chat_message")
@returns(unicode)
@arguments(parent_key=unicode, message=unicode, answers=[AnswerTO], attachments=[AttachmentTO],
           sender=(unicode, MemberTO), priority=(int, long), sticky=bool, tag=unicode, alert_flags=(int, long))
def send_chat_message(parent_key, message, answers=None, attachments=None, sender=None, priority=None, sticky=False,
                      tag=None, alert_flags=Message.ALERT_FLAG_VIBRATE):
    """Send a message into an existing chat; returns the message key."""
    from rogerthat.bizz.messaging import send_chat_message as bizz_send_chat_message
    service_user = users.get_current_user()
    message = bizz_send_chat_message(service_user, parent_key, message, answers, attachments, sender, priority,
                                     sticky, tag, alert_flags)
    return message.mkey


@service_api_callback(function=u"messaging.new_chat_message", code=ServiceProfile.CALLBACK_MESSAGING_NEW_CHAT_MESSAGE)
@returns()
@arguments(parent_message_key=unicode, message_key=unicode, sender=UserDetailsTO, message=unicode, answers=[AnswerTO],
           timestamp=int, tag=unicode, service_identity=unicode, attachments=[AttachmentTO])
def new_chat_message(parent_message_key, message_key, sender, message, answers, timestamp, tag, service_identity,
                     attachments):
    # Callback stub: invoked when a user posts a message in a chat.
    pass


@service_api(function=u"messaging.list_chat_messages")
@returns(ChatMessageListResultTO)
@arguments(parent_message_key=unicode, cursor=unicode)
def list_chat_messages(parent_message_key, cursor=None):
    """List messages of a chat, paginated via `cursor`."""
    from rogerthat.bizz.messaging import list_chat_messages as bizz_list_chat_messages
    service_user = users.get_current_user()
    lr = bizz_list_chat_messages(service_user, parent_message_key, cursor)
    return ChatMessageListResultTO.from_model(lr.cursor, lr.messages, lr.user_profiles)


@service_api(function=u"messaging.broadcast")
@returns(BroadcastResultTO)
@arguments(broadcast_type=unicode, message=unicode, answers=[AnswerTO], flags=int, branding=unicode, tag=unicode,
           service_identity=unicode, alert_flags=int, dismiss_button_ui_flags=int,
           target_audience=BroadcastTargetAudienceTO, attachments=[AttachmentTO], timeout=int)
def broadcast(broadcast_type, message, answers, flags, branding, tag, service_identity=None,
              alert_flags=Message.ALERT_FLAG_VIBRATE, dismiss_button_ui_flags=0, target_audience=None,
              attachments=None, timeout=0):
    """Broadcast a message to all users subscribed to `broadcast_type`;
    returns a result holding the statistics key."""
    from rogerthat.bizz.messaging import broadcastMessage
    from rogerthat.bizz.service import get_and_validate_service_identity_user, validate_broadcast_type
    service_user = users.get_current_user()
    service_identity_user = get_and_validate_service_identity_user(service_user, service_identity)
    validate_broadcast_type(service_user, broadcast_type)
    broadcast_guid = broadcastMessage(service_identity_user, broadcast_type, message, answers, flags, branding, tag,
                                      alert_flags, dismiss_button_ui_flags, target_audience, attachments, timeout)
    result = BroadcastResultTO()
    result.statistics_key = broadcast_guid
    return result


@service_api(function=u"messaging.send_form")
@returns(unicode)
@arguments(parent_key=unicode, parent_message_key=unicode, member=unicode, message=unicode, form=FormTO, flags=int,
           alert_flags=int, branding=unicode, tag=unicode, service_identity=unicode, context=unicode,
           attachments=[AttachmentTO], app_id=unicode, broadcast_guid=unicode, step_id=unicode)
def send_form(parent_key, parent_message_key, member, message, form, flags, alert_flags, branding, tag,
              service_identity=None, context=None, attachments=None, app_id=None, broadcast_guid=None, step_id=None):
    """Send a form to a single member; returns the form message key."""
    from rogerthat.bizz.messaging import sendForm
    from rogerthat.bizz.service import get_and_validate_service_identity_user, \
        get_and_validate_app_id_for_service_identity_user
    flags = 0  # flags are currently not used; clear any flags set by api client (e.g. ALLOW_DISMISS or SHARED_MEMBERS)
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    app_id = get_and_validate_app_id_for_service_identity_user(service_identity_user, app_id, member)
    fm = sendForm(service_identity_user, parent_key if parent_key != MISSING else parent_message_key,
                  create_app_user(users.User(member), app_id), message, form, flags, branding, tag, alert_flags,
                  context=context, attachments=attachments, is_mfr=users.get_current_user().is_mfr,
                  broadcast_guid=broadcast_guid, step_id=step_id)
    return fm.mkey


@service_api_callback(function=u"messaging.form_update", code=ServiceProfile.CALLBACK_MESSAGING_FORM_ACKNOWLEDGED)
@returns(FormAcknowledgedCallbackResultTO)
@arguments(status=int, form_result=FormResult, answer_id=unicode, member=unicode, message_key=unicode, tag=unicode,
           received_timestamp=int, acked_timestamp=int, parent_message_key=unicode, result_key=unicode,
           service_identity=unicode, user_details=[UserDetailsTO])
def form_acknowledged(status, form_result, answer_id, member, message_key, tag, received_timestamp, acked_timestamp,
                      parent_message_key, result_key, service_identity, user_details):
    # Callback stub: invoked when a member submits or dismisses a form.
    pass


@service_api_callback(function=u"messaging.update", code=ServiceProfile.CALLBACK_MESSAGING_RECEIVED)
@returns(NoneType)
@arguments(status=int, answer_id=unicode, received_timestamp=int, member=unicode, message_key=unicode, tag=unicode,
           acked_timestamp=int, parent_message_key=unicode, service_identity=unicode, user_details=[UserDetailsTO])
def received(status, answer_id, received_timestamp, member, message_key, tag, acked_timestamp, parent_message_key,
             service_identity, user_details):
    # Callback stub: invoked when a message is received by a member's device.
    pass


@service_api_callback(function=u"messaging.update", code=ServiceProfile.CALLBACK_MESSAGING_ACKNOWLEDGED)
@returns(MessageAcknowledgedCallbackResultTO)
@arguments(status=int, answer_id=unicode, received_timestamp=int, member=unicode, message_key=unicode, tag=unicode,
           acked_timestamp=int, parent_message_key=unicode, result_key=unicode, service_identity=unicode,
           user_details=[UserDetailsTO])
def acknowledged(status, answer_id, received_timestamp, member, message_key, tag, acked_timestamp,
                 parent_message_key, result_key, service_identity, user_details):
    # Callback stub: invoked when a member acknowledges (answers) a message.
    pass


@service_api_callback(function=u"messaging.poke", code=ServiceProfile.CALLBACK_MESSAGING_POKE)
@returns(PokeCallbackResultTO)
@arguments(email=unicode, tag=unicode, result_key=unicode, context=unicode, service_identity=unicode,
           user_details=[UserDetailsTO], timestamp=int)
def poke(email, tag, result_key, context, service_identity, user_details, timestamp):
    # Callback stub: invoked when a user pokes the service (e.g. QR scan).
    pass


@service_api(function=u"messaging.poke_information")
@returns(PokeInformationTO)
@arguments(url=unicode)
def poke_information(url):
    """Resolve a poke/short URL to its service interaction definition.

    Returns a PokeInformationTO, or None when the URL does not map to an
    interaction of the calling service. Raises InvalidURLException for
    URLs that cannot be parsed.
    """
    svc_user = users.get_current_user()
    # Short-URL form: http(s)://<host>/S/<code>
    m = re.match("(https?://)(.*)/S/(.*)", url, flags=re.IGNORECASE)
    if m:
        from rogerthat.pages.shortner import get_short_url_by_code
        code = m.group(3)
        su = get_short_url_by_code(code)
        if not su:
            logging.debug("poke_information - no ShortURL")
            raise InvalidURLException(url=url)
        path = su.full
    elif "/q/s/" in url:
        path = url.split("/q/s/")[1]
    else:
        logging.debug("poke_information - else path")
        raise InvalidURLException(url=url)
    # NOTE(review): in the elif branch above, split("/q/s/")[1] strips the
    # "/q/s/" prefix, so this startswith check appears to reject that
    # branch's paths — confirm whether that is intended.
    if not path.startswith("/q/s/"):
        logging.debug("poke_information - not startswith /q/s/ but '%s'", path)
        raise InvalidURLException(url=url)
    m = re.match("/q/s/(.*)/(.*)", path)
    if not m:
        logging.debug("poke_information not a match")
        raise InvalidURLException(url=url)
    userCode, id_ = m.group(1), m.group(2)
    pp = ProfilePointer.get(userCode)
    if not pp:
        logging.debug("poke_information - no pp")
        return None
    sid = get_service_interaction_def(pp.user, int(id_))
    if not sid:
        logging.debug("poke_information - no sid")
        return None
    if sid.user != svc_user:
        # The interaction belongs to a different service; do not leak it.
        logging.debug("poke_information - incorrect svc_user. should be %s, but got %s", sid.user, svc_user)
        return None
    r = PokeInformationTO()
    r.description = sid.description
    r.tag = sid.tag
    r.timestamp = sid.timestamp
    r.total_scan_count = sid.totalScanCount
    return r


@service_api(function=u"messaging.seal")
@returns(NoneType)
@arguments(message_key=unicode, message_parent_key=unicode, parent_message_key=unicode, dirty_behavior=int)
def seal(message_key, message_parent_key, parent_message_key, dirty_behavior):
    """Lock (seal) a message so it can no longer be answered."""
    from rogerthat.bizz.messaging import lockMessage
    svc_user = users.get_current_user()
    lockMessage(svc_user, message_key,
                message_parent_key if message_parent_key != MISSING else parent_message_key, dirty_behavior)


@service_api(function=u"messaging.delete_conversation", cache_result=False)
@returns(bool)
@arguments(parent_message_key=unicode, members=[(unicode, MemberTO)], service_identity=unicode)
def delete_conversation(parent_message_key, members, service_identity=None):
    """Delete a conversation for the given members."""
    from rogerthat.bizz.messaging import service_delete_conversation
    from rogerthat.bizz.service import get_and_validate_service_identity_user
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    return service_delete_conversation(service_identity_user, parent_message_key,
                                       _convert_to_member_users(service_identity_user, members))


@service_api(function=u"messaging.delete_chat", cache_result=False)
@returns(bool)
@arguments(parent_message_key=unicode)
def delete_chat(parent_message_key):
    """Delete an entire chat."""
    from rogerthat.bizz.messaging import delete_chat as bizz_delete_chat
    service_user = users.get_current_user()
    return bizz_delete_chat(service_user, parent_message_key)


@service_api_callback(function=u"messaging.chat_deleted", code=ServiceProfile.CALLBACK_MESSAGING_CHAT_DELETED)
@returns()
@arguments(parent_message_key=unicode, member=UserDetailsTO, timestamp=int, service_identity=unicode, tag=unicode)
def chat_deleted(parent_message_key, member, timestamp, service_identity, tag):
    # Callback stub: invoked when a member deletes a chat.
    pass


@service_api(function=u"messaging.add_chat_members")
@returns()
@arguments(parent_message_key=unicode, members=[(unicode, MemberTO)], reader_members=[(unicode, MemberTO)])
def add_chat_members(parent_message_key, members, reader_members=None):
    """Add writer members and optional read-only members to a chat."""
    from rogerthat.bizz.messaging import add_members_to_chat
    service_user = users.get_current_user()
    add_members_to_chat(service_user, parent_message_key, members, reader_members or list())


@service_api(function=u"messaging.delete_chat_members")
@returns()
@arguments(parent_message_key=unicode, members=[(unicode, MemberTO)], soft=bool)
def delete_chat_members(parent_message_key, members, soft=False):
    """Remove members from a chat; `soft` controls the removal mode
    passed through to the bizz layer."""
    from rogerthat.bizz.messaging import delete_members_from_chat
    service_user = users.get_current_user()
    delete_members_from_chat(service_user, parent_message_key, members, soft)


@service_api(function=u"messaging.update_chat_members")
@returns()
@arguments(parent_message_key=unicode, members=[(unicode, MemberTO)], status=unicode)
def update_chat_members(parent_message_key, members, status):
    """Update the status of existing chat members."""
    from rogerthat.bizz.messaging import update_chat_members as bizz_update_chat_members
    bizz_update_chat_members(users.get_current_user(), parent_message_key, members, status)


@returns([users.User])
@arguments(service_identity_user=users.User, members=[(unicode, MemberTO)])
def _convert_to_member_users(service_identity_user, members):
    # Normalize a mixed list of email strings / MemberTO objects into
    # app users, validating app support for explicit MemberTOs.
    # Returns None when members is MISSING.
    from rogerthat.bizz.service import validate_is_friend_or_supports_app_id
    mm = None
    if members != MISSING:
        mm = []
        si = get_service_identity(service_identity_user)
        for m in members:
            if isinstance(m, MemberTO) and m.app_id != MISSING:
                validate_is_friend_or_supports_app_id(si, m.app_id, m.app_user)
                mm.append(m.app_user)
            else:
                # plain email string: scope it to the identity's default app
                mm.append(create_app_user_by_email(m, si.app_id))
    return mm


@service_api(function=u"messaging.start_flow")
@returns(unicode)
@arguments(message_parent_key=unicode, parent_message_key=unicode, flow=unicode, members=[(unicode, MemberTO)],
           service_identity=unicode, tag=unicode, context=unicode, force_language=unicode, flow_params=unicode)
def start_flow(message_parent_key, parent_message_key, flow, members, service_identity=None, tag=None, context=None,
               force_language=None, flow_params=None):
    """Start a stored message flow for the given members."""
    from rogerthat.bizz.service.mfr import start_flow as start_flow_bizz
    from rogerthat.bizz.service import get_and_validate_service_identity_user
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    mm = _convert_to_member_users(service_identity_user, members)
    parent_key = message_parent_key if message_parent_key != MISSING else parent_message_key
    return start_flow_bizz(service_identity_user, parent_key, flow, mm, True, True, tag, context=context,
                           force_language=force_language, flow_params=flow_params)


@service_api(function=u"messaging.start_local_flow")
@returns(unicode)
@arguments(xml=unicode, members=[(unicode, MemberTO)], service_identity=unicode, tag=unicode,
           parent_message_key=unicode, context=unicode, force_language=unicode, download_attachments_upfront=bool,
           push_message=unicode, flow=unicode, flow_params=unicode)
def start_local_flow(xml, members, service_identity=None, tag=None, parent_message_key=None, context=None,
                     force_language=None, download_attachments_upfront=False, push_message=None, flow=None,
                     flow_params=None):
    """Start an ad-hoc message flow supplied as XML for the given members."""
    from rogerthat.bizz.service.mfr import start_local_flow as start_local_flow_bizz
    from rogerthat.bizz.service import get_and_validate_service_identity_user
    service_identity_user = get_and_validate_service_identity_user(users.get_current_user(), service_identity)
    mm = _convert_to_member_users(service_identity_user, members)
    return start_local_flow_bizz(service_identity_user, parent_message_key, xml, mm, tag, context, force_language,
                                 download_attachments_upfront, push_message, None, flow, flow_params)


#############################################
# DO NOT DOCUMENT THIS SERVICE API FUNCTION #
# MFR ONLY                                  #
@service_api(function=u"messaging.mfr_flow_member_result")
@returns(NoneType)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, flush_id=unicode, parent_message_key=unicode, service_identity=unicode, results_email=bool,
           email_admins=bool, emails=[unicode], message_flow_name=unicode, app_id=unicode)
def mfr_flow_member_result(message_flow_run_id, member, steps, end_id, flush_id, parent_message_key,
                           service_identity=None, results_email=False, email_admins=False, emails=None,
                           message_flow_name=None, app_id=None):
    # Internal entry point for the message flow runner (MFR) to deliver a
    # member's flow results: either mails them or forwards them to the
    # service's flow_member_result callback.
    from rogerthat.bizz.service import get_and_validate_app_id_for_service_identity_user
    svc_user = get_current_user()
    azzert(svc_user.is_mfr)  # only the MFR itself may call this
    if not service_identity or service_identity == MISSING:
        service_identity = ServiceIdentity.DEFAULT
    mfr = get_message_flow_run_record(svc_user, message_flow_run_id)
    if not mfr:
        return
    svc_identity_user = create_service_identity_user(svc_user, service_identity)
    azzert(mfr.service_identity == svc_identity_user.email())
    app_id = get_and_validate_app_id_for_service_identity_user(svc_identity_user, app_id, member)
    app_user = create_app_user(users.User(member), app_id)
    if end_id:
        from rogerthat.bizz.messaging import check_test_flow_broadcast_ended
        check_test_flow_broadcast_ended(app_user, users.User(mfr.service_identity), parent_message_key, mfr.tag)
    if results_email:
        from rogerthat.bizz.messaging import send_message_flow_results_email
        for step in steps:
            # Form step values may arrive base64-encoded; decode them before mailing.
            if step.step_type == FormFlowStepTO.TYPE and step.display_value \
                    and step.display_value.startswith('base64:'):
                step.display_value = base64.b64decode(step.display_value[7:]).decode('utf-8')
        try_or_defer(send_message_flow_results_email, message_flow_name, emails, email_admins, steps, app_user,
                     service_identity, svc_user, parent_message_key, mfr.tag)
    else:
        if mfr.post_result_callback:
            from rogerthat.bizz.messaging import send_message_flow_member_result
            send_message_flow_member_result(svc_user, service_identity, message_flow_run_id, parent_message_key,
                                            app_user, steps, end_id, flush_id, mfr.tag, mfr.flow_params)
############################################


@service_api_callback(function=u"messaging.flow_member_result",
                      code=ServiceProfile.CALLBACK_MESSAGING_FLOW_MEMBER_RESULT)
@returns(FlowMemberResultCallbackResultTO)
@arguments(message_flow_run_id=unicode, member=unicode, steps=[object_factory("step_type", FLOW_STEP_MAPPING)],
           end_id=unicode, end_message_flow_id=unicode, parent_message_key=unicode, tag=unicode, result_key=unicode,
           flush_id=unicode, flush_message_flow_id=unicode, service_identity=unicode, user_details=[UserDetailsTO],
           flow_params=unicode, timestamp=(int, long))
def flow_member_result(message_flow_run_id, member, steps, end_id, end_message_flow_id, parent_message_key, tag,
                       result_key, flush_id, flush_message_flow_id, service_identity, user_details, flow_params,
                       timestamp):
    # Callback stub: invoked when a member finishes (or flushes) a message flow.
    pass
"""Helpers for building per-run HTML test reports and CSV result tables.

Report layout (relative to the current working directory):
  <LATEST_REPORT_DIR>/            - results of the most recent run
  <ARCHIVE_DIR>/report_<epoch>/   - archived copies of previous runs
"""
import os
import shutil
import sys
import time
from selenium import webdriver
from seleniumbase.config import settings
from seleniumbase.core.style_sheet import style
from seleniumbase.fixtures import page_actions

LATEST_REPORT_DIR = settings.LATEST_REPORT_DIR
ARCHIVE_DIR = settings.REPORT_ARCHIVE_DIR
HTML_REPORT = settings.HTML_REPORT
RESULTS_TABLE = settings.RESULTS_TABLE


def get_timestamp():
    """Return the current epoch time in milliseconds, as a string."""
    return str(int(time.time() * 1000))


def process_successes(test, test_count, duration):
    """Return one CSV row (quoted fields) describing a passing test.

    ``test`` must expose ``.browser`` and ``.id()`` (a unittest TestCase does).
    "*" marks fields that only apply to failures (stacktrace, screenshot, URL).
    """
    return (
        '"%s","%s","%s","%s","%s","%s","%s","%s","%s","%s"' % (
            test_count,
            "Passed!",
            "*",
            "*",
            "*",
            test.browser,
            get_timestamp()[:-3],  # trim ms -> whole seconds
            duration,
            test.id(),
            "*"))


def process_failures(test, test_count, browser_type, duration):
    """Save failure artifacts (screenshot + page data) and return a CSV row.

    Artifacts are written into LATEST_REPORT_DIR and referenced by name in
    the returned row. The exception message is taken from the exception
    currently being handled, if any.
    """
    bad_page_image = "failure_%s.jpg" % test_count
    bad_page_data = "failure_%s.txt" % test_count
    page_actions.save_screenshot(
        test.driver, bad_page_image, folder=LATEST_REPORT_DIR)
    page_actions.save_test_failure_data(
        test.driver, bad_page_data, browser_type, folder=LATEST_REPORT_DIR)
    exc_info = '(Unknown Failure)'
    exception = sys.exc_info()[1]
    if exception:
        # Different exception classes store their message under different
        # attributes; fall back to the generic placeholder otherwise.
        if hasattr(exception, 'msg'):
            exc_info = exception.msg
        elif hasattr(exception, 'message'):
            exc_info = exception.message
        else:
            pass
    return (
        '"%s","%s","%s","%s","%s","%s","%s","%s","%s","%s"' % (
            test_count,
            "FAILED!",
            bad_page_data,
            bad_page_image,
            test.driver.current_url,
            test.browser,
            get_timestamp()[:-3],
            duration,
            test.id(),
            exc_info))


def clear_out_old_report_logs(archive_past_runs=True, get_log_folder=False):
    """Prepare LATEST_REPORT_DIR for a new run.

    With ``archive_past_runs`` the previous contents are moved to a
    timestamped folder under ARCHIVE_DIR (returned when ``get_log_folder``
    is set); otherwise only the generated report files are deleted in place.
    """
    abs_path = os.path.abspath('.')
    file_path = abs_path + "/%s" % LATEST_REPORT_DIR
    if not os.path.exists(file_path):
        os.makedirs(file_path)
    if archive_past_runs:
        archive_timestamp = int(time.time())
        if not os.path.exists("%s/../%s/" % (file_path, ARCHIVE_DIR)):
            os.makedirs("%s/../%s/" % (file_path, ARCHIVE_DIR))
        archive_dir = "%s/../%s/report_%s" % (
            file_path, ARCHIVE_DIR, archive_timestamp)
        shutil.move(file_path, archive_dir)
        os.makedirs(file_path)
        if get_log_folder:
            return archive_dir
    else:
        # Just delete bad pages to make room for the latest run.
        filelist = [f for f in os.listdir(
            "./%s" % LATEST_REPORT_DIR) if f.startswith("failure_") or (
            f == HTML_REPORT) or (f.startswith("automation_failure")) or (
            f == RESULTS_TABLE)]
        for f in filelist:
            os.remove("%s/%s" % (file_path, f))


def add_bad_page_log_file(page_results_list):
    """Write the CSV results table (header + one row per test) into
    LATEST_REPORT_DIR."""
    abs_path = os.path.abspath('.')
    file_path = abs_path + "/%s" % LATEST_REPORT_DIR
    log_file = "%s/%s" % (file_path, RESULTS_TABLE)
    h_p1 = '''"Num","Result","Stacktrace","Screenshot",'''
    h_p2 = '''"URL","Browser","Epoch Time","Duration",'''
    h_p3 = '''"Test Case Address","Additional Info"\n'''
    page_header = h_p1 + h_p2 + h_p3
    # Fix: use a context manager so the handle is closed even if a write fails
    # (the original left the file open on exceptions).
    with open(log_file, 'w') as f:
        f.write(page_header)
        for line in page_results_list:
            f.write("%s\n" % line)


def archive_new_report_logs():
    """Archive the latest report logs and return the archive folder path."""
    log_string = clear_out_old_report_logs(get_log_folder=True)
    log_folder = log_string.split('/')[-1]
    abs_path = os.path.abspath('.')
    file_path = abs_path + "/%s" % ARCHIVE_DIR
    report_log_path = "%s/%s" % (file_path, log_folder)
    return report_log_path


def add_results_page(html):
    """Write the HTML report page into LATEST_REPORT_DIR; return its path."""
    abs_path = os.path.abspath('.')
    file_path = abs_path + "/%s" % LATEST_REPORT_DIR
    results_file_name = HTML_REPORT
    results_file = "%s/%s" % (file_path, results_file_name)
    # Fix: context manager instead of bare open()/close() (leak on exception).
    with open(results_file, 'w') as f:
        f.write(html)
    return results_file


def build_report(report_log_path, page_results_list,
                 successes, failures, browser_type, show_report):
    """Assemble the HTML report (summary, links, failures, pass/fail lists),
    archive a copy next to the logs, and optionally open it in a browser.

    When ``show_report`` is set, this blocks until the user closes the
    browser window showing the report.
    """
    web_log_path = "file://%s" % report_log_path
    successes_count = len(successes)
    failures_count = len(failures)
    total_test_count = successes_count + failures_count
    tf_color = "#11BB11"
    if failures_count > 0:
        tf_color = "#EE3A3A"
    summary_table = '''<div><table><thead><tr>
        <th>TESTING SUMMARY</th>
        <th>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;</th>
        </tr></thead><tbody>
        <tr style="color:#00BB00"><td>TESTS PASSING: <td>%s</tr>
        <tr style="color:%s"     ><td>TESTS FAILING: <td>%s</tr>
        <tr style="color:#4D4DDD"><td>TOTAL TESTS: <td>%s</tr>
        </tbody></table>''' % (
        successes_count, tf_color, failures_count, total_test_count)
    summary_table = '''<h1 id="ContextHeader" class="sectionHeader" title="">
        %s</h1>''' % summary_table
    log_link_shown = '../%s%s/' % (
        ARCHIVE_DIR, web_log_path.split(ARCHIVE_DIR)[1])
    csv_link = '%s/%s' % (web_log_path, RESULTS_TABLE)
    csv_link_shown = '%s' % RESULTS_TABLE
    log_table = '''<p><p><p><p><h2><table><tbody>
        <tr><td>LOG FILES LINK:&nbsp;&nbsp;<td><a href="%s">%s</a></tr>
        <tr><td>RESULTS TABLE:&nbsp;&nbsp;<td><a href="%s">%s</a></tr>
        </tbody></table></h2><p><p><p><p>''' % (
        web_log_path, log_link_shown, csv_link, csv_link_shown)
    failure_table = '<h2><table><tbody></div>'
    any_screenshots = False
    for line in page_results_list:
        line = line.split(',')
        if line[1] == '"FAILED!"':
            if not any_screenshots:
                # Emit the failures header only once, before the first row.
                any_screenshots = True
                failure_table += '''<thead><tr>
                    <th>STACKTRACE&nbsp;&nbsp;</th>
                    <th>SCREENSHOT&nbsp;&nbsp;</th>
                    <th>LOCATION OF FAILURE</th>
                    </tr></thead>'''
            display_url = line[4]
            if len(display_url) > 60:
                # Keep the visible link text short; href keeps the full URL.
                display_url = display_url[0:58] + '...'
            line = '<a href="%s">%s</a>' % (
                "file://" + report_log_path + '/' + line[2], line[2]) + '''
                &nbsp;&nbsp;
                ''' + '<td><a href="%s">%s</a>' % (
                "file://" + report_log_path + '/' + line[3], line[3]) + '''
                &nbsp;&nbsp;
                ''' + '<td><a href="%s">%s</a>' % (line[4], display_url)
            line = line.replace('"', '')
            failure_table += '<tr><td>%s</tr>\n' % line
    failure_table += '</tbody></table></h2>'
    failing_list = ''
    if failures:
        failing_list = '<h2><table><tbody>'
        failing_list += '''<thead><tr><th>LIST OF FAILING TESTS
            &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
            </th></tr></thead>'''
        for failure in failures:
            failing_list += '<tr style="color:#EE3A3A"><td>%s</tr>\n' % failure
        failing_list += '</tbody></table></h2>'
    passing_list = ''
    if successes:
        passing_list = '<h2><table><tbody>'
        passing_list += '''<thead><tr><th>LIST OF PASSING TESTS
            &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
            </th></tr></thead>'''
        for success in successes:
            passing_list += '<tr style="color:#00BB00"><td>%s</tr>\n' % success
        passing_list += '</tbody></table></h2>'
    table_view = '%s%s%s%s%s' % (
        summary_table, log_table, failure_table, failing_list, passing_list)
    report_html = '<html><head>%s</head><body>%s</body></html>' % (
        style, table_view)
    results_file = add_results_page(report_html)
    archived_results_file = report_log_path + '/' + HTML_REPORT
    shutil.copyfile(results_file, archived_results_file)
    print("\n* The latest html report page is located at:\n" + results_file)
    print(
        "\n* Files saved for this report are located at:\n" + report_log_path)
    print("")
    if show_report:
        if browser_type == 'firefox':
            browser = webdriver.Firefox()
        else:
            browser = webdriver.Chrome()
        browser.get("file://%s" % archived_results_file)
        print("\n*** Close the html report window to continue. ***")
        # Poll until the user closes the report window, then clean up.
        while len(browser.window_handles):
            time.sleep(0.1)
        browser.quit()
from __future__ import absolute_import, division, print_function from datetime import timedelta import functools import logging from uuid import uuid4 import six from django.db import IntegrityError, transaction from django.utils import timezone from rest_framework import serializers from rest_framework.response import Response from sentry import analytics, features, search from sentry.api.base import DocSection, EnvironmentMixin from sentry.api.bases.project import ProjectEndpoint, ProjectEventPermission from sentry.api.fields import ActorField, Actor from sentry.api.serializers import serialize from sentry.api.serializers.models.actor import ActorSerializer from sentry.api.serializers.models.group import ( SUBSCRIPTION_REASON_MAP, StreamGroupSerializer) from sentry.constants import DEFAULT_SORT_OPTION from sentry.db.models.query import create_or_update from sentry.models import ( Activity, Environment, Group, GroupAssignee, GroupBookmark, GroupHash, GroupResolution, GroupSeen, GroupShare, GroupSnooze, GroupStatus, GroupSubscription, GroupSubscriptionReason, GroupHashTombstone, GroupTombstone, Release, TOMBSTONE_FIELDS_FROM_GROUP, UserOption, User, Team ) from sentry.models.event import Event from sentry.models.group import looks_like_short_id from sentry.receivers import DEFAULT_SAVED_SEARCHES from sentry.search.utils import InvalidQuery, parse_query from sentry.signals import advanced_search, issue_ignored, issue_resolved_in_release from sentry.tasks.deletion import delete_group from sentry.tasks.integrations import kick_off_status_syncs from sentry.tasks.merge import merge_group from sentry.utils.apidocs import attach_scenarios, scenario from sentry.utils.cursors import Cursor, CursorResult from sentry.utils.functional import extract_lazy_object delete_logger = logging.getLogger('sentry.deletions.api') ERR_INVALID_STATS_PERIOD = "Invalid stats_period. 
Valid choices are '', '24h', and '14d'" SAVED_SEARCH_QUERIES = set([s['query'] for s in DEFAULT_SAVED_SEARCHES]) @scenario('BulkUpdateIssues') def bulk_update_issues_scenario(runner): project = runner.default_project group1, group2 = Group.objects.filter(project=project)[:2] runner.request( method='PUT', path='/projects/%s/%s/issues/?id=%s&id=%s' % (runner.org.slug, project.slug, group1.id, group2.id), data={'status': 'unresolved', 'isPublic': False} ) @scenario('BulkRemoveIssuess') def bulk_remove_issues_scenario(runner): with runner.isolated_project('Amazing Plumbing') as project: group1, group2 = Group.objects.filter(project=project)[:2] runner.request( method='DELETE', path='/projects/%s/%s/issues/?id=%s&id=%s' % (runner.org.slug, project.slug, group1.id, group2.id), ) @scenario('ListProjectIssuess') def list_project_issues_scenario(runner): project = runner.default_project runner.request( method='GET', path='/projects/%s/%s/issues/?statsPeriod=24h' % ( runner.org.slug, project.slug), ) STATUS_CHOICES = { 'resolved': GroupStatus.RESOLVED, 'unresolved': GroupStatus.UNRESOLVED, 'ignored': GroupStatus.IGNORED, 'resolvedInNextRelease': GroupStatus.UNRESOLVED, # TODO(dcramer): remove in 9.0 'muted': GroupStatus.IGNORED, } class ValidationError(Exception): pass class StatusDetailsValidator(serializers.Serializer): inNextRelease = serializers.BooleanField() inRelease = serializers.CharField() ignoreDuration = serializers.IntegerField() ignoreCount = serializers.IntegerField() # in minutes, max of one week ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60) ignoreUserCount = serializers.IntegerField() # in minutes, max of one week ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60) def validate_inRelease(self, attrs, source): value = attrs[source] project = self.context['project'] if value == 'latest': try: attrs[source] = Release.objects.filter( projects=project, organization_id=project.organization_id, ).extra(select={ 'sort': 
'COALESCE(date_released, date_added)', }).order_by('-sort')[0] except IndexError: raise serializers.ValidationError( 'No release data present in the system to form a basis for \'Next Release\'' ) else: try: attrs[source] = Release.objects.get( projects=project, organization_id=project.organization_id, version=value, ) except Release.DoesNotExist: raise serializers.ValidationError( 'Unable to find a release with the given version.' ) return attrs def validate_inNextRelease(self, attrs, source): project = self.context['project'] if not Release.objects.filter( projects=project, organization_id=project.organization_id, ).exists(): raise serializers.ValidationError( 'No release data present in the system to form a basis for \'Next Release\'' ) return attrs class GroupValidator(serializers.Serializer): status = serializers.ChoiceField(choices=zip( STATUS_CHOICES.keys(), STATUS_CHOICES.keys())) statusDetails = StatusDetailsValidator() hasSeen = serializers.BooleanField() isBookmarked = serializers.BooleanField() isPublic = serializers.BooleanField() isSubscribed = serializers.BooleanField() merge = serializers.BooleanField() discard = serializers.BooleanField() ignoreDuration = serializers.IntegerField() ignoreCount = serializers.IntegerField() # in minutes, max of one week ignoreWindow = serializers.IntegerField(max_value=7 * 24 * 60) ignoreUserCount = serializers.IntegerField() # in minutes, max of one week ignoreUserWindow = serializers.IntegerField(max_value=7 * 24 * 60) assignedTo = ActorField() # TODO(dcramer): remove in 9.0 snoozeDuration = serializers.IntegerField() def validate_assignedTo(self, attrs, source): value = attrs[source] if value and value.type is User and not self.context['project'].member_set.filter( user_id=value.id).exists(): raise serializers.ValidationError( 'Cannot assign to non-team member') if value and value.type is Team and not self.context['project'].teams.filter( id=value.id).exists(): raise serializers.ValidationError( 'Cannot assign to a 
team without access to the project') return attrs def validate(self, attrs): attrs = super(GroupValidator, self).validate(attrs) if len(attrs) > 1 and 'discard' in attrs: raise serializers.ValidationError( 'Other attributes cannot be updated when discarding') return attrs class ProjectGroupIndexEndpoint(ProjectEndpoint, EnvironmentMixin): doc_section = DocSection.EVENTS permission_classes = (ProjectEventPermission, ) def _build_query_params_from_request(self, request, project): query_kwargs = { 'project': project, 'sort_by': request.GET.get('sort', DEFAULT_SORT_OPTION), } limit = request.GET.get('limit') if limit: try: query_kwargs['limit'] = int(limit) except ValueError: raise ValidationError('invalid limit') # TODO: proper pagination support cursor = request.GET.get('cursor') if cursor: query_kwargs['cursor'] = Cursor.from_string(cursor) query = request.GET.get('query', 'is:unresolved').strip() if query: try: query_kwargs.update(parse_query(project, query, request.user)) except InvalidQuery as e: raise ValidationError( u'Your search query could not be parsed: {}'.format( e.message) ) return query_kwargs def _search(self, request, project, extra_query_kwargs=None): query_kwargs = self._build_query_params_from_request(request, project) if extra_query_kwargs is not None: assert 'environment' not in extra_query_kwargs query_kwargs.update(extra_query_kwargs) try: if features.has('organizations:environments', project.organization, actor=request.user): query_kwargs['environment'] = self._get_environment_from_request( request, project.organization_id, ) except Environment.DoesNotExist: # XXX: The 1000 magic number for `max_hits` is an abstraction leak # from `sentry.api.paginator.BasePaginator.get_result`. 
result = CursorResult([], None, None, hits=0, max_hits=1000) else: result = search.query(**query_kwargs) return result, query_kwargs def _subscribe_and_assign_issue(self, acting_user, group, result): if acting_user: GroupSubscription.objects.subscribe( user=acting_user, group=group, reason=GroupSubscriptionReason.status_change, ) self_assign_issue = UserOption.objects.get_value( user=acting_user, key='self_assign_issue', default='0' ) if self_assign_issue == '1' and not group.assignee_set.exists(): result['assignedTo'] = Actor(type=User, id=extract_lazy_object(acting_user).id) # statsPeriod=24h @attach_scenarios([list_project_issues_scenario]) def get(self, request, project): """ List a Project's Issues ``````````````````````` Return a list of issues (groups) bound to a project. All parameters are supplied as query string parameters. A default query of ``is:unresolved`` is applied. To return results with other statuses send an new query value (i.e. ``?query=`` for all results). The ``statsPeriod`` parameter can be used to select the timeline stats which should be present. Possible values are: '' (disable), '24h', '14d' :qparam string statsPeriod: an optional stat period (can be one of ``"24h"``, ``"14d"``, and ``""``). :qparam bool shortIdLookup: if this is set to true then short IDs are looked up by this function as well. This can cause the return value of the function to return an event issue of a different project which is why this is an opt-in. Set to `1` to enable. :qparam querystring query: an optional Sentry structured search query. If not provided an implied ``"is:unresolved"`` is assumed.) :pparam string organization_slug: the slug of the organization the issues belong to. :pparam string project_slug: the slug of the project the issues belong to. 
:auth: required """ stats_period = request.GET.get('statsPeriod') if stats_period not in (None, '', '24h', '14d'): return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400) elif stats_period is None: # default stats_period = '24h' elif stats_period == '': # disable stats stats_period = None serializer = functools.partial( StreamGroupSerializer, environment_func=self._get_environment_func(request, project.organization_id), stats_period=stats_period, ) query = request.GET.get('query', '').strip() if query: matching_group = None matching_event = None if len(query) == 32: # check to see if we've got an event ID try: matching_group = Group.objects.from_event_id(project, query) except Group.DoesNotExist: pass else: try: matching_event = Event.objects.get( event_id=query, project_id=project.id) except Event.DoesNotExist: pass else: Event.objects.bind_nodes([matching_event], 'data') # If the query looks like a short id, we want to provide some # information about where that is. Note that this can return # results for another project. The UI deals with this. 
elif request.GET.get('shortIdLookup') == '1' and \ looks_like_short_id(query): try: matching_group = Group.objects.by_qualified_short_id( project.organization_id, query ) except Group.DoesNotExist: matching_group = None if matching_group is not None: matching_event_environment = None try: matching_event_environment = matching_event.get_environment().name if matching_event else None except Environment.DoesNotExist: pass response = Response( serialize( [matching_group], request.user, serializer( matching_event_id=getattr(matching_event, 'id', None), matching_event_environment=matching_event_environment, ) ) ) response['X-Sentry-Direct-Hit'] = '1' return response try: cursor_result, query_kwargs = self._search(request, project, {'count_hits': True}) except ValidationError as exc: return Response({'detail': six.text_type(exc)}, status=400) results = list(cursor_result) context = serialize(results, request.user, serializer()) # HACK: remove auto resolved entries if query_kwargs.get('status') == GroupStatus.UNRESOLVED: context = [r for r in context if r['status'] == 'unresolved'] response = Response(context) self.add_cursor_headers(request, response, cursor_result) if results and query not in SAVED_SEARCH_QUERIES: advanced_search.send(project=project, sender=request.user) analytics.record('project_issue.searched', user_id=request.user.id, organization_id=project.organization_id, project_id=project.id, query=query) return response @attach_scenarios([bulk_update_issues_scenario]) def put(self, request, project): """ Bulk Mutate a List of Issues ```````````````````````````` Bulk mutate various attributes on issues. The list of issues to modify is given through the `id` query parameter. It is repeated for each issue that should be modified. - For non-status updates, the `id` query parameter is required. - For status updates, the `id` query parameter may be omitted for a batch "update all" query. 
- An optional `status` query parameter may be used to restrict mutations to only events with the given status. The following attributes can be modified and are supplied as JSON object in the body: If any ids are out of scope this operation will succeed without any data mutation. :qparam int id: a list of IDs of the issues to be mutated. This parameter shall be repeated for each issue. It is optional only if a status is mutated in which case an implicit `update all` is assumed. :qparam string status: optionally limits the query to issues of the specified status. Valid values are ``"resolved"``, ``"unresolved"`` and ``"ignored"``. :pparam string organization_slug: the slug of the organization the issues belong to. :pparam string project_slug: the slug of the project the issues belong to. :param string status: the new status for the issues. Valid values are ``"resolved"``, ``"resolvedInNextRelease"``, ``"unresolved"``, and ``"ignored"``. :param int ignoreDuration: the number of minutes to ignore this issue. :param boolean isPublic: sets the issue to public or private. :param boolean merge: allows to merge or unmerge different issues. :param string assignedTo: the actor id (or username) of the user or team that should be assigned to this issue. :param boolean hasSeen: in case this API call is invoked with a user context this allows changing of the flag that indicates if the user has seen the event. :param boolean isBookmarked: in case this API call is invoked with a user context this allows changing of the bookmark flag. 
:auth: required """ group_ids = request.GET.getlist('id') if group_ids: group_list = Group.objects.filter( project=project, id__in=group_ids) # filter down group ids to only valid matches group_ids = [g.id for g in group_list] if not group_ids: return Response(status=204) else: group_list = None serializer = GroupValidator( data=request.DATA, partial=True, context={'project': project}, ) if not serializer.is_valid(): return Response(serializer.errors, status=400) result = dict(serializer.object) acting_user = request.user if request.user.is_authenticated() else None if not group_ids: try: # bulk mutations are limited to 1000 items # TODO(dcramer): it'd be nice to support more than this, but its # a bit too complicated right now cursor_result, _ = self._search(request, project, { 'limit': 1000, 'paginator_options': {'max_limit': 1000}, }) except ValidationError as exc: return Response({'detail': six.text_type(exc)}, status=400) group_list = list(cursor_result) group_ids = [g.id for g in group_list] is_bulk = len(group_ids) > 1 queryset = Group.objects.filter( id__in=group_ids, ) discard = result.get('discard') if discard: if not features.has('projects:discard-groups', project, actor=request.user): return Response({'detail': ['You do not have that feature enabled']}, status=400) group_list = list(queryset) groups_to_delete = [] for group in group_list: with transaction.atomic(): try: tombstone = GroupTombstone.objects.create( previous_group_id=group.id, actor_id=acting_user.id if acting_user else None, **{name: getattr(group, name) for name in TOMBSTONE_FIELDS_FROM_GROUP} ) except IntegrityError: # in this case, a tombstone has already been created # for a group, so no hash updates are necessary pass else: groups_to_delete.append(group) GroupHash.objects.filter( group=group, ).update( group=None, group_tombstone_id=tombstone.id, ) self._delete_groups(request, project, groups_to_delete) return Response(status=204) statusDetails = result.pop('statusDetails', result) 
status = result.get('status') if status in ('resolved', 'resolvedInNextRelease'): if status == 'resolvedInNextRelease' or statusDetails.get('inNextRelease'): release = Release.objects.filter( projects=project, organization_id=project.organization_id, ).extra(select={ 'sort': 'COALESCE(date_released, date_added)', }).order_by('-sort')[0] activity_type = Activity.SET_RESOLVED_IN_RELEASE activity_data = { # no version yet 'version': '', } status_details = { 'inNextRelease': True, 'actor': serialize(extract_lazy_object(request.user), request.user), } res_type = GroupResolution.Type.in_next_release res_status = GroupResolution.Status.pending elif statusDetails.get('inRelease'): release = statusDetails['inRelease'] activity_type = Activity.SET_RESOLVED_IN_RELEASE activity_data = { # no version yet 'version': release.version, } status_details = { 'inRelease': release.version, 'actor': serialize(extract_lazy_object(request.user), request.user), } res_type = GroupResolution.Type.in_release res_status = GroupResolution.Status.resolved else: release = None activity_type = Activity.SET_RESOLVED activity_data = {} status_details = {} now = timezone.now() for group in group_list: with transaction.atomic(): if release: resolution_params = { 'release': release, 'type': res_type, 'status': res_status, 'actor_id': request.user.id if request.user.is_authenticated() else None, } resolution, created = GroupResolution.objects.get_or_create( group=group, defaults=resolution_params, ) if not created: resolution.update( datetime=timezone.now(), **resolution_params) else: resolution = None affected = Group.objects.filter( id=group.id, ).update( status=GroupStatus.RESOLVED, resolved_at=now, ) if not resolution: created = affected group.status = GroupStatus.RESOLVED group.resolved_at = now self._subscribe_and_assign_issue( acting_user, group, result) if created: activity = Activity.objects.create( project=group.project, group=group, type=activity_type, user=acting_user, ident=resolution.id if 
resolution else None, data=activity_data, ) # TODO(dcramer): we need a solution for activity rollups # before sending notifications on bulk changes if not is_bulk: activity.send_notification() issue_resolved_in_release.send( group=group, project=project, sender=acting_user, ) kick_off_status_syncs.apply_async(kwargs={ 'project_id': group.project_id, 'group_id': group.id, }) result.update({ 'status': 'resolved', 'statusDetails': status_details, }) elif status: new_status = STATUS_CHOICES[result['status']] with transaction.atomic(): happened = queryset.exclude( status=new_status, ).update( status=new_status, ) GroupResolution.objects.filter( group__in=group_ids, ).delete() if new_status == GroupStatus.IGNORED: ignore_duration = ( statusDetails.pop('ignoreDuration', None) or statusDetails.pop('snoozeDuration', None) ) or None ignore_count = statusDetails.pop( 'ignoreCount', None) or None ignore_window = statusDetails.pop( 'ignoreWindow', None) or None ignore_user_count = statusDetails.pop( 'ignoreUserCount', None) or None ignore_user_window = statusDetails.pop( 'ignoreUserWindow', None) or None if ignore_duration or ignore_count or ignore_user_count: if ignore_duration: ignore_until = timezone.now() + timedelta( minutes=ignore_duration, ) else: ignore_until = None for group in group_list: state = {} if ignore_count and not ignore_window: state['times_seen'] = group.times_seen if ignore_user_count and not ignore_user_window: state['users_seen'] = group.count_users_seen() GroupSnooze.objects.create_or_update( group=group, values={ 'until': ignore_until, 'count': ignore_count, 'window': ignore_window, 'user_count': ignore_user_count, 'user_window': ignore_user_window, 'state': state, 'actor_id': request.user.id if request.user.is_authenticated() else None, } ) result['statusDetails'] = { 'ignoreCount': ignore_count, 'ignoreUntil': ignore_until, 'ignoreUserCount': ignore_user_count, 'ignoreUserWindow': ignore_user_window, 'ignoreWindow': ignore_window, 'actor': 
serialize(extract_lazy_object(request.user), request.user), } issue_ignored.send_robust(project=project, sender=self.__class__) else: GroupSnooze.objects.filter( group__in=group_ids, ).delete() ignore_until = None result['statusDetails'] = {} else: result['statusDetails'] = {} if group_list and happened: if new_status == GroupStatus.UNRESOLVED: activity_type = Activity.SET_UNRESOLVED activity_data = {} elif new_status == GroupStatus.IGNORED: activity_type = Activity.SET_IGNORED activity_data = { 'ignoreCount': ignore_count, 'ignoreDuration': ignore_duration, 'ignoreUntil': ignore_until, 'ignoreUserCount': ignore_user_count, 'ignoreUserWindow': ignore_user_window, 'ignoreWindow': ignore_window, } for group in group_list: group.status = new_status activity = Activity.objects.create( project=group.project, group=group, type=activity_type, user=acting_user, data=activity_data, ) # TODO(dcramer): we need a solution for activity rollups # before sending notifications on bulk changes if not is_bulk: if acting_user: GroupSubscription.objects.subscribe( user=acting_user, group=group, reason=GroupSubscriptionReason.status_change, ) activity.send_notification() if new_status == GroupStatus.UNRESOLVED: kick_off_status_syncs.apply_async(kwargs={ 'project_id': group.project_id, 'group_id': group.id, }) if 'assignedTo' in result: assigned_actor = result['assignedTo'] if assigned_actor: for group in group_list: resolved_actor = assigned_actor.resolve() GroupAssignee.objects.assign(group, resolved_actor, acting_user) result['assignedTo'] = serialize( assigned_actor.resolve(), acting_user, ActorSerializer()) else: for group in group_list: GroupAssignee.objects.deassign(group, acting_user) if result.get('hasSeen') and project.member_set.filter(user=acting_user).exists(): for group in group_list: instance, created = create_or_update( GroupSeen, group=group, user=acting_user, project=group.project, values={ 'last_seen': timezone.now(), } ) elif result.get('hasSeen') is False: 
GroupSeen.objects.filter( group__in=group_ids, user=acting_user, ).delete() if result.get('isBookmarked'): for group in group_list: GroupBookmark.objects.get_or_create( project=project, group=group, user=acting_user, ) GroupSubscription.objects.subscribe( user=acting_user, group=group, reason=GroupSubscriptionReason.bookmark, ) elif result.get('isBookmarked') is False: GroupBookmark.objects.filter( group__in=group_ids, user=acting_user, ).delete() # TODO(dcramer): we could make these more efficient by first # querying for rich rows are present (if N > 2), flipping the flag # on those rows, and then creating the missing rows if result.get('isSubscribed') in (True, False): is_subscribed = result['isSubscribed'] for group in group_list: # NOTE: Subscribing without an initiating event (assignment, # commenting, etc.) clears out the previous subscription reason # to avoid showing confusing messaging as a result of this # action. It'd be jarring to go directly from "you are not # subscribed" to "you were subscribed due since you were # assigned" just by clicking the "subscribe" button (and you # may no longer be assigned to the issue anyway.) GroupSubscription.objects.create_or_update( user=acting_user, group=group, project=project, values={ 'is_active': is_subscribed, 'reason': GroupSubscriptionReason.unknown, }, ) result['subscriptionDetails'] = { 'reason': SUBSCRIPTION_REASON_MAP.get( GroupSubscriptionReason.unknown, 'unknown', ), } if 'isPublic' in result: # We always want to delete an existing share, because triggering # an isPublic=True even when it's already public, should trigger # regenerating. 
for group in group_list: if GroupShare.objects.filter(group=group).delete(): result['shareId'] = None Activity.objects.create( project=group.project, group=group, type=Activity.SET_PRIVATE, user=acting_user, ) if result.get('isPublic'): for group in group_list: share, created = GroupShare.objects.get_or_create( project=group.project, group=group, user=acting_user, ) if created: result['shareId'] = share.uuid Activity.objects.create( project=group.project, group=group, type=Activity.SET_PUBLIC, user=acting_user, ) # XXX(dcramer): this feels a bit shady like it should be its own # endpoint if result.get('merge') and len(group_list) > 1: primary_group = sorted(group_list, key=lambda x: -x.times_seen)[0] children = [] transaction_id = uuid4().hex for group in group_list: if group == primary_group: continue children.append(group) group.update(status=GroupStatus.PENDING_MERGE) merge_group.delay( from_object_id=group.id, to_object_id=primary_group.id, transaction_id=transaction_id, ) Activity.objects.create( project=primary_group.project, group=primary_group, type=Activity.MERGE, user=acting_user, data={ 'issues': [{ 'id': c.id } for c in children], }, ) result['merge'] = { 'parent': six.text_type(primary_group.id), 'children': [six.text_type(g.id) for g in children], } return Response(result) @attach_scenarios([bulk_remove_issues_scenario]) def delete(self, request, project): """ Bulk Remove a List of Issues ```````````````````````````` Permanently remove the given issues. The list of issues to modify is given through the `id` query parameter. It is repeated for each issue that should be removed. Only queries by 'id' are accepted. If any ids are out of scope this operation will succeed without any data mutation. :qparam int id: a list of IDs of the issues to be removed. This parameter shall be repeated for each issue. :pparam string organization_slug: the slug of the organization the issues belong to. 
        :pparam string project_slug: the slug of the project the issues
                                     belong to.
        :auth: required
        """
        group_ids = request.GET.getlist('id')
        if group_ids:
            # Explicit id list: restrict to this project and drop groups that
            # are already queued for deletion so the mutation is idempotent.
            group_list = list(
                Group.objects.filter(
                    project=project,
                    id__in=set(group_ids),
                ).exclude(
                    status__in=[
                        GroupStatus.PENDING_DELETION,
                        GroupStatus.DELETION_IN_PROGRESS,
                    ]
                )
            )
        else:
            try:
                # bulk mutations are limited to 1000 items
                # TODO(dcramer): it'd be nice to support more than this, but its
                # a bit too complicated right now
                cursor_result, _ = self._search(request, project, {
                    'limit': 1000,
                    'paginator_options': {'max_limit': 1000},
                })
            except ValidationError as exc:
                return Response({'detail': six.text_type(exc)}, status=400)

            group_list = list(cursor_result)

        # Out-of-scope or already-deleting ids simply match nothing; the
        # endpoint still reports success (204), per the docstring contract.
        if not group_list:
            return Response(status=204)

        self._delete_groups(request, project, group_list)

        return Response(status=204)

    def _delete_groups(self, request, project, group_list):
        """Queue the given groups for asynchronous deletion.

        Marks each group PENDING_DELETION, tombstones their group hashes
        (via ``GroupHashTombstone.tombstone_groups``), then schedules a
        delayed ``delete_group`` task and writes an audit/log entry per group.
        """
        group_ids = [g.id for g in group_list]

        Group.objects.filter(
            id__in=group_ids,
        ).exclude(status__in=[
            GroupStatus.PENDING_DELETION,
            GroupStatus.DELETION_IN_PROGRESS,
        ]).update(status=GroupStatus.PENDING_DELETION)

        GroupHashTombstone.tombstone_groups(
            project_id=project.id,
            group_ids=group_ids,
        )

        # A single transaction id ties together the async tasks, audit
        # entries and delete-log records for the whole bulk operation.
        transaction_id = uuid4().hex

        for group in group_list:
            delete_group.apply_async(
                kwargs={
                    'object_id': group.id,
                    'transaction_id': transaction_id,
                },
                # NOTE(review): 1 hour delay before the actual delete runs —
                # presumably a grace period for undo; confirm with task owner.
                countdown=3600,
            )

            self.create_audit_entry(
                request=request,
                organization_id=project.organization_id,
                target_object=group.id,
                transaction_id=transaction_id,
            )

            delete_logger.info(
                'object.delete.queued',
                extra={
                    'object_id': group.id,
                    'transaction_id': transaction_id,
                    'model': type(group).__name__,
                }
            )
""" Properties for modeling Chart inputs, constraints, and dependencies. selection spec: [['x'], ['x', 'y']] [{'x': categorical, 'y': numerical}] """ from __future__ import absolute_import import numpy as np import pandas as pd from bokeh.core.properties import (HasProps, Either, String, Int, List, Bool, PrimitiveProperty, bokeh_integer_types, Array) from .utils import special_columns, title_from_columns class Column(Array): """Represents column-oriented data. This property is used to provide a consistent interface for column-like data. The property both validates the data types set, and transforms all column-like data into `pd.Series` data. """ def _is_seq(self, value): is_array = super(Column, self)._is_seq(value) return (isinstance(value, pd.Series) or isinstance(value, pd.Index) or isinstance(value, list) or is_array) def _new_instance(self, value): return pd.Series(value) def transform(self, value): if value is None: return None if isinstance(value, pd.Series): arr = value.values else: arr = pd.Series(value).values trans_array = super(Column, self).transform(arr) try: return pd.Series(trans_array) except ValueError: raise ValueError("Could not transform %r" % value) class Logical(Bool): """A boolean like data type. This property is valid for both python and numpy boolean types. 
""" def validate(self, value): try: super(Logical, self).validate(value) except ValueError: if isinstance(value, list): value = np.array(value) # If not a Bool, then look for pseudo-logical types if isinstance(value, np.ndarray): values = np.unique(value) if len(values) == 2: return raise ValueError('expected a Bool or array with 2 unique values, got %s' % value) class ColumnLabel(Either): """Specify a column by name or index.""" def __init__(self, columns=None, default=None, help=None): # ToDo: make sure we can select by integer types = (String, Int) self.columns = columns super(ColumnLabel, self).__init__(*types, default=default, help=help) def validate(self, value): """If we are given a column list, make sure that the column provided is valid.""" super(ColumnLabel, self).validate(value) if self.columns: if type(value) in bokeh_integer_types: if len(self.columns) > value: return else: raise ValueError("Not a valid column selection.") else: if value not in self.columns and value not in special_columns: raise ValueError("Column provided is not in the list of valid columns: %s" % self.columns) def __str__(self): return "Column Name or Column String" class Dimension(HasProps): """Configures valid Chart column selections. A dimension is Chart property that is assigned one or more columns names or indices. Each column can match one or more column types, which are important to charts, because the type of column selection can greatly affect the behavior of generalized Charts. The Dimension also provides convenient utilities for accessing information about the current provided configuration at the global, non-grouped level. The dimension configuration does not require the data, but when the data is added using the `set_data` method, then validation can occur of the settings by using the `valid` and `invalid` types identified by the selection. 
""" name = String() alt_names = Either(String, List(String), default=None) columns = Either(ColumnLabel, List(ColumnLabel), default=None) valid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None) invalid = Either(PrimitiveProperty, List(PrimitiveProperty), default=None) selection = Either(ColumnLabel, List(ColumnLabel), default=None) def __init__(self, name, **properties): properties['name'] = name super(Dimension, self).__init__(**properties) self._data = pd.DataFrame() self._chart_source = None def get_valid_types(self, col_data): """Returns all property types that are matched.""" valid_types = list(self.valid) matches = [] # validate each type on the provided column for valid_type in valid_types: prop = valid_type() # if valid, append to the output try: prop.validate(col_data) matches.append(valid_type) except ValueError: pass return matches @property def data(self): """The data selected for the Dimension. Returns pd.Series(1) if data is empty or no selection. """ if self._data.empty or self.selection is None: return pd.Series(1) else: # return special column type if available if self.selection in list(special_columns.keys()): return special_columns[self.selection](self._data) return self._data[self.selection] def __len__(self): return len(self.data.index) def set_data(self, data): """Set data property so that builders has access to configuration metadata. 
Args: data (`ChartDataSource`): the data source associated with the chart """ self.selection = data[self.name] self._chart_source = data self._data = data.df self.columns = list(self._data.columns.values) @property def min(self): """The minimum of one to many column selections.""" if isinstance(self.data, pd.Series): return self.data.min() else: return self.data.min(axis=1).min() @property def max(self): """The maximum of one to many column selections.""" if isinstance(self.data, pd.Series): return self.data.max() else: return self.data.max(axis=1).max() @property def dtype(self): if isinstance(self.data, pd.DataFrame): return self.data.dtypes[self.selection[0]] else: return self.data.dtype @property def computed(self): """Check the `ChartDataSource` to see if the selection is a derived column.""" if self._chart_source is None: return False else: return self._chart_source.is_computed(self.selection) @property def selected_title(self): """A title formatted representation of selected columns.""" return title_from_columns(self.selection) class EitherColumn(Either): """Allow providing option of column types.""" # ToDo: incorporate fix into Either def matches(self, new, old): comparison = super(EitherColumn, self).matches(new, old) if isinstance(comparison, bool): return comparison elif isinstance(comparison, pd.Series): return comparison.all() else: raise ValueError('Failed when comparing Columns')
#!/usr/bin/env python

# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Sample command-line program for writing and reading Stackdriver Monitoring
API V3 custom metrics.

Simple command-line program to demonstrate connecting to the Google
Monitoring API to write custom metrics and read them back.

See README.md for instructions on setting up your development environment.

This example creates a custom metric based on a hypothetical GAUGE measurement.

To run locally:

    python custom_metric.py --project_id=<YOUR-PROJECT-ID>

"""

# [START all]
import argparse
import datetime
import pprint
import random
import time

import googleapiclient.discovery


def get_start_time():
    # Return now - 5 minutes, as a timezone-aware ISO 8601 / RFC 3339 string
    # (the interval format the Monitoring API expects).
    start_time = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(minutes=5)
    return start_time.isoformat()


def get_now():
    # Return now as a timezone-aware ISO 8601 / RFC 3339 string.
    return datetime.datetime.now(tz=datetime.timezone.utc).isoformat()


def create_custom_metric(client, project_id, custom_metric_type, metric_kind):
    """Create custom metric descriptor.

    ``project_id`` is the full project resource name ("projects/<id>"), as
    required by the metricDescriptors.create ``name`` parameter.
    """
    metrics_descriptor = {
        "type": custom_metric_type,
        "labels": [
            {
                "key": "environment",
                "valueType": "STRING",
                "description": "An arbitrary measurement"
            }
        ],
        "metricKind": metric_kind,
        "valueType": "INT64",
        "unit": "items",
        "description": "An arbitrary measurement.",
        "displayName": "Custom Metric"
    }

    return client.projects().metricDescriptors().create(
        name=project_id, body=metrics_descriptor).execute()


def delete_metric_descriptor(
        client, custom_metric_name):
    """Delete a custom metric descriptor."""
    client.projects().metricDescriptors().delete(
        name=custom_metric_name).execute()


def get_custom_metric(client, project_id, custom_metric_type):
    """Retrieve the custom metric we created.

    Returns the list of matching descriptors, or None if the API response
    contains no 'metricDescriptors' key (i.e. the metric is not yet visible).
    """
    request = client.projects().metricDescriptors().list(
        name=project_id,
        filter='metric.type=starts_with("{}")'.format(custom_metric_type))
    response = request.execute()
    print('ListCustomMetrics response:')
    pprint.pprint(response)
    try:
        return response['metricDescriptors']
    except KeyError:
        return None


def get_custom_data_point():
    """Dummy method to return a mock measurement for demonstration purposes.
    Returns a random number between 0 and 10"""
    length = random.randint(0, 10)
    print("reporting timeseries value {}".format(str(length)))
    return length


# [START write_timeseries]
def write_timeseries_value(client, project_resource,
                           custom_metric_type, instance_id, metric_kind):
    """Write the custom metric obtained by get_custom_data_point at a point in
    time."""
    # Specify a new data point for the time series.
    now = get_now()
    timeseries_data = {
        "metric": {
            "type": custom_metric_type,
            "labels": {
                "environment": "STAGING"
            }
        },
        "resource": {
            "type": 'gce_instance',
            "labels": {
                'instance_id': instance_id,
                'zone': 'us-central1-f'
            }
        },
        "points": [
            {
                # For a GAUGE metric the interval start and end are identical.
                "interval": {
                    "startTime": now,
                    "endTime": now
                },
                "value": {
                    "int64Value": get_custom_data_point()
                }
            }
        ]
    }

    request = client.projects().timeSeries().create(
        name=project_resource,
        body={"timeSeries": [timeseries_data]})
    request.execute()
# [END write_timeseries]


def read_timeseries(client, project_resource, custom_metric_type):
    """Reads all of the CUSTOM_METRICS that we have written between START_TIME
    and END_TIME
    :param project_resource: Resource of the project to read the timeseries
                             from.
    :param custom_metric_name: The name of the timeseries we want to read.
    """
    # interval_startTime / interval_endTime are the client library's flattened
    # names for the interval.startTime / interval.endTime query parameters.
    request = client.projects().timeSeries().list(
        name=project_resource,
        filter='metric.type="{0}"'.format(custom_metric_type),
        pageSize=3,
        interval_startTime=get_start_time(),
        interval_endTime=get_now(),
    )
    response = request.execute()
    return response


def main(project_id):
    # This is the namespace for all custom metrics
    CUSTOM_METRIC_DOMAIN = "custom.googleapis.com"
    # This is our specific metric name
    CUSTOM_METRIC_TYPE = "{}/custom_measurement".format(CUSTOM_METRIC_DOMAIN)
    INSTANCE_ID = "test_instance"
    METRIC_KIND = "GAUGE"

    project_resource = "projects/{0}".format(project_id)
    client = googleapiclient.discovery.build('monitoring', 'v3')
    create_custom_metric(client, project_resource,
                         CUSTOM_METRIC_TYPE, METRIC_KIND)
    custom_metric = None
    while not custom_metric:
        # wait until it's created
        time.sleep(1)
        custom_metric = get_custom_metric(
            client, project_resource, CUSTOM_METRIC_TYPE)

    write_timeseries_value(client, project_resource,
                           CUSTOM_METRIC_TYPE, INSTANCE_ID, METRIC_KIND)
    # Sometimes on new metric descriptors, writes have a delay in being read
    # back. 3 seconds should be enough to make sure our read call picks up the
    # write
    time.sleep(3)
    timeseries = read_timeseries(client, project_resource, CUSTOM_METRIC_TYPE)
    print('read_timeseries response:\n{}'.format(pprint.pformat(timeseries)))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        '--project_id', help='Project ID you want to access.', required=True)

    args = parser.parse_args()
    main(args.project_id)
# [END all]
""" Django settings for openshift project. For more information on this file, see https://docs.djangoproject.com/en/1.6/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.6/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os import imp ON_OPENSHIFT = False if 'OPENSHIFT_REPO_DIR' in os.environ: ON_OPENSHIFT = True BASE_DIR = os.path.dirname(os.path.realpath(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! default_keys = { 'SECRET_KEY': 'tjy&7h%c=q01+c5i@_-t)&n2c+y*tn7v_)vbdksnlv@s5qh%e_' } use_keys = default_keys if ON_OPENSHIFT: imp.find_module('openshiftlibs') import openshiftlibs use_keys = openshiftlibs.openshift_secure(default_keys) SECRET_KEY = use_keys['SECRET_KEY'] # SECURITY WARNING: don't run with debug turned on in production! 
if ON_OPENSHIFT: DEBUG = False else: DEBUG = True TEMPLATE_DEBUG = DEBUG if DEBUG: ALLOWED_HOSTS = [] else: ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'compressor', 'taggit', 'modelcluster', 'south', 'wagtail.wagtailcore', 'wagtail.wagtailadmin', 'wagtail.wagtaildocs', 'wagtail.wagtailsnippets', 'wagtail.wagtailusers', 'wagtail.wagtailimages', 'wagtail.wagtailembeds', 'wagtail.wagtailsearch', 'wagtail.wagtailredirects', 'blog', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'wagtail.wagtailcore.middleware.SiteMiddleware', 'wagtail.wagtailredirects.middleware.RedirectMiddleware', ) # If you want configure the REDISCLOUD if 'REDISCLOUD_URL' in os.environ and 'REDISCLOUD_PORT' in os.environ and 'REDISCLOUD_PASSWORD' in os.environ: redis_server = os.environ['REDISCLOUD_URL'] redis_port = os.environ['REDISCLOUD_PORT'] redis_password = os.environ['REDISCLOUD_PASSWORD'] CACHES = { 'default' : { 'BACKEND' : 'redis_cache.RedisCache', 'LOCATION' : '%s:%d'%(redis_server,int(redis_port)), 'OPTIONS' : { 'DB':0, 'PARSER_CLASS' : 'redis.connection.HiredisParser', 'PASSWORD' : redis_password, } } } MIDDLEWARE_CLASSES = ('django.middleware.cache.UpdateCacheMiddleware',) + MIDDLEWARE_CLASSES + ('django.middleware.cache.FetchFromCacheMiddleware',) ROOT_URLCONF = 'urls' # WSGI_APPLICATION = 'wsgi.application' TEMPLATE_DIRS = ( os.path.join(BASE_DIR,'templates'), ) # Database # https://docs.djangoproject.com/en/1.6/ref/settings/#databases if ON_OPENSHIFT: DATABASES = { 'default': { 'ENGINE': 
'django.db.backends.postgresql_psycopg2', 'NAME': os.environ['OPENSHIFT_APP_NAME'], 'USER': os.environ['OPENSHIFT_POSTGRESQL_DB_USERNAME'], 'PASSWORD': os.environ['OPENSHIFT_POSTGRESQL_DB_PASSWORD'], 'HOST': os.environ['OPENSHIFT_POSTGRESQL_DB_HOST'], 'PORT': os.environ['OPENSHIFT_POSTGRESQL_DB_PORT'], } } # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(os.environ['OPENSHIFT_DATA_DIR'], 'db.sqlite3'), # } # } else: DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', 'NAME': 'akoshodi', 'USER': 'akoshodi', 'PASSWORD': 'abc123', 'HOST': '', # Set to empty string for localhost. 'PORT': '', # Set to empty string for default. } } # DATABASES = { # 'default': { # 'ENGINE': 'django.db.backends.sqlite3', # 'NAME': os.path.join(BASE_DIR, 'testdb.sqlite3'), # } # } # Internationalization # https://docs.djangoproject.com/en/1.6/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.6/howto/static-files/ STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static') STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(BASE_DIR, 'static'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) from django.conf import global_settings TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS + ( 'django.core.context_processors.request', ) EMAIL_SUBJECT_PREFIX = '[wagtailtutorial] ' INTERNAL_IPS = ('127.0.0.1', '10.0.2.2') COMPRESS_PRECOMPILERS = ( ('text/x-scss', 'django_libsass.SassCompiler'), ) # Auth settings LOGIN_URL = 'django.contrib.auth.views.login' LOGIN_REDIRECT_URL = 'wagtailadmin_home' # WAGTAIL SETTINGS WAGTAIL_SITE_NAME = 'maestro' # Override the search results template for wagtailsearch WAGTAILSEARCH_RESULTS_TEMPLATE = 
'maestroblog/search_results.html' WAGTAILSEARCH_RESULTS_TEMPLATE_AJAX = 'maestroblog/includes/search_listing.html' WAGTAILSEARCH_ES_INDEX = 'maestro' AUTH_PROFILE_MODULE = 'userprofile.UserProfile' TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', )
#!/usr/bin/env python
# pylint: disable=too-many-lines
# NOTE(review): this module is Python 2 code (print statements, dict.has_key,
# err.message); it will not parse under Python 3 without modernization.
#     ___ ___ _  _ ___ ___    _ _____ ___ ___
#    / __| __| \| | __| _ \  /_\_   _| __|   \
#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
#    \___|___|_|\_|___|_|_\/_/ \_\_| |___|___/ _____
#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_ _|
#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
'''
   OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines

import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
#    '''return timestamps as strings'''
#    return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)


class OpenShiftCLIError(Exception):
    '''Exception class for openshiftcli'''
    pass


# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces

    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        # Fetch the live object, apply the key/value updates via Yedit, and
        # only call `oc replace` if at least one key actually changed.
        res = self._get(resource, rname)
        if not res['results']:
            return res

        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))

        if any([change[0] for change in changes]):
            yed.write()

            atexit.register(Utils.cleanup, [fname])

            return self._replace(fname, force)

        return {'returncode': 0, 'updated': False}

    def _replace(self, fname, force=False):
        '''replace the named resource from the given file'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)

    def _create_from_content(self, rname, content):
        '''write content to a temp file and `oc create` from it'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self._create(fname)

    def _create(self, fname):
        '''`oc create -f <fname>` in this namespace'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])

    def _delete(self, resource, rname, selector=None):
        '''`oc delete` a named resource, optionally by selector'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)

        return self.openshift_cmd(cmd)

    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template and optionally create the resulting objects'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # Template content is piped on stdin instead of named.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)

        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)

        results = self.openshift_cmd(cmd, output=True, input_data=template_data)

        if results['returncode'] != 0 or not create:
            return results

        # Write the processed output to a temp file and create from it.
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()

        atexit.register(Utils.cleanup, [fname])

        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])

    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])

        cmd.extend(['-o', 'json'])

        if rname:
            cmd.append(rname)

        rval = self.openshift_cmd(cmd, output=True)

        # Ensure results are retuned in an array
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]

        return rval

    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        cmd.append('--schedulable=%s' % schedulable)

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        cmd.extend(['--list-pods', '-o', 'json'])

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)

        if dry_run:
            cmd.append('--dry-run')

        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)

        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))

        if force:
            cmd.append('--force')

        cmd.append('--evacuate')

        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')

    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']

        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)

        cmd.append(image)

        if url:
            cmd.append('--from={0}/{1}'.format(url, image))

        cmd.append('-n{0}'.format(self.namespace))

        cmd.append('--confirm')
        return self.openshift_cmd(cmd)

    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc

        Runs oc/oadm as a subprocess with KUBECONFIG set, and returns a dict
        with returncode, results (parsed per output_type), cmd, and on error
        also stdout/stderr.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']

        cmds.extend(cmd)

        rval = {}
        results = ''
        err = None

        if self.verbose:
            print ' '.join(cmds)

        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})

        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }

        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # NOTE(review): Python 2 only — `err` remains bound
                        # after the except block and is re-checked below.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout

            if self.verbose:
                print stdout
                print stderr

            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })

        return rval


class Utils(object):
    ''' utilities for openshiftcli modules '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))

            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)

        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path

    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array'''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files

    @staticmethod
    def cleanup(files):
        '''Clean up on exit '''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)

    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False

        if Utils.find_result(results, _name):
            return True

        return False

    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by name'''
        rval = None
        for result in results:
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break

        return rval

    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the service file '''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()

        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)

        return contents

    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares lists and dicts; keys in `skip` (metadata/status
        plus any caller-provided skip_keys) are ignored because they are
        autogenerated by the server.
        '''

        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)

        for key, value in result_def.items():
            if key in skip:
                continue

            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False

                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False

                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False

                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False

                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False

            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False

                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False

                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False

            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False

        if debug:
            print 'returning true'
        return True


class OpenShiftCLIConfig(object):
    '''Generic Config'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options

    @property
    def config_options(self):
        ''' return config options '''
        return self._options

    def to_option_list(self):
        '''return all options as a string'''
        return self.stringify()

    def
stringify(self): ''' return the options hash as cli params in a string ''' rval = [] for key, data in self.config_options.items(): if data['include'] \ and (data['value'] or isinstance(data['value'], int)): rval.append('--%s=%s' % (key.replace('_', '-'), data['value'])) return rval class YeditException(Exception): ''' Exception class for Yedit ''' pass class Yedit(object): ''' Class to modify yaml files ''' re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$" re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)" com_sep = set(['.', '#', '|', ':']) # pylint: disable=too-many-arguments def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False): self.content = content self._separator = separator self.filename = filename self.__yaml_dict = content self.content_type = content_type self.backup = backup self.load(content_type=self.content_type) if self.__yaml_dict == None: self.__yaml_dict = {} @property def separator(self): ''' getter method for yaml_dict ''' return self._separator @separator.setter def separator(self): ''' getter method for yaml_dict ''' return self._separator @property def yaml_dict(self): ''' getter method for yaml_dict ''' return self.__yaml_dict @yaml_dict.setter def yaml_dict(self, value): ''' setter method for yaml_dict ''' self.__yaml_dict = value @staticmethod def parse_key(key, sep='.'): '''parse the key allowing the appropriate separator''' common_separators = list(Yedit.com_sep - set([sep])) return re.findall(Yedit.re_key % ''.join(common_separators), key) @staticmethod def valid_key(key, sep='.'): '''validate the incoming key''' common_separators = list(Yedit.com_sep - set([sep])) if not re.match(Yedit.re_valid_key % ''.join(common_separators), key): return False return True @staticmethod def remove_entry(data, key, sep='.'): ''' remove data at location key ''' if key == '' and isinstance(data, dict): data.clear() return True elif key == '' and isinstance(data, list): del data[:] return True if not (key 
and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key and isinstance(data, dict): data = data.get(dict_key, None) elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1: data = data[int(arr_ind)] else: return None # process last index for remove # expected list entry if key_indexes[-1][0]: if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: del data[int(key_indexes[-1][0])] return True # expected dict entry elif key_indexes[-1][1]: if isinstance(data, dict): del data[key_indexes[-1][1]] return True @staticmethod def add_entry(data, key, item=None, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a#b return c ''' if key == '': pass elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for arr_ind, dict_key in key_indexes[:-1]: if dict_key: if isinstance(data, dict) and data.has_key(dict_key) and data[dict_key]: data = data[dict_key] continue elif data and not isinstance(data, dict): return None data[dict_key] = {} data = data[dict_key] elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1: data = data[int(arr_ind)] else: return None if key == '': data = item # process last index for add # expected list entry elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: data[int(key_indexes[-1][0])] = item # expected dict entry elif key_indexes[-1][1] and isinstance(data, dict): data[key_indexes[-1][1]] = item return data @staticmethod def get_entry(data, key, sep='.'): ''' Get an item from a dictionary with key notation a.b.c d = {'a': {'b': 'c'}}} key = a.b return c ''' if key == '': pass elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)): return None key_indexes = Yedit.parse_key(key, sep) for 
arr_ind, dict_key in key_indexes: if dict_key and isinstance(data, dict): data = data.get(dict_key, None) elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1: data = data[int(arr_ind)] else: return None return data def write(self): ''' write to file ''' if not self.filename: raise YeditException('Please specify a filename.') if self.backup and self.file_exists(): shutil.copy(self.filename, self.filename + '.orig') tmp_filename = self.filename + '.yedit' try: with open(tmp_filename, 'w') as yfd: # pylint: disable=no-member if hasattr(self.yaml_dict, 'fa'): self.yaml_dict.fa.set_block_style() yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper)) except Exception as err: raise YeditException(err.message) os.rename(tmp_filename, self.filename) return (True, self.yaml_dict) def read(self): ''' read from file ''' # check if it exists if self.filename == None or not self.file_exists(): return None contents = None with open(self.filename) as yfd: contents = yfd.read() return contents def file_exists(self): ''' return whether file exists ''' if os.path.exists(self.filename): return True return False def load(self, content_type='yaml'): ''' return yaml file ''' contents = self.read() if not contents and not self.content: return None if self.content: if isinstance(self.content, dict): self.yaml_dict = self.content return self.yaml_dict elif isinstance(self.content, str): contents = self.content # check if it is yaml try: if content_type == 'yaml' and contents: self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader) # pylint: disable=no-member if hasattr(self.yaml_dict, 'fa'): self.yaml_dict.fa.set_block_style() elif content_type == 'json' and contents: self.yaml_dict = json.loads(contents) except yaml.YAMLError as err: # Error loading yaml or json raise YeditException('Problem with loading yaml file. 
%s' % err) return self.yaml_dict def get(self, key): ''' get a specified key''' try: entry = Yedit.get_entry(self.yaml_dict, key, self.separator) except KeyError as _: entry = None return entry def pop(self, path, key_or_item): ''' remove a key, value pair from a dict or an item for a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if entry == None: return (False, self.yaml_dict) if isinstance(entry, dict): # pylint: disable=no-member,maybe-no-member if entry.has_key(key_or_item): entry.pop(key_or_item) return (True, self.yaml_dict) return (False, self.yaml_dict) elif isinstance(entry, list): # pylint: disable=no-member,maybe-no-member ind = None try: ind = entry.index(key_or_item) except ValueError: return (False, self.yaml_dict) entry.pop(ind) return (True, self.yaml_dict) return (False, self.yaml_dict) def delete(self, path): ''' remove path from a dict''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if entry == None: return (False, self.yaml_dict) result = Yedit.remove_entry(self.yaml_dict, path, self.separator) if not result: return (False, self.yaml_dict) return (True, self.yaml_dict) def exists(self, path, value): ''' check if value exists at path''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if isinstance(entry, list): if value in entry: return True return False elif isinstance(entry, dict): if isinstance(value, dict): rval = False for key, val in value.items(): if entry[key] != val: rval = False break else: rval = True return rval return value in entry return entry == value def append(self, path, value): '''append value to a list''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if entry is None: self.put(path, []) entry = Yedit.get_entry(self.yaml_dict, path, self.separator) if not isinstance(entry, list): return (False, 
self.yaml_dict) # pylint: disable=no-member,maybe-no-member entry.append(value) return (True, self.yaml_dict) # pylint: disable=too-many-arguments def update(self, path, value, index=None, curr_value=None): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if isinstance(entry, dict): # pylint: disable=no-member,maybe-no-member if not isinstance(value, dict): raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \ ' value=[%s] [%s]' % (value, type(value))) entry.update(value) return (True, self.yaml_dict) elif isinstance(entry, list): # pylint: disable=no-member,maybe-no-member ind = None if curr_value: try: ind = entry.index(curr_value) except ValueError: return (False, self.yaml_dict) elif index != None: ind = index if ind != None and entry[ind] != value: entry[ind] = value return (True, self.yaml_dict) # see if it exists in the list try: ind = entry.index(value) except ValueError: # doesn't exist, append it entry.append(value) return (True, self.yaml_dict) #already exists, return if ind != None: return (False, self.yaml_dict) return (False, self.yaml_dict) def put(self, path, value): ''' put path, value into a dict ''' try: entry = Yedit.get_entry(self.yaml_dict, path, self.separator) except KeyError as _: entry = None if entry == value: return (False, self.yaml_dict) # deepcopy didn't work tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader) # pylint: disable=no-member if hasattr(self.yaml_dict, 'fa'): tmp_copy.fa.set_block_style() result = Yedit.add_entry(tmp_copy, path, value, self.separator) if not result: return (False, self.yaml_dict) self.yaml_dict = tmp_copy return (True, self.yaml_dict) def create(self, path, value): ''' create a yaml file ''' if not self.file_exists(): # deepcopy didn't work tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), 
yaml.RoundTripLoader) # pylint: disable=no-member if hasattr(self.yaml_dict, 'fa'): tmp_copy.fa.set_block_style() result = Yedit.add_entry(tmp_copy, path, value, self.separator) if result: self.yaml_dict = tmp_copy return (True, self.yaml_dict) return (False, self.yaml_dict) # pylint: disable=too-many-instance-attributes class ProjectConfig(OpenShiftCLIConfig): ''' project config object ''' def __init__(self, rname, namespace, kubeconfig, project_options): super(ProjectConfig, self).__init__(rname, rname, kubeconfig, project_options) class Project(Yedit): ''' Class to wrap the oc command line tools ''' annotations_path = "metadata.annotations" kind = 'Service' annotation_prefix = 'openshift.io/' def __init__(self, content): '''Service constructor''' super(Project, self).__init__(content=content) def get_annotations(self): ''' get a list of ports ''' return self.get(Project.annotations_path) or {} def add_annotations(self, inc_annos): ''' add a port object to the ports list ''' if not isinstance(inc_annos, list): inc_annos = [inc_annos] annos = self.get_annotations() if not annos: self.put(Project.annotations_path, inc_annos) else: for anno in inc_annos: for key, value in anno.items(): annos[key] = value return True def find_annotation(self, key): ''' find a specific port ''' annotations = self.get_annotations() for anno in annotations: if Project.annotation_prefix + key == anno: return annotations[anno] return None def delete_annotation(self, inc_anno_keys): ''' remove an annotation from a project''' if not isinstance(inc_anno_keys, list): inc_anno_keys = [inc_anno_keys] annos = self.get(Project.annotations_path) or {} if not annos: return True removed = False for inc_anno in inc_anno_keys: anno = self.find_annotation(inc_anno) if anno: del annos[Project.annotation_prefix + anno] removed = True return removed def update_annotation(self, key, value): ''' remove an annotation from a project''' annos = self.get(Project.annotations_path) or {} if not annos: return True 
updated = False anno = self.find_annotation(key) if anno: annos[Project.annotation_prefix + key] = value updated = True else: self.add_annotations({Project.annotation_prefix + key: value}) return updated # pylint: disable=too-many-instance-attributes class OadmProject(OpenShiftCLI): ''' Class to wrap the oc command line tools ''' kind = 'namespace' # pylint allows 5 # pylint: disable=too-many-arguments def __init__(self, config, verbose=False): ''' Constructor for OCVolume ''' super(OadmProject, self).__init__(config.name, config.kubeconfig) self.config = config self._project = None @property def project(self): ''' property function project''' if not self._project: self.get() return self._project @project.setter def project(self, data): ''' setter function for yedit var ''' self._project = data def exists(self): ''' return whether a project exists ''' if self.project: return True return False def get(self): '''return project ''' result = self.openshift_cmd(['get', self.kind, self.config.name, '-o', 'json'], output=True, output_type='raw') if result['returncode'] == 0: self.project = Project(content=json.loads(result['results'])) result['results'] = self.project.yaml_dict elif 'namespaces "%s" not found' % self.config.name in result['stderr']: result = {'results': [], 'returncode': 0} return result def delete(self): '''delete the object''' return self._delete(self.kind, self.config.name) def create(self): '''create a project ''' cmd = ['new-project', self.config.name] cmd.extend(self.config.to_option_list()) return self.openshift_cmd(cmd, oadm=True) def update(self): '''update a project ''' self.project.update_annotation('display-name', self.config.config_options['display_name']['value']) self.project.update_annotation('description', self.config.config_options['description']['value']) # work around for immutable project field if self.config.config_options['node_selector']['value']: self.project.update_annotation('node-selector', 
self.config.config_options['node_selector']['value']) else: self.project.update_annotation('node-selector', self.project.find_annotation('node-selector')) return self._replace_content(self.kind, self.config.namespace, self.project.yaml_dict) def needs_update(self): ''' verify an update is needed ''' result = self.project.find_annotation("display-name") if result != self.config.config_options['display_name']['value']: return True result = self.project.find_annotation("description") if result != self.config.config_options['description']['value']: return True result = self.project.find_annotation("node-selector") if result != self.config.config_options['node_selector']['value']: return True # Check rolebindings and policybindings return False def main(): ''' ansible oc module for project ''' module = AnsibleModule( argument_spec=dict( kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'), state=dict(default='present', type='str', choices=['present', 'absent', 'list']), debug=dict(default=False, type='bool'), name=dict(default=None, require=True, type='str'), display_name=dict(default=None, type='str'), node_selector=dict(default=None, type='str'), description=dict(default=None, type='str'), admin=dict(default=None, type='str'), admin_role=dict(default=None, type='str'), ), supports_check_mode=True, ) pconfig = ProjectConfig(module.params['name'], module.params['name'], module.params['kubeconfig'], {'admin': {'value': module.params['admin'], 'include': True}, 'admin_role': {'value': module.params['admin_role'], 'include': True}, 'description': {'value': module.params['description'], 'include': True}, 'display_name': {'value': module.params['display_name'], 'include': True}, 'node_selector': {'value': module.params['node_selector'], 'include': True}, }) oadm_project = OadmProject(pconfig, verbose=module.params['debug']) state = module.params['state'] api_rval = oadm_project.get() ##### # Get ##### if state == 'list': module.exit_json(changed=False, 
results=api_rval['results'], state="list") ######## # Delete ######## if state == 'absent': if oadm_project.exists(): if module.check_mode: module.exit_json(changed=False, msg='Would have performed a delete.') api_rval = oadm_project.delete() module.exit_json(changed=True, results=api_rval, state="absent") module.exit_json(changed=False, state="absent") if state == 'present': ######## # Create ######## if not oadm_project.exists(): if module.check_mode: module.exit_json(changed=False, msg='Would have performed a create.') # Create it here api_rval = oadm_project.create() # return the created object api_rval = oadm_project.get() if api_rval['returncode'] != 0: module.fail_json(msg=api_rval) module.exit_json(changed=True, results=api_rval, state="present") ######## # Update ######## if oadm_project.needs_update(): api_rval = oadm_project.update() if api_rval['returncode'] != 0: module.fail_json(msg=api_rval) # return the created object api_rval = oadm_project.get() if api_rval['returncode'] != 0: module.fail_json(msg=api_rval) module.exit_json(changed=True, results=api_rval, state="present") module.exit_json(changed=False, results=api_rval, state="present") module.exit_json(failed=True, changed=False, results='Unknown state passed. %s' % state, state="unknown") # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled # import module snippets. This are required if __name__ == '__main__': from ansible.module_utils.basic import * main()
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Generic Node base class for all workers that run on hosts.""" import errno import logging as std_logging import os import random import signal import sys import time try: # Importing just the symbol here because the io module does not # exist in Python 2.6. from io import UnsupportedOperation # noqa except ImportError: # Python 2.6 UnsupportedOperation = None import eventlet from eventlet import event from oslo.config import cfg from ceilometer.openstack.common import eventlet_backdoor from ceilometer.openstack.common._i18n import _LE, _LI, _LW from ceilometer.openstack.common import log as logging from ceilometer.openstack.common import systemd from ceilometer.openstack.common import threadgroup CONF = cfg.CONF LOG = logging.getLogger(__name__) def _sighup_supported(): return hasattr(signal, 'SIGHUP') def _is_daemon(): # The process group for a foreground process will match the # process group of the controlling terminal. If those values do # not match, or ioctl() fails on the stdout file handle, we assume # the process is running in the background as a daemon. 
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics try: is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()) except OSError as err: if err.errno == errno.ENOTTY: # Assume we are a daemon because there is no terminal. is_daemon = True else: raise except UnsupportedOperation: # Could not get the fileno for stdout, so we must be a daemon. is_daemon = True return is_daemon def _is_sighup_and_daemon(signo): if not (_sighup_supported() and signo == signal.SIGHUP): # Avoid checking if we are a daemon, because the signal isn't # SIGHUP. return False return _is_daemon() def _signo_to_signame(signo): signals = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'} if _sighup_supported(): signals[signal.SIGHUP] = 'SIGHUP' return signals[signo] def _set_signals_handler(handler): signal.signal(signal.SIGTERM, handler) signal.signal(signal.SIGINT, handler) if _sighup_supported(): signal.signal(signal.SIGHUP, handler) class Launcher(object): """Launch one or more services and wait for them to complete.""" def __init__(self): """Initialize the service launcher. :returns: None """ self.services = Services() self.backdoor_port = eventlet_backdoor.initialize_if_enabled() def launch_service(self, service): """Load and start the given service. :param service: The service you would like to start. :returns: None """ service.backdoor_port = self.backdoor_port self.services.add(service) def stop(self): """Stop all services which are currently running. :returns: None """ self.services.stop() def wait(self): """Waits until all services have been stopped, and then returns. :returns: None """ self.services.wait() def restart(self): """Reload config files and restart service. 
:returns: None """ cfg.CONF.reload_config_files() self.services.restart() class SignalExit(SystemExit): def __init__(self, signo, exccode=1): super(SignalExit, self).__init__(exccode) self.signo = signo class ServiceLauncher(Launcher): def _handle_signal(self, signo, frame): # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) raise SignalExit(signo) def handle_signal(self): _set_signals_handler(self._handle_signal) def _wait_for_exit_or_signal(self, ready_callback=None): status = None signo = 0 LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) try: if ready_callback: ready_callback() super(ServiceLauncher, self).wait() except SignalExit as exc: signame = _signo_to_signame(exc.signo) LOG.info(_LI('Caught %s, exiting'), signame) status = exc.code signo = exc.signo except SystemExit as exc: status = exc.code finally: self.stop() return status, signo def wait(self, ready_callback=None): systemd.notify_once() while True: self.handle_signal() status, signo = self._wait_for_exit_or_signal(ready_callback) if not _is_sighup_and_daemon(signo): return status self.restart() class ServiceWrapper(object): def __init__(self, service, workers): self.service = service self.workers = workers self.children = set() self.forktimes = [] class ProcessLauncher(object): def __init__(self, wait_interval=0.01): """Constructor. :param wait_interval: The interval to sleep for between checks of child process exit. 
""" self.children = {} self.sigcaught = None self.running = True self.wait_interval = wait_interval rfd, self.writepipe = os.pipe() self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r') self.handle_signal() def handle_signal(self): _set_signals_handler(self._handle_signal) def _handle_signal(self, signo, frame): self.sigcaught = signo self.running = False # Allow the process to be killed again and die from natural causes _set_signals_handler(signal.SIG_DFL) def _pipe_watcher(self): # This will block until the write end is closed when the parent # dies unexpectedly self.readpipe.read() LOG.info(_LI('Parent process has died unexpectedly, exiting')) sys.exit(1) def _child_process_handle_signal(self): # Setup child signal handlers differently def _sigterm(*args): signal.signal(signal.SIGTERM, signal.SIG_DFL) raise SignalExit(signal.SIGTERM) def _sighup(*args): signal.signal(signal.SIGHUP, signal.SIG_DFL) raise SignalExit(signal.SIGHUP) signal.signal(signal.SIGTERM, _sigterm) if _sighup_supported(): signal.signal(signal.SIGHUP, _sighup) # Block SIGINT and let the parent send us a SIGTERM signal.signal(signal.SIGINT, signal.SIG_IGN) def _child_wait_for_exit_or_signal(self, launcher): status = 0 signo = 0 # NOTE(johannes): All exceptions are caught to ensure this # doesn't fallback into the loop spawning children. It would # be bad for a child to spawn more children. 
try: launcher.wait() except SignalExit as exc: signame = _signo_to_signame(exc.signo) LOG.info(_LI('Child caught %s, exiting'), signame) status = exc.code signo = exc.signo except SystemExit as exc: status = exc.code except BaseException: LOG.exception(_LE('Unhandled exception')) status = 2 finally: launcher.stop() return status, signo def _child_process(self, service): self._child_process_handle_signal() # Reopen the eventlet hub to make sure we don't share an epoll # fd with parent and/or siblings, which would be bad eventlet.hubs.use_hub() # Close write to ensure only parent has it open os.close(self.writepipe) # Create greenthread to watch for parent to close pipe eventlet.spawn_n(self._pipe_watcher) # Reseed random number generator random.seed() launcher = Launcher() launcher.launch_service(service) return launcher def _start_child(self, wrap): if len(wrap.forktimes) > wrap.workers: # Limit ourselves to one process a second (over the period of # number of workers * 1 second). This will allow workers to # start up quickly but ensure we don't fork off children that # die instantly too quickly. 
if time.time() - wrap.forktimes[0] < wrap.workers: LOG.info(_LI('Forking too fast, sleeping')) time.sleep(1) wrap.forktimes.pop(0) wrap.forktimes.append(time.time()) pid = os.fork() if pid == 0: launcher = self._child_process(wrap.service) while True: self._child_process_handle_signal() status, signo = self._child_wait_for_exit_or_signal(launcher) if not _is_sighup_and_daemon(signo): break launcher.restart() os._exit(status) LOG.info(_LI('Started child %d'), pid) wrap.children.add(pid) self.children[pid] = wrap return pid def launch_service(self, service, workers=1): wrap = ServiceWrapper(service, workers) LOG.info(_LI('Starting %d workers'), wrap.workers) while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def _wait_child(self): try: # Don't block if no child processes have exited pid, status = os.waitpid(0, os.WNOHANG) if not pid: return None except OSError as exc: if exc.errno not in (errno.EINTR, errno.ECHILD): raise return None if os.WIFSIGNALED(status): sig = os.WTERMSIG(status) LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'), dict(pid=pid, sig=sig)) else: code = os.WEXITSTATUS(status) LOG.info(_LI('Child %(pid)s exited with status %(code)d'), dict(pid=pid, code=code)) if pid not in self.children: LOG.warning(_LW('pid %d not in child list'), pid) return None wrap = self.children.pop(pid) wrap.children.remove(pid) return wrap def _respawn_children(self): while self.running: wrap = self._wait_child() if not wrap: # Yield to other threads if no children have exited # Sleep for a short time to avoid excessive CPU usage # (see bug #1095346) eventlet.greenthread.sleep(self.wait_interval) continue while self.running and len(wrap.children) < wrap.workers: self._start_child(wrap) def wait(self): """Loop waiting on children to die and respawning as necessary.""" systemd.notify_once() LOG.debug('Full set of CONF:') CONF.log_opt_values(LOG, std_logging.DEBUG) try: while True: self.handle_signal() self._respawn_children() # No signal 
means that stop was called. Don't clean up here. if not self.sigcaught: return signame = _signo_to_signame(self.sigcaught) LOG.info(_LI('Caught %s, stopping children'), signame) if not _is_sighup_and_daemon(self.sigcaught): break for pid in self.children: os.kill(pid, signal.SIGHUP) self.running = True self.sigcaught = None except eventlet.greenlet.GreenletExit: LOG.info(_LI("Wait called after thread killed. Cleaning up.")) self.stop() def stop(self): """Terminate child processes and wait on each.""" self.running = False for pid in self.children: try: os.kill(pid, signal.SIGTERM) except OSError as exc: if exc.errno != errno.ESRCH: raise # Wait for children to die if self.children: LOG.info(_LI('Waiting on %d children to exit'), len(self.children)) while self.children: self._wait_child() class Service(object): """Service object for binaries running on hosts.""" def __init__(self, threads=1000): self.tg = threadgroup.ThreadGroup(threads) # signal that the service is done shutting itself down: self._done = event.Event() def reset(self): # NOTE(Fengqian): docs for Event.reset() recommend against using it self._done = event.Event() def start(self): pass def stop(self): self.tg.stop() self.tg.wait() # Signal that service cleanup is done: if not self._done.ready(): self._done.send() def wait(self): self._done.wait() class Services(object): def __init__(self): self.services = [] self.tg = threadgroup.ThreadGroup() self.done = event.Event() def add(self, service): self.services.append(service) self.tg.add_thread(self.run_service, service, self.done) def stop(self): # wait for graceful shutdown of services: for service in self.services: service.stop() service.wait() # Each service has performed cleanup, now signal that the run_service # wrapper threads can now die: if not self.done.ready(): self.done.send() # reap threads: self.tg.stop() def wait(self): self.tg.wait() def restart(self): self.stop() self.done = event.Event() for restart_service in self.services: 
restart_service.reset() self.tg.add_thread(self.run_service, restart_service, self.done) @staticmethod def run_service(service, done): """Service start wrapper. :param service: service to run :param done: event to wait on until a shutdown is triggered :returns: None """ service.start() done.wait() def launch(service, workers=1): if workers is None or workers == 1: launcher = ServiceLauncher() launcher.launch_service(service) else: launcher = ProcessLauncher() launcher.launch_service(service, workers=workers) return launcher
# =================================================================== # # Copyright (c) 2014, Legrandin <helderijs@gmail.com> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# ===================================================================

import os
import re
import unittest
from binascii import unhexlify, hexlify

from Cryptodome.Util.py3compat import b, tobytes, bchr
from Cryptodome.Util.strxor import strxor_c
from Cryptodome.SelfTest.st_common import list_test_cases

from Cryptodome.Cipher import ChaCha20


class ChaCha20Test(unittest.TestCase):
    """Functional tests for the ChaCha20 stream cipher API."""

    def test_new_positive(self):
        cipher = ChaCha20.new(key=b("0")*32, nonce=b("0")*8)
        self.assertEqual(cipher.nonce, b("0") * 8)

    def test_new_negative(self):
        new = ChaCha20.new
        self.assertRaises(TypeError, new)
        self.assertRaises(TypeError, new, nonce=b("0"))
        self.assertRaises(ValueError, new, nonce=b("0")*8, key=b("0"))
        self.assertRaises(ValueError, new, nonce=b("0"), key=b("0")*32)

    def test_default_nonce(self):
        cipher1 = ChaCha20.new(key=bchr(1) * 32)
        cipher2 = ChaCha20.new(key=bchr(1) * 32)
        # BUGFIX: assertEquals is a deprecated alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual(len(cipher1.nonce), 8)
        self.assertNotEqual(cipher1.nonce, cipher2.nonce)

    def test_either_encrypt_or_decrypt(self):
        """Verify that a cipher cannot be used for both decrypting and
        encrypting"""

        c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
        c1.encrypt(b("8"))
        self.assertRaises(TypeError, c1.decrypt, b("9"))

        c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
        c2.decrypt(b("8"))
        self.assertRaises(TypeError, c2.encrypt, b("9"))

    def test_round_trip(self):
        pt = b("A") * 1024
        c1 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
        c2 = ChaCha20.new(key=b("5") * 32, nonce=b("6") * 8)
        ct = c1.encrypt(pt)
        self.assertEqual(c2.decrypt(ct), pt)

        self.assertEqual(c1.encrypt(b("")), b(""))
        self.assertEqual(c2.decrypt(b("")), b(""))

    def test_streaming(self):
        """Verify that an arbitrary number of bytes can be encrypted/decrypted"""
        from Cryptodome.Hash import SHA1

        segments = (1, 3, 5, 7, 11, 17, 23)
        total = sum(segments)

        pt = b("")
        while len(pt) < total:
            pt += SHA1.new(pt).digest()

        cipher1 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
        ct = cipher1.encrypt(pt)

        cipher2 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
        cipher3 = ChaCha20.new(key=b("7") * 32, nonce=b("t") * 8)
        idx = 0
        for segment in segments:
            self.assertEqual(cipher2.decrypt(ct[idx:idx+segment]),
                             pt[idx:idx+segment])
            self.assertEqual(cipher3.encrypt(pt[idx:idx+segment]),
                             ct[idx:idx+segment])
            idx += segment

    def test_seek(self):
        cipher1 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)

        offset = 64 * 900 + 7
        pt = b("1") * 64

        cipher1.encrypt(b("0") * offset)
        ct1 = cipher1.encrypt(pt)

        cipher2 = ChaCha20.new(key=b("9") * 32, nonce=b("e") * 8)
        cipher2.seek(offset)
        ct2 = cipher2.encrypt(pt)

        # BUGFIX: assertEquals -> assertEqual (deprecated alias).
        self.assertEqual(ct1, ct2)

    def test_seek_tv(self):
        # Test Vector #4, A.1 from
        # http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
        key = bchr(0) + bchr(255) + bchr(0) * 30
        nonce = bchr(0) * 8
        cipher = ChaCha20.new(key=key, nonce=nonce)
        cipher.seek(64 * 2)
        expected_key_stream = unhexlify(b(
            "72d54dfbf12ec44b362692df94137f32"
            "8fea8da73990265ec1bbbea1ae9af0ca"
            "13b25aa26cb4a648cb9b9d1be65b2c09"
            "24a66c54d545ec1b7374f4872e99f096"
            ))
        ct = cipher.encrypt(bchr(0) * len(expected_key_stream))
        self.assertEqual(expected_key_stream, ct)


class ChaCha20_AGL_NIR(unittest.TestCase):
    """Known-answer tests from the AGL and NIR ChaCha20 drafts."""

    # From http://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04
    # and http://tools.ietf.org/html/draft-nir-cfrg-chacha20-poly1305-04
    tv = [
        (
            "00" * 32,
            "00" * 8,
            "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc"
            "8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11c"
            "c387b669b2ee6586"
            "9f07e7be5551387a98ba977c732d080d"
            "cb0f29a048e3656912c6533e32ee7aed"
            "29b721769ce64e43d57133b074d839d5"
            "31ed1f28510afb45ace10a1f4b794d6f"
        ),
        (
            "00" * 31 + "01",
            "00" * 8,
            "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952"
            "ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d792b1c43fea81"
            "7e9ad275ae546963"
            "3aeb5224ecf849929b9d828db1ced4dd"
            "832025e8018b8160b82284f3c949aa5a"
            "8eca00bbb4a73bdad192b5c42f73f2fd"
            "4e273644c8b36125a64addeb006c13a0"
        ),
        (
            "00" * 32,
            "00" * 7 + "01",
            "de9cba7bf3d69ef5e786dc63973f653a0b49e015adbff7134fcb7df1"
            "37821031e85a050278a7084527214f73efc7fa5b5277062eb7a0433e"
            "445f41e3"
        ),
        (
            "00" * 32,
            "01" + "00" * 7,
            "ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd1"
            "38e50d32111e4caf237ee53ca8ad6426194a88545ddc497a0b466e7d"
            "6bbdb0041b2f586b"
        ),
        (
            "000102030405060708090a0b0c0d0e0f101112131415161718191a1b"
            "1c1d1e1f",
            "0001020304050607",
            "f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56"
            "f85ac3c134a4547b733b46413042c9440049176905d3be59ea1c53f1"
            "5916155c2be8241a38008b9a26bc35941e2444177c8ade6689de9526"
            "4986d95889fb60e84629c9bd9a5acb1cc118be563eb9b3a4a472f82e"
            "09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a4750"
            "32b63fc385245fe054e3dd5a97a5f576fe064025d3ce042c566ab2c5"
            "07b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f7"
            "6dad3979e5c5360c3317166a1c894c94a371876a94df7628fe4eaaf2"
            "ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab7"
            "8fab78c9"
        ),
        (
            "00" * 32,
            "00" * 7 + "02",
            "c2c64d378cd536374ae204b9ef933fcd"
            "1a8b2288b3dfa49672ab765b54ee27c7"
            "8a970e0e955c14f3a88e741b97c286f7"
            "5f8fc299e8148362fa198a39531bed6d"
        ),
    ]

    def runTest(self):
        for (key, nonce, stream) in self.tv:
            c = ChaCha20.new(key=unhexlify(b(key)),
                             nonce=unhexlify(b(nonce)))
            ct = unhexlify(b(stream))
            pt = b("\x00") * len(ct)
            self.assertEqual(c.encrypt(pt), ct)


def get_tests(config={}):
    tests = []
    tests += list_test_cases(ChaCha20Test)
    tests.append(ChaCha20_AGL_NIR())
    return tests


if __name__ == '__main__':
    # BUGFIX: removed the redundant "import unittest" (already imported at
    # module level).
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
#!/usr/bin/env python # Unit tests for PostgreSQL on OS X and Linux. from __future__ import print_function import sys, os, re import unittest from decimal import Decimal from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of composed of `seed` to make a string `length` characters long. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = int((length + len(_TESTSTR)-1) / len(_TESTSTR)) v = _TESTSTR * c return v[:length] class PGTestCase(unittest.TestCase): # These are from the C++ code. Keep them up to date. # If we are reading a binary, string, or unicode value and do not know how large it is, we'll try reading 2K into a # buffer on the stack. We then copy into a new Python object. SMALL_READ = 2048 # A read guaranteed not to fit in the MAX_STACK_STACK stack buffer, but small enough to be used for varchar (4K max). LARGE_READ = 4000 SMALL_STRING = _generate_test_string(SMALL_READ) LARGE_STRING = _generate_test_string(LARGE_READ) def __init__(self, connection_string, ansi, method_name): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string self.ansi = ansi def setUp(self): self.cnxn = pyodbc.connect(self.connection_string, ansi=self.ansi) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_datasources(self): p = pyodbc.dataSources() self.assert_(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assert_(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assert_(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assert_(isinstance(value, int)) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assert_(isinstance(value, int)) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) def _test_strtype(self, sqltype, value, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), type(value)) if value is not None: self.assertEqual(len(v), len(value)) self.assertEqual(v, value) # # varchar # def test_empty_varchar(self): self._test_strtype('varchar', '', self.SMALL_READ) def test_null_varchar(self): self._test_strtype('varchar', None, self.SMALL_READ) def test_large_null_varchar(self): # There should not be a difference, but why not find out? 
self._test_strtype('varchar', None, self.LARGE_READ) def test_small_varchar(self): self._test_strtype('varchar', self.SMALL_STRING, self.SMALL_READ) def test_large_varchar(self): self._test_strtype('varchar', self.LARGE_STRING, self.LARGE_READ) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_small_decimal(self): # value = Decimal('1234567890987654321') value = Decimal('100010') # (I use this because the ODBC docs tell us how the bytes should look in the C struct) self.cursor.execute("create table t1(d numeric(19))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_small_decimal_scale(self): # The same as small_decimal, except with a different scale. This value exactly matches the ODBC documentation # example in the C Data Types appendix. 
value = '1000.10' value = Decimal(value) self.cursor.execute("create table t1(d numeric(20,6))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def test_negative_decimal_scale(self): value = Decimal('-10.0010') self.cursor.execute("create table t1(d numeric(19,4))") self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), Decimal) self.assertEqual(v, value) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) 
self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEquals(row[0], "1") self.assertEquals(row[-1], "1") def test_version(self): self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. def test_rowcount_delete(self): self.assertEquals(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEquals(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. 
self.cursor.execute("delete from t1") self.assertEquals(self.cursor.rowcount, 0) def test_rowcount_select(self): self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEquals(self.cursor.rowcount, 4) # PostgreSQL driver fails here? # def test_rowcount_reset(self): # "Ensure rowcount is reset to -1" # # self.cursor.execute("create table t1(i int)") # count = 4 # for i in range(count): # self.cursor.execute("insert into t1 values (?)", i) # self.assertEquals(self.cursor.rowcount, 1) # # self.cursor.execute("create table t2(i int)") # self.assertEquals(self.cursor.rowcount, -1) def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEquals(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. 
""" self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEquals(self.cursor.description, row.cursor_description) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) # REVIEW: Without the cast, we get the following error: # [07006] [unixODBC]Received an unsupported type from Postgres.;\nERROR: table "t2" does not exist (14) count = self.cursor.execute("select cast(count(*) as int) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.failUnless(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.failUnless(result is row) def test_cnxn_execute_error(self): """ Make sure that Connection.execute (not Cursor) errors are not "eaten". 
GitHub issue #74 """ self.cursor.execute("create table t1(a int primary key)") self.cursor.execute("insert into t1 values (1)") self.failUnlessRaises(pyodbc.Error, self.cnxn.execute, "insert into t1 values (1)") def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def main(): from optparse import OptionParser parser = OptionParser(usage="usage: %prog [options] connection_string") parser.add_option("-v", "--verbose", default=0, action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") parser.add_option('-a', '--ansi', help='ANSI only', default=False, action='store_true') (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('pgtests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] if options.verbose: cnxn = pyodbc.connect(connection_string, ansi=options.ansi) print_library_info(cnxn) # print 'library:', os.path.abspath(pyodbc.__file__) # print 'odbc: %s' % cnxn.getinfo(pyodbc.SQL_ODBC_VER) # print 'driver: %s %s' % (cnxn.getinfo(pyodbc.SQL_DRIVER_NAME), cnxn.getinfo(pyodbc.SQL_DRIVER_VER)) # print 'driver supports ODBC version %s' % cnxn.getinfo(pyodbc.SQL_DRIVER_ODBC_VER) # print 'unicode:', pyodbc.UNICODE_SIZE, 'sqlwchar:', pyodbc.SQLWCHAR_SIZE cnxn.close() if options.test: # Run a single test if not options.test.startswith('test_'): options.test = 'test_%s' % (options.test) s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, options.test) ]) else: # Run all tests in the class methods = [ m for m in dir(PGTestCase) if m.startswith('test_') ] methods.sort() s = unittest.TestSuite([ PGTestCase(connection_string, options.ansi, m) for m in methods ]) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(s) if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc main()
#!/usr/bin/env python from __future__ import print_function from mindmup_as_attack_trees import * import sys,json import re from collections import OrderedDict import math #import ipdb import argparse parser = argparse.ArgumentParser() parser.add_argument('mupin', nargs='?', help="The mindmup file that will be processed") args = parser.parse_args() def info(type, value, tb): ipdb.pm() #sys.excepthook = info levels_count = dict() nodes_lookup = dict() fixups_queue = list() objective_node =None def do_children_firstpass(node): for child in get_node_children(node): do_node_firstpass(child) return def do_node_firstpass(node): global nodes_lookup do_children_firstpass(node) node_title = node.get('title', '') if not node_title.find('TODO') == -1 or not get_raw_description(node).find('TODO') == -1: print("WARNING todo node: %s" % node_title) return if not node_title.find('XOR') == -1 or not get_raw_description(node).find('XOR') == -1: print("WARNING XOR node: %s" % node_title) return if node_title == 'AND' or node_title == 'OR': return if is_node_a_reference(node): if not is_node_a_leaf(node): print("ERROR reference node with children: %s" % node_title) return if nodes_lookup.get(node_title, None) is None: nodes_lookup.update({node_title: node}) else: print("ERROR duplicate node found: %s" % node_title) if (not is_node_a_reference(node)) and is_attack_vector(node) and get_raw_description(node).find('EVITA::') == -1: print("ERROR attack vector node is missing RAP assignment: %s" % node_title) if (not is_node_a_reference(node)) and is_attack_vector(node): mitigations = collect_all(node, is_mitigation) if len(mitigations) == 0: print("WARNING attack vector node has no mitigations: %s" % node_title) if is_objective(node) and (not is_outofscope(node)) and get_raw_description(node).find('EVITA::') == -1: print("ERROR Objective node w/o EVITA:: marker: %s" % node_title) if not is_outofscope(node) and is_attack_vector(node) and not node_title.find('(Locally-Permitted)') == -1: 
        print("WARNING overconstrained node: attack vector node with (Locally-Permitted): %s" % node_title)
    #TODO WARNING Node with explicit (Out of Scope) label
    #TODO ERROR OBJECTIVE Node without EVITA
    #TODO ERROR EVITA:: without enough (terminated) elements of the vector (e.g. missing | terminator on last column)
    #TODO ERROR no RISK_HERE:: node
    return

def is_node_weighted(node):
    # A node counts as weighted only if its weight is a finite number
    # (NaN marks "in progress", +/-inf mark unresolved reductions).
    weight = get_node_weight(node)
    return (not weight is None) and (not math.isnan(weight)) and (not math.isinf(weight))

def update_node_weight(node, weight):
    # Store `weight` under node['attr']['weight'], creating 'attr' if absent.
    if node.get('attr', None) is None:
        node.update({'attr': dict()})
    node.get('attr').update({'weight': weight})

def pos_infs_of_children(node):
    # Flip -inf children to +inf (identity element for the min/AND reduction).
    for child in get_node_children(node):
        if get_node_weight(child) == float('-inf'):
            update_node_weight(child, float('inf'))

def neg_infs_of_children(node):
    # Flip +inf children to -inf (identity element for the max/OR reduction).
    for child in get_node_children(node):
        if get_node_weight(child) == float('inf'):
            update_node_weight(child, float('-inf'))

def get_max_weight_of_children(node):
    child_maximum = float('-inf')
    for child in get_node_children(node):
        child_maximum = max(child_maximum, get_node_weight(child))
    return child_maximum

def get_min_weight_of_children(node):
    child_minimum = float('inf')
    for child in get_node_children(node):
        child_minimum = min(child_minimum, get_node_weight(child))
    return child_minimum

def get_node_weight(node):
    # Returns None when the node has never been weighted.
    return node.get('attr', dict()).get('weight', None)

def do_children_secondpass(node, nodes_context):
    for child in get_node_children(node):
        do_node_secondpass(child, nodes_context)
    return

# Objective node currently being descended through, or None (set/cleared by
# do_node_secondpass and do_node_checkinfs).
objective_context = None

def do_node_secondpass(node, nodes_context):
    """Reduce weights bottom-up: AND = min of children, OR = max of children.

    NaN marks a node whose reduction is in progress (used to detect reference
    loops); +/-inf mark loop-broken nodes that are queued for fixup.
    """
    global nodes_lookup
    global fixups_queue
    global objective_context
    if is_node_weighted(node):
        return
    # Mark in-progress so a reference loop back to this node is detectable.
    update_node_weight(node, float('nan'))
    if is_objective(node):
        objective_context = node
    if is_node_a_reference(node):
        node_referent = get_node_referent(node, nodes_lookup)
        node_referent_title=get_node_title(node_referent)
        #TODO Warn on reference to non subtree-root node (to ensure that re-used nodes are sufficiently abstracted to be their own section
        if not is_subtree(node_referent):
            if (not is_attack_vector(node_referent)) and (not is_mitigation(node_referent)):
                print("WARNING reference made to non-leaf non-subtree %s" % node_referent_title)
        if (not get_node_weight(node_referent) is None) and (math.isnan(get_node_weight(node_referent))):
            #is referent in-progress? then we have a loop. update the reference node with the identity of the tree reduction operation and return
            update_node_weight(node, float('-inf'))
        else:
            #otherwise, descend through referent's children
            #do all on the referent and copy the node weight back
            do_node_secondpass(node_referent, nodes_context)
            update_node_weight(node,get_node_weight(node_referent))
    else:
        if is_attack_vector(node):
            # Leaves of the reduction: attack vectors contribute weight 0.
            update_node_weight(node, 0)
        else:
            nodes_context.append(get_node_title(node))
            do_children_secondpass(node, nodes_context)
            nodes_context.pop()
            if node.get('title', None) == 'AND':
                pos_infs_of_children(node)
                update_node_weight(node, get_min_weight_of_children(node))
            else:
                neg_infs_of_children(node)
                update_node_weight(node, get_max_weight_of_children(node))
    if get_node_weight(node) is None:
        print("ERROR None propagating through weights at node: %s" % get_node_title(node))
    else:
        if math.isnan(get_node_weight(node)):
            print("ERROR NaN propagating through weights at node: %s (%s)" % (get_node_title(node),nodes_context))
    # Queue loop-broken (infinite) nodes for a later fixup pass.
    if (not is_mitigation(node)) and (not is_outofscope(node)) and (not objective_context is None) and math.isinf(get_node_weight(node)):
        fixups_queue.append(node)
    if is_objective(node):
        objective_context = None
    return

def do_fixups(nodes_context):
    """Re-run the second pass on queued infinite nodes until the queue stops
    shrinking (no progress means an unresolvable loop)."""
    global fixups_queue
    fixups_len = len(fixups_queue)
    while len(fixups_queue) > 0:
        fixups_this_time = list(fixups_queue)
        fixups_queue = list()
        for node in fixups_this_time:
            do_node_secondpass(node, nodes_context)
        if len(fixups_queue) >= fixups_len:
            print("ERROR couldn't resolve remaining infs %s" % fixups_queue)
            break
        else:
            fixups_len = len(fixups_queue)

def 
do_children_checkinfs(node, nodes_context): for child in get_node_children(node): do_node_checkinfs(child, nodes_context) return def do_node_checkinfs(node, nodes_context): global objective_context if is_objective(node): objective_context = node if not is_attack_vector(node): nodes_context.append(get_node_title(node)) do_children_checkinfs(node, nodes_context) nodes_context.pop() if math.isinf(get_node_weight(node)) and (not is_outofscope(node)) and (not is_mitigation(node)) and (not objective_context is None): print("ERROR leftover %s at %s" % (get_node_weight(node), get_node_title(node))) if is_objective(node): objective_context = None return if args.mupin is None: fd_in = stdin else: fd_in=open(sys.argv[1], 'r') data = json.load(fd_in) nodes_context=list() if 'id' in data and data['id'] == 'root': #version 2 mindmup root_node = data['ideas']['1'] else: root_node = data def remove_hidden(node): for child in get_node_children(node): if get_node_title(child) == '.hidden': remove_child(node, child) return apply_each_node(root_node, remove_hidden) do_children_firstpass(root_node) do_node_secondpass(root_node, nodes_context) top_weight = get_node_weight(root_node) #TODO check for any leftover infs and fix 'em do_fixups(nodes_context) do_node_checkinfs(root_node, nodes_context) if top_weight != 0: print("ERROR: weights not propagated correctly through tree. Expecting 0. Got %s" % top_weight) #TODO: check for missing referents of mitigations too
""" Module providing easy API for working with remote files and folders. """ import hashlib import os import six from functools import partial from fabric.api import run, sudo, hide, settings, env, put, abort from fabric.utils import apply_lcwd def exists(path, use_sudo=False, verbose=False): """ Return True if given path exists on the current remote host. If ``use_sudo`` is True, will use `sudo` instead of `run`. `exists` will, by default, hide all output (including the run line, stdout, stderr and any warning resulting from the file not existing) in order to avoid cluttering output. You may specify ``verbose=True`` to change this behavior. .. versionchanged:: 1.13 Replaced internal use of ``test -e`` with ``stat`` for improved remote cross-platform (e.g. Windows) compatibility. """ func = use_sudo and sudo or run cmd = 'stat %s' % _expand_path(path) # If verbose, run normally if verbose: with settings(warn_only=True): return not func(cmd).failed # Otherwise, be quiet with settings(hide('everything'), warn_only=True): return not func(cmd).failed def is_link(path, use_sudo=False, verbose=False): """ Return True if the given path is a symlink on the current remote host. If ``use_sudo`` is True, will use `.sudo` instead of `.run`. `.is_link` will, by default, hide all output. Give ``verbose=True`` to change this. """ func = sudo if use_sudo else run cmd = 'test -L "$(echo %s)"' % path args, kwargs = [], {'warn_only': True} if not verbose: args = [hide('everything')] with settings(*args, **kwargs): return func(cmd).succeeded def first(*args, **kwargs): """ Given one or more file paths, returns first one found, or None if none exist. May specify ``use_sudo`` and ``verbose`` which are passed to `exists`. 
""" for directory in args: if exists(directory, **kwargs): return directory def upload_template(filename, destination, context=None, use_jinja=False, template_dir=None, use_sudo=False, backup=True, mirror_local_mode=False, mode=None, pty=None, keep_trailing_newline=False, temp_dir=''): """ Render and upload a template text file to a remote host. Returns the result of the inner call to `~fabric.operations.put` -- see its documentation for details. ``filename`` should be the path to a text file, which may contain `Python string interpolation formatting <http://docs.python.org/library/stdtypes.html#string-formatting>`_ and will be rendered with the given context dictionary ``context`` (if given.) Alternately, if ``use_jinja`` is set to True and you have the Jinja2 templating library available, Jinja will be used to render the template instead. Templates will be loaded from the invoking user's current working directory by default, or from ``template_dir`` if given. The resulting rendered file will be uploaded to the remote file path ``destination``. If the destination file already exists, it will be renamed with a ``.bak`` extension unless ``backup=False`` is specified. By default, the file will be copied to ``destination`` as the logged-in user; specify ``use_sudo=True`` to use `sudo` instead. The ``mirror_local_mode``, ``mode``, and ``temp_dir`` kwargs are passed directly to an internal `~fabric.operations.put` call; please see its documentation for details on these two options. The ``pty`` kwarg will be passed verbatim to any internal `~fabric.operations.run`/`~fabric.operations.sudo` calls, such as those used for testing directory-ness, making backups, etc. The ``keep_trailing_newline`` kwarg will be passed when creating Jinja2 Environment which is False by default, same as Jinja2's behaviour. .. versionchanged:: 1.1 Added the ``backup``, ``mirror_local_mode`` and ``mode`` kwargs. .. versionchanged:: 1.9 Added the ``pty`` kwarg. .. 
versionchanged:: 1.11 Added the ``keep_trailing_newline`` kwarg. .. versionchanged:: 1.11 Added the ``temp_dir`` kwarg. """ func = use_sudo and sudo or run if pty is not None: func = partial(func, pty=pty) # Normalize destination to be an actual filename, due to using StringIO with settings(hide('everything'), warn_only=True): if func('test -d %s' % _expand_path(destination)).succeeded: sep = "" if destination.endswith('/') else "/" destination += sep + os.path.basename(filename) # Use mode kwarg to implement mirror_local_mode, again due to using # StringIO if mirror_local_mode and mode is None: mode = os.stat(apply_lcwd(filename, env)).st_mode # To prevent put() from trying to do this # logic itself mirror_local_mode = False # Process template text = None if use_jinja: try: template_dir = template_dir or os.getcwd() template_dir = apply_lcwd(template_dir, env) from jinja2 import Environment, FileSystemLoader jenv = Environment(loader=FileSystemLoader(template_dir), keep_trailing_newline=keep_trailing_newline) text = jenv.get_template(filename).render(**context or {}) # Force to a byte representation of Unicode, or str()ification # within Paramiko's SFTP machinery may cause decode issues for # truly non-ASCII characters. text = text.encode('utf-8') except ImportError: import traceback tb = traceback.format_exc() abort(tb + "\nUnable to import Jinja2 -- see above.") else: if template_dir: filename = os.path.join(template_dir, filename) filename = apply_lcwd(filename, env) with open(os.path.expanduser(filename)) as inputfile: text = inputfile.read() if context: text = text % context # Back up original file if backup and exists(destination): func("cp %s{,.bak}" % _expand_path(destination)) if six.PY3 is True and isinstance(text, bytes): text = text.decode('utf-8') # Upload the file. 
return put( local_path=six.StringIO(text), remote_path=destination, use_sudo=use_sudo, mirror_local_mode=mirror_local_mode, mode=mode, temp_dir=temp_dir ) def sed(filename, before, after, limit='', use_sudo=False, backup='.bak', flags='', shell=False): """ Run a search-and-replace on ``filename`` with given regex patterns. Equivalent to ``sed -i<backup> -r -e "/<limit>/ s/<before>/<after>/<flags>g" <filename>``. Setting ``backup`` to an empty string will, disable backup file creation. For convenience, ``before`` and ``after`` will automatically escape forward slashes, single quotes and parentheses for you, so you don't need to specify e.g. ``http:\/\/foo\.com``, instead just using ``http://foo\.com`` is fine. If ``use_sudo`` is True, will use `sudo` instead of `run`. The ``shell`` argument will be eventually passed to `run`/`sudo`. It defaults to False in order to avoid problems with many nested levels of quotes and backslashes. However, setting it to True may help when using ``~fabric.operations.cd`` to wrap explicit or implicit ``sudo`` calls. (``cd`` by it's nature is a shell built-in, not a standalone command, so it should be called within a shell.) Other options may be specified with sed-compatible regex flags -- for example, to make the search and replace case insensitive, specify ``flags="i"``. The ``g`` flag is always specified regardless, so you do not need to remember to include it when overriding this parameter. .. versionadded:: 1.1 The ``flags`` parameter. .. versionadded:: 1.6 Added the ``shell`` keyword argument. 
""" func = use_sudo and sudo or run # Characters to be escaped in both for char in "/'": before = before.replace(char, r'\%s' % char) after = after.replace(char, r'\%s' % char) # Characters to be escaped in replacement only (they're useful in regexen # in the 'before' part) for char in "()": after = after.replace(char, r'\%s' % char) if limit: limit = r'/%s/ ' % limit context = { 'script': r"'%ss/%s/%s/%sg'" % (limit, before, after, flags), 'filename': _expand_path(filename), 'backup': backup } # Test the OS because of differences between sed versions with hide('running', 'stdout'): platform = run("uname", shell=False, pty=False) if platform in ('NetBSD', 'OpenBSD', 'QNX'): # Attempt to protect against failures/collisions hasher = hashlib.sha1() hasher.update(env.host_string) hasher.update(filename) context['tmp'] = "/tmp/%s" % hasher.hexdigest() # Use temp file to work around lack of -i expr = r"""cp -p %(filename)s %(tmp)s \ && sed -r -e %(script)s %(filename)s > %(tmp)s \ && cp -p %(filename)s %(filename)s%(backup)s \ && mv %(tmp)s %(filename)s""" else: context['extended_regex'] = '-E' if platform == 'Darwin' else '-r' expr = r"sed -i%(backup)s %(extended_regex)s -e %(script)s %(filename)s" command = expr % context return func(command, shell=shell) def uncomment(filename, regex, use_sudo=False, char='#', backup='.bak', shell=False): """ Attempt to uncomment all lines in ``filename`` matching ``regex``. The default comment delimiter is `#` and may be overridden by the ``char`` argument. This function uses the `sed` function, and will accept the same ``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does. `uncomment` will remove a single whitespace character following the comment character, if it exists, but will preserve all preceding whitespace. For example, ``# foo`` would become ``foo`` (the single space is stripped) but `` # foo`` would become `` foo`` (the single space is still stripped, but the preceding 4 spaces are not.) .. 
versionchanged:: 1.6 Added the ``shell`` keyword argument. """ return sed( filename, before=r'^([[:space:]]*)%s[[:space:]]?' % char, after=r'\1', limit=regex, use_sudo=use_sudo, backup=backup, shell=shell ) def comment(filename, regex, use_sudo=False, char='#', backup='.bak', shell=False): """ Attempt to comment out all lines in ``filename`` matching ``regex``. The default commenting character is `#` and may be overridden by the ``char`` argument. This function uses the `sed` function, and will accept the same ``use_sudo``, ``shell`` and ``backup`` keyword arguments that `sed` does. `comment` will prepend the comment character to the beginning of the line, so that lines end up looking like so:: this line is uncommented #this line is commented # this line is indented and commented In other words, comment characters will not "follow" indentation as they sometimes do when inserted by hand. Neither will they have a trailing space unless you specify e.g. ``char='# '``. .. note:: In order to preserve the line being commented out, this function will wrap your ``regex`` argument in parentheses, so you don't need to. It will ensure that any preceding/trailing ``^`` or ``$`` characters are correctly moved outside the parentheses. For example, calling ``comment(filename, r'^foo$')`` will result in a `sed` call with the "before" regex of ``r'^(foo)$'`` (and the "after" regex, naturally, of ``r'#\\1'``.) .. versionadded:: 1.5 Added the ``shell`` keyword argument. """ carot, dollar = '', '' if regex.startswith('^'): carot = '^' regex = regex[1:] if regex.endswith('$'): dollar = '$' regex = regex[:-1] regex = "%s(%s)%s" % (carot, regex, dollar) return sed( filename, before=regex, after=r'%s\1' % char, use_sudo=use_sudo, backup=backup, shell=shell ) def contains(filename, text, exact=False, use_sudo=False, escape=True, shell=False, case_sensitive=True): """ Return True if ``filename`` contains ``text`` (which may be a regex.) 
By default, this function will consider a partial line match (i.e. where ``text`` only makes up part of the line it's on). Specify ``exact=True`` to change this behavior so that only a line containing exactly ``text`` results in a True return value. This function leverages ``egrep`` on the remote end (so it may not follow Python regular expression syntax perfectly), and skips ``env.shell`` wrapper by default. If ``use_sudo`` is True, will use `sudo` instead of `run`. If ``escape`` is False, no extra regular expression related escaping is performed (this includes overriding ``exact`` so that no ``^``/``$`` is added.) The ``shell`` argument will be eventually passed to ``run/sudo``. See description of the same argument in ``~fabric.contrib.sed`` for details. If ``case_sensitive`` is False, the `-i` flag will be passed to ``egrep``. .. versionchanged:: 1.0 Swapped the order of the ``filename`` and ``text`` arguments to be consistent with other functions in this module. .. versionchanged:: 1.4 Updated the regular expression related escaping to try and solve various corner cases. .. versionchanged:: 1.4 Added ``escape`` keyword argument. .. versionadded:: 1.6 Added the ``shell`` keyword argument. .. versionadded:: 1.11 Added the ``case_sensitive`` keyword argument. """ func = use_sudo and sudo or run if escape: text = _escape_for_regex(text) if exact: text = "^%s$" % text with settings(hide('everything'), warn_only=True): egrep_cmd = 'egrep "%s" %s' % (text, _expand_path(filename)) if not case_sensitive: egrep_cmd = egrep_cmd.replace('egrep', 'egrep -i', 1) return func(egrep_cmd, shell=shell).succeeded def append(filename, text, use_sudo=False, partial=False, escape=True, shell=False): """ Append string (or list of strings) ``text`` to ``filename``. When a list is given, each string inside is handled independently (but in the order given.) If ``text`` is already found in ``filename``, the append is not run, and None is returned immediately. 
Otherwise, the given text is appended to the end of the given ``filename`` via e.g. ``echo '$text' >> $filename``. The test for whether ``text`` already exists defaults to a full line match, e.g. ``^<text>$``, as this seems to be the most sensible approach for the "append lines to a file" use case. You may override this and force partial searching (e.g. ``^<text>``) by specifying ``partial=True``. Because ``text`` is single-quoted, single quotes will be transparently backslash-escaped. This can be disabled with ``escape=False``. If ``use_sudo`` is True, will use `sudo` instead of `run`. The ``shell`` argument will be eventually passed to ``run/sudo``. See description of the same argumnet in ``~fabric.contrib.sed`` for details. .. versionchanged:: 0.9.1 Added the ``partial`` keyword argument. .. versionchanged:: 1.0 Swapped the order of the ``filename`` and ``text`` arguments to be consistent with other functions in this module. .. versionchanged:: 1.0 Changed default value of ``partial`` kwarg to be ``False``. .. versionchanged:: 1.4 Updated the regular expression related escaping to try and solve various corner cases. .. versionadded:: 1.6 Added the ``shell`` keyword argument. """ func = use_sudo and sudo or run # Normalize non-list input to be a list if isinstance(text, six.string_types): text = [text] for line in text: regex = '^' + _escape_for_regex(line) + ('' if partial else '$') if (exists(filename, use_sudo=use_sudo) and line and contains(filename, regex, use_sudo=use_sudo, escape=False, shell=shell)): continue line = line.replace("'", r"'\\''") if escape else line func("echo '%s' >> %s" % (line, _expand_path(filename))) def _escape_for_regex(text): """Escape ``text`` to allow literal matching using egrep""" re_specials = '\\^$|(){}[]*+?.' 
sh_specials = '\\$`"' re_chars = [] sh_chars = [] for c in text: if c in re_specials: re_chars.append('\\') re_chars.append(c) for c in re_chars: if c in sh_specials: sh_chars.append('\\') sh_chars.append(c) return ''.join(sh_chars) def is_win(): """ Return True if remote SSH server is running Windows, False otherwise. The idea is based on echoing quoted text: \*NIX systems will echo quoted text only, while Windows echoes quotation marks as well. """ with settings(hide('everything'), warn_only=True): return '"' in run('echo "Will you echo quotation marks"') def is_win(): """ Return True if remote SSH server is running Windows, False otherwise. The idea is based on echoing quoted text: \*NIX systems will echo quoted text only, while Windows echoes quotation marks as well. """ with settings(hide('everything'), warn_only=True): return '"' in run('echo "Will you echo quotation marks"') def _expand_path(path): """ Return a path expansion E.g. ~/some/path -> /home/myuser/some/path /user/\*/share -> /user/local/share More examples can be found here: http://linuxcommand.org/lc3_lts0080.php .. versionchanged:: 1.0 Avoid breaking remote Windows commands which does not support expansion. """ return path if is_win() else '"$(echo %s)"' % path
"""JOSE Web Signature.""" import argparse import base64 import sys import M2Crypto from acme.jose import b64 from acme.jose import errors from acme.jose import json_util from acme.jose import jwa from acme.jose import jwk from acme.jose import util class MediaType(object): """MediaType field encoder/decoder.""" PREFIX = 'application/' """MIME Media Type and Content Type prefix.""" @classmethod def decode(cls, value): """Decoder.""" # 4.1.10 if '/' not in value: if ';' in value: raise errors.DeserializationError('Unexpected semi-colon') return cls.PREFIX + value return value @classmethod def encode(cls, value): """Encoder.""" # 4.1.10 if ';' not in value: assert value.startswith(cls.PREFIX) return value[len(cls.PREFIX):] return value class Header(json_util.JSONObjectWithFields): """JOSE Header. .. warning:: This class supports **only** Registered Header Parameter Names (as defined in section 4.1 of the protocol). If you need Public Header Parameter Names (4.2) or Private Header Parameter Names (4.3), you must subclass and override :meth:`from_json` and :meth:`to_partial_json` appropriately. .. warning:: This class does not support any extensions through the "crit" (Critical) Header Parameter (4.1.11) and as a conforming implementation, :meth:`from_json` treats its occurence as an error. Please subclass if you seek for a diferent behaviour. :ivar x5tS256: "x5t#S256" :ivar str typ: MIME Media Type, inc. :const:`MediaType.PREFIX`. :ivar str cty: Content-Type, inc. :const:`MediaType.PREFIX`. 
""" alg = json_util.Field( 'alg', decoder=jwa.JWASignature.from_json, omitempty=True) jku = json_util.Field('jku', omitempty=True) jwk = json_util.Field('jwk', decoder=jwk.JWK.from_json, omitempty=True) kid = json_util.Field('kid', omitempty=True) x5u = json_util.Field('x5u', omitempty=True) x5c = json_util.Field('x5c', omitempty=True, default=()) x5t = json_util.Field( 'x5t', decoder=json_util.decode_b64jose, omitempty=True) x5tS256 = json_util.Field( 'x5t#S256', decoder=json_util.decode_b64jose, omitempty=True) typ = json_util.Field('typ', encoder=MediaType.encode, decoder=MediaType.decode, omitempty=True) cty = json_util.Field('cty', encoder=MediaType.encode, decoder=MediaType.decode, omitempty=True) crit = json_util.Field('crit', omitempty=True, default=()) def not_omitted(self): """Fields that would not be omitted in the JSON object.""" return dict((name, getattr(self, name)) for name, field in self._fields.iteritems() if not field.omit(getattr(self, name))) def __add__(self, other): if not isinstance(other, type(self)): raise TypeError('Header cannot be added to: {0}'.format( type(other))) not_omitted_self = self.not_omitted() not_omitted_other = other.not_omitted() if set(not_omitted_self).intersection(not_omitted_other): raise TypeError('Addition of overlapping headers not defined') not_omitted_self.update(not_omitted_other) return type(self)(**not_omitted_self) # pylint: disable=star-args def find_key(self): """Find key based on header. .. todo:: Supports only "jwk" header parameter lookup. :returns: (Public) key found in the header. 
:rtype: :class:`acme.jose.jwk.JWK` :raises acme.jose.errors.Error: if key could not be found """ if self.jwk is None: raise errors.Error('No key found') return self.jwk @crit.decoder def crit(unused_value): # pylint: disable=missing-docstring,no-self-argument,no-self-use raise errors.DeserializationError( '"crit" is not supported, please subclass') # x5c does NOT use JOSE Base64 (4.1.6) @x5c.encoder def x5c(value): # pylint: disable=missing-docstring,no-self-argument return [base64.b64encode(cert.as_der()) for cert in value] @x5c.decoder def x5c(value): # pylint: disable=missing-docstring,no-self-argument try: return tuple(util.ComparableX509(M2Crypto.X509.load_cert_der_string( base64.b64decode(cert))) for cert in value) except M2Crypto.X509.X509Error as error: raise errors.DeserializationError(error) class Signature(json_util.JSONObjectWithFields): """JWS Signature. :ivar combined: Combined Header (protected and unprotected, :class:`Header`). :ivar unicode protected: JWS protected header (Jose Base-64 decoded). :ivar header: JWS Unprotected Header (:class:`Header`). :ivar str signature: The signature. """ header_cls = Header __slots__ = ('combined',) protected = json_util.Field( 'protected', omitempty=True, default='', decoder=json_util.decode_b64jose, encoder=b64.b64encode) # TODO: utf-8? 
header = json_util.Field( 'header', omitempty=True, default=header_cls(), decoder=header_cls.from_json) signature = json_util.Field( 'signature', decoder=json_util.decode_b64jose, encoder=b64.b64encode) def __init__(self, **kwargs): if 'combined' not in kwargs: kwargs = self._with_combined(kwargs) super(Signature, self).__init__(**kwargs) assert self.combined.alg is not None @classmethod def _with_combined(cls, kwargs): assert 'combined' not in kwargs header = kwargs.get('header', cls._fields['header'].default) protected = kwargs.get('protected', cls._fields['protected'].default) if protected: combined = header + cls.header_cls.json_loads(protected) else: combined = header kwargs['combined'] = combined return kwargs def verify(self, payload, key=None): """Verify. :param key: Key used for verification. :type key: :class:`acme.jose.jwk.JWK` """ key = self.combined.find_key() if key is None else key return self.combined.alg.verify( key=key.key, sig=self.signature, msg=(b64.b64encode(self.protected) + '.' + b64.b64encode(payload))) @classmethod def sign(cls, payload, key, alg, include_jwk=True, protect=frozenset(), **kwargs): """Sign. :param key: Key for signature. :type key: :class:`acme.jose.jwk.JWK` """ assert isinstance(key, alg.kty) header_params = kwargs header_params['alg'] = alg if include_jwk: header_params['jwk'] = key.public() assert set(header_params).issubset(cls.header_cls._fields) assert protect.issubset(cls.header_cls._fields) protected_params = {} for header in protect: protected_params[header] = header_params.pop(header) if protected_params: # pylint: disable=star-args protected = cls.header_cls(**protected_params).json_dumps() else: protected = '' header = cls.header_cls(**header_params) # pylint: disable=star-args signature = alg.sign(key.key, b64.b64encode(protected) + '.' 
+ b64.b64encode(payload)) return cls(protected=protected, header=header, signature=signature) def fields_to_partial_json(self): fields = super(Signature, self).fields_to_partial_json() if not fields['header'].not_omitted(): del fields['header'] return fields @classmethod def fields_from_json(cls, jobj): fields = super(Signature, cls).fields_from_json(jobj) fields_with_combined = cls._with_combined(fields) if 'alg' not in fields_with_combined['combined'].not_omitted(): raise errors.DeserializationError('alg not present') return fields_with_combined class JWS(json_util.JSONObjectWithFields): """JSON Web Signature. :ivar str payload: JWS Payload. :ivar str signaturea: JWS Signatures. """ __slots__ = ('payload', 'signatures') signature_cls = Signature def verify(self, key=None): """Verify.""" return all(sig.verify(self.payload, key) for sig in self.signatures) @classmethod def sign(cls, payload, **kwargs): """Sign.""" return cls(payload=payload, signatures=( cls.signature_cls.sign(payload=payload, **kwargs),)) @property def signature(self): """Get a singleton signature. :rtype: `signature_cls` """ assert len(self.signatures) == 1 return self.signatures[0] def to_compact(self): """Compact serialization.""" assert len(self.signatures) == 1 assert 'alg' not in self.signature.header.not_omitted() # ... 
it must be in protected return '{0}.{1}.{2}'.format( b64.b64encode(self.signature.protected), b64.b64encode(self.payload), b64.b64encode(self.signature.signature)) @classmethod def from_compact(cls, compact): """Compact deserialization.""" try: protected, payload, signature = compact.split('.') except ValueError: raise errors.DeserializationError( 'Compact JWS serialization should comprise of exactly' ' 3 dot-separated components') sig = cls.signature_cls(protected=json_util.decode_b64jose(protected), signature=json_util.decode_b64jose(signature)) return cls(payload=json_util.decode_b64jose(payload), signatures=(sig,)) def to_partial_json(self, flat=True): # pylint: disable=arguments-differ assert self.signatures payload = b64.b64encode(self.payload) if flat and len(self.signatures) == 1: ret = self.signatures[0].to_partial_json() ret['payload'] = payload return ret else: return { 'payload': payload, 'signatures': self.signatures, } @classmethod def from_json(cls, jobj): if 'signature' in jobj and 'signatures' in jobj: raise errors.DeserializationError('Flat mixed with non-flat') elif 'signature' in jobj: # flat return cls(payload=json_util.decode_b64jose(jobj.pop('payload')), signatures=(cls.signature_cls.from_json(jobj),)) else: return cls(payload=json_util.decode_b64jose(jobj['payload']), signatures=tuple(cls.signature_cls.from_json(sig) for sig in jobj['signatures'])) class CLI(object): """JWS CLI.""" @classmethod def sign(cls, args): """Sign.""" key = args.alg.kty.load(args.key.read()) if args.protect is None: args.protect = [] if args.compact: args.protect.append('alg') sig = JWS.sign(payload=sys.stdin.read(), key=key, alg=args.alg, protect=set(args.protect)) if args.compact: print sig.to_compact() else: # JSON print sig.json_dumps_pretty() @classmethod def verify(cls, args): """Verify.""" if args.compact: sig = JWS.from_compact(sys.stdin.read()) else: # JSON try: sig = JWS.json_loads(sys.stdin.read()) except errors.Error as error: print error return -1 if 
args.key is not None: assert args.kty is not None key = args.kty.load(args.key.read()) else: key = None sys.stdout.write(sig.payload) return int(not sig.verify(key=key)) @classmethod def _alg_type(cls, arg): return jwa.JWASignature.from_json(arg) @classmethod def _header_type(cls, arg): assert arg in Signature.header_cls._fields return arg @classmethod def _kty_type(cls, arg): assert arg in jwk.JWK.TYPES return jwk.JWK.TYPES[arg] @classmethod def run(cls, args=sys.argv[1:]): """Parse arguments and sign/verify.""" parser = argparse.ArgumentParser() parser.add_argument('--compact', action='store_true') subparsers = parser.add_subparsers() parser_sign = subparsers.add_parser('sign') parser_sign.set_defaults(func=cls.sign) parser_sign.add_argument( '-k', '--key', type=argparse.FileType(), required=True) parser_sign.add_argument( '-a', '--alg', type=cls._alg_type, default=jwa.RS256) parser_sign.add_argument( '-p', '--protect', action='append', type=cls._header_type) parser_verify = subparsers.add_parser('verify') parser_verify.set_defaults(func=cls.verify) parser_verify.add_argument( '-k', '--key', type=argparse.FileType(), required=False) parser_verify.add_argument( '--kty', type=cls._kty_type, required=False) parsed = parser.parse_args(args) return parsed.func(parsed) if __name__ == '__main__': exit(CLI.run()) # pragma: no cover
import os

import commander


class EMMA_OUTMODE:
    """Valid values for EMMA's ``-outmode`` instrumentation option."""
    COPY = "copy"
    OVERWRITE = "overwrite"
    FULLCOPY = "fullcopy"


class EMMA_MERGE:
    """Valid values for EMMA's ``-merge`` instrumentation option."""
    YES = "yes"
    NO = "no"


class EMMA_REPORT:
    """Valid values for EMMA's ``-report`` option."""
    HTML = "html"
    TXT = "txt"
    XML = "xml"


class EmmaInterface:
    """Thin command-line wrapper around the EMMA code-coverage tool.

    Builds ``emma instr``/``emma report`` command lines and runs them via
    :mod:`commander`.
    """

    def __init__(self,
                 javaPath="java",
                 javaOpts="-Xms512m -Xmx1024m",
                 pathEmma="./auxiliary/emma/",
                 jarEmma="emma.jar",
                 jarEmmaDevice="emma_device.jar"):
        self.javaPath = javaPath
        self.javaOpts = javaOpts
        self.pathEmma = pathEmma
        self.jarEmma = jarEmma
        self.jarEmmaDevice = jarEmmaDevice

    def _runEmmaCommand(self, commandString):
        """Run java with the configured options and the given classpath/args."""
        cmd = "'%s' %s -cp %s" % (self.javaPath, self.javaOpts, commandString)
        return commander.runOnce(cmd)

    def _previewEmmaCmd(self, action, options):
        """Build the classpath + action + options string for an EMMA call."""
        path = os.path.join(self.pathEmma, self.jarEmma)
        cmd = "'%s' %s %s" % (path, action, options)
        return cmd

    def _interpEmmaReturnCode(self, returnCode, outputStr):
        """Map an EMMA exit code to a ``(success, errorMessage)`` tuple.

        EMMA documents exit code 1 as an option-usage failure (also used for
        explicit ``-h``) and 2 as an unknown failure; 0 is success.
        """
        retCode = "Return code is: %s" % returnCode
        # NOTE: later we can select different routines to process
        # different errors
        if returnCode == 1:
            output = "%s\n%s\n%s" % (
                retCode,
                "Failure due to incorrect option usage. This error code is "
                "also returned when command line usage (-h) is requested "
                "explicitly.",
                outputStr)
            return (False, output)
        if returnCode == 2:
            output = "%s\n%s\n%s" % (retCode, "Unknown failure happened.",
                                     outputStr)
            return (False, output)
        return (True, None)

    def _interpResultsEmmaInstrCmd(self, returnCode, outputStr):
        # Identical to the report interpretation today; kept as a separate
        # entry point so instr-specific handling can be added later.
        return self._interpEmmaReturnCode(returnCode, outputStr)

    def _interpResultsEmmaReportCmd(self, returnCode, outputStr):
        return self._interpEmmaReturnCode(returnCode, outputStr)

    def getEmmaDeviceJarPath(self):
        """Return the path to the on-device EMMA jar."""
        path = os.path.join(self.pathEmma, self.jarEmmaDevice)
        return path

    def instr(self, instrpaths=None, outdir=None, emmaMetadataFile=None,
              merge=EMMA_MERGE.YES, outmode=EMMA_OUTMODE.COPY,
              commonOptions=None, filters=None):
        '''
        Instrument class files for coverage collection.

        The command line interface can be found here:
        http://emma.sourceforge.net/reference/ch02s03s03.html

        Args:
        :param instrpaths: iterable of class/jar paths to instrument
        :param outdir: output directory for instrumented classes
        :param emmaMetadataFile: path for the coverage metadata file
        :param merge: EMMA_MERGE value
        :param outmode: EMMA_OUTMODE value
        :param commonOptions: dict of -D key/value overrides
        :param filters: iterable of coverage inclusion/exclusion filters
        Returns: (success, errorMessage) tuple
        '''
        # None defaults (instead of mutable [] / {} defaults) avoid the
        # shared-mutable-default-argument pitfall; falsy values behave the
        # same as empty containers below.
        options = ""
        if instrpaths:
            options += " -instrpath "
            for pth in instrpaths:
                options += "'%s'," % pth
            options = options[0:-1]  # drop trailing comma
        if outdir:
            options += " -outdir '%s'" % outdir
        if emmaMetadataFile:
            options += " -outfile '%s'" % emmaMetadataFile
        options += " -merge %s" % merge
        options += " -outmode %s" % outmode
        if commonOptions:
            # .items() instead of Python 2-only .iteritems()
            for entry in commonOptions.items():
                options += " -D%s=%s" % entry
        if filters:
            options += " -filter "
            for fltr in filters:
                options += "%s," % fltr
            options = options[0:-1]  # drop trailing comma
        cmd = self._previewEmmaCmd("emma instr", options)
        (returnCode, outputStr) = self._runEmmaCommand(cmd)
        return self._interpResultsEmmaInstrCmd(returnCode, outputStr)

    def report(self, inputs=None, report=EMMA_REPORT.HTML, sourcepath=None,
               commonOptions=None):
        '''
        Generate a coverage report.

        The description of the command can be found here:
        http://emma.sourceforge.net/reference/ch02s04s03.html

        :param inputs: iterable of metadata/coverage data files
        :param report: EMMA_REPORT value
        :param sourcepath: iterable of source directories
        :param commonOptions: dict of -D key/value overrides
        '''
        # Generation options can be seen here:
        # http://emma.sourceforge.net/reference/ch03s02.html#prop-ref.report.out.file
        options = ""
        if inputs:
            options += " -input "
            for f in inputs:
                options += f + ","
            options = options[0:-1]  # drop trailing comma
        options += " -report " + report
        if sourcepath:
            options += " -sourcepath "
            for src in sourcepath:
                options += src + ","
            options = options[0:-1]  # drop trailing comma
        if commonOptions:
            for entry in commonOptions.items():
                options += " -D" + entry[0] + "=" + entry[1]
        cmd = self._previewEmmaCmd("emma report", options)
        (returnCode, outputStr) = self._runEmmaCommand(cmd)
        return self._interpResultsEmmaReportCmd(returnCode, outputStr)

    def reportToDir(self, inputs=None, toDir=None, report=EMMA_REPORT.HTML,
                    sourcepath=None, commonoptions=None):
        '''
        This method produces a report into the specified directory.

        Args:
        :param inputs: iterable of metadata/coverage data files
        :param toDir: directory the report is written into
        :param report: EMMA_REPORT value
        :param sourcepath: iterable of source directories
        :param commonoptions: dict of -D key/value overrides
        Returns: (success, errorMessage) tuple
        '''
        options = ""
        if inputs:
            options += " -input "
            for f in inputs:
                options += f + ","
            options = options[0:-1]  # drop trailing comma
        options += " -report " + report
        if sourcepath:
            options += " -sourcepath "
            for src in sourcepath:
                options += src + ","
            options = options[0:-1]  # drop trailing comma
        if toDir:
            # Pick the per-format output-file property and default file name.
            if report == EMMA_REPORT.HTML:
                opt = "report.html.out.file"
                value = os.path.join(toDir, "coverage", "index.html")
            elif report == EMMA_REPORT.XML:
                opt = "report.xml.out.file"
                value = os.path.join(toDir, "coverage.xml")
            elif report == EMMA_REPORT.TXT:
                opt = "report.txt.out.file"
                value = os.path.join(toDir, "coverage.txt")
            options += " -D%s=%s" % (opt, value)
        if commonoptions:
            for entry in commonoptions.items():
                options += " -D" + entry[0] + "=" + entry[1]
        cmd = self._previewEmmaCmd("emma report", options)
        (returnCode, outputStr) = self._runEmmaCommand(cmd)
        return self._interpResultsEmmaReportCmd(returnCode, outputStr)

    def reportToDirWithName(self, inputs=None, toDir=None, mainFileName=None,
                            report=EMMA_REPORT.HTML, sourcepath=None,
                            commonoptions=None):
        '''
        This method produces a report into the specified directory.

        Args:
        :param inputs: iterable of metadata/coverage data files
        :param toDir: directory the report is written into
        :param mainFileName: base name for the report file(s)
        :param report: EMMA_REPORT value
        :param sourcepath: iterable of source directories
        :param commonoptions: dict of -D key/value overrides
        Returns: (success, errorMessage) tuple
        '''
        options = ""
        if inputs:
            options += " -input "
            for f in inputs:
                options += f + ","
            options = options[0:-1]  # drop trailing comma
        options += " -report " + report
        if sourcepath:
            options += " -sourcepath "
            for src in sourcepath:
                options += src + ","
            options = options[0:-1]  # drop trailing comma
        if not mainFileName:
            mainFileName = "coverage"
        if not toDir:
            toDir = ""
        if report == EMMA_REPORT.HTML:
            opt = "report.html.out.file"
            value = os.path.join(toDir, "coverage", "%s.html" % mainFileName)
        elif report == EMMA_REPORT.XML:
            opt = "report.xml.out.file"
            value = os.path.join(toDir, "%s.xml" % mainFileName)
        elif report == EMMA_REPORT.TXT:
            opt = "report.txt.out.file"
            value = os.path.join(toDir, "%s.txt" % mainFileName)
        options += " -D%s=%s" % (opt, value)
        if commonoptions:
            for entry in commonoptions.items():
                options += " -D" + entry[0] + "=" + entry[1]
        cmd = self._previewEmmaCmd("emma report", options)
        (returnCode, outputStr) = self._runEmmaCommand(cmd)
        return self._interpResultsEmmaReportCmd(returnCode, outputStr)

#===============================================================================
# emma_helper = EmmaInterface()
# emma_helper.report(inputs=["coverage.ec", "coverage.em"], commonoptions={"report.html.out.file" : "mycoverage/coverage.html", "testkey" : "testvalue"})
#===============================================================================
from __future__ import absolute_import

from hashlib import sha256
import hmac
import json

import six

from sentry import options
from sentry.models import ApiToken, ProjectKey
from sentry.testutils import TestCase

# Sentinel distinguishing "argument not supplied" (use a valid default) from
# an explicit None passed by a test.
UNSET = object()


class BaseWebhookTest(TestCase):
    """Shared fixtures and POST helper for the Cloudflare webhook tests."""

    def setUp(self):
        super(BaseWebhookTest, self).setUp()
        # One org/team/project owned by a non-superuser, plus an API token
        # and a project key for DSN assertions in the subclasses.
        self.user = self.create_user(is_superuser=False)
        self.org = self.create_organization(owner=None)
        self.team = self.create_team(organization=self.org)
        self.create_member(organization=self.org, user=self.user,
                           role='owner', teams=[self.team])
        self.project = self.create_project(name='a', team=self.team)
        self.token = ApiToken.objects.create(
            user=self.user,
            token='55838c83b3ec4e3ebc24c10c7bd071ffb1dc91161d3d49aeaedd9bd35d84bbe2',
        )
        self.key = ProjectKey.objects.get_or_create(project=self.project)[0]

    def post_webhook(self, data, signature=UNSET, variant=UNSET, key=None):
        # POST ``data`` to the Cloudflare webhook endpoint. By default a
        # valid HMAC-SHA256 signature over the body is computed with the
        # configured secret key and the key variant is '1'; tests pass
        # explicit values (including None) to exercise error paths.
        if key is None:
            key = options.get('cloudflare.secret-key')
        if not isinstance(data, six.string_types):
            body = json.dumps(data)
        else:
            body = data
        if signature is UNSET:
            signature = hmac.new(
                key=key.encode('utf-8'),
                msg=body.encode('utf-8'),
                digestmod=sha256,
            ).hexdigest()
        if variant is UNSET:
            variant = '1'

        headers = {
            'HTTP_X_SIGNATURE_HMAC_SHA256_HEX': signature,
            'HTTP_X_SIGNATURE_KEY_VARIANT': variant,
        }
        return self.client.post(
            '/extensions/cloudflare/webhook/',
            body,
            content_type='application/json',
            **headers
        )


class CloudflareWebhookTest(BaseWebhookTest):
    """Signature / variant validation on the webhook endpoint."""

    def test_missing_signature(self):
        resp = self.post_webhook(
            {'event': 'test'},
            signature=None,
        )
        assert resp.status_code == 400

    def test_invalid_signature(self):
        resp = self.post_webhook(
            {'event': 'test'},
            signature='a' * 40,
        )
        assert resp.status_code == 400

    def test_invalid_json(self):
        # 'a' is not valid JSON.
        resp = self.post_webhook('a')
        assert resp.status_code == 400

    def test_missing_variant(self):
        resp = self.post_webhook(
            {'event': 'test'},
            variant=None,
        )
        assert resp.status_code == 400

    def test_invalid_variant(self):
        resp = self.post_webhook(
            {'event': 'test'},
            variant='fizzbuz',
        )
        assert resp.status_code == 400

    def test_invalid_signature_with_test_variant(self):
        # 'test' variant still requires a signature computed with the
        # test key; the default key is used here, so it must fail.
        resp = self.post_webhook(
            {'event': 'test'},
            variant='test',
        )
        assert resp.status_code == 400

    def test_invalid_app_id_test_variant(self):
        resp = self.post_webhook(
            {'event': 'test', 'app': {'id': 'buzz'}},
            variant='test',
            key='test-key',
        )
        assert resp.status_code == 400

    def test_valid_test_variant(self):
        resp = self.post_webhook(
            {'event': 'test', 'app': {'id': 'local'}, 'install': {}},
            variant='test',
            key='test-key',
        )
        assert resp.status_code == 200


class PreviewWebhookTest(BaseWebhookTest):
    """'preview' event: the endpoint pre-fills install options/schema."""

    def test_empty(self):
        webhook_data = json.loads(self.load_fixture('cloudflare/preview-webhook.json'))
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        # Unauthenticated preview echoes the install unchanged.
        assert resp.data == {
            'install': webhook_data['install'],
            'proceed': True,
        }

    def test_prefills_data(self):
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/preview-webhook-authenticated.json'))
        webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
        resp = self.post_webhook(data=webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        # Org, project, and DSN enums are narrowed to the fixtures we created.
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['schema']['properties']['organization']['enumNames'] == {
            six.text_type(self.org.id): self.org.slug,
        }
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        assert resp.data['install']['schema']['properties']['project']['enum'] == [
            six.text_type(self.project.id)]
        assert resp.data['install']['schema']['properties']['project']['enumNames'] == {
            six.text_type(self.project.id): self.project.slug,
        }
        assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
        assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
            self.key.get_dsn(public=True)]
        assert resp.data['install']['options']['dsn'] == six.text_type(
            self.key.get_dsn(public=True))

    def test_multiple_projects(self):
        project2 = self.create_project(name='b', team=self.team)
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/preview-webhook-authenticated.json'))
        webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        # Both projects are offered; the first remains selected.
        assert resp.data['install']['schema']['properties']['project']['enum'] == [
            six.text_type(self.project.id), six.text_type(project2.id)]
        assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
        assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
            self.key.get_dsn(public=True)]
        assert resp.data['install']['options']['dsn'] == six.text_type(
            self.key.get_dsn(public=True))

    def test_no_projects(self):
        self.project.delete()
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/preview-webhook-authenticated.json'))
        webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        # No projects -> empty enum and no DSN property at all.
        assert resp.data['install']['schema']['properties']['project']['enum'] == []
        assert 'dsn' not in resp.data['install']['schema']['properties']


class OptionChangeAccountWebhookTest(BaseWebhookTest):
    """'option-change:account' event: selection validation and pre-fill."""

    def test_without_authentication(self):
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/option-change-account-webhook.json'))
        del webhook_data['authentications']
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 401, resp.content

    def test_prefills_data(self):
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/option-change-account-webhook.json'))
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        assert resp.data['install']['schema']['properties']['project']['enum'] == [
            six.text_type(self.project.id)]
        assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
        assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
            self.key.get_dsn(public=True)]
        assert resp.data['install']['options']['dsn'] == six.text_type(
            self.key.get_dsn(public=True))

    def test_with_invalid_organization_selected(self):
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/option-change-account-webhook.json'))
        # An unknown org id is replaced with the user's actual org.
        webhook_data['install']['options']['organization'] = -1
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        assert resp.data['install']['schema']['properties']['project']['enum'] == [
            six.text_type(self.project.id)]
        assert resp.data['install']['options']['project'] == six.text_type(self.project.id)
        assert resp.data['install']['schema']['properties']['dsn']['enum'] == [
            self.key.get_dsn(public=True)]
        assert resp.data['install']['options']['dsn'] == six.text_type(
            self.key.get_dsn(public=True))

    def test_with_existing_project_selected_and_no_keys(self):
        project2 = self.create_project(name='b', team=self.team)
        # kill the automatically generated keys
        ProjectKey.objects.filter(project=project2).delete()
        webhook_data = json.loads(self.load_fixture(
            'cloudflare/option-change-account-webhook.json'))
        webhook_data['install']['options']['organization'] = six.text_type(self.org.id)
        webhook_data['install']['options']['project'] = six.text_type(project2.id)
        resp = self.post_webhook(webhook_data)
        assert resp.status_code == 200, resp.content
        assert resp.data['proceed']
        assert resp.data['install']['schema']['properties']['organization']['enum'] == [
            six.text_type(self.org.id)]
        assert resp.data['install']['options']['organization'] == six.text_type(self.org.id)
        assert resp.data['install']['schema']['properties']['project']['enum'] == [
            six.text_type(self.project.id), six.text_type(project2.id)]
        assert resp.data['install']['options']['project'] == six.text_type(project2.id)
        # The selected project has no keys: empty DSN enum, no DSN selected.
        assert resp.data['install']['schema']['properties']['dsn']['enum'] == []
        assert 'dsn' not in resp.data['install']['options']
#Copyright (c) 2011 Erich Schubert erich@debian.org #Permission is hereby granted, free of charge, to any person obtaining a copy #of this software and associated documentation files (the "Software"), to deal #in the Software without restriction, including without limitation the rights #to use, copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the Software is #furnished to do so, subject to the following conditions: #The above copyright notice and this permission notice shall be included in #all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE #SOFTWARE. import sys, socket, signal, re from util import Util from iptables import Iptables #import iptables_parse from exception import PyromanException import subprocess class Firewall: """ Main firewall class. Note that the current code is NOT designed to support multiple firewall objects, but only uses class variables. hostname -- automatically filled with the computers hostname timeout -- timeout for confirmation of the firewall by the user. 
0 will disable auto-rollback vercmd -- command to do an external firewall verification accept -- target chain name for the accept() user command drop -- target chain name for the drop() user command reject -- target chain name for the reject() user command services -- hashmap of known services hosts -- hashmap of known hosts interfaces -- hashmap of known interfaces chains -- hashmap of known chains nats -- list of NAT rules rules -- list of firewall rules """ hostname = socket.gethostname() # Timeout when the firewall setup will be rolled back when # no OK is received. timeout = 0 # Don't do external verification by default vercmd = None # Target names for the "accept", "drop" and "reject" commands accept = "ACCEPT" drop = "DROP" reject = "REJECT" input = "INPUT" output = "OUTPUT" forward = "FORWARD" services = {} hosts = {} interfaces = {} chains = {} nats = [] rules = [] # forwarding firewall. Default to yes forwarding = True # for testing kernel version kernelversioncmd = ["/bin/uname", "-r"] _kernelversion = None def __init__(self): """ Dummy initialization function, will raise PyromanException(an exception!) 
""" raise PyromanException("Instanciation not supported!") class Error(Exception): """ Basic Exception class """ pass def verify(): """ Verify the data inserted into the firewall """ for s in Firewall.services.values(): s.verify() for h in Firewall.hosts.values(): h.verify() for i in Firewall.interfaces.values(): i.verify() for r in Firewall.rules: r.verify() verify = staticmethod(verify) def prepare(): """ Prepare for generation run """ for s in Firewall.services.values(): s.prepare() for h in Firewall.hosts.values(): h.prepare() for i in Firewall.interfaces.values(): i.prepare() for r in Firewall.rules: r.prepare() prepare = staticmethod(prepare) def iptables_version(min=None, max=None): """ Return iptables version or test for a minimum and/or maximum version min -- minimal iptables version required max -- maximum iptables version required """ return Iptables.version(min=min, max=max) iptables_version = staticmethod(iptables_version) def generate(): """ Generate the rules from the specifications given """ Firewall.prepare() for r in Firewall.rules: r.generate() for n in Firewall.nats: n.generate() generate = staticmethod(generate) def calciptableslines(): """ Calculate the lines to be passed to iptables """ # prepare firewall rules l4, l6 = [], [] # collect tables tables = [] for c in Firewall.chains.values(): if not c.table in tables: tables.append(c.table) # process tables for t in tables: # try to provide some useful help info, in case some error occurs l4.append( ["*%s" % t, "table select statement for table %s" % t] ) if t != "nat": l6.append( ["*%s" % t, "table select statement for table %s" % t] ) # first create all chains for c in Firewall.chains.values(): if c.table == t: l4.append( [c.get_init(), c.loginfo] ) if t != "nat": l6.append( [c.get_init(), c.loginfo] ) # then write rules (which might -j to a table not yet created otherwise) for c in Firewall.chains.values(): if c.table == t: for l in c.get_rules4(): l4.append(l) if t != "nat": for l in 
c.get_rules6(): l6.append(l) # commit after each table, try to make a useful error message possible l4.append(["COMMIT", "commit statement for table %s" % t ]) if t != "nat": l6.append(["COMMIT", "commit statement for table %s" % t ]) return l4, l6 calciptableslines = staticmethod(calciptableslines) def rollback(savedlines): """ Rollback changes to the firewall, and report rollback success to the user savedlines -- saved firewall setting to be restored. """ # restore old iptables rules restored = Iptables.restore(savedlines) if restored: sys.stderr.write("*"*70+"\n") sys.stderr.write(" FIREWALL ROLLBACK FAILED.\n") sys.stderr.write("*"*70+"\n") else: sys.stderr.write("Firewall initialization failed. Rollback complete.\n") rollback = staticmethod(rollback) def print_rules(verbose): """ Print the calculated rules, as they would be passed to iptables. """ r4, r6 = Firewall.calciptableslines() print "#### IPv4 rules" for line in r4: if verbose: # print reasoning print "# %s" % line[1] print line[0] print "#### IPv6 rules" for line in r6: if verbose: # print reasoning print "# %s" % line[1] print line[0] print_rules = staticmethod(print_rules) def execute_rules(terse_mode=False): """ Execute the generated rules, rollback on error. If Firewall.timeout is set, give the user some time to accept the new configuration, otherwise roll back automatically. """ def user_confirm_timeout_handler(signum, frame): """ This handler is called when the user does not confirm firewall changes withing the given time limit. The firewall will then be rolled back. """ raise Firewall.Error("Success not confirmed by user") r4, r6 = Firewall.calciptableslines() # Save old firewall. if terse_mode: sys.stderr.write("backing up current... 
") else: sys.stderr.write("Backing up current firewall...\n") savedlines = Iptables.save() # parse the firewall setup #try: # parsed = iptables_parse.parse(savedlines) #except: # pass # now try to execute the new rules successful = False try: if terse_mode: sys.stderr.write("activating new... ") successful = Iptables.commit( (r4, r6) ) if terse_mode: sys.stderr.write("success") else: sys.stderr.write("New firewall commited successfully.\n") if Firewall.vercmd: vcmd = subprocess.Popen(Firewall.vercmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = vcmd.communicate() if len(result[0]) > 0 or len(result[1]) > 0: if len(result[0]) > 0: sys.stderr.write("Verification command output:\n") sys.stderr.write(result[0]) if len(result[1]) > 0: sys.stderr.write("Verification command error output:\n") sys.stderr.write(result[1]) if vcmd.returncode != 0: raise Firewall.Error("External verification command failed.") if Firewall.timeout > 0: sys.stderr.write("To accept the new configuration, type 'OK' within %d seconds!\n" % Firewall.timeout) # setup timeout signal.signal(signal.SIGALRM, user_confirm_timeout_handler) signal.alarm(Firewall.timeout) # wait for user input input = sys.stdin.readline() # reset alarm handling signal.alarm(0) signal.signal(signal.SIGALRM, signal.SIG_DFL) if not re.search("^(OK|YES)", input, re.I): raise Firewall.Error("Success not confirmed by user") except Iptables.Error, e: if terse_mode: sys.stderr.write("error... restoring backup.\n") else: sys.stderr.write("*"*70+"\n") sys.stderr.write("An Iptables error occurred. Starting firewall restoration.\n") Firewall.rollback(savedlines) # show exception sys.stderr.write("%s\n" % e); except Firewall.Error, e: if terse_mode: sys.stderr.write("error. Restoring old firewall.\n") else: sys.stderr.write("*"*70+"\n") sys.stderr.write("A Firewall error occurred. 
Starting firewall restoration.\n") Firewall.rollback(savedlines) # show exception sys.stderr.write("%s\n" % e); except: if terse_mode: sys.stderr.write("error. Restoring old firewall.\n") else: sys.stderr.write("*"*70+"\n") sys.stderr.write("An unknown error occurred. Starting firewall restoration.\n") Firewall.rollback(savedlines) sys.stderr.write("\nHere is the exception triggered during execution:\n") raise execute_rules = staticmethod(execute_rules) def kernel_version(min=None, max=None): """ Return kernel version or test for a minimum and/or maximum version min -- minimal kernel version required max -- maximum kernel version required """ if not Firewall._kernelversion: # query iptables version kvcmd = subprocess.Popen(Firewall.kernelversioncmd, stdout=subprocess.PIPE) result = kvcmd.communicate()[0] Firewall._kernelversion = result.strip() # still no version number? - raise PyromanException(an exception) if not Firewall._kernelversion: raise Error("Couldn't get kernel version!") if not min and not max: return Firewall._kernelversion if min: if Util.compare_versions(Firewall._kernelversion, min) < 0: return False if max: if Util.compare_versions(Firewall._kernelversion, max) > 0: return False return True kernel_version = staticmethod(kernel_version)
'''Arsenal API ec2_instance.'''
#  Copyright 2015 CityGrid Media, LLC
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
import logging
from datetime import datetime
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
from arsenalweb.views.api.common import (
    api_500,
    collect_params,
    )
from arsenalweb.models.common import (
    DBSession,
    )
from arsenalweb.models.ec2_instances import (
    Ec2Instance,
    Ec2InstanceAudit,
    )

LOG = logging.getLogger(__name__)


def find_ec2_instance_by_id(instance_id):
    '''Find an ec2_instance by its instance_id.

    Returns an ec2_instance if found, raises NoResultFound otherwise
    (``.one()`` also raises MultipleResultsFound if the id is not unique).'''

    LOG.debug('Searching for ec2_instance by instance_id: {0}'.format(instance_id))
    ec2 = DBSession.query(Ec2Instance)
    ec2 = ec2.filter(Ec2Instance.instance_id == instance_id)
    return ec2.one()

def create_ec2_instance(instance_id=None, updated_by=None, **kwargs):
    '''Create a new ec2_instance.

    Returns the new Ec2Instance on success, or an api_500 response on error.

    Required params:

    instance_id: A string that is the ec2 instance ID.
    updated_by : A string that is the user making the update.

    Optional kwargs:

    account_id       : A string that is the account_id.
    ami_id           : A string that is the ami_id.
    hostname         : A string that is the hostname.
    instance_type    : A string that is the instance_type.
    availability_zone: A string that is the availability_zone.
    profile          : A string that is the profile.
    reservation_id   : A string that is the reservation_id.
    security_groups  : A string that is the security_groups.
    '''

    try:
        LOG.info('Creating new ec2_instance id: {0}'.format(instance_id))
        utcnow = datetime.utcnow()

        ec2 = Ec2Instance(instance_id=instance_id,
                          updated_by=updated_by,
                          created=utcnow,
                          updated=utcnow,
                          **kwargs)

        DBSession.add(ec2)
        # flush so ec2.id is assigned before writing the audit row
        DBSession.flush()

        # Record a creation marker in the audit trail.
        audit = Ec2InstanceAudit(object_id=ec2.id,
                                 field='instance_id',
                                 old_value='created',
                                 new_value=ec2.instance_id,
                                 updated_by=updated_by,
                                 created=utcnow)
        DBSession.add(audit)
        DBSession.flush()

        return ec2
    except Exception as ex:
        msg = 'Error creating new ec2_instance id: {0} ' \
              'exception: {1}'.format(instance_id, ex)
        LOG.error(msg)
        return api_500(msg=msg)

def update_ec2_instance(ec2, **kwargs):
    '''Update an existing ec2_instance, writing one audit row per changed
    attribute. Returns the updated Ec2Instance; re-raises on error.

    Required params:

    ec2: The existing ec2 object to update.
    updated_by : A string that is the user making the update
        (passed inside kwargs).

    Optional kwargs:

    account_id       : A string that is the account_id.
    ami_id           : A string that is the ami_id.
    hostname         : A string that is the hostname.
    instance_type    : A string that is the instance_type.
    availability_zone: A string that is the availability_zone.
    profile          : A string that is the profile.
    reservation_id   : A string that is the reservation_id.
    security_groups  : A string that is the security_groups.
    '''

    try:
        # Convert everything that is defined to a string.
        my_attribs = kwargs.copy()
        for my_attr in my_attribs:
            if my_attribs.get(my_attr):
                my_attribs[my_attr] = str(my_attribs[my_attr])

        LOG.info('Updating ec2_instance ec2_instance_id={0}'.format(ec2.instance_id))

        utcnow = datetime.utcnow()

        for attribute in my_attribs:
            if attribute == 'instance_id':
                # instance_id is the immutable lookup key.
                LOG.debug('Skipping update to ec2.instance_id.')
                continue
            old_value = getattr(ec2, attribute)
            new_value = my_attribs[attribute]

            # Only audit/apply when the value actually changed and the new
            # value is truthy (None/empty updates are ignored).
            if old_value != new_value and new_value:
                if not old_value:
                    # Audit columns store strings; represent NULL as 'None'.
                    old_value = 'None'

                LOG.debug('Updating ec2_instance: {0} attribute: '
                          '{1} new_value: {2}'.format(ec2.instance_id,
                                                      attribute,
                                                      new_value))
                audit = Ec2InstanceAudit(object_id=ec2.id,
                                         field=attribute,
                                         old_value=old_value,
                                         new_value=new_value,
                                         updated_by=my_attribs['updated_by'],
                                         created=utcnow)
                DBSession.add(audit)
                setattr(ec2, attribute, new_value)

        DBSession.flush()

        return ec2

    except Exception as ex:
        LOG.error('Error updating ec2_instance instance_id={0},'
                  'exception={1}'.format(ec2.instance_id, repr(ex)))
        raise

@view_config(route_name='api_ec2_instances', request_method='GET', request_param='schema=true', renderer='json')
def api_ec2_instances_schema(request):
    '''Schema document for the ec2_instances API.'''

    # NOTE(review): placeholder — no schema is published yet.
    ec2 = {
    }

    return ec2

@view_config(route_name='api_ec2_instances', permission='api_write', request_method='PUT', renderer='json')
def api_ec2_instance_write(request):
    '''Process write requests for /api/ec2_instances route.

    Upsert semantics: update the ec2_instance when the instance_id exists,
    otherwise create it. Returns the instance, or api_500 on failure.'''

    try:
        req_params = [
            'instance_id',
        ]
        opt_params = [
            'account_id',
            'ami_id',
            'availability_zone',
            'hostname',
            'instance_type',
            'profile',
            'reservation_id',
            'security_groups',
        ]
        params = collect_params(request, req_params, opt_params)

        LOG.debug('Searching for ec2_instance id: {0}'.format(params['instance_id']))

        try:
            ec2 = find_ec2_instance_by_id(params['instance_id'])
            ec2 = update_ec2_instance(ec2, **params)
        except NoResultFound:
            ec2 = create_ec2_instance(**params)

        return ec2

    except Exception as ex:
        msg = 'Error writing to ec2_instances API: {0} exception: {1}'.format(request.url, repr(ex))
        LOG.error(msg)
        return api_500(msg=msg)
import numpy
from lib.util import Util
from lib.words import Words

class OLIM:
    # Online location inference: predicts user locations from local words
    # found in tweets, using a pre-trained mixture model over locations.
    def __init__(self, users, tweets, model, lwords):
        # users:  user store with iter()/get(); entries carry 'location_point'
        # tweets: tweet store with get(user_id)/stream()
        self.users = users
        self.user_distributions = {}
        self.tweets = tweets
        self.model = model # GMM in scikit-learn
        self.lwords = lwords # local words if extracted previously
        self.regular_sum = self.calc_regular_sum()

    def calc_regular_sum(self):
        # Sum of squared mixture weights; precomputed for the fast L2 metric.
        return sum([v**2 for v in self.model.weights_])

    def extract_local_words_batch(self, params):
        # Batch variant: one pass over all labeled users; counts, per word,
        # in how many users' vocabularies it appears, bucketed by city.
        lwords = {}
        word_counts = {}
        for user in self.users.iter():
            location = user['location_point']
            if location != None:
                # city = index of the mixture component for this location
                city = str(self.model.predict([location])[0])
                tweets = self.tweets.get(user['id'])
                user_words = set([])
                for tweet in tweets:
                    user_words |= set(Util.get_nouns(tweet['text'], params['lang']))
                for w in user_words:
                    if not w in word_counts:
                        word_counts[w] = {city: 1}
                    elif not city in word_counts[w]:
                        word_counts[w][city] = 1
                    else:
                        word_counts[w][city] += 1
        """ calculating divergences """
        for w in word_counts:
            N = float(sum([v for v in word_counts[w].values()]))
            if N >= params['cmin']:
                d = self.calc_divergence(N, word_counts[w], params)
                if self.check_divergence(d, params) == True:
                    lwords[w] = {'word':w, 'd':d, 'distribution':word_counts[w]} # save as dict
        return Words(lwords)

    def extract_local_words_(self, tweets, params):
        # Streaming variant keyed by user: build each user's word set from
        # the given tweets, then count word occurrences per city.
        lwords = {}
        word_counts = {}
        word_sets = {}
        for tweet in tweets:
            if not tweet['user_id'] in word_sets:
                word_sets[tweet['user_id']] = set([])
            words = Util.get_nouns(tweet['text'], params['lang'])
            word_sets[tweet['user_id']] |= set(words)
        for user_id in word_sets:
            user = self.users.get(user_id)
            if user != None:
                location = user['location_point']
                if location != None:
                    city = str(self.model.predict([location])[0])
                    for w in word_sets[user_id]:
                        if not w in word_counts:
                            word_counts[w] = {}
                        if not city in word_counts[w]:
                            word_counts[w][city] = 0
                        word_counts[w][city] += 1
        """ calculating divergences """
        for w in word_counts:
            N = float(sum([v for v in word_counts[w].values()]))
            if N >= params['cmin']:
                d = self.calc_divergence(N, word_counts[w], params)
                if self.check_divergence(d, params) == True:
                    lwords[w] = {'word':w, 'd':d, 'distribution':word_counts[w]} # save as dict
        return Words(lwords)

    def extract_local_words(self, tweets, params):
        # Variant keyed by word: collect the set of users per word first,
        # then count labeled users per city for each word.
        lwords = {}
        word_counts = {}
        """ making user sets """
        user_sets = {}
        for tweet in tweets:
            words = Util.get_nouns(tweet['text'], params['lang'])
            for w in words:
                if not w in user_sets:
                    user_sets[w] = set([])
                user_sets[w].add(tweet['user_id'])
        """ making word distributions """
        for w in user_sets:
            for user_id in user_sets[w]:
                user = self.users.get(user_id)
                if user != None:
                    location = user['location_point']
                    if location != None:
                        """ labeled user """
                        if not w in word_counts:
                            word_counts[w] = {}
                        city = str(self.model.predict([location])[0])
                        if not city in word_counts[w]:
                            word_counts[w][city] = 0
                        word_counts[w][city] += 1
        """ calculating divergences """
        for w in word_counts:
            N = float(sum([v for v in word_counts[w].values()]))
            if N >= params['cmin']:
                d = self.calc_divergence(N, word_counts[w], params)
                if self.check_divergence(d, params) == True:
                    lwords[w] = {'word':w, 'd':d, 'distribution':word_counts[w]} # save as dict
        return Words(lwords)

    def calc_divergence(self, n, word_count, params):
        # Distance between a word's city distribution and the model prior.
        # n: total count for the word; word_count: {city: count}.
        if params['divergence'] == 'l2':
            d = Util.l2dist_fast(self.regular_sum, self.model.weights_, n, word_count)
            return d
        elif params['divergence'] == 'kl':
            d = Util.kl_div_fast(self.model.weights_, n, word_count)
            return d
        elif params['divergence'] == 'dispersion':
            # geographic spread of component means, weighted by counts
            points = [self.model.means_[int(k)] for k,v in word_count.items() for i in range(0, v)]
            d = Util.calc_dispersion(points)
            return d
        else:
            print 'invalid divergence'
            exit()

    def check_divergence(self, d, params):
        # A word is "local" when it diverges enough from the prior (l2/kl)
        # or is geographically concentrated enough (dispersion: smaller is
        # more local, hence the inverted comparison).
        if params['divergence'] == 'l2':
            if d >= params['dmin']:
                return True
            else:
                return False
        elif params['divergence'] == 'kl':
            if d >= params['dmin']:
                return True
            else:
                return False
        elif params['divergence'] == 'dispersion':
            if d <= params['dmin']:
                return True
            else:
                return False
        else:
            print 'invalid divergence'
            exit()

    def update_user_distributions(self, tweets, tlwords, params):
        # Accumulate city evidence for unlabeled users from both
        # temporally-local words (tlwords) and persistent local words.
        for tweet in tweets:
            user = self.users.get(tweet['user_id'])
            if user != None:
                if user['location_point'] == None:
                    """ unlabeled users """
                    if not user['id'] in self.user_distributions:
                        self.user_distributions[user['id']] = self.init_user_distribution()
                    words = Util.get_nouns(tweet['text'], params['lang'])
                    for w in words:
                        if tlwords.contain(w):
                            """ update using temporally-local word """
                            tlword = tlwords.get(w)
                            self.user_distributions[user['id']] = self.add_distribution(self.user_distributions[user['id']], tlword['distribution'])
                        if self.lwords.contain(w):
                            """ update using local word """
                            lword = self.lwords.get(w)
                            if params['divergence'] in ['l2', 'kl']:
                                if lword['d'] >= params['dmin']:
                                    self.user_distributions[user['id']] = self.add_distribution(self.user_distributions[user['id']], lword['distribution'])
                            else:
                                if lword['d'] <= params['dmin']:
                                    self.user_distributions[user['id']] = self.add_distribution(self.user_distributions[user['id']], lword['distribution'])

    def add_distribution(self, p, q):
        # Element-wise add q's counts into p (mutates and returns p).
        for k in q:
            if not k in p:
                p[k] = 0
            p[k] += q[k]
        return p

    def init_user_distribution(self):
        return {}

    def predict(self, user_distribution, params):
        # Combine prior weights (scaled by r) with the user's accumulated
        # evidence and return the argmax city index.
        B = numpy.array(self.model.weights_) * params['r']
        for k in user_distribution:
            B[int(k)] += user_distribution[k]
        if params['predict'] == 'max':
            pass
        elif params['predict'] == 'div':
            B = B / B.sum() # normalize
            regular = numpy.array(self.model.weights_)
            B = B / regular
        elif params['predict'] == 'sub':
            B = B / B.sum() # normalize
            regular = numpy.array(self.model.weights_)
            B = B - regular
        else:
            print 'invalid prediction method'
            exit()
        return B.argmax()

    def infer(self, params):
        # Main loop: slide a time window over the tweet stream, optionally
        # extract temporally-local words per window, update unlabeled-user
        # distributions, then predict a location for each unlabeled user.
        window = {'tweets':[], 'start':0} # storing tweets
        """ User distribution updating """
        for tweet in self.tweets.stream():
            if type(tweet) == type({}) and 'timestamp' in tweet:
                current_time = Util.str_to_unixtime(Util.time_to_str(tweet['timestamp']))
                window['tweets'].append(tweet)
                if current_time - window['start'] > params['window_size']:
                    if params['tl']:
                        """ use tl-words """
                        tlwords = self.extract_local_words_(window['tweets'], params)
                    else:
                        """ dont use tl-words """
                        tlwords = Words()
                    self.update_user_distributions(window['tweets'], tlwords, params)
                    window = {'tweets':[], 'start':current_time}
        """ Location prediction using user distribution """
        for user in self.users.iter():
            if user['location_point'] == None:
                """ unlabeled user """
                if user['id'] in self.user_distributions and len(self.user_distributions[user['id']]) > 0:
                    inferred_city = self.predict(self.user_distributions[user['id']], params)
                    inferred_location = self.model.means_[inferred_city]
                    user['location_point'] = inferred_location
                else:
                    if params['default']:
                        """ no clues """
                        """ predict using prior """
                        inferred_city = self.predict({}, params)
                        inferred_location = self.model.means_[inferred_city]
                        user['location_point'] = inferred_location

    def get_users(self):
        return self.users

if __name__ == '__main__':
    # CLI driver: load users, DB-backed tweets and a pickled model, then
    # run a batch local-word extraction.
    import sys
    import pickle
    from lib.db import DB
    from lib.users import Users
    from lib.tweets_db import Tweets
    from lib.words import Words
    if len(sys.argv) < 6:
        print '[usage]: python %s [users file path] [db user name] [db pass] [db name] [model filepath]' % sys.argv[0]
        exit()
    users = Users()
    users.load_file(sys.argv[1])
    db = DB(sys.argv[2], sys.argv[3], sys.argv[4])
    tweets = Tweets(db)
    lwords = Words()
    f = open(sys.argv[5], 'r')
    model = pickle.load(f)
    f.close()
    tl = OLIM(users, tweets, model, lwords)
    #print tl.extract_local_words({'dmin':0.05, 'cmin':30})
    #print tl.extract_local_words_(tl.tweets.stream(), {'dmin':0.05, 'cmin':30, 'window_size':1800, 'tl':False, 'default':False, 'divergence':'l2'})
    #print tl.extract_local_words_batch({'dmin':300000, 'cmin':30, 'window_size':1800, 'tl':False, 'default':False, 'divergence':'dispersion'})
    print tl.extract_local_words_batch({'dmin':0.1, 'cmin':30, 'window_size':10800, 'tl':False, 'default':False, 'divergence':'kl'})
    #print tl.extract_local_words_batch({'dmin':1.0, 'cmin':30, 'window_size':1800, 'tl':False, 'default':False, 'divergence':'kl'})
    #print tl.extract_local_words_batch({'dmin':300000, 'cmin':30, 'window_size':1800, 'tl':False, 'default':False, 'divergence':'dispersion'})
# Copyright 2012 Google Inc. All Rights Reserved.

__author__ = 'benvanik@google.com (Ben Vanik)'


class DebuggerProtocol(object):
  """An abstract debugger protocol.

  Protocols implement asynchronous command channels for controlling remote
  debuggers. The debugging interface has been normalized (somewhat) and the
  exact transmission mechanism (TCP/pipes/etc) can be implemented however it
  is required.
  """

  def __init__(self, uri, *args, **kwargs):
    """Initializes a debugger protocol.

    Args:
      uri: Target instance URI.
    """
    self._uri = uri
    # Callbacks are optional; set via the setters below.
    self._detach_callback = None
    self._break_callback = None
    self._exception_callback = None

  def uri(self):
    return self._uri

  def set_detach_callback(self, value):
    self._detach_callback = value

  def set_break_callback(self, value):
    self._break_callback = value

  def set_exception_callback(self, value):
    self._exception_callback = value

  def is_attached(self):
    """
    Returns:
      True if the protocol is attached.
    """
    raise NotImplementedError()

  def attach(self, callback=None):
    """Begins attaching the protocol to the instance.

    Args:
      callback: A function to call when the attaching completes. Receives a
          boolean indicating success.
    """
    raise NotImplementedError()

  def detach(self, terminate, reason=None):
    """Detaches the protocol from the instance.

    Args:
      terminate: True to terminate the target.
      reason: Reason for detaching, or None if user initiated.
    """
    raise NotImplementedError()

  def suspend(self, callback):
    """Suspends the target instance.

    Note that this will not break in the target, but merely suspend
    execution.

    Args:
      callback: A function to call when the suspend completes.
    """
    raise NotImplementedError()

  def resume(self, callback):
    """Resumes the target instance.

    If the target was at a breakpoint this will continue from there.

    Args:
      callback: A function to call when the resume completes.
    """
    raise NotImplementedError()

  def step(self, action, count, callback):
    """Steps the target instance.

    Only valid when suspended at a breakpoint.

    Args:
      action: 'next', 'in', 'out'.
      count: Number of steps to make.
      callback: A function to call when the step completes.
    """
    raise NotImplementedError()

  def change_source(self, uri, new_source, callback):
    """Modifies source code at runtime.

    Here be black magic, and it may not work.

    Args:
      uri: Source URI.
      new_source: New source code contents.
      callback: A function to call when the change completes.
    """
    raise NotImplementedError()

  def add_breakpoint(self, breakpoint, callback):
    """Adds a breakpoint to the debugger.

    Args:
      breakpoint: Breakpoint to add.
      callback: A function to call when the add completes. Inspect for the
          protocol ID used in change/remove requests.
    """
    raise NotImplementedError()

  def change_breakpoint(self, protocol_id, breakpoint, callback):
    """Updates a breakpoint that has changed.

    Args:
      protocol_id: Breakpoint protocol ID.
      breakpoint: Breakpoint that changed.
      callback: A function to call when the change completes.
    """
    raise NotImplementedError()

  def ignore_breakpoint(self, protocol_id, ignore_count, callback):
    """Ignores a breakpoint for a given number of hits.

    Args:
      protocol_id: Breakpoint protocol ID.
      ignore_count: Number of hits to ignore.
      callback: A function to call when the ignore acknowledges.
    """
    raise NotImplementedError()

  def remove_breakpoint(self, protocol_id, callback):
    """Removes a breakpoint from the debugger.

    Args:
      protocol_id: Breakpoint protocol ID.
      callback: A function to call when the remove completes.
    """
    raise NotImplementedError()

  def query_values(self, handle_ids, callback):
    """Queries the values of a list of handles.

    This is only valid while the remote debugger is paused after an event,
    such as a break or exception.

    Args:
      handle_ids: A list of handle IDs.
      callback: A function to call when the query completes.
    """
    raise NotImplementedError()

  def query_frame_scopes(self, frame, callback):
    """Queries the scopes for the given frame.

    This is only valid while the remote debugger is paused after an event,
    such as a break or exception.

    Args:
      frame: Frame to query.
      callback: A function to call when the query completes.
    """
    raise NotImplementedError()


class ProtocolResponse(object):
  """A response to a request made to a protocol.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               *args, **kwargs):
    """Initializes a protocol response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
    """
    self._protocol = protocol
    self._is_running = is_running
    self._is_success = is_success
    self._error_message = error_message
    self._body = body

  def is_running(self):
    return self._is_running

  def is_success(self):
    return self._is_success

  def error_message(self):
    return self._error_message

  def body(self):
    return self._body


class SnapshotResponse(ProtocolResponse):
  """A response containing callstack information.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               handle_set, frames, *args, **kwargs):
    """Initializes a snapshot response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
      handle_set: Handle value set.
      frames: A list of Frames.
    """
    super(SnapshotResponse, self).__init__(
        protocol, is_running, is_success, error_message, body,
        *args, **kwargs)
    self._handle_set = handle_set
    self._frames = frames

  def handle_set(self):
    return self._handle_set

  def frames(self):
    return self._frames


class QueryValuesResponse(ProtocolResponse):
  """A response to value requests.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               handle_set, *args, **kwargs):
    """Initializes a value query response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
      handle_set: Handle value set.
    """
    super(QueryValuesResponse, self).__init__(
        protocol, is_running, is_success, error_message, body,
        *args, **kwargs)
    self._handle_set = handle_set

  def handle_set(self):
    return self._handle_set


class QueryFrameScopesResponse(ProtocolResponse):
  """A response to frame scope value requests.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               handle_set, scopes, *args, **kwargs):
    """Initializes a frame scope query response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
      handle_set: Handle value set.
      scopes: A list of Scopes.
    """
    super(QueryFrameScopesResponse, self).__init__(
        protocol, is_running, is_success, error_message, body,
        *args, **kwargs)
    self._handle_set = handle_set
    self._scopes = scopes

  def handle_set(self):
    return self._handle_set

  def scopes(self):
    return self._scopes


class ChangeSourceResponse(ProtocolResponse):
  """A response to change source requests.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               step_in_required, *args, **kwargs):
    """Initializes a change source response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
      step_in_required: A step-in is required.
    """
    super(ChangeSourceResponse, self).__init__(
        protocol, is_running, is_success, error_message, body,
        *args, **kwargs)
    self._step_in_required = step_in_required

  # Example of the raw body shape this response wraps (reference only):
  # change_log: [
  #   {
  #     'break_points_update': [] ??
  #   },
  #   {
  #     'function_patched': ''
  #   },
  #   {
  #     'position_patched': [...]
  #   }
  # ],
  # result: {
  #   'stack_modified': bool,
  #   'updated': True,
  #   'change_tree': {
  #     'status': 'source changed',
  #     'name': '',
  #     'positions': {
  #       'start_position': 0,
  #       'end_position': 481
  #     },
  #     'new_positions': {
  #       'start_position': 0,
  #       'end_position': 482
  #     },
  #     'new_children': [],
  #     'children': [ ... ]
  #   },
  #   'textual_diff': {
  #     'old_len': 481,
  #     'new_len': 482,
  #     'chunks': [325, 325, 326]
  #   },
  #   'stack_update_needs_step_in': bool
  # }

  def step_in_required(self):
    return self._step_in_required


class AddBreakpointResponse(ProtocolResponse):
  """A response to add breakpoint requests.
  """

  def __init__(self, protocol, is_running, is_success, error_message, body,
               protocol_id, *args, **kwargs):
    """Initializes an add breakpoint response.

    Args:
      protocol: The protocol that this response is from.
      is_running: True if the VM is running.
      is_success: True if the requests was successful.
      error_message: An error message, if not successful.
      body: Raw body. Implementation-specific.
      protocol_id: Breakpoint protocol ID.
    """
    super(AddBreakpointResponse, self).__init__(
        protocol, is_running, is_success, error_message, body,
        *args, **kwargs)
    self._protocol_id = protocol_id
    # TODO(benvanik): actual location line/col

  def protocol_id(self):
    return self._protocol_id


class ProtocolEvent(object):
  """An event fired by the protocol.
  """

  def __init__(self, protocol, source, *args, **kwargs):
    """Initializes a protocol event.

    Args:
      protocol: The protocol that fired this event.
      source: A tuple of (url, line, column).
    """
    self._protocol = protocol
    self._source = source

  def source_url(self):
    return self._source[0]

  def source_line(self):
    return self._source[1]

  def source_column(self):
    return self._source[2]


class BreakEvent(ProtocolEvent):
  """An event indicating that a break occurred.
  """

  def __init__(self, protocol, source, breakpoint_ids, *args, **kwargs):
    """Initializes a break protocol event.

    Args:
      protocol: The protocol that fired this event.
      source: A tuple of (url, line, column).
      breakpoint_ids: A list of breakpoints that were hit, if any.
    """
    super(BreakEvent, self).__init__(protocol, source, *args, **kwargs)
    self._breakpoint_ids = breakpoint_ids

  def breakpoint_ids(self):
    return self._breakpoint_ids


class ExceptionEvent(ProtocolEvent):
  """An event indicating that an exception occurred.
  """

  def __init__(self, protocol, source, is_uncaught, exception,
               *args, **kwargs):
    """Initializes an exception protocol event.

    Args:
      protocol: The protocol that fired this event.
      source: A tuple of (url, line, column).
      is_uncaught: True if the exception was uncaught.
      exception: Exception object.
    """
    super(ExceptionEvent, self).__init__(protocol, source, *args, **kwargs)
    self._is_uncaught = is_uncaught
    self._exception = exception

  def is_uncaught(self):
    return self._is_uncaught

  def exception(self):
    return self._exception


class Frame(object):
  # A single stack frame; value fields are handle references to be resolved
  # through a handle set (see formatted_call).
  def __init__(self, ordinal, location, is_constructor, is_at_return,
               function_ref, this_ref, argument_vars, local_vars):
    self._ordinal = ordinal
    self._location = location
    self._is_constructor = is_constructor
    self._is_at_return = is_at_return
    self._function_ref = function_ref
    self._this_ref = this_ref
    self._arguments = argument_vars
    self._locals = local_vars

  def ordinal(self):
    return self._ordinal

  def location(self):
    return self._location

  def is_constructor(self):
    return self._is_constructor

  def is_at_return(self):
    return self._is_at_return

  def function_ref(self):
    return self._function_ref

  def this_ref(self):
    return self._this_ref

  def argument_refs(self):
    return self._arguments

  def local_refs(self):
    return self._locals

  def formatted_call(self, handle_set):
    # Builds a human-readable "name(arg=value, ...)" string by resolving
    # the function and argument handles through handle_set.
    function = handle_set.get_value(self._function_ref)
    s = '%s(' % (function.inferred_name() or function.name() or '<anonymous>')
    for n in range(len(self._arguments)):
      var = self._arguments[n]
      var_name = var[0]
      var_value = handle_set.get_value(var[1])
      if var_name:
        s += '%s=' % (var_name)
      s += str(var_value)
      if n < len(self._arguments) - 1:
        s += ', '
    s += ')'
    return s


class ScopeType:
  # Enumeration of scope kinds, mirroring the remote debugger's values.
  GLOBAL = 0
  LOCAL = 1
  WITH = 2
  CLOSURE = 3
  CATCH = 4


class Scope(object):
  def __init__(self, ordinal, scope_type, object_ref, *args, **kwargs):
    self._ordinal = ordinal
    self._scope_type = scope_type
    self._object_ref = object_ref

  def ordinal(self):
    return self._ordinal

  def scope_type(self):
    return self._scope_type

  def scope_name(self):
    # Maps the ScopeType constant to a display label.
    if self._scope_type == ScopeType.GLOBAL:
      return 'Global'
    elif self._scope_type == ScopeType.LOCAL:
      return 'Local'
    elif self._scope_type == ScopeType.WITH:
      return 'With'
    elif self._scope_type == ScopeType.CLOSURE:
      return 'Closure'
    elif self._scope_type ==
ScopeType.CATCH: return 'Catch' else: return 'Unknown' def object_ref(self): return self._object_ref class HandleSet(object): def __init__(self, *args, **kwargs): self._values = {} def merge(self, other): for value in other._values: self.add_value(value) def add_value(self, value): self._values[value.handle_id()] = value def has_value(self, handle_id): return self._values.get(handle_id, None) != None def get_value(self, handle_id): return self._values.get(handle_id, None) def dump(self): print 'handle set contains %s values:' % (len(self._values.keys())) for (key, value) in self._values.items(): print ' %s: %s' % (key, value) def print_value(self, key, handle_id): dumper = _RecursiveDumper(self) dumper.dump(key, self.get_value(handle_id)) return dumper.output() class _RecursiveDumper(object): def __init__(self, handle_set): self._handle_set = handle_set self._stack = [] self._output = '' def output(self): return self._output def dump(self, key, value): if key: indent = ''.join([' ' for n in range(len(self._stack))]) self._output += '%s%s: %s\n' % (indent, key, value) if value in self._stack: return if isinstance(value, JSObject): self._stack.append(value) if isinstance(value, JSFunction): self._dump_function(value) else: self._dump_object(value) self._stack.pop() def _dump_function(self, value): pass def _dump_object(self, value): for p in value.properties(): self.dump(p.name(), self._handle_set.get_value(p.ref())) class JSHandle(object): def __init__(self, handle_id, handle_type, *args, **kwargs): self._handle_id = handle_id self._handle_type = handle_type def handle_id(self): return self._handle_id def handle_type(self): return self._handle_type class JSUndefined(JSHandle): def __init__(self, handle_id, *args, **kwargs): super(JSUndefined, self).__init__(handle_id, 'undefined', *args, **kwargs) def __repr__(self): return 'undefined' class JSNull(JSHandle): def __init__(self, handle_id, *args, **kwargs): super(JSNull, self).__init__(handle_id, 'null', *args, 
**kwargs) def __repr__(self): return 'null' class JSBoolean(JSHandle): def __init__(self, handle_id, value, *args, **kwargs): super(JSBoolean, self).__init__(handle_id, 'boolean', *args, **kwargs) self._value = value def value(self): return self._value def __repr__(self): return 'true' if self._value else 'false' class JSNumber(JSHandle): def __init__(self, handle_id, value, *args, **kwargs): super(JSNumber, self).__init__(handle_id, 'number', *args, **kwargs) self._value = value def value(self): return self._value def __repr__(self): return str(self._value) class JSString(JSHandle): def __init__(self, handle_id, value, *args, **kwargs): super(JSString, self).__init__(handle_id, 'string', *args, **kwargs) self._value = value def value(self): return self._value def __repr__(self): return '"%s"' % (self._value) class JSScript(JSHandle): def __init__(self, handle_id, uri, *args, **kwargs): super(JSScript, self).__init__(handle_id, 'script', *args, **kwargs) self._uri = uri def uri(self): return self._uri def __repr__(self): return self._uri class JSObject(JSHandle): def __init__(self, handle_id, class_name, constructor_ref, prototype_ref, properties, *args, **kwargs): super(JSObject, self).__init__(handle_id, 'object', *args, **kwargs) self._class_name = class_name self._constructor_ref = constructor_ref self._prototype_ref = prototype_ref self._properties = properties def class_name(self): return self._class_name def constructor_ref(self): return self._constructor_ref def prototype_ref(self): return self._prototype_ref def properties(self): return self._properties def __repr__(self): return '<object %s>' % (self.handle_id()) class JSProperty(object): def __init__(self, name, ref, property_type, attributes, *args, **kwargs): self._name = name self._ref = ref self._property_type = property_type self._attributes = attributes def name(self): return self._name def ref(self): return self._ref def property_type(self): return self._property_type def attributes(self): return 
self._attributes def __repr__(self): return '%s = <%s>' % (self._name, self._ref) class JSFunction(JSObject): def __init__(self, handle_id, class_name, constructor_ref, prototype_ref, properties, name, inferred_name, location, *args, **kwargs): super(JSFunction, self).__init__(handle_id, class_name, constructor_ref, prototype_ref, properties, *args, **kwargs) self._name = name self._inferred_name = inferred_name self._location = location def name(self): return self._name def inferred_name(self): return self._inferred_name def location(self): return self._location def __repr__(self): name = self._inferred_name or self._name if self._location: return '%s (%s@%s:%s)' % (name, self._location[0], self._location[1], self._location[2]) else: return name
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

import ddt
import mock
from oslo_config import cfg
import requests
from six.moves.urllib import parse

from rally import exceptions
from rally.verification.tempest import config
from tests.unit import fakes
from tests.unit import test

CONF = cfg.CONF

# Fake deployment credentials returned by the mocked deployment_get()
# in setUp() below; the values are asserted against throughout the tests.
CREDS = {
    "admin": {
        "username": "admin",
        "tenant_name": "admin",
        "password": "admin-12345",
        "auth_url": "http://test/v2.0/",
        "permission": "admin",
        "region_name": "test",
        "admin_domain_name": "Default",
        "https_insecure": False,
        "https_cacert": "/path/to/cacert/file"
    }
}


@ddt.ddt
class TempestConfigTestCase(test.TestCase):
    """Unit tests for config.TempestConfig (tempest.conf generation)."""

    def setUp(self):
        super(TempestConfigTestCase, self).setUp()

        # Stub out the deployment DB lookup and OpenStack clients so the
        # config object can be built without a real deployment.
        mock.patch("rally.common.objects.deploy.db.deployment_get",
                   return_value=CREDS).start()
        mock.patch("rally.osclients.Clients").start()
        self.mock_isfile = mock.patch("os.path.isfile",
                                      return_value=True).start()

        self.tempest_conf = config.TempestConfig("fake_deployment")

    @mock.patch("os.rename")
    @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
    @mock.patch("requests.get", return_value=mock.MagicMock(status_code=200))
    def test__download_cirros_image_success(self, mock_get, mock_open,
                                            mock_rename):
        # isfile=False forces the download path to be exercised.
        self.mock_isfile.return_value = False
        self.tempest_conf._download_cirros_image()
        mock_get.assert_called_once_with(
            CONF.image.cirros_img_url, stream=True)

    @mock.patch("requests.get")
    @ddt.data(404, 500)
    def test__download_cirros_image_failure(self, status_code, mock_get):
        self.mock_isfile.return_value = False
        mock_get.return_value = mock.MagicMock(status_code=status_code)
        self.assertRaises(exceptions.TempestConfigCreationFailure,
                          self.tempest_conf._download_cirros_image)

    @mock.patch("requests.get", side_effect=requests.ConnectionError())
    def test__download_cirros_image_connection_error(
            self, mock_requests_get):
        self.mock_isfile.return_value = False
        self.assertRaises(exceptions.TempestConfigCreationFailure,
                          self.tempest_conf._download_cirros_image)

    def test__get_service_url(self):
        # Fake a keystone service catalog containing one endpoint.
        self.tempest_conf.keystone.auth_ref = {
            "serviceCatalog": [
                {
                    "name": "test_service",
                    "type": "test_service_type",
                    "endpoints": [{"publicURL": "test_url"}]
                }
            ]
        }
        self.tempest_conf.clients.services.return_value = {
            "test_service_type": "test_service"}
        self.assertEqual(
            self.tempest_conf._get_service_url("test_service"), "test_url")

    @mock.patch("rally.verification.tempest.config."
                "TempestConfig._get_service_url", return_value="test_url")
    def test__configure_boto(self, mock__get_service_url):
        self.tempest_conf._configure_boto()
        expected = (("ec2_url", "test_url"),
                    ("s3_url", "test_url"),
                    ("http_socket_timeout", "30"),
                    ("s3_materials_path",
                     os.path.join(self.tempest_conf.data_dir, "s3materials")))
        result = self.tempest_conf.conf.items("boto")
        for item in expected:
            self.assertIn(item, result)

    def test__configure_default(self):
        self.tempest_conf._configure_default()
        expected = (("debug", "True"),
                    ("log_file", "tempest.log"),
                    ("use_stderr", "False"))
        results = self.tempest_conf.conf.items("DEFAULT")
        self.assertEqual(sorted(expected), sorted(results))

    def test__configure_dashboard(self):
        self.tempest_conf._configure_dashboard()
        # Dashboard URL is derived from the auth_url's hostname.
        url = "http://%s/" % parse.urlparse(
            CREDS["admin"]["auth_url"]).hostname
        self.assertEqual(
            self.tempest_conf.conf.get("dashboard", "dashboard_url"), url)

    def test__configure_identity(self):
        self.tempest_conf._configure_identity()
        # NOTE(review): "admin_tenant_name" is expected to come from
        # CREDS["admin"]["username"], not ["tenant_name"]; both are "admin"
        # here so the assertion cannot distinguish them -- verify against
        # the implementation if either value changes.
        expected = (
            ("username", CREDS["admin"]["username"]),
            ("password", CREDS["admin"]["password"]),
            ("tenant_name", CREDS["admin"]["tenant_name"]),
            ("admin_username", CREDS["admin"]["username"]),
            ("admin_password", CREDS["admin"]["password"]),
            ("admin_tenant_name", CREDS["admin"]["username"]),
            ("admin_domain_name", CREDS["admin"]["admin_domain_name"]),
            ("region", CREDS["admin"]["region_name"]),
            ("uri", CREDS["admin"]["auth_url"]),
            ("uri_v3", CREDS["admin"]["auth_url"].replace("/v2.0/", "/v3")),
            ("disable_ssl_certificate_validation",
             str(CREDS["admin"]["https_insecure"])),
            ("ca_certificates_file", CREDS["admin"]["https_cacert"]))
        result = self.tempest_conf.conf.items("identity")
        for item in expected:
            self.assertIn(item, result)

    def test__configure_network_if_neutron(self):
        self.tempest_conf.available_services = ["neutron"]
        client = self.tempest_conf.clients.neutron()
        # One active external network should be picked as the public net.
        client.list_networks.return_value = {
            "networks": [
                {
                    "status": "ACTIVE",
                    "id": "test_id",
                    "router:external": True
                }
            ]
        }
        self.tempest_conf._configure_network()
        self.assertEqual(
            self.tempest_conf.conf.get("network", "public_network_id"),
            "test_id")

    def test__configure_network_if_nova(self):
        self.tempest_conf.available_services = ["nova"]
        client = self.tempest_conf.clients.nova()
        client.networks.list.return_value = [
            mock.MagicMock(human_id="fake-network")]
        self.tempest_conf._configure_network()
        expected = (("network_for_ssh", "fake-network"),
                    ("fixed_network_name", "fake-network"))
        result = self.tempest_conf.conf.items("compute")
        for item in expected:
            self.assertIn(item, result)

    def test__configure_network_feature_enabled(self):
        self.tempest_conf.available_services = ["neutron"]
        client = self.tempest_conf.clients.neutron()
        client.list_ext.return_value = {
            "extensions": [
                {"alias": "dvr"},
                {"alias": "extra_dhcp_opt"},
                {"alias": "extraroute"}
            ]
        }
        self.tempest_conf._configure_network_feature_enabled()
        self.assertEqual(self.tempest_conf.conf.get(
            "network-feature-enabled", "api_extensions"),
            "dvr,extra_dhcp_opt,extraroute")

    @mock.patch("os.makedirs")
    @mock.patch("os.path.exists", return_value=False)
    def test__configure_oslo_concurrency(self, mock_exists, mock_makedirs):
        self.tempest_conf._configure_oslo_concurrency()
        # Lock dir is per-deployment, created when missing.
        lock_path = os.path.join(
            self.tempest_conf.data_dir, "lock_files_fake_deployment")
        mock_makedirs.assert_called_with(lock_path)
        self.assertEqual(
            self.tempest_conf.conf.get(
                "oslo_concurrency", "lock_path"), lock_path)

    def test__configure_object_storage(self):
        self.tempest_conf._configure_object_storage()
        expected = (
            ("operator_role", CONF.role.swift_operator_role),
            ("reseller_admin_role", CONF.role.swift_reseller_admin_role))
        result = self.tempest_conf.conf.items("object-storage")
        for item in expected:
            self.assertIn(item, result)

    def test__configure_orchestration(self):
        self.tempest_conf._configure_orchestration()
        expected = (
            ("stack_owner_role", CONF.role.heat_stack_owner_role),
            ("stack_user_role", CONF.role.heat_stack_user_role))
        result = self.tempest_conf.conf.items("orchestration")
        for item in expected:
            self.assertIn(item, result)

    def test__configure_scenario(self):
        self.tempest_conf._configure_scenario()
        expected = (("img_dir", self.tempest_conf.data_dir),
                    ("img_file", config.IMAGE_NAME))
        result = self.tempest_conf.conf.items("scenario")
        for item in expected:
            self.assertIn(item, result)

    @mock.patch("requests.get", return_value=mock.MagicMock(status_code=200))
    def test__configure_service_available(self, mock_get):
        available_services = ("nova", "cinder", "glance", "sahara")
        self.tempest_conf.available_services = available_services
        self.tempest_conf._configure_service_available()
        # Horizon availability is probed over HTTP at the auth host.
        expected_horizon_url = "http://test"
        expected_timeout = CONF.openstack_client_http_timeout
        mock_get.assert_called_once_with(expected_horizon_url,
                                         timeout=expected_timeout)
        expected = (
            ("neutron", "False"), ("heat", "False"), ("nova", "True"),
            ("swift", "False"), ("cinder", "True"), ("sahara", "True"),
            ("glance", "True"), ("horizon", "True"), ("ceilometer", "False"))
        result = self.tempest_conf.conf.items("service_available")
        for item in expected:
            self.assertIn(item, result)

    @mock.patch("requests.get", return_value=mock.MagicMock(status_code=404))
    def test__configure_service_available_horizon_not_available(
            self, mock_get):
        self.tempest_conf._configure_service_available()
        self.assertEqual(
            self.tempest_conf.conf.get(
                "service_available", "horizon"), "False")

    @mock.patch("requests.get", side_effect=requests.Timeout())
    def test__configure_service_available_horizon_request_timeout(
            self, mock_get):
        self.tempest_conf._configure_service_available()
        self.assertEqual(
            self.tempest_conf.conf.get(
                "service_available", "horizon"), "False")

    @ddt.data({}, {"service": "neutron", "connect_method": "floating"})
    @ddt.unpack
    def test__configure_validation(self, service="nova",
                                   connect_method="fixed"):
        self.tempest_conf.available_services = [service]
        self.tempest_conf._configure_validation()
        self.assertEqual(connect_method,
                         self.tempest_conf.conf.get("validation",
                                                    "connect_method"))

    @mock.patch("rally.verification.tempest.config._write_config")
    @mock.patch("inspect.getmembers")
    def test_generate(self, mock_inspect_getmembers, mock__write_config):
        # generate() discovers and invokes every _configure_* member.
        configure_something_method = mock.MagicMock()
        mock_inspect_getmembers.return_value = [("_configure_something",
                                                 configure_something_method)]
        self.tempest_conf.generate("/path/to/fake/conf")
        self.assertEqual(configure_something_method.call_count, 1)
        self.assertEqual(mock__write_config.call_count, 1)

    @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
    def test__write_config(self, mock_open):
        conf_path = "/path/to/fake/conf"
        conf_data = mock.Mock()
        config._write_config(conf_path, conf_data)
        mock_open.assert_called_once_with(conf_path, "w+")
        conf_data.write.assert_called_once_with(mock_open.side_effect())


class TempestResourcesContextTestCase(test.TestCase):
    """Unit tests for config.TempestResourcesContext (resource setup)."""

    def setUp(self):
        super(TempestResourcesContextTestCase, self).setUp()
        mock.patch("rally.common.objects.deploy.db.deployment_get",
                   return_value=CREDS).start()
        mock.patch("rally.osclients.Clients").start()

        self.context = config.TempestResourcesContext("fake_deployment",
                                                      "/fake/path/to/config")
        self.context.conf.add_section("compute")

    @mock.patch("rally.plugins.openstack.wrappers."
                "network.NeutronWrapper.create_network")
    @mock.patch("six.moves.builtins.open", side_effect=mock.mock_open())
    def test_options_configured_manually(
            self, mock_open, mock_neutron_wrapper_create_network):
        # When every option is already set in the conf, entering the
        # context must not create any resources.
        self.context.available_services = ["glance", "nova", "neutron"]

        self.context.conf.set("compute", "image_ref", "id1")
        self.context.conf.set("compute", "image_ref_alt", "id2")
        self.context.conf.set("compute", "flavor_ref", "id3")
        self.context.conf.set("compute", "flavor_ref_alt", "id4")
        self.context.conf.set("compute", "fixed_network_name", "name1")

        self.context.__enter__()

        glanceclient = self.context.clients.glance()
        novaclient = self.context.clients.nova()

        self.assertEqual(glanceclient.images.create.call_count, 0)
        self.assertEqual(novaclient.flavors.create.call_count, 0)
        self.assertEqual(mock_neutron_wrapper_create_network.call_count, 0)

    def test__create_tempest_roles(self):
        role1 = CONF.role.swift_operator_role
        role2 = CONF.role.swift_reseller_admin_role
        role3 = CONF.role.heat_stack_owner_role
        role4 = CONF.role.heat_stack_user_role

        client = self.context.clients.verified_keystone()
        # role1/role2 already exist; only role3/role4 should be created.
        client.roles.list.return_value = [fakes.FakeRole(name=role1),
                                          fakes.FakeRole(name=role2)]
        # NOTE(review): FakeFlavor is used here for role objects --
        # presumably a copy-paste from the flavor tests; only .name is
        # read, so the test still works. Confirm whether FakeRole was
        # intended.
        client.roles.create.side_effect = [fakes.FakeFlavor(name=role3),
                                           fakes.FakeFlavor(name=role4)]

        self.context._create_tempest_roles()
        self.assertEqual(client.roles.create.call_count, 2)

        created_roles = [role.name for role in self.context._created_roles]
        self.assertIn(role3, created_roles)
        self.assertIn(role4, created_roles)

    # We can choose any option to test the '_configure_option' method. So
    # let's configure the 'flavor_ref' option.
    def test__configure_option(self):
        create_method = mock.MagicMock()
        create_method.side_effect = [fakes.FakeFlavor(id="id1")]

        self.context.conf.set("compute", "flavor_ref", "")
        self.context._configure_option("flavor_ref", create_method, 64)
        self.assertEqual(create_method.call_count, 1)

        result = self.context.conf.get("compute", "flavor_ref")
        self.assertEqual("id1", result)

    @mock.patch("six.moves.builtins.open")
    def test__create_image(self, mock_open):
        client = self.context.clients.glance()
        client.images.create.side_effect = [fakes.FakeImage(id="id1")]

        image = self.context._create_image()
        self.assertEqual("id1", image.id)
        # Created images are tracked for later cleanup.
        self.assertEqual("id1", self.context._created_images[0].id)

    def test__create_flavor(self):
        client = self.context.clients.nova()
        client.flavors.create.side_effect = [fakes.FakeFlavor(id="id1")]

        flavor = self.context._create_flavor(64)
        self.assertEqual("id1", flavor.id)
        self.assertEqual("id1", self.context._created_flavors[0].id)

    @mock.patch("rally.plugins.openstack.wrappers."
                "network.NeutronWrapper.create_network")
    def test__create_network_resources(
            self, mock_neutron_wrapper_create_network):
        mock_neutron_wrapper_create_network.side_effect = [
            fakes.FakeNetwork(id="id1")]

        network = self.context._create_network_resources()
        self.assertEqual("id1", network.id)
        self.assertEqual("id1", self.context._created_networks[0].id)

    def test__cleanup_tempest_roles(self):
        self.context._created_roles = [fakes.FakeRole(), fakes.FakeRole()]

        self.context._cleanup_tempest_roles()
        client = self.context.clients.keystone()
        self.assertEqual(client.roles.delete.call_count, 2)

    def test__cleanup_images(self):
        self.context._created_images = [fakes.FakeImage(id="id1"),
                                        fakes.FakeImage(id="id2")]
        self.context.conf.set("compute", "image_ref", "id1")
        self.context.conf.set("compute", "image_ref_alt", "id2")

        self.context._cleanup_images()
        client = self.context.clients.glance()
        self.assertEqual(client.images.delete.call_count, 2)

        # Cleanup must also blank the options that referenced the images.
        self.assertEqual("", self.context.conf.get("compute", "image_ref"))
        self.assertEqual("", self.context.conf.get("compute",
                                                   "image_ref_alt"))

    def test__cleanup_flavors(self):
        self.context._created_flavors = [fakes.FakeFlavor(id="id1"),
                                         fakes.FakeFlavor(id="id2")]
        self.context.conf.set("compute", "flavor_ref", "id1")
        self.context.conf.set("compute", "flavor_ref_alt", "id2")

        self.context._cleanup_flavors()
        client = self.context.clients.nova()
        self.assertEqual(client.flavors.delete.call_count, 2)

        self.assertEqual("", self.context.conf.get("compute", "flavor_ref"))
        self.assertEqual("", self.context.conf.get("compute",
                                                   "flavor_ref_alt"))

    @mock.patch("rally.plugins.openstack.wrappers."
                "network.NeutronWrapper.delete_network")
    def test__cleanup_network_resources(
            self, mock_neutron_wrapper_delete_network):
        self.context._created_networks = [{"name": "net-12345"}]
        self.context.conf.set("compute", "fixed_network_name", "net-12345")

        self.context._cleanup_network_resources()
        self.assertEqual(mock_neutron_wrapper_delete_network.call_count, 1)
        self.assertEqual("", self.context.conf.get("compute",
                                                   "fixed_network_name"))
""" NetCDF reader/writer module. This module is used to read and create NetCDF files. NetCDF files are accessed through the `netcdf_file` object. Data written to and from NetCDF files are contained in `netcdf_variable` objects. Attributes are given as member variables of the `netcdf_file` and `netcdf_variable` objects. This module implements the Scientific.IO.NetCDF API to read and create NetCDF files. The same API is also used in the PyNIO and pynetcdf modules, allowing these modules to be used interchangeably when working with NetCDF files. """ from __future__ import division, print_function, absolute_import # TODO: # * properly implement ``_FillValue``. # * implement Jeff Whitaker's patch for masked variables. # * fix character variables. # * implement PAGESIZE for Python 2.6? # The Scientific.IO.NetCDF API allows attributes to be added directly to # instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate # between user-set attributes and instance attributes, user-set attributes # are automatically stored in the ``_attributes`` attribute by overloading #``__setattr__``. This is the reason why the code sometimes uses #``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``; # otherwise the key would be inserted into userspace attributes. 
__all__ = ['netcdf_file'] import warnings import weakref from operator import mul import mmap as mm import numpy as np from numpy.compat import asbytes, asstr from numpy import fromstring, ndarray, dtype, empty, array, asarray from numpy import little_endian as LITTLE_ENDIAN from functools import reduce from scipy.lib.six import integer_types, text_type, binary_type ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' ZERO = b'\x00\x00\x00\x00' NC_BYTE = b'\x00\x00\x00\x01' NC_CHAR = b'\x00\x00\x00\x02' NC_SHORT = b'\x00\x00\x00\x03' NC_INT = b'\x00\x00\x00\x04' NC_FLOAT = b'\x00\x00\x00\x05' NC_DOUBLE = b'\x00\x00\x00\x06' NC_DIMENSION = b'\x00\x00\x00\n' NC_VARIABLE = b'\x00\x00\x00\x0b' NC_ATTRIBUTE = b'\x00\x00\x00\x0c' TYPEMAP = {NC_BYTE: ('b', 1), NC_CHAR: ('c', 1), NC_SHORT: ('h', 2), NC_INT: ('i', 4), NC_FLOAT: ('f', 4), NC_DOUBLE: ('d', 8)} REVERSE = {('b', 1): NC_BYTE, ('B', 1): NC_CHAR, ('c', 1): NC_CHAR, ('h', 2): NC_SHORT, ('i', 4): NC_INT, ('f', 4): NC_FLOAT, ('d', 8): NC_DOUBLE, # these come from asarray(1).dtype.char and asarray('foo').dtype.char, # used when getting the types from generic attributes. ('l', 4): NC_INT, ('S', 1): NC_CHAR} class netcdf_file(object): """ A file object for NetCDF data. A `netcdf_file` object has two standard attributes: `dimensions` and `variables`. The values of both are dictionaries, mapping dimension names to their associated lengths and variable names to variables, respectively. Application programs should never modify these dictionaries. All other attributes correspond to global attributes defined in the NetCDF file. Global file attributes are created by assigning to an attribute of the `netcdf_file` object. Parameters ---------- filename : string or file-like string -> filename mode : {'r', 'w', 'a'}, optional read-write-append mode, default is 'r' mmap : None or bool, optional Whether to mmap `filename` when reading. Default is True when `filename` is a file name, False when `filename` is a file-like object. 
Note that when mmap is in use, data arrays returned refer directly to the mmapped data on disk, and the file cannot be closed as long as references to it exist. version : {1, 2}, optional version of netcdf to read / write, where 1 means *Classic format* and 2 means *64-bit offset format*. Default is 1. See `here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`_ for more info. Notes ----- The major advantage of this module over other modules is that it doesn't require the code to be linked to the NetCDF libraries. This module is derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_. NetCDF files are a self-describing binary data format. The file contains metadata that describes the dimensions and variables in the file. More details about NetCDF files can be found `here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`_. There are three main sections to a NetCDF data structure: 1. Dimensions 2. Variables 3. Attributes The dimensions section records the name and length of each dimension used by the variables. The variables would then indicate which dimensions it uses and any attributes such as data units, along with containing the data values for the variable. It is good practice to include a variable that is the same name as a dimension to provide the values for that axes. Lastly, the attributes section would contain additional information such as the name of the file creator or the instrument used to collect the data. When writing data to a NetCDF file, there is often the need to indicate the 'record dimension'. A record dimension is the unbounded dimension for a variable. For example, a temperature variable may have dimensions of latitude, longitude and time. If one wants to add more temperature data to the NetCDF file as time progresses, then the temperature variable should have the time dimension flagged as the record dimension. 
In addition, the NetCDF file header contains the position of the data in the file, so access can be done in an efficient manner without loading unnecessary data into memory. It uses the ``mmap`` module to create Numpy arrays mapped to the data on disk, for the same purpose. Note that when `netcdf_file` is used to open a file with mmap=True (default for read-only), arrays returned by it refer to data directly on the disk. The file should not be closed, and cannot be cleanly closed when asked, if such arrays are alive. You may want to copy data arrays obtained from mmapped Netcdf file if they are to be processed after the file is closed, see the example below. Examples -------- To create a NetCDF file: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'w') >>> f.history = 'Created for a test' >>> f.createDimension('time', 10) >>> time = f.createVariable('time', 'i', ('time',)) >>> time[:] = np.arange(10) >>> time.units = 'days since 2008-01-01' >>> f.close() Note the assignment of ``range(10)`` to ``time[:]``. Exposing the slice of the time variable allows for the data to be set in the object, rather than letting ``range(10)`` overwrite the ``time`` variable. 
To read the NetCDF file we just created: >>> from scipy.io import netcdf >>> f = netcdf.netcdf_file('simple.nc', 'r') >>> print(f.history) Created for a test >>> time = f.variables['time'] >>> print(time.units) days since 2008-01-01 >>> print(time.shape) (10,) >>> print(time[-1]) 9 NetCDF files, when opened read-only, return arrays that refer directly to memory-mapped data on disk: >>> data = time[:] >>> data.base.base <mmap.mmap object at 0x7fe753763180> If the data is to be processed after the file is closed, it needs to be copied to main memory: >>> data = time[:].copy() >>> f.close() >>> data.mean() A NetCDF file can also be used as context manager: >>> from scipy.io import netcdf >>> with netcdf.netcdf_file('simple.nc', 'r') as f: >>> print(f.history) Created for a test """ def __init__(self, filename, mode='r', mmap=None, version=1): """Initialize netcdf_file from fileobj (str or file-like).""" if mode not in 'rwa': raise ValueError("Mode must be either 'r', 'w' or 'a'.") if hasattr(filename, 'seek'): # file-like self.fp = filename self.filename = 'None' if mmap is None: mmap = False elif mmap and not hasattr(filename, 'fileno'): raise ValueError('Cannot use file object for mmap') else: # maybe it's a string self.filename = filename omode = 'r+' if mode == 'a' else mode self.fp = open(self.filename, '%sb' % omode) if mmap is None: mmap = True if mode != 'r': # Cannot read write-only files mmap = False self.use_mmap = mmap self.mode = mode self.version_byte = version self.dimensions = {} self.variables = {} self._dims = [] self._recs = 0 self._recsize = 0 self._mm = None self._mm_buf = None if self.use_mmap: self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ) self._mm_buf = np.frombuffer(self._mm, dtype=np.int8) self._attributes = {} if mode in 'ra': self._read() def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can save them to file later. 
        try:
            self._attributes[attr] = value
        except AttributeError:
            # _attributes does not exist yet during early __init__;
            # fall through and set the plain instance attribute only.
            pass
        self.__dict__[attr] = value

    def close(self):
        """Closes the NetCDF file."""
        if not self.fp.closed:
            try:
                self.flush()
            finally:
                self.variables = {}
                if self._mm_buf is not None:
                    # Use a weakref to detect whether any external arrays
                    # still reference the mmapped buffer.
                    ref = weakref.ref(self._mm_buf)
                    self._mm_buf = None
                    if ref() is None:
                        # self._mm_buf is gc'd, and we can close the mmap
                        self._mm.close()
                    else:
                        # we cannot close self._mm, since self._mm_buf is
                        # alive and there may still be arrays referring to it
                        warnings.warn((
                            "Cannot close a netcdf_file opened with mmap=True, when "
                            "netcdf_variables or arrays referring to its data still exist. "
                            "All data arrays obtained from such files refer directly to "
                            "data on disk, and must be copied before the file can be cleanly "
                            "closed. (See netcdf_file docstring for more information on mmap.)"
                        ), category=RuntimeWarning)
                self._mm = None
                self.fp.close()
    # Closing on garbage collection mirrors an explicit close().
    __del__ = close

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def createDimension(self, name, length):
        """
        Adds a dimension to the Dimension section of the NetCDF data
        structure.

        Note that this function merely adds a new dimension that the
        variables can reference. The values for the dimension, if desired,
        should be added as a variable using `createVariable`, referring to
        this dimension.

        Parameters
        ----------
        name : str
            Name of the dimension (Eg, 'lat' or 'time').
        length : int
            Length of the dimension.

        See Also
        --------
        createVariable

        """
        self.dimensions[name] = length
        self._dims.append(name)

    def createVariable(self, name, type, dimensions):
        """
        Create an empty variable for the `netcdf_file` object, specifying
        its data type and the dimensions it uses.

        Parameters
        ----------
        name : str
            Name of the new variable.
        type : dtype or str
            Data type of the variable.
        dimensions : sequence of str
            List of the dimension names used by the variable, in the
            desired order.

        Returns
        -------
        variable : netcdf_variable
            The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well. See Also -------- createDimension Notes ----- Any dimensions to be used by the variable should already exist in the NetCDF data structure or should be created by `createDimension` prior to creating the NetCDF variable. """ shape = tuple([self.dimensions[dim] for dim in dimensions]) shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy type = dtype(type) typecode, size = type.char, type.itemsize if (typecode, size) not in REVERSE: raise ValueError("NetCDF 3 does not support type %s" % type) data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3 self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions) return self.variables[name] def flush(self): """ Perform a sync-to-disk flush if the `netcdf_file` object is in write mode. See Also -------- sync : Identical function """ if hasattr(self, 'mode') and self.mode in 'wa': self._write() sync = flush def _write(self): self.fp.seek(0) self.fp.write(b'CDF') self.fp.write(array(self.version_byte, '>b').tostring()) # Write headers and data. self._write_numrecs() self._write_dim_array() self._write_gatt_array() self._write_var_array() def _write_numrecs(self): # Get highest record count from all record variables. 
for var in self.variables.values(): if var.isrec and len(var.data) > self._recs: self.__dict__['_recs'] = len(var.data) self._pack_int(self._recs) def _write_dim_array(self): if self.dimensions: self.fp.write(NC_DIMENSION) self._pack_int(len(self.dimensions)) for name in self._dims: self._pack_string(name) length = self.dimensions[name] self._pack_int(length or 0) # replace None with 0 for record dimension else: self.fp.write(ABSENT) def _write_gatt_array(self): self._write_att_array(self._attributes) def _write_att_array(self, attributes): if attributes: self.fp.write(NC_ATTRIBUTE) self._pack_int(len(attributes)) for name, values in attributes.items(): self._pack_string(name) self._write_values(values) else: self.fp.write(ABSENT) def _write_var_array(self): if self.variables: self.fp.write(NC_VARIABLE) self._pack_int(len(self.variables)) # Sort variable names non-recs first, then recs. def sortkey(n): v = self.variables[n] if v.isrec: return (-1,) return v._shape variables = sorted(self.variables, key=sortkey, reverse=True) # Set the metadata for all variables. for name in variables: self._write_var_metadata(name) # Now that we have the metadata, we know the vsize of # each record variable, so we can calculate recsize. self.__dict__['_recsize'] = sum([ var._vsize for var in self.variables.values() if var.isrec]) # Set the data for all variables. 
for name in variables: self._write_var_data(name) else: self.fp.write(ABSENT) def _write_var_metadata(self, name): var = self.variables[name] self._pack_string(name) self._pack_int(len(var.dimensions)) for dimname in var.dimensions: dimid = self._dims.index(dimname) self._pack_int(dimid) self._write_att_array(var._attributes) nc_type = REVERSE[var.typecode(), var.itemsize()] self.fp.write(asbytes(nc_type)) if not var.isrec: vsize = var.data.size * var.data.itemsize vsize += -vsize % 4 else: # record variable try: vsize = var.data[0].size * var.data.itemsize except IndexError: vsize = 0 rec_vars = len([v for v in self.variables.values() if v.isrec]) if rec_vars > 1: vsize += -vsize % 4 self.variables[name].__dict__['_vsize'] = vsize self._pack_int(vsize) # Pack a bogus begin, and set the real value later. self.variables[name].__dict__['_begin'] = self.fp.tell() self._pack_begin(0) def _write_var_data(self, name): var = self.variables[name] # Set begin in file header. the_beguine = self.fp.tell() self.fp.seek(var._begin) self._pack_begin(the_beguine) self.fp.seek(the_beguine) # Write data. if not var.isrec: self.fp.write(var.data.tostring()) count = var.data.size * var.data.itemsize self.fp.write(b'0' * (var._vsize - count)) else: # record variable # Handle rec vars with shape[0] < nrecs. if self._recs > len(var.data): shape = (self._recs,) + var.data.shape[1:] var.data.resize(shape) pos0 = pos = self.fp.tell() for rec in var.data: # Apparently scalars cannot be converted to big endian. If we # try to convert a ``=i4`` scalar to, say, '>i4' the dtype # will remain as ``=i4``. 
if not rec.shape and (rec.dtype.byteorder == '<' or (rec.dtype.byteorder == '=' and LITTLE_ENDIAN)): rec = rec.byteswap() self.fp.write(rec.tostring()) # Padding count = rec.size * rec.itemsize self.fp.write(b'0' * (var._vsize - count)) pos += self._recsize self.fp.seek(pos) self.fp.seek(pos0 + var._vsize) def _write_values(self, values): if hasattr(values, 'dtype'): nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] else: types = [(t, NC_INT) for t in integer_types] types += [ (float, NC_FLOAT), (str, NC_CHAR) ] # bytes index into scalars in py3k. Check for "string" types if isinstance(values, text_type) or isinstance(values, binary_type): sample = values else: try: sample = values[0] # subscriptable? except TypeError: sample = values # scalar for class_, nc_type in types: if isinstance(sample, class_): break typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode # asarray() dies with bytes and '>c' in py3k. Change to 'S' dtype_ = 'S' if dtype_ == '>c' else dtype_ values = asarray(values, dtype=dtype_) self.fp.write(asbytes(nc_type)) if values.dtype.char == 'S': nelems = values.itemsize else: nelems = values.size self._pack_int(nelems) if not values.shape and (values.dtype.byteorder == '<' or (values.dtype.byteorder == '=' and LITTLE_ENDIAN)): values = values.byteswap() self.fp.write(values.tostring()) count = values.size * values.itemsize self.fp.write(b'0' * (-count % 4)) # pad def _read(self): # Check magic bytes and version magic = self.fp.read(3) if not magic == b'CDF': raise TypeError("Error: %s is not a valid NetCDF 3 file" % self.filename) self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0] # Read file headers and set data. 
self._read_numrecs() self._read_dim_array() self._read_gatt_array() self._read_var_array() def _read_numrecs(self): self.__dict__['_recs'] = self._unpack_int() def _read_dim_array(self): header = self.fp.read(4) if header not in [ZERO, NC_DIMENSION]: raise ValueError("Unexpected header.") count = self._unpack_int() for dim in range(count): name = asstr(self._unpack_string()) length = self._unpack_int() or None # None for record dimension self.dimensions[name] = length self._dims.append(name) # preserve order def _read_gatt_array(self): for k, v in self._read_att_array().items(): self.__setattr__(k, v) def _read_att_array(self): header = self.fp.read(4) if header not in [ZERO, NC_ATTRIBUTE]: raise ValueError("Unexpected header.") count = self._unpack_int() attributes = {} for attr in range(count): name = asstr(self._unpack_string()) attributes[name] = self._read_values() return attributes def _read_var_array(self): header = self.fp.read(4) if header not in [ZERO, NC_VARIABLE]: raise ValueError("Unexpected header.") begin = 0 dtypes = {'names': [], 'formats': []} rec_vars = [] count = self._unpack_int() for var in range(count): (name, dimensions, shape, attributes, typecode, size, dtype_, begin_, vsize) = self._read_var() # http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html # Note that vsize is the product of the dimension lengths # (omitting the record dimension) and the number of bytes # per value (determined from the type), increased to the # next multiple of 4, for each variable. If a record # variable, this is the amount of space per record. The # netCDF "record size" is calculated as the sum of the # vsize's of all the record variables. # # The vsize field is actually redundant, because its value # may be computed from other information in the header. The # 32-bit vsize field is not large enough to contain the size # of variables that require more than 2^32 - 4 bytes, so # 2^32 - 1 is used in the vsize field for such variables. 
if shape and shape[0] is None: # record variable rec_vars.append(name) # The netCDF "record size" is calculated as the sum of # the vsize's of all the record variables. self.__dict__['_recsize'] += vsize if begin == 0: begin = begin_ dtypes['names'].append(name) dtypes['formats'].append(str(shape[1:]) + dtype_) # Handle padding with a virtual variable. if typecode in 'bch': actual_size = reduce(mul, (1,) + shape[1:]) * size padding = -actual_size % 4 if padding: dtypes['names'].append('_padding_%d' % var) dtypes['formats'].append('(%d,)>b' % padding) # Data will be set later. data = None else: # not a record variable # Calculate size to avoid problems with vsize (above) a_size = reduce(mul, shape, 1) * size if self.use_mmap: data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_) data.shape = shape else: pos = self.fp.tell() self.fp.seek(begin_) data = fromstring(self.fp.read(a_size), dtype=dtype_) data.shape = shape self.fp.seek(pos) # Add variable. self.variables[name] = netcdf_variable( data, typecode, size, shape, dimensions, attributes) if rec_vars: # Remove padding when only one record variable. if len(rec_vars) == 1: dtypes['names'] = dtypes['names'][:1] dtypes['formats'] = dtypes['formats'][:1] # Build rec array. 
if self.use_mmap: rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes) rec_array.shape = (self._recs,) else: pos = self.fp.tell() self.fp.seek(begin) rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes) rec_array.shape = (self._recs,) self.fp.seek(pos) for var in rec_vars: self.variables[var].__dict__['data'] = rec_array[var] def _read_var(self): name = asstr(self._unpack_string()) dimensions = [] shape = [] dims = self._unpack_int() for i in range(dims): dimid = self._unpack_int() dimname = self._dims[dimid] dimensions.append(dimname) dim = self.dimensions[dimname] shape.append(dim) dimensions = tuple(dimensions) shape = tuple(shape) attributes = self._read_att_array() nc_type = self.fp.read(4) vsize = self._unpack_int() begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]() typecode, size = TYPEMAP[nc_type] dtype_ = '>%s' % typecode return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize def _read_values(self): nc_type = self.fp.read(4) n = self._unpack_int() typecode, size = TYPEMAP[nc_type] count = n*size values = self.fp.read(int(count)) self.fp.read(-count % 4) # read padding if typecode is not 'c': values = fromstring(values, dtype='>%s' % typecode) if values.shape == (1,): values = values[0] else: values = values.rstrip(b'\x00') return values def _pack_begin(self, begin): if self.version_byte == 1: self._pack_int(begin) elif self.version_byte == 2: self._pack_int64(begin) def _pack_int(self, value): self.fp.write(array(value, '>i').tostring()) _pack_int32 = _pack_int def _unpack_int(self): return int(fromstring(self.fp.read(4), '>i')[0]) _unpack_int32 = _unpack_int def _pack_int64(self, value): self.fp.write(array(value, '>q').tostring()) def _unpack_int64(self): return fromstring(self.fp.read(8), '>q')[0] def _pack_string(self, s): count = len(s) self._pack_int(count) self.fp.write(asbytes(s)) self.fp.write(b'0' * (-count % 4)) # pad def _unpack_string(self): count 
= self._unpack_int() s = self.fp.read(count).rstrip(b'\x00') self.fp.read(-count % 4) # read padding return s class netcdf_variable(object): """ A data object for the `netcdf` module. `netcdf_variable` objects are constructed by calling the method `netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable` objects behave much like array objects defined in numpy, except that their data resides in a file. Data is read by indexing and written by assigning to an indexed subset; the entire array can be accessed by the index ``[:]`` or (for scalars) by using the methods `getValue` and `assignValue`. `netcdf_variable` objects also have attribute `shape` with the same meaning as for arrays, but the shape cannot be modified. There is another read-only attribute `dimensions`, whose value is the tuple of dimension names. All other attributes correspond to variable attributes defined in the NetCDF file. Variable attributes are created by assigning to an attribute of the `netcdf_variable` object. Parameters ---------- data : array_like The data array that holds the values for the variable. Typically, this is initialized as empty, but with the proper shape. typecode : dtype character code Desired data-type for the data array. size : int Desired element size for the data array. shape : sequence of ints The shape of the array. This should match the lengths of the variable's dimensions. dimensions : sequence of strings The names of the dimensions used by the variable. Must be in the same order of the dimension lengths given by `shape`. attributes : dict, optional Attribute values (any type) keyed by string names. These attributes become attributes for the netcdf_variable object. Attributes ---------- dimensions : list of str List of names of dimensions used by the variable object. 
isrec, shape Properties See also -------- isrec, shape """ def __init__(self, data, typecode, size, shape, dimensions, attributes=None): self.data = data self._typecode = typecode self._size = size self._shape = shape self.dimensions = dimensions self._attributes = attributes or {} for k, v in self._attributes.items(): self.__dict__[k] = v def __setattr__(self, attr, value): # Store user defined attributes in a separate dict, # so we can save them to file later. try: self._attributes[attr] = value except AttributeError: pass self.__dict__[attr] = value def isrec(self): """Returns whether the variable has a record dimension or not. A record dimension is a dimension along which additional data could be easily appended in the netcdf data structure without much rewriting of the data file. This attribute is a read-only property of the `netcdf_variable`. """ return bool(self.data.shape) and not self._shape[0] isrec = property(isrec) def shape(self): """Returns the shape tuple of the data variable. This is a read-only attribute and can not be modified in the same manner of other numpy arrays. """ return self.data.shape shape = property(shape) def getValue(self): """ Retrieve a scalar value from a `netcdf_variable` of length one. Raises ------ ValueError If the netcdf variable is an array of length greater than one, this exception will be raised. """ return self.data.item() def assignValue(self, value): """ Assign a scalar value to a `netcdf_variable` of length one. Parameters ---------- value : scalar Scalar value (of compatible type) to assign to a length-one netcdf variable. This value will be written to file. Raises ------ ValueError If the input is not a scalar, or if the destination is not a length-one netcdf variable. """ if not self.data.flags.writeable: # Work-around for a bug in NumPy. Calling itemset() on a read-only # memory-mapped array causes a seg. fault. # See NumPy ticket #1622, and SciPy ticket #1202. 
# This check for `writeable` can be removed when the oldest version # of numpy still supported by scipy contains the fix for #1622. raise RuntimeError("variable is not writeable") self.data.itemset(value) def typecode(self): """ Return the typecode of the variable. Returns ------- typecode : char The character typecode of the variable (eg, 'i' for int). """ return self._typecode def itemsize(self): """ Return the itemsize of the variable. Returns ------- itemsize : int The element size of the variable (eg, 8 for float64). """ return self._size def __getitem__(self, index): return self.data[index] def __setitem__(self, index, data): # Expand data for record vars? if self.isrec: if isinstance(index, tuple): rec_index = index[0] else: rec_index = index if isinstance(rec_index, slice): recs = (rec_index.start or 0) + len(data) else: recs = rec_index + 1 if recs > len(self.data): shape = (recs,) + self._shape[1:] self.data.resize(shape) self.data[index] = data NetCDFFile = netcdf_file NetCDFVariable = netcdf_variable
import warnings

from faker import Faker
from typing import Optional, ClassVar, Pattern, List, Match

from .. import exceptions
from .. import utils


class Filth(object):
    """Base class for every piece of ``Filth`` detected in dirty dirty text.

    A ``Filth`` records the span (``beg``/``end``), the matched ``text`` and
    bookkeeping about which detector found it and in which document.
    """

    # These two are prepended/appended around placeholder and identifier
    # replacements, so users can customise the output markers.
    prefix = u'{{'  # type: ClassVar[str]
    suffix = u'}}'  # type: ClassVar[str]

    # Label used when filths of different kinds are merged together.
    type = 'unknown'  # type: ClassVar[str]

    # Shared counter of every (type, text) pair seen across all Filth types;
    # backs the numeric suffix in `identifier`.
    lookup = utils.Lookup()

    # Kept only for backwards compatibility; deprecated.
    regex = None  # type: Optional[Pattern[str]]

    def __init__(self,
                 beg: Optional[int] = None,
                 end: Optional[int] = None,
                 text: Optional[str] = None,
                 match: Optional[Match] = None,
                 detector_name: Optional[str] = None,
                 document_name: Optional[str] = None,
                 replacement_string: Optional[str] = None,
                 locale: Optional[str] = None,
                 **kwargs):
        # Start from neutral defaults, then let `match` fill them in, then
        # let explicit beg/end/text arguments override the match.
        self.beg = 0  # type: int
        self.end = 0  # type: int
        self.text = ''  # type: str
        self.match = None  # type: Optional[Match]

        if match is not None and isinstance(match, Match):
            self.beg = match.start()
            self.end = match.end()
            self.text = match.string[self.beg:self.end]
            self.match = match

        if beg is not None:
            self.beg = beg
        if end is not None:
            self.end = end
        if text is not None:
            self.text = text

        self.detector_name = detector_name  # type: Optional[str]
        self.document_name = document_name  # type: Optional[str]
        self.replacement_string = replacement_string  # type: Optional[str]
        self.locale = locale  # type: Optional[str]

        # A filth must cover a non-empty, forward span.
        if self.beg >= self.end:
            raise ValueError(
                f"Creating invalid filth (self.beg >= self.end): {self}"
            )

    @property
    def placeholder(self) -> str:
        return self.type.upper()

    @property
    def identifier(self) -> str:
        # NOTE: this is not an efficient way to store this in memory. One
        # could instead hash the type and text to avoid keeping the tuple
        # in the lookup table.
        occurrence = self.lookup[(self.type, self.text.lower())]
        return u'%s-%d' % (self.placeholder, occurrence)

    def replace_with(self, replace_with: str = 'placeholder', **kwargs) -> str:
        """Render this filth as replacement text (deprecated API)."""
        warnings.warn(
            "Filth.replace_with() will be removed in favour of using the more general PostProcessors",
            DeprecationWarning
        )
        if self.prefix != '{{' or self.suffix != '}}':
            warnings.warn(
                "Setting prefixes and suffixes with scrubadub.filth.Filth.prefix or scrubadub.filth.Filth.suffix "
                "is depreciated in favour of using the PrefixSuffixReplacer",
                DeprecationWarning
            )

        if replace_with == 'placeholder':
            return self.prefix + self.placeholder + self.suffix
        # elif replace_with == 'surrogate':
        #     raise NotImplementedError
        if replace_with == 'identifier':
            return self.prefix + self.identifier + self.suffix
        raise exceptions.InvalidReplaceWith(replace_with)

    def merge(self, other_filth: 'Filth') -> 'MergedFilth':
        return MergedFilth(self, other_filth)

    def __repr__(self) -> str:
        return self._to_string()

    def _to_string(self, attributes: Optional[List[str]] = None) -> str:
        if attributes is None:
            attributes = ['text', 'document_name', 'beg', 'end',
                          'comparison_type', 'detector_name', 'locale']
        rendered = []
        for attribute in attributes:
            value = getattr(self, attribute, None)
            if value is not None:
                rendered.append("{}={}".format(attribute, value.__repr__()))
        return "<{} {}>".format(self.__class__.__name__, " ".join(rendered))

    def __eq__(self, other) -> bool:
        """Only test equality on a subset of class attributes and some are optional"""
        if not hasattr(other, 'beg') or not hasattr(other, 'end') or not hasattr(other, 'text'):
            raise TypeError("Unsupported comparison with a Filth and {}".format(type(other)))
        # Evaluate every comparison eagerly (no short-circuit) so attribute
        # access behaves the same on both operands regardless of earlier
        # mismatches.
        checks = [
            self.beg == other.beg,
            self.end == other.end,
            self.text == other.text,
        ]
        if hasattr(self, 'document_name') or hasattr(other, 'document_name'):
            checks.append(self.document_name == other.document_name)
        if hasattr(self, 'detector_name') or hasattr(other, 'detector_name'):
            checks.append(self.detector_name == other.detector_name)
        return all(checks)

    @staticmethod
    def generate(faker: Faker) -> str:
        """Generates an example of this ``Filth`` type, usually using the
        faker python library.

        :param faker: The ``Faker`` class from the ``faker`` library
        :type faker: Faker
        :return: An example of this ``Filth``
        :rtype: str
        """
        raise NotImplementedError("A generate() function has not been implemented for this Filth")

    def is_valid(self) -> bool:
        return True


class MergedFilth(Filth):
    """This class takes care of merging different types of filth"""

    def __init__(self, a_filth: Filth, b_filth: Filth):
        super(MergedFilth, self).__init__(
            beg=a_filth.beg,
            end=a_filth.end,
            text=a_filth.text,
            document_name=a_filth.document_name,
        )
        self.filths = [a_filth]
        self._update_content(b_filth)

    def _update_content(self, other_filth: Filth):
        """this updates the bounds, text and placeholder for the merged
        filth
        """
        # The two spans must overlap (or at least touch) to be mergeable.
        if self.end < other_filth.beg or other_filth.end < self.beg:
            raise exceptions.FilthMergeError(
                "a_filth goes from [%s, %s) and b_filth goes from [%s, %s)" % (
                    self.beg, self.end, other_filth.beg, other_filth.end
                ))

        if self.document_name != other_filth.document_name:
            raise exceptions.FilthMergeError(
                "This MergedFilth is in document {}, but the Filth that is being merged is in another document {}"
                "".format(self.document_name.__repr__(), other_filth.document_name.__repr__())
            )

        # Order the two filths by start position so the overlapping text can
        # be stitched together correctly.
        if self.beg < other_filth.beg:
            first, second = self, other_filth  # type: Filth, Filth
        else:
            first, second = other_filth, self

        # Only extend the text when the later filth sticks out past the end
        # of the earlier one.
        trailing = second.end - first.end
        if trailing > 0:
            self.text = first.text + second.text[-trailing:]

        # Expand the merged bounds.
        self.beg = min(self.beg, other_filth.beg)
        self.end = max(self.end, other_filth.end)
        if self.end - self.beg != len(self.text):
            raise exceptions.FilthMergeError("text length isn't consistent")

        # Update the placeholder to reflect every merged type.
        self.filths.append(other_filth)
        self._placeholder = '+'.join(filth.type for filth in self.filths)

    @property
    def placeholder(self):
        return self._placeholder.upper()

    def merge(self, other_filth: Filth) -> 'MergedFilth':
        """Be smart about merging filth in this case to avoid nesting merged
        filths.
        """
        self._update_content(other_filth)
        return self

    def __repr__(self) -> str:
        return self._to_string(['filths'])


class RegexFilth(Filth):
    def __init__(self, *args, **kwargs):
        warnings.warn("Use of RegexFilth is depreciated, use Filth directly instead.", DeprecationWarning)
        super(RegexFilth, self).__init__(*args, **kwargs)
import logging
import os
import pickle
import re
import sys
from collections import Counter

import numpy as np
import tensorflow as tf
from elasticsearch import Elasticsearch

from imnamap.memory.searcher import ElasticSearcher
from imnamap.models.imnamap import build_imnamap_model
from imnamap.utils.embeddings import embedding_initializer
from imnamap.utils.metrics import evaluate_hits
from imnamap.utils.preprocessing import preprocess_question, ids2tokens, tokens2ids, pad_sequences
from imnamap.utils.progress import Progbar

flags = tf.app.flags
flags.DEFINE_string("train", "../datasets/movie_dialog/task1_qa/task1_qa_train.txt.pkl",
                    "Training data filename")
flags.DEFINE_string("valid", "../datasets/movie_dialog/task1_qa/task1_qa_dev.txt.pkl",
                    "Validation data filename")
flags.DEFINE_string("index", "../datasets/movie_dialog/task1_qa/index.pkl",
                    "Corpus index data filename")
flags.DEFINE_string("embeddings", None, "Pretrained word embeddings filename")
flags.DEFINE_string("model_dir", "../models/movie_dialog/task1_qa", "Model path")
flags.DEFINE_string("model_name", "iana", "Model name")
flags.DEFINE_string("es_address", "http://localhost:9200", "Elasticsearch server address")
flags.DEFINE_string("es_index", "movie_kb", "Elasticsearch index")
flags.DEFINE_integer("top_docs", 30, "Number of retrieved documents for each question")
flags.DEFINE_integer("num_hops", 3, "Number of hops")
flags.DEFINE_integer("num_epochs", 100, "Number of epochs")
flags.DEFINE_integer("embedding_size", 50, "Word embedding size")
flags.DEFINE_integer("gru_output_size", 128, "Question and documents GRU output size")
flags.DEFINE_integer("inf_gru_output_size", 128, "Inference GRU output size")
flags.DEFINE_integer("hidden_layer_size", 4096, "Last hidden layer size")
flags.DEFINE_string("optim_method", "adam", "Optimization method")
flags.DEFINE_integer("batch_size", 128, "Batch size")
flags.DEFINE_float("learning_rate", 0.001, "Learning rate")
flags.DEFINE_float("embedding_l2_reg", 0.0001,
                   "Word embedding L2 regularization coefficient")
flags.DEFINE_float("l2_max_norm", 5.0, "Upper bound clipping value for gradients")
flags.DEFINE_float("dropout_gate_prob", 0.2, "Dropout keep probability")
flags.DEFINE_float("dropout_dense_prob", 0.5, "Dropout keep probability")
flags.DEFINE_integer("num_no_improv", 5,
                     "Number of times that no improvements observed on validation set")
flags.DEFINE_integer("top_results", 1, "Cutoff for HITS metrics evaluation")
flags.DEFINE_string("starter_checkpoint", None,
                    "Checkpoint to restore before starting the training")
FLAGS = flags.FLAGS


def get_optimizer(optim_method):
    """Return the TF optimizer class for the given method name.

    Raises:
        ValueError: if ``optim_method`` is not a supported method.
    """
    if optim_method == "adam":
        return tf.train.AdamOptimizer
    raise ValueError("Invalid optimization method!")


def _prepare_batch(questions, answers, batch_indexes, index, searcher,
                   top_docs, max_doc_len, batch_size):
    """Assemble one batch of model inputs.

    Retrieves ``top_docs`` documents per question from Elasticsearch, pads
    them to ``max_doc_len`` and computes per-example token frequencies.

    Returns:
        Tuple of (batch_questions, batch_answers, k_hot_answers,
        batch_docs, frequencies).
    """
    num_tokens = len(index["token2id"])
    pad_id = index["token2id"]["#pad#"]

    batch_questions = questions[batch_indexes, :]
    batch_answers = answers[batch_indexes, :]

    # k-hot encode the answers (ignoring padding tokens).
    k_hot_answers = np.zeros((batch_size, num_tokens), dtype="float32")
    for i, answer in enumerate(batch_answers):
        for token_id in answer:
            if token_id != pad_id:
                k_hot_answers[i][token_id] = 1

    # Retrieve and pad the supporting documents for each question.
    batch_docs = []
    for question in batch_questions:
        question_docs = searcher.search(
            preprocess_question(
                [re.escape(token)
                 for token in ids2tokens(question, index["id2token"], pad_id)]),
            top_docs
        )
        batch_docs.append(
            pad_sequences(
                [tokens2ids(doc, index["token2id"]) for doc in question_docs],
                maxlen=max_doc_len,
                padding="post")
        )
    # Pad up to exactly top_docs documents per question.
    batch_docs = np.array(
        [np.pad(doc, [(0, top_docs - doc.shape[0]), (0, 0)], "constant")
         for doc in batch_docs]
    )

    # Token frequency counts over each example's retrieved documents.
    frequencies = np.ones((batch_size, num_tokens), dtype="float32")
    for i, docs in enumerate(batch_docs):
        counter = Counter([token_id for doc in docs for token_id in doc])
        for token_id, count in counter.items():
            frequencies[i, token_id] = count

    return batch_questions, batch_answers, k_hot_answers, batch_docs, frequencies


def evaluate_dataset_hits(sess, net, question_input, documents_input,
                          batch_size_input, frequencies_input,
                          dropout_gate_input, dropout_dense_input, dataset,
                          index, batch_size, searcher, top_docs, max_doc_len,
                          top_results):
    """Compute mean HITS@``top_results`` of the model over ``dataset``."""
    num_examples = len(dataset["dialogs_questions"])
    batches_indexes = np.arange(num_examples)
    num_batches = num_examples // batch_size
    progress = Progbar(num_batches)
    hits = np.zeros(num_batches)

    # FIX: build the top-k op ONCE instead of inside the batch loop.
    # The original called tf.nn.top_k(tf.sigmoid(...)) per batch, which adds
    # new nodes to the graph every iteration (unbounded graph growth).
    top_k_op = tf.nn.top_k(tf.sigmoid(net[0]), top_results)

    num_batch = 1
    for start_idx in range(0, num_examples - batch_size + 1, batch_size):
        batch_indexes = batches_indexes[start_idx:start_idx + batch_size]
        (batch_questions, batch_answers, _unused_k_hot,
         batch_docs, frequencies) = _prepare_batch(
            dataset["dialogs_questions"], dataset["dialogs_answers"],
            batch_indexes, index, searcher, top_docs, max_doc_len, batch_size)

        top_k_values, top_k_indices = sess.run(
            top_k_op,
            {
                question_input: batch_questions,
                documents_input: batch_docs,
                batch_size_input: batch_size,
                frequencies_input: frequencies,
                dropout_gate_input: 1.0,  # no dropout at evaluation time
                dropout_dense_input: 1.0
            }
        )
        hits[num_batch - 1] = evaluate_hits(top_k_indices, batch_answers)
        progress.update(num_batch)
        num_batch += 1
    return np.mean(hits)


def main(_):
    """Train the IMNAMAP model with early stopping on validation HITS@k."""
    # FIX: create the model directory itself. The original used
    # os.path.dirname(FLAGS.model_dir), which creates only the PARENT
    # directory, so checkpoint/log writes into model_dir could fail.
    os.makedirs(FLAGS.model_dir, exist_ok=True)

    # Compute model filename from all hyperparameter flags.
    model_filename = "{model_name}__{top_docs}__{num_hops}__{num_epochs}" \
                     "__{embedding_size}__{gru_output_size}__{inf_gru_output_size}__" \
                     "{hidden_layer_size}__{optim_method}__{batch_size}__{learning_rate}__" \
                     "{embedding_l2_reg}__{l2_max_norm}__{dropout_gate_prob}__{dropout_dense_prob}" \
        .format(**FLAGS.__dict__["__flags"]) \
        .replace(".", "_")
    model_path = os.path.normpath(os.sep.join([FLAGS.model_dir, "{}.ckpt".format(model_filename)]))

    # Log both to a per-model file and to stdout.
    log_formatter = logging.Formatter("%(asctime)s %(message)s")
    root_logger = logging.getLogger(__name__)
    root_logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler("{}.log".format(model_path))
    file_handler.setFormatter(log_formatter)
    root_logger.addHandler(file_handler)
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)

    # When resuming, recover the epoch number from the checkpoint filename
    # (checkpoints are saved as "<model_path>.e<epoch>").
    if FLAGS.starter_checkpoint is not None:
        epoch_match = re.match(r".*\.e(\d+).*", FLAGS.starter_checkpoint)
        if epoch_match:
            start_epoch = int(epoch_match.group(1))
        else:
            root_logger.fatal("Unable to parse epoch parameter from model file!")
            sys.exit(-1)
    else:
        start_epoch = 1

    root_logger.info("-- Loading training data from {}".format(FLAGS.train))
    with open(FLAGS.train, mode="rb") as in_file:
        train = pickle.load(in_file)
    root_logger.info("-- Loading validation data from {}".format(FLAGS.valid))
    with open(FLAGS.valid, mode="rb") as in_file:
        valid = pickle.load(in_file)
    root_logger.info("-- Loading index data from {}".format(FLAGS.index))
    with open(FLAGS.index, mode="rb") as in_file:
        index = pickle.load(in_file)

    # Get dataset information.
    num_tokens = len(index["token2id"])
    num_examples = len(train["dialogs_questions"])
    max_question_len = train["max_question_len"]
    max_answer_len = train["max_answer_len"]
    # Elasticsearch expects a sequence of hosts.
    es_client = Elasticsearch([FLAGS.es_address])
    searcher = ElasticSearcher(es_client, FLAGS.es_index)
    max_doc_len = index["max_doc_len"]
    num_batches = num_examples // FLAGS.batch_size
    root_logger.info("Number of tokens: %d" % num_tokens)
    root_logger.info("Number of examples: %d" % num_examples)
    root_logger.info("Maximum question len: %d" % max_question_len)
    root_logger.info("Maximum answer len: %d" % max_answer_len)
    root_logger.info("Maximum document len: %d" % max_doc_len)

    root_logger.info("-- Building model")
    with tf.Session() as sess:
        tf.set_random_seed(12345)
        np.random.seed(12345)

        question_input = tf.placeholder(dtype=tf.int32, shape=(None, max_question_len))
        documents_input = tf.placeholder(dtype=tf.int32, shape=(None, None, max_doc_len))
        batch_size_input = tf.placeholder(dtype=tf.int32)
        frequencies_input = tf.placeholder(dtype=tf.float32, shape=(None, num_tokens))
        target_input = tf.placeholder(dtype=tf.float32, shape=(None, num_tokens))
        dropout_gate_input = tf.placeholder(dtype=tf.float32)
        dropout_dense_input = tf.placeholder(dtype=tf.float32)

        if FLAGS.embeddings is not None:
            root_logger.info("-- Loading pretrained word embeddings from {}".format(FLAGS.embeddings))
            emb_initializer = embedding_initializer(index["token2id"], FLAGS.embeddings)
        else:
            emb_initializer = embedding_initializer(index["token2id"])

        net = build_imnamap_model(
            question_input, documents_input, batch_size_input,
            frequencies_input, dropout_gate_input, dropout_dense_input,
            num_tokens, FLAGS.embedding_size, emb_initializer,
            FLAGS.gru_output_size, FLAGS.inf_gru_output_size,
            FLAGS.hidden_layer_size, max_doc_len, FLAGS.top_docs,
            FLAGS.num_hops
        )

        # Sigmoid cross-entropy over the k-hot answer targets, plus an L2
        # penalty on the embedding matrix.
        loss_function = tf.reduce_mean(tf.reduce_sum(
            tf.nn.sigmoid_cross_entropy_with_logits(net[0], target_input), 1)
        )
        with tf.variable_scope("embeddings", reuse=True):
            loss_function += FLAGS.embedding_l2_reg * tf.nn.l2_loss(tf.get_variable("embedding_matrix"))
        loss_summary_op = tf.scalar_summary("train_loss", loss_function)

        # Gradient-clipped optimizer step.
        optim = get_optimizer(FLAGS.optim_method)(FLAGS.learning_rate)
        gvs = optim.compute_gradients(loss_function)
        clipped_gvs = [(tf.clip_by_norm(grad, FLAGS.l2_max_norm), var) for grad, var in gvs]
        train_step = optim.apply_gradients(clipped_gvs)

        sess.run(tf.initialize_all_variables())
        saver = tf.train.Saver()
        if FLAGS.starter_checkpoint:
            root_logger.info("-- Restored starter checkpoint from file {}".format(FLAGS.starter_checkpoint))
            saver.restore(sess, FLAGS.starter_checkpoint)
        summary_writer = tf.train.SummaryWriter(FLAGS.model_dir + "/logs", sess.graph)

        best_hits = 0
        best_epoch = 0
        no_improv_counter = 0
        best_model_path = None
        for e in range(start_epoch, FLAGS.num_epochs + 1):
            root_logger.info("==> online epoch # {0}".format(e))
            progress = Progbar(num_batches)
            batches_indexes = np.arange(num_examples)
            np.random.shuffle(batches_indexes)
            num_batch = 1
            epoch_loss = 0
            for start_idx in range(0, num_examples - FLAGS.batch_size + 1, FLAGS.batch_size):
                batch_indexes = batches_indexes[start_idx:start_idx + FLAGS.batch_size]
                (batch_questions, _batch_answers, k_hot_answers,
                 batch_docs, frequencies) = _prepare_batch(
                    train["dialogs_questions"], train["dialogs_answers"],
                    batch_indexes, index, searcher, FLAGS.top_docs,
                    max_doc_len, FLAGS.batch_size)

                loss, _, loss_summary = sess.run(
                    [loss_function, train_step, loss_summary_op],
                    {
                        question_input: batch_questions,
                        documents_input: batch_docs,
                        batch_size_input: FLAGS.batch_size,
                        target_input: k_hot_answers,
                        frequencies_input: frequencies,
                        dropout_gate_input: FLAGS.dropout_gate_prob,
                        dropout_dense_input: FLAGS.dropout_dense_prob
                    }
                )
                summary_writer.add_summary(loss_summary, global_step=e)
                progress.update(num_batch, [("Loss", loss)])
                epoch_loss += loss
                num_batch += 1

            root_logger.info("Current epoch loss: {}".format(epoch_loss / num_batches))
            root_logger.info("-- Evaluating HITS@1 on validation set")
            current_hits = evaluate_dataset_hits(sess, net, question_input, documents_input,
                                                 batch_size_input, frequencies_input,
                                                 dropout_gate_input, dropout_dense_input,
                                                 valid, index, FLAGS.batch_size, searcher,
                                                 FLAGS.top_docs, max_doc_len, FLAGS.top_results)
            root_logger.info("Current HITS@1 on validation set: {}".format(current_hits))
            valid_hits_summary = tf.Summary(value=[tf.Summary.Value(tag="valid_hits", simple_value=current_hits)])
            summary_writer.add_summary(valid_hits_summary, global_step=e)

            if current_hits > best_hits:
                # New best model: drop the previous best checkpoint and save.
                best_hits = current_hits
                best_epoch = e
                no_improv_counter = 0
                if e > 1:
                    os.remove(best_model_path)
                save_path = saver.save(sess, "{}.e{}".format(model_path, e))
                best_model_path = save_path
                root_logger.info("Model saved in file: {}".format(save_path))
            else:
                no_improv_counter += 1
                if no_improv_counter == FLAGS.num_no_improv:
                    root_logger.info("-- Terminating training due to early stopping")
                    root_logger.info("-- Best HITS@{} {} at epoch {}".format(
                        FLAGS.top_results, best_hits, best_epoch))
                    # FIX: use sys.exit instead of the site-injected exit().
                    sys.exit(-1)


if __name__ == "__main__":
    tf.app.run()
import sys


def main():
    """Migrate Scrapy Cluster data stored in Redis between cluster versions.

    Supported upgrade paths:
      * 1.0 -> 1.1: queue keys gain a domain component
        (``<spider>:queue`` becomes ``<spider>:<domain>:queue``)
      * 1.1 -> 1.2: queue payloads are re-serialized from pickle to json

    All settings come from the command line; the function exits the
    process (via ``sys.exit``) on error or user abort.
    """
    # Third-party imports are kept function-local, as in the original
    # script, so importing this module does not require redis/tldextract.
    import argparse
    import redis
    import time
    import pickle
    import traceback
    import tldextract
    import ujson

    parser = argparse.ArgumentParser(description="Scrapy Cluster Migration "
                                     "script. Use to upgrade any part of "
                                     "Scrapy Cluster. Not recommended for "
                                     "use while your cluster is running.")
    parser.add_argument('-ir', '--input-redis-host', action='store',
                        required=True, help="The input Redis host ip")
    parser.add_argument('-ip', '--input-redis-port', action='store',
                        default='6379', help="The input Redis port")
    parser.add_argument('-id', '--input-redis-db', action='store',
                        default='0', help="The input Redis db")
    parser.add_argument('-or', '--output-redis-host', action='store',
                        required=False, default=None,
                        help="The output Redis host ip, defaults to input")
    parser.add_argument('-op', '--output-redis-port', action='store',
                        default=None,
                        help="The output Redis port, defaults to input")
    parser.add_argument('-od', '--output-redis-db', action='store',
                        default=None,
                        help="The output Redis db, defaults to input")
    parser.add_argument('-sv', '--start-version', action='store', type=float,
                        required=True, choices=[1.0, 1.1],
                        help="The current cluster version")
    parser.add_argument('-ev', '--end-version', action='store', type=float,
                        default=1.2, required=True, choices=[1.1, 1.2],
                        help="The desired cluster version")
    parser.add_argument('-v', '--verbosity', action='store', required=False,
                        default=0, choices=[0, 1, 2], type=int,
                        help="Increases output text verbosity")
    parser.add_argument('-y', '--yes', action='store_const', required=False,
                        default=False, const=True,
                        help="Answer 'yes' to any prompt")
    args = vars(parser.parse_args())

    def vprint(s, v=0):
        # Print ``s`` only when the requested verbosity is at least ``v``.
        # BUG FIX: this helper used to be defined *after* its first call
        # below, raising NameError whenever a downgrade was requested.
        if v <= args['verbosity']:
            print(s)

    # Python 2/3 compatible line reader for the yes/no prompt
    # (the original used the Python-2-only ``raw_input``).
    try:
        read_line = raw_input  # noqa: F821 -- exists on Python 2 only
    except NameError:
        read_line = input

    # from http://stackoverflow.com/questions/3041986/python-command-line-yes-no-input
    def query_yes_no(question, default="yes"):
        """Ask a yes/no question on stdin and return the answer as a bool.

        ``default`` is the presumed answer if the user just hits <Enter>.
        It must be "yes" (the default), "no" or None (meaning an answer
        is required of the user).
        """
        valid = {"yes": True, "y": True, "ye": True,
                 "no": False, "n": False}
        if default is None:
            prompt = " [y/n] "
        elif default == "yes":
            prompt = " [Y/n] "
        elif default == "no":
            prompt = " [y/N] "
        else:
            raise ValueError("invalid default answer: '%s'" % default)
        while True:
            sys.stdout.write(question + prompt)
            choice = read_line().lower()
            if default is not None and choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'yes' or 'no' "
                                 "(or 'y' or 'n').\n")

    current_version = args['start_version']
    end_version = args['end_version']

    if end_version < current_version:
        vprint("Downgrading is not supported at this time")
        sys.exit(1)

    irh = args['input_redis_host']
    irp = args['input_redis_port']
    ird = args['input_redis_db']
    # Output connection settings default to the input settings.
    orh = args['output_redis_host'] if args['output_redis_host'] is not None else irh
    orp = args['output_redis_port'] if args['output_redis_port'] is not None else irp
    # Renamed from ``ord`` which shadowed the builtin of the same name.
    odb = args['output_redis_db'] if args['output_redis_db'] is not None else ird

    drop_queue = False
    if irh == orh and ird == odb and irp == orp and args['end_version'] >= 1.2:
        # In-place migration: the old queue must be deleted before the
        # re-serialized items are written back under the same key.
        print("Warning! Exact same Redis settings detected, migration will "
              "need to delete data before it is complete in order to "
              "be successful.")
        result = query_yes_no("Continue?") if not args['yes'] else True
        if result:
            drop_queue = True
        else:
            sys.exit(0)

    start_time = time.time()
    i_redis_conn = redis.Redis(host=irh, port=irp, db=ird)
    o_redis_conn = redis.Redis(host=orh, port=orp, db=odb)

    try:
        # ------------------------------------------------------------------
        # Upgrade 1.0 to 1.1: split "<spider>:queue" into per-domain queues.
        # ------------------------------------------------------------------
        if current_version == 1.0 and end_version > current_version:
            vprint("Upgrading Cluster from 1.0 to 1.1")
            extract = tldextract.TLDExtract()
            queue_keys = i_redis_conn.keys("*:queue")
            for queue in queue_keys:
                elements = queue.split(":")
                spider = elements[0]
                if len(elements) == 2:
                    vprint("Upgrading " + spider + " spider")
                    old_count = i_redis_conn.zcard(queue)
                    current_count = 0
                    # loop through all elements
                    for item in i_redis_conn.zscan_iter(queue):
                        current_count += 1
                        if current_count % 10 == 0:
                            vprint("count: " + str(current_count), 2)
                        item_key = item[0]
                        try:
                            item = pickle.loads(item_key)
                        except Exception:
                            # best-effort: a corrupt entry should not
                            # abort the whole migration
                            vprint("Found unloadable item, skipping", 1)
                            continue
                        # new key format includes the request's domain
                        ex_res = extract(item['url'])
                        key = "{sid}:{dom}.{suf}:queue".format(
                            sid=item['spiderid'],
                            dom=ex_res.domain,
                            suf=ex_res.suffix)
                        val = pickle.dumps(item, protocol=-1)
                        # shortcut to shove stuff into the priority queue
                        # NOTE(review): legacy redis-py zadd(key, member,
                        # score) signature -- verify against installed
                        # redis-py version
                        o_redis_conn.zadd(key, val, -item['priority'])
                    # verify nothing was lost before deleting the old key
                    new_count = 0
                    for key in o_redis_conn.keys('{s}:*:queue'.format(s=spider)):
                        new_count = new_count + i_redis_conn.zcard(key)
                    if new_count == old_count:
                        vprint("Successfully migrated " + str(new_count) +
                               " requests for " + spider + " spider")
                        i_redis_conn.delete(queue)
                    else:
                        vprint("Unknown error when migrating requests {o}/{n}"
                               .format(o=old_count, n=new_count))
                        result = query_yes_no("Continue?") if not args['yes'] else True
                        if not result:
                            sys.exit(0)
            current_version = 1.1

        # ------------------------------------------------------------------
        # Upgrade 1.1 to 1.2: re-serialize queue items from pickle to json.
        # ------------------------------------------------------------------
        if current_version == 1.1 and end_version > current_version:
            vprint("Upgrading Cluster from 1.1 to 1.2")
            queue_keys = i_redis_conn.keys("*:*:queue")
            for queue in queue_keys:
                elements = queue.split(":")
                cache = []
                if len(elements) == 3:
                    spider = elements[0]
                    domain = elements[1]
                    old_count = i_redis_conn.zcard(queue)
                    vprint("Working on key " + queue, 1)
                    # loop through all elements
                    current_count = 0
                    for item in i_redis_conn.zscan_iter(queue):
                        current_count += 1
                        if current_count % 10 == 0:
                            vprint("count: " + str(current_count), 2)
                        item_key = item[0]
                        # load and cache request
                        try:
                            item = pickle.loads(item_key)
                            cache.append(item)
                        except Exception:
                            vprint("Found unloadable item, skipping", 1)
                            continue
                    # done getting all elements, drop queue if needed
                    if drop_queue:
                        vprint("Dropping queue " + queue, 1)
                        i_redis_conn.delete(queue)
                    # insert cached items back in
                    vprint("Updating queue " + queue, 1)
                    current_count = 0
                    for item in cache:
                        current_count += 1
                        if current_count % 10 == 0:
                            vprint("count: " + str(current_count), 2)
                        val = ujson.dumps(item)
                        # shortcut to shove stuff into the priority queue
                        o_redis_conn.zadd(queue, val, -item['priority'])
                    new_count = o_redis_conn.zcard(queue)
                    if new_count == old_count:
                        vprint("Successfully migrated " + str(new_count) +
                               " requests for " + domain + " " + spider +
                               " spider")
                    else:
                        vprint("Unknown error when migrating requests {o}/{n}"
                               .format(o=old_count, n=new_count))
                        result = query_yes_no("Continue?") if not args['yes'] else True
                        if not result:
                            sys.exit(0)
            current_version = 1.2
    except Exception:
        vprint("Error Upgrading Cluster.")
        # BUG FIX: traceback.print_exc() returns None; format_exc()
        # actually yields the traceback text for vprint to show.
        vprint(traceback.format_exc())
        sys.exit(1)

    completion_time = time.time() - start_time
    print("Cluster upgrade complete in %.2f seconds." % completion_time)
    vprint("Upgraded cluster from " + str(args['start_version']) +
           " to " + str(args['end_version']))


if __name__ == "__main__":
    sys.exit(main())
#       Licensed to the Apache Software Foundation (ASF) under one
#       or more contributor license agreements.  See the NOTICE file
#       distributed with this work for additional information
#       regarding copyright ownership.  The ASF licenses this file
#       to you under the Apache License, Version 2.0 (the
#       "License"); you may not use this file except in compliance
#       with the License.  You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#       Unless required by applicable law or agreed to in writing,
#       software distributed under the License is distributed on an
#       "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#       KIND, either express or implied.  See the License for the
#       specific language governing permissions and limitations
#       under the License.

import pkg_resources
import unittest

from pylons import tmpl_context as c

from alluratest.controller import TestController, setup_basic_test, setup_global_objects
from allura.tests import decorators as td
from allura.lib import helpers as h
from allura.model import User
from allura import model as M
from forgetracker import model as TM

# important to be distinct from 'test' which ForgeGit uses, so that the
# tests can run in parallel and not clobber each other
test_project_with_repo = 'test2'
with_git = td.with_tool(test_project_with_repo, 'Git', 'src-git', 'Git',
                        type='git')


class TestStats(TestController):
    """Functional tests for per-user statistics (logins, wiki, tracker).

    Each test reads the relevant counters first and asserts on deltas,
    so the tests do not depend on a pristine stats state.
    """

    def setUp(self):
        super(TestStats, self).setUp()
        # give 'test-user' Admin rights on the 'test' project so the
        # tests below can act on its tools
        p = M.Project.query.get(shortname='test')
        p.add_user(M.User.by_username('test-user'), ['Admin'])

    def test_login(self):
        """A successful login bumps both total and last-month login counts."""
        user = User.by_username('test-user')
        init_logins = user.stats.tot_logins_count
        self.app.post('/auth/do_login', params=dict(
            username=user.username, password='foo'))
        assert user.stats.tot_logins_count == 1 + init_logins
        assert user.stats.getLastMonthLogins() == 1 + init_logins

    @td.with_user_project('test-admin')
    @td.with_tool('test', 'wiki', mount_point='wiki', mount_label='wiki', username='test-admin')
    def test_wiki_stats(self):
        """Wiki page creation/edit updates 'created'/'modified' artifact stats."""
        initial_artifacts = c.user.stats.getArtifacts()
        initial_wiki = c.user.stats.getArtifacts(art_type="Wiki")

        # creating a first page counts as one 'created' artifact
        self.app.post('/wiki/TestPage/update',
                      params=dict(title='TestPage', text='some text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 1 + initial_artifacts['created']
        assert artifacts['modified'] == initial_artifacts['modified']
        assert wiki['created'] == 1 + initial_wiki['created']
        assert wiki['modified'] == initial_wiki['modified']

        # a second new page: 'created' grows again, 'modified' unchanged
        self.app.post('/wiki/TestPage2/update',
                      params=dict(title='TestPage2', text='some text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 2 + initial_artifacts['created']
        assert artifacts['modified'] == initial_artifacts['modified']
        assert wiki['created'] == 2 + initial_wiki['created']
        assert wiki['modified'] == initial_wiki['modified']

        # editing an existing page bumps 'modified', not 'created'
        self.app.post('/wiki/TestPage2/update',
                      params=dict(title='TestPage2', text='some modified text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 2 + initial_artifacts['created']
        assert artifacts['modified'] == 1 + initial_artifacts['modified']
        assert wiki['created'] == 2 + initial_wiki['created']
        assert wiki['modified'] == 1 + initial_wiki['modified']

    @td.with_tool('test', 'tickets', mount_point='tickets', mount_label='tickets', username='test-admin')
    def test_tracker_stats(self):
        """Ticket lifecycle updates assigned/solved/revoked and artifact stats."""
        initial_tickets = c.user.stats.getTickets()
        initial_tickets_artifacts = c.user.stats.getArtifacts(
            art_type="Ticket")

        # create a ticket assigned to the current user
        self.app.post('/tickets/save_ticket',
                      params={'ticket_form.summary': 'test',
                              'ticket_form.assigned_to': str(c.user.username)},
                      extra_environ=dict(username=str(c.user.username)))
        ticketnum = str(TM.Ticket.query.get(summary='test').ticket_num)
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved']
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 1
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified']

        # closing the ticket counts as 'solved' and one more modification
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'footext3',
                              'ticket_form.status': 'closed'},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 1
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 1

        # a second, unassigned ticket only bumps 'created'
        self.app.post('/tickets/save_ticket',
                      params={'ticket_form.summary': 'test2'},
                      extra_environ=dict(username=str(c.user.username)))
        ticketnum = str(TM.Ticket.query.get(summary='test2').ticket_num)
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 1

        # assigning the second ticket to the current user
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'test2',
                              'ticket_form.assigned_to': str(c.user.username)},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 2
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 2

        # re-assigning away from the current user counts as 'revoked'
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'test2',
                              'ticket_form.assigned_to': 'test-user'},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 2
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked'] + 1
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 3


class TestGitCommit(TestController, unittest.TestCase):
    """Tests that commits in a fixture git repo show up in user stats."""

    def setUp(self):
        super(TestGitCommit, self).setUp()
        setup_basic_test()
        # the fixture repo's commits are authored as rcopeland@geek.net;
        # claiming that address attributes them to 'test-admin'
        user = User.by_username('test-admin')
        user.set_password('testpassword')
        M.EmailAddress.upsert('rcopeland@geek.net')
        user.claim_address('rcopeland@geek.net')
        self.setup_with_tools()

    @with_git
    @td.with_wiki
    def setup_with_tools(self):
        """Mount the fixture git repository and refresh its commit data."""
        setup_global_objects()
        h.set_context(test_project_with_repo, 'src-git',
                      neighborhood='Projects')
        repo_dir = pkg_resources.resource_filename(
            'forgeuserstats', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.name = 'testgit.git'
        self.repo = c.app.repo
        self.repo.refresh()
        self.rev = self.repo.commit('HEAD')

    @td.with_user_project('test-admin')
    def test_commit(self):
        # the fixture repository contains exactly 4 commits
        commits = c.user.stats.getCommits()
        assert commits['number'] == 4
        lmcommits = c.user.stats.getLastMonthCommits()
        assert lmcommits['number'] == 4
#! /usr/bin/env python3
# Source: https://github.com/atx/prometheus-tor_exporter/blob/master/prometheus-tor-exporter.py

import argparse
import stem
import stem.control
import time
from retrying import retry
import prometheus_client as prom
from prometheus_client.core import GaugeMetricFamily, REGISTRY


class StemCollector:
    """Prometheus collector that scrapes a Tor daemon via its control port.

    Wraps a ``stem.control.Controller`` and, on every Prometheus scrape,
    reconnects and queries the controller for traffic, status, flag and
    accounting information.
    """

    def __init__(self, tor):
        # ``tor`` is a stem Controller (TCP or unix-socket based)
        self.tor = tor
        self.authenticate()
        self.reconnect()

    @retry(wait_random_min=1000, wait_random_max=2000,
           stop_max_attempt_number=5)
    def authenticate(self):
        """Authenticate to the control port, retrying up to 5 times."""
        self.tor.authenticate()

    @retry(wait_random_min=1000, wait_random_max=2000,
           stop_max_attempt_number=5)
    def reconnect(self):
        """Re-establish the control connection, retrying up to 5 times."""
        self.tor.reconnect()

    def collect(self):
        """Yield one GaugeMetricFamily per exported Tor metric."""
        # the control connection may have dropped between scrapes
        self.reconnect()

        yield GaugeMetricFamily("tor_written_bytes",
                                "Tor written data counter",
                                value=int(self.tor.get_info("traffic/written")))
        yield GaugeMetricFamily("tor_read_bytes",
                                "Tor received data counter",
                                value=int(self.tor.get_info("traffic/read")))

        version = GaugeMetricFamily("tor_version", "Tor version as a label",
                                    labels=["version"])
        # BUG FIX: this used the module-global ``torctl`` (only defined when
        # run as a script), which raised NameError when the class was used
        # from an importing module. Use the wrapped controller instead.
        version.add_metric([str(self.tor.get_version())], 1)
        yield version

        version_status = GaugeMetricFamily(
            "tor_version_status",
            "Tor version status {new, old, unrecommended, recommended, new in series, obsolete, unknown} as a label",
            labels=["version_status"])
        version_status.add_metric([self.tor.get_info("status/version/current")], 1)
        yield version_status

        yield GaugeMetricFamily(
            "tor_network_liveness",
            "Indicates whether tor believes that the network is currently reachable",
            value=int(self.tor.get_info("network-liveness") == "up"))

        # one sample per OR/Dir port, e.g. "OR=1 DIR=0"
        reachable = GaugeMetricFamily(
            "tor_reachable",
            "Indicates whether our OR/Dir port is reachable",
            labels=["port"])
        for entry in self.tor.get_info("status/reachability-succeeded").split():
            k, v = entry.split("=")
            reachable.add_metric([k], int(v))
        yield reachable

        yield GaugeMetricFamily(
            "tor_circuit_established",
            "Indicates whether Tor is capable of establishing circuits",
            value=int(self.tor.get_info("status/circuit-established")))

        # For some reason, 0 actually means that Tor is active, keep it that
        # way
        yield GaugeMetricFamily(
            "tor_dormant",
            "Indicates whether Tor is currently active and building circuits (note that 0 corresponds to Tor being active)",
            value=int(self.tor.get_info("dormant")))

        effective_rate = self.tor.get_effective_rate(None)
        effective_burst_rate = self.tor.get_effective_rate(None, burst=True)
        if effective_rate is not None and effective_burst_rate is not None:
            yield GaugeMetricFamily("tor_effective_rate",
                                    "Shows Tor effective rate",
                                    value=int(effective_rate))
            yield GaugeMetricFamily("tor_effective_burst_rate",
                                    "Shows Tor effective burst rate",
                                    value=int(effective_burst_rate))

        try:
            fingerprint_value = self.tor.get_info("fingerprint")
            fingerprint = GaugeMetricFamily("tor_fingerprint",
                                            "Tor fingerprint as a label",
                                            labels=["fingerprint"])
            fingerprint.add_metric([fingerprint_value], 1)
            yield fingerprint
        except (stem.ProtocolError, stem.OperationFailed):
            # happens when not running in server mode
            pass

        nickname = GaugeMetricFamily("tor_nickname", "Tor nickname as a label",
                                     labels=["nickname"])
        nickname.add_metric([self.tor.get_conf("Nickname", "Unnamed")], 1)
        yield nickname

        # Connection counting
        # This won't work/will return wrong results if we are not running on
        # the same box as the Tor daemon is.
        # DisableDebuggerAttachment has to be set to 0
        # TODO: Count individual OUT/DIR/Control connections, see arm sources
        # for reference
        try:
            tor_pid = self.tor.get_pid()
            connections = stem.util.connection.get_connections(
                process_pid=tor_pid)
            yield GaugeMetricFamily(
                "tor_connection_count",
                "Amount of connections the Tor daemon has open",
                value=len(connections))
            # Let's hope this does not break when there is NTP sync or
            # something
            uptime = time.time() - stem.util.system.start_time(tor_pid)
            yield GaugeMetricFamily("tor_uptime", "Tor daemon uptime",
                                    value=uptime)
        except (OSError, IOError):
            # This happens if the PID does not exists (on another machine).
            pass

        try:
            has_flags = self.tor.get_network_status().flags
        except stem.DescriptorUnavailable:
            # The tor daemon fails with this for a few minutes after startup
            # (before figuring out its own flags?)
            has_flags = []
        except stem.ControllerError:
            # Happens when the daemon is not running in server mode
            has_flags = []
        flags = GaugeMetricFamily("tor_flags", "Has a Tor flag",
                                  labels=["flag"])
        for flag in ["Authority", "BadExit", "Exit", "Fast", "Guard", "HSDir",
                     "NoEdConsensus", "Stable", "Running", "Valid", "V2Dir"]:
            flags.add_metric([flag], int(flag in has_flags))
        yield flags

        try:
            accs = self.tor.get_accounting_stats()
            yield GaugeMetricFamily("tor_accounting_read_bytes",
                                    "Tor accounting read bytes",
                                    accs.read_bytes)
            yield GaugeMetricFamily("tor_accounting_left_read_bytes",
                                    "Tor accounting read bytes left",
                                    accs.read_bytes_left)
            yield GaugeMetricFamily("tor_accounting_read_limit_bytes",
                                    "Tor accounting read bytes limit",
                                    accs.read_limit)
            yield GaugeMetricFamily("tor_accounting_write_bytes",
                                    "Tor accounting write bytes",
                                    accs.written_bytes)
            yield GaugeMetricFamily("tor_accounting_left_write_bytes",
                                    "Tor accounting write bytes left",
                                    accs.write_bytes_left)
            yield GaugeMetricFamily("tor_accounting_write_limit_bytes",
                                    "Tor accounting write bytes limit",
                                    accs.write_limit)
        except stem.ControllerError:
            # happens when accounting isn't enabled
            pass


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--mode",
        help="Tor socker control mode (tcp or unix, default tcp)",
        default="tcp",
        choices=['tcp', 'unix']
    )
    parser.add_argument(
        "-a", "--address",
        help="Tor control IP address",
        default="127.0.0.1"
    )
    parser.add_argument(
        "-c", "--control-port",
        help="Tor control port",
        type=int,
        default=9051
    )
    parser.add_argument(
        "-s", "--control-socket",
        help="Tor control socket",
        default="/var/run/tor/control"
    )
    parser.add_argument(
        "-p", "--listen-port",
        help="Listen on this port",
        type=int,
        default=9099
    )
    parser.add_argument(
        "-b", "--bind-addr",
        help="Bind this address",
        default="localhost"
    )
    args = parser.parse_args()

    if args.mode == 'unix':
        torctl = stem.control.Controller.from_socket_file(args.control_socket)
    else:
        torctl = stem.control.Controller.from_port(args.address,
                                                   port=args.control_port)
    coll = StemCollector(torctl)
    REGISTRY.register(coll)

    print("Starting on %s:%s" % (args.bind_addr, args.listen_port))
    prom.start_http_server(args.listen_port, addr=args.bind_addr)
    # We can't exit as start_http_server starts a daemon thread which would
    # get killed.
    while True:
        time.sleep(1000)
#
#  Copyright 2019 The FATE Authors. All Rights Reserved.
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#

import argparse

from pipeline.backend.pipeline import PipeLine
from pipeline.component import HeteroFeatureBinning, HeteroFeatureSelection, DataStatistics, Evaluation
from pipeline.component.scale import FeatureScale
from pipeline.component.dataio import DataIO
from pipeline.component.hetero_lr import HeteroLR
from pipeline.component.intersection import Intersection
from pipeline.component.reader import Reader
from pipeline.interface.data import Data
from pipeline.interface.model import Model
from pipeline.utils.tools import load_job_config
from pipeline.runtime.entity import JobParameters


def main(config="../../config.yaml", namespace=""):
    """Build and run a hetero-LR FATE pipeline demo.

    Stages: read -> dataio -> intersection -> binning/statistics ->
    feature selection -> scaling -> HeteroLR -> evaluation, with a second
    (validation) data path reusing the models fit on the first.

    config: path to a job config yaml, or an already-loaded config object.
    namespace: unused here; kept for interface consistency with the other
    pipeline demos.
    """
    # obtain config
    if isinstance(config, str):
        config = load_job_config(config)
    parties = config.parties
    guest = parties.guest[0]
    host = parties.host[0]
    arbiter = parties.arbiter[0]

    # train and test tables; host side uses tag-formatted data
    guest_train_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    guest_test_data = {"name": "breast_hetero_guest", "namespace": "experiment"}
    host_train_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}
    host_test_data = {"name": "breast_hetero_host_tag_value", "namespace": "experiment"}

    # initialize pipeline
    pipeline = PipeLine()
    # set job initiator
    pipeline.set_initiator(role='guest', party_id=guest)
    # set participants information
    pipeline.set_roles(guest=guest, host=host, arbiter=arbiter)

    # define Reader components to read in data
    reader_0 = Reader(name="reader_0")
    reader_1 = Reader(name="reader_1")
    # configure Reader for guest
    reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_1.get_party_instance(role='guest', party_id=guest).component_param(table=guest_test_data)
    # configure Reader for host
    reader_0.get_party_instance(role='host', party_id=host).component_param(table=host_train_data)
    reader_1.get_party_instance(role='host', party_id=host).component_param(table=host_test_data)

    # define DataIO components
    dataio_0 = DataIO(name="dataio_0")  # start component numbering at 0
    dataio_1 = DataIO(name="dataio_1")  # start component numbering at 1

    # guest-side DataIO: dense labeled data with mean missing-fill
    param = {
        "with_label": True,
        "label_name": "y",
        "label_type": "int",
        "output_format": "dense",
        "missing_fill": True,
        "missing_fill_method": "mean",
        "outlier_replace": False,
        "outlier_replace_method": "designated",
        "outlier_replace_value": 0.66,
        "outlier_impute": "-9999"
    }
    # get DataIO party instance of guest
    dataio_0_guest_party_instance = dataio_0.get_party_instance(role='guest', party_id=guest)
    # configure DataIO for guest
    dataio_0_guest_party_instance.component_param(**param)
    # configure the guest party instance of dataio_1 the same way
    dataio_1.get_party_instance(role='guest', party_id=guest).component_param(**param)

    # host-side DataIO: unlabeled tag-with-value input, ';'-delimited
    param = {
        "input_format": "tag",
        "with_label": False,
        "tag_with_value": True,
        "delimitor": ";",
        "output_format": "dense"
    }
    dataio_0.get_party_instance(role='host', party_id=host).component_param(**param)
    dataio_1.get_party_instance(role='host', party_id=host).component_param(**param)

    # define Intersection components
    intersection_0 = Intersection(name="intersection_0", intersect_method="raw")
    intersection_1 = Intersection(name="intersection_1", intersect_method="raw")

    # IV-driven optimal binning over all feature columns
    param = {
        "name": 'hetero_feature_binning_0',
        "method": 'optimal',
        "optimal_binning_param": {
            "metric_method": "iv",
            "init_bucket_method": "quantile"
        },
        "bin_indexes": -1
    }
    hetero_feature_binning_0 = HeteroFeatureBinning(**param)

    statistic_0 = DataStatistics(name='statistic_0')

    # feature selection combining manual, unique-value, IV and statistic
    # filters; fed by the binning and statistics models below
    param = {
        "name": 'hetero_feature_selection_0',
        "filter_methods": ["manually", "unique_value", "iv_filter",
                           "statistic_filter"],
        "manually_param": {
            "filter_out_indexes": [1, 2],
            "filter_out_names": ["x3", "x4"]
        },
        "unique_param": {
            "eps": 1e-6
        },
        "iv_param": {
            "metrics": ["iv", "iv"],
            "filter_type": ["top_k", "threshold"],
            "take_high": [True, True],
            "threshold": [10, 0.1]
        },
        "statistic_param": {
            "metrics": ["coefficient_of_variance", "skewness"],
            "filter_type": ["threshold", "threshold"],
            "take_high": [True, False],
            "threshold": [0.001, -0.01]
        },
        "select_col_indexes": -1
    }
    hetero_feature_selection_0 = HeteroFeatureSelection(**param)
    # selection_1 replays selection_0's fitted model on the test path
    hetero_feature_selection_1 = HeteroFeatureSelection(name='hetero_feature_selection_1')

    param = {
        "name": "hetero_scale_0",
        "method": "standard_scale"
    }
    hetero_scale_0 = FeatureScale(**param)
    hetero_scale_1 = FeatureScale(name='hetero_scale_1')

    # logistic regression hyper-parameters
    param = {
        "penalty": "L2",
        "optimizer": "nesterov_momentum_sgd",
        "tol": 1e-4,
        "alpha": 0.01,
        "max_iter": 5,
        "early_stop": "diff",
        "batch_size": -1,
        "learning_rate": 0.15,
        "init_param": {
            "init_method": "zeros"
        },
        "validation_freqs": None,
        "early_stopping_rounds": None
    }

    hetero_lr_0 = HeteroLR(name='hetero_lr_0', **param)
    evaluation_0 = Evaluation(name='evaluation_0')

    # add components to pipeline, in order of task execution
    pipeline.add_component(reader_0)
    pipeline.add_component(reader_1)
    pipeline.add_component(dataio_0, data=Data(data=reader_0.output.data))
    # dataio_1 reuses the model fit by dataio_0
    pipeline.add_component(dataio_1,
                           data=Data(data=reader_1.output.data),
                           model=Model(dataio_0.output.model))
    # set data input sources of intersection components
    pipeline.add_component(intersection_0, data=Data(data=dataio_0.output.data))
    pipeline.add_component(intersection_1, data=Data(data=dataio_1.output.data))
    pipeline.add_component(hetero_feature_binning_0, data=Data(data=intersection_0.output.data))
    pipeline.add_component(statistic_0, data=Data(data=intersection_0.output.data))
    # selection uses the binning and statistics outputs as isometric models
    pipeline.add_component(hetero_feature_selection_0,
                           data=Data(data=intersection_0.output.data),
                           model=Model(isometric_model=[hetero_feature_binning_0.output.model,
                                                        statistic_0.output.model]))
    pipeline.add_component(hetero_feature_selection_1,
                           data=Data(data=intersection_1.output.data),
                           model=Model(hetero_feature_selection_0.output.model))
    pipeline.add_component(hetero_scale_0, data=Data(data=hetero_feature_selection_0.output.data))
    pipeline.add_component(hetero_scale_1,
                           data=Data(data=hetero_feature_selection_1.output.data),
                           model=Model(hetero_scale_0.output.model))
    # set train & validate data of hetero_lr_0 component
    pipeline.add_component(hetero_lr_0, data=Data(train_data=hetero_scale_0.output.data,
                                                  validate_data=hetero_scale_1.output.data))
    pipeline.add_component(evaluation_0, data=Data(data=[hetero_lr_0.output.data]))

    # compile pipeline once finished adding modules, this step will form conf
    # and dsl files for running job
    pipeline.compile()

    # fit model
    pipeline.fit()
    # query component summary
    print(pipeline.get_component("hetero_lr_0").get_summary())


if __name__ == "__main__":
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str,
                        help="config file")
    args = parser.parse_args()
    if args.config is not None:
        main(args.config)
    else:
        main()
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import itertools from conary.deps import deps from conary.local import deptable from conary.conaryclient import resolve from conary.repository import trovesource from rmake.lib import flavorutil class TroveSourceMesh(trovesource.SearchableTroveSource): def __init__(self, extraSource, mainSource, repos): trovesource.SearchableTroveSource.__init__(self) self.extraSource = extraSource self.mainSource = mainSource self.repos = repos trovesource.SearchableTroveSource.__init__(self) self.searchAsRepository() for source in self.mainSource, self.repos, self.extraSource: if not source: continue self._allowNoLabel = source._allowNoLabel self._bestFlavor = source._bestFlavor self._getLeavesOnly = source._getLeavesOnly self._flavorCheck = source._flavorCheck break self.sources = [ self.extraSource] if self.mainSource: self.sources.append(self.mainSource) if self.repos: self.sources.append(self.repos) def __getattr__(self, key): if self.repos: return getattr(self.repos, key) return getattr(self.mainSource, key) def getFileVersions(self, *args, **kw): if self.repos: return self.repos.getFileVersions(*args, **kw) return self.mainSource.getFileVersions(*args, **kw) def close(self): pass def hasTroves(self, troveList): if self.repos: results = self.repos.hasTroves(troveList) if isinstance(results, dict): results = [ results[x] for x in troveList ] else: results = [ False for x in troveList ] if self.extraSource: 
hasTroves = self.extraSource.hasTroves(troveList) results = [ x[0] or x[1] for x in itertools.izip(results, hasTroves) ] if self.mainSource: hasTroves = self.mainSource.hasTroves(troveList) results = [ x[0] or x[1] for x in itertools.izip(results, hasTroves) ] return dict(itertools.izip(troveList, results)) def trovesByName(self, name): if self.mainSource: return list(set(self.mainSource.trovesByName(name)) | set(self.extraSource.trovesByName(name))) else: return self.extraSource.trovesByName(name) def getTroves(self, troveList, *args, **kw): if self.repos: return self.repos.getTroves(troveList, *args, **kw) else: return self.mainSource.getTroves(troveList, *args, **kw) def _mergeTroveQuery(self, resultD, response): if isinstance(resultD, dict): for troveName, troveVersions in response.iteritems(): if not resultD.has_key(troveName): resultD[troveName] = {} versionDict = resultD[troveName] for version, flavors in troveVersions.iteritems(): if version not in versionDict: versionDict[version] = [] resultD[troveName][version].extend(flavors) else: if not resultD: for resultList in response: resultD.append(list(resultList)) else: for idx, resultList in enumerate(response): resultD[idx].extend(resultList) return resultD def _mergeListTroveQuery(self, resultList, result2, altFlavors, altFlavors2, map, query): newMap = [] newQuery = [] for idx, items in enumerate(result2): if not items: newMap.append(map[idx]) newQuery.append(query[idx]) if altFlavors2: altFlavors[map[idx]].extend(altFlavors2[idx]) else: resultList[map[idx]].extend(items) altFlavors[map[idx]] = [] return newMap, newQuery def _call(self, fn, query, *args, **kw): if not isinstance(query, dict): query = list(query) result, altFlavors = getattr(self.extraSource, fn)(query, *args, **kw) map = [] newQuery = [] for idx, item in enumerate(result): if not item: map.append(idx) newQuery.append(query[idx]) if self.mainSource: result2, altFlavors2 = getattr(self.mainSource, fn)(newQuery, *args, **kw) newQuery, map = 
self._mergeListTroveQuery(result, result2, altFlavors, altFlavors2, map, newQuery) if self.repos: result3, altFlavors3 = getattr(self.repos, fn)(newQuery, *args, **kw) newQuery, map = self._mergeListTroveQuery(result, result3, altFlavors, altFlavors3, map, newQuery) result = result, altFlavors else: query = dict(query) d1 = getattr(self.extraSource, fn)(query, *args, **kw) result = {} self._mergeTroveQuery(result, d1) for name in result: query.pop(name) if self.mainSource: d2 = getattr(self.mainSource, fn)(query, *args, **kw) self._mergeTroveQuery(result, d2) if self.repos: d3 = getattr(self.repos, fn)(query, *args, **kw) self._mergeTroveQuery(result, d3) return result def _addLabelsToQuery(self, query): if isinstance(query, dict): newQuery = query.copy() names = query for name in query: labels = set(x[1].trailingLabel() for x in self.extraSource.trovesByName(name)) #asserts there is only one flavorList flavorList, = set(x and tuple(x) for x in query[name].values()) for label in labels: if label not in query[name]: newQuery[name][label] = flavorList map = None else: map = {} newQuery = list(query) names = [(x[0], x[1][0], x[1][2]) for x in enumerate(query)] for idx, name, flavor in names: labels = set(x[1].trailingLabel() for x in self.extraSource.trovesByName(name)) for label in labels: map[len(newQuery)] = idx newQuery.append((name, label, flavor)) return newQuery, map def _compressResults(self, results, map): if map is None: return results results, altFlavors = results finalResults = [] for idx, result in enumerate(results): if idx in map: if result: finalResults[map[idx]].extend(result) altFlavors[map[idx]] = [] else: altFlavors[map[idx]].extend(altFlavors) else: finalResults.append(result) return finalResults, altFlavors def getTroveLatestByLabel(self, query, *args, **kw): map = None if self.expandLabelQueries: query, map = self._addLabelsToQuery(query) results = self._call('getTroveLatestByLabel', query, *args, **kw) return self._compressResults(results, map) 
    def getTroveLeavesByLabel(self, query, *args, **kw):
        # Optionally widen the query with extraSource's labels, fan it out to
        # all sources, then fold the expanded entries back together.
        map = None
        if self.expandLabelQueries:
            query, map = self._addLabelsToQuery(query)
        results = self._call('getTroveLeavesByLabel', query, *args, **kw)
        return self._compressResults(results, map)

    def getTroveVersionsByLabel(self, query, *args, **kw):
        map = None
        if self.expandLabelQueries:
            query, map = self._addLabelsToQuery(query)
        results = self._call('getTroveVersionsByLabel', query, *args, **kw)
        return self._compressResults(results, map)

    def getTroveLeavesByBranch(self, query, *args, **kw):
        # Branch queries are never label-expanded; just fan out to sources.
        return self._call('getTroveLeavesByBranch', query, *args, **kw)

    def getTroveVersionsByBranch(self, query, *args, **kw):
        return self._call('getTroveVersionsByBranch', query, *args, **kw)

    def getTroveVersionFlavors(self, query, *args, **kw):
        return self._call('getTroveVersionFlavors', query, *args, **kw)

    def findTroves(self, labelPath, troveSpecs, defaultFlavor=None,
                   acrossLabels=False, acrossFlavors=False,
                   affinityDatabase=None, allowMissing=False,
                   bestFlavor=None, getLeaves=None,
                   troveTypes=trovesource.TROVE_QUERY_PRESENT,
                   exactFlavors=False, **kw):
        """Find troveSpecs across every attached source, merging the matches
        per spec.  Missing specs are only asserted at the end (each
        individual source is queried with allowMissing=True)."""
        # With no mainSource there is nothing to fan out to; defer to the
        # generic implementation.
        if self.mainSource is None:
            return trovesource.SearchableTroveSource.findTroves(self,
                        labelPath, troveSpecs, defaultFlavor=defaultFlavor,
                        acrossLabels=acrossLabels,
                        acrossFlavors=acrossFlavors,
                        affinityDatabase=affinityDatabase,
                        troveTypes=troveTypes,
                        exactFlavors=exactFlavors,
                        allowMissing=True, **kw)
        results = {}
        # Only forward bestFlavor/getLeaves when the caller supplied them, so
        # each source otherwise keeps its own default behavior.
        if bestFlavor is not None:
            kw.update(bestFlavor=bestFlavor)
        if getLeaves is not None:
            kw.update(getLeaves=getLeaves)
        for source in self.sources:
            if source == self.repos:
                # we need the labelPath for repos, otherwise
                # we allow other algorithms to determine which
                # version of a particular trove to use - the same ones
                # used during dep resolution.  Sometimes this will not
                # be a package on the ILP.
searchLabelPath = labelPath else: searchLabelPath = None foundTroves = source.findTroves(searchLabelPath, troveSpecs, defaultFlavor=defaultFlavor, acrossLabels=acrossLabels, acrossFlavors=acrossFlavors, affinityDatabase=affinityDatabase, troveTypes=troveTypes, exactFlavors=exactFlavors, allowMissing=True, **kw) for troveSpec, troveTups in foundTroves.iteritems(): results.setdefault(troveSpec, []).extend(troveTups) if not allowMissing: for troveSpec in troveSpecs: assert(troveSpec in results) return results def resolveDependencies(self, label, depList, *args, **kw): sugg = self.extraSource.resolveDependencies(label, depList, *args, **kw) sugg2 = self.repos.resolveDependencies(label, depList, *args, **kw) for depSet, trovesByDep in sugg.iteritems(): for idx, troveList in enumerate(trovesByDep): if not troveList: troveList.extend(sugg2[depSet][idx]) return sugg def resolveDependenciesByGroups(self, troveList, depList): sugg = self.extraSource.resolveDependencies(None, depList) sugg2 = self.repos.resolveDependenciesByGroups(troveList, depList) for depSet, trovesByDep in sugg.iteritems(): for idx, troveList in enumerate(trovesByDep): if not troveList: troveList.extend(sugg2[depSet][idx]) return sugg class DepHandlerSource(TroveSourceMesh): def __init__(self, builtTroveSource, troveListList, repos=None, useInstallLabelPath=True, expandLabelQueries=False): if repos: flavorPrefs = repos._flavorPreferences else: flavorPrefs = [] stack = trovesource.TroveSourceStack() stack.searchWithFlavor() stack.setFlavorPreferenceList(flavorPrefs) self.setFlavorPreferenceList(flavorPrefs) self.expandLabelQueries = expandLabelQueries self.resolveTroveSource = None if isinstance(troveListList, trovesource.SimpleTroveSource): troveListList.setFlavorPreferenceList(flavorPrefs) self.stack.addSource(troveListList) self.resolveTroveSource = troveListList else: if troveListList: for troveList in troveListList: allTroves = [ x.getNameVersionFlavor() for x in troveList ] childTroves = 
itertools.chain(* (x.iterTroveList(weakRefs=True, strongRefs=True) for x in troveList)) allTroves.extend(childTroves) source = trovesource.SimpleTroveSource(allTroves) source.searchWithFlavor() source.setFlavorPreferenceList(flavorPrefs) stack.addSource(source) self.resolveTroveSource = stack if not useInstallLabelPath: repos = None if not stack.sources: stack = None TroveSourceMesh.__init__(self, builtTroveSource, stack, repos) def __repr__(self): return 'DepHandlerSource(%r,%r,%r)' % (self.extraSource, self.mainSource, self.repos) def copy(self): inst = self.__class__(self.source, None, self.repos) inst.repos = self.repos return inst class BuiltTroveSource(trovesource.SimpleTroveSource): """ Trove source that is used for dep resolution and buildreq satisfaction only - it does not contain references to the changesets that are added """ def __init__(self, troves, repos): self.depDb = deptable.DependencyDatabase() trovesource.SimpleTroveSource.__init__(self) self.setFlavorPreferenceList(repos._flavorPreferences) self.idMap = [] self.idx = 0 for trove in troves: self.addTrove(trove.getNameVersionFlavor(), trove.getProvides(), trove.getRequires()) self.searchWithFlavor() def close(self): self.depDb.db.close() def __del__(self): self.depDb.db.close() def addTrove(self, troveTuple, provides, requires): self._trovesByName.setdefault(troveTuple[0],set()).add(troveTuple) self.idMap.append(troveTuple) self.depDb.add(self.idx, provides, requires) self.idx += 1 def addChangeSet(self, cs): for idx, trvCs in enumerate(cs.iterNewTroveList()): self.addTrove(trvCs.getNewNameVersionFlavor(), trvCs.getProvides(), trvCs.getRequires()) def resolveDependencies(self, label, depList, leavesOnly=False): suggMap = self.depDb.resolve(label, depList) for depSet, solListList in suggMap.iteritems(): newSolListList = [] for solList in solListList: if not self._allowNoLabel and label: newSolListList.append([ self.idMap[x] for x in solList if self.idMap[x][1].trailingLabel == label]) else: 
                    newSolListList.append([ self.idMap[x] for x in solList ])
            suggMap[depSet] = newSolListList
        return suggMap


class ResolutionMesh(resolve.BasicResolutionMethod):
    # Combines two resolution methods: extraMethod (for freshly-built
    # troves) and mainMethod (resolveTroves lists / label path).
    def __init__(self, cfg, extraMethod, mainMethod):
        resolve.BasicResolutionMethod.__init__(self, cfg, None)
        self.extraMethod = extraMethod
        self.mainMethod = mainMethod

    def prepareForResolution(self, depList):
        # depList entries are (troveTup, depSet) pairs; remember the depSets
        # so resolveDependencies can fill empty slots later.
        self.depList = [ x[1] for x in depList]
        self.extraMethod.prepareForResolution(depList)
        return self.mainMethod.prepareForResolution(depList)

    def resolveDependencies(self):
        suggMap = self.extraMethod.resolveDependencies()
        suggMap2 = self.mainMethod.resolveDependencies()
        # Guarantee an (empty) slot list in both maps for every dep set.
        for depSet in self.depList:
            if depSet not in suggMap:
                suggMap[depSet] = [[] for x in depSet.iterDeps() ]
            if depSet not in suggMap2:
                suggMap2[depSet] = [[] for x in depSet.iterDeps() ]
        # Merge extraMethod's suggestions into mainMethod's map and return
        # the merged mainMethod map.
        for depSet, results in suggMap.iteritems():
            mainResults = suggMap2[depSet]
            for troveList1, troveList2 in itertools.izip(results,
                                                         mainResults):
                troveList2.extend(troveList1)
        return suggMap2

    def searchLeavesOnly(self):
        self.extraMethod.searchLeavesOnly()
        self.mainMethod.searchLeavesOnly()

    def searchLeavesFirst(self):
        self.extraMethod.searchLeavesFirst()
        self.mainMethod.searchLeavesFirst()

    def searchAllVersions(self):
        self.extraMethod.searchAllVersions()
        self.mainMethod.searchAllVersions()

    def selectResolutionTrove(self, requiredBy, dep, depClass,
                              troveTups, installFlavor, affFlavorDict):
        """
        determine which of the given set of troveTups is the best
        choice for installing on this system.  Because the repository
        didn't try to determine which flavors are best for our system,
        we have to filter the troves locally.
        """
        #NOTE: this method should be a match exactly for the one in
        # conary.repository.resolvemethod for conary 1.2 and later.
        # when we drop support for earlier conary's we can drop this method.

        # we filter the troves in the following ways:
        # 1. prefer troves that match affinity flavor + are on the affinity
        #    label. (And don't drop an arch)
        # 2. fall back to troves that match the install flavor.

        # If we don't match an affinity flavor + label, then use flavor
        # preferences and flavor scoring to select the best flavor.
        # We'll have to check

        # Within these two categories:
        # 1. filter via flavor preferences for each trove (this may result
        #    in an older version for some troves)
        # 2. only leave the latest version for each trove
        # 3. pick the best flavor out of the remaining

        affinityMatches = []
        affinityFlavors = []
        otherMatches = []
        otherFlavors = []

        if installFlavor is not None and not installFlavor.isEmpty():
            flavoredList = []
            for troveTup in troveTups:
                label = troveTup[1].trailingLabel()
                affTroves = affFlavorDict[troveTup[0]]
                found = False
                if affTroves:
                    for affName, affVersion, affFlavor in affTroves:
                        # Affinity only applies on the same label.
                        if affVersion.trailingLabel() != label:
                            continue
                        newFlavor = deps.overrideFlavor(installFlavor,
                                        affFlavor,
                                        mergeType=deps.DEP_MERGE_TYPE_PREFS)
                        # implement never drop an arch for dep resolution
                        currentArch = deps.getInstructionSetFlavor(affFlavor)
                        if not troveTup[2].stronglySatisfies(currentArch):
                            continue
                        if newFlavor.satisfies(troveTup[2]):
                            affinityMatches.append((newFlavor, troveTup))
                            affinityFlavors.append(troveTup[2])
                            found = True
                # Once any affinity match exists, plain install-flavor
                # matches are no longer collected.
                if not found and not affinityMatches:
                    if installFlavor.satisfies(troveTup[2]):
                        otherMatches.append((installFlavor, troveTup))
                        otherFlavors.append(troveTup[2])
        else:
            otherMatches = [ (None, x) for x in troveTups ]
            otherFlavors = [x[2] for x in troveTups]
        if affinityMatches:
            allFlavors = affinityFlavors
            flavoredList = affinityMatches
        else:
            allFlavors = otherFlavors
            flavoredList = otherMatches

        # Now filter by flavor preferences.
        newFlavors = []
        if self.flavorPreferences:
            # Take the first preference level that matches anything at all.
            for flavor in self.flavorPreferences:
                for trvFlavor in allFlavors:
                    if trvFlavor.stronglySatisfies(flavor):
                        newFlavors.append(trvFlavor)
                if newFlavors:
                    break
        if newFlavors:
            flavoredList = [ x for x in flavoredList
                             if x[1][2] in newFlavors ]
        return self._selectMatchingResolutionTrove(requiredBy, dep,
                                                   depClass, flavoredList)

    def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
                                       flavoredList):
        # this function should be an exact match of
        # resolvemethod._selectMatchingResolutionTrove from conary 1.2 and
        # later.
        # finally, filter by latest then score.
        trovesByNL = {}
        for installFlavor, (n,v,f) in flavoredList:
            l = v.trailingLabel()
            myTimeStamp = v.timeStamps()[-1]
            if installFlavor is None:
                myScore = 0
            else:
                # FIXME: we should cache this scoring from before.
                myScore = installFlavor.score(f)

            # Keep, per (name, label), the candidate with the latest
            # timestamp, breaking ties by flavor score.
            if (n,l) in trovesByNL:
                curScore, curTimeStamp, curTup = trovesByNL[n,l]
                if curTimeStamp > myTimeStamp:
                    continue
                if curTimeStamp == myTimeStamp:
                    if myScore < curScore:
                        continue

            trovesByNL[n,l] = (myScore, myTimeStamp, (n,v,f))

        scoredList = sorted(trovesByNL.itervalues())
        if not scoredList:
            return None
        else:
            # highest score, then latest timestamp, then name.
            return scoredList[-1][-1]

    # If the installed conary already provides these methods (1.2+), prefer
    # its implementations over the local copies above.
    if hasattr(resolve.BasicResolutionMethod,
               '_selectMatchingResolutionTrove'):
        selectResolutionTrove = resolve.BasicResolutionMethod.selectResolutionTrove
        _selectMatchingResolutionTrove = resolve.BasicResolutionMethod._selectMatchingResolutionTrove


class rMakeResolveSource(ResolutionMesh):
    """
    Resolve by trove list first and then resort back to label path.
    Also respects intra-trove deps.  If foo:runtime requires foo:lib, it
    requires exactly the same version of foo:lib.
""" def __init__(self, cfg, builtTroveSource, resolveTroveSource, troveLists, repos): self.removeFileDependencies = False self.builtTroveSource = builtTroveSource self.troveLists = troveLists self.resolveTroveSource = resolveTroveSource self.repos = repos self.cfg = cfg self.repos = repos self.flavor = cfg.flavor sources = [] builtResolveSource = resolve.BasicResolutionMethod(cfg, None) builtResolveSource.setTroveSource(builtTroveSource) sources = [] if troveLists: troveListSources = [resolve.DepResolutionByTroveList(cfg, None, x) for x in troveLists] [ x.setTroveSource(self.repos) for x in troveListSources ] sources.extend(troveListSources) mainMethod = resolve.ResolutionStack(*sources) flavorPreferences = self.repos._flavorPreferences for source in sources: source.setFlavorPreferences(flavorPreferences) ResolutionMesh.__init__(self, cfg, builtResolveSource, mainMethod) self.setFlavorPreferences(flavorPreferences) def close(self): self.builtTroveSource.close() def setLabelPath(self, labelPath): if labelPath: source = resolve.DepResolutionByLabelPath(self.cfg, None, labelPath) source.setTroveSource(self.repos) self.mainMethod.addSource(source) def prepareForResolution(self, depList): # need to get intratrove deps while we still have the full dependency # request information - including what trove the dep arises from. 
        intraDeps = self._getIntraTroveDeps(depList)
        self.intraDeps = intraDeps
        return ResolutionMesh.prepareForResolution(self, depList)

    def _resolveIntraTroveDeps(self, intraDeps):
        """Filter intra-package trove deps (e.g. foo:runtime -> foo:lib at
        the same version) down to the ones the trove source actually has."""
        trovesToGet = []
        for depSet, deps in intraDeps.iteritems():
            for dep, troveTups in deps.iteritems():
                trovesToGet.extend(troveTups)
        hasTroves = self.troveSource.hasTroves(trovesToGet)
        # hasTroves may come back as a parallel list; normalize to a dict.
        if isinstance(hasTroves, list):
            hasTroves = dict(itertools.izip(trovesToGet, hasTroves))

        results = {}
        for depSet, deps in intraDeps.iteritems():
            d = {}
            results[depSet] = d
            for dep, troveTups in deps.iteritems():
                d[dep] = [ x for x in troveTups if hasTroves[x] ]
        return results

    def resolveDependencies(self):
        sugg = ResolutionMesh.resolveDependencies(self)
        intraDepSuggs = self._resolveIntraTroveDeps(self.intraDeps)
        # An available intra-trove suggestion overrides whatever the generic
        # resolution chain found for that dep slot.
        for depSet, intraDeps in self.intraDeps.iteritems():
            for idx, (depClass, dep) in enumerate(depSet.iterDeps(sort=True)):
                if depClass.tag == deps.DEP_CLASS_TROVES:
                    if (dep in intraDepSuggs[depSet]
                        and intraDepSuggs[depSet][dep]):
                        sugg[depSet][idx] = intraDepSuggs[depSet][dep]
        return sugg

    def _getIntraTroveDeps(self, depList):
        """Collect trove deps where a component requires a sibling component
        of its own package; those must resolve to the requester's version."""
        suggsByDep = {}
        intraDeps = {}
        for troveTup, depSet in depList:
            pkgName = troveTup[0].split(':', 1)[0]
            for dep in depSet.iterDepsByClass(deps.TroveDependencies):
                if (dep.name.startswith(pkgName)
                    and dep.name.split(':', 1)[0] == pkgName):
                    # Pin to the requesting trove's version and flavor.
                    troveToGet = (dep.name, troveTup[1], troveTup[2])
                    l = suggsByDep.setdefault(dep, [])
                    l.append(troveToGet)
                    intraDeps.setdefault(depSet, {}).setdefault(dep, l)
        return intraDeps

    def filterDependencies(self, depList):
        # Optionally strip file deps, dropping dep sets left empty by that.
        if self.removeFileDependencies:
            depList = [(x[0], flavorutil.removeFileDeps(x[1]))
                       for x in depList ]
            return [ x for x in depList if not x[1].isEmpty() ]
        return depList

    def _selectMatchingResolutionTrove(self, requiredBy, dep, depClass,
                                       flavoredList):
        # if all packages are the same and only their flavor score or timestamp
        # is keeping one from being picked over the other, prefer the
        # newly built package.
        builtTroves = []
        resolveTroves = []
        newList = flavoredList
        if self.resolveTroveSource:
            minResolveIdx = len(self.resolveTroveSource.sources)
        ilp = self.cfg.installLabelPath
        for installFlavor, troveTup in flavoredList:
            if self.extraMethod.troveSource.hasTrove(*troveTup):
                # Freshly built trove: rank by its (parent) branch label.
                branch = troveTup[1].branch()
                if branch.hasParentBranch():
                    label = branch.parentBranch().label()
                else:
                    label = branch.label()
                # NOTE(review): 'list' shadows the builtin within this loop;
                # kept as in the original.
                list = builtTroves
            elif (self.resolveTroveSource
                  and self.resolveTroveSource.hasTrove(*troveTup)):
                # if a package is both in the resolveTroves list
                # and found via ILP, it might be in this list even
                # though it was not found via resolveTroves.  So we
                # limit results to ones found as early as possible
                # in the resolveTroves list
                for resolveIdx, source in enumerate(self.resolveTroveSource.sources):
                    if source.hasTrove(*troveTup):
                        if resolveIdx < minResolveIdx:
                            # Found earlier than anything so far; restart
                            # collection at this earlier source.
                            resolveTroves = []
                            minResolveIdx = resolveIdx
                        break
                if resolveIdx > minResolveIdx:
                    continue
                list = resolveTroves
                label = troveTup[1].trailingLabel()
            else:
                continue
            # Rank by position on the install label path; unknown labels
            # sort after every ILP entry.
            if label in ilp:
                index = ilp.index(label)
            else:
                index = len(ilp)
            list.append((index, (installFlavor, troveTup)))
        # Built troves beat resolveTroves; within each class keep only the
        # candidates with the best (lowest) ILP index.
        if builtTroves:
            minIndex = sorted(builtTroves, key=lambda x: x[0])[0][0]
            newList = [ x[1] for x in builtTroves if x[0] == minIndex ]
        elif resolveTroves:
            minIndex = sorted(resolveTroves, key=lambda x: x[0])[0][0]
            newList = [ x[1] for x in resolveTroves if x[0] == minIndex ]
        return ResolutionMesh._selectMatchingResolutionTrove(self, requiredBy,
                                                             dep, depClass,
                                                             newList)
""" Merge OpenType Layout tables (GDEF / GPOS / GSUB). """ from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * from fontTools.misc import classifyTools from fontTools.ttLib.tables import otTables as ot from fontTools.ttLib.tables import otBase as otBase from fontTools.ttLib.tables.DefaultTable import DefaultTable from fontTools.varLib import builder, varStore from fontTools.varLib.varStore import VarStoreInstancer from functools import reduce class Merger(object): def __init__(self, font=None): self.font = font @classmethod def merger(celf, clazzes, attrs=(None,)): assert celf != Merger, 'Subclass Merger instead.' if 'mergers' not in celf.__dict__: celf.mergers = {} if type(clazzes) == type: clazzes = (clazzes,) if type(attrs) == str: attrs = (attrs,) def wrapper(method): assert method.__name__ == 'merge' done = [] for clazz in clazzes: if clazz in done: continue # Support multiple names of a clazz done.append(clazz) mergers = celf.mergers.setdefault(clazz, {}) for attr in attrs: assert attr not in mergers, \ "Oops, class '%s' has merge function for '%s' defined already." 
% (clazz.__name__, attr) mergers[attr] = method return None return wrapper @classmethod def mergersFor(celf, thing, _default={}): typ = type(thing) for celf in celf.mro(): mergers = getattr(celf, 'mergers', None) if mergers is None: break; m = celf.mergers.get(typ, None) if m is not None: return m return _default def mergeObjects(self, out, lst, exclude=()): keys = sorted(vars(out).keys()) assert all(keys == sorted(vars(v).keys()) for v in lst), \ (keys, [sorted(vars(v).keys()) for v in lst]) mergers = self.mergersFor(out) defaultMerger = mergers.get('*', self.__class__.mergeThings) try: for key in keys: if key in exclude: continue value = getattr(out, key) values = [getattr(table, key) for table in lst] mergerFunc = mergers.get(key, defaultMerger) mergerFunc(self, value, values) except Exception as e: e.args = e.args + ('.'+key,) raise def mergeLists(self, out, lst): count = len(out) assert all(count == len(v) for v in lst), (count, [len(v) for v in lst]) for i,(value,values) in enumerate(zip(out, zip(*lst))): try: self.mergeThings(value, values) except Exception as e: e.args = e.args + ('[%d]' % i,) raise def mergeThings(self, out, lst): clazz = type(out) try: assert all(type(item) == clazz for item in lst), (out, lst) mergerFunc = self.mergersFor(out).get(None, None) if mergerFunc is not None: mergerFunc(self, out, lst) elif hasattr(out, '__dict__'): self.mergeObjects(out, lst) elif isinstance(out, list): self.mergeLists(out, lst) else: assert all(out == v for v in lst), (out, lst) except Exception as e: e.args = e.args + (clazz.__name__,) raise def mergeTables(self, font, master_ttfs, tables): for tag in tables: if tag not in font: continue self.mergeThings(font[tag], [m[tag] for m in master_ttfs]) # # Aligning merger # class AligningMerger(Merger): pass def _SinglePosUpgradeToFormat2(self): if self.Format == 2: return self ret = ot.SinglePos() ret.Format = 2 ret.Coverage = self.Coverage ret.ValueFormat = self.ValueFormat ret.Value = [self.Value for g in 
		     ret.Coverage.glyphs]
	ret.ValueCount = len(ret.Value)

	return ret

def _merge_GlyphOrders(font, lst, values_lst=None, default=None):
	"""Takes font and list of glyph lists (must be sorted by glyph id),
	and returns two things:
	- Combined glyph list,
	- If values_lst is None, return input glyph lists, but padded with None
	  when a glyph was missing in a list.  Otherwise, return values_lst
	  list-of-list, padded with None to match combined glyph lists.
	"""
	if values_lst is None:
		dict_sets = [set(l) for l in lst]
	else:
		dict_sets = [{g:v for g,v in zip(l,vs)} for l,vs in zip(lst,values_lst)]
	combined = set()
	combined.update(*dict_sets)

	# Order the union by glyph id.
	sortKey = font.getReverseGlyphMap().__getitem__
	order = sorted(combined, key=sortKey)
	# Make sure all input glyphsets were in proper order
	assert all(sorted(vs, key=sortKey) == vs for vs in lst)
	del combined

	# NOTE(review): paddedValues is unused; kept from the original.
	paddedValues = None
	if values_lst is None:
		padded = [[glyph if glyph in dict_set else default
			   for glyph in order]
			  for dict_set in dict_sets]
	else:
		assert len(lst) == len(values_lst)
		padded = [[dict_set[glyph] if glyph in dict_set else default
			   for glyph in order]
			  for dict_set in dict_sets]
	return order, padded

def _Lookup_SinglePos_get_effective_value(subtables, glyph):
	# Return the ValueRecord the first applicable SinglePos subtable
	# assigns to glyph, or None if no subtable covers it.
	for self in subtables:
		if self is None or \
		   type(self) != ot.SinglePos or \
		   self.Coverage is None or \
		   glyph not in self.Coverage.glyphs:
			continue
		if self.Format == 1:
			return self.Value
		elif self.Format == 2:
			return self.Value[self.Coverage.glyphs.index(glyph)]
		else:
			assert 0
	return None

def _Lookup_PairPos_get_effective_value_pair(subtables, firstGlyph, secondGlyph):
	# Return the pair record the first applicable PairPos subtable assigns
	# to (firstGlyph, secondGlyph), or None.
	for self in subtables:
		if self is None or \
		   type(self) != ot.PairPos or \
		   self.Coverage is None or \
		   firstGlyph not in self.Coverage.glyphs:
			continue
		if self.Format == 1:
			ps = self.PairSet[self.Coverage.glyphs.index(firstGlyph)]
			pvr = ps.PairValueRecord
			for rec in pvr: # TODO Speed up
				if rec.SecondGlyph == secondGlyph:
					return rec
			continue
		elif self.Format == 2:
			klass1 = self.ClassDef1.classDefs.get(firstGlyph, 0)
			klass2 = self.ClassDef2.classDefs.get(secondGlyph, 0)
			return self.Class1Record[klass1].Class2Record[klass2]
		else:
			assert 0
	return None

@AligningMerger.merger(ot.SinglePos)
def merge(merger, self, lst):
	# Union of the masters' value formats; must fit a ValueRecord unless
	# there is only a single master.
	self.ValueFormat = valueFormat = reduce(int.__or__, [l.ValueFormat for l in lst], 0)
	assert len(lst) == 1 or (valueFormat & ~0xF == 0), valueFormat

	# If all have same coverage table and all are format 1,
	if all(v.Format == 1 for v in lst) and all(self.Coverage.glyphs == v.Coverage.glyphs for v in lst):
		self.Value = otBase.ValueRecord(valueFormat)
		merger.mergeThings(self.Value, [v.Value for v in lst])
		self.ValueFormat = self.Value.getFormat()
		return

	# Upgrade everything to Format=2
	self.Format = 2
	lst = [_SinglePosUpgradeToFormat2(v) for v in lst]

	# Align them
	glyphs, padded = _merge_GlyphOrders(merger.font,
					    [v.Coverage.glyphs for v in lst],
					    [v.Value for v in lst])

	self.Coverage.glyphs = glyphs
	self.Value = [otBase.ValueRecord(valueFormat) for g in glyphs]
	self.ValueCount = len(self.Value)

	for i,values in enumerate(padded):
		for j,glyph in enumerate(glyphs):
			if values[j] is not None: continue
			# Fill in value from other subtables
			# Note!!! This *might* result in behavior change if ValueFormat2-zeroedness
			# is different between used subtable and current subtable!
			# TODO(behdad) Check and warn if that happens?
			v = _Lookup_SinglePos_get_effective_value(merger.lookup_subtables[i], glyph)
			if v is None:
				v = otBase.ValueRecord(valueFormat)
			values[j] = v

	merger.mergeLists(self.Value, padded)

	# Merge everything else; though, there shouldn't be anything else. :)
	merger.mergeObjects(self, lst,
			    exclude=('Format', 'Coverage', 'Value', 'ValueCount'))
	self.ValueFormat = reduce(int.__or__, [v.getFormat() for v in self.Value], 0)

@AligningMerger.merger(ot.PairSet)
def merge(merger, self, lst):
	# Align them
	glyphs, padded = _merge_GlyphOrders(merger.font,
				[[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
				[vs.PairValueRecord for vs in lst])

	self.PairValueRecord = pvrs = []
	for glyph in glyphs:
		pvr = ot.PairValueRecord()
		pvr.SecondGlyph = glyph
		pvr.Value1 = otBase.ValueRecord(merger.valueFormat1) if merger.valueFormat1 else None
		pvr.Value2 = otBase.ValueRecord(merger.valueFormat2) if merger.valueFormat2 else None
		pvrs.append(pvr)
	self.PairValueCount = len(self.PairValueRecord)

	for i,values in enumerate(padded):
		for j,glyph in enumerate(glyphs):
			# Fill in value from other subtables
			v = ot.PairValueRecord()
			v.SecondGlyph = glyph
			if values[j] is not None:
				vpair = values[j]
			else:
				# _firstGlyph is stashed on the PairSet by
				# _PairPosFormat1_merge before mergeLists recurses here.
				vpair = _Lookup_PairPos_get_effective_value_pair(merger.lookup_subtables[i], self._firstGlyph, glyph)
			if vpair is None:
				v1, v2 = None, None
			else:
				v1, v2 = vpair.Value1, vpair.Value2
			v.Value1 = otBase.ValueRecord(merger.valueFormat1, src=v1) if merger.valueFormat1 else None
			v.Value2 = otBase.ValueRecord(merger.valueFormat2, src=v2) if merger.valueFormat2 else None
			values[j] = v
	del self._firstGlyph

	merger.mergeLists(self.PairValueRecord, padded)

def _PairPosFormat1_merge(self, lst, merger):
	assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools."

	# Merge everything else; makes sure Format is the same.
	merger.mergeObjects(self, lst,
			    exclude=('Coverage',
				     'PairSet', 'PairSetCount'))

	# Placeholder PairSet used for masters not covering a glyph.
	empty = ot.PairSet()
	empty.PairValueRecord = []
	empty.PairValueCount = 0

	# Align them
	glyphs, padded = _merge_GlyphOrders(merger.font,
					    [v.Coverage.glyphs for v in lst],
					    [v.PairSet for v in lst],
					    default=empty)

	self.Coverage.glyphs = glyphs
	self.PairSet = [ot.PairSet() for g in glyphs]
	self.PairSetCount = len(self.PairSet)
	# Stash the first glyph on each PairSet; the PairSet merger needs it
	# to look up effective values in other subtables.
	for glyph, ps in zip(glyphs, self.PairSet):
		ps._firstGlyph = glyph

	merger.mergeLists(self.PairSet, padded)

def _ClassDef_invert(self, allGlyphs=None):
	# Invert a ClassDef (or plain classDefs dict) into a list of glyph
	# sets indexed by class number.

	if isinstance(self, dict):
		classDefs = self
	else:
		classDefs = self.classDefs if self and self.classDefs else {}
	m = max(classDefs.values()) if classDefs else 0

	ret = []
	for _ in range(m + 1):
		ret.append(set())

	for k,v in classDefs.items():
		ret[v].add(k)

	# Class-0 is special.  It's "everything else".
	if allGlyphs is None:
		ret[0] = None
	else:
		# Limit all classes to glyphs in allGlyphs.
		# Collect anything without a non-zero class into class=zero.
		ret[0] = class0 = set(allGlyphs)
		for s in ret[1:]:
			s.intersection_update(class0)
			class0.difference_update(s)

	return ret

def _ClassDef_merge_classify(lst, allGlyphs=None):
	# Build one ClassDef whose classes refine every input ClassDef.
	# Returns (classDef, list of glyph sets, class 0 first).
	self = ot.ClassDef()
	self.classDefs = classDefs = {}

	classifier = classifyTools.Classifier()
	for l in lst:
		sets = _ClassDef_invert(l, allGlyphs=allGlyphs)
		if allGlyphs is None:
			sets = sets[1:]
		classifier.update(sets)
	classes = classifier.getClasses()

	if allGlyphs is None:
		classes.insert(0, set())

	for i,classSet in enumerate(classes):
		if i == 0:
			continue
		for g in classSet:
			classDefs[g] = i

	return self, classes

def _ClassDef_calculate_Format(self, font):
	# Choose the more compact of ClassDef format 1 / format 2.
	fmt = 2
	ranges = self._getClassRanges(font)
	if ranges:
		startGlyph = ranges[0][1]
		endGlyph = ranges[-1][3]
		glyphCount = endGlyph - startGlyph + 1
		if len(ranges) * 3 >= glyphCount + 1:
			# Format 1 is more compact
			fmt = 1
	self.Format = fmt

def _PairPosFormat2_align_matrices(self, lst, font, transparent=False):
	# Re-express each master's Class1Record matrix against the merged
	# class definitions, so rows/columns line up across masters.

	matrices = [l.Class1Record for l in lst]

	# Align first classes
	self.ClassDef1, classes = _ClassDef_merge_classify([l.ClassDef1 for l in lst], allGlyphs=set(self.Coverage.glyphs))
	_ClassDef_calculate_Format(self.ClassDef1, font)
	self.Class1Count = len(classes)
	new_matrices = []
	for l,matrix in zip(lst, matrices):
		nullRow = None
		coverage = set(l.Coverage.glyphs)
		classDef1 = l.ClassDef1.classDefs
		class1Records = []
		for classSet in classes:
			exemplarGlyph = next(iter(classSet))
			if exemplarGlyph not in coverage:
				# Master doesn't cover this class: synthesize a shared
				# all-zero (or transparent) row lazily.
				if nullRow is None:
					nullRow = ot.Class1Record()
					class2records = nullRow.Class2Record = []
					# TODO: When merger becomes selfless, revert e6125b353e1f54a0280ded5434b8e40d042de69f
					for _ in range(l.Class2Count):
						if transparent:
							rec2 = None
						else:
							rec2 = ot.Class2Record()
							rec2.Value1 = otBase.ValueRecord(l.ValueFormat1) if l.ValueFormat1 else None
							rec2.Value2 = otBase.ValueRecord(l.ValueFormat2) if l.ValueFormat2 else None
						class2records.append(rec2)
				rec1 = nullRow
			else:
				klass = classDef1.get(exemplarGlyph, 0)
				rec1 = matrix[klass] # TODO handle out-of-range?
			class1Records.append(rec1)
		new_matrices.append(class1Records)
	matrices = new_matrices
	del new_matrices

	# Align second classes
	self.ClassDef2, classes = _ClassDef_merge_classify([l.ClassDef2 for l in lst])
	_ClassDef_calculate_Format(self.ClassDef2, font)
	self.Class2Count = len(classes)
	new_matrices = []
	for l,matrix in zip(lst, matrices):
		classDef2 = l.ClassDef2.classDefs
		class1Records = []
		for rec1old in matrix:
			oldClass2Records = rec1old.Class2Record
			rec1new = ot.Class1Record()
			class2Records = rec1new.Class2Record = []
			for classSet in classes:
				if not classSet: # class=0
					rec2 = oldClass2Records[0]
				else:
					exemplarGlyph = next(iter(classSet))
					klass = classDef2.get(exemplarGlyph, 0)
					rec2 = oldClass2Records[klass]
				class2Records.append(rec2)
			class1Records.append(rec1new)
		new_matrices.append(class1Records)
	matrices = new_matrices
	del new_matrices

	return matrices

def _PairPosFormat2_merge(self, lst, merger):
	assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools."

	merger.mergeObjects(self, lst,
			    exclude=('Coverage',
				     'ClassDef1', 'Class1Count',
				     'ClassDef2', 'Class2Count',
				     'Class1Record'))

	# Align coverages
	glyphs, _ = _merge_GlyphOrders(merger.font,
				       [v.Coverage.glyphs for v in lst])
	self.Coverage.glyphs = glyphs

	# Currently, if the coverage of PairPosFormat2 subtables are different,
	# we do NOT bother walking down the subtable list when filling in new
	# rows for alignment.  As such, this is only correct if current subtable
	# is the last subtable in the lookup.  Ensure that.
	#
	# Note that our canonicalization process merges trailing PairPosFormat2's,
	# so in reality this is rare.
	for l,subtables in zip(lst,merger.lookup_subtables):
		if l.Coverage.glyphs != glyphs:
			assert l == subtables[-1]

	matrices = _PairPosFormat2_align_matrices(self, lst, merger.font)

	self.Class1Record = list(matrices[0]) # TODO move merger to be selfless
	merger.mergeLists(self.Class1Record, matrices)

@AligningMerger.merger(ot.PairPos)
def merge(merger, self, lst):
	# TODO Support differing ValueFormats.
	# Stash value formats where the PairSet merger can reach them.
	merger.valueFormat1 = self.ValueFormat1
	merger.valueFormat2 = self.ValueFormat2

	if self.Format == 1:
		_PairPosFormat1_merge(self, lst, merger)
	elif self.Format == 2:
		_PairPosFormat2_merge(self, lst, merger)
	else:
		assert False

	del merger.valueFormat1, merger.valueFormat2

	# Now examine the list of value records, and update to the union of format values,
	# as merge might have created new values.
	vf1 = 0
	vf2 = 0
	if self.Format == 1:
		for pairSet in self.PairSet:
			for pairValueRecord in pairSet.PairValueRecord:
				pv1 = pairValueRecord.Value1
				if pv1 is not None:
					vf1 |= pv1.getFormat()
				pv2 = pairValueRecord.Value2
				if pv2 is not None:
					vf2 |= pv2.getFormat()
	elif self.Format == 2:
		for class1Record in self.Class1Record:
			for class2Record in class1Record.Class2Record:
				pv1 = class2Record.Value1
				if pv1 is not None:
					vf1 |= pv1.getFormat()
				pv2 = class2Record.Value2
				if pv2 is not None:
					vf2 |= pv2.getFormat()
	self.ValueFormat1 = vf1
	self.ValueFormat2 = vf2

def _PairSet_flatten(lst, font):
	# Collapse several PairSets for the same first glyph into one,
	# keeping, per second glyph, the first non-None record.
	self = ot.PairSet()
	self.Coverage = ot.Coverage()
	self.Coverage.Format = 1

	# Align them
	glyphs, padded = _merge_GlyphOrders(font,
				[[v.SecondGlyph for v in vs.PairValueRecord] for vs in lst],
				[vs.PairValueRecord for vs in lst])

	self.Coverage.glyphs = glyphs
	self.PairValueRecord = pvrs = []
	for values in zip(*padded):
		for v in values:
			if v is not None:
				pvrs.append(v)
				break
		else:
			assert False
	self.PairValueCount = len(self.PairValueRecord)

	return self

def _Lookup_PairPosFormat1_subtables_flatten(lst, font):
	assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.PairSet]), "Report bug against fonttools."
	self = ot.PairPos()
	self.Format = 1
	self.Coverage = ot.Coverage()
	self.Coverage.Format = 1
	# Union of the input value formats.
	self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
	self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)

	# Align them
	glyphs, padded = _merge_GlyphOrders(font,
					    [v.Coverage.glyphs for v in lst],
					    [v.PairSet for v in lst])

	self.Coverage.glyphs = glyphs
	self.PairSet = [_PairSet_flatten([v for v in values if v is not None], font)
			for values in zip(*padded)]
	self.PairSetCount = len(self.PairSet)
	return self

def _Lookup_PairPosFormat2_subtables_flatten(lst, font):
	assert _all_equal([l.ValueFormat2 == 0 for l in lst if l.Class1Record]), "Report bug against fonttools."

	self = ot.PairPos()
	self.Format = 2
	self.Coverage = ot.Coverage()
	self.Coverage.Format = 1
	self.ValueFormat1 = reduce(int.__or__, [l.ValueFormat1 for l in lst], 0)
	self.ValueFormat2 = reduce(int.__or__, [l.ValueFormat2 for l in lst], 0)

	# Align them
	glyphs, _ = _merge_GlyphOrders(font,
				       [v.Coverage.glyphs for v in lst])
	self.Coverage.glyphs = glyphs

	# transparent=True marks missing cells None so we can pick the first
	# real record per cell below.
	matrices = _PairPosFormat2_align_matrices(self, lst, font, transparent=True)

	matrix = self.Class1Record = []
	for rows in zip(*matrices):
		row = ot.Class1Record()
		matrix.append(row)
		row.Class2Record = []
		row = row.Class2Record
		for cols in zip(*list(r.Class2Record for r in rows)):
			col = next(iter(c for c in cols if c is not None))
			row.append(col)

	return self

def _Lookup_PairPos_subtables_canonicalize(lst, font):
	"""Merge multiple Format1 subtables at the beginning of lst,
	and merge multiple consecutive Format2 subtables that have the same
	Class2 (ie. were split because of offset overflows).
Returns new list.""" lst = list(lst) l = len(lst) i = 0 while i < l and lst[i].Format == 1: i += 1 lst[:i] = [_Lookup_PairPosFormat1_subtables_flatten(lst[:i], font)] l = len(lst) i = l while i > 0 and lst[i - 1].Format == 2: i -= 1 lst[i:] = [_Lookup_PairPosFormat2_subtables_flatten(lst[i:], font)] return lst @AligningMerger.merger(ot.Lookup) def merge(merger, self, lst): subtables = merger.lookup_subtables = [l.SubTable for l in lst] # Remove Extension subtables for l,sts in list(zip(lst,subtables))+[(self,self.SubTable)]: if not sts: continue if sts[0].__class__.__name__.startswith('Extension'): assert _all_equal([st.__class__ for st in sts]) assert _all_equal([st.ExtensionLookupType for st in sts]) l.LookupType = sts[0].ExtensionLookupType new_sts = [st.ExtSubTable for st in sts] del sts[:] sts.extend(new_sts) isPairPos = self.SubTable and isinstance(self.SubTable[0], ot.PairPos) if isPairPos: # AFDKO and feaLib sometimes generate two Format1 subtables instead of one. # Merge those before continuing. # https://github.com/fonttools/fonttools/issues/719 self.SubTable = _Lookup_PairPos_subtables_canonicalize(self.SubTable, merger.font) subtables = merger.lookup_subtables = [_Lookup_PairPos_subtables_canonicalize(st, merger.font) for st in subtables] merger.mergeLists(self.SubTable, subtables) self.SubTableCount = len(self.SubTable) if isPairPos: # If format-1 subtable created during canonicalization is empty, remove it. assert len(self.SubTable) >= 1 and self.SubTable[0].Format == 1 if not self.SubTable[0].Coverage.glyphs: self.SubTable.pop(0) self.SubTableCount -= 1 # If format-2 subtable created during canonicalization is empty, remove it. 
assert len(self.SubTable) >= 1 and self.SubTable[-1].Format == 2 if not self.SubTable[-1].Coverage.glyphs: self.SubTable.pop(-1) self.SubTableCount -= 1 merger.mergeObjects(self, lst, exclude=['SubTable', 'SubTableCount']) del merger.lookup_subtables # # InstancerMerger # class InstancerMerger(AligningMerger): """A merger that takes multiple master fonts, and instantiates an instance.""" def __init__(self, font, model, location): Merger.__init__(self, font) self.model = model self.location = location self.scalars = model.getScalars(location) @InstancerMerger.merger(ot.Anchor) def merge(merger, self, lst): XCoords = [a.XCoordinate for a in lst] YCoords = [a.YCoordinate for a in lst] model = merger.model scalars = merger.scalars self.XCoordinate = round(model.interpolateFromMastersAndScalars(XCoords, scalars)) self.YCoordinate = round(model.interpolateFromMastersAndScalars(YCoords, scalars)) @InstancerMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): model = merger.model scalars = merger.scalars # TODO Handle differing valueformats for name, tableName in [('XAdvance','XAdvDevice'), ('YAdvance','YAdvDevice'), ('XPlacement','XPlaDevice'), ('YPlacement','YPlaDevice')]: assert not hasattr(self, tableName) if hasattr(self, name): values = [getattr(a, name, 0) for a in lst] value = round(model.interpolateFromMastersAndScalars(values, scalars)) setattr(self, name, value) # # MutatorMerger # class MutatorMerger(AligningMerger): """A merger that takes a variable font, and instantiates an instance.""" def __init__(self, font, location): Merger.__init__(self, font) self.location = location store = None if 'GDEF' in font: gdef = font['GDEF'].table if gdef.Version >= 0x00010003: store = gdef.VarStore self.instancer = VarStoreInstancer(store, font['fvar'].axes, location) def instantiate(self): font = self.font self.mergeTables(font, [font], ['GPOS']) if 'GDEF' in font: gdef = font['GDEF'].table if gdef.Version >= 0x00010003: del gdef.VarStore gdef.Version = 
0x00010002 if gdef.MarkGlyphSetsDef is None: del gdef.MarkGlyphSetsDef gdef.Version = 0x00010000 if not (gdef.LigCaretList or gdef.MarkAttachClassDef or gdef.GlyphClassDef or gdef.AttachList or (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)): del font['GDEF'] @MutatorMerger.merger(ot.Anchor) def merge(merger, self, lst): if self.Format != 3: return instancer = merger.instancer for v in "XY": tableName = v+'DeviceTable' if not hasattr(self, tableName): continue dev = getattr(self, tableName) delattr(self, tableName) if dev is None: continue assert dev.DeltaFormat == 0x8000 varidx = (dev.StartSize << 16) + dev.EndSize delta = round(instancer[varidx]) attr = v+'Coordinate' setattr(self, attr, getattr(self, attr) + delta) self.Format = 1 @MutatorMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): # All other structs are merged with self pointing to a copy of base font, # except for ValueRecords which are sometimes created later and initialized # to have 0/None members. Hence the copy. 
self.__dict__ = lst[0].__dict__.copy() instancer = merger.instancer # TODO Handle differing valueformats for name, tableName in [('XAdvance','XAdvDevice'), ('YAdvance','YAdvDevice'), ('XPlacement','XPlaDevice'), ('YPlacement','YPlaDevice')]: if not hasattr(self, tableName): continue dev = getattr(self, tableName) delattr(self, tableName) if dev is None: continue assert dev.DeltaFormat == 0x8000 varidx = (dev.StartSize << 16) + dev.EndSize delta = round(instancer[varidx]) setattr(self, name, getattr(self, name) + delta) # # VariationMerger # class VariationMerger(AligningMerger): """A merger that takes multiple master fonts, and builds a variable font.""" def __init__(self, model, axisTags, font): Merger.__init__(self, font) self.model = model self.store_builder = varStore.OnlineVarStoreBuilder(axisTags) self.store_builder.setModel(model) def _all_equal(lst): if not lst: return True it = iter(lst) v0 = next(it) for v in it: if v0 != v: return False return True def buildVarDevTable(store_builder, master_values): if _all_equal(master_values): return master_values[0], None base, varIdx = store_builder.storeMasters(master_values) return base, builder.buildVarDevTable(varIdx) @VariationMerger.merger(ot.Anchor) def merge(merger, self, lst): assert self.Format == 1 self.XCoordinate, XDeviceTable = buildVarDevTable(merger.store_builder, [a.XCoordinate for a in lst]) self.YCoordinate, YDeviceTable = buildVarDevTable(merger.store_builder, [a.YCoordinate for a in lst]) if XDeviceTable or YDeviceTable: self.Format = 3 self.XDeviceTable = XDeviceTable self.YDeviceTable = YDeviceTable @VariationMerger.merger(otBase.ValueRecord) def merge(merger, self, lst): for name, tableName in [('XAdvance','XAdvDevice'), ('YAdvance','YAdvDevice'), ('XPlacement','XPlaDevice'), ('YPlacement','YPlaDevice')]: if hasattr(self, name): value, deviceTable = buildVarDevTable(merger.store_builder, [getattr(a, name, 0) for a in lst]) setattr(self, name, value) if deviceTable: setattr(self, tableName, 
deviceTable)
# Round-trip tests for fastavro's JSON encoding: records are written with
# json_writer and read back with json_reader, and must survive unchanged.
from copy import deepcopy
from io import StringIO
import json

import pytest

from fastavro import json_writer, json_reader
from fastavro.schema import parse_schema
from fastavro.validation import ValidationError


def roundtrip(schema, records):
    """Write records to an in-memory JSON stream and read them back."""
    new_file = StringIO()
    json_writer(new_file, schema, records)
    new_file.seek(0)
    new_records = list(json_reader(new_file, schema))
    return new_records


def test_json():
    # One record type exercising every primitive, named, and complex type.
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {"name": "null", "type": "null"},
            {"name": "boolean", "type": "boolean"},
            {"name": "string", "type": "string"},
            {"name": "bytes", "type": "bytes"},
            {"name": "int", "type": "int"},
            {"name": "long", "type": "long"},
            {"name": "float", "type": "float"},
            {"name": "double", "type": "double"},
            {
                "name": "fixed",
                "type": {"type": "fixed", "name": "fixed_field", "size": 5},
            },
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
            },
            {
                "name": "enum",
                "type": {
                    "type": "enum",
                    "name": "enum_field",
                    "symbols": ["FOO", "BAR"],
                },
            },
            {"name": "array", "type": {"type": "array", "items": "string"}},
            {"name": "map", "type": {"type": "map", "values": "int"}},
            {
                "name": "record",
                "type": {
                    "type": "record",
                    "name": "subrecord",
                    "fields": [{"name": "sub_int", "type": "int"}],
                },
            },
        ],
    }
    # Three records covering each branch of the union field.
    records = [
        {
            "null": None,
            "boolean": True,
            "string": "foo",
            "bytes": b"\xe2\x99\xa5",
            "int": 1,
            "long": 1 << 33,
            "float": 2.2,
            "double": 3.3,
            "fixed": b"\x61\x61\x61\x61\x61",
            "union": None,
            "enum": "BAR",
            "array": ["a", "b"],
            "map": {"c": 1, "d": 2},
            "record": {"sub_int": 123},
        },
        {
            "null": None,
            "boolean": True,
            "string": "foo",
            "bytes": b"\xe2\x99\xa5",
            "int": 1,
            "long": 1 << 33,
            "float": 2.2,
            "double": 3.3,
            "fixed": b"\x61\x61\x61\x61\x61",
            "union": 321,
            "enum": "BAR",
            "array": ["a", "b"],
            "map": {"c": 1, "d": 2},
            "record": {"sub_int": 123},
        },
        {
            "null": None,
            "boolean": True,
            "string": "foo",
            "bytes": b"\xe2\x99\xa5",
            "int": 1,
            "long": 1 << 33,
            "float": 2.2,
            "double": 3.3,
            "fixed": b"\x61\x61\x61\x61\x61",
            "union": {"union_record_field": "union_field"},
            "enum": "BAR",
            "array": ["a", "b"],
            "map": {"c": 1, "d": 2},
            "record": {"sub_int": 123},
        },
    ]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_more_than_one_record():
    """Multiple records survive a round trip in order."""
    schema = {
        "type": "record",
        "name": "test_more_than_one_record",
        "namespace": "test",
        "fields": [
            {"name": "string", "type": "string"},
            {"name": "int", "type": "int"},
        ],
    }
    records = [{"string": "foo", "int": 1}, {"string": "bar", "int": 2}]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_encoded_union_output():
    """Unions are tagged in the output with the branch type (or full name
    for named types); null stays bare."""
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
            }
        ],
    }

    # A null value is encoded as just null
    records = [{"union": None}]
    new_file = StringIO()
    json_writer(new_file, schema, records)
    assert new_file.getvalue().strip() == json.dumps({"union": None})

    # A non-null, non-named type is encoded as an object with a key for the
    # type
    records = [{"union": 321}]
    new_file = StringIO()
    json_writer(new_file, schema, records)
    assert new_file.getvalue().strip() == json.dumps({"union": {"int": 321}})

    # A non-null, named type is encoded as an object with a key for the name
    records = [{"union": {"union_record_field": "union_field"}}]
    new_file = StringIO()
    json_writer(new_file, schema, records)
    expected = json.dumps(
        {"union": {"test.union_record": {"union_record_field": "union_field"}}}
    )
    assert new_file.getvalue().strip() == expected


def test_union_output_without_type():
    """https://github.com/fastavro/fastavro/issues/420"""
    # write_union_type=False suppresses the union branch tag entirely.
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
            }
        ],
    }

    # A null value is encoded as just null
    records = [{"union": None}]
    new_file = StringIO()
    json_writer(new_file, schema, records, write_union_type=False)
    assert new_file.getvalue().strip() == json.dumps({"union": None})

    # A non-null, non-named type is encoded as just the value
    records = [{"union": 321}]
    new_file = StringIO()
    json_writer(new_file, schema, records, write_union_type=False)
    assert new_file.getvalue().strip() == json.dumps({"union": 321})

    # A non-null, named type is encoded as an object
    records = [{"union": {"union_record_field": "union_field"}}]
    new_file = StringIO()
    json_writer(new_file, schema, records, write_union_type=False)
    expected = json.dumps({"union": {"union_record_field": "union_field"}})
    assert new_file.getvalue().strip() == expected


def test_union_string_and_bytes():
    """str and bytes branches of a union both round-trip intact."""
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [{"name": "union", "type": ["string", "bytes"]}],
    }

    records = [{"union": "asdf"}, {"union": b"asdf"}]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_simple_type():
    """A top-level primitive schema (no record wrapper) round-trips."""
    schema = {"type": "string"}

    records = ["foo", "bar"]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_array_type_simple():
    """A top-level array-of-primitive schema round-trips."""
    schema = {"type": "array", "items": "string"}

    records = [["foo", "bar"], ["a", "b"]]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_array_type_records():
    """A top-level array-of-record schema round-trips."""
    schema = {
        "type": "array",
        "items": {
            "type": "record",
            "name": "test_array_type",
            "fields": [
                {"name": "field1", "type": "string"},
                {"name": "field2", "type": "int"},
            ],
        },
    }

    records = [[{"field1": "foo", "field2": 1}], [{"field1": "bar", "field2": 2}]]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_empty_maps():
    """https://github.com/fastavro/fastavro/issues/380"""
    schema = {"type": "map", "values": "int"}
    records = [{"foo": 1}, {}]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_empty_arrays():
    """https://github.com/fastavro/fastavro/issues/380"""
    schema = {"type": "array", "items": "int"}
    records = [[1], []]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_union_in_array():
    """https://github.com/fastavro/fastavro/issues/399"""
    schema = {
        "type": "array",
        "items": [
            {
                "type": "record",
                "name": "rec1",
                "fields": [{"name": "field1", "type": ["string", "null"]}],
            },
            {
                "type": "record",
                "name": "rec2",
                "fields": [{"name": "field2", "type": ["string", "null"]}],
            },
            "null",
        ],
    }

    records = [
        [{"field1": "foo"}, {"field2": None}, None],
    ]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_union_in_array2():
    """https://github.com/fastavro/fastavro/issues/399"""
    schema = {
        "type": "record",
        "name": "Inbox",
        "fields": [
            {"type": "string", "name": "id"},
            {"type": "string", "name": "msg_title"},
            {
                "name": "msg_content",
                "type": {
                    "type": "array",
                    "items": [
                        {
                            "type": "record",
                            "name": "LimitedTime",
                            "fields": [
                                {
                                    "type": ["string", "null"],
                                    "name": "type",
                                    "default": "now",
                                }
                            ],
                        },
                        {
                            "type": "record",
                            "name": "Text",
                            "fields": [{"type": ["string", "null"], "name": "text"}],
                        },
                    ],
                },
            },
        ],
    }

    records = [
        {
            "id": 1234,
            "msg_title": "Hi",
            "msg_content": [{"type": "now"}, {"text": "hi from here!"}],
        },
    ]

    new_records = roundtrip(schema, records)
    assert records == new_records


def test_union_in_map():
    """https://github.com/fastavro/fastavro/issues/399"""
    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {
                "name": "map",
                "type": {"type": "map", "values": ["string", "null"]},
            }
        ],
    }

    records = [{"map": {"c": "1", "d": None}}]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_with_dependent_schema():
    """Tests a schema with dependent schema
    https://github.com/fastavro/fastavro/issues/418"""
    dependency = {
        "type": "record",
        "name": "Dependency",
        "namespace": "test",
        "fields": [{"name": "_name", "type": "string"}],
    }

    schema = {
        "type": "record",
        "name": "Test",
        "namespace": "test",
        "fields": [
            {"name": "_name", "type": "string"},
            {"name": "_dependency", "type": "Dependency"},
        ],
    }

    records = [{"_name": "parent", "_dependency": {"_name": "child"}}]

    # parse_schema accepts a list; earlier entries define names used later.
    parsed_schema = parse_schema([dependency, schema])
    new_records = roundtrip(parsed_schema, records)
    assert records == new_records


def test_enum_named_type():
    """https://github.com/fastavro/fastavro/issues/450"""
    schema = {
        "type": "record",
        "name": "test_enum_named_type",
        "fields": [
            {
                "name": "test1",
                "type": {"type": "enum", "name": "my_enum", "symbols": ["FOO", "BAR"]},
            },
            {"name": "test2", "type": "my_enum"},
        ],
    }

    records = [{"test1": "FOO", "test2": "BAR"}]

    parsed_schema = parse_schema(schema)
    assert records == roundtrip(parsed_schema, records)


def test_fixed_named_type():
    """https://github.com/fastavro/fastavro/issues/450"""
    schema = {
        "type": "record",
        "name": "test_fixed_named_type",
        "fields": [
            {
                "name": "test1",
                "type": {"type": "fixed", "name": "my_fixed", "size": 4},
            },
            {"name": "test2", "type": "my_fixed"},
        ],
    }

    records = [{"test1": b"1234", "test2": b"4321"}]

    parsed_schema = parse_schema(schema)
    assert records == roundtrip(parsed_schema, records)


def test_record_named_type():
    """https://github.com/fastavro/fastavro/issues/450"""
    schema = {
        "type": "record",
        "name": "test_record_named_type",
        "fields": [
            {
                "name": "test1",
                "type": {
                    "type": "record",
                    "name": "my_record",
                    "fields": [{"name": "field1", "type": "string"}],
                },
            },
            {"name": "test2", "type": "my_record"},
        ],
    }

    records = [{"test1": {"field1": "foo"}, "test2": {"field1": "bar"}}]

    parsed_schema = parse_schema(schema)
    assert records == roundtrip(parsed_schema, records)


def test_default_union_values():
    """https://github.com/fastavro/fastavro/issues/485"""
    # A missing union field falls back to its declared default on read.
    schema = {
        "type": "record",
        "name": "User",
        "fields": [
            {"name": "name", "type": "string"},
            {"name": "age", "type": "long"},
            {
                "name": "pets",
                "type": {"type": "array", "items": "string"},
            },
            {
                "name": "accounts",
                "type": {"type": "map", "values": "long"},
            },
            {
                "name": "favorite_colors",
                "type": {
                    "type": "enum",
                    "name": "favorite_color",
                    "symbols": ["BLUE", "YELLOW", "GREEN"],
                },
            },
            {"name": "country", "type": ["string", "null"], "default": "Argentina"},
            {"name": "address", "type": ["null", "string"], "default": None},
        ],
        "doc": "An User",
        "namespace": "User.v1",
        "aliases": ["user-v1", "super user"],
    }

    record = {
        "name": "MgXqfDAqzbgJSTTHDXtN",
        "age": 551,
        "pets": ["aRvwODwbOWfrkxYYkJiI"],
        "accounts": {"DQSZRzofFrNCiOhhIOvX": 4431},
        "favorite_colors": "GREEN",
        "address": {"string": "YgmVDKhXctMgODKkhNHJ"},
    }

    new_file = StringIO(json.dumps(record))
    read_record = next(json_reader(new_file, schema))
    assert read_record["country"] == "Argentina"


def test_all_default_values():
    """https://github.com/fastavro/fastavro/issues/485"""
    # An entirely empty record reads back with every field's default.
    default_boolean = True
    default_string = "default_string"
    default_bytes = "default_bytes"
    default_int = -1
    default_long = -2
    default_float = 1.1
    default_double = 2.2
    default_fixed = "12345"
    default_union = None
    default_enum = "FOO"
    default_array = ["a", "b"]
    default_map = {"a": 1, "b": 2}
    default_record = {"sub_int": -3}
    schema = {
        "type": "record",
        "name": "test_all_default_values",
        "fields": [
            {"name": "boolean", "type": "boolean", "default": default_boolean},
            {"name": "string", "type": "string", "default": default_string},
            {"name": "bytes", "type": "bytes", "default": default_bytes},
            {"name": "int", "type": "int", "default": default_int},
            {"name": "long", "type": "long", "default": default_long},
            {"name": "float", "type": "float", "default": default_float},
            {"name": "double", "type": "double", "default": default_double},
            {
                "name": "fixed",
                "type": {"type": "fixed", "name": "fixed_field", "size": 5},
                "default": default_fixed,
            },
            {
                "name": "union",
                "type": [
                    "null",
                    "int",
                    {
                        "type": "record",
                        "name": "union_record",
                        "fields": [{"name": "union_record_field", "type": "string"}],
                    },
                ],
                "default": default_union,
            },
            {
                "name": "enum",
                "type": {
                    "type": "enum",
                    "name": "enum_field",
                    "symbols": ["FOO", "BAR"],
                },
                "default": default_enum,
            },
            {
                "name": "array",
                "type": {"type": "array", "items": "string"},
                "default": deepcopy(default_array),
            },
            {
                "name": "map",
                "type": {"type": "map", "values": "int"},
                "default": deepcopy(default_map),
            },
            {
                "name": "record",
                "type": {
                    "type": "record",
                    "name": "subrecord",
                    "fields": [{"name": "sub_int", "type": "int"}],
                },
                "default": default_record,
            },
        ],
    }

    record = {}

    new_file = StringIO(json.dumps(record))
    read_record = next(json_reader(new_file, schema))
    assert read_record["boolean"] == default_boolean
    assert read_record["string"] == default_string
    # Avro string defaults for bytes/fixed decode via ISO-8859-1.
    assert read_record["bytes"] == default_bytes.encode("iso-8859-1")
    assert read_record["int"] == default_int
    assert read_record["long"] == default_long
    assert read_record["float"] == default_float
    assert read_record["double"] == default_double
    assert read_record["fixed"] == default_fixed.encode("iso-8859-1")
    assert read_record["union"] == default_union
    assert read_record["enum"] == default_enum
    assert read_record["array"] == default_array
    assert read_record["map"] == default_map
    assert read_record["record"] == default_record


def test_default_value_missing():
    """https://github.com/fastavro/fastavro/issues/485"""
    schema = {
        "type": "record",
        "name": "test_default_value_missing",
        "fields": [{"name": "string", "type": "string"}],
    }

    record = {}

    new_file = StringIO(json.dumps(record))
    with pytest.raises(ValueError, match="no value and no default"):
        next(json_reader(new_file, schema))


def test_map_of_union_of_array_and_map():
    """https://github.com/fastavro/fastavro/issues/572"""
    schema = {
        "name": "Test",
        "namespace": "test",
        "type": "record",
        "fields": [
            {
                "name": "metadata",
                "type": {
                    "type": "map",
                    "values": [
                        {"type": "array", "items": "string"},
                        {"type": "map", "values": ["string"]},
                    ],
                },
            }
        ],
    }

    records = [{"metadata": {"map1": {"map2": "str"}}}]
    new_records = roundtrip(schema, records)
    assert records == new_records


def test_json_writer_with_validation():
    """https://github.com/fastavro/fastavro/issues/580"""
    # validator=True must reject records that don't match the schema.
    schema = {
        "doc": "A weather reading.",
        "name": "Weather",
        "namespace": "test",
        "type": "record",
        "fields": [
            {"name": "station", "type": "string"},
            {"name": "time", "type": "long"},
            {"name": "temp", "type": "int"},
        ],
    }

    records = [
        {"station": "011990-99999", "temp": 0, "time": 1433269388},
        {"station": "011990-99999", "temp": 22, "time": "last day"},
        {"station": "011990-99999", "temp": -11, "time": 1433273379},
        {"station": "012650-99999", "temp": 111.9, "time": 1433275478},
    ]

    new_file = StringIO()
    with pytest.raises(ValidationError):
        json_writer(new_file, schema, records, validator=True)
#!/usr/bin/env python 'An interactive fiction system offering control over the narrative discourse.' __author__ = 'Nick Montfort' __copyright__ = 'Copyright 2011 Nick Montfort' __license__ = 'ISC' __version__ = '0.5.0.0' __status__ = 'Development' import sys import os import time import optparse import clarifier import command_map import discourse_model import joker import microplanner import preparer import presenter import recognizer import reply_planner import world_model class Multistream(object): 'Encapsulates multiple output streams.' def __init__(self, streams, log=None): self.streams = streams self.log = log def close(self): """Close each of the streams. If one or more of the streams returns some exit status, the maximum value is returned by this method.""" overall_status = None for stream in self.streams: status = stream.close() if status is not None: overall_status = max(overall_status, status) return overall_status def write(self, string): 'Write string to each of the streams.' for stream in self.streams: stream.write(string) def start_log(out_streams): 'Open a log file named with the next available integer.' log_files = [os.path.splitext(l)[0] for l in os.listdir('logs/') if os.path.splitext(l)[1] == '.log'] if len(log_files) == 0: latest = 0 else: latest = max([int(log_file) for log_file in log_files]) log_file = 'logs/' + str(latest + 1) + '.log' try: log = file(log_file, 'w') except IOError, err: msg = ('Unable to open log file "' + log_file + '" for ' + 'writing due to this error: ' + str(err)) raise joker.StartupError(msg) # So that we output to the screen and the log file: out_streams.streams.append(log) # And indicate that this stream is the log file: out_streams.log = log presenter.present('\nLogged to: ' + log_file + '\nSession started ' + time.strftime("%Y-%m-%d %H:%M:%S"), out_streams) return out_streams def initialize(if_file, spin_files, out_streams): 'Load all files and present the header and prologue.' 
for startup_string in joker.session_startup(__version__): presenter.center(startup_string, out_streams) fiction = joker.load_fiction(if_file, ['discourse', 'items'], discourse_model.FICTION_DEFAULTS) presenter.center('fiction: ' + if_file, out_streams) world = world_model.World(fiction) world.set_concepts(fiction.concepts) for i in dir(fiction): if i[:8] == 'COMMAND_': setattr(command_map, i.partition('_')[2], getattr(fiction, i)) delattr(fiction, i) for (key, value) in discourse_model.SPIN_DEFAULTS.items(): if key not in fiction.discourse['spin']: fiction.discourse['spin'][key] = value while len(spin_files) > 0: next_file = spin_files.pop(0) new_spin = joker.load_spin(fiction.discourse['spin'], next_file) fiction.discourse['spin'].update(new_spin) presenter.center('spin: ' + next_file, out_streams) presenter.present('\n', out_streams) presenter.present('', out_streams) discourse = discourse_model.Discourse(fiction.discourse) reply = joker.show_frontmatter(discourse) if 'prologue' in discourse.metadata: reply += '\n\n' + joker.show_prologue(discourse.metadata) presenter.present(reply, out_streams) return (world, discourse) def handle_input(user_input, world, discourse, in_stream, out_streams): """Deal with input obtained, sending it to the appropriate module. 
The commanded character's concept is used when trying to recognize commands.""" c_concept = world.concept[discourse.spin['commanded']] user_input = recognizer.recognize(user_input, discourse, c_concept) if user_input.unrecognized: user_input = clarifier.clarify(user_input, c_concept, discourse, in_stream, out_streams) if user_input.command: user_input, id_list, world = simulator(user_input, world, discourse.spin['commanded']) if hasattr(world.item['@cosmos'], 'update_spin'): discourse.spin = world.item['@cosmos'].update_spin(world, discourse) spin = discourse.spin if hasattr(world.item['@cosmos'], 'use_spin'): spin = world.item['@cosmos'].use_spin(world, discourse.spin) f_concept = world.concept[spin['focalizer']] tale, discourse = teller(id_list, f_concept, discourse) presenter.present(tale, out_streams) elif user_input.directive: texts, world, discourse = joker.joke(user_input.normal, world, discourse) for text in texts: if text is not None: presenter.present(text, out_streams) discourse.input_list.update(user_input) return (user_input, world, discourse) def each_turn(world, discourse, in_stream, out_streams): 'Obtain and processes input, if the session is interactive.' if discourse.spin['commanded'] is None: if hasattr(world.item['@cosmos'], 'interval'): world.item['@cosmos'].interval() _, id_list, world = simulator(None, world, discourse.spin['commanded']) focal_concept = world.concept[discourse.spin['focalizer']] reply_text, discourse = teller(id_list, focal_concept, discourse) presenter.present(reply_text, out_streams) else: if (hasattr(discourse, 'initial_inputs') and len(discourse.initial_inputs) > 0): input_string = discourse.initial_inputs.pop(0) user_input = preparer.tokenize(input_string, discourse.separator) presenter.present('[> ' + input_string, out_streams, '', '') else: user_input = preparer.prepare(discourse.separator, discourse.typo.prompt, in_stream, out_streams) # After each input, present a newline all by itself. 
presenter.present('\n', out_streams, '', '') while len(user_input.tokens) > 0 and world.running: (user_input, world, discourse) = handle_input(user_input, world, discourse, in_stream, out_streams) presenter.present(discourse.input_list.show(1), out_streams.log) return (world, discourse) def simulator(user_input, world, commanded, actions_to_do=None): 'Simulate the IF world using the Action from user input.' if actions_to_do is None: actions_to_do = [] done_list = [] start_time = world.ticks for tag in world.item: if (world.item[tag].actor and not tag == commanded and world.item[tag].alive): # The commanded character does not act automatically. That is, # his, her, or its "act" method is not called. new_actions = world.item[tag].act(command_map, world.concept[tag]) actions_to_do.extend(new_actions) if commanded is not None and user_input is not None: commanded = world.item[commanded] c_action = commanded.do_command(user_input.normal, command_map, world) print c_action if c_action is not None: c_action.cause = '"' + ' '.join(user_input.normal) + '"' actions_to_do.append(c_action) print actions_to_do if user_input is not None: user_input.caused = c_action.id current_time = start_time while len(actions_to_do) > 0 and world.running: action = actions_to_do.pop(0) to_be_done = action.do(world) done_list.append(action.id) if action.final: world.running = False actions_to_do = to_be_done + actions_to_do if action.end > current_time: world.advance_clock(action.end - current_time) current_time = action.end return user_input, done_list, world def teller(id_list, concept, discourse): 'Narrate actions based on the concept. Update the discourse.' reply_plan = reply_planner.plan(id_list, concept, discourse) section = microplanner.specify(reply_plan, concept, discourse) output = section.realize(concept, discourse) return output, discourse def parse_command_line(argv): 'Improved option/argument parsing and help thanks to Andrew Plotkin.' 
parser = optparse.OptionParser(usage='[options] fiction.py [ spin.py ... ]') parser.add_option('--auto', dest='autofile', help='read inputs from FILE', metavar='FILE') parser.add_option('--nodebug', action='store_false', dest='debug', help='disable debugging directives', default=True) opts, args = parser.parse_args(argv[1:]) if not args: parser.print_usage() msg = ('At least one argument (the fiction file name) is ' + 'needed; any other file names are processed in order ' + 'as spin files.') raise joker.StartupError(msg) return opts, args def main(argv, in_stream=sys.stdin, out_stream=sys.stdout): "Set up a session and run Curveship's main loop." return_code = 0 try: out_streams = Multistream([out_stream]) opts, args = parse_command_line(argv) out_streams = start_log(out_streams) world, discourse = initialize(args[0], args[1:], out_streams) discourse.debug = opts.debug if opts.autofile is not None: auto = open(opts.autofile, 'r+') discourse.initial_inputs = auto.readlines() auto.close() if len(world.act) > 0: _, id_list, world = simulator(None, world, discourse.spin['commanded'], world.act.values()) focal_concept = world.concept[discourse.spin['focalizer']] reply_text, discourse = teller(id_list, focal_concept, discourse) presenter.present(reply_text, out_streams) while world.running: previous_time = time.time() world, discourse = each_turn(world, discourse, in_stream, out_streams) out_streams.log.write('#' + str(time.time() - previous_time)) except joker.StartupError, err: presenter.present(err.msg, Multistream([sys.stderr])) return_code = 2 except KeyboardInterrupt, err: presenter.present('\n', out_streams) return_code = 2 except EOFError, err: presenter.present('\n', out_streams) return_code = 2 finally: in_stream.close() out_streams.close() return return_code if __name__ == '__main__': sys.exit(main(sys.argv))