gt
stringclasses
1 value
context
stringlengths
2.49k
119k
# -*- coding: utf-8 -*-
"""The extraction front-end."""

import logging
import os
import pdb
import traceback

from dfvfs.helpers import source_scanner
from dfvfs.resolver import context

import plaso
# The following import makes sure the parsers are registered.
from plaso import parsers  # pylint: disable=unused-import
# The following import makes sure the hashers are registered.
from plaso import hashers  # pylint: disable=unused-import
from plaso.engine import single_process
from plaso.engine import utils as engine_utils
from plaso.frontend import frontend
from plaso.frontend import presets
from plaso.lib import definitions
from plaso.lib import errors
from plaso.lib import event
from plaso.lib import storage
from plaso.lib import timelib
from plaso.multi_processing import multi_process
from plaso.hashers import manager as hashers_manager
from plaso.parsers import manager as parsers_manager

import pytz


class ExtractionFrontend(frontend.Frontend):
  """Class that implements an extraction front-end."""

  # Number of files processed between profiling samples.
  _DEFAULT_PROFILING_SAMPLE_RATE = 1000

  # Approximately 250 MB of queued items per worker.
  _DEFAULT_QUEUE_SIZE = 125000

  def __init__(self):
    """Initializes the front-end object."""
    super(ExtractionFrontend, self).__init__()
    self._buffer_size = 0
    self._collection_process = None
    self._debug_mode = False
    self._enable_preprocessing = False
    self._enable_profiling = False
    self._engine = None
    self._filter_expression = None
    self._filter_object = None
    self._mount_path = None
    self._operating_system = None
    self._output_module = None
    self._parser_names = None
    self._process_archive_files = False
    self._profiling_sample_rate = self._DEFAULT_PROFILING_SAMPLE_RATE
    self._profiling_type = u'all'
    self._use_old_preprocess = False
    self._queue_size = self._DEFAULT_QUEUE_SIZE
    self._resolver_context = context.Context()
    self._single_process_mode = False
    self._show_worker_memory_information = False
    self._storage_file_path = None
    self._text_prepend = None

  def _CheckStorageFile(self, storage_file_path):
    """Checks if the storage file path is valid.

    Args:
      storage_file_path: The path of the storage file.

    Raises:
      BadConfigOption: if the storage file path is invalid.
    """
    if os.path.exists(storage_file_path):
      if not os.path.isfile(storage_file_path):
        raise errors.BadConfigOption(
            u'Storage file: {0:s} already exists and is not a file.'.format(
                storage_file_path))
      logging.warning(u'Appending to an already existing storage file.')

    dirname = os.path.dirname(storage_file_path)
    if not dirname:
      dirname = '.'

    # TODO: add a more thorough check to see if the storage file really is
    # a plaso storage file.

    if not os.access(dirname, os.W_OK):
      raise errors.BadConfigOption(
          u'Unable to write to storage file: {0:s}'.format(storage_file_path))

  # Note that this function is not called by the normal termination.
  def _CleanUpAfterAbort(self):
    """Signals the tool to stop running nicely after an abort."""
    if self._single_process_mode and self._debug_mode:
      logging.warning(u'Running in debug mode, set up debugger.')
      pdb.post_mortem()
      return

    if self._engine:
      self._engine.SignalAbort()

  def _GetParserFilterPreset(self, os_guess=u'', os_version=u''):
    """Determines the parser filter preset.

    Args:
      os_guess: optional string containing the operating system guessed by
                the preprocessing. The default is an empty string.
      os_version: optional string containing the operating system version
                  determined by the preprocessing. The default is an empty
                  string.

    Returns:
      The parser filter string or None.
    """
    # TODO: Make this more sane. Currently we are only checking against
    # one possible version of Windows, and then making the assumption if
    # that is not correct we default to Windows 7. Same thing with other
    # OS's, no assumption or checks are really made there.
    # Also this is done by default, and no way for the user to turn off
    # this behavior, need to add a parameter to the frontend that takes
    # care of overwriting this behavior.
    parser_filter_string = None

    # The version string takes precedence over the coarse OS guess below.
    if not parser_filter_string and os_version:
      os_version = os_version.lower()

      # TODO: Improve this detection, this should be more 'intelligent', since
      # there are quite a lot of versions out there that would benefit from
      # loading up the set of 'winxp' parsers.
      if u'windows xp' in os_version:
        parser_filter_string = u'winxp'
      elif u'windows server 2000' in os_version:
        parser_filter_string = u'winxp'
      elif u'windows server 2003' in os_version:
        parser_filter_string = u'winxp'
      elif u'windows' in os_version:
        # Fallback for other Windows versions.
        parser_filter_string = u'win7'

    if not parser_filter_string and os_guess:
      if os_guess == definitions.OS_LINUX:
        parser_filter_string = u'linux'
      elif os_guess == definitions.OS_MACOSX:
        parser_filter_string = u'macosx'
      elif os_guess == definitions.OS_WINDOWS:
        parser_filter_string = u'win7'

    return parser_filter_string

  def _PreprocessSource(self, source_path_specs, source_type):
    """Preprocesses the source.

    Args:
      source_path_specs: list of path specifications (instances of
                         dfvfs.PathSpec) to process.
      source_type: the dfVFS source type definition.

    Returns:
      The preprocessing object (instance of PreprocessObject).
    """
    pre_obj = None

    if self._use_old_preprocess and os.path.isfile(self._storage_file_path):
      # Check if the storage file contains a preprocessing object.
      try:
        with storage.StorageFile(
            self._storage_file_path, read_only=True) as store:
          storage_information = store.GetStorageInformation()
          if storage_information:
            logging.info(u'Using preprocessing information from a prior run.')
            # The most recent preprocessing object is the last one stored.
            pre_obj = storage_information[-1]
            self._enable_preprocessing = False
      except IOError:
        logging.warning(u'Storage file does not exist, running preprocess.')

    logging.debug(u'Starting preprocessing.')

    # TODO: move source_scanner.SourceScannerContext.SOURCE_TYPE_
    # to definitions.SOURCE_TYPE_.
    if (self._enable_preprocessing and source_type in [
        source_scanner.SourceScannerContext.SOURCE_TYPE_DIRECTORY,
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]):
      try:
        self._engine.PreprocessSource(
            source_path_specs, self._operating_system,
            resolver_context=self._resolver_context)
      except IOError as exception:
        logging.error(u'Unable to preprocess with error: {0:s}'.format(
            exception))
        # Fall back to an empty preprocessing object on failure.
        return event.PreprocessObject()

    logging.debug(u'Preprocessing done.')

    # TODO: Remove the need for direct access to the pre_obj in favor
    # of the knowledge base.
    pre_obj = getattr(self._engine.knowledge_base, u'_pre_obj', None)

    if not pre_obj:
      pre_obj = event.PreprocessObject()

    return pre_obj

  # TODO: have the frontend fill collection information gradually
  # and set it as the last step of preprocessing?
  # Split in:
  # * extraction preferences (user preferences)
  # * extraction settings (actual settings used)
  # * output/storage settings
  # * processing settings
  # * source settings (support more than one source)
  # * credentials (encryption)
  # * mount point
  def _PreprocessSetCollectionInformation(
      self, pre_obj, source_type, unused_engine, filter_file=None,
      parser_filter_string=None, preferred_encoding=u'utf-8'):
    """Sets the collection information as part of the preprocessing.

    Args:
      pre_obj: the preprocess object (instance of PreprocessObject).
      source_type: the dfVFS source type definition.
      unused_engine: the engine object (instance of BaseEngine). Currently
                     unused by this method.
      filter_file: a path to a file that contains find specifications.
                   The default is None.
      parser_filter_string: optional parser filter string. The default is
                            None.
      preferred_encoding: optional preferred encoding. The default is UTF-8.
    """
    collection_information = {}

    # TODO: informational values.
    collection_information[u'version'] = plaso.GetVersion()
    collection_information[u'debug'] = self._debug_mode

    # TODO: extraction preferences:
    if not parser_filter_string:
      parser_filter_string = u'(no list set)'
    collection_information[u'parser_selection'] = parser_filter_string
    collection_information[u'preferred_encoding'] = preferred_encoding

    # TODO: extraction info:
    collection_information[u'configured_zone'] = pre_obj.zone
    collection_information[u'parsers'] = self._parser_names
    collection_information[u'preprocess'] = self._enable_preprocessing

    if self._filter_expression:
      collection_information[u'filter'] = self._filter_expression

    if filter_file and os.path.isfile(filter_file):
      filters = []
      with open(filter_file, 'rb') as file_object:
        for line in file_object.readlines():
          filters.append(line.rstrip())
      collection_information[u'file_filter'] = u', '.join(filters)

    if self._operating_system:
      collection_information[u'os_detected'] = self._operating_system
    else:
      collection_information[u'os_detected'] = u'N/A'

    # TODO: processing settings:
    collection_information[u'protobuf_size'] = self._buffer_size
    collection_information[u'time_of_run'] = timelib.Timestamp.GetNow()

    if self._single_process_mode:
      collection_information[u'runtime'] = u'single process mode'
    else:
      collection_information[u'runtime'] = u'multi process mode'

      # TODO: retrieve this value from the multi-process engine.
      # refactor engine to set number_of_extraction_workers
      # before ProcessSources.
      collection_information[u'workers'] = 0

    # TODO: output/storage settings:
    collection_information[u'output_file'] = self._storage_file_path

    # TODO: source settings:

    # TODO: move source_scanner.SourceScannerContext.SOURCE_TYPE_
    # to definitions.SOURCE_TYPE_.
    if source_type == source_scanner.SourceScannerContext.SOURCE_TYPE_DIRECTORY:
      recursive = True
    else:
      recursive = False

    # TODO: replace by scan node.
    # collection_information[u'file_processed'] = self._source_path
    collection_information[u'recursive'] = recursive
    # TODO: replace by scan node.
    # collection_information[u'vss parsing'] = bool(self.vss_stores)

    # TODO: move source_scanner.SourceScannerContext.SOURCE_TYPE_
    # to definitions.SOURCE_TYPE_.
    if source_type in [
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]:
      collection_information[u'method'] = u'imaged processed'
      # TODO: replace by scan node.
      # collection_information[u'image_offset'] = self.partition_offset
    else:
      collection_information[u'method'] = u'OS collection'

    pre_obj.collection_information = collection_information

  def _PreprocessSetTimezone(self, pre_obj, timezone=pytz.UTC):
    """Sets the timezone as part of the preprocessing.

    Args:
      pre_obj: the previously created preprocessing object (instance of
               PreprocessObject) or None.
      timezone: optional preferred timezone. The default is UTC.
    """
    if not timezone:
      timezone = pytz.UTC

    if hasattr(pre_obj, u'time_zone_str'):
      logging.info(u'Setting timezone to: {0:s}'.format(pre_obj.time_zone_str))

      try:
        pre_obj.zone = pytz.timezone(pre_obj.time_zone_str)
      except pytz.UnknownTimeZoneError:
        # NOTE(review): this branch looks unreachable — timezone was already
        # defaulted to pytz.UTC above, so "not timezone" is always False here.
        if not timezone:
          logging.warning(u'timezone was not properly set, defaulting to UTC')
          timezone = pytz.UTC
        else:
          logging.warning((
              u'Unable to automatically configure timezone falling back '
              u'to preferred timezone value: {0:s}').format(timezone))
        pre_obj.zone = timezone

    else:
      # TODO: shouldn't the user to be able to always override the timezone
      # detection? Or do we need an input sanitization function.
      pre_obj.zone = timezone

    # Safety net: make sure a zone is always set on the preprocess object.
    if not getattr(pre_obj, u'zone', None):
      pre_obj.zone = timezone

  def GetHashersInformation(self):
    """Retrieves the hashers information.

    Returns:
      A list of tuples of hasher names and descriptions.
    """
    return hashers_manager.HashersManager.GetHashersInformation()

  def GetParserPluginsInformation(self):
    """Retrieves the parser plugins information.

    Returns:
      A list of tuples of parser plugin names and descriptions.
    """
    return parsers_manager.ParsersManager.GetParserPluginsInformation()

  def GetParserPresetsInformation(self):
    """Retrieves the parser presets information.

    Returns:
      A list of tuples of parser preset names and related parsers names.
    """
    parser_presets_information = []
    for preset_name, parser_names in sorted(presets.categories.items()):
      parser_presets_information.append((preset_name, u', '.join(parser_names)))

    return parser_presets_information

  def GetParsersInformation(self):
    """Retrieves the parsers information.

    Returns:
      A list of tuples of parser names and descriptions.
    """
    return parsers_manager.ParsersManager.GetParsersInformation()

  def ProcessSources(
      self, source_path_specs, source_type, enable_sigsegv_handler=False,
      filter_file=None, hasher_names_string=None, parser_filter_string=None,
      preferred_encoding=u'utf-8', single_process_mode=False,
      status_update_callback=None,
      storage_serializer_format=definitions.SERIALIZER_FORMAT_PROTOBUF,
      timezone=pytz.UTC):
    """Processes the sources.

    Args:
      source_path_specs: list of path specifications (instances of
                         dfvfs.PathSpec) to process.
      source_type: the dfVFS source type definition.
      enable_sigsegv_handler: optional boolean value to indicate the
                              SIGSEGV handler should be enabled. The
                              default is False.
      filter_file: optional path to a file that contains find
                   specifications. The default is None.
      hasher_names_string: optional comma separated string of names of
                           hashers to enable. The default is None.
      parser_filter_string: optional parser filter string. The default is
                            None.
      preferred_encoding: optional preferred encoding. The default is UTF-8.
      single_process_mode: optional boolean value to indicate if the
                           front-end should run in single process mode.
                           The default is False.
      status_update_callback: optional callback function for status updates.
                              The default is None.
      storage_serializer_format: optional storage serializer format.
                                 The default is protobuf.
      timezone: optional preferred timezone. The default is UTC.

    Returns:
      The processing status (instance of ProcessingStatus) or None.

    Raises:
      SourceScannerError: if the source scanner could not find a supported
                          file system.
      UserAbort: if the user initiated an abort.
    """
    # If the source is a directory or a storage media image
    # run pre-processing.
    # TODO: move source_scanner.SourceScannerContext.SOURCE_TYPE_
    # to definitions.SOURCE_TYPE_.
    if source_type in [
        source_scanner.SourceScannerContext.SOURCE_TYPE_DIRECTORY,
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_DEVICE,
        source_scanner.SourceScannerContext.SOURCE_TYPE_STORAGE_MEDIA_IMAGE]:
      self.SetEnablePreprocessing(True)
    else:
      self.SetEnablePreprocessing(False)

    self._CheckStorageFile(self._storage_file_path)

    self._single_process_mode = single_process_mode
    # TODO: move source_scanner.SourceScannerContext.SOURCE_TYPE_
    # to definitions.SOURCE_TYPE_.
    if source_type == source_scanner.SourceScannerContext.SOURCE_TYPE_FILE:
      # No need to multi process a single file source.
      self._single_process_mode = True

    if self._single_process_mode:
      self._engine = single_process.SingleProcessEngine(self._queue_size)
    else:
      self._engine = multi_process.MultiProcessEngine(
          maximum_number_of_queued_items=self._queue_size)

    self._engine.SetEnableDebugOutput(self._debug_mode)
    self._engine.SetEnableProfiling(
        self._enable_profiling,
        profiling_sample_rate=self._profiling_sample_rate,
        profiling_type=self._profiling_type)

    pre_obj = self._PreprocessSource(source_path_specs, source_type)

    self._operating_system = getattr(pre_obj, u'guessed_os', None)

    if not parser_filter_string:
      guessed_os = self._operating_system
      os_version = getattr(pre_obj, u'osversion', u'')
      parser_filter_string = self._GetParserFilterPreset(
          os_guess=guessed_os, os_version=os_version)

      if parser_filter_string:
        logging.info(u'Parser filter expression changed to: {0:s}'.format(
            parser_filter_string))

    self._parser_names = []
    for _, parser_class in parsers_manager.ParsersManager.GetParsers(
        parser_filter_string=parser_filter_string):
      self._parser_names.append(parser_class.NAME)

    # The filestat parser determines whether directory entries themselves
    # produce events; propagate that to the engine.
    if u'filestat' in self._parser_names:
      include_directory_stat = True
    else:
      include_directory_stat = False

    # NOTE(review): _hasher_names is not initialized in __init__ and is not
    # read again within this method — presumably consumed elsewhere; confirm.
    self._hasher_names = []
    hasher_manager = hashers_manager.HashersManager
    for hasher_name in hasher_manager.GetHasherNamesFromString(
        hasher_names_string=hasher_names_string):
      self._hasher_names.append(hasher_name)

    self._PreprocessSetTimezone(pre_obj, timezone=timezone)

    if filter_file:
      filter_find_specs = engine_utils.BuildFindSpecsFromFile(
          filter_file, pre_obj=pre_obj)
    else:
      filter_find_specs = None

    self._PreprocessSetCollectionInformation(
        pre_obj, source_type, self._engine, filter_file=filter_file,
        parser_filter_string=parser_filter_string,
        preferred_encoding=preferred_encoding)

    if self._output_module:
      storage_writer = storage.BypassStorageWriter(
          self._engine.event_object_queue, self._storage_file_path,
          output_module_string=self._output_module, pre_obj=pre_obj)
    else:
      storage_writer = storage.FileStorageWriter(
          self._engine.event_object_queue, self._storage_file_path,
          buffer_size=self._buffer_size, pre_obj=pre_obj,
          serializer_format=storage_serializer_format)

    storage_writer.SetEnableProfiling(
        self._enable_profiling, profiling_type=self._profiling_type)

    processing_status = None
    try:
      if self._single_process_mode:
        logging.debug(u'Starting extraction in single process mode.')

        processing_status = self._engine.ProcessSources(
            source_path_specs, storage_writer,
            filter_find_specs=filter_find_specs,
            filter_object=self._filter_object,
            hasher_names_string=hasher_names_string,
            include_directory_stat=include_directory_stat,
            mount_path=self._mount_path,
            parser_filter_string=parser_filter_string,
            process_archive_files=self._process_archive_files,
            resolver_context=self._resolver_context,
            status_update_callback=status_update_callback,
            text_prepend=self._text_prepend)

      else:
        logging.debug(u'Starting extraction in multi process mode.')

        # TODO: pass number_of_extraction_workers.
        processing_status = self._engine.ProcessSources(
            source_path_specs, storage_writer,
            enable_sigsegv_handler=enable_sigsegv_handler,
            filter_find_specs=filter_find_specs,
            filter_object=self._filter_object,
            hasher_names_string=hasher_names_string,
            include_directory_stat=include_directory_stat,
            mount_path=self._mount_path,
            parser_filter_string=parser_filter_string,
            process_archive_files=self._process_archive_files,
            status_update_callback=status_update_callback,
            show_memory_usage=self._show_worker_memory_information,
            text_prepend=self._text_prepend)

    except KeyboardInterrupt:
      self._CleanUpAfterAbort()
      raise errors.UserAbort

    # TODO: check if this still works and if still needed.
    except Exception as exception:
      if not self._single_process_mode:
        raise

      # The tool should generally not be run in single process mode
      # for other reasons than to debug. Hence the general error
      # catching.
      logging.error(u'An uncaught exception occurred: {0:s}.\n{1:s}'.format(
          exception, traceback.format_exc()))
      if self._debug_mode:
        pdb.post_mortem()

    return processing_status

  def SetDebugMode(self, enable_debug=False):
    """Enables or disables debug mode.

    Args:
      enable_debug: optional boolean value to indicate whether
                    debugging mode should be enabled. The default is False.
    """
    self._debug_mode = enable_debug

  def SetEnablePreprocessing(self, enable_preprocessing):
    """Enables or disables preprocessing.

    Args:
      enable_preprocessing: boolean value to indicate if the preprocessing
                            should be performed.
    """
    self._enable_preprocessing = enable_preprocessing

  def SetEnableProfiling(
      self, enable_profiling, profiling_sample_rate=1000,
      profiling_type=u'all'):
    """Enables or disables profiling.

    Args:
      enable_profiling: boolean value to indicate if the profiling should
                        be enabled.
      profiling_sample_rate: optional integer indicating the profiling
                             sample rate. The value contains the number of
                             files processed. The default value is 1000.
      profiling_type: optional profiling type. The default is 'all'.
    """
    self._enable_profiling = enable_profiling
    self._profiling_sample_rate = profiling_sample_rate
    self._profiling_type = profiling_type

  def SetUseOldPreprocess(self, use_old_preprocess):
    """Set the use old preprocess flag.

    Args:
      use_old_preprocess: boolean value to indicate if the engine should
                          use the old preprocessing information or run
                          preprocessing again.
    """
    self._use_old_preprocess = use_old_preprocess

  def SetStorageFile(self, storage_file_path):
    """Sets the storage file path.

    Args:
      storage_file_path: The path of the storage file.
    """
    self._storage_file_path = storage_file_path

  def SetStorageSerializer(self, storage_serializer_format):
    """Sets the storage serializer.

    Args:
      storage_serializer_format: string denoting the type of serializer
                                 to be used in the storage. The values
                                 can be either "proto" or "json".
    """
    # NOTE(review): _EVENT_SERIALIZER_FORMAT_JSON/_PROTO are not defined in
    # this class — presumably inherited from frontend.Frontend; confirm.
    # Unknown formats are silently ignored.
    if storage_serializer_format not in (
        self._EVENT_SERIALIZER_FORMAT_JSON,
        self._EVENT_SERIALIZER_FORMAT_PROTO):
      return

    self._storage_serializer_format = storage_serializer_format

  def SetShowMemoryInformation(self, show_memory=True):
    """Sets a flag telling the worker monitor to show memory information.

    Args:
      show_memory: a boolean (defaults to True) that indicates whether or not
                   the foreman should include memory information as part of
                   the worker monitoring.
    """
    self._show_worker_memory_information = show_memory

  def SetTextPrepend(self, text_prepend):
    """Sets the text prepend.

    Args:
      text_prepend: free form text that is prepended to each path.
    """
    self._text_prepend = text_prepend
import re
import csv

from aerofiles.errors import ParserError


RE_COUNTRY = re.compile(r'^([\w]{2})?$', re.I)
RE_LATITUDE = re.compile(r'^([\d]{2})([\d]{2}\.[\d]{3})([NS])$', re.I)
RE_LONGITUDE = re.compile(r'^([\d]{3})([\d]{2}\.[\d]{3})([EW])$', re.I)
RE_ELEVATION = re.compile(r'^(-?[\d]*(?:\.[\d]+)?)\s?(m|ft)?$', re.I)
RE_RUNWAY_LENGTH = re.compile(r'^(?:([\d]+(?:\.[\d]+)?)\s?(ml|nm|m)?)?$', re.I)
RE_FREQUENCY = re.compile(r'^1[\d]{2}\.[\d]+?$')
RE_DISTANCE = re.compile(r'^(-?[\d]*(?:\.[\d]+)?)\s?(m|ft|km|ml|nm)?$', re.I)


class Reader:
    """
    A reader for the SeeYou CUP waypoint file format.

    see http://download.naviter.com/docs/CUP-file-format-description.pdf
    """

    def __init__(self, fp=None):
        self.fp = fp

    def __iter__(self):
        return self.next()

    def next(self):
        """Yield the waypoints parsed from the file passed to __init__."""
        waypoints = self.read(self.fp)['waypoints']
        for waypoint in waypoints:
            yield waypoint

    def read(self, fp):
        """Parse a CUP file.

        Args:
            fp: an open file-like object containing CUP data.

        Returns:
            A dict with two keys: 'waypoints' (list of waypoint dicts) and
            'tasks' (list of task dicts with 'name', 'waypoints', 'options'
            and 'obs_zones' keys).
        """
        waypoints = []
        tasks = []

        task_information = False

        for fields in csv.reader(fp):
            if fields == ["-----Related Tasks-----"]:
                task_information = True
                continue

            # csv.reader yields [] for blank lines; skip them so the task
            # section does not crash on fields[0] (robustness fix).
            if not fields:
                continue

            if task_information:
                if fields[0].lower() == 'options':
                    # Bug fix: the decoded options were previously stored
                    # under the key 'Options' while the task dict is
                    # initialised with 'options', so they were never
                    # visible under the documented key.
                    tasks[-1]['options'] = self.decode_task_options(fields)
                elif fields[0].lower().startswith('obszone'):
                    tasks[-1]['obs_zones'].append(
                        self.decode_task_obs_zone(fields))
                else:
                    tasks.append({
                        'name': self.decode_task_name(fields),
                        'waypoints': self.decode_task_waypoints(fields),
                        'options': None,
                        'obs_zones': [],
                    })
            else:
                waypoint = self.decode_waypoint(fields)
                if waypoint:
                    waypoints.append(waypoint)

        return dict(waypoints=waypoints, tasks=tasks)

    def decode_waypoint(self, fields):
        """Decode one CSV row into a waypoint dict, or return None for
        header rows, empty rows and comment rows (starting with '*').

        Raises:
            ParserError: if the row has fewer than 11 or more than 13 fields,
                         or any individual field fails to decode.
        """
        # Ignore header line
        if fields == ['name', 'code', 'country', 'lat', 'lon', 'elev',
                      'style', 'rwdir', 'rwlen', 'freq', 'desc']:
            return

        # Ignore empty lines
        num_fields = len(fields)
        if num_fields == 0:
            return

        # Ignore comments
        if fields[0].startswith('*'):
            return

        if num_fields < 11:
            raise ParserError(
                'Not enough fields provided. Expecting at minimum following '
                '11 fields:\n'
                'name,code,country,lat,lon,elev,style,rwdir,rwlen,freq,desc')

        if num_fields > 13:
            raise ParserError(
                'Too many fields provided. Expecting at maximum following '
                '13 fields:\n'
                'name,code,country,lat,lon,elev,style,rwdir,rwlen,freq,desc,'
                'userdata,pics')

        fields = [field.strip() for field in fields]

        return {
            'name': self.decode_name(fields[0]),
            'code': self.decode_code(fields[1]),
            'country': self.decode_country(fields[2]),
            'latitude': self.decode_latitude(fields[3]),
            'longitude': self.decode_longitude(fields[4]),
            'elevation': self.decode_elevation(fields[5]),
            'style': self.decode_style(fields[6]),
            'runway_direction': self.decode_runway_direction(fields[7]),
            'runway_length': self.decode_runway_length(fields[8]),
            'frequency': self.decode_frequency(fields[9]),
            'description': self.decode_description(fields[10]),
        }

    def decode_name(self, name):
        """Return the mandatory waypoint name; raise if empty."""
        if not name:
            raise ParserError('Name field must not be empty')

        return name

    def decode_code(self, code):
        """Return the short code or None if empty."""
        if not code:
            return None

        return code

    def decode_country(self, country):
        """Return the two-letter (or empty) country code."""
        if RE_COUNTRY.match(country):
            return country
        else:
            raise ParserError('Invalid country code')

    def decode_latitude(self, latitude):
        """Decode a 'DDMM.mmmN/S' latitude string into decimal degrees."""
        match = RE_LATITUDE.match(latitude)
        if not match:
            raise ParserError('Reading latitude failed')

        latitude = int(match.group(1)) + float(match.group(2)) / 60.

        if not (0 <= latitude <= 90):
            raise ParserError('Latitude out of bounds')

        if match.group(3).upper() == 'S':
            latitude = -latitude

        return latitude

    def decode_longitude(self, longitude):
        """Decode a 'DDDMM.mmmE/W' longitude string into decimal degrees."""
        match = RE_LONGITUDE.match(longitude)
        if not match:
            raise ParserError('Reading longitude failed')

        longitude = int(match.group(1)) + float(match.group(2)) / 60.

        if not (0 <= longitude <= 180):
            raise ParserError('Longitude out of bounds')

        if match.group(3).upper() == 'W':
            longitude = -longitude

        return longitude

    def decode_elevation(self, elevation):
        """Decode an elevation string into a {'value', 'unit'} dict."""
        match = RE_ELEVATION.match(elevation)
        if not match:
            raise ParserError('Reading elevation failed')

        try:
            value = float(match.group(1))
        except ValueError:
            value = None

        unit = match.group(2)
        if unit and unit.lower() not in ('m', 'ft'):
            raise ParserError('Unknown elevation unit')

        return {
            'value': value,
            'unit': unit,
        }

    def decode_style(self, style):
        """Decode the waypoint style; out-of-range values map to 0."""
        try:
            style = int(style)
        except ValueError:
            raise ParserError('Reading style failed')

        if not (1 <= style <= 17):
            style = 0

        return style

    def decode_runway_direction(self, runway_direction):
        """Decode the runway heading in degrees, or None if empty."""
        if not runway_direction:
            return None

        try:
            runway_direction = int(runway_direction)
        except ValueError:
            raise ParserError('Reading runway direction failed')

        return runway_direction

    def decode_runway_length(self, runway_length):
        """Decode the runway length into a {'value', 'unit'} dict."""
        if not runway_length:
            return {
                'value': None,
                'unit': None,
            }

        match = RE_RUNWAY_LENGTH.match(runway_length)
        if not match:
            raise ParserError('Reading runway length failed')

        try:
            value = float(match.group(1))
        except ValueError:
            value = None

        unit = match.group(2)
        if unit and unit.lower() not in ('m', 'nm', 'ml'):
            raise ParserError('Unknown runway length unit')

        return {
            'value': value,
            'unit': unit,
        }

    def decode_frequency(self, frequency):
        """Validate and return the radio frequency string, or None."""
        if not frequency:
            return None

        if not RE_FREQUENCY.match(frequency):
            raise ParserError('Reading frequency failed')

        return frequency

    def decode_description(self, description):
        """Return the free-form description, or None if empty."""
        if not description:
            return None

        return description

    def decode_task_options(self, fields):
        """Decode an 'Options' row of the task section into a dict."""
        # Bug fix: the caller matches 'options' case-insensitively, so this
        # guard must too (was a case-sensitive comparison with "Options"
        # that silently returned None for e.g. 'options').
        if fields[0].lower() != 'options':
            return

        task_options = {
            'no_start': None,
            'task_time': None,
            'wp_dis': False,
            'near_dis': None,
            'near_alt': None,
            'min_dis': False,
            'random_order': False,
            'max_pts': None,
            'before_pts': None,
            'after_pts': None,
            'bonus': None,
        }

        for field in fields[1:]:
            # Split only on the first '=' so values containing '=' survive.
            field_type, field_entry = field.split("=", 1)
            if field_type == 'NoStart':
                task_options['no_start'] = field_entry
            elif field_type == 'TaskTime':
                task_options['task_time'] = field_entry
            elif field_type == 'WpDis':
                task_options['wp_dis'] = field_entry == "True"
            elif field_type == 'NearDis':
                task_options['near_dis'] = self.decode_distance(field_entry)
            elif field_type == 'NearAlt':
                task_options['near_alt'] = self.decode_distance(field_entry)
            elif field_type == 'MinDis':
                task_options['min_dis'] = field_entry == "True"
            elif field_type == 'RandomOrder':
                task_options['random_order'] = field_entry == "True"
            elif field_type == 'MaxPts':
                task_options['max_pts'] = int(field_entry)
            elif field_type == 'BeforePts':
                task_options['before_pts'] = int(field_entry)
            elif field_type == 'AfterPts':
                task_options['after_pts'] = int(field_entry)
            elif field_type == 'Bonus':
                task_options['bonus'] = int(field_entry)
            else:
                raise Exception('Input contains unsupported option %s' % field)

        return task_options

    def decode_task_obs_zone(self, fields):
        """Decode an 'ObsZone' row of the task section into a dict."""
        task_obs_zone = {
            'obs_zone': None,
            'style': None,
            'r1': None,
            'a1': None,
            'r2': None,
            'a2': None,
            'a12': None,
            'line': False,
            'move': False,
            'reduce': False,
        }

        for field in fields:
            # Split only on the first '=' so values containing '=' survive.
            field_type, field_entry = field.split("=", 1)
            if field_type.lower() == 'obszone':
                task_obs_zone['obs_zone'] = int(field_entry)
            elif field_type == 'Style':
                task_obs_zone['style'] = int(field_entry)
            elif field_type == 'A1':
                task_obs_zone['a1'] = int(field_entry)
            elif field_type == 'A2':
                task_obs_zone['a2'] = int(field_entry)
            elif field_type == 'A12':
                task_obs_zone['a12'] = int(field_entry)
            elif field_type == 'R1':
                task_obs_zone['r1'] = self.decode_distance(field_entry)
            elif field_type == 'R2':
                task_obs_zone['r2'] = self.decode_distance(field_entry)
            # Bug fix: 'Line=0', 'Move=0' and 'Reduce=0' are valid CUP input
            # meaning False; previously any value other than "1" fell
            # through to the else branch and raised.
            elif field_type == 'Line':
                task_obs_zone['line'] = field_entry == "1"
            elif field_type == 'Move':
                task_obs_zone['move'] = field_entry == "1"
            elif field_type == 'Reduce':
                task_obs_zone['reduce'] = field_entry == "1"
            else:
                raise Exception(
                    'A taskpoint may not contain key %s' % field_type)

        return task_obs_zone

    def decode_task_name(self, fields):
        """Return the task name (first column of a task definition row)."""
        return fields[0]

    def decode_task_waypoints(self, fields):
        """Return the task's waypoint names (all columns after the name)."""
        return fields[1::]

    def decode_distance(self, distance_str):
        """Decode a distance string into a {'value', 'unit'} dict."""
        if not distance_str:
            return {
                'value': None,
                'unit': None,
            }

        match = RE_DISTANCE.match(distance_str)
        if not match:
            raise ParserError('Reading neardis failed')

        try:
            value = float(match.group(1))
        except ValueError:
            value = None

        unit = match.group(2)
        if unit and unit.lower() not in ('m', 'ft', 'km', 'ml', 'nm'):
            raise ParserError('Unknown distance unit')

        return {
            'value': value,
            'unit': unit,
        }
# Python test set -- part 4b, built-in functions n-z
# NOTE(review): legacy Python 2 script -- `print` statements, `raise Exc, msg`
# syntax, long literals (100L) and backtick repr.  TESTFN, TestFailed, fcmp,
# verify and unlink all come from the star-import below.
from test_support import *

# --- oct(): octal string conversion for ints and longs -----------------------
print 'oct'
if oct(100) != '0144': raise TestFailed, 'oct(100)'
if oct(100L) != '0144L': raise TestFailed, 'oct(100L)'
# Negative int result depends on the platform word size (32- vs 64-bit).
if oct(-100) not in ('037777777634', '01777777777777777777634'): raise TestFailed, 'oct(-100)'
if oct(-100L) != '-0144L': raise TestFailed, 'oct(-100L)'

# --- open() / readline() / read(): exercised through a scratch file ----------
print 'open'
# NB the first 4 lines are also used to test input and raw_input, below
fp = open(TESTFN, 'w')
try:
    fp.write('1+1\n')
    fp.write('1+1\n')
    fp.write('The quick brown fox jumps over the lazy dog')
    fp.write('.\n')
    fp.write('Dear John\n')
    fp.write('XXX'*100)
    fp.write('YYY'*100)
finally:
    fp.close()
#
fp = open(TESTFN, 'r')
try:
    if fp.readline(4) != '1+1\n': raise TestFailed, 'readline(4) # exact'
    if fp.readline(4) != '1+1\n': raise TestFailed, 'readline(4) # exact'
    if fp.readline() != 'The quick brown fox jumps over the lazy dog.\n': raise TestFailed, 'readline() # default'
    if fp.readline(4) != 'Dear': raise TestFailed, 'readline(4) # short'
    if fp.readline(100) != ' John\n': raise TestFailed, 'readline(100)'
    if fp.read(300) != 'XXX'*100: raise TestFailed, 'read(300)'
    if fp.read(1000) != 'YYY'*100: raise TestFailed, 'read(1000) # truncate'
finally:
    fp.close()

# --- ord() -------------------------------------------------------------------
print 'ord'
if ord(' ') != 32: raise TestFailed, 'ord(\' \')'
if ord('A') != 65: raise TestFailed, 'ord(\'A\')'
if ord('a') != 97: raise TestFailed, 'ord(\'a\')'

# --- pow(): plain ints -------------------------------------------------------
print 'pow'
if pow(0,0) != 1: raise TestFailed, 'pow(0,0)'
if pow(0,1) != 0: raise TestFailed, 'pow(0,1)'
if pow(1,0) != 1: raise TestFailed, 'pow(1,0)'
if pow(1,1) != 1: raise TestFailed, 'pow(1,1)'
#
if pow(2,0) != 1: raise TestFailed, 'pow(2,0)'
if pow(2,10) != 1024: raise TestFailed, 'pow(2,10)'
if pow(2,20) != 1024*1024: raise TestFailed, 'pow(2,20)'
if pow(2,30) != 1024*1024*1024: raise TestFailed, 'pow(2,30)'
#
if pow(-2,0) != 1: raise TestFailed, 'pow(-2,0)'
if pow(-2,1) != -2: raise TestFailed, 'pow(-2,1)'
if pow(-2,2) != 4: raise TestFailed, 'pow(-2,2)'
if pow(-2,3) != -8: raise TestFailed, 'pow(-2,3)'
# --- pow(): longs ------------------------------------------------------------
if pow(0L,0) != 1: raise TestFailed, 'pow(0L,0)'
if pow(0L,1) != 0: raise TestFailed, 'pow(0L,1)'
if pow(1L,0) != 1: raise TestFailed, 'pow(1L,0)'
if pow(1L,1) != 1: raise TestFailed, 'pow(1L,1)'
#
if pow(2L,0) != 1: raise TestFailed, 'pow(2L,0)'
if pow(2L,10) != 1024: raise TestFailed, 'pow(2L,10)'
if pow(2L,20) != 1024*1024: raise TestFailed, 'pow(2L,20)'
if pow(2L,30) != 1024*1024*1024: raise TestFailed, 'pow(2L,30)'
#
if pow(-2L,0) != 1: raise TestFailed, 'pow(-2L,0)'
if pow(-2L,1) != -2: raise TestFailed, 'pow(-2L,1)'
if pow(-2L,2) != 4: raise TestFailed, 'pow(-2L,2)'
if pow(-2L,3) != -8: raise TestFailed, 'pow(-2L,3)'
# --- pow(): floats (fcmp tolerant float compare from test_support) -----------
if fcmp(pow(0.,0), 1.): raise TestFailed, 'pow(0.,0)'
if fcmp(pow(0.,1), 0.): raise TestFailed, 'pow(0.,1)'
if fcmp(pow(1.,0), 1.): raise TestFailed, 'pow(1.,0)'
if fcmp(pow(1.,1), 1.): raise TestFailed, 'pow(1.,1)'
#
if fcmp(pow(2.,0), 1.): raise TestFailed, 'pow(2.,0)'
if fcmp(pow(2.,10), 1024.): raise TestFailed, 'pow(2.,10)'
if fcmp(pow(2.,20), 1024.*1024.): raise TestFailed, 'pow(2.,20)'
if fcmp(pow(2.,30), 1024.*1024.*1024.): raise TestFailed, 'pow(2.,30)'
#
# XXX These don't work -- negative float to the float power...
#if fcmp(pow(-2.,0), 1.): raise TestFailed, 'pow(-2.,0)'
#if fcmp(pow(-2.,1), -2.): raise TestFailed, 'pow(-2.,1)'
#if fcmp(pow(-2.,2), 4.): raise TestFailed, 'pow(-2.,2)'
#if fcmp(pow(-2.,3), -8.): raise TestFailed, 'pow(-2.,3)'
#
# Three-argument pow over every int/long/float combination: 2**10 % 1000 == 24.
for x in 2, 2L, 2.0:
    for y in 10, 10L, 10.0:
        for z in 1000, 1000L, 1000.0:
            if fcmp(pow(x, y, z), 24.0):
                raise TestFailed, 'pow(%s, %s, %s)' % (x, y, z)

# --- range() -----------------------------------------------------------------
print 'range'
if range(3) != [0, 1, 2]: raise TestFailed, 'range(3)'
if range(1, 5) != [1, 2, 3, 4]: raise TestFailed, 'range(1, 5)'
if range(0) != []: raise TestFailed, 'range(0)'
if range(-3) != []: raise TestFailed, 'range(-3)'
if range(1, 10, 3) != [1, 4, 7]: raise TestFailed, 'range(1, 10, 3)'
if range(5, -5, -3) != [5, 2, -1, -4]: raise TestFailed, 'range(5, -5, -3)'

# --- input() / raw_input(): re-read the scratch file via sys.stdin -----------
print 'input and raw_input'
import sys
fp = open(TESTFN, 'r')
savestdin = sys.stdin
try:
    sys.stdin = fp
    if input() != 2: raise TestFailed, 'input()'
    if input('testing\n') != 2: raise TestFailed, 'input()'
    if raw_input() != 'The quick brown fox jumps over the lazy dog.': raise TestFailed, 'raw_input()'
    if raw_input('testing\n') != 'Dear John': raise TestFailed, 'raw_input(\'testing\\n\')'
finally:
    sys.stdin = savestdin
    fp.close()

# --- reduce() ----------------------------------------------------------------
print 'reduce'
if reduce(lambda x, y: x+y, ['a', 'b', 'c'], '') != 'abc': raise TestFailed, 'reduce(): implode a string'
if reduce(lambda x, y: x+y, [['a', 'c'], [], ['d', 'w']], []) != ['a','c','d','w']: raise TestFailed, 'reduce(): append'
if reduce(lambda x, y: x*y, range(2,8), 1) != 5040: raise TestFailed, 'reduce(): compute 7!'
if reduce(lambda x, y: x*y, range(2,21), 1L) != 2432902008176640000L: raise TestFailed, 'reduce(): compute 20!, use long'
class Squares:
    # Lazily-extended, length-capped sequence of perfect squares; feeds
    # reduce() an old-style instance that only supports __getitem__/__len__.
    def __init__(self, max):
        self.max = max
        self.sofar = []
    def __len__(self): return len(self.sofar)
    def __getitem__(self, i):
        if not 0 <= i < self.max: raise IndexError
        n = len(self.sofar)
        while n <= i:
            self.sofar.append(n*n)
            n = n+1
        return self.sofar[i]
if reduce(lambda x, y: x+y, Squares(10)) != 285: raise TestFailed, 'reduce(<+>, Squares(10))'
if reduce(lambda x, y: x+y, Squares(10), 0) != 285: raise TestFailed, 'reduce(<+>, Squares(10), 0)'
if reduce(lambda x, y: x+y, Squares(0), 0) != 0: raise TestFailed, 'reduce(<+>, Squares(0), 0)'

# --- reload() ----------------------------------------------------------------
print 'reload'
import marshal
reload(marshal)
import string
reload(string)
## import sys
## try: reload(sys)
## except ImportError: pass
## else: raise TestFailed, 'reload(sys) should fail'

# --- repr() ------------------------------------------------------------------
print 'repr'
if repr('') != '\'\'': raise TestFailed, 'repr(\'\')'
if repr(0) != '0': raise TestFailed, 'repr(0)'
if repr(0L) != '0L': raise TestFailed, 'repr(0L)'
if repr(()) != '()': raise TestFailed, 'repr(())'
if repr([]) != '[]': raise TestFailed, 'repr([])'
if repr({}) != '{}': raise TestFailed, 'repr({})'

# --- round(): round-half-away-from-zero in Python 2 --------------------------
print 'round'
if round(0.0) != 0.0: raise TestFailed, 'round(0.0)'
if round(1.0) != 1.0: raise TestFailed, 'round(1.0)'
if round(10.0) != 10.0: raise TestFailed, 'round(10.0)'
if round(1000000000.0) != 1000000000.0: raise TestFailed, 'round(1000000000.0)'
if round(1e20) != 1e20: raise TestFailed, 'round(1e20)'
if round(-1.0) != -1.0: raise TestFailed, 'round(-1.0)'
if round(-10.0) != -10.0: raise TestFailed, 'round(-10.0)'
if round(-1000000000.0) != -1000000000.0: raise TestFailed, 'round(-1000000000.0)'
if round(-1e20) != -1e20: raise TestFailed, 'round(-1e20)'
if round(0.1) != 0.0: raise TestFailed, 'round(0.0)'
if round(1.1) != 1.0: raise TestFailed, 'round(1.0)'
if round(10.1) != 10.0: raise TestFailed, 'round(10.0)'
if round(1000000000.1) != 1000000000.0: raise TestFailed, 'round(1000000000.0)'
if round(-1.1) != -1.0: raise TestFailed, 'round(-1.0)'
if round(-10.1) != -10.0: raise TestFailed, 'round(-10.0)'
if round(-1000000000.1) != -1000000000.0: raise TestFailed, 'round(-1000000000.0)'
if round(0.9) != 1.0: raise TestFailed, 'round(0.9)'
if round(9.9) != 10.0: raise TestFailed, 'round(9.9)'
if round(999999999.9) != 1000000000.0: raise TestFailed, 'round(999999999.9)'
if round(-0.9) != -1.0: raise TestFailed, 'round(-0.9)'
if round(-9.9) != -10.0: raise TestFailed, 'round(-9.9)'
if round(-999999999.9) != -1000000000.0: raise TestFailed, 'round(-999999999.9)'

# --- setattr() ---------------------------------------------------------------
print 'setattr'
import sys
setattr(sys, 'spam', 1)
if sys.spam != 1: raise TestFailed, 'setattr(sys, \'spam\', 1)'

# --- str() -------------------------------------------------------------------
print 'str'
if str('') != '': raise TestFailed, 'str(\'\')'
if str(0) != '0': raise TestFailed, 'str(0)'
if str(0L) != '0': raise TestFailed, 'str(0L)'
if str(()) != '()': raise TestFailed, 'str(())'
if str([]) != '[]': raise TestFailed, 'str([])'
if str({}) != '{}': raise TestFailed, 'str({})'

# --- tuple() -----------------------------------------------------------------
print 'tuple'
if tuple(()) != (): raise TestFailed, 'tuple(())'
if tuple((0, 1, 2, 3)) != (0, 1, 2, 3): raise TestFailed, 'tuple((0, 1, 2, 3))'
if tuple([]) != (): raise TestFailed, 'tuple([])'
if tuple([0, 1, 2, 3]) != (0, 1, 2, 3): raise TestFailed, 'tuple([0, 1, 2, 3])'
if tuple('') != (): raise TestFailed, 'tuple('')'
if tuple('spam') != ('s', 'p', 'a', 'm'): raise TestFailed, "tuple('spam')"

# --- type() ------------------------------------------------------------------
print 'type'
if type('') != type('123') or type('') == type(()): raise TestFailed, 'type()'

# --- vars() ------------------------------------------------------------------
print 'vars'
a = b = None
a = vars().keys()
b = dir()
a.sort()
b.sort()
if a != b: raise TestFailed, 'vars()'
import sys
a = vars(sys).keys()
b = dir(sys)
a.sort()
b.sort()
if a != b: raise TestFailed, 'vars(sys)'
def f0():
    # A function with no locals: vars() must be empty.
    if vars() != {}: raise TestFailed, 'vars() in f0()'
f0()
def f2():
    # vars() reflects only locals assigned so far in this frame.
    f0()
    a = 1
    b = 2
    if vars() != {'a': a, 'b': b}: raise TestFailed, 'vars() in f2()'
f2()

# --- xrange() ----------------------------------------------------------------
print 'xrange'
if tuple(xrange(10)) != tuple(range(10)): raise TestFailed, 'xrange(10)'
if tuple(xrange(5,10)) != tuple(range(5,10)): raise TestFailed, 'xrange(5,10)'
if tuple(xrange(0,10,2)) != tuple(range(0,10,2)): raise TestFailed, 'xrange(0,10,2)'
# regression tests for SourceForge bug #121695
def _range_test(r):
    # Membership (`in`) must honour start/stop/step boundaries exactly.
    verify(r.start != r.stop, 'Test not valid for passed-in xrange object.')
    if r.stop in r: raise TestFailed, 'r.stop in ' + `r`
    if r.stop-r.step not in r: raise TestFailed, 'r.stop-r.step not in ' + `r`
    if r.start not in r: raise TestFailed, 'r.start not in ' + `r`
    if r.stop+r.step in r: raise TestFailed, 'r.stop+r.step in ' + `r`
_range_test(xrange(10))
_range_test(xrange(9, -1, -1))
_range_test(xrange(0, 10, 2))

# --- zip() -------------------------------------------------------------------
print 'zip'
a = (1, 2, 3)
b = (4, 5, 6)
t = [(1, 4), (2, 5), (3, 6)]
if zip(a, b) != t: raise TestFailed, 'zip(a, b) - same size, both tuples'
b = [4, 5, 6]
if zip(a, b) != t: raise TestFailed, 'zip(a, b) - same size, tuple/list'
b = (4, 5, 6, 7)
if zip(a, b) != t: raise TestFailed, 'zip(a, b) - b is longer'
class I:
    # Sequence-protocol-only instance (no __iter__): zip must still work.
    def __getitem__(self, i):
        if i < 0 or i > 2: raise IndexError
        return i + 4
if zip(a, I()) != t: raise TestFailed, 'zip(a, b) - b is instance'
exc = 0
try:
    zip()
except TypeError:
    exc = 1
except:
    e = sys.exc_info()[0]
    raise TestFailed, 'zip() - no args, expected TypeError, got %s' % e
if not exc: raise TestFailed, 'zip() - no args, missing expected TypeError'
exc = 0
try:
    zip(None)
except TypeError:
    exc = 1
except:
    e = sys.exc_info()[0]
    raise TestFailed, 'zip(None) - expected TypeError, got %s' % e
if not exc: raise TestFailed, 'zip(None) - missing expected TypeError'
class G:
    pass
exc = 0
try:
    zip(a, G())
except AttributeError:
    exc = 1
except:
    # NOTE(review): `e` is captured but unused in this message (pre-existing).
    e = sys.exc_info()[0]
    raise TestFailed, 'zip(a, b) - b instance w/o __getitem__'
if not exc: raise TestFailed, 'zip(a, b) - missing expected AttributeError'

# Epilogue -- unlink the temp file
unlink(TESTFN)
#!/usr/bin/env python # -*- coding: utf-8 -*- # # king_phisher/utilities.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
import datetime
import ipaddress
import logging
import os
import random
import re
import shlex
import string
import subprocess
import sys
import time

from king_phisher import color
from king_phisher import its
from king_phisher import version

from smoke_zephyr.utilities import which

# Pragmatic email address pattern; intentionally loose (2-6 letter TLDs).
EMAIL_REGEX = re.compile(r'^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,6}$', flags=re.IGNORECASE)

class Mock(object):
    """
    A fake object used to replace missing imports when generating documentation.
    """
    __all__ = []
    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # __file__ / __path__ must look like strings so doc tooling that
        # inspects modules does not choke; everything else mocks further.
        if name in ('__file__', '__path__'):
            return os.devnull
        else:
            return Mock()

    @classmethod
    def __setattr__(cls, name, value):
        # BUG FIX: the original signature was (cls, name) with no *value*
        # parameter, so any attribute assignment on a Mock raised TypeError.
        pass

    def __getitem__(self, name):
        return Mock()

    def __setitem__(self, name, value):
        pass

def argp_add_args(parser, default_root=''):
    """
    Add standard arguments to a new :py:class:`argparse.ArgumentParser` instance
    for configuring logging options from the command line and displaying the
    version information.

    :param parser: The parser to add arguments to.
    :type parser: :py:class:`argparse.ArgumentParser`
    :param str default_root: The default root logger to specify.
    """
    parser.add_argument('-v', '--version', action='version', version=parser.prog + ' Version: ' + version.version)
    parser.add_argument('-L', '--log', dest='loglvl', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING', help='set the logging level')
    parser.add_argument('--logger', default=default_root, help='specify the root logger')

def configure_stream_logger(level, logger):
    """
    Configure the default stream handler for logging messages to the console.
    This also configures the basic logging environment for the application.

    :param level: The level to set the logger to.
    :type level: int, str
    :param str logger: The logger to add the stream handler for.
    :return: The new configured stream handler.
    :rtype: :py:class:`logging.StreamHandler`
    """
    # Accept level names ('DEBUG', ...) as well as numeric levels.
    if isinstance(level, str):
        level = getattr(logging, level)
    # Drop any pre-existing handlers on the root logger so output is not
    # duplicated when this is called more than once.
    root_logger = logging.getLogger('')
    for handler in root_logger.handlers:
        root_logger.removeHandler(handler)
    logging.getLogger(logger).setLevel(logging.DEBUG)
    console_log_handler = logging.StreamHandler()
    console_log_handler.setLevel(level)
    if its.on_linux:
        console_log_handler.setFormatter(color.ColoredLogFormatter("%(levelname)s %(message)s"))
    else:
        console_log_handler.setFormatter(logging.Formatter("%(levelname)-8s %(message)s"))
    logging.getLogger(logger).addHandler(console_log_handler)
    logging.captureWarnings(True)
    return console_log_handler

def datetime_utc_to_local(dt):
    """
    Convert a :py:class:`datetime.datetime` instance from UTC time to the local
    time.

    :param dt: The time to convert from UTC to local.
    :type dt: :py:class:`datetime.datetime`
    :return: The time converted to the local timezone.
    :rtype: :py:class:`datetime.datetime`
    """
    # NOTE(review): uses the fixed time.timezone offset, so DST is not
    # accounted for -- confirm acceptable for callers.
    return dt - datetime.timedelta(seconds=time.timezone)

def format_datetime(dt):
    """
    Format a date time object into a string. If the object *dt* is not an
    instance of :py:class:`datetime.datetime` then an empty string will be
    returned.

    :param dt: The object to format.
    :type dt: :py:class:`datetime.datetime`
    :return: The string representing the formatted time.
    :rtype: str
    """
    if not isinstance(dt, datetime.datetime):
        return ''
    return dt.strftime('%Y-%m-%d %H:%M:%S')

def is_valid_email_address(email_address):
    """
    Check that the string specified appears to be a valid email address.

    :param str email_address: The email address to validate.
    :return: Whether the email address appears to be valid or not.
    :rtype: bool
    """
    # Identity comparisons with None (was `== None` / `!= None`).
    if email_address is None:
        return False
    return EMAIL_REGEX.match(email_address) is not None

def is_valid_ip_address(ip_address):
    """
    Check that the string specified appears to be either a valid IPv4 or IPv6
    address.

    :param str ip_address: The IP address to validate.
    :return: Whether the IP address appears to be valid or not.
    :rtype: bool
    """
    try:
        ipaddress.ip_address(ip_address)
    except ValueError:
        return False
    return True

def open_uri(uri):
    """
    Open a URI in a platform intelligent way. On Windows this will use
    'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open
    depending on which is available. If no suitable application can be
    found to open the URI, a RuntimeError will be raised.

    :param str uri: The URI to open.
    """
    proc_args = []
    if sys.platform.startswith('win'):
        proc_args.append(which('cmd.exe'))
        proc_args.append('/c')
        proc_args.append('start')
    elif which('gvfs-open'):
        proc_args.append(which('gvfs-open'))
    elif which('xdg-open'):
        proc_args.append(which('xdg-open'))
    else:
        raise RuntimeError('could not find suitable application to open uri')
    proc_args.append(uri)
    return start_process(proc_args)

def random_string(size):
    """
    Generate a random string consisting of uppercase letters, lowercase letters
    and numbers of the specified size.

    :param int size: The size of the string to make.
    :return: The string containing the random characters.
    :rtype: str
    """
    return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size))

def random_string_lower_numeric(size):
    """
    Generate a random string consisting of lowercase letters and numbers of the
    specified size.

    :param int size: The size of the string to make.
    :return: The string containing the random characters.
    :rtype: str
    """
    return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size))

def start_process(proc_args, wait=True):
    """
    Start an external process.

    :param proc_args: The arguments of the process to start.
    :type proc_args: list, str
    :param bool wait: Wait for the process to exit before returning.
    :return: The process handle when *wait* is False, otherwise whether the
        process exited with a 0 status.
    :rtype: :py:class:`subprocess.Popen`, bool
    """
    if isinstance(proc_args, str):
        proc_args = shlex.split(proc_args)
    close_fds = True
    startupinfo = None
    # Detach backgrounded children into their own session where supported.
    preexec_fn = None if wait else getattr(os, 'setsid', None)
    if sys.platform.startswith('win'):
        # Windows: inherit handles and suppress the console window.
        close_fds = False
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = subprocess.SW_HIDE

    proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=preexec_fn, close_fds=close_fds, startupinfo=startupinfo)
    if not wait:
        return proc_h
    return proc_h.wait() == 0
import decimal
import json as _json
import sys
import re

from _plotly_utils.optional_imports import get_module
from _plotly_utils.basevalidators import ImageUriValidator

# True on CPython 3.6+; presumably used by importers for dict-ordering
# assumptions -- not referenced in this module itself.
PY36_OR_LATER = sys.version_info >= (3, 6)


class PlotlyJSONEncoder(_json.JSONEncoder):
    """
    Meant to be passed as the `cls` kwarg to json.dumps(obj, cls=..)

    See PlotlyJSONEncoder.default for more implementation information.

    Additionally, this encoder overrides nan functionality so that 'Inf',
    'NaN' and '-Inf' encode to 'null'. Which is stricter JSON than the Python
    version.
    """

    def coerce_to_strict(self, const):
        """
        Map the extended-JSON constants to None; used as `parse_constant`
        during the re-parse in `encode` so they serialize as 'null'.
        """
        # before python 2.7, 'true', 'false', 'null', were include here.
        if const in ("Infinity", "-Infinity", "NaN"):
            return None
        else:
            return const

    def encode(self, o):
        """
        Encode, then round-trip the result through loads/dumps with
        `parse_constant` so Infinity/-Infinity/NaN become strict-JSON null.

        Note that setting invalid separators will cause a failure at this step.
        """
        # this will raise errors in a normal-expected way
        encoded_o = super(PlotlyJSONEncoder, self).encode(o)
        # now:
        #     1. `loads` to switch Infinity, -Infinity, NaN to None
        #     2. `dumps` again so you get 'null' instead of extended JSON
        try:
            new_o = _json.loads(encoded_o, parse_constant=self.coerce_to_strict)
        except ValueError:
            # invalid separators will fail here. raise a helpful exception
            raise ValueError(
                "Encoding into strict JSON failed. Did you set the separators "
                "valid JSON separators?"
            )
        else:
            return _json.dumps(
                new_o,
                sort_keys=self.sort_keys,
                indent=self.indent,
                separators=(self.item_separator, self.key_separator),
            )

    def default(self, obj):
        """
        Accept an object (of unknown type) and try to encode with priority:
        1. builtin: user-defined objects
        2. sage: sage math cloud
        3. pandas: dataframes/series
        4. numpy: ndarrays
        5. datetime: time/datetime objects

        Each method raises NotEncodable if it fails; the first success wins.
        If every method fails, defer to the base class (which raises
        TypeError for unserializable objects).
        """
        # TODO: The ordering if these methods is *very* important. Is this OK?
        # NOTE(review): numpy is tried before pandas here although the
        # docstring above lists pandas first -- confirm intended order.
        encoding_methods = (
            self.encode_as_plotly,
            self.encode_as_sage,
            self.encode_as_numpy,
            self.encode_as_pandas,
            self.encode_as_datetime,
            self.encode_as_date,
            self.encode_as_list,  # because some values have `tolist` do last.
            self.encode_as_decimal,
            self.encode_as_pil,
        )
        for encoding_method in encoding_methods:
            try:
                return encoding_method(obj)
            except NotEncodable:
                pass
        return _json.JSONEncoder.default(self, obj)

    @staticmethod
    def encode_as_plotly(obj):
        """Attempt to use a builtin `to_plotly_json` method."""
        try:
            return obj.to_plotly_json()
        except AttributeError:
            raise NotEncodable

    @staticmethod
    def encode_as_list(obj):
        """Attempt to use `tolist` method to convert to normal Python list."""
        if hasattr(obj, "tolist"):
            return obj.tolist()
        else:
            raise NotEncodable

    @staticmethod
    def encode_as_sage(obj):
        """Attempt to convert sage.all.RR to floats and sage.all.ZZ to ints"""
        sage_all = get_module("sage.all")
        if not sage_all:
            raise NotEncodable

        if obj in sage_all.RR:
            return float(obj)
        elif obj in sage_all.ZZ:
            return int(obj)
        else:
            raise NotEncodable

    @staticmethod
    def encode_as_pandas(obj):
        """Attempt to convert pandas.NaT"""
        # should_load=False: only use pandas if the caller already imported it.
        pandas = get_module("pandas", should_load=False)
        if not pandas:
            raise NotEncodable

        if obj is pandas.NaT:
            return None
        else:
            raise NotEncodable

    @staticmethod
    def encode_as_numpy(obj):
        """Attempt to convert numpy.ma.core.masked"""
        numpy = get_module("numpy", should_load=False)
        if not numpy:
            raise NotEncodable

        if obj is numpy.ma.core.masked:
            return float("nan")
        else:
            raise NotEncodable

    @staticmethod
    def encode_as_datetime(obj):
        """Convert datetime objects to iso-format strings"""
        try:
            return obj.isoformat()
        except AttributeError:
            raise NotEncodable

    @staticmethod
    def encode_as_date(obj):
        """Attempt to convert to utc-iso time string using date methods."""
        try:
            time_string = obj.isoformat()
        except AttributeError:
            raise NotEncodable
        else:
            return iso_to_plotly_time_string(time_string)

    @staticmethod
    def encode_as_decimal(obj):
        """Attempt to encode decimal by converting it to float"""
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        else:
            raise NotEncodable

    @staticmethod
    def encode_as_pil(obj):
        """Attempt to convert PIL.Image.Image to base64 data uri"""
        image = get_module("PIL.Image")
        if image is not None and isinstance(obj, image.Image):
            return ImageUriValidator.pil_image_to_uri(obj)
        else:
            raise NotEncodable


class NotEncodable(Exception):
    # Internal control-flow exception used by the encode_as_* methods above.
    pass


def iso_to_plotly_time_string(iso_string):
    """Remove timezone info and replace 'T' delimeter with ' ' (ws)."""
    # make sure we don't send timezone info to plotly
    # NOTE(review): `iso_string.split("-")[:3]` is a *list* and can never
    # equal the string "00:00", and `split("+")[0]` is the text *before* any
    # '+', so this guard is effectively dead code -- confirm intended check
    # before relying on it.
    if (iso_string.split("-")[:3] == "00:00") or (iso_string.split("+")[0] == "00:00"):
        raise Exception(
            "Plotly won't accept timestrings with timezone info.\n"
            "All timestrings are assumed to be in UTC."
        )

    # Strip explicit UTC offsets, then drop a midnight time component or
    # swap the 'T' delimiter for a space.
    iso_string = iso_string.replace("-00:00", "").replace("+00:00", "")

    if iso_string.endswith("T00:00:00"):
        return iso_string.replace("T00:00:00", "")
    else:
        return iso_string.replace("T", " ")


def template_doc(**names):
    # Decorator factory: substitute {placeholders} in the wrapped function's
    # docstring via str.format. Skipped on Python 3.2 and when docstrings are
    # stripped (func.__doc__ is None under -OO).
    def _decorator(func):
        if not sys.version_info[:2] == (3, 2):
            if func.__doc__ is not None:
                func.__doc__ = func.__doc__.format(**names)
        return func

    return _decorator


def _natural_sort_strings(vals, reverse=False):
    # Sort strings so embedded integers compare numerically ('x2' < 'x10').
    def key(v):
        v_parts = re.split(r"(\d+)", v)
        for i in range(len(v_parts)):
            try:
                v_parts[i] = int(v_parts[i])
            except ValueError:
                # not an int
                pass
        return tuple(v_parts)

    return sorted(vals, key=key, reverse=reverse)
from __future__ import absolute_import import ast import logging import os.path from pathlib import Path import pytest import pretend from pip_check_reqs import common @pytest.mark.parametrize( ["path", "result"], [ ('/', ''), ('__init__.py', ''), # a top-level file like this has no package name ('/__init__.py', ''), # no package name ('spam/__init__.py', 'spam'), ('spam/__init__.pyc', 'spam'), ('spam/__init__.pyo', 'spam'), ('ham/spam/__init__.py', 'ham/spam'), ('/ham/spam/__init__.py', '/ham/spam'), ]) def test_is_package_file(path, result): assert common.is_package_file(path) == result def test_FoundModule(): fm = common.FoundModule('spam', 'ham') assert fm.modname == 'spam' assert fm.filename == os.path.realpath('ham') assert fm.locations == [] assert str(fm) == 'FoundModule("spam")' @pytest.mark.parametrize( ["stmt", "result"], [ ('import ast', ['ast']), ('import ast, sys', ['ast', 'sys']), ('from sys import version', ['sys']), ('from os import path', ['os']), ('import distutils.command.check', ['distutils']), ('import spam', []), # don't break because bad programmer ]) def test_ImportVisitor(stmt, result): class options: def ignore_mods(self, modname): return False vis = common.ImportVisitor(options()) vis.set_location('spam.py') vis.visit(ast.parse(stmt)) result = vis.finalise() assert set(result.keys()) == set(result) def test_pyfiles_file(monkeypatch): monkeypatch.setattr(os.path, 'abspath', pretend.call_recorder(lambda x: '/spam/ham.py')) assert list(common.pyfiles('spam')) == ['/spam/ham.py'] def test_pyfiles_file_no_dice(monkeypatch): monkeypatch.setattr(os.path, 'abspath', pretend.call_recorder(lambda x: '/spam/ham')) with pytest.raises(ValueError): list(common.pyfiles('spam')) def test_pyfiles_package(monkeypatch): monkeypatch.setattr(os.path, 'abspath', pretend.call_recorder(lambda x: '/spam')) monkeypatch.setattr(os.path, 'isdir', pretend.call_recorder(lambda x: True)) walk_results = [ ('spam', [], ['__init__.py', 'spam', 'ham.py']), ('spam/dub', 
[], ['bass.py', 'dropped']), ] monkeypatch.setattr(os, 'walk', pretend.call_recorder(lambda x: walk_results)) assert list(common.pyfiles('spam')) == \ ['spam/__init__.py', 'spam/ham.py', 'spam/dub/bass.py'] @pytest.mark.parametrize(["ignore_ham", "ignore_hashlib", "expect", "locs"], [ (False, False, ['ast', 'os', 'hashlib'], [('spam.py', 2), ('ham.py', 2)]), (False, True, ['ast', 'os'], [('spam.py', 2), ('ham.py', 2)]), (True, False, ['ast'], [('spam.py', 2)]), (True, True, ['ast'], [('spam.py', 2)]), ]) def test_find_imported_modules(monkeypatch, caplog, ignore_ham, ignore_hashlib, expect, locs): monkeypatch.setattr(common, 'pyfiles', pretend.call_recorder(lambda x: ['spam.py', 'ham.py'])) class FakeFile(): contents = [ 'from os import path\nimport ast, hashlib', 'from __future__ import braces\nimport ast, sys\n' 'from . import friend', ] def __init__(self, filename, encoding=None): pass def read(self): return self.contents.pop() def __enter__(self): return self def __exit__(self, *args): pass monkeypatch.setattr(common, 'open', FakeFile, raising=False) caplog.set_level(logging.INFO) class options: paths = ['dummy'] verbose = True @staticmethod def ignore_files(path): if path == 'ham.py' and ignore_ham: return True return False @staticmethod def ignore_mods(module): if module == 'hashlib' and ignore_hashlib: return True return False result = common.find_imported_modules(options) assert set(result) == set(expect) assert result['ast'].locations == locs if ignore_ham: assert caplog.records[0].message == 'ignoring: ham.py' @pytest.mark.parametrize(["ignore_cfg", "candidate", "result"], [ ([], 'spam', False), ([], 'ham', False), (['spam'], 'spam', True), (['spam'], 'spam.ham', False), (['spam'], 'eggs', False), (['spam*'], 'spam', True), (['spam*'], 'spam.ham', True), (['spam*'], 'eggs', False), (['spam'], '/spam', True), ]) def test_ignorer(monkeypatch, tmp_path: Path, ignore_cfg, candidate, result): monkeypatch.setattr(os.path, 'relpath', lambda s: s.lstrip('/')) 
ignorer = common.ignorer(ignore_cfg) assert ignorer(candidate) == result def test_find_required_modules(monkeypatch, tmp_path: Path): class options: skip_incompatible = False options.ignore_reqs = common.ignorer(ignore_cfg=['barfoo']) fake_requirements_file = tmp_path / 'requirements.txt' fake_requirements_file.write_text('foobar==1\nbarfoo==2') reqs = common.find_required_modules( options=options, requirements_filename=str(fake_requirements_file), ) assert reqs == set(['foobar']) def test_find_required_modules_env_markers(monkeypatch, tmp_path): class options: skip_incompatible = True def ignore_reqs(self, modname): return False fake_requirements_file = tmp_path / 'requirements.txt' fake_requirements_file.write_text('spam==1; python_version<"2.0"\n' 'ham==2;\n' 'eggs==3\n') reqs = common.find_required_modules( options=options(), requirements_filename=str(fake_requirements_file), ) assert reqs == {'ham', 'eggs'} def test_find_imported_modules_sets_encoding_to_utf8_when_reading(tmp_path): (tmp_path / 'module.py').touch() class options: paths = [tmp_path] def ignore_files(*_): return False expected_encoding = 'utf-8' used_encoding = None original_open = common.__builtins__['open'] def mocked_open(*args, **kwargs): # As of Python 3.9, the args to open() are as follows: # file, mode, buffering, encoding, erorrs, newline, closedf, opener nonlocal used_encoding if 'encoding' in kwargs: used_encoding = kwargs['encoding'] return original_open(*args, **kwargs) common.__builtins__['open'] = mocked_open common.find_imported_modules(options) common.__builtins__['open'] = original_open assert used_encoding == expected_encoding
#!/usr/bin/env python

import numpy
import sptensor
import tools
from scipy import sparse


class sptenmat:
    """Matricized (2-D) view of a sparse tensor.

    `rdims`/`cdims` record which tensor modes were mapped to the matrix rows
    and columns respectively; `tsize` is the original tensor shape.
    """
    subs = None
    vals = None
    rdims = None
    cdims = None
    tsize = None

    def __init__(self, T, rdim=None, cdim=None, tsiz=None, option=None):
        """Create a sptenmat object from a given ndarray or sptensor T.

        Either T is a 2-D ndarray and rdim, cdim, tsiz are all given, or T is
        an sptensor and the row/column modes are derived from rdim/cdim plus
        an optional `option` ('fc', 'bc' or 't').
        """
        # Normalize list arguments to numpy arrays.  Identity comparisons
        # (`is not None`) are required here: `array != None` is an
        # elementwise comparison and cannot be used in a boolean context.
        if rdim is not None and rdim.__class__ == list:
            rdim = numpy.array(rdim)
        if cdim is not None and cdim.__class__ == list:
            cdim = numpy.array(cdim)
        if tsiz is not None and tsiz.__class__ == list:
            tsiz = numpy.array(tsiz)

        # Case 1: T is a (2-D) ndarray and rdim, cdim, tsiz are all given.
        if rdim is not None and cdim is not None and tsiz is not None:
            # Integer division: T.size is an exact multiple of len(T), and
            # reshape requires an int (plain '/' floats under Python 3).
            B = T.flatten().reshape(len(T), T.size // len(T))
            subs = []
            vals = []
            maxrowind = 0
            maxcolind = 0
            # Collect the nonzero entries along with their indices.
            for i in range(0, len(B)):
                for j in range(0, len(B[0])):
                    if B[i][j] != 0:
                        subs.extend([[i, j]])
                        vals.extend([B[i][j]])
                        if i > maxrowind:
                            maxrowind = i
                        if j > maxcolind:
                            maxcolind = j

            self.subs = numpy.array(subs)
            self.vals = numpy.array(vals)
            self.rdims = rdim.copy()
            self.cdims = cdim.copy()
            self.tsize = tsiz

            # rdims + cdims together must be a permutation of all modes.
            n = len(self.tsize)
            temp = numpy.concatenate((self.rdims, self.cdims))
            temp.sort()
            if not (numpy.arange(n) == temp).all():
                raise ValueError("Incorrect specification of dimensions")
            if tools.getelts(self.tsize, self.rdims).prod() < maxrowind:
                raise ValueError("error, invalid row index")
            if tools.getelts(self.tsize, self.cdims).prod() < maxcolind:
                raise ValueError("error, invalid column index")
            return

        # Case 2: T is a sptensor.
        T = T.copy()
        self.tsize = T.shape
        self.subs = T.subs
        self.vals = T.vals
        n = T.ndims()

        if rdim is not None:
            if cdim is not None:
                self.rdims = rdim
                self.cdims = cdim
            elif option is not None:
                if option == 'fc':
                    # Forward-cyclic: columns are the modes after rdim,
                    # wrapping around to the start.
                    self.rdims = rdim
                    if self.rdims.size != 1:
                        raise ValueError("only one row dimension for 'fc' option")
                    # BUG FIX: the original read `self.rdim[0]` but the
                    # attribute is `rdims`, raising AttributeError.
                    self.cdims = []
                    for i in range(self.rdims[0] + 1, n):
                        self.cdims.append(i)
                    for i in range(0, self.rdims[0]):
                        self.cdims.append(i)
                    self.cdims = numpy.array(self.cdims)
                elif option == 'bc':
                    # Backward-cyclic: same modes in the reverse order.
                    self.rdims = rdim
                    if self.rdims.size != 1:
                        raise ValueError("only one row dimension for 'bc' option")
                    # BUG FIX: `self.rdim[0]` -> `self.rdims[0]` (as above).
                    self.cdims = []
                    for i in range(0, self.rdims[0])[::-1]:
                        self.cdims.append(i)
                    for i in range(self.rdims[0] + 1, n)[::-1]:
                        self.cdims.append(i)
                    self.cdims = numpy.array(self.cdims)
                else:
                    raise ValueError("unknown option: {0}".format(option))
            else:
                # Columns default to every mode not used as a row.
                self.rdims = rdim
                self.cdims = tools.notin(n, self.rdims)
        elif cdim is not None:
            self.cdims = cdim
            if option == 't':
                self.rdims = tools.notin(n, self.cdims)
            else:
                raise ValueError("unknown option: {0}".format(option))
        else:
            raise ValueError("Both rdims and cdims are None")

        # rdims + cdims together must be a permutation of all modes.
        temp = numpy.concatenate((self.rdims, self.cdims))
        temp.sort()
        if not (numpy.arange(n) == temp).all():
            raise ValueError("Incorrect specification of dimensions")

        rsize = tools.getelts(self.tsize, self.rdims)
        csize = tools.getelts(self.tsize, self.cdims)

        # Linearize the row part of every subscript (all zeros if there are
        # no row modes).
        if len(rsize) == 0:
            ridx = numpy.ndarray([T.nnz()])
            ridx.fill(0)
        else:
            temp1 = []
            for i in range(0, len(self.subs)):
                temp2 = []
                for j in range(0, len(self.rdims)):
                    temp2.extend([self.subs[i][self.rdims[j]]])
                temp1.extend([temp2])
            temp1 = numpy.array(temp1)
            ridx = tools.sub2ind(rsize, temp1)

        # Likewise for the column part.
        if len(csize) == 0:
            cidx = numpy.ndarray([T.nnz()])
            cidx.fill(0)
        else:
            temp1 = []
            for i in range(0, len(self.subs)):
                temp2 = []
                for j in range(0, len(self.cdims)):
                    temp2.extend([self.subs[i][self.cdims[j]]])
                temp1.extend([temp2])
            temp1 = numpy.array(temp1)
            cidx = tools.sub2ind(csize, temp1)

        # Final 2-D subscripts: one (row, col) pair per nonzero.
        self.subs = []
        for i in range(0, len(ridx)):
            self.subs.extend([[ridx[i][0], cidx[i][0]]])
        self.subs = numpy.array(self.subs)

    def tosptensor(self):
        """Convert this sptenmat back into an sptensor of shape `tsize`."""
        # extract the shape of sptensor
        newshape = self.tsize

        # Unflatten the linear row indices back into per-mode subscripts.
        rowsubs = []
        if len(self.rdims) != 0:
            rowshape = []
            for i in range(0, len(self.rdims)):
                rowshape.extend([self.tsize[self.rdims[i]]])
            for i in range(0, len(self.subs)):
                rowsubs.extend([tools.ind2sub(rowshape, self.subs[i][0])])
        rowsubs = numpy.array(rowsubs)

        # Likewise for the column indices.
        colsubs = []
        if len(self.cdims) != 0:
            colshape = []
            for i in range(0, len(self.cdims)):
                colshape.extend([self.tsize[self.cdims[i]]])
            for i in range(0, len(self.subs)):
                colsubs.extend([tools.ind2sub(colshape, self.subs[i][1])])
        colsubs = numpy.array(colsubs)

        # Reassemble full subscripts in original mode order, pulling each
        # mode's column from either the row or the column unflattening.
        newsubs = []
        for i in range(0, len(self.subs)):
            newsubs.extend([[]])
        for k in range(0, len(newshape)):
            find = tools.find(self.rdims, k)
            if find != -1:
                newsubs = numpy.concatenate(
                    (newsubs, rowsubs[:, find].reshape([len(self.subs), 1])),
                    axis=1)
            else:
                find = tools.find(self.cdims, k)
                newsubs = numpy.concatenate(
                    (newsubs, colsubs[:, find].reshape([len(self.subs), 1])),
                    axis=1)

        # extract the values of sptensor
        newvals = self.vals
        return sptensor.sptensor(newsubs, newvals, newshape)

    def tosparsemat(self):
        """Return a scipy.sparse COO matrix containing the same values."""
        # Matrix extents are the products of the mapped tensor mode sizes.
        m = 1
        for i in range(0, len(self.rdims)):
            m = m * self.tsize[self.rdims[i]]
        n = 1
        for i in range(0, len(self.cdims)):
            n = n * self.tsize[self.cdims[i]]
        return sparse.coo_matrix(
            (self.vals.flatten(), (self.subs[:, 0], self.subs[:, 1])),
            shape=[m, n])

    def __str__(self):
        ret = ""
        ret += "sptenmat from an sptensor of size {0} with {1} nonzeros\n".format(self.tsize, len(self.vals))
        ret += "rindices {0}\n".format(self.rdims)
        ret += "cindices {0}\n".format(self.cdims)
        for i in range(0, len(self.vals)):
            ret += "{0} {1}\n".format(self.subs[i], self.vals[i])
        return ret
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo.config import cfg import webob from nova.api.openstack.compute import plugins from nova.api.openstack.compute.plugins.v3 import config_drive from nova.api.openstack.compute.plugins.v3 import servers from nova.compute import api as compute_api from nova.compute import flavors from nova import db from nova import exception from nova.network import manager from nova.openstack.common import jsonutils from nova import test from nova.tests.api.openstack import fakes from nova.tests import fake_instance from nova.tests.image import fake CONF = cfg.CONF FAKE_UUID = fakes.FAKE_UUID def fake_gen_uuid(): return FAKE_UUID def return_security_group(context, instance_id, security_group_id): pass class ConfigDriveTest(test.TestCase): def setUp(self): super(ConfigDriveTest, self).setUp() fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) fake.stub_out_image_service(self.stubs) def test_show(self): self.stubs.Set(db, 'instance_get', fakes.fake_instance_get()) self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get()) req = webob.Request.blank('/v3/servers/1') req.headers['Content-Type'] = 'application/json' response = req.get_response(fakes.wsgi_app_v3( init_only=('servers', 'os-config-drive'))) self.assertEqual(response.status_int, 200) res_dict = jsonutils.loads(response.body) 
self.assertIn(config_drive.ATTRIBUTE_NAME, res_dict['server']) def test_detail_servers(self): self.stubs.Set(db, 'instance_get_all_by_filters', fakes.fake_instance_get_all_by_filters()) self.stubs.Set(db, 'instance_get_by_uuid', fakes.fake_instance_get()) req = fakes.HTTPRequestV3.blank('/v3/servers/detail') res = req.get_response(fakes.wsgi_app_v3( init_only=('servers', 'os-config-drive'))) server_dicts = jsonutils.loads(res.body)['servers'] self.assertNotEqual(len(server_dicts), 0) for server_dict in server_dicts: self.assertIn(config_drive.ATTRIBUTE_NAME, server_dict) class ServersControllerCreateTest(test.TestCase): def setUp(self): """Shared implementation for tests below that create instance.""" super(ServersControllerCreateTest, self).setUp() self.flags(verbose=True, enable_instance_password=True) self.instance_cache_num = 0 self.instance_cache_by_id = {} self.instance_cache_by_uuid = {} ext_info = plugins.LoadedExtensionInfo() self.controller = servers.ServersController(extension_info=ext_info) CONF.set_override('extensions_blacklist', 'os-config-drive', 'osapi_v3') self.no_config_drive_controller = servers.ServersController( extension_info=ext_info) def instance_create(context, inst): inst_type = flavors.get_flavor_by_flavor_id(3) image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' def_image_ref = 'http://localhost/images/%s' % image_uuid self.instance_cache_num += 1 instance = fake_instance.fake_db_instance(**{ 'id': self.instance_cache_num, 'display_name': inst['display_name'] or 'test', 'uuid': FAKE_UUID, 'instance_type': dict(inst_type), 'access_ip_v4': '1.2.3.4', 'access_ip_v6': 'fead::1234', 'image_ref': inst.get('image_ref', def_image_ref), 'user_id': 'fake', 'project_id': 'fake', 'reservation_id': inst['reservation_id'], "created_at": datetime.datetime(2010, 10, 10, 12, 0, 0), "updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0), "config_drive": None, "progress": 0, "fixed_ips": [], "task_state": "", "vm_state": "", "root_device_name": 
inst.get('root_device_name', 'vda'), }) self.instance_cache_by_id[instance['id']] = instance self.instance_cache_by_uuid[instance['uuid']] = instance return instance def instance_get(context, instance_id): """Stub for compute/api create() pulling in instance after scheduling """ return self.instance_cache_by_id[instance_id] def instance_update(context, uuid, values): instance = self.instance_cache_by_uuid[uuid] instance.update(values) return instance def server_update(context, instance_uuid, params): inst = self.instance_cache_by_uuid[instance_uuid] inst.update(params) return (inst, inst) def fake_method(*args, **kwargs): pass def project_get_networks(context, user_id): return dict(id='1', host='localhost') def queue_get_for(context, *args): return 'network_topic' fakes.stub_out_rate_limiting(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) fake.stub_out_image_service(self.stubs) fakes.stub_out_nw_api(self.stubs) self.stubs.Set(uuid, 'uuid4', fake_gen_uuid) self.stubs.Set(db, 'instance_add_security_group', return_security_group) self.stubs.Set(db, 'project_get_networks', project_get_networks) self.stubs.Set(db, 'instance_create', instance_create) self.stubs.Set(db, 'instance_system_metadata_update', fake_method) self.stubs.Set(db, 'instance_get', instance_get) self.stubs.Set(db, 'instance_update', instance_update) self.stubs.Set(db, 'instance_update_and_get_original', server_update) self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip', fake_method) def _test_create_extra(self, params, no_image=False, override_controller=None): image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' server = dict(name='server_test', image_ref=image_uuid, flavor_ref=2) if no_image: server.pop('image_ref', None) server.update(params) body = dict(server=server) req = fakes.HTTPRequestV3.blank('/servers') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" if override_controller: server = override_controller.create(req, 
body=body).obj['server'] else: server = self.controller.create(req, body=body).obj['server'] def test_create_instance_with_config_drive_disabled(self): params = {config_drive.ATTRIBUTE_NAME: "False"} old_create = compute_api.API.create def create(*args, **kwargs): self.assertNotIn('config_drive', kwargs) return old_create(*args, **kwargs) self.stubs.Set(compute_api.API, 'create', create) self._test_create_extra(params, override_controller=self.no_config_drive_controller) def _create_instance_body_of_config_drive(self, param): def create(*args, **kwargs): self.assertIn('config_drive', kwargs) return old_create(*args, **kwargs) old_create = compute_api.API.create self.stubs.Set(compute_api.API, 'create', create) image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6' flavor_ref = 'http://localhost/v3/flavors/3' body = { 'server': { 'name': 'config_drive_test', 'image_ref': image_href, 'flavor_ref': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', }, config_drive.ATTRIBUTE_NAME: param, }, } req = fakes.HTTPRequestV3.blank('/servers') req.method = 'POST' req.body = jsonutils.dumps(body) req.headers["content-type"] = "application/json" return req, body def test_create_instance_with_config_drive(self): param = True req, body = self._create_instance_body_of_config_drive(param) res = self.controller.create(req, body=body).obj server = res['server'] self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_with_config_drive_as_boolean_string(self): param = 'false' req, body = self._create_instance_body_of_config_drive(param) res = self.controller.create(req, body=body).obj server = res['server'] self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_with_bad_config_drive(self): param = 12345 req, body = self._create_instance_body_of_config_drive(param) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body) def test_create_instance_without_config_drive(self): param = True req, body = 
self._create_instance_body_of_config_drive(param) del body['server'][config_drive.ATTRIBUTE_NAME] res = self.controller.create(req, body=body).obj server = res['server'] self.assertEqual(FAKE_UUID, server['id']) def test_create_instance_with_empty_config_drive(self): param = '' req, body = self._create_instance_body_of_config_drive(param) self.assertRaises(exception.ValidationError, self.controller.create, req, body=body)
""" ios.py Handle arguments, configuration file @author: K.Edeline """ import sys import argparse import configparser import logging import shutil class IOManager(object): """ extend me """ #DEFAULT_CONFIG_LOC="/tmp/deploypl.ini" PKG_FILE = "packages.txt" def __init__(self, child=None, **kwargs): super().__init__(**kwargs) if child == None: raise IOSException("Child class not found") self.child = child self.args = None self.config = None self.logger = None def load_inputs(self): self.arguments() if "start" in self.args.cmd: self.configuration() def load_outputs(self, decoy=False): self.log(decoy=decoy) ######################################################## # ARGPARSE ######################################################## def arguments(self): """ Parse arguments Used mostly to provide the location of the config file. """ parser = argparse.ArgumentParser(description='PlanetLab C&C server') parser.add_argument('cmd', type=str, choices=["start", "stop", "restart", "status"]) parser.add_argument('-l' , '--log-file', type=str, default="deploypl.log", help='log file location (default: deploypl.log)') parser.add_argument('-c' , '--config', type=str, #default=IOManager.DEFAULT_CONFIG_LOC, help='configuration file location') parser.add_argument('-d' , '--debug', action='store_true', help='increase log output level') parser.add_argument('-v' , '--verbose', action='store_true', help='status print node descriptions') parser.add_argument('-vv' , '--vverbose', action='store_true', help='print info about non-usable nodes') parser.add_argument('-n' , '--names', action='store_true', help='status print node names, not addresses') self.args = parser.parse_args() return self.args ######################################################## # CONFIGPARSER ######################################################## def configuration(self): """ Parse configuration file """ if self.args == None or self.args.config == None: raise IOSException("Arguments not found") self.config = 
configparser.ConfigParser() parsed = self.config.read(self.args.config) if not parsed: print("Configuration file not found:", self.args.config) sys.exit(1) # copy cfg file to /tmp/ #if self.args.config != IOManager.DEFAULT_CONFIG_LOC: # shutil.copyfile(self.args.config, IOManager.DEFAULT_CONFIG_LOC) # Load config self._load_config() return self.config def _load_config(self): """ Load configuration """ self.slice = self.config["core"]["slice"] self.user = self.config["core"]["user"] # PL settings self._nodedir = self._to_absolute(self.config["core"]["nodes_dir"]) self._datadir = self._to_absolute(self.config["core"]["data_dir"]) self._logdir = self._to_absolute(self.config["core"]["log_dir"]) self._rawfile = self._to_absolute(self.config["core"]["raw_nodes"], root=self._nodedir) self.userdir = self._to_absolute(self.user, root=self._logdir) self.pkgfile = self._to_absolute(IOManager.PKG_FILE, root=self.userdir) self.threadlimit = int(self.config["core"]["thread_limit"]) self.sshlimit = int(self.config["core"]["ssh_limit"]) self.sshkeyloc = self.config["core"]["ssh_keyloc"] self.period = int(self.config["core"]["probing_period"]) self.initialdelay = (self.config["core"]["initial_delay"] == 'yes') self._package_list() def _package_list(self): """ load pkg list from file """ self.pkglist = [] if not self.userdir: return def pkgs(line): return (line and not line.startswith(';')) with open(self.pkgfile, 'r') as f: lines = map(str.rstrip, f.readlines()) self.pkglist = list(filter(pkgs, lines)) def _to_absolute(self, path, root=None): """ Convert path to absolute if it's not already """ if not path: return None if path.startswith("/"): return path if not root: root = self.cwd return "/".join([root, path]) ######################################################## # LOGGING ######################################################## def log(self, decoy=False, console=False, logfile=True, errfile=False): """ load logging facility """ if decoy: decoy_logger = lambda _ : None 
self.debug = self.info \ = self.warn \ = self.error \ = self.critical \ = decoy_logger return if self.args == None: raise IOManagerException("Arguments not found") if self.config == None: raise IOManagerException("Configuration not found") # create logger self.logger = logging.getLogger(self.child.__class__.__name__) self.logger.setLevel(logging.DEBUG) # console handler and set level to debug if console: ch = logging.StreamHandler() ch.setLevel(logging.INFO if self.args.debug else logging.ERROR) # XXX #filehandler = logging.handlers.TimedRotatingFileHandler('/tmp/daemon.log', # when='midnight',interval=1,backupCount=10) # log file handler if logfile: fh = logging.FileHandler(self._to_absolute(self.args.log_file, root=self._logdir)) fh.setLevel(logging.DEBUG if self.args.debug else logging.INFO) # error file handler if errfile: eh = logging.FileHandler(self._to_absolute(self.args.error_file, root=self._logdir)) eh.setLevel(logging.ERROR) # add formatter to handlers & handlers to logger formatter = logging.Formatter("%(asctime)s : %(levelname)-5s : %(message)s", "%Y-%m-%d %H:%M:%S") if console: ch.setFormatter(formatter) self.logger.addHandler(ch) if logfile: fh.setFormatter(formatter) self.logger.addHandler(fh) if errfile: eh.setFormatter(formatter) self.logger.addHandler(eh) # log functions self.debug = self.logger.debug self.info = self.logger.info self.warn = self.logger.warn self.error = self.logger.error self.critical = self.logger.critical return self.logger class IOManagerException(Exception): """ IOManagerException(Exception) """ def __init__(self, value): self.value = value def __str__(self): return repr(self.value)
"""Modular aircraft concept""" import cPickle as pickle import numpy as np from gpkit import Model, Vectorize, parse_variables class AircraftP(Model): """Aircraft flight physics: weight <= lift, fuel burn Variables --------- Wfuel [lbf] fuel weight Wburn [lbf] segment fuel burn Upper Unbounded --------------- Wburn, aircraft.wing.c, aircraft.wing.A Lower Unbounded --------------- Wfuel, aircraft.W, state.mu """ def setup(self, aircraft, state): self.aircraft = aircraft self.state = state exec parse_variables(AircraftP.__doc__) self.wing_aero = aircraft.wing.dynamic(aircraft.wing, state) self.perf_models = [self.wing_aero] W = aircraft.W S = aircraft.wing.S V = state.V rho = state.rho D = self.wing_aero.D CL = self.wing_aero.CL return { "lift": W + Wfuel <= 0.5*rho*CL*S*V**2, "fuel burn rate": Wburn >= 0.1*D, "performance": self.perf_models} class Aircraft(Model): """The vehicle model Variables --------- W [lbf] weight Upper Unbounded --------------- W Lower Unbounded --------------- wing.c, wing.S """ def setup(self): exec parse_variables(Aircraft.__doc__) self.fuse = Fuselage() self.wing = Wing() self.components = [self.fuse, self.wing] return { "definition of W": W >= sum(c.W for c in self.components), "components": self.components} dynamic = AircraftP class FlightState(Model): """Context for evaluating flight physics Variables --------- V 40 [knots] true airspeed mu 1.628e-5 [N*s/m^2] dynamic viscosity rho 0.74 [kg/m^3] air density """ def setup(self): exec parse_variables(FlightState.__doc__) class FlightSegment(Model): """Combines a context (flight state) and a component (the aircraft) Upper Unbounded --------------- Wburn, aircraft.wing.c, aircraft.wing.A Lower Unbounded --------------- Wfuel, aircraft.W """ def setup(self, aircraft): self.aircraft = aircraft self.flightstate = FlightState() self.aircraftp = aircraft.dynamic(aircraft, self.flightstate) self.Wburn = self.aircraftp.Wburn self.Wfuel = self.aircraftp.Wfuel return {"flightstate": self.flightstate, 
"aircraft performance": self.aircraftp} class Mission(Model): """A sequence of flight segments Upper Unbounded --------------- aircraft.wing.c, aircraft.wing.A Lower Unbounded --------------- aircraft.W """ def setup(self, aircraft): self.aircraft = aircraft with Vectorize(4): # four flight segments self.fs = FlightSegment(aircraft) Wburn = self.fs.aircraftp.Wburn Wfuel = self.fs.aircraftp.Wfuel self.takeoff_fuel = Wfuel[0] return { "definition of Wburn": Wfuel[:-1] >= Wfuel[1:] + Wburn[:-1], "require fuel for the last leg": Wfuel[-1] >= Wburn[-1], "flight segment": self.fs} class WingAero(Model): """Wing aerodynamics Variables --------- CD [-] drag coefficient CL [-] lift coefficient e 0.9 [-] Oswald efficiency Re [-] Reynold's number D [lbf] drag force Upper Unbounded --------------- D, Re, wing.A, state.mu Lower Unbounded --------------- CL, wing.S, state.mu, state.rho, state.V """ def setup(self, wing, state): self.wing = wing self.state = state exec parse_variables(WingAero.__doc__) c = wing.c A = wing.A S = wing.S rho = state.rho V = state.V mu = state.mu return { "drag model": CD >= 0.074/Re**0.2 + CL**2/np.pi/A/e, "definition of Re": Re == rho*V*c/mu, "definition of D": D >= 0.5*rho*V**2*CD*S} class Wing(Model): """Aircraft wing model Variables --------- W [lbf] weight S [ft^2] surface area rho 1 [lbf/ft^2] areal density A 27 [-] aspect ratio c [ft] mean chord Upper Unbounded --------------- W Lower Unbounded --------------- c, S """ def setup(self): exec parse_variables(Wing.__doc__) return {"parametrization of wing weight": W >= S*rho, "definition of mean chord": c == (S/A)**0.5} dynamic = WingAero class Fuselage(Model): """The thing that carries the fuel, engine, and payload A full model is left as an exercise for the reader. 
Variables --------- W 100 [lbf] weight """ def setup(self): exec parse_variables(Fuselage.__doc__) AC = Aircraft() MISSION = Mission(AC) M = Model(MISSION.takeoff_fuel, [MISSION, AC]) sol = M.solve(verbosity=0) # save solution to a file and retrieve it sol.save("solution.pkl") sol_loaded = pickle.load(open("solution.pkl")) vars_of_interest = set(AC.varkeys) # note that there's two ways to access submodels assert (MISSION["flight segment"]["aircraft performance"] is MISSION.fs.aircraftp) vars_of_interest.update(MISSION.fs.aircraftp.unique_varkeys) vars_of_interest.add(M["D"]) print sol.summary(vars_of_interest) print sol.table(tables=["loose constraints"]) MISSION["flight segment"]["aircraft performance"]["fuel burn rate"] = ( MISSION.fs.aircraftp.Wburn >= 0.2*MISSION.fs.aircraftp.wing_aero.D) sol = M.solve(verbosity=0) print(sol.diff("solution.pkl", showvars=vars_of_interest, sortbymodel=False))
# Copyright (c) 2015 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os.path from unittest import mock import eventlet import fixtures import netaddr from neutron_lib import constants as lib_const from oslo_config import fixture as fixture_config from oslo_utils import uuidutils from neutron.agent.common import ovs_lib from neutron.agent.dhcp import agent from neutron.agent import dhcp_agent from neutron.agent.linux import dhcp from neutron.agent.linux import external_process from neutron.agent.linux import interface from neutron.agent.linux import ip_lib from neutron.agent.linux import utils from neutron.agent.metadata import driver as metadata_driver from neutron.common import utils as common_utils from neutron.conf.agent import common as config from neutron.tests.common import net_helpers from neutron.tests.functional.agent.linux import helpers from neutron.tests.functional import base class DHCPAgentOVSTestFramework(base.BaseSudoTestCase): _DHCP_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:4c") _DHCP_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _TENANT_PORT_MAC_ADDRESS = netaddr.EUI("24:77:03:7d:00:3a") _TENANT_PORT_MAC_ADDRESS.dialect = netaddr.mac_unix _IP_ADDRS = { 4: {'addr': '192.168.10.11', 'cidr': '192.168.10.0/24', 'gateway': '192.168.10.1'}, 6: {'addr': '2001:db8:0:1::c0a8:a0b', 'cidr': '2001:db8:0:1::c0a8:a00/120', 'gateway': '2001:db8:0:1::c0a8:a01'}, } def setUp(self): super(DHCPAgentOVSTestFramework, 
self).setUp() config.setup_logging() self.conf_fixture = self.useFixture(fixture_config.Config()) self.conf = self.conf_fixture.conf dhcp_agent.register_options(self.conf) # NOTE(cbrandily): TempDir fixture creates a folder with 0o700 # permissions but agent dir must be readable by dnsmasq user (nobody) agent_config_dir = self.useFixture(fixtures.TempDir()).path self.useFixture( helpers.RecursivePermDirFixture(agent_config_dir, 0o555)) self.conf.set_override("dhcp_confs", agent_config_dir) self.conf.set_override( 'interface_driver', 'neutron.agent.linux.interface.OVSInterfaceDriver') self.conf.set_override('report_interval', 0, 'AGENT') br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge self.conf.set_override('integration_bridge', br_int.br_name, 'OVS') self.mock_plugin_api = mock.patch( 'neutron.agent.dhcp.agent.DhcpPluginApi').start().return_value mock.patch('neutron.agent.rpc.PluginReportStateAPI').start() self.agent = agent.DhcpAgentWithStateReport('localhost') self.ovs_driver = interface.OVSInterfaceDriver(self.conf) self.conf.set_override('check_child_processes_interval', 1, 'AGENT') mock.patch('neutron.agent.common.ovs_lib.' 
'OVSBridge._set_port_dead').start() def network_dict_for_dhcp(self, dhcp_enabled=True, ip_version=lib_const.IP_VERSION_4, prefix_override=None): net_id = uuidutils.generate_uuid() subnet_dict = self.create_subnet_dict( net_id, dhcp_enabled, ip_version, prefix_override) port_dict = self.create_port_dict( net_id, subnet_dict.id, mac_address=str(self._DHCP_PORT_MAC_ADDRESS), ip_version=ip_version) port_dict.device_id = common_utils.get_dhcp_agent_device_id( net_id, self.conf.host) net_dict = self.create_network_dict( net_id, [subnet_dict], [port_dict]) return net_dict def create_subnet_dict(self, net_id, dhcp_enabled=True, ip_version=lib_const.IP_VERSION_4, prefix_override=None): cidr = self._IP_ADDRS[ip_version]['cidr'] if prefix_override is not None: cidr = '/'.join((cidr.split('/')[0], str(prefix_override))) sn_dict = dhcp.DictModel( id=uuidutils.generate_uuid(), network_id=net_id, ip_version=ip_version, cidr=cidr, gateway_ip=self._IP_ADDRS[ip_version]['gateway'], enable_dhcp=dhcp_enabled, dns_nameservers=[], host_routes=[], ipv6_ra_mode=None, ipv6_address_mode=None) if ip_version == lib_const.IP_VERSION_6: sn_dict['ipv6_address_mode'] = lib_const.DHCPV6_STATEFUL return sn_dict def create_port_dict(self, network_id, subnet_id, mac_address, ip_version=lib_const.IP_VERSION_4, ip_address=None): ip_address = (self._IP_ADDRS[ip_version]['addr'] if not ip_address else ip_address) port_dict = dhcp.DictModel(id=uuidutils.generate_uuid(), name="foo", mac_address=mac_address, network_id=network_id, admin_state_up=True, device_id=uuidutils.generate_uuid(), device_owner="foo", fixed_ips=[{"subnet_id": subnet_id, "ip_address": ip_address}]) return port_dict def create_network_dict(self, net_id, subnets=None, ports=None, non_local_subnets=None): subnets = [] if not subnets else subnets ports = [] if not ports else ports non_local_subnets = [] if not non_local_subnets else non_local_subnets net_dict = dhcp.NetModel(id=net_id, subnets=subnets, non_local_subnets=non_local_subnets, 
ports=ports, admin_state_up=True, project_id=uuidutils.generate_uuid()) return net_dict def get_interface_name(self, network, port): device_manager = dhcp.DeviceManager(conf=self.conf, plugin=mock.Mock()) return device_manager.get_interface_name(network, port) def configure_dhcp_for_network(self, network, dhcp_enabled=True): self.agent.configure_dhcp_for_network(network) self.addCleanup(self._cleanup_network, network, dhcp_enabled) def _cleanup_network(self, network, dhcp_enabled): self.mock_plugin_api.release_dhcp_port.return_value = None if dhcp_enabled: self.agent.call_driver('disable', network) def assert_dhcp_resources(self, network, dhcp_enabled): ovs = ovs_lib.BaseOVS() port = network.ports[0] iface_name = self.get_interface_name(network, port) self.assertEqual(dhcp_enabled, ovs.port_exists(iface_name)) self.assert_dhcp_namespace(network.namespace, dhcp_enabled) self.assert_accept_ra_disabled(network.namespace) self.assert_dhcp_device(network.namespace, iface_name, dhcp_enabled) def assert_dhcp_namespace(self, namespace, dhcp_enabled): self.assertEqual(dhcp_enabled, ip_lib.network_namespace_exists(namespace)) def assert_accept_ra_disabled(self, namespace): actual = ip_lib.IPWrapper(namespace=namespace).netns.execute( ['sysctl', '-b', 'net.ipv6.conf.default.accept_ra'], privsep_exec=True) self.assertEqual('0', actual) def assert_dhcp_device(self, namespace, dhcp_iface_name, dhcp_enabled): dev = ip_lib.IPDevice(dhcp_iface_name, namespace) self.assertEqual(dhcp_enabled, ip_lib.device_exists( dhcp_iface_name, namespace)) if dhcp_enabled: self.assertEqual(self._DHCP_PORT_MAC_ADDRESS, dev.link.address) def _plug_port_for_dhcp_request(self, network, port): namespace = network.namespace vif_name = self.get_interface_name(network.id, port) self.ovs_driver.plug(network.id, port.id, vif_name, port.mac_address, self.conf.OVS.integration_bridge, namespace=namespace) def _ip_list_for_vif(self, vif_name, namespace): ip_device = ip_lib.IPDevice(vif_name, namespace) return 
ip_device.addr.list(ip_version=lib_const.IP_VERSION_4) def _get_network_port_for_allocation_test(self): network = self.network_dict_for_dhcp() ip_addr = netaddr.IPNetwork(network.subnets[0].cidr)[1] port = self.create_port_dict( network.id, network.subnets[0].id, mac_address=str(self._TENANT_PORT_MAC_ADDRESS), ip_address=str(ip_addr)) return network, port def assert_good_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) predicate = lambda: len( self._ip_list_for_vif(vif_name, network.namespace)) common_utils.wait_until_true(predicate, 10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) cidr = ip_list[0].get('cidr') ip_addr = str(netaddr.IPNetwork(cidr).ip) self.assertEqual(port.fixed_ips[0].ip_address, ip_addr) def assert_bad_allocation_for_port(self, network, port): vif_name = self.get_interface_name(network.id, port) self._run_dhclient(vif_name, network) # we need wait some time (10 seconds is enough) and check # that dhclient not configured ip-address for interface eventlet.sleep(10) ip_list = self._ip_list_for_vif(vif_name, network.namespace) self.assertEqual([], ip_list) def _run_dhclient(self, vif_name, network): # NOTE: Before run dhclient we should create resolv.conf file # in namespace, where we will run dhclient for testing address # allocation for port, otherwise, dhclient will override # system /etc/resolv.conf # By default, folder for dhcp-agent's namespace doesn't exist # that's why we use AdminDirFixture for create directory # with admin permissions in /etc/netns/ and touch resolv.conf in it. 
etc_dir = '/etc/netns/%s' % network.namespace self.useFixture(helpers.AdminDirFixture(etc_dir)) cmd = ['touch', os.path.join(etc_dir, 'resolv.conf')] utils.execute(cmd, run_as_root=True) dhclient_cmd = ['dhclient', '--no-pid', '-d', '-1', vif_name] proc = net_helpers.RootHelperProcess( cmd=dhclient_cmd, namespace=network.namespace) self.addCleanup(proc.wait) self.addCleanup(proc.kill) def _get_metadata_proxy_process(self, network): return external_process.ProcessManager( self.conf, network.id, network.namespace, service=metadata_driver.HAPROXY_SERVICE) class DHCPAgentOVSTestCase(DHCPAgentOVSTestFramework): def test_create_subnet_with_dhcp(self): dhcp_enabled = True for version in [4, 6]: network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assert_dhcp_resources(network, dhcp_enabled) def test_create_subnet_with_non64_ipv6_cidrs(self): # the agent should not throw exceptions on weird prefixes dhcp_enabled = True version = 6 for i in (0, 1, 41, 81, 121, 127, 128): network = self.network_dict_for_dhcp( dhcp_enabled, ip_version=version, prefix_override=i) self.configure_dhcp_for_network(network=network, dhcp_enabled=dhcp_enabled) self.assertFalse(self.agent.needs_resync_reasons[network.id], msg="prefix size of %s triggered resync" % i) def test_agent_mtu_set_on_interface_driver(self): network = self.network_dict_for_dhcp() network["mtu"] = 789 self.configure_dhcp_for_network(network=network) port = network.ports[0] iface_name = self.get_interface_name(network, port) dev = ip_lib.IPDevice(iface_name, network.namespace) self.assertEqual(789, dev.link.mtu) def test_good_address_allocation(self): network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) self._plug_port_for_dhcp_request(network, port) self.assert_good_allocation_for_port(network, port) def test_bad_address_allocation(self): 
network, port = self._get_network_port_for_allocation_test() network.ports.append(port) self.configure_dhcp_for_network(network=network) bad_mac_address = netaddr.EUI(self._TENANT_PORT_MAC_ADDRESS.value + 1) bad_mac_address.dialect = netaddr.mac_unix port.mac_address = str(bad_mac_address) self._plug_port_for_dhcp_request(network, port) self.assert_bad_allocation_for_port(network, port) def _spawn_network_metadata_proxy(self): network = self.network_dict_for_dhcp() self.conf.set_override('enable_isolated_metadata', True) self.addCleanup(self.agent.disable_isolated_metadata_proxy, network) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) common_utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.01, exception=RuntimeError("Metadata proxy didn't spawn")) return (pm, network) def test_metadata_proxy_respawned(self): pm, network = self._spawn_network_metadata_proxy() old_pid = pm.pid utils.execute(['kill', '-9', old_pid], run_as_root=True) common_utils.wait_until_true( lambda: pm.active and pm.pid != old_pid, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't respawn")) def test_stale_metadata_proxy_killed(self): pm, network = self._spawn_network_metadata_proxy() self.conf.set_override('enable_isolated_metadata', False) self.configure_dhcp_for_network(network=network) common_utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Stale metadata proxy didn't get killed")) def _test_metadata_proxy_spawn_kill_with_subnet_create_delete(self): network = self.network_dict_for_dhcp( ip_version=lib_const.IP_VERSION_6, dhcp_enabled=False) self.configure_dhcp_for_network(network=network) pm = self._get_metadata_proxy_process(network) self.assertFalse(pm.active) new_network = copy.deepcopy(network) dhcp_enabled_ipv4_subnet = self.create_subnet_dict(network.id) new_network.subnets.append(dhcp_enabled_ipv4_subnet) self.mock_plugin_api.get_network_info.return_value = 
new_network dhcp_port_mock = self.create_port_dict( network.id, dhcp_enabled_ipv4_subnet.id, mac_address=str(self._DHCP_PORT_MAC_ADDRESS)) self.mock_plugin_api.create_dhcp_port.return_value = dhcp_port_mock network.ports = [] new_network.ports = [] self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be spawned for the newly added subnet common_utils.wait_until_true( lambda: pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't spawn")) self.mock_plugin_api.get_network_info.return_value = network self.agent.refresh_dhcp_helper(network.id) # Metadata proxy should be killed because network doesn't need it. common_utils.wait_until_true( lambda: not pm.active, timeout=5, sleep=0.1, exception=RuntimeError("Metadata proxy didn't get killed")) def test_enable_isolated_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', False) self.conf.set_override('enable_isolated_metadata', True) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() def test_force_metadata_for_subnet_create_delete(self): self.conf.set_override('force_metadata', True) self.conf.set_override('enable_isolated_metadata', False) self._test_metadata_proxy_spawn_kill_with_subnet_create_delete() def test_notify_port_ready_after_enable_dhcp(self): network = self.network_dict_for_dhcp() dhcp_port = self.create_port_dict( network.id, network.subnets[0].id, '24:77:03:7d:00:4d', ip_address='192.168.10.11') dhcp_port.device_owner = lib_const.DEVICE_OWNER_DHCP network.ports.append(dhcp_port) self.agent.start_ready_ports_loop() self.configure_dhcp_for_network(network) ports_to_send = {p.id for p in network.ports} common_utils.wait_until_true( lambda: self.mock_plugin_api.dhcp_ready_on_ports.called, timeout=1, sleep=0.1, exception=RuntimeError("'dhcp_ready_on_ports' not be called")) self.mock_plugin_api.dhcp_ready_on_ports.assert_called_with( ports_to_send) def test_dhcp_processing_pool_size(self): mock.patch.object(self.agent, 
'call_driver').start().return_value = ( True) self.agent.update_isolated_metadata_proxy = mock.Mock() self.agent.disable_isolated_metadata_proxy = mock.Mock() network_info_1 = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=network_info_1) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) network_info_2 = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=network_info_2) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) network_info_list = [network_info_1, network_info_2] for _i in range(agent.DHCP_PROCESS_GREENLET_MAX + 1): ni = self.network_dict_for_dhcp() self.configure_dhcp_for_network(network=ni) network_info_list.append(ni) self.assertEqual(agent.DHCP_PROCESS_GREENLET_MAX, self.agent._pool.size) for network in network_info_list: self.agent.disable_dhcp_helper(network.id) agent_network_info_len = len(self.agent.cache.get_network_ids()) if agent_network_info_len < agent.DHCP_PROCESS_GREENLET_MIN: self.assertEqual(agent.DHCP_PROCESS_GREENLET_MIN, self.agent._pool.size) elif (agent.DHCP_PROCESS_GREENLET_MIN <= agent_network_info_len <= agent.DHCP_PROCESS_GREENLET_MAX): self.assertEqual(agent_network_info_len, self.agent._pool.size) else: self.assertEqual(agent.DHCP_PROCESS_GREENLET_MAX, self.agent._pool.size)
class ChangeParser():
    """
    Provides notification change parsers and utility methods.

    Each ``*_change_handler()`` compares the old and new values of one
    top-level document field and returns a human readable summary of the
    changes.  Handlers are resolved at runtime through
    :func:`get_changed_field_handler`, which consults the module-level
    dispatch dictionaries defined below the classes.
    """

    @staticmethod
    def get_changed_field_handler(top_level_type, field):
        """
        Return the change handler for the field and top level object type.

        :param top_level_type: The top level object type (e.g. "Indicator").
        :type top_level_type: str
        :param field: The field name.
        :type field: str
        :returns: function -- the change handler, or None if not found.
        """

        specific_mapped_type = __specific_field_to_change_handler__.get(top_level_type)

        # Check for a specific mapped field first; if there isn't one
        # then fall back to the general mapped fields.
        if specific_mapped_type is not None:
            specific_mapped_handler = specific_mapped_type.get(field)

            if specific_mapped_handler is not None:
                return specific_mapped_handler

        return __general_field_to_change_handler__.get(field)

    ############################################################################
    # Generic change parsers
    ############################################################################

    @staticmethod
    def flatten_objects_to_list(objects, key):
        """
        Flatten a list of dict-like objects into a list of the values
        stored under ``key``.

        :param objects: The objects to flatten.
        :type objects: list(object)
        :param key: The name of the field/key to extract.
        :type key: str
        :returns: list -- the extracted values, in input order.
        """

        return [item[key] for item in objects]

    @staticmethod
    def generic_child_fields_change_handler(old_value, new_value, fields,
                                            base_fqn=None):
        """
        Summarize changes between the immediate child fields of two
        dict-like documents.  Only the listed fields are compared.

        :param old_value: The old document (may be None).
        :param new_value: The updated/new document (may be None).
        :param fields: Names of the child fields to compare.
        :type fields: list(str)
        :param base_fqn: Optional base descriptor prefixed to field names.
        :type base_fqn: str
        :returns: str -- a message summarizing the changes.
        """

        message = ""

        for field in fields:
            # A missing side is treated as the empty string so that an
            # add/remove still registers as a change.
            old_field_value = "" if old_value is None else old_value[field]
            new_field_value = "" if new_value is None else new_value[field]

            if old_field_value != new_field_value:
                change_message = ChangeParser.generic_single_field_change_handler(
                    old_field_value, new_field_value, field, base_fqn)
                # Capitalize only the first character; the remainder of the
                # message (including the field name's case) is untouched.
                message += change_message[:1].capitalize() + change_message[1:]

        return message

    @staticmethod
    def generic_list_change_handler(old_value, new_value, changed_field):
        """
        Summarize changes between two lists of primitive items, rendering
        the changed values in unicode format.

        :param old_value: The old list of values.
        :param new_value: The updated/new list of values.
        :param changed_field: The name of the field being compared.
        :type changed_field: str
        :returns: str -- a message summarizing the additions and removals.
        """

        # Empty-string entries are ignored on both sides.
        removed_names = [x for x in old_value if x not in new_value and x != '']
        added_names = [x for x in new_value if x not in old_value and x != '']

        message = ""

        if len(added_names) > 0:
            added_names = ', '.join(added_names)
            if not isinstance(added_names, unicode):
                added_names = unicode(added_names, 'utf-8', 'replace')
            message += "Added to %s: %s. " % (changed_field, added_names)

        if len(removed_names) > 0:
            removed_names = ', '.join(removed_names)
            if not isinstance(removed_names, unicode):
                removed_names = unicode(removed_names, 'utf-8', 'replace')
            message += "Removed from %s: %s. " % (changed_field, removed_names)

        return message

    @staticmethod
    def generic_list_json_change_handler(old_value, new_value, changed_field):
        """
        Summarize changes between two lists of items, rendering the changed
        values in json format via ``to_json()``.

        :param old_value: The old list of documents.
        :param new_value: The updated/new list of documents.
        :param changed_field: The name of the field being compared.
        :type changed_field: str
        :returns: str -- a message summarizing the additions and removals.
        """

        removed_names = [x.to_json() for x in old_value
                         if x not in new_value and x != '']
        added_names = [x.to_json() for x in new_value
                       if x not in old_value and x != '']

        message = ""

        if len(added_names) > 0:
            added_names = ', '.join(added_names)
            if not isinstance(added_names, unicode):
                added_names = unicode(added_names, 'utf-8', 'replace')
            message += "Added to %s: %s. " % (changed_field, added_names)

        if len(removed_names) > 0:
            removed_names = ', '.join(removed_names)
            if not isinstance(removed_names, unicode):
                removed_names = unicode(removed_names, 'utf-8', 'replace')
            message += "Removed from %s: %s. " % (changed_field, removed_names)

        return message

    @staticmethod
    def generic_single_field_change_handler(old_value, new_value,
                                            changed_field, base_fqn=None):
        """
        Summarize a single changed field whose values are displayable in
        string format.

        :param old_value: The old value (anything that can be stringified).
        :param new_value: The new value (anything that can be stringified).
        :param changed_field: The name of the field being compared.
        :type changed_field: str
        :param base_fqn: Optional base descriptor prefixed to the field name.
        :type base_fqn: str
        :returns: str -- a one-line message describing the change.
        """

        if base_fqn is None:
            return "%s changed from \"%s\" to \"%s\"\n" % (changed_field,
                                                           old_value,
                                                           new_value)
        else:
            return "%s.%s changed from \"%s\" to \"%s\"\n" % (base_fqn,
                                                              changed_field,
                                                              old_value,
                                                              new_value)

    @staticmethod
    def generic_single_field_json_change_handler(old_value, new_value,
                                                 changed_field, base_fqn=None):
        """
        Summarize a single changed field whose values are displayable in
        json format via ``to_json()``.

        :param old_value: The old document.
        :param new_value: The new document.
        :param changed_field: The name of the field being compared.
        :type changed_field: str
        :param base_fqn: Optional base descriptor prefixed to the field name.
        :type base_fqn: str
        :returns: str -- a one-line message describing the change.
        """

        if base_fqn is None:
            return "%s changed from \"%s\" to \"%s\"\n" % (
                changed_field, old_value.to_json(), new_value.to_json())
        else:
            return "%s.%s changed from \"%s\" to \"%s\"\n" % (
                base_fqn, changed_field, old_value.to_json(),
                new_value.to_json())

    @staticmethod
    def get_changed_object_list(old_objects, new_objects, object_key):
        """
        Detect which objects changed by comparing ``object_key`` entries
        from both input lists.

        :param old_objects: The old values (iterable of dict-like objects).
        :param new_objects: The new values (iterable of dict-like objects).
        :param object_key: The field whose value keys the returned dict.
        :type object_key: str
        :returns: dict -- changed objects, {key: {'old': ..., 'new': ...}};
            either side may be absent for pure adds/removes.
        """

        changed_objects = {}

        # Objects missing the key are skipped entirely.
        for old_object in old_objects:
            if old_object not in new_objects and object_key in old_object:
                changed_objects.setdefault(
                    old_object[object_key], {})['old'] = old_object

        for new_object in new_objects:
            if new_object not in old_objects and object_key in new_object:
                changed_objects.setdefault(
                    new_object[object_key], {})['new'] = new_object

        return changed_objects

    @staticmethod
    def get_changed_primitive_list(old_objects, new_objects):
        """
        Detect which primitive values changed by comparing the values of
        both input lists directly.

        :param old_objects: The old values.
        :param new_objects: The new values.
        :returns: dict -- changed values, {value: {'old': ..., 'new': ...}}.
        """

        changed_objects = {}

        for old_object in old_objects:
            if old_object not in new_objects:
                changed_objects.setdefault(old_object, {})['old'] = old_object

        for new_object in new_objects:
            if new_object not in old_objects:
                changed_objects.setdefault(new_object, {})['new'] = new_object

        return changed_objects

    @staticmethod
    def get_short_name(obj, summary_handler, default):
        """
        Return a human readable short name for ``obj`` using
        ``summary_handler``, or ``default`` when no handler is given.

        :param obj: The object to summarize.
        :param summary_handler: Function generating the short name, or None.
        :type summary_handler: function
        :param default: Value returned when no summary handler is available.
        :type default: str
        :returns: str -- a short name description.
        """

        if summary_handler is not None:
            return summary_handler(obj)
        return default

    @staticmethod
    def parse_generic_change_object_list(change_dictionary, field_name,
                                         object_key,
                                         change_parser_handler=None,
                                         summary_handler=None):
        """
        Walk a dict of changed complex objects and report, per key, whether
        the object was modified, added, or deleted.

        :param change_dictionary: Changes in the format {key: {old, new}}.
        :type change_dictionary: dict
        :param field_name: Description of the field that changed.
        :type field_name: str
        :param object_key: Secondary description of the changed field.
        :type object_key: str
        :param change_parser_handler: Optional function that details which
            sub-fields changed; used only for modified objects.
        :type change_parser_handler: function
        :param summary_handler: Optional function generating a short
            description of the compared object.
        :type summary_handler: function
        :returns: str -- a message summarizing all changes.
        """

        message = ""

        for changed_key_name in change_dictionary:
            old_value = change_dictionary[changed_key_name].get('old')
            new_value = change_dictionary[changed_key_name].get('new')

            if old_value is not None and new_value is not None:
                # Both sides present: the object was modified in place.
                short_name = ChangeParser.get_short_name(
                    old_value, summary_handler, changed_key_name)
                message += "%s %s modified: %s\n" % (field_name, object_key,
                                                     short_name)
                if change_parser_handler is not None:
                    message += change_parser_handler(old_value, new_value,
                                                     field_name)
            elif old_value is not None and new_value is None:
                short_name = ChangeParser.get_short_name(
                    old_value, summary_handler, changed_key_name)
                message += "%s %s removed: %s\n" % (field_name, object_key,
                                                    short_name)
            elif old_value is None and new_value is not None:
                short_name = ChangeParser.get_short_name(
                    new_value, summary_handler, changed_key_name)
                message += "%s %s added: %s\n" % (field_name, object_key,
                                                  short_name)
            else:
                # Neither side present: should not happen, but report it
                # rather than silently dropping the key.
                message += "Unknown operation on %s %s: %s\n" % (
                    field_name, object_key, changed_key_name)

        return message

    ############################################################################
    # Summary generation handlers
    #
    # These methods generate and return a human readable short name of
    # the input object.
    ############################################################################

    @staticmethod
    def actions_summary_handler(object):
        """Short name for an action: its type and date."""
        return "%s - %s" % (object.action_type, unicode(object.date))

    @staticmethod
    def indicator_activity_summary_handler(object):
        """Short name for an indicator activity: its description."""
        return object.description

    @staticmethod
    def objects_summary_handler(object):
        """Short name for an embedded object: its name and value."""
        return "%s - %s" % (object.name, object.value)

    @staticmethod
    def raw_data_highlights_summary_handler(object):
        """Short name for a raw data highlight: line number and data."""
        # Bug fix: previously ``line_data`` was only assigned inside the
        # isinstance() branch, raising NameError whenever the value was
        # already unicode.
        line_data = object.line_data
        if not isinstance(line_data, unicode):
            line_data = unicode(line_data, 'utf-8', 'replace')
        return "line %s: %s" % (object.line, line_data)

    @staticmethod
    def raw_data_inlines_summary_handler(object):
        """Short name for a raw data inline comment."""
        return "line %s: %s" % (object.line, object.comment)

    @staticmethod
    def relationships_summary_handler(object):
        # TODO: Print out a meaningful relationship summary, should consolidate
        # relationships code to generically get the "key" that best describes
        # a generic mongo object.
        return "%s - %s" % (object.rel_type, object.object_id)

    ############################################################################
    # Specific Change Handlers/Parsers
    #
    # These methods parse the modified field and determine the specific change
    # that was made.
    ############################################################################

    @staticmethod
    def actions_change_handler(old_value, new_value, changed_field):
        """Summarize changes to the 'actions' list (keyed by date)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'instance',
            ChangeParser.actions_parse_handler,
            ChangeParser.actions_summary_handler)

    @staticmethod
    def actions_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of an action changed."""
        fields = ['action_type', 'active', 'reason', 'begin_date',
                  'end_date', 'performed_date']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def bucket_list_change_handler(old_value, new_value, changed_field):
        """Summarize changes to the bucket list (a list of strings)."""
        return ChangeParser.generic_list_change_handler(old_value, new_value,
                                                        changed_field)

    @staticmethod
    def campaign_change_handler(old_value, new_value, changed_field):
        """Summarize changes to the campaign list (keyed by name)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'name')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'name',
            ChangeParser.campaign_parse_handler)

    @staticmethod
    def campaign_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of a campaign reference changed."""
        fields = ['name', 'confidence', 'description']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def indicator_activity_change_handler(old_value, new_value, changed_field):
        """Summarize changes to indicator activity (keyed by date)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'instance',
            ChangeParser.indicator_activity_parse_handler,
            ChangeParser.indicator_activity_summary_handler)

    @staticmethod
    def indicator_activity_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of an activity entry changed."""
        fields = ['description', 'end_date', 'start_date']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def indicator_confidence_change_handler(old_value, new_value,
                                            changed_field):
        """Summarize a change to the indicator confidence rating."""
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, ['rating'], changed_field)

    @staticmethod
    def indicator_impact_change_handler(old_value, new_value, changed_field):
        """Summarize a change to the indicator impact rating."""
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, ['rating'], changed_field)

    @staticmethod
    def objects_change_handler(old_value, new_value, changed_field):
        """Summarize changes to embedded objects (keyed by name)."""
        changed_objects = ChangeParser.get_changed_object_list(
            old_value, new_value, 'name')
        return ChangeParser.parse_generic_change_object_list(
            changed_objects, 'Objects', 'item',
            ChangeParser.objects_parse_handler,
            ChangeParser.objects_summary_handler)

    @staticmethod
    def objects_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of an embedded object changed."""
        fields = ['name', 'value']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def relationships_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of a relationship changed."""
        fields = ['relationship', 'rel_type', 'rel_reason', 'rel_confidence']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def raw_data_highlights_change_handler(old_value, new_value,
                                           changed_field):
        """Summarize changes to raw data highlights (keyed by date)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'instance',
            ChangeParser.raw_data_highlights_parse_handler,
            ChangeParser.raw_data_highlights_summary_handler)

    @staticmethod
    def raw_data_highlights_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of a highlight changed."""
        fields = ['line', 'line_data']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def raw_data_inlines_change_handler(old_value, new_value, changed_field):
        """Summarize changes to raw data inline comments (keyed by date)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'instance',
            ChangeParser.raw_data_inlines_parse_handler,
            ChangeParser.raw_data_inlines_summary_handler)

    @staticmethod
    def raw_data_inlines_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of an inline comment changed."""
        fields = ['line', 'comment']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def relationships_change_handler(old_value, new_value, changed_field):
        """Summarize changes to relationships (keyed by date)."""
        changed_data = ChangeParser.get_changed_object_list(
            old_value, new_value, 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_data, changed_field, 'instance',
            ChangeParser.relationships_parse_handler,
            ChangeParser.relationships_summary_handler)

    @staticmethod
    def screenshots_change_handler(old_value, new_value, changed_field):
        """Summarize changes to the screenshot id list."""
        changed_screenshots = ChangeParser.get_changed_primitive_list(
            old_value, new_value)
        return ChangeParser.parse_generic_change_object_list(
            changed_screenshots, changed_field, 'id')

    @staticmethod
    def skip_change_handler(old_value, new_value, changed_field):
        """Explicitly ignore changes to this field (returns None)."""
        return None

    @staticmethod
    def source_change_handler(old_value, new_value, changed_field):
        """
        Summarize changes to sources (keyed by name).

        Unlike the other handlers this returns a dict carrying both the
        message and the names of the changed sources for filtering.
        """
        changed_sources = ChangeParser.get_changed_object_list(
            old_value, new_value, 'name')
        message = ChangeParser.parse_generic_change_object_list(
            changed_sources, changed_field, 'name',
            ChangeParser.source_parse_handler)
        return {'message': message, 'source_filter': changed_sources.keys()}

    @staticmethod
    def source_instances_parse_handler(old_value, new_value, base_fqn):
        """Detail which sub-fields of a source instance changed."""
        fields = ['method', 'reference']
        return ChangeParser.generic_child_fields_change_handler(
            old_value, new_value, fields, base_fqn)

    @staticmethod
    def source_parse_handler(old_value, new_value, base_fqn):
        """Detail which source instances changed (keyed by date)."""
        changed_source_instances = ChangeParser.get_changed_object_list(
            old_value['instances'], new_value['instances'], 'date')
        return ChangeParser.parse_generic_change_object_list(
            changed_source_instances, 'source', 'instances',
            ChangeParser.source_instances_parse_handler)

    @staticmethod
    def tickets_change_handler(old_value, new_value, changed_field):
        """Summarize changes to tickets by comparing ticket numbers."""
        old_tickets_list = ChangeParser.flatten_objects_to_list(
            old_value, 'ticket_number')
        new_tickets_list = ChangeParser.flatten_objects_to_list(
            new_value, 'ticket_number')
        return ChangeParser.generic_list_change_handler(
            old_tickets_list, new_tickets_list, changed_field)


class MappedMongoFields():
    """
    Maps raw mongo field names to their document attribute names.
    """

    @staticmethod
    def get_mapped_mongo_field(top_level_type, field):
        """
        Return the document attribute name for a mongo field, falling back
        to the field name itself when no mapping exists.

        :param top_level_type: The top level object type (e.g. "Email").
        :type top_level_type: str
        :param field: The raw mongo field name.
        :type field: str
        :returns: str -- the mapped field name, or ``field`` unchanged.
        """
        specific_mapped_type = __specific_mongo_to_doc_field__.get(top_level_type)

        # Check for a specific mapped field first; if there isn't one
        # then fall back to the general mapped fields.
        if specific_mapped_type is not None:
            specific_mapped_value = specific_mapped_type.get(field)

            if specific_mapped_value is not None:
                return specific_mapped_value

        return __general_mongo_to_doc_field__.get(field, field)


class NotificationHeaderManager():
    """
    The following generate_*_header() functions generate a meaningful
    description for that specific object type.
    """

    @staticmethod
    def get_header_handler(obj_type):
        """Return the header generator for ``obj_type``, or None."""
        return __notification_header_handler__.get(obj_type)

    @staticmethod
    def generate_actor_header(obj):
        return "Actor: %s" % (obj.name)

    @staticmethod
    def generate_backdoor_header(obj):
        return "Backdoor: %s" % (obj.name)

    @staticmethod
    def generate_campaign_header(obj):
        return "Campaign: %s" % (obj.name)

    @staticmethod
    def generate_certificate_header(obj):
        return "Certificate: %s" % (obj.filename)

    @staticmethod
    def generate_domain_header(obj):
        return "Domain: %s" % (obj.domain)

    @staticmethod
    def generate_email_header(obj):
        return "Email: %s" % (obj.subject)

    @staticmethod
    def generate_event_header(obj):
        return "Event: %s" % (obj.title)

    @staticmethod
    def generate_exploit_header(obj):
        return "Exploit: %s" % (obj.name)

    @staticmethod
    def generate_indicator_header(obj):
        return "Indicator: %s - %s" % (obj.ind_type, obj.value)

    @staticmethod
    def generate_ip_header(obj):
        return "IP: %s" % (obj.ip)

    @staticmethod
    def generate_pcap_header(obj):
        return "PCAP: %s" % (obj.filename)

    @staticmethod
    def generate_raw_data_header(obj):
        return "RawData: %s (version %s)" % (obj.title, obj.version)

    @staticmethod
    def generate_sample_header(obj):
        return "Sample: %s" % (obj.filename)

    @staticmethod
    def generate_screenshot_header(obj):
        return "Screenshot: %s" % (obj.filename)

    @staticmethod
    def generate_target_header(obj):
        return "Target: %s" % (obj.email_address)

# Use dictionaries to hold the list of handlers because dictionary
# lookup time is O(1) whereas a list or a long 'if/else' block is worst
# case O(n). The consequence of using a dict is that this
# consumes more memory on startup since the dict needs to be constructed.
# Dispatch table for fields common to all top-level object types.
# Looked up by ChangeParser.get_changed_field_handler() after the
# type-specific table below fails to match.
__general_field_to_change_handler__ = {
    "actions": ChangeParser.actions_change_handler,
    "analysis": ChangeParser.skip_change_handler,
    "bucket_list": ChangeParser.bucket_list_change_handler,
    "campaign": ChangeParser.campaign_change_handler,
    "obj": ChangeParser.objects_change_handler,
    "relationships": ChangeParser.relationships_change_handler,
    "screenshots": ChangeParser.screenshots_change_handler,
    "source": ChangeParser.source_change_handler,
    "tickets": ChangeParser.tickets_change_handler,
}

# Dispatch table for fields specific to one top-level object type;
# consulted first by ChangeParser.get_changed_field_handler().
__specific_field_to_change_handler__ = {
    "Indicator": {
        "activity": ChangeParser.indicator_activity_change_handler,
        "confidence": ChangeParser.indicator_confidence_change_handler,
        "impact": ChangeParser.indicator_impact_change_handler,
    },
    "RawData": {
        "tool": ChangeParser.generic_single_field_json_change_handler,
        "highlights": ChangeParser.raw_data_highlights_change_handler,
        "inlines": ChangeParser.raw_data_inlines_change_handler,
    }
}

# Maps a top-level object type to the function that generates its
# notification header line; used by
# NotificationHeaderManager.get_header_handler().
__notification_header_handler__ = {
    "Actor": NotificationHeaderManager.generate_actor_header,
    "Backdoor": NotificationHeaderManager.generate_backdoor_header,
    "Campaign": NotificationHeaderManager.generate_campaign_header,
    "Certificate": NotificationHeaderManager.generate_certificate_header,
    "Domain": NotificationHeaderManager.generate_domain_header,
    "Email": NotificationHeaderManager.generate_email_header,
    "Event": NotificationHeaderManager.generate_event_header,
    "Exploit": NotificationHeaderManager.generate_exploit_header,
    "Indicator": NotificationHeaderManager.generate_indicator_header,
    "IP": NotificationHeaderManager.generate_ip_header,
    "PCAP": NotificationHeaderManager.generate_pcap_header,
    "RawData": NotificationHeaderManager.generate_raw_data_header,
    "Sample": NotificationHeaderManager.generate_sample_header,
    "Screenshot": NotificationHeaderManager.generate_screenshot_header,
    "Target": NotificationHeaderManager.generate_target_header,
}

# Mongo field -> document attribute mappings shared by all types; used by
# MappedMongoFields.get_mapped_mongo_field() as the fallback table.
__general_mongo_to_doc_field__ = {
    "objects": "obj"
}

# Mongo field -> document attribute mappings specific to one type;
# consulted first by MappedMongoFields.get_mapped_mongo_field().
__specific_mongo_to_doc_field__ = {
    "Email": {
        "from": "from_address",
        "raw_headers": "raw_header",
    },
    "Indicator": {
        "type": "ind_type"
    }
}
from operator import attrgetter from typing import Iterable, Iterator, List, MutableSequence, \ Optional, Sequence, TypeVar, Type from iota.codecs import TrytesDecodeError from iota.crypto import Curl, HASH_LENGTH from iota.json import JsonSerializable from iota.transaction.types import BundleHash, Fragment, Nonce, \ TransactionHash, TransactionTrytes from iota.trits import int_from_trits, trits_from_int from iota.types import Address, Tag, TryteString, TrytesCompatible __all__ = [ 'Bundle', 'Transaction', ] T = TypeVar('T', bound='Transaction') class Transaction(JsonSerializable): """ A transaction that has been attached to the Tangle. :param Optional[TransactionHash] hash_: Transaction ID :param Optional[Fragment] signature_message_fragment: Signature or message fragment. :param Address address: The address associated with this transaction. :param int value: Value of the transaction in iotas. Can be negative as well (spending from address). :param int timestamp: Unix timestamp in seconds. :param Optional[int] current_index: Index of the transaction within the bundle. :param Optional[int] last_index: Index of head transaction in the bundle. :param Optional[BundleHash] bundle_hash: Bundle hash of the bundle containing the transaction. :param Optional[TransactionHash] trunk_transaction_hash: Hash of trunk transaction. :param Optional[TransactionHash] branch_transaction_hash: Hash of branch transaction. :param Optional[Tag] tag: Optional classification tag applied to this transaction. :param Optional[int] attachment_timestamp: Unix timestamp in milliseconds, decribes when the proof-of-work for this transaction was done. :param Optional[int] attachment_timestamp_lower_bound: Unix timestamp in milliseconds, lower bound of attachment. :param Optional[int] attachment_timestamp_upper_bound: Unix timestamp in milliseconds, upper bound of attachment. :param Optional[Nonce] nonce: Unique value used to increase security of the transaction hash. 
Result of the proof-of-work aglorithm. :param Optional[Tag] legacy_tag: Optional classification legacy_tag applied to this transaction. :return: :py:class:`Transaction` object. """ @classmethod def from_tryte_string( cls: Type[T], trytes: TrytesCompatible, hash_: Optional[TransactionHash] = None ) -> T: """ Creates a Transaction object from a sequence of trytes. :param TrytesCompatible trytes: Raw trytes. Should be exactly 2673 trytes long. :param Optional[TransactionHash] hash_: The transaction hash, if available. If not provided, it will be computed from the transaction trytes. :return: :py:class:`Transaction` object. Example usage:: from iota import Transaction txn =\\ Transaction.from_tryte_string( b'GYPRVHBEZOOFXSHQBLCYW9ICTCISLHDBNMMVYD9JJHQMPQCTIQAQTJNNNJ9IDXLRCC' b'OYOXYPCLR9PBEY9ORZIEPPDNTI9CQWYZUOTAVBXPSBOFEQAPFLWXSWUIUSJMSJIIIZ' b'WIKIRH9GCOEVZFKNXEVCUCIIWZQCQEUVRZOCMEL9AMGXJNMLJCIA9UWGRPPHCEOPTS' b'VPKPPPCMQXYBHMSODTWUOABPKWFFFQJHCBVYXLHEWPD9YUDFTGNCYAKQKVEZYRBQRB' b'XIAUX9SVEDUKGMTWQIYXRGSWYRK9SRONVGTW9YGHSZRIXWGPCCUCDRMAXBPDFVHSRY' b'WHGB9DQSQFQKSNICGPIPTRZINYRXQAFSWSEWIFRMSBMGTNYPRWFSOIIWWT9IDSELM9' b'JUOOWFNCCSHUSMGNROBFJX9JQ9XT9PKEGQYQAWAFPRVRRVQPUQBHLSNTEFCDKBWRCD' b'X9EYOBB9KPMTLNNQLADBDLZPRVBCKVCYQEOLARJYAGTBFR9QLPKZBOYWZQOVKCVYRG' b'YI9ZEFIQRKYXLJBZJDBJDJVQZCGYQMROVHNDBLGNLQODPUXFNTADDVYNZJUVPGB9LV' b'PJIYLAPBOEHPMRWUIAJXVQOEM9ROEYUOTNLXVVQEYRQWDTQGDLEYFIYNDPRAIXOZEB' b'CS9P99AZTQQLKEILEVXMSHBIDHLXKUOMMNFKPYHONKEYDCHMUNTTNRYVMMEYHPGASP' b'ZXASKRUPWQSHDMU9VPS99ZZ9SJJYFUJFFMFORBYDILBXCAVJDPDFHTTTIYOVGLRDYR' b'TKHXJORJVYRPTDH9ZCPZ9ZADXZFRSFPIQKWLBRNTWJHXTOAUOL9FVGTUMMPYGYICJD' b'XMOESEVDJWLMCVTJLPIEKBE9JTHDQWV9MRMEWFLPWGJFLUXI9BXPSVWCMUWLZSEWHB' b'DZKXOLYNOZAPOYLQVZAQMOHGTTQEUAOVKVRRGAHNGPUEKHFVPVCOYSJAWHZU9DRROH' b'BETBAFTATVAUGOEGCAYUXACLSSHHVYDHMDGJP9AUCLWLNTFEVGQGHQXSKEMVOVSKQE' b'EWHWZUDTYOBGCURRZSJZLFVQQAAYQO9TRLFFN9HTDQXBSPPJYXMNGLLBHOMNVXNOWE' b'IDMJVCLLDFHBDONQJCJVLBLCSMDOUQCKKCQJMGTSTHBXPXAMLMSXRIPUBMBAWBFNLH' 
b'LUJTRJLDERLZFUBUSMF999XNHLEEXEENQJNOFFPNPQ9PQICHSATPLZVMVIWLRTKYPI' b'XNFGYWOJSQDAXGFHKZPFLPXQEHCYEAGTIWIJEZTAVLNUMAFWGGLXMBNUQTOFCNLJTC' b'DMWVVZGVBSEBCPFSM99FLOIDTCLUGPSEDLOKZUAEVBLWNMODGZBWOVQT9DPFOTSKRA' b'BQAVOQ9RXWBMAKFYNDCZOJGTCIDMQSQQSODKDXTPFLNOKSIZEOY9HFUTLQRXQMEPGO' b'XQGLLPNSXAUCYPGZMNWMQWSWCKAQYKXJTWINSGPPZG9HLDLEAWUWEVCTVRCBDFOXKU' b'ROXH9HXXAXVPEJFRSLOGRVGYZASTEBAQNXJJROCYRTDPYFUIQJVDHAKEG9YACV9HCP' b'JUEUKOYFNWDXCCJBIFQKYOXGRDHVTHEQUMHO999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999999999999999999999999999999999999' b'999999999999RKWEEVD99A99999999A99999999NFDPEEZCWVYLKZGSLCQNOFUSENI' b'XRHWWTZFBXMPSQHEDFWZULBZFEOMNLRNIDQKDNNIELAOXOVMYEI9PGTKORV9IKTJZQ' b'UBQAWTKBKZ9NEZHBFIMCLV9TTNJNQZUIJDFPTTCTKBJRHAITVSKUCUEMD9M9SQJ999' b'999TKORV9IKTJZQUBQAWTKBKZ9NEZHBFIMCLV9TTNJNQZUIJDFPTTCTKBJRHAITVSK' b'UCUEMD9M9SQJ999999999999999999999999999999999999999999999999999999' b'999999999999999999999999999999999' ) """ tryte_string = TransactionTrytes(trytes) if not hash_: hash_trits: MutableSequence[int] = [0] * HASH_LENGTH sponge = Curl() sponge.absorb(tryte_string.as_trits()) sponge.squeeze(hash_trits) hash_ = TransactionHash.from_trits(hash_trits) return cls( hash_=hash_, signature_message_fragment=Fragment(tryte_string[0:2187]), address=Address(tryte_string[2187:2268]), 
value=int_from_trits(tryte_string[2268:2295].as_trits()), legacy_tag=Tag(tryte_string[2295:2322]), timestamp=int_from_trits(tryte_string[2322:2331].as_trits()), current_index=int_from_trits(tryte_string[2331:2340].as_trits()), last_index=int_from_trits(tryte_string[2340:2349].as_trits()), bundle_hash=BundleHash(tryte_string[2349:2430]), trunk_transaction_hash=TransactionHash(tryte_string[2430:2511]), branch_transaction_hash=TransactionHash(tryte_string[2511:2592]), tag=Tag(tryte_string[2592:2619]), attachment_timestamp=int_from_trits( tryte_string[2619:2628].as_trits()), attachment_timestamp_lower_bound=int_from_trits( tryte_string[2628:2637].as_trits()), attachment_timestamp_upper_bound=int_from_trits( tryte_string[2637:2646].as_trits()), nonce=Nonce(tryte_string[2646:2673]), ) def __init__( self, hash_: Optional[TransactionHash], signature_message_fragment: Optional[Fragment], address: Address, value: int, timestamp: int, current_index: Optional[int], last_index: Optional[int], bundle_hash: Optional[BundleHash], trunk_transaction_hash: Optional[TransactionHash], branch_transaction_hash: Optional[TransactionHash], tag: Optional[Tag], attachment_timestamp: Optional[int], attachment_timestamp_lower_bound: Optional[int], attachment_timestamp_upper_bound: Optional[int], nonce: Optional[Nonce], legacy_tag: Optional[Tag] = None ) -> None: self.hash: TransactionHash = hash_ """ The transaction hash, used to uniquely identify the transaction on the Tangle. This value is generated by taking a hash of the raw transaction trits. :type: :py:class:`TransactionHash` """ self.bundle_hash: Optional[BundleHash] = bundle_hash """ The bundle hash, used to identify transactions that are part of the same bundle. This value is generated by taking a hash of the metadata from all transactions in the bundle. :type: :py:class:`BundleHash` """ self.address: Address = address """ The address associated with this transaction. 
Depending on the transaction's ``value``, this address may be a sender or a recipient. If ``value`` is != 0, the associated address' balance is adjusted as a result of this transaction. :type: :py:class:`Address` """ self.value: int = value """ The number of iotas being transferred in this transaction: - If this value is negative, then the ``address`` is spending iotas. - If it is positive, then the ``address`` is receiving iotas. - If it is zero, then this transaction is being used to carry metadata (such as a signature fragment or a message) instead of transferring iotas. :type: ``int`` """ self._legacy_tag: Optional[Tag] = legacy_tag """ A short message attached to the transaction. .. warning:: Deprecated, use :py:attr:`Transaction.tag` instead. :type: :py:class:`Tag` """ self.nonce: Optional[Nonce] = nonce """ Unique value used to increase security of the transaction hash. This is the product of the PoW process. :type: :py:class:`Nonce` """ self.timestamp: int = timestamp """ Timestamp used to increase the security of the transaction hash. Describes when the transaction was created. .. important:: This value is easy to forge! Do not rely on it when resolving conflicts! :type: ``int``, unix timestamp in seconds. """ self.current_index: Optional[int] = current_index """ The position of the transaction inside the bundle. - If the ``current_index`` value is 0, then this is the "head transaction". - If it is equal to ``last_index``, then this is the "tail transaction". For value transfers, the "spend" transaction is generally in the 0th position, followed by inputs, and the "change" transaction is last. :type: ``int`` """ self.last_index: Optional[int] = last_index """ The index of the final transaction in the bundle. This value is attached to every transaction to make it easier to traverse and verify bundles. 
:type: ``int`` """ self.trunk_transaction_hash: Optional[TransactionHash] = trunk_transaction_hash """ The transaction hash of the next transaction in the bundle. In order to add a transaction to the Tangle, the client must perform PoW to "approve" two existing transactions, called the "trunk" and "branch" transactions. The trunk transaction is generally used to link transactions within a bundle. :type: :py:class:`TransactionHash` """ self.branch_transaction_hash: Optional[TransactionHash] = branch_transaction_hash """ An unrelated transaction that this transaction "approves". In order to add a transaction to the Tangle, the client must perform PoW to "approve" two existing transactions, called the "trunk" and "branch" transactions. The branch transaction may be selected strategically to maximize the bundle's chances of getting confirmed; otherwise it usually has no significance. :type: :py:class:`TransactionHash` """ self.tag: Optional[Tag] = tag """ Optional classification tag applied to this transaction. Many transactions have empty tags (``Tag(b'999999999999999999999999999')``). :type: :py:class:`Tag` """ self.attachment_timestamp: Optional[int] = attachment_timestamp """ Estimated epoch time of the attachment to the tangle. Decribes when the proof-of-work for this transaction was done. :type: ``int``, unix timestamp in milliseconds, """ self.attachment_timestamp_lower_bound: Optional[int] = attachment_timestamp_lower_bound """ The lowest possible epoch time of the attachment to the tangle. :type: ``int``, unix timestamp in milliseconds. """ self.attachment_timestamp_upper_bound: Optional[int] = attachment_timestamp_upper_bound """ The highest possible epoch time of the attachment to the tangle. :type: ``int``, unix timestamp in milliseconds. 
""" self.signature_message_fragment: Optional[Fragment] = signature_message_fragment """ "Signature/Message Fragment" (note the slash): - For inputs, this contains a fragment of the cryptographic signature, used to verify the transaction (depending on the security level of the corresponding address, the entire signature is usually too large to fit into a single transaction, so it is split across multiple transactions instead). - For other transactions, this contains a fragment of the message attached to the transaction (if any). This can be pretty much any value. Like signatures, the message may be split across multiple transactions if it is too large to fit inside a single transaction. :type: :py:class:`Fragment` """ self.is_confirmed: bool = None """ Whether this transaction has been confirmed by neighbor nodes. Must be set manually via the ``getInclusionStates`` API command. :type: ``Optional[bool]`` References: - :py:meth:`Iota.get_inclusion_states` - :py:meth:`Iota.get_transfers` """ @property def is_tail(self) -> bool: """ Returns whether this transaction is a tail (first one in the bundle). Because of the way the Tangle is organized, the tail transaction is generally the last one in the bundle that gets attached, even though it occupies the first logical position inside the bundle. """ return self.current_index == 0 @property def value_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`value`. """ # Note that we are padding to 81 *trits*. return TryteString.from_trits(trits_from_int(self.value, pad=81)) @property def timestamp_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`timestamp`. """ # Note that we are padding to 27 *trits*. return TryteString.from_trits(trits_from_int(self.timestamp, pad=27)) @property def current_index_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`current_index`. 
""" # Note that we are padding to 27 *trits*. return TryteString.from_trits( trits_from_int(self.current_index, pad=27), ) @property def last_index_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`last_index`. """ # Note that we are padding to 27 *trits*. return TryteString.from_trits(trits_from_int(self.last_index, pad=27)) @property def attachment_timestamp_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`attachment_timestamp`. """ # Note that we are padding to 27 *trits*. return TryteString.from_trits( trits_from_int(self.attachment_timestamp, pad=27), ) @property def attachment_timestamp_lower_bound_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`attachment_timestamp_lower_bound`. """ # Note that we are padding to 27 *trits*. return TryteString.from_trits( trits_from_int(self.attachment_timestamp_lower_bound, pad=27), ) @property def attachment_timestamp_upper_bound_as_trytes(self) -> TryteString: """ Returns a TryteString representation of the transaction's :py:attr:`attachment_timestamp_upper_bound`. """ # Note that we are padding to 27 *trits*. return TryteString.from_trits( trits_from_int(self.attachment_timestamp_upper_bound, pad=27), ) def as_json_compatible(self) -> dict: """ Returns a JSON-compatible representation of the object. :return: ``dict`` with the following structure:: { 'hash_': TransactionHash, 'signature_message_fragment': Fragment, 'address': Address, 'value': int, 'legacy_tag': Tag, 'timestamp': int, 'current_index': int, 'last_index': int, 'bundle_hash': BundleHash, 'trunk_transaction_hash': TransactionHash, 'branch_transaction_hash': TransactionHash, 'tag': Tag, 'attachment_timestamp': int, 'attachment_timestamp_lower_bound': int, 'attachment_timestamp_upper_bound': int, 'nonce': Nonce, } References: - :py:class:`iota.json.JsonEncoder`. 
""" return { 'hash_': self.hash, 'signature_message_fragment': self.signature_message_fragment, 'address': self.address, 'value': self.value, 'legacy_tag': self.legacy_tag, 'timestamp': self.timestamp, 'current_index': self.current_index, 'last_index': self.last_index, 'bundle_hash': self.bundle_hash, 'trunk_transaction_hash': self.trunk_transaction_hash, 'branch_transaction_hash': self.branch_transaction_hash, 'tag': self.tag, 'attachment_timestamp': self.attachment_timestamp, 'attachment_timestamp_lower_bound': self.attachment_timestamp_lower_bound, 'attachment_timestamp_upper_bound': self.attachment_timestamp_upper_bound, 'nonce': self.nonce, } def as_tryte_string(self) -> TransactionTrytes: """ Returns a TryteString representation of the transaction. :return: :py:class:`TryteString` object. """ return TransactionTrytes( self.signature_message_fragment + self.address.address + self.value_as_trytes + self.legacy_tag + self.timestamp_as_trytes + self.current_index_as_trytes + self.last_index_as_trytes + self.bundle_hash + self.trunk_transaction_hash + self.branch_transaction_hash + self.tag + self.attachment_timestamp_as_trytes + self.attachment_timestamp_lower_bound_as_trytes + self.attachment_timestamp_upper_bound_as_trytes + self.nonce ) def get_bundle_essence_trytes(self) -> TryteString: """ Returns the values needed for calculating bundle hash. The bundle hash is the hash of the bundle essence, which is itself the hash of the following fields of transactions in the bundle: - ``address``, - ``value``, - ``legacy_tag``, - ``current_index``, - ``last_index``, - and ``timestamp``. The transaction's ``signature_message_fragment`` field contains the signature generated by signing the bundle hash with the address's private key. :return: :py:class:`TryteString` object. 
""" return ( self.address.address + self.value_as_trytes + self.legacy_tag + self.timestamp_as_trytes + self.current_index_as_trytes + self.last_index_as_trytes ) @property def legacy_tag(self) -> Tag: """ Return the legacy tag of the transaction. If no legacy tag was set, returns the tag instead. """ return self._legacy_tag or self.tag B = TypeVar('B', bound='Bundle') class Bundle(JsonSerializable, Sequence[Transaction]): """ A collection of transactions, treated as an atomic unit when attached to the Tangle. Note: unlike a block in a blockchain, bundles are not first-class citizens in IOTA; only transactions get stored in the Tangle. Instead, Bundles must be inferred by following linked transactions with the same bundle hash. :param Optional[Iterable[Transaction]] transactions: Transactions in the bundle. Note that transactions will be sorted into ascending order based on their ``current_index``. :return: :py:class:`Bundle` object. References: - :py:class:`Iota.get_transfers` """ @classmethod def from_tryte_strings(cls: Type[B], trytes: Iterable[TryteString]) -> B: """ Creates a Bundle object from a list of tryte values. Note, that this is effectively calling :py:meth:`Transaction.from_tryte_string` on the iterbale elements and constructing the bundle from the created transactions. :param Iterable[TryteString] trytes: List of raw transaction trytes. :return: :py:class:`Bundle` object. Example usage:: from iota import Bundle bundle = Bundle.from_tryte_strings([ b'GYPRVHBEZOOFXSHQBLCYW9ICTCISLHDBNMMVYD9JJHQMPQCTIQAQTJNNNJ9IDXLRCC...', b'OYOXYPCLR9PBEY9ORZIEPPDNTI9CQWYZUOTAVBXPSBOFEQAPFLWXSWUIUSJMSJIIIZ...', # etc. ]) """ return cls(map(Transaction.from_tryte_string, trytes)) def __init__( self, transactions: Optional[Iterable[Transaction]] = None ) -> None: super(Bundle, self).__init__() self.transactions: List[Transaction] = [] """ List of :py:class:`Transaction` objects that are in the bundle. 
""" if transactions: self.transactions.extend( sorted(transactions, key=attrgetter('current_index')), ) self._is_confirmed: Optional[bool] = None """ Whether this bundle has been confirmed by neighbor nodes. Must be set manually. References: - :py:class:`Iota.get_transfers` """ def __contains__(self, transaction: Transaction) -> bool: return transaction in self.transactions def __getitem__(self, index: int) -> Transaction: return self.transactions[index] def __iter__(self) -> Iterator[Transaction]: return iter(self.transactions) def __len__(self) -> int: return len(self.transactions) @property def is_confirmed(self) -> Optional[bool]: """ Returns whether this bundle has been confirmed by neighbor nodes. This attribute must be set manually. :return: ``bool`` References: - :py:class:`Iota.get_transfers` """ return self._is_confirmed @is_confirmed.setter def is_confirmed(self, new_is_confirmed: bool) -> None: """ Sets the ``is_confirmed`` for the bundle. """ self._is_confirmed = new_is_confirmed for txn in self: txn.is_confirmed = new_is_confirmed @property def hash(self) -> Optional[BundleHash]: """ Returns the hash of the bundle. This value is determined by inspecting the bundle's tail transaction, so in a few edge cases, it may be incorrect. :return: - :py:class:`BundleHash` object, or - If the bundle has no transactions, this method returns ``None``. """ try: return self.tail_transaction.bundle_hash except IndexError: return None @property def tail_transaction(self) -> Transaction: """ Returns the tail transaction of the bundle. :return: :py:class:`Transaction` """ return self[0] def get_messages(self, errors: str = 'drop') -> List[str]: """ Attempts to decipher encoded messages from the transactions in the bundle. :param str errors: How to handle trytes that can't be converted, or bytes that can't be decoded using UTF-8: 'drop' Drop the trytes from the result. 'strict' Raise an exception. 'replace' Replace with a placeholder character. 
            'ignore'
                Omit the invalid tryte/byte sequence.

        :return:
            ``List[str]``
        """
        # 'drop' is implemented as strict decoding plus swallowing the
        # resulting exceptions; every other mode maps straight through.
        decode_errors = 'strict' if errors == 'drop' else errors

        messages = []

        for group in self.group_transactions():
            # Ignore inputs.
            if group[0].value < 0:
                continue

            # Reassemble the message from the per-transaction fragments.
            message_trytes = TryteString(b'')
            for txn in group:
                message_trytes += txn.signature_message_fragment

            if message_trytes:
                try:
                    messages.append(message_trytes.decode(decode_errors))
                except (TrytesDecodeError, UnicodeDecodeError):
                    if errors != 'drop':
                        raise

        return messages

    def as_tryte_strings(self, head_to_tail: bool = False) -> List[TransactionTrytes]:
        """
        Returns TryteString representations of the transactions in this
        bundle.

        :param bool head_to_tail:
            Determines the order of the transactions:

            - ``True``: head txn first, tail txn last.
            - ``False`` (default): tail txn first, head txn last.

            Note that the order is reversed by default, as this is the
            way bundles are typically broadcast to the Tangle.

        :return:
            ``List[TransactionTrytes]``
        """
        transactions = self if head_to_tail else reversed(self)
        return [t.as_tryte_string() for t in transactions]

    def as_json_compatible(self) -> List[dict]:
        """
        Returns a JSON-compatible representation of the object.

        :return:
            ``List[dict]``. The ``dict`` list elements contain
            individual transactions as in
            :py:meth:`Transaction.as_json_compatible`.

        References:

        - :py:class:`iota.json.JsonEncoder`.
        """
        return [txn.as_json_compatible() for txn in self]

    def group_transactions(self) -> List[List[Transaction]]:
        """
        Groups transactions in the bundle by address.

        :return:
            ``List[List[Transaction]]``
        """
        groups = []

        if self:
            last_txn = self.tail_transaction
            current_group = [last_txn]
            for current_txn in self.transactions[1:]:
                # Transactions are grouped by address, so as long as the
                # address stays consistent from one transaction to
                # another, we are still in the same group.
                if current_txn.address == last_txn.address:
                    current_group.append(current_txn)
                else:
                    # Address changed: close off the previous group and
                    # start a new one.
                    groups.append(current_group)
                    current_group = [current_txn]

                last_txn = current_txn

            # Flush the final group (the loop only appends on an
            # address change).
            if current_group:
                groups.append(current_group)

        return groups
# Copyright 2012 by Wibowo Arindrarto. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Bio.SearchIO parser for BLAST+ XML output formats.""" # for more info: http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.mod.dtd import sys import re import warnings from itertools import chain from xml.sax.saxutils import XMLGenerator, escape from Bio import BiopythonParserWarning # For speed try to use cElementTree rather than ElementTree try: if (3, 0) <= sys.version_info[:2] <= (3, 1): # Workaround for bug in python 3.0 and 3.1, # see http://bugs.python.org/issue9257 from xml.etree import ElementTree as ElementTree else: from xml.etree import cElementTree as ElementTree except ImportError: from xml.etree import ElementTree as ElementTree from Bio.Alphabet import generic_dna, generic_protein from Bio.SearchIO._index import SearchIndexer from Bio.SearchIO._model import QueryResult, Hit, HSP, HSPFragment from Bio._py3k import _as_bytes, _bytes_to_string, unicode _empty_bytes_string = _as_bytes("") __all__ = ['BlastXmlParser', 'BlastXmlIndexer', 'BlastXmlWriter'] # element - optional qresult attribute name mapping _ELEM_QRESULT_OPT = { 'Statistics_db-num': ('stat_db_num', int), 'Statistics_db-len': ('stat_db_len', int), 'Statistics_eff-space': ('stat_eff_space', float), 'Statistics_hsp-len': ('stat_hsp_len', int), 'Statistics_kappa': ('stat_kappa', float), 'Statistics_lambda': ('stat_lambda', float), 'Statistics_entropy': ('stat_entropy', float), } # element - hit attribute name mapping _ELEM_HIT = { # 'Hit_def': ('description', str), # not set by this dict 'Hit_accession': ('accession', str), 'Hit_len': ('seq_len', int), } # element - hsp attribute name mapping _ELEM_HSP = { 'Hsp_bit-score': ('bitscore', float), 'Hsp_score': ('bitscore_raw', int), 'Hsp_evalue': ('evalue', float), 'Hsp_identity': ('ident_num', int), 'Hsp_positive': 
('pos_num', int), 'Hsp_gaps': ('gap_num', int), 'Hsp_density': ('density', float), } # element - fragment attribute name mapping _ELEM_FRAG = { 'Hsp_query-from': ('query_start', int), 'Hsp_query-to': ('query_end', int), 'Hsp_hit-from': ('hit_start', int), 'Hsp_hit-to': ('hit_end', int), 'Hsp_query-frame': ('query_frame', int), 'Hsp_hit-frame': ('hit_frame', int), 'Hsp_align-len': ('aln_span', int), 'Hsp_pattern-from': ('pattern_start', int), 'Hsp_pattern-to': ('pattern_end', int), 'Hsp_hseq': ('hit', str), 'Hsp_qseq': ('query', str), } # dictionary for mapping tag name and meta key name _ELEM_META = { 'BlastOutput_db': ('target', str), 'BlastOutput_program': ('program', str), 'BlastOutput_version': ('version', str), 'BlastOutput_reference': ('reference', str), 'Parameters_expect': ('param_evalue_threshold', float), 'Parameters_entrez-query': ('param_entrez_query', str), 'Parameters_filter': ('param_filter', str), 'Parameters_gap-extend': ('param_gap_extend', int), 'Parameters_gap-open': ('param_gap_open', int), 'Parameters_include': ('param_include', str), 'Parameters_matrix': ('param_matrix', str), 'Parameters_pattern': ('param_pattern', str), 'Parameters_sc-match': ('param_score_match', int), 'Parameters_sc-mismatch': ('param_score_mismatch', int), } # these are fallback tags that store information on the first query # outside the <Iteration> tag # only used if query_{ID,def,len} is not found in <Iteration> # (seen in legacy Blast <2.2.14) _ELEM_QRESULT_FALLBACK = { 'BlastOutput_query-ID': ('id', str), 'BlastOutput_query-def': ('description', str), 'BlastOutput_query-len': ('len', str), } # element-attribute maps, for writing _WRITE_MAPS = { 'preamble': ( ('program', 'program'), ('version', 'version'), ('reference', 'reference'), ('db', 'target'), ('query-ID', 'id'), ('query-def', 'description'), ('query-len', 'seq_len'), ('param', None), ), 'param': ( ('matrix', 'param_matrix'), ('expect', 'param_evalue_threshold'), ('sc-match', 'param_score_match'), 
('sc-mismatch', 'param_score_mismatch'), ('gap-open', 'param_gap_open'), ('gap-extend', 'param_gap_extend'), ('filter', 'param_filter'), ('pattern', 'param_pattern'), ('entrez-query', 'param_entrez_query'), ), 'qresult': ( ('query-ID', 'id'), ('query-def', 'description'), ('query-len', 'seq_len'), ), 'stat': ( ('db-num', 'stat_db_num'), ('db-len', 'stat_db_len'), ('hsp-len', 'stat_hsp_len'), ('eff-space', 'stat_eff_space'), ('kappa', 'stat_kappa'), ('lambda', 'stat_lambda'), ('entropy', 'stat_entropy'), ), 'hit': ( ('id', 'id'), ('def', 'description'), ('accession', 'accession'), ('len', 'seq_len'), ), 'hsp': ( ('bit-score', 'bitscore'), ('score', 'bitscore_raw'), ('evalue', 'evalue'), ('query-from', 'query_start'), ('query-to', 'query_end'), ('hit-from', 'hit_start'), ('hit-to', 'hit_end'), ('pattern-from', 'pattern_start'), ('pattern-to', 'pattern_end'), ('query-frame', 'query_frame'), ('hit-frame', 'hit_frame'), ('identity', 'ident_num'), ('positive', 'pos_num'), ('gaps', 'gap_num'), ('align-len', 'aln_span'), ('density', 'density'), ('qseq', 'query'), ('hseq', 'hit'), ('midline', None), ), } # optional elements, based on the DTD _DTD_OPT = ( 'BlastOutput_query-seq', 'BlastOutput_mbstat', 'Iteration_query-def', 'Iteration_query-len', 'Iteration-hits', 'Iteration_stat', 'Iteration_message', 'Parameters_matrix', 'Parameters_include', 'Parameters_sc-match', 'Parameters_sc-mismatch', 'Parameters_filter', 'Parameters_pattern', 'Parameters_entrez-query', 'Hit_hsps', 'Hsp_pattern-from', 'Hsp_pattern-to', 'Hsp_query-frame', 'Hsp_hit-frame', 'Hsp_identity', 'Hsp_positive', 'Hsp_gaps', 'Hsp_align-len', 'Hsp_density', 'Hsp_midline', ) # compile RE patterns # for capturing BLAST version _RE_VERSION = re.compile(r'\d+\.\d+\.\d+\+?') # for splitting ID-description pairs _RE_ID_DESC_PAIRS_PATTERN = re.compile(" +>") # for splitting ID and description (must be used with maxsplit = 1) _RE_ID_DESC_PATTERN = re.compile(" +") def _extract_ids_and_descs(concat_str): # Given a string 
    # space-separated string of IDs and descriptions,
    # return a list of tuples, each tuple containing an ID and
    # a description string (which may be empty)

    # create a list of lists, each list containing an ID and description
    # or just an ID, if description is not present
    id_desc_pairs = [re.split(_RE_ID_DESC_PATTERN, x, 1)
                     for x in re.split(_RE_ID_DESC_PAIRS_PATTERN, concat_str)]
    # make sure empty descriptions are added as empty strings
    # also, we return lists for compatibility reasons between Py2 and Py3
    add_descs = lambda x: x if len(x) == 2 else x + [""]

    return [pair for pair in map(add_descs, id_desc_pairs)]


class BlastXmlParser(object):
    """Parser for the BLAST XML format"""

    def __init__(self, handle):
        # Incremental parser: iterate start/end events so we never hold
        # the whole document tree in memory.
        self.xml_iter = iter(ElementTree.iterparse(
            handle, events=('start', 'end')))
        self._meta, self._fallback = self._parse_preamble()

    def __iter__(self):
        for qresult in self._parse_qresult():
            yield qresult

    def _parse_preamble(self):
        """Parses all tag data prior to the first query result."""
        # dictionary for containing all information prior to the first query
        meta = {}
        # dictionary for fallback information
        fallback = {}

        # parse the preamble part (anything prior to the first result)
        for event, elem in self.xml_iter:
            # get the tag values, cast appropriately, store into meta
            if event == 'end' and elem.tag in _ELEM_META:
                attr_name, caster = _ELEM_META[elem.tag]

                if caster is not str:
                    meta[attr_name] = caster(elem.text)
                else:
                    meta[attr_name] = elem.text

                # delete element after we finish parsing it
                elem.clear()
                continue
            # capture fallback values
            # these are used only if the first <Iteration> does not have any
            # ID, ref, or len.
elif event == 'end' and elem.tag in _ELEM_QRESULT_FALLBACK: attr_name, caster = _ELEM_QRESULT_FALLBACK[elem.tag] if caster is not str: fallback[attr_name] = caster(elem.text) else: fallback[attr_name] = elem.text elem.clear() continue if event == 'start' and elem.tag == 'Iteration': break # we only want the version number, sans the program name or date if meta.get('version') is not None: meta['version'] = re.search(_RE_VERSION, meta['version']).group(0) return meta, fallback def _parse_qresult(self): """Parses query results.""" # parse the queries for event, qresult_elem in self.xml_iter: # </Iteration> marks the end of a single query # which means we can process it if event == 'end' and qresult_elem.tag == 'Iteration': # we'll use the following schema # <!ELEMENT Iteration ( # Iteration_iter-num, # Iteration_query-ID?, # Iteration_query-def?, # Iteration_query-len?, # Iteration_hits?, # Iteration_stat?, # Iteration_message?)> # assign query attributes with fallbacks query_id = qresult_elem.findtext('Iteration_query-ID') if query_id is None: query_id = self._fallback['id'] query_desc = qresult_elem.findtext('Iteration_query-def') if query_desc is None: query_desc = self._fallback['description'] query_len = qresult_elem.findtext('Iteration_query-len') if query_len is None: query_len = self._fallback['len'] # handle blast searches against databases with Blast's IDs # 'Query_' marks the beginning of a BLAST+-generated ID, # 'lcl|' marks the beginning of a BLAST legacy-generated ID if query_id.startswith('Query_') or query_id.startswith('lcl|'): # store the Blast-generated query ID blast_query_id = query_id id_desc = query_desc.split(' ', 1) query_id = id_desc[0] try: query_desc = id_desc[1] except IndexError: query_desc = '' else: blast_query_id = '' hit_list, key_list = [], [] for hit in self._parse_hit(qresult_elem.find('Iteration_hits'), query_id): if hit: # need to keep track of hit IDs, since there could be duplicates, if hit.id in key_list: warnings.warn("Adding 
hit with BLAST-generated ID " "%r since hit ID %r is already present " "in query %r. Your BLAST database may contain " "duplicate entries." % (hit._blast_id, hit.id, query_id), BiopythonParserWarning) # fallback to Blast-generated IDs, if the ID is already present # and restore the desc, too hit.description = '%s %s' % (hit.id, hit.description) hit.id = hit._blast_id # and change the hit_id of the HSPs contained for hsp in hit: hsp.hit_id = hit._blast_id else: key_list.append(hit.id) hit_list.append(hit) # create qresult and assign its attributes qresult = QueryResult(hit_list, query_id) qresult.description = query_desc qresult.seq_len = int(query_len) qresult._blast_id = blast_query_id for key, value in self._meta.items(): setattr(qresult, key, value) # statistics are stored in Iteration_stat's 'grandchildren' with the # following DTD # <!ELEMENT Statistics ( # Statistics_db-num, # Statistics_db-len, # Statistics_hsp-len, # Statistics_eff-space, # Statistics_kappa, # Statistics_lambda, # Statistics_entropy)> stat_iter_elem = qresult_elem.find('Iteration_stat') if stat_iter_elem is not None: stat_elem = stat_iter_elem.find('Statistics') for key, val_info in _ELEM_QRESULT_OPT.items(): value = stat_elem.findtext(key) if value is not None: caster = val_info[1] # recast only if value is not intended to be str if value is not None and caster is not str: value = caster(value) setattr(qresult, val_info[0], value) # delete element after we finish parsing it qresult_elem.clear() yield qresult def _parse_hit(self, root_hit_elem, query_id): """Generator that transforms Iteration_hits XML elements into Hit objects. :param root_hit_elem: root element of the Iteration_hits tag. 
:type root_hit_elem: XML element tag :param query_id: QueryResult ID of this Hit :type query_id: string """ # Hit level processing # Hits are stored in the Iteration_hits tag, with the following # DTD # <!ELEMENT Hit ( # Hit_num, # Hit_id, # Hit_def, # Hit_accession, # Hit_len, # Hit_hsps?)> # feed the loop below an empty list so iteration still works if root_hit_elem is None: root_hit_elem = [] for hit_elem in root_hit_elem: # create empty hit object hit_id = hit_elem.findtext('Hit_id') hit_desc = hit_elem.findtext('Hit_def') # handle blast searches against databases with Blast's IDs if hit_id.startswith('gnl|BL_ORD_ID|'): blast_hit_id = hit_id id_desc = hit_desc.split(' ', 1) hit_id = id_desc[0] try: hit_desc = id_desc[1] except IndexError: hit_desc = '' else: blast_hit_id = '' # combine primary ID and defline first before splitting full_id_desc = hit_id + ' ' + hit_desc id_descs = _extract_ids_and_descs(full_id_desc) hit_id, hit_desc = id_descs[0] hsps = [hsp for hsp in self._parse_hsp(hit_elem.find('Hit_hsps'), query_id, hit_id)] hit = Hit(hsps) hit.description = hit_desc hit._id_alt = [x[0] for x in id_descs[1:]] hit._description_alt = [x[1] for x in id_descs[1:]] # blast_hit_id is only set if the hit ID is Blast-generated hit._blast_id = blast_hit_id for key, val_info in _ELEM_HIT.items(): value = hit_elem.findtext(key) if value is not None: caster = val_info[1] # recast only if value is not intended to be str if value is not None and caster is not str: value = caster(value) setattr(hit, val_info[0], value) # delete element after we finish parsing it hit_elem.clear() yield hit def _parse_hsp(self, root_hsp_frag_elem, query_id, hit_id): """Iterator that transforms Hit_hsps XML elements into HSP objects. 
:param root_hsp_frag_elem: the ``Hit_hsps`` tag :type root_hsp_frag_elem: XML element tag :param query_id: query ID :type query_id: string :param hit_id: hit ID :type hit_id: string """ # Hit_hsps DTD: # <!ELEMENT Hsp ( # Hsp_num, # Hsp_bit-score, # Hsp_score, # Hsp_evalue, # Hsp_query-from, # Hsp_query-to, # Hsp_hit-from, # Hsp_hit-to, # Hsp_pattern-from?, # Hsp_pattern-to?, # Hsp_query-frame?, # Hsp_hit-frame?, # Hsp_identity?, # Hsp_positive?, # Hsp_gaps?, # Hsp_align-len?, # Hsp_density?, # Hsp_qseq, # Hsp_hseq, # Hsp_midline?)> # if value is None, feed the loop below an empty list if root_hsp_frag_elem is None: root_hsp_frag_elem = [] for hsp_frag_elem in root_hsp_frag_elem: coords = {} # temporary container for coordinates frag = HSPFragment(hit_id, query_id) for key, val_info in _ELEM_FRAG.items(): value = hsp_frag_elem.findtext(key) caster = val_info[1] # adjust 'from' and 'to' coordinates to 0-based ones if value is not None: if key.endswith('-from') or key.endswith('-to'): # store coordinates for further processing coords[val_info[0]] = caster(value) continue # recast only if value is not intended to be str elif caster is not str: value = caster(value) setattr(frag, val_info[0], value) # set the similarity characters into aln_annotation dict frag.aln_annotation['similarity'] = \ hsp_frag_elem.findtext('Hsp_midline') # process coordinates # since 'x-from' could be bigger than 'x-to', we need to figure # out which one is smaller/bigger since 'x_start' is always smaller # than 'x_end' for coord_type in ('query', 'hit', 'pattern'): start_type = coord_type + '_start' end_type = coord_type + '_end' try: start = coords[start_type] end = coords[end_type] except KeyError: continue else: # convert to python range and setattr setattr(frag, start_type, min(start, end) - 1) setattr(frag, end_type, max(start, end)) # set alphabet, based on program prog = self._meta.get('program') if prog == 'blastn': frag.alphabet = generic_dna elif prog in ['blastp', 'blastx', 
'tblastn', 'tblastx']: frag.alphabet = generic_protein hsp = HSP([frag]) for key, val_info in _ELEM_HSP.items(): value = hsp_frag_elem.findtext(key) caster = val_info[1] if value is not None: if caster is not str: value = caster(value) setattr(hsp, val_info[0], value) # delete element after we finish parsing it hsp_frag_elem.clear() yield hsp class BlastXmlIndexer(SearchIndexer): """Indexer class for BLAST XML output.""" _parser = BlastXmlParser qstart_mark = _as_bytes('<Iteration>') qend_mark = _as_bytes('</Iteration>') block_size = 16384 def __init__(self, filename): SearchIndexer.__init__(self, filename) # TODO: better way to do this? iter_obj = self._parser(self._handle) self._meta, self._fallback = iter_obj._meta, iter_obj._fallback def __iter__(self): qstart_mark = self.qstart_mark qend_mark = self.qend_mark blast_id_mark = _as_bytes('Query_') block_size = self.block_size handle = self._handle handle.seek(0) re_desc = re.compile(_as_bytes(r'<Iteration_query-ID>(.*?)' '</Iteration_query-ID>\s+?<Iteration_query-def>' '(.*?)</Iteration_query-def>')) re_desc_end = re.compile(_as_bytes(r'</Iteration_query-def>')) counter = 0 while True: start_offset = handle.tell() line = handle.readline() if not line: break if qstart_mark not in line: continue # The following requirements are to make supporting BGZF compressed # BLAST XML files simpler (avoids complex offset manipulations): assert line.count(qstart_mark) == 1, "XML without line breaks?" assert line.lstrip().startswith(qstart_mark), line if qend_mark in line: # Should cope with <Iteration>...</Iteration> on one long line block = line else: # Load the rest of this block up to and including </Iteration> block = [line] while line and qend_mark not in line: line = handle.readline() assert qstart_mark not in line, line block.append(line) assert line.rstrip().endswith(qend_mark), line block = _empty_bytes_string.join(block) assert block.count(qstart_mark) == 1, "XML without line breaks? 
%r" % block assert block.count(qend_mark) == 1, "XML without line breaks? %r" % block # Now we have a full <Iteration>...</Iteration> block, find the ID regx = re.search(re_desc, block) try: qstart_desc = regx.group(2) qstart_id = regx.group(1) except AttributeError: # use the fallback values assert re.search(re_desc_end, block) qstart_desc = _as_bytes(self._fallback['description']) qstart_id = _as_bytes(self._fallback['id']) if qstart_id.startswith(blast_id_mark): qstart_id = qstart_desc.split(_as_bytes(' '), 1)[0] yield _bytes_to_string(qstart_id), start_offset, len(block) counter += 1 def _parse(self, handle): # overwrites SearchIndexer._parse, since we need to set the meta and # fallback dictionaries to the parser generator = self._parser(handle, **self._kwargs) generator._meta = self._meta generator._fallback = self._fallback return next(iter(generator)) def get_raw(self, offset): """Return the raw record from the file as a bytes string.""" qend_mark = self.qend_mark handle = self._handle handle.seek(offset) qresult_raw = handle.readline() assert qresult_raw.lstrip().startswith(self.qstart_mark) while qend_mark not in qresult_raw: qresult_raw += handle.readline() assert qresult_raw.rstrip().endswith(qend_mark) assert qresult_raw.count(qend_mark) == 1 # Note this will include any leading and trailing whitespace, in # general expecting " <Iteration>\n...\n </Iteration>\n" return qresult_raw class _BlastXmlGenerator(XMLGenerator): """Event-based XML Generator.""" def __init__(self, out, encoding='utf-8', indent=" ", increment=2): XMLGenerator.__init__(self, out, encoding) # the indentation character self._indent = indent # nest level self._level = 0 # how many indentation character should we increment per level self._increment = increment # container for names of tags with children self._parent_stack = [] # determine writer method try: # this should work for all platforms except Jython self.write = self._write except AttributeError: # Jython uses self._out.write 
self.write = self._out.write def startDocument(self): """Starts the XML document.""" self.write(u'<?xml version="1.0"?>\n' '<!DOCTYPE BlastOutput PUBLIC "-//NCBI//NCBI BlastOutput/EN" ' '"http://www.ncbi.nlm.nih.gov/dtd/NCBI_BlastOutput.dtd">\n') def startElement(self, name, attrs=None, children=False): """Starts an XML element. :param name: element name :type name: string :param attrs: element attributes :type attrs: dictionary {string: object} :param children: whether the element has children or not :type children: bool """ if attrs is None: attrs = {} self.ignorableWhitespace(self._indent * self._level) XMLGenerator.startElement(self, name, attrs) def endElement(self, name): """Ends and XML element of the given name.""" XMLGenerator.endElement(self, name) self.write(u'\n') def startParent(self, name, attrs=None): """Starts an XML element which has children. :param name: element name :type name: string :param attrs: element attributes :type attrs: dictionary {string: object} """ if attrs is None: attrs = {} self.startElement(name, attrs, children=True) self._level += self._increment self.write(u'\n') # append the element name, so we can end it later self._parent_stack.append(name) def endParent(self): """Ends an XML element with children.""" # the element to end is the one on top of the stack name = self._parent_stack.pop() self._level -= self._increment self.ignorableWhitespace(self._indent * self._level) self.endElement(name) def startParents(self, *names): """Starts XML elements without children.""" for name in names: self.startParent(name) def endParents(self, num): """Ends XML elements, according to the given number.""" for i in range(num): self.endParent() def simpleElement(self, name, content=None): """Creates an XML element without children with the given content.""" self.startElement(name, attrs={}) if content: self.characters(content) self.endElement(name) def characters(self, content): content = escape(unicode(content)) for a, b in ((u'"', u'&quot;'), 
(u"'", u'&apos;')): content = content.replace(a, b) self.write(content) class BlastXmlWriter(object): """Stream-based BLAST+ XML Writer.""" def __init__(self, handle): self.xml = _BlastXmlGenerator(handle, 'utf-8') def write_file(self, qresults): """Writes the XML contents to the output handle.""" xml = self.xml self.qresult_counter, self.hit_counter, self.hsp_counter, \ self.frag_counter = 0, 0, 0, 0 # get the first qresult, since the preamble requires its attr values first_qresult = next(qresults) # start the XML document, set the root element, and create the preamble xml.startDocument() xml.startParent('BlastOutput') self._write_preamble(first_qresult) # and write the qresults xml.startParent('BlastOutput_iterations') self._write_qresults(chain([first_qresult], qresults)) xml.endParents(2) xml.endDocument() return self.qresult_counter, self.hit_counter, self.hsp_counter, \ self.frag_counter def _write_elem_block(self, block_name, map_name, obj, opt_dict=None): """Writes sibling XML elements. 
:param block_name: common element name prefix :type block_name: string :param map_name: name of mapping between element and attribute names :type map_name: string :param obj: object whose attribute value will be used :type obj: object :param opt_dict: custom element-attribute mapping :type opt_dict: dictionary {string: string} """ if opt_dict is None: opt_dict = {} for elem, attr in _WRITE_MAPS[map_name]: elem = block_name + elem try: content = str(getattr(obj, attr)) except AttributeError: # ensure attrs that is not present is optional assert elem in _DTD_OPT, "Element %r (attribute %r) not " \ "found" % (elem, attr) else: # custom element-attribute mapping, for fallback values if elem in opt_dict: content = opt_dict[elem] self.xml.simpleElement(elem, content) def _write_preamble(self, qresult): """Writes the XML file preamble.""" xml = self.xml for elem, attr in _WRITE_MAPS['preamble']: elem = 'BlastOutput_' + elem if elem == 'BlastOutput_param': xml.startParent(elem) self._write_param(qresult) xml.endParent() continue try: content = str(getattr(qresult, attr)) except AttributeError: assert elem in _DTD_OPT, "Element %s (attribute %s) not " \ "found" % (elem, attr) else: if elem == 'BlastOutput_version': content = '%s %s' % (qresult.program.upper(), qresult.version) elif qresult._blast_id: if elem == 'BlastOutput_query-ID': content = qresult._blast_id elif elem == 'BlastOutput_query-def': content = ' '.join([qresult.id, qresult.description]).strip() xml.simpleElement(elem, content) def _write_param(self, qresult): """Writes the parameter block of the preamble.""" xml = self.xml xml.startParent('Parameters') self._write_elem_block('Parameters_', 'param', qresult) xml.endParent() def _write_qresults(self, qresults): """Writes QueryResult objects into iteration elements.""" xml = self.xml for num, qresult in enumerate(qresults): xml.startParent('Iteration') xml.simpleElement('Iteration_iter-num', str(num + 1)) opt_dict = {} # use custom Iteration_query-ID and 
Iteration_query-def mapping # if the query has a BLAST-generated ID if qresult._blast_id: opt_dict = { 'Iteration_query-ID': qresult._blast_id, 'Iteration_query-def': ' '.join([qresult.id, qresult.description]).strip(), } self._write_elem_block('Iteration_', 'qresult', qresult, opt_dict) # the Iteration_hits tag only has children if there are hits if qresult: xml.startParent('Iteration_hits') self._write_hits(qresult.hits) xml.endParent() # otherwise it's a simple element without any contents else: xml.simpleElement('Iteration_hits', '') xml.startParents('Iteration_stat', 'Statistics') self._write_elem_block('Statistics_', 'stat', qresult) xml.endParents(2) # there's a message if no hits is present if not qresult: xml.simpleElement('Iteration_message', 'No hits found') self.qresult_counter += 1 xml.endParent() def _write_hits(self, hits): """Writes Hit objects.""" xml = self.xml for num, hit in enumerate(hits): xml.startParent('Hit') xml.simpleElement('Hit_num', str(num + 1)) # use custom hit_id and hit_def mapping if the hit has a # BLAST-generated ID opt_dict = {} if hit._blast_id: opt_dict = { 'Hit_id': hit._blast_id, 'Hit_def': ' '.join([hit.id, hit.description]).strip(), } self._write_elem_block('Hit_', 'hit', hit, opt_dict) xml.startParent('Hit_hsps') self._write_hsps(hit.hsps) self.hit_counter += 1 xml.endParents(2) def _write_hsps(self, hsps): """Writes HSP objects.""" xml = self.xml for num, hsp in enumerate(hsps): xml.startParent('Hsp') xml.simpleElement('Hsp_num', str(num + 1)) for elem, attr in _WRITE_MAPS['hsp']: elem = 'Hsp_' + elem try: content = self._adjust_output(hsp, elem, attr) # make sure any elements that is not present is optional # in the DTD except AttributeError: assert elem in _DTD_OPT, "Element %s (attribute %s) not found" \ % (elem, attr) else: xml.simpleElement(elem, str(content)) self.hsp_counter += 1 self.frag_counter += len(hsp.fragments) xml.endParent() def _adjust_output(self, hsp, elem, attr): """Adjusts output to mimic native 
BLAST+ XML as much as possible.""" # adjust coordinates if attr in ('query_start', 'query_end', 'hit_start', 'hit_end', 'pattern_start', 'pattern_end'): content = getattr(hsp, attr) + 1 if '_start' in attr: content = getattr(hsp, attr) + 1 else: content = getattr(hsp, attr) # adjust for 'from' <--> 'to' flip if it's not a translated search # and frames are different # adapted from /src/algo/blast/format/blastxml_format.cpp#L216 if hsp.query_frame != 0 and hsp.hit_frame < 0: if attr == 'hit_start': content = getattr(hsp, 'hit_end') elif attr == 'hit_end': content = getattr(hsp, 'hit_start') + 1 # for seqrecord objects, we only need the sequence string elif elem in ('Hsp_hseq', 'Hsp_qseq'): content = str(getattr(hsp, attr).seq) elif elem == 'Hsp_midline': content = hsp.aln_annotation['similarity'] elif elem in ('Hsp_evalue', 'Hsp_bit-score'): # adapted from src/algo/blast/format/blastxml_format.cpp#L138-140 content = '%.*g' % (6, getattr(hsp, attr)) else: content = getattr(hsp, attr) return content # if not used as a module, run the doctest if __name__ == "__main__": from Bio._utils import run_doctest run_doctest()
"""Tests for the ``accounts`` app's LDAP-backed models.

Covers: creation/lookup of ``RcLdapUser`` and ``RcLdapGroup`` objects in
organization-specific OUs, POSIX uid/gid allocation via ``IdTracker``, and
the ``AccountRequest`` approval workflow that provisions users and groups.
"""
from django.test import override_settings
import mock
import datetime
import ldap
import copy

from django.conf import settings

from tests.utilities.utils import SafeTestCase
from tests.utilities.ldap import (
    get_ldap_user_defaults,
    get_ldap_group_defaults,
    build_mock_rcldap_user,
    build_mock_rcldap_group,
    LdapTestCase
)
# Import namespace for mock
import accounts.models
from accounts.models import (
    IdTracker,
    AccountRequest,
    RcLdapUser,
    RcLdapGroup,
    User
)


class RcLdapUserTestCase(LdapTestCase):
    """Exercises RcLdapUser creation and organization-dependent DN behavior."""

    def test_create_ldap_user(self):
        """Creating a user requires a valid organization; the org selects the
        OU in the DN and decides whether ``effective_uid`` is suffixed."""
        ldap_user_dict = get_ldap_user_defaults()
        # A missing or unknown organization must be rejected outright ...
        with self.assertRaises(ValueError):
            RcLdapUser.objects.create(**ldap_user_dict)
        with self.assertRaises(ValueError):
            RcLdapUser.objects.create(organization='invalid_org',**ldap_user_dict)
        # ... and must not have created anything as a side effect.
        ldap_users = RcLdapUser.objects.all()
        self.assertEqual(ldap_users.count(),0)
        # Create ucb user: lands in ou=ucb and keeps a bare effective_uid.
        RcLdapUser.objects.create(organization='ucb',**ldap_user_dict)
        ucb_user = RcLdapUser.objects.get(uid=1010)
        self.assertEqual(ucb_user.dn.lower(),'uid=testuser,ou=ucb,ou=people,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(ucb_user.organization,'ucb')
        self.assertEqual(ucb_user.effective_uid,'testuser')
        self.assertEqual(ucb_user.username,'testuser')
        # Create csu user: lands in ou=csu; effective_uid gains the
        # @colostate.edu suffix while username stays unsuffixed.
        csu_ldap_user_dict = get_ldap_user_defaults()
        csu_ldap_user_dict.update(dict(uid=1011,gid=1011))
        RcLdapUser.objects.create(organization='csu',**csu_ldap_user_dict)
        csu_user = RcLdapUser.objects.get(uid=1011)
        self.assertEqual(csu_user.dn.lower(),'uid=testuser,ou=csu,ou=people,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(csu_user.organization,'csu')
        self.assertEqual(csu_user.effective_uid,'testuser@colostate.edu')
        self.assertEqual(csu_user.username,'testuser')

    def test_create_ldap_user_no_uid(self):
        """When uid/gid are omitted, the next id is drawn from IdTracker."""
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1001
        )
        ldap_user_dict = get_ldap_user_defaults()
        del ldap_user_dict['uid']
        del ldap_user_dict['gid']
        RcLdapUser.objects.create(organization='ucb',**ldap_user_dict)
        ldap_user = RcLdapUser.objects.get(username='testuser')
        # Both uid and gid come from the tracker's next_id.
        self.assertEqual(ldap_user.uid,1001)
        self.assertEqual(ldap_user.gid,1001)

    def test_get_ldap_user_from_suffixed_username(self):
        """Suffixed lookup routes 'user@domain' to the matching org's user."""
        ucb_ldap_user_dict = get_ldap_user_defaults()
        csu_ldap_user_dict = get_ldap_user_defaults()
        csu_ldap_user_dict.update(dict(uid=1011,gid=1011))
        RcLdapUser.objects.create(organization='ucb',**ucb_ldap_user_dict)
        RcLdapUser.objects.create(organization='csu',**csu_ldap_user_dict)
        ucb_user = RcLdapUser.objects.get_user_from_suffixed_username('testuser')
        self.assertEqual(ucb_user.uid,1010)
        csu_user = RcLdapUser.objects.get_user_from_suffixed_username('testuser@colostate.edu')
        self.assertEqual(csu_user.uid,1011)


class RcLdapGroupTestCase(LdapTestCase):
    """Exercises RcLdapGroup creation, mirroring the user-side org rules."""

    def test_create_ldap_group(self):
        """A group requires a valid organization; the org selects the OU and
        decides whether ``effective_cn`` carries a domain suffix."""
        ldap_group_dict = get_ldap_group_defaults()
        with self.assertRaises(ValueError):
            RcLdapGroup.objects.create(**ldap_group_dict)
        with self.assertRaises(ValueError):
            RcLdapGroup.objects.create(organization='invalid_org',**ldap_group_dict)
        ldap_groups = RcLdapGroup.objects.all()
        self.assertEqual(ldap_groups.count(),0)
        # Create ucb group
        RcLdapGroup.objects.create(organization='ucb',**ldap_group_dict)
        ucb_group = RcLdapGroup.objects.get(gid=1010)
        self.assertEqual(ucb_group.dn.lower(),'cn=testusergrp,ou=ucb,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(ucb_group.organization,'ucb')
        self.assertEqual(ucb_group.effective_cn,'testusergrp')
        self.assertEqual(ucb_group.name,'testusergrp')
        # Create csu group
        csu_ldap_group_dict = get_ldap_group_defaults()
        csu_ldap_group_dict['gid'] = 1011
        RcLdapGroup.objects.create(organization='csu',**csu_ldap_group_dict)
        csu_group = RcLdapGroup.objects.get(gid=1011)
        self.assertEqual(csu_group.dn.lower(),'cn=testusergrp,ou=csu,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(csu_group.organization,'csu')
        self.assertEqual(csu_group.effective_cn,'testusergrp@colostate.edu')
        self.assertEqual(csu_group.name,'testusergrp')

    def test_create_ldap_group_no_gid(self):
        """When gid is omitted, the next id is drawn from IdTracker."""
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1001
        )
        ldap_group_dict = get_ldap_group_defaults()
        del ldap_group_dict['gid']
        RcLdapGroup.objects.create(organization='ucb',**ldap_group_dict)
        ldap_group = RcLdapGroup.objects.get(name='testusergrp')
        self.assertEqual(ldap_group.gid,1001)


class IdTrackerTestCase(SafeTestCase):
    """Exercises IdTracker.get_next_id with mocked LDAP collision checks."""

    def test_get_next_id(self):
        """With no conflicting users/groups, next_id is returned and bumped."""
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1001
        )
        # Empty filter results mean no uid/gid collisions in LDAP.
        with mock.patch('accounts.models.RcLdapUser.objects.filter',return_value=[]):
            with mock.patch('accounts.models.RcLdapGroup.objects.filter',return_value=[]):
                next_id = idt.get_next_id()
        self.assertEqual(next_id, 1001)
        self.assertEqual(idt.next_id, 1002)

    def test_get_next_id_no_initial_value(self):
        """Without next_id, allocation starts at min_id and skips taken ids."""
        mock_ldap_user = build_mock_rcldap_user(uid=1000)
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500
        )
        # First probe (1000) hits an existing user; second (1001) is free.
        side_effects = [[mock_ldap_user], []]
        with mock.patch('accounts.models.RcLdapUser.objects.filter',side_effect=side_effects):
            with mock.patch('accounts.models.RcLdapGroup.objects.filter',return_value=[]):
                next_id = idt.get_next_id()
        self.assertEqual(next_id, 1001)
        self.assertEqual(idt.next_id, 1002)

    def test_get_next_id_conflict(self):
        """A collision on next_id advances past the taken id."""
        mock_ldap_user = build_mock_rcldap_user(uid=1002)
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1002
        )
        side_effects = [[mock_ldap_user], []]
        with mock.patch('accounts.models.RcLdapUser.objects.filter',side_effect=side_effects):
            with mock.patch('accounts.models.RcLdapGroup.objects.filter',return_value=[]):
                next_id = idt.get_next_id()
        self.assertEqual(next_id, 1003)
        self.assertEqual(idt.next_id, 1004)


def get_account_request_defaults():
    """Returns a dictionary of reasonable defaults for account request objects."""
    account_request_defaults = dict(
        username = 'testuser',
        first_name = 'Test',
        last_name = 'User',
        email = 'testuser@colorado.edu',
        role = 'faculty',
        department = 'physics',
        organization = 'ucb'
    )
    return account_request_defaults


class AccountCreationTestCase(LdapTestCase):
    """Exercises RcLdapUser.objects.create_user_from_request provisioning."""

    def setUp(self):
        super(AccountCreationTestCase,self).setUp()
        # Seed the POSIX id pool used for group/uid allocation in tests.
        idt = IdTracker.objects.create(
            category='posix',
            min_id=1000,
            max_id=1500,
            next_id=1001
        )
        # Create UCB license group
        license_grp_dict = dict(
            name = 'ucb',
            gid = 4000,
            members = []
        )
        RcLdapGroup.objects.create(organization='ucb',**license_grp_dict)

    @override_settings(LICENSE_GROUPS=dict(ucb = 'ucb'))
    def test_create_user_from_request(self):
        """UCB request: uid/gid mirror campus LDAP; personal + secondary
        groups are created and the license group gains the new member."""
        dict_from_request = get_account_request_defaults()
        # Campus (CU) LDAP lookup is mocked; its uid (9999) must be reused.
        mock_cu_user = mock.MagicMock(uid=9999)
        with mock.patch('accounts.models.CuLdapUser.objects.get',return_value=mock_cu_user):
            ldap_user = RcLdapUser.objects.create_user_from_request(**dict_from_request)
        self.assertEqual(ldap_user.dn.lower(), 'uid=testuser,ou=ucb,ou=people,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(ldap_user.username, 'testuser')
        self.assertEqual(ldap_user.first_name, 'Test')
        self.assertEqual(ldap_user.last_name, 'User')
        self.assertEqual(ldap_user.full_name, 'User, Test')
        self.assertEqual(ldap_user.email, 'testuser@colorado.edu')
        self.assertEqual(ldap_user.uid, 9999)
        self.assertEqual(ldap_user.gid, 9999)
        self.assertEqual(ldap_user.gecos, 'Test User,,,')
        self.assertEqual(ldap_user.home_directory, '/home/testuser')
        self.assertEqual(ldap_user.login_shell, '/bin/bash')
        # Faculty requests also get the 'pi' role.
        self.assertEqual(ldap_user.role, ['pi','faculty'])
        pgrp = RcLdapGroup.objects.get(name='testuserpgrp')
        sgrp = RcLdapGroup.objects.get(name='testusergrp')
        license_grp = RcLdapGroup.objects.get(name='ucb')
        # Personal group shares the campus uid; secondary group draws 1001
        # from the IdTracker, advancing next_id to 1002.
        self.assertEqual(pgrp.dn.lower(), 'cn=testuserpgrp,ou=ucb,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(pgrp.gid, 9999)
        self.assertEqual(sgrp.dn.lower(), 'cn=testusergrp,ou=ucb,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(sgrp.gid, 1001)
        idt = IdTracker.objects.get(category='posix')
        self.assertEqual(idt.next_id,1002)
        self.assertEqual(pgrp.members, ['testuser'])
        self.assertEqual(sgrp.members, ['testuser'])
        self.assertEqual(license_grp.members, ['testuser'])

    def test_create_suffixed_user_from_request(self):
        """CSU request: ids come from IdTracker and the home directory uses
        the suffixed effective uid."""
        dict_from_request = get_account_request_defaults()
        dict_from_request.update(dict(email='testuser@colostate.edu',organization='csu'))
        ldap_user = RcLdapUser.objects.create_user_from_request(**dict_from_request)
        self.assertEqual(ldap_user.dn.lower(), 'uid=testuser,ou=csu,ou=people,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(ldap_user.uid, 1001)
        self.assertEqual(ldap_user.gid, 1001)
        self.assertEqual(ldap_user.home_directory, '/home/testuser@colostate.edu')
        pgrp = RcLdapGroup.objects.get(name='testuserpgrp')
        sgrp = RcLdapGroup.objects.get(name='testusergrp')
        self.assertEqual(pgrp.dn.lower(), 'cn=testuserpgrp,ou=csu,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(pgrp.gid, 1001)
        self.assertEqual(sgrp.dn.lower(), 'cn=testusergrp,ou=csu,ou=groups,dc=rc,dc=int,dc=colorado,dc=edu')
        self.assertEqual(sgrp.gid, 1002)
        self.assertEqual(pgrp.members, ['testuser'])
        self.assertEqual(sgrp.members, ['testuser'])

    def test_create_user_from_request_missing_fields(self):
        """Dropping any required field raises TypeError and provisions
        nothing (no group ever claims the first tracker id)."""
        user_dict = {
            'username': 'requestuser',
            'first_name': 'Request',
            'last_name': 'User',
            'email': 'requser@requests.org',
            'organization': 'ucb',
        }
        for k in user_dict.keys():
            tmp_dict = copy.deepcopy(user_dict)
            del tmp_dict[k]
            self.assertRaises(
                TypeError,
                RcLdapUser.objects.create_user_from_request,
                **tmp_dict
            )
            self.assertRaises(
                RcLdapGroup.DoesNotExist,
                RcLdapGroup.objects.get,
                gid=1001
            )

    def test_create_user_from_request_sponsored(self):
        """Sponsored accounts expire one year out, stored as days since the
        Unix epoch."""
        dict_from_request = get_account_request_defaults()
        dict_from_request['role'] = 'sponsored'
        mock_cu_user = mock.MagicMock(uid=9999)
        with mock.patch('accounts.models.CuLdapUser.objects.get',return_value=mock_cu_user):
            ldap_user = RcLdapUser.objects.create_user_from_request(**dict_from_request)
        self.assertEqual(ldap_user.dn.lower(), 'uid=testuser,ou=ucb,ou=people,dc=rc,dc=int,dc=colorado,dc=edu')
        today = datetime.date.today()
        expire = today.replace(year=today.year+1)
        expire_days = (expire - datetime.date(1970, 1, 1)).days
        self.assertEqual(ldap_user.expires, expire_days)


class AccountRequestTestCase(SafeTestCase):
    """Exercises AccountRequest status transitions and approval side effects."""

    def setUp(self):
        super(AccountRequestTestCase,self).setUp()
        self.ar_dict = get_account_request_defaults()
        ar = AccountRequest.objects.create(**self.ar_dict)

    def test_update_account_request(self):
        """Saving an untouched pending request keeps it pending."""
        ar = AccountRequest.objects.get(username='testuser')
        self.assertEqual(ar.status,'p')
        ar.save()
        self.assertEqual(ar.status,'p')
        self.assertIsNone(ar.approved_on)

    def test_update_pending_account_request_approve_on_is_none(self):
        """A pending request may not carry an approval date; save() clears it."""
        ar = AccountRequest.objects.get(username='testuser')
        ar.approved_on = datetime.date.today()
        self.assertEqual(ar.status,'p')
        self.assertIsNotNone(ar.approved_on)
        ar.save()
        self.assertEqual(ar.status,'p')
        self.assertIsNone(ar.approved_on)

    def test_approve_request(self):
        """Approving a request provisions the LDAP user (minus 'department'),
        stamps approved_on, and creates the matching auth User."""
        mock_ldap_manager = mock.MagicMock()
        mock_rc_ldap_user = build_mock_rcldap_user()
        mock_rc_ldap_user.organization = 'ucb'
        mock_rc_ldap_user.effective_uid = mock_rc_ldap_user.username
        mock_ldap_manager.create_user_from_request.return_value = mock_rc_ldap_user
        # Patch out LDAP writes and signal dispatch; approval happens on save().
        with mock.patch('accounts.models.RcLdapUser.objects',mock_ldap_manager),mock.patch('django.dispatch.Signal.send') as account_request_approved_mock:
            ar = AccountRequest.objects.get(username='testuser')
            ar.status = 'a'
            ar.save()
        expected_dict = copy.deepcopy(self.ar_dict)
        # 'department' is not forwarded to the LDAP provisioning call.
        del expected_dict['department']
        mock_ldap_manager.create_user_from_request.assert_called_once_with(**expected_dict)
        self.assertIsNotNone(ar.approved_on)
        auth_user = User.objects.get(username=mock_rc_ldap_user.username)
        # Create new approved request: approval-on-create takes the same path.
        new_req = get_account_request_defaults()
        new_req.update(dict(username='testuser1',email='testuser1@colorado.edu'))
        mock_ldap_manager.reset_mock()
        with mock.patch('accounts.models.RcLdapUser.objects',mock_ldap_manager),mock.patch('django.dispatch.Signal.send') as account_request_approved_mock:
            ar = AccountRequest.objects.create(status='a',**new_req)
        expected_dict = copy.deepcopy(new_req)
        del expected_dict['department']
        mock_ldap_manager.create_user_from_request.assert_called_once_with(**expected_dict)
        self.assertEqual(ar.status,'a')
        self.assertIsNotNone(ar.approved_on)
############################################################################### ## ## Copyright (C) 2013-2014 Tavendo GmbH ## ## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. ## ############################################################################### from __future__ import absolute_import __all__ = ['Serializer', 'JsonObjectSerializer', 'JsonSerializer'] import six import struct from autobahn.wamp.interfaces import IObjectSerializer, ISerializer from autobahn.wamp.exception import ProtocolError from autobahn.wamp import message class Serializer: """ Base class for WAMP serializers. A WAMP serializer is the core glue between parsed WAMP message objects and the bytes on wire (the transport). 
""" MESSAGE_TYPE_MAP = { message.Hello.MESSAGE_TYPE: message.Hello, message.Welcome.MESSAGE_TYPE: message.Welcome, message.Abort.MESSAGE_TYPE: message.Abort, message.Challenge.MESSAGE_TYPE: message.Challenge, message.Authenticate.MESSAGE_TYPE: message.Authenticate, message.Goodbye.MESSAGE_TYPE: message.Goodbye, message.Heartbeat.MESSAGE_TYPE: message.Heartbeat, message.Error.MESSAGE_TYPE: message.Error, message.Publish.MESSAGE_TYPE: message.Publish, message.Published.MESSAGE_TYPE: message.Published, message.Subscribe.MESSAGE_TYPE: message.Subscribe, message.Subscribed.MESSAGE_TYPE: message.Subscribed, message.Unsubscribe.MESSAGE_TYPE: message.Unsubscribe, message.Unsubscribed.MESSAGE_TYPE: message.Unsubscribed, message.Event.MESSAGE_TYPE: message.Event, message.Call.MESSAGE_TYPE: message.Call, message.Cancel.MESSAGE_TYPE: message.Cancel, message.Result.MESSAGE_TYPE: message.Result, message.Register.MESSAGE_TYPE: message.Register, message.Registered.MESSAGE_TYPE: message.Registered, message.Unregister.MESSAGE_TYPE: message.Unregister, message.Unregistered.MESSAGE_TYPE: message.Unregistered, message.Invocation.MESSAGE_TYPE: message.Invocation, message.Interrupt.MESSAGE_TYPE: message.Interrupt, message.Yield.MESSAGE_TYPE: message.Yield } """ Mapping of WAMP message type codes to WAMP message classes. """ def __init__(self, serializer): """ Constructor. :param serializer: The object serializer to use for WAMP wire-level serialization. :type serializer: An object that implements :class:`autobahn.interfaces.IObjectSerializer`. 
""" self._serializer = serializer def serialize(self, msg): """ Implements :func:`autobahn.wamp.interfaces.ISerializer.serialize` """ return msg.serialize(self._serializer), self._serializer.BINARY def unserialize(self, payload, isBinary = None): """ Implements :func:`autobahn.wamp.interfaces.ISerializer.unserialize` """ if isBinary is not None: if isBinary != self._serializer.BINARY: raise ProtocolError("invalid serialization of WAMP message (binary {}, but expected {})".format(isBinary, self._serializer.BINARY)) try: raw_msgs = self._serializer.unserialize(payload) except Exception as e: raise ProtocolError("invalid serialization of WAMP message ({})".format(e)) msgs = [] for raw_msg in raw_msgs: if type(raw_msg) != list: raise ProtocolError("invalid type {} for WAMP message".format(type(raw_msg))) if len(raw_msg) == 0: raise ProtocolError("missing message type in WAMP message") message_type = raw_msg[0] if type(message_type) != int: raise ProtocolError("invalid type {} for WAMP message type".format(type(message_type))) Klass = self.MESSAGE_TYPE_MAP.get(message_type) if Klass is None: raise ProtocolError("invalid WAMP message type {}".format(message_type)) ## this might again raise `ProtocolError` .. msg = Klass.parse(raw_msg) msgs.append(msg) return msgs ## ## JSON serialization is always supported ## import json class JsonObjectSerializer: BINARY = False def __init__(self, batched = False): """ Ctor. :param batched: Flag that controls whether serializer operates in batched mode. 
:type batched: bool """ self._batched = batched def serialize(self, obj): """ Implements :func:`autobahn.wamp.interfaces.IObjectSerializer.serialize` """ s = json.dumps(obj, separators = (',',':')) if six.PY3: if self._batched: return s.encode('utf8') + '\30' else: return s.encode('utf8') else: if self._batched: return s + '\30' else: return s def unserialize(self, payload): """ Implements :func:`autobahn.wamp.interfaces.IObjectSerializer.unserialize` """ if self._batched: chunks = payload.split('\30')[:-1] else: chunks = [payload] if len(chunks) == 0: raise Exception("batch format error") if six.PY3: return [json.loads(data.decode('utf8')) for data in chunks] else: return [json.loads(data) for data in chunks] IObjectSerializer.register(JsonObjectSerializer) class JsonSerializer(Serializer): SERIALIZER_ID = "json" MIME_TYPE = "application/json" def __init__(self, batched = False): """ Ctor. :param batched: Flag to control whether to put this serialized into batched mode. :type batched: bool """ Serializer.__init__(self, JsonObjectSerializer(batched = batched)) if batched: self.SERIALIZER_ID = "json.batched" ISerializer.register(JsonSerializer) ## ## MsgPack serialization depends on the `msgpack` package being available ## try: import msgpack except ImportError: pass else: class MsgPackObjectSerializer: BINARY = True """ Flag that indicates whether this serializer needs a binary clean transport. """ ENABLE_V5 = True """ Enable version 5 of the MsgPack specification (which differentiates between strings and binary). """ def __init__(self, batched = False): """ Ctor. :param batched: Flag that controls whether serializer operates in batched mode. 
:type batched: bool """ self._batched = batched def serialize(self, obj): """ Implements :func:`autobahn.wamp.interfaces.IObjectSerializer.serialize` """ data = msgpack.packb(obj, use_bin_type = self.ENABLE_V5) if self._batched: return struct.pack("!L", len(data)) + data else: return data def unserialize(self, payload): """ Implements :func:`autobahn.wamp.interfaces.IObjectSerializer.unserialize` """ if self._batched: msgs = [] N = len(payload) i = 0 while i < N: ## read message length prefix if i + 4 > N: raise Exception("batch format error [1]") l = struct.unpack("!L", payload[i:i+4])[0] ## read message data if i + 4 + l > N: raise Exception("batch format error [2]") data = payload[i+4:i+4+l] ## append parsed raw message msgs.append(msgpack.unpackb(data, encoding = 'utf-8')) ## advance until everything consumed i = i+4+l if i != N: raise Exception("batch format error [3]") return msgs else: return [msgpack.unpackb(payload, encoding = 'utf-8')] IObjectSerializer.register(MsgPackObjectSerializer) __all__.append('MsgPackObjectSerializer') class MsgPackSerializer(Serializer): SERIALIZER_ID = "msgpack" MIME_TYPE = "application/x-msgpack" def __init__(self, batched = False): """ Ctor. :param batched: Flag to control whether to put this serialized into batched mode. :type batched: bool """ Serializer.__init__(self, MsgPackObjectSerializer(batched = batched)) if batched: self.SERIALIZER_ID = "msgpack.batched" ISerializer.register(MsgPackSerializer) __all__.append('MsgPackSerializer')
"""Test Z-Wave config panel.""" import asyncio import json from unittest.mock import MagicMock, patch from homeassistant.bootstrap import async_setup_component from homeassistant.components import config from homeassistant.components.zwave import DATA_NETWORK, const from homeassistant.components.config.zwave import ( ZWaveNodeValueView, ZWaveNodeGroupView, ZWaveNodeConfigView, ZWaveUserCodeView) from tests.common import mock_http_component_app from tests.mock.zwave import MockNode, MockValue, MockEntityValues VIEW_NAME = 'api:config:zwave:device_config' @asyncio.coroutine def test_get_device_config(hass, test_client): """Test getting device config.""" with patch.object(config, 'SECTIONS', ['zwave']): yield from async_setup_component(hass, 'config', {}) client = yield from test_client(hass.http.app) def mock_read(path): """Mock reading data.""" return { 'hello.beer': { 'free': 'beer', }, 'other.entity': { 'do': 'something', }, } with patch('homeassistant.components.config._read', mock_read): resp = yield from client.get( '/api/config/zwave/device_config/hello.beer') assert resp.status == 200 result = yield from resp.json() assert result == {'free': 'beer'} @asyncio.coroutine def test_update_device_config(hass, test_client): """Test updating device config.""" with patch.object(config, 'SECTIONS', ['zwave']): yield from async_setup_component(hass, 'config', {}) client = yield from test_client(hass.http.app) orig_data = { 'hello.beer': { 'ignored': True, }, 'other.entity': { 'polling_intensity': 2, }, } def mock_read(path): """Mock reading data.""" return orig_data written = [] def mock_write(path, data): """Mock writing data.""" written.append(data) with patch('homeassistant.components.config._read', mock_read), \ patch('homeassistant.components.config._write', mock_write): resp = yield from client.post( '/api/config/zwave/device_config/hello.beer', data=json.dumps({ 'polling_intensity': 2 })) assert resp.status == 200 result = yield from resp.json() assert result == 
{'result': 'ok'} orig_data['hello.beer']['polling_intensity'] = 2 assert written[0] == orig_data @asyncio.coroutine def test_update_device_config_invalid_key(hass, test_client): """Test updating device config.""" with patch.object(config, 'SECTIONS', ['zwave']): yield from async_setup_component(hass, 'config', {}) client = yield from test_client(hass.http.app) resp = yield from client.post( '/api/config/zwave/device_config/invalid_entity', data=json.dumps({ 'polling_intensity': 2 })) assert resp.status == 400 @asyncio.coroutine def test_update_device_config_invalid_data(hass, test_client): """Test updating device config.""" with patch.object(config, 'SECTIONS', ['zwave']): yield from async_setup_component(hass, 'config', {}) client = yield from test_client(hass.http.app) resp = yield from client.post( '/api/config/zwave/device_config/hello.beer', data=json.dumps({ 'invalid_option': 2 })) assert resp.status == 400 @asyncio.coroutine def test_update_device_config_invalid_json(hass, test_client): """Test updating device config.""" with patch.object(config, 'SECTIONS', ['zwave']): yield from async_setup_component(hass, 'config', {}) client = yield from test_client(hass.http.app) resp = yield from client.post( '/api/config/zwave/device_config/hello.beer', data='not json') assert resp.status == 400 @asyncio.coroutine def test_get_values(hass, test_client): """Test getting values on node.""" app = mock_http_component_app(hass) ZWaveNodeValueView().register(app.router) node = MockNode(node_id=1) value = MockValue(value_id=123456, node=node, label='Test Label', instance=1, index=2) values = MockEntityValues(primary=value) node2 = MockNode(node_id=2) value2 = MockValue(value_id=234567, node=node2, label='Test Label 2') values2 = MockEntityValues(primary=value2) hass.data[const.DATA_ENTITY_VALUES] = [values, values2] client = yield from test_client(app) resp = yield from client.get('/api/zwave/values/1') assert resp.status == 200 result = yield from resp.json() assert result 
== { '123456': { 'label': 'Test Label', 'instance': 1, 'index': 2, } } @asyncio.coroutine def test_get_groups(hass, test_client): """Test getting groupdata on node.""" app = mock_http_component_app(hass) ZWaveNodeGroupView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=2) node.groups.associations = 'assoc' node.groups.associations_instances = 'inst' node.groups.label = 'the label' node.groups.max_associations = 'max' node.groups = {1: node.groups} network.nodes = {2: node} client = yield from test_client(app) resp = yield from client.get('/api/zwave/groups/2') assert resp.status == 200 result = yield from resp.json() assert result == { '1': { 'association_instances': 'inst', 'associations': 'assoc', 'label': 'the label', 'max_associations': 'max' } } @asyncio.coroutine def test_get_groups_nogroups(hass, test_client): """Test getting groupdata on node with no groups.""" app = mock_http_component_app(hass) ZWaveNodeGroupView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=2) network.nodes = {2: node} client = yield from test_client(app) resp = yield from client.get('/api/zwave/groups/2') assert resp.status == 200 result = yield from resp.json() assert result == {} @asyncio.coroutine def test_get_groups_nonode(hass, test_client): """Test getting groupdata on nonexisting node.""" app = mock_http_component_app(hass) ZWaveNodeGroupView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() network.nodes = {1: 1, 5: 5} client = yield from test_client(app) resp = yield from client.get('/api/zwave/groups/2') assert resp.status == 404 result = yield from resp.json() assert result == {'message': 'Node not found'} @asyncio.coroutine def test_get_config(hass, test_client): """Test getting config on node.""" app = mock_http_component_app(hass) ZWaveNodeConfigView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=2) value = 
MockValue( index=12, command_class=const.COMMAND_CLASS_CONFIGURATION) value.label = 'label' value.help = 'help' value.type = 'type' value.data = 'data' value.data_items = ['item1', 'item2'] value.max = 'max' value.min = 'min' node.values = {12: value} network.nodes = {2: node} node.get_values.return_value = node.values client = yield from test_client(app) resp = yield from client.get('/api/zwave/config/2') assert resp.status == 200 result = yield from resp.json() assert result == {'12': {'data': 'data', 'data_items': ['item1', 'item2'], 'help': 'help', 'label': 'label', 'max': 'max', 'min': 'min', 'type': 'type'}} @asyncio.coroutine def test_get_config_noconfig_node(hass, test_client): """Test getting config on node without config.""" app = mock_http_component_app(hass) ZWaveNodeConfigView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=2) network.nodes = {2: node} node.get_values.return_value = node.values client = yield from test_client(app) resp = yield from client.get('/api/zwave/config/2') assert resp.status == 200 result = yield from resp.json() assert result == {} @asyncio.coroutine def test_get_config_nonode(hass, test_client): """Test getting config on nonexisting node.""" app = mock_http_component_app(hass) ZWaveNodeConfigView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() network.nodes = {1: 1, 5: 5} client = yield from test_client(app) resp = yield from client.get('/api/zwave/config/2') assert resp.status == 404 result = yield from resp.json() assert result == {'message': 'Node not found'} @asyncio.coroutine def test_get_usercodes_nonode(hass, test_client): """Test getting usercodes on nonexisting node.""" app = mock_http_component_app(hass) ZWaveUserCodeView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() network.nodes = {1: 1, 5: 5} client = yield from test_client(app) resp = yield from client.get('/api/zwave/usercodes/2') assert resp.status == 404 result = 
yield from resp.json() assert result == {'message': 'Node not found'} @asyncio.coroutine def test_get_usercodes(hass, test_client): """Test getting usercodes on node.""" app = mock_http_component_app(hass) ZWaveUserCodeView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE]) value = MockValue( index=0, command_class=const.COMMAND_CLASS_USER_CODE) value.genre = const.GENRE_USER value.label = 'label' value.data = '1234' node.values = {0: value} network.nodes = {18: node} node.get_values.return_value = node.values client = yield from test_client(app) resp = yield from client.get('/api/zwave/usercodes/18') assert resp.status == 200 result = yield from resp.json() assert result == {'0': {'code': '1234', 'label': 'label', 'length': 4}} @asyncio.coroutine def test_get_usercode_nousercode_node(hass, test_client): """Test getting usercodes on node without usercodes.""" app = mock_http_component_app(hass) ZWaveUserCodeView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=18) network.nodes = {18: node} node.get_values.return_value = node.values client = yield from test_client(app) resp = yield from client.get('/api/zwave/usercodes/18') assert resp.status == 200 result = yield from resp.json() assert result == {} @asyncio.coroutine def test_get_usercodes_no_genreuser(hass, test_client): """Test getting usercodes on node missing genre user.""" app = mock_http_component_app(hass) ZWaveUserCodeView().register(app.router) network = hass.data[DATA_NETWORK] = MagicMock() node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE]) value = MockValue( index=0, command_class=const.COMMAND_CLASS_USER_CODE) value.genre = const.GENRE_SYSTEM value.label = 'label' value.data = '1234' node.values = {0: value} network.nodes = {18: node} node.get_values.return_value = node.values client = yield from test_client(app) resp = yield from 
client.get('/api/zwave/usercodes/18') assert resp.status == 200 result = yield from resp.json() assert result == {}
import functools
import os.path
import random

from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.gridlayout import GridLayout
from kivy.uix.label import Label
from kivy.uix.popup import Popup
from kivy.uix.scrollview import ScrollView
from kivy.uix.textinput import TextInput

from src.util.board_configuration import BoardConfiguration

from kivy.config import Config
Config.set('graphics', 'width', '750')
Config.set('graphics', 'height', '600')
Config.set('graphics', 'resizable', 'False')

# Interaction modes: PLAY toggles a tile plus its orthogonal neighbours
# (normal Lights Out rules); EDIT toggles only the pressed tile.
PLAY_MODE = 0
EDIT_MODE = 1


class LightsOutGUI(App):
    """Kivy front-end for a Lights Out puzzle backed by a SAT solver.

    The board state lives in ``self.board_config`` (a BoardConfiguration);
    the on-screen tiles are ``self.buttons``, stored row-major so tile
    (row, col) is at index ``row * col_count + col``.
    """

    def __init__(self, minisat_wrapper=None, row_count=3, col_count=3, **kwargs):
        """Create the app.

        Args:
            minisat_wrapper: required solver adapter exposing
                ``solve(board_config[, previous_result])``.
            row_count/col_count: initial board dimensions.

        Raises:
            Exception: if ``minisat_wrapper`` is not supplied.
        """
        if minisat_wrapper is None:
            raise Exception('minisat_wrapper should be filled!')
        super(LightsOutGUI, self).__init__(**kwargs)
        self.minisat_wrapper = minisat_wrapper
        self.board_config = BoardConfiguration(row_count, col_count)
        self.mode = PLAY_MODE
        self.hinting_on = False

    def clear_board_hinting(self):
        """Turn off solver hinting and restore normal tile colors."""
        self.hinting_on = False
        for btn in self.buttons:
            btn.clear_hinting()

    def toggle_lights(self, num, obj):
        """Handle a press on tile ``num`` (row-major index); ``obj`` is the tile.

        In PLAY_MODE, also toggles the orthogonal neighbours that share the
        pressed tile's row or column (the modulo/floordiv test rejects
        left/right neighbours that would wrap across a row edge).
        """
        row, col = num // self.board_config.col_count, num % self.board_config.col_count
        if self.hinting_on:
            # Pressing a non-hinted tile invalidates the displayed solution;
            # pressing a hinted tile just consumes that hint.
            if not obj.hinting:
                self.clear_board_hinting()
            obj.clear_hinting()
        self.board_config.toggle_board(row, col)
        if self.mode == PLAY_MODE:
            MOVEMENT_VECTOR = set([-1, 1, -self.board_config.col_count,
                                   self.board_config.col_count])
            valid_tiles = [self.buttons[num + vec] for vec in MOVEMENT_VECTOR
                           if 0 <= num + vec < len(self.buttons) and
                           ((num + vec) % self.board_config.col_count ==
                            num % self.board_config.col_count or
                            (num + vec) // self.board_config.col_count ==
                            num // self.board_config.col_count)]
            for btn in valid_tiles:
                btn.toggle()
                self.board_config.toggle_board(btn.row, btn.col)
            if self.board_config.is_done():
                self.hinting_on = False
                popup = Popup(
                    title='Congratulations!',
                    content=Label(
                        text='Puzzle done!\nClick anywhere to continue.'),
                    size_hint=(None, None), size=(600, 250))
                popup.open()

    def board_changed(self, obj):
        """Invalidate the cached solver result after any board mutation."""
        self.last_result = None
        self.more_result_btn.disabled = True

    def build(self):
        """Build and return the root widget (board on the left, controls right)."""
        self.main_pane = GridLayout(rows=1, cols=2)
        self.left_pane = GridLayout()
        self.right_pane = GridLayout(col_force_default=True, col_default_width=175,
                                     cols=1, size_hint_x=None, width=215,
                                     padding=[20, 20])
        self.buttons = []
        self.fill_lights_out_left_pane()
        self.fill_lights_out_right_pane()
        self.main_pane.add_widget(self.left_pane)
        self.main_pane.add_widget(self.right_pane)
        return self.main_pane

    def fill_lights_out_left_pane(self):
        """Populate the board grid with freshly-created tiles (all off)."""
        self.left_pane.rows = self.board_config.row_count
        self.left_pane.cols = self.board_config.col_count
        for row in xrange(self.board_config.row_count):
            for col in xrange(self.board_config.col_count):
                btn = LightsOutTile(initial_state=0)
                # Both handlers fire per press: game logic, then cache reset.
                btn.bind(on_press=functools.partial(
                    self.toggle_lights,
                    row * self.board_config.col_count + col))
                btn.bind(on_press=self.board_changed)
                btn.row = row
                btn.col = col
                self.buttons.append(btn)
                self.left_pane.add_widget(btn)

    def set_mode(self, mode, obj):
        """Switch PLAY/EDIT mode, disabling the button for the active mode."""
        self.mode = mode
        if mode == PLAY_MODE:
            self.play_mode_btn.disabled = True
            self.edit_mode_btn.disabled = False
        elif mode == EDIT_MODE:
            self.play_mode_btn.disabled = False
            self.edit_mode_btn.disabled = True

    def randomize_board(self, obj):
        """Toggle each tile with probability 1/2, via EDIT mode so that a
        press flips only that tile, then restore the previous mode."""
        self.clear_board_hinting()
        initial_mode = self.mode
        if initial_mode == PLAY_MODE:
            self.set_mode(EDIT_MODE, None)
        for btn in self.buttons:
            if random.getrandbits(1) == 1:
                btn.trigger_action()
        if initial_mode == PLAY_MODE:
            self.set_mode(PLAY_MODE, None)

    def fill_lights_out_right_pane(self):
        """Populate the control column: dimension inputs, mode/randomize/solve
        buttons."""
        input_box = GridLayout(cols=1)
        HEIGHT = '25dp'
        SMALLER_HEIGHT = '10dp'
        input_box.add_widget(Label(text='# row', height=HEIGHT, size_hint_y=None))
        self.row_input = TextInput(text='{0}'.format(self.board_config.row_count),
                                   multiline=False, padding_x=10,
                                   height=HEIGHT, size_hint_y=None)
        input_box.add_widget(self.row_input)
        input_box.add_widget(Label(text='# col', height=HEIGHT, size_hint_y=None))
        # BUGFIX: seeded with row_count before; the column input must show
        # col_count (the slip was invisible with the square 3x3 default).
        self.col_input = TextInput(text='{0}'.format(self.board_config.col_count),
                                   multiline=False, padding_x=10,
                                   height=HEIGHT, size_hint_y=None)
        input_box.add_widget(self.col_input)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        apply_btn = Button(text='Apply', height=HEIGHT, size_hint_y=None)
        apply_btn.bind(on_press=self.update_dimension)
        input_box.add_widget(apply_btn)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        self.play_mode_btn = Button(text='Play Mode', height=HEIGHT,
                                    size_hint_y=None)
        self.play_mode_btn.bind(on_press=functools.partial(self.set_mode,
                                                           PLAY_MODE))
        self.play_mode_btn.disabled = True
        input_box.add_widget(self.play_mode_btn)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        self.edit_mode_btn = Button(text='Edit Mode', height=HEIGHT,
                                    size_hint_y=None)
        self.edit_mode_btn.bind(on_press=functools.partial(self.set_mode,
                                                           EDIT_MODE))
        input_box.add_widget(self.edit_mode_btn)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        randomize_btn = Button(text='Randomize!', height=HEIGHT, size_hint_y=None)
        randomize_btn.bind(on_press=self.randomize_board)
        input_box.add_widget(randomize_btn)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        solve_btn = Button(text='Solve', height=HEIGHT, size_hint_y=None)
        solve_btn.bind(on_press=self.solve_board)
        input_box.add_widget(solve_btn)
        input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        self.more_result_btn = Button(text='More Soln', height=HEIGHT,
                                      size_hint_y=None)
        self.more_result_btn.bind(on_press=self.get_more_solution)
        self.more_result_btn.disabled = True
        input_box.add_widget(self.more_result_btn)
        # input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        # input_box.add_widget(Label(text='Solution:', height=HEIGHT, size_hint_y=None))
        # input_box.add_widget(Label(height=SMALLER_HEIGHT, size_hint_y=None))
        # self.solver_field = TextInput(text='', padding=[15], readonly=True)
        # input_box.add_widget(self.solver_field)
        self.right_pane.add_widget(input_box)

    def update_dimension(self, obj):
        """Rebuild the board with the dimensions typed into the inputs."""
        self.board_config = BoardConfiguration(int(self.row_input.text),
                                               int(self.col_input.text))
        for btn in self.buttons:
            self.left_pane.remove_widget(btn)
        self.buttons = []
        self.fill_lights_out_left_pane()
        # self.solver_field.text = ''
        self.more_result_btn.disabled = True

    def solve_board(self, obj):
        """Run the SAT solver and highlight the tiles of a solution (if any)."""
        self.clear_board_hinting()
        self.last_result = self.minisat_wrapper.solve(self.board_config)
        # Solver variables are 1-based and offset past one board's worth of
        # variables; positive literals name the tiles to press.
        OFFSET = self.board_config.row_count * self.board_config.col_count + 1
        soln_normalized = [((num - OFFSET) // self.board_config.col_count,
                            (num - OFFSET) % self.board_config.col_count)
                           for num in self.last_result.latest_solution if num > 0]
        text = ''
        self.more_result_btn.disabled = False
        if self.last_result.is_satisfiable:
            self.hinting_on = True
            for x, y in soln_normalized:
                idx = x * self.board_config.col_count + y
                self.buttons[idx].set_hinting()
                text += '({0}, {1})\n'.format(x, y)
            if len(text) == 0:
                # Satisfiable with an empty pressing set == already solved.
                text = 'It\'s already solved!'
                self.more_result_btn.disabled = True
                popup = Popup(
                    title='?????',
                    content=Label(
                        text='Why try to solve an already-solved puzzle???\nClick anywhere to continue.'),
                    size_hint=(None, None), size=(700, 250))
                popup.open()
        else:
            self.clear_board_hinting()
            popup = Popup(
                title='Uh oh!',
                content=Label(
                    text='This configuration has no solution!\nClick anywhere to continue.'),
                size_hint=(None, None), size=(700, 250))
            popup.open()
            self.more_result_btn.disabled = True
            text = '(No soln)'
        # self.solver_field.text = text

    def get_more_solution(self, obj):
        """Ask the solver for a solution different from the last one found."""
        self.clear_board_hinting()
        self.last_result = self.minisat_wrapper.solve(self.board_config,
                                                      self.last_result)
        OFFSET = self.board_config.row_count * self.board_config.col_count + 1
        soln_normalized = [((num - OFFSET) // self.board_config.col_count,
                            (num - OFFSET) % self.board_config.col_count)
                           for num in self.last_result.latest_solution if num > 0]
        text = ''
        if self.last_result.is_satisfiable:
            self.hinting_on = True
            for x, y in soln_normalized:
                idx = x * self.board_config.col_count + y
                self.buttons[idx].set_hinting()
                text += '({0}, {1})\n'.format(x, y)
        else:
            self.hinting_on = False
            text = '(No other soln)'
            self.more_result_btn.disabled = True
            popup = Popup(
                title='Uh oh!',
                content=Label(
                    text='No other solution found!\nClick anywhere to continue.'),
                size_hint=(None, None), size=(600, 250))
            popup.open()
        # self.solver_field.text = text


class LightsOutTile(Button):
    """A single board tile; tracks on/off state and an optional hint highlight.

    ``light_state`` is 0 or 1 and indexes the color tables below.
    NOTE(review): state 0 maps to ON_COLOR / state 1 to OFF_COLOR — the
    naming looks inverted relative to the state value; confirm intended
    color semantics before renaming anything.
    """

    ON_COLOR = [2, 2, 2, 1]
    OFF_COLOR = [1, 1, 1, 1]
    ON_HINT_COLOR = [1.6, 2.25, 1.6, 1]
    OFF_HINT_COLOR = [0.75, 1.4, 0.75, 1]
    COLORS = [ON_COLOR, OFF_COLOR]
    HINT_COLORS = [ON_HINT_COLOR, OFF_HINT_COLOR]

    def __init__(self, initial_state=0, **kwargs):
        super(LightsOutTile, self).__init__(**kwargs)
        self.light_state = initial_state
        self.background_down = ''
        self.background_color = LightsOutTile.COLORS[initial_state]
        self.hinting = False

    def toggle(self):
        """Flip the light and recolor, preserving any hint highlight."""
        self.light_state ^= 1
        if self.hinting:
            self.background_color = LightsOutTile.HINT_COLORS[self.light_state]
        else:
            self.background_color = LightsOutTile.COLORS[self.light_state]

    def reset(self):
        """Return to the initial off, un-hinted appearance."""
        self.light_state = 0
        self.hinting = False
        self.background_color = LightsOutTile.COLORS[self.light_state]

    def on_press(self):
        # Kivy press hook: a physical press always flips this tile.
        super(LightsOutTile, self).on_press()
        self.toggle()

    def set_hinting(self, hint=None):
        """Highlight this tile as part of a solver solution."""
        if hint is None:
            hint = self.light_state
        self.hinting = True
        self.background_color = LightsOutTile.HINT_COLORS[hint]

    def clear_hinting(self):
        """Remove the hint highlight, restoring the normal state color."""
        self.hinting = False
        self.background_color = LightsOutTile.COLORS[self.light_state]
# -*- coding: utf-8 -*-

"""
These test the private routines in types/cast.py
"""

import pytest
from datetime import datetime, timedelta, date
import numpy as np

import pandas as pd
from pandas import (Timedelta, Timestamp, DatetimeIndex,
                    DataFrame, NaT, Period, Series)
from pandas.core.dtypes.cast import (
    maybe_downcast_to_dtype,
    maybe_convert_objects,
    cast_scalar_to_array,
    infer_dtype_from_scalar,
    infer_dtype_from_array,
    maybe_convert_string_to_object,
    maybe_convert_scalar,
    find_common_type)
from pandas.core.dtypes.dtypes import (
    CategoricalDtype,
    DatetimeTZDtype,
    PeriodDtype)
from pandas.core.dtypes.common import (
    is_dtype_equal)
from pandas.util import testing as tm


class TestMaybeDowncast(object):
    """Tests for maybe_downcast_to_dtype."""

    def test_downcast_conv(self):
        # test downcasting
        arr = np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995])
        result = maybe_downcast_to_dtype(arr, 'infer')
        assert (np.array_equal(result, arr))

        arr = np.array([8., 8., 8., 8., 8.9999999999995])
        result = maybe_downcast_to_dtype(arr, 'infer')
        expected = np.array([8, 8, 8, 8, 9])
        assert (np.array_equal(result, expected))

        arr = np.array([8., 8., 8., 8., 9.0000000000005])
        result = maybe_downcast_to_dtype(arr, 'infer')
        expected = np.array([8, 8, 8, 8, 9])
        assert (np.array_equal(result, expected))

        # GH16875 coercing of bools
        ser = Series([True, True, False])
        result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))
        expected = ser
        tm.assert_series_equal(result, expected)

        # conversions
        expected = np.array([1, 2])
        for dtype in [np.float64, object, np.int64]:
            arr = np.array([1.0, 2.0], dtype=dtype)
            result = maybe_downcast_to_dtype(arr, 'infer')
            tm.assert_almost_equal(result, expected, check_dtype=False)

        for dtype in [np.float64, object]:
            expected = np.array([1.0, 2.0, np.nan], dtype=dtype)
            arr = np.array([1.0, 2.0, np.nan], dtype=dtype)
            result = maybe_downcast_to_dtype(arr, 'infer')
            tm.assert_almost_equal(result, expected)

        # empties
        for dtype in [np.int32, np.float64, np.float32, np.bool_,
                      np.int64, object]:
            arr = np.array([], dtype=dtype)
            result = maybe_downcast_to_dtype(arr, 'int64')
            tm.assert_almost_equal(result, np.array([], dtype=np.int64))
            assert result.dtype == np.int64

    def test_datetimelikes_nan(self):
        # NaN should become the dtype-appropriate NaT on downcast.
        arr = np.array([1, 2, np.nan])
        exp = np.array([1, 2, np.datetime64('NaT')], dtype='datetime64[ns]')
        res = maybe_downcast_to_dtype(arr, 'datetime64[ns]')
        tm.assert_numpy_array_equal(res, exp)

        exp = np.array([1, 2, np.timedelta64('NaT')], dtype='timedelta64[ns]')
        res = maybe_downcast_to_dtype(arr, 'timedelta64[ns]')
        tm.assert_numpy_array_equal(res, exp)

    def test_datetime_with_timezone(self):
        # GH 15426
        ts = Timestamp("2016-01-01 12:00:00", tz='US/Pacific')
        exp = DatetimeIndex([ts, ts])

        res = maybe_downcast_to_dtype(exp, exp.dtype)
        tm.assert_index_equal(res, exp)

        res = maybe_downcast_to_dtype(exp.asi8, exp.dtype)
        tm.assert_index_equal(res, exp)


class TestInferDtype(object):
    """Tests for infer_dtype_from_scalar / infer_dtype_from_array."""

    def testinfer_dtype_from_scalar(self):
        # Test that infer_dtype_from_scalar is returning correct dtype for int
        # and float.

        for dtypec in [np.uint8, np.int8, np.uint16, np.int16, np.uint32,
                       np.int32, np.uint64, np.int64]:
            data = dtypec(12)
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == type(data)

        data = 12
        dtype, val = infer_dtype_from_scalar(data)
        assert dtype == np.int64

        for dtypec in [np.float16, np.float32, np.float64]:
            data = dtypec(12)
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == dtypec

        data = np.float(12)
        dtype, val = infer_dtype_from_scalar(data)
        assert dtype == np.float64

        for data in [True, False]:
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == np.bool_

        for data in [np.complex64(1), np.complex128(1)]:
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == np.complex_

        for data in [np.datetime64(1, 'ns'), Timestamp(1),
                     datetime(2000, 1, 1, 0, 0)]:
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == 'M8[ns]'

        for data in [np.timedelta64(1, 'ns'), Timedelta(1),
                     timedelta(1)]:
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == 'm8[ns]'

        for tz in ['UTC', 'US/Eastern', 'Asia/Tokyo']:
            dt = Timestamp(1, tz=tz)
            dtype, val = infer_dtype_from_scalar(dt, pandas_dtype=True)
            assert dtype == 'datetime64[ns, {0}]'.format(tz)
            assert val == dt.value

            dtype, val = infer_dtype_from_scalar(dt)
            assert dtype == np.object_
            assert val == dt

        for freq in ['M', 'D']:
            p = Period('2011-01-01', freq=freq)
            dtype, val = infer_dtype_from_scalar(p, pandas_dtype=True)
            assert dtype == 'period[{0}]'.format(freq)
            assert val == p.ordinal

            dtype, val = infer_dtype_from_scalar(p)
            # BUGFIX: this was a bare `dtype == np.object_` comparison, a
            # no-op that never checked anything; the parallel Timestamp
            # branch above asserts, so this one clearly should too.
            assert dtype == np.object_
            assert val == p

        # misc
        for data in [date(2000, 1, 1),
                     Timestamp(1, tz='US/Eastern'), 'foo']:
            dtype, val = infer_dtype_from_scalar(data)
            assert dtype == np.object_

    def testinfer_dtype_from_scalar_errors(self):
        # Non-scalar input must be rejected.
        with pytest.raises(ValueError):
            infer_dtype_from_scalar(np.array([1]))

    @pytest.mark.parametrize(
        "arr, expected, pandas_dtype",
        [('foo', np.object_, False),
         (b'foo', np.object_, False),
         (1, np.int_, False),
         (1.5, np.float_, False),
         ([1], np.int_, False),
         (np.array([1], dtype=np.int64), np.int64, False),
         ([np.nan, 1, ''], np.object_, False),
         (np.array([[1.0, 2.0]]), np.float_, False),
         (pd.Categorical(list('aabc')), np.object_, False),
         (pd.Categorical([1, 2, 3]), np.int64, False),
         (pd.Categorical(list('aabc')), 'category', True),
         (pd.Categorical([1, 2, 3]), 'category', True),
         (Timestamp('20160101'), np.object_, False),
         (np.datetime64('2016-01-01'), np.dtype('<M8[D]'), False),
         (pd.date_range('20160101', periods=3),
          np.dtype('<M8[ns]'), False),
         (pd.date_range('20160101', periods=3, tz='US/Eastern'),
          'datetime64[ns, US/Eastern]', True),
         (pd.Series([1., 2, 3]), np.float64, False),
         (pd.Series(list('abc')), np.object_, False),
         (pd.Series(pd.date_range('20160101', periods=3, tz='US/Eastern')),
          'datetime64[ns, US/Eastern]', True)])
    def test_infer_dtype_from_array(self, arr, expected, pandas_dtype):
        dtype, _ = infer_dtype_from_array(arr, pandas_dtype=pandas_dtype)
        assert is_dtype_equal(dtype, expected)

    def test_cast_scalar_to_array(self):
        arr = cast_scalar_to_array((3, 2), 1, dtype=np.int64)
        exp = np.ones((3, 2), dtype=np.int64)
        tm.assert_numpy_array_equal(arr, exp)

        arr = cast_scalar_to_array((3, 2), 1.1)
        exp = np.empty((3, 2), dtype=np.float64)
        exp.fill(1.1)
        tm.assert_numpy_array_equal(arr, exp)

        arr = cast_scalar_to_array((2, 3), Timestamp('2011-01-01'))
        exp = np.empty((2, 3), dtype='datetime64[ns]')
        exp.fill(np.datetime64('2011-01-01'))
        tm.assert_numpy_array_equal(arr, exp)

        # pandas dtype is stored as object dtype
        obj = Timestamp('2011-01-01', tz='US/Eastern')
        arr = cast_scalar_to_array((2, 3), obj)
        exp = np.empty((2, 3), dtype=np.object)
        exp.fill(obj)
        tm.assert_numpy_array_equal(arr, exp)

        obj = Period('2011-01-01', freq='D')
        arr = cast_scalar_to_array((2, 3), obj)
        exp = np.empty((2, 3), dtype=np.object)
        exp.fill(obj)
        tm.assert_numpy_array_equal(arr, exp)


class TestMaybe(object):
    """Tests for maybe_convert_string_to_object / maybe_convert_scalar."""

    def test_maybe_convert_string_to_array(self):
        result = maybe_convert_string_to_object('x')
        tm.assert_numpy_array_equal(result, np.array(['x'], dtype=object))
        assert result.dtype == object

        result = maybe_convert_string_to_object(1)
        assert result == 1

        arr = np.array(['x', 'y'], dtype=str)
        result = maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
        assert result.dtype == object

        # unicode
        arr = np.array(['x', 'y']).astype('U')
        result = maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 'y'], dtype=object))
        assert result.dtype == object

        # object
        arr = np.array(['x', 2], dtype=object)
        result = maybe_convert_string_to_object(arr)
        tm.assert_numpy_array_equal(result, np.array(['x', 2], dtype=object))
        assert result.dtype == object

    def test_maybe_convert_scalar(self):

        # pass thru
        result = maybe_convert_scalar('x')
        assert result == 'x'
        result = maybe_convert_scalar(np.array([1]))
        assert result == np.array([1])

        # leave scalar dtype
        result = maybe_convert_scalar(np.int64(1))
        assert result == np.int64(1)
        result = maybe_convert_scalar(np.int32(1))
        assert result == np.int32(1)
        result = maybe_convert_scalar(np.float32(1))
        assert result == np.float32(1)
        # NOTE(review): np.int64 appears twice while np.float64 is never
        # exercised here; the assertion still passes since 1 == 1.0, but this
        # input looks like a copy-paste slip — probably meant np.float64(1).
        result = maybe_convert_scalar(np.int64(1))
        assert result == np.float64(1)

        # coerce
        result = maybe_convert_scalar(1)
        assert result == np.int64(1)
        result = maybe_convert_scalar(1.0)
        assert result == np.float64(1)
        result = maybe_convert_scalar(Timestamp('20130101'))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(datetime(2013, 1, 1))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(Timedelta('1 day 1 min'))
        assert result == Timedelta('1 day 1 min').value

    def test_maybe_infer_to_datetimelike(self):
        # GH16362
        # pandas=0.20.1 raises IndexError: tuple index out of range
        result = DataFrame(np.array([[NaT, 'a', 'b', 0],
                                     [NaT, 'b', 'c', 1]]))
        assert result.size == 8
        # this construction was fine
        result = DataFrame(np.array([[NaT, 'a', 0],
                                     [NaT, 'b', 1]]))
        assert result.size == 6


class TestConvert(object):
    """Tests for maybe_convert_objects."""

    def test_maybe_convert_objects_copy(self):
        # copy=False must return the same object; copy=True a new one.
        values = np.array([1, 2])

        out = maybe_convert_objects(values, copy=False)
        assert values is out

        out = maybe_convert_objects(values, copy=True)
        assert values is not out

        values = np.array(['apply', 'banana'])
        out = maybe_convert_objects(values, copy=False)
        assert values is out

        out = maybe_convert_objects(values, copy=True)
        assert values is not out


class TestCommonTypes(object):
    """Tests for find_common_type."""

    def test_numpy_dtypes(self):
        # (source_types, destination_type)
        testcases = (
            # identity
            ((np.int64,), np.int64),
            ((np.uint64,), np.uint64),
            ((np.float32,), np.float32),
            ((np.object,), np.object),

            # into ints
            ((np.int16, np.int64), np.int64),
            ((np.int32, np.uint32), np.int64),
            ((np.uint16, np.uint64), np.uint64),

            # into floats
            ((np.float16, np.float32), np.float32),
            ((np.float16, np.int16), np.float32),
            ((np.float32, np.int16), np.float32),
            ((np.uint64, np.int64), np.float64),
            ((np.int16, np.float64), np.float64),
            ((np.float16, np.int64), np.float64),

            # into others
            ((np.complex128, np.int32), np.complex128),
            ((np.object, np.float32), np.object),
            ((np.object, np.int16), np.object),

            # bool with int
            ((np.dtype('bool'), np.int64), np.object),
            ((np.dtype('bool'), np.int32), np.object),
            ((np.dtype('bool'), np.int16), np.object),
            ((np.dtype('bool'), np.int8), np.object),
            ((np.dtype('bool'), np.uint64), np.object),
            ((np.dtype('bool'), np.uint32), np.object),
            ((np.dtype('bool'), np.uint16), np.object),
            ((np.dtype('bool'), np.uint8), np.object),

            # bool with float
            ((np.dtype('bool'), np.float64), np.object),
            ((np.dtype('bool'), np.float32), np.object),

            ((np.dtype('datetime64[ns]'), np.dtype('datetime64[ns]')),
             np.dtype('datetime64[ns]')),
            ((np.dtype('timedelta64[ns]'), np.dtype('timedelta64[ns]')),
             np.dtype('timedelta64[ns]')),

            ((np.dtype('datetime64[ns]'), np.dtype('datetime64[ms]')),
             np.dtype('datetime64[ns]')),
            ((np.dtype('timedelta64[ms]'), np.dtype('timedelta64[ns]')),
             np.dtype('timedelta64[ns]')),

            ((np.dtype('datetime64[ns]'), np.dtype('timedelta64[ns]')),
             np.object),
            ((np.dtype('datetime64[ns]'), np.int64), np.object)
        )
        for src, common in testcases:
            assert find_common_type(src) == common

        with pytest.raises(ValueError):
            # empty
            find_common_type([])

    def test_categorical_dtype(self):
        dtype = CategoricalDtype()
        assert find_common_type([dtype]) == 'category'
        assert find_common_type([dtype, dtype]) == 'category'
        assert find_common_type([np.object, dtype]) == np.object

    def test_datetimetz_dtype(self):
        dtype = DatetimeTZDtype(unit='ns', tz='US/Eastern')
        assert find_common_type([dtype, dtype]) == 'datetime64[ns, US/Eastern]'
        # Mixing tz-aware with anything else falls back to object.
        for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
                       np.dtype('datetime64[ns]'), np.object, np.int64]:
            assert find_common_type([dtype, dtype2]) == np.object
            assert find_common_type([dtype2, dtype]) == np.object

    def test_period_dtype(self):
        dtype = PeriodDtype(freq='D')
        assert find_common_type([dtype, dtype]) == 'period[D]'
        # Mixing period with any other dtype falls back to object.
        for dtype2 in [DatetimeTZDtype(unit='ns', tz='Asia/Tokyo'),
                       PeriodDtype(freq='2D'), PeriodDtype(freq='H'),
                       np.dtype('datetime64[ns]'), np.object, np.int64]:
            assert find_common_type([dtype, dtype2]) == np.object
            assert find_common_type([dtype2, dtype]) == np.object
"""Test the module ensemble classifiers.""" # Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com> # Christos Aridas # License: MIT from collections import Counter import numpy as np import pytest from sklearn.datasets import load_iris, make_hastie_10_2, make_classification from sklearn.model_selection import ( GridSearchCV, ParameterGrid, train_test_split, ) from sklearn.dummy import DummyClassifier from sklearn.linear_model import Perceptron, LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.svm import SVC from sklearn.feature_selection import SelectKBest from sklearn.utils._testing import assert_array_equal from sklearn.utils._testing import assert_array_almost_equal from sklearn.utils._testing import assert_allclose from imblearn import FunctionSampler from imblearn.datasets import make_imbalance from imblearn.ensemble import BalancedBaggingClassifier from imblearn.over_sampling import RandomOverSampler, SMOTE from imblearn.pipeline import make_pipeline from imblearn.under_sampling import ClusterCentroids, RandomUnderSampler iris = load_iris() @pytest.mark.parametrize( "base_estimator", [ None, DummyClassifier(strategy="prior"), Perceptron(max_iter=1000, tol=1e-3), DecisionTreeClassifier(), KNeighborsClassifier(), SVC(gamma="scale"), ], ) @pytest.mark.parametrize( "params", ParameterGrid( { "max_samples": [0.5, 1.0], "max_features": [1, 2, 4], "bootstrap": [True, False], "bootstrap_features": [True, False], } ), ) def test_balanced_bagging_classifier(base_estimator, params): # Check classification for various parameter settings. 
# NOTE(review): the statements below are the tail of a test function whose
# ``def`` line lies above this chunk — keep them indented as a function body.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Smoke test: fit/predict must run without raising for the parametrized
    # base_estimator/params combination supplied by the (out-of-view) decorator.
    BalancedBaggingClassifier(
        base_estimator=base_estimator, random_state=0, **params
    ).fit(X_train, y_train).predict(X_test)


def test_bootstrap_samples():
    # Test that bootstrapping samples generate non-perfect base estimators.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    base_estimator = DecisionTreeClassifier().fit(X_train, y_train)
    # without bootstrap, all trees are perfect on the training set
    # disable the resampling by passing an empty dictionary.
    ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=False,
        n_estimators=10,
        sampling_strategy={},
        random_state=0,
    ).fit(X_train, y_train)
    assert ensemble.score(X_train, y_train) == base_estimator.score(X_train, y_train)
    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=True,
        random_state=0,
    ).fit(X_train, y_train)
    assert ensemble.score(X_train, y_train) < base_estimator.score(X_train, y_train)


def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # bootstrap_features=False: every estimator sees each feature exactly once
    ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_features=1.0,
        bootstrap_features=False,
        random_state=0,
    ).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert np.unique(features).shape[0] == X.shape[1]
    # bootstrap_features=True: sampling with replacement typically duplicates
    ensemble = BalancedBaggingClassifier(
        base_estimator=DecisionTreeClassifier(),
        max_features=1.0,
        bootstrap_features=True,
        random_state=0,
    ).fit(X_train, y_train)
    unique_features = [
        np.unique(features).shape[0] for features in ensemble.estimators_features_
    ]
    assert np.median(unique_features) < X.shape[1]


def test_probability():
    # Predict probabilities.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BalancedBaggingClassifier(
            base_estimator=DecisionTreeClassifier(), random_state=0
        ).fit(X_train, y_train)
        # probabilities sum to one and are consistent with log-probabilities
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1),
            np.ones(len(X_test)),
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test),
            np.exp(ensemble.predict_log_proba(X_test)),
        )
        # Degenerate case, where some classes are missing
        ensemble = BalancedBaggingClassifier(
            base_estimator=LogisticRegression(solver="lbfgs", multi_class="auto"),
            random_state=0,
            max_samples=5,
        )
        ensemble.fit(X_train, y_train)
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1),
            np.ones(len(X_test)),
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test),
            np.exp(ensemble.predict_log_proba(X_test)),
        )


def test_oob_score_classification():
    # Check that oob prediction is a good estimation of the generalization
    # error.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for base_estimator in [DecisionTreeClassifier(), SVC(gamma="scale")]:
        clf = BalancedBaggingClassifier(
            base_estimator=base_estimator,
            n_estimators=100,
            bootstrap=True,
            oob_score=True,
            random_state=0,
        ).fit(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        # OOB estimate should be within 0.1 of the held-out score
        assert abs(test_score - clf.oob_score_) < 0.1
        # Test with few estimators
        with pytest.warns(UserWarning):
            BalancedBaggingClassifier(
                base_estimator=base_estimator,
                n_estimators=1,
                bootstrap=True,
                oob_score=True,
                random_state=0,
            ).fit(X_train, y_train)


def test_single_estimator():
    # Check singleton ensembles.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf1 = BalancedBaggingClassifier(
        base_estimator=KNeighborsClassifier(),
        n_estimators=1,
        bootstrap=False,
        bootstrap_features=False,
        random_state=0,
    ).fit(X_train, y_train)
    # A 1-estimator bagger must match a plain undersample+KNN pipeline seeded
    # with the same random_state as the single internal estimator's sampler.
    clf2 = make_pipeline(
        RandomUnderSampler(random_state=clf1.estimators_[0].steps[0][1].random_state),
        KNeighborsClassifier(),
    ).fit(X_train, y_train)
    assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))


@pytest.mark.parametrize(
    "params",
    [
        {"n_estimators": 1.5},
        {"n_estimators": -1},
        {"max_samples": -1},
        {"max_samples": 0.0},
        {"max_samples": 2.0},
        {"max_samples": 1000},
        {"max_samples": "foobar"},
        {"max_features": -1},
        {"max_features": 0.0},
        {"max_features": 2.0},
        {"max_features": 5},
        {"max_features": "foobar"},
    ],
)
def test_balanced_bagging_classifier_error(params):
    # Test that it gives proper exception on deficient input.
    X, y = make_imbalance(
        iris.data, iris.target, sampling_strategy={0: 20, 1: 25, 2: 50}
    )
    base = DecisionTreeClassifier()
    clf = BalancedBaggingClassifier(base_estimator=base, **params)
    with pytest.raises(ValueError):
        clf.fit(X, y)
    # Test support of decision_function
    assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y), "decision_function"))


def test_gridsearch():
    # Check that bagging ensembles can be grid-searched.
    # Transform iris into a binary classification task
    X, y = iris.data, iris.target.copy()
    y[y == 2] = 1
    # Grid search with scoring based on decision_function
    parameters = {"n_estimators": (1, 2), "base_estimator__C": (1, 2)}
    GridSearchCV(
        BalancedBaggingClassifier(SVC(gamma="scale")),
        parameters,
        cv=3,
        scoring="roc_auc",
    ).fit(X, y)


def test_base_estimator():
    # Check base_estimator and its default values.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # default (None) base estimator is a decision tree
    ensemble = BalancedBaggingClassifier(None, n_jobs=3, random_state=0).fit(
        X_train, y_train
    )
    assert isinstance(ensemble.base_estimator_.steps[-1][1], DecisionTreeClassifier)
    ensemble = BalancedBaggingClassifier(
        DecisionTreeClassifier(), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    assert isinstance(ensemble.base_estimator_.steps[-1][1], DecisionTreeClassifier)
    ensemble = BalancedBaggingClassifier(
        Perceptron(max_iter=1000, tol=1e-3), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    assert isinstance(ensemble.base_estimator_.steps[-1][1], Perceptron)


def test_bagging_with_pipeline():
    # A full sklearn Pipeline must be accepted as the base estimator.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    estimator = BalancedBaggingClassifier(
        make_pipeline(SelectKBest(k=1), DecisionTreeClassifier()),
        max_features=2,
    )
    estimator.fit(X, y).predict(X)


def test_warm_start(random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = BalancedBaggingClassifier(
                n_estimators=n_estimators,
                random_state=random_state,
                warm_start=True,
            )
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert len(clf_ws) == n_estimators
    clf_no_ws = BalancedBaggingClassifier(
        n_estimators=10, random_state=random_state, warm_start=False
    )
    clf_no_ws.fit(X, y)
    # same per-estimator random states whether grown incrementally or at once
    assert {pipe.steps[-1][1].random_state for pipe in clf_ws} == {
        pipe.steps[-1][1].random_state for pipe in clf_no_ws
    }


def test_warm_start_smaller_n_estimators():
    # Test if warm start'ed second fit with smaller n_estimators raises error.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True)
    clf.fit(X, y)
    clf.set_params(n_estimators=4)
    with pytest.raises(ValueError):
        clf.fit(X, y)


def test_warm_start_equal_n_estimators():
    # Test that nothing happens when fitting without increasing n_estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # modify X to nonsense values, this should not change anything
    X_train += 1.0
    warn_msg = "Warm-start fitting without increasing n_estimators does not"
    with pytest.warns(UserWarning, match=warn_msg):
        clf.fit(X_train, y_train)
    assert_array_equal(y_pred, clf.predict(X_test))


def test_warm_start_equivalence():
    # warm started classifier with 5+5 estimators should be equivalent to
    # one classifier with 10 estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf_ws = BalancedBaggingClassifier(
        n_estimators=5, warm_start=True, random_state=3141
    )
    clf_ws.fit(X_train, y_train)
    clf_ws.set_params(n_estimators=10)
    clf_ws.fit(X_train, y_train)
    y1 = clf_ws.predict(X_test)
    clf = BalancedBaggingClassifier(
        n_estimators=10, warm_start=False, random_state=3141
    )
    clf.fit(X_train, y_train)
    y2 = clf.predict(X_test)
    assert_array_almost_equal(y1, y2)


def test_warm_start_with_oob_score_fails():
    # Check using oob_score and warm_start simultaneously fails
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
    with pytest.raises(ValueError):
        clf.fit(X, y)


def test_oob_score_removed_on_warm_start():
    # Refitting with oob_score switched off must drop the stale attribute.
    X, y = make_hastie_10_2(n_samples=2000, random_state=1)
    clf = BalancedBaggingClassifier(n_estimators=50, oob_score=True)
    clf.fit(X, y)
    clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
    clf.fit(X, y)
    with pytest.raises(AttributeError):
        getattr(clf, "oob_score_")


def test_oob_score_consistency():
    # Make sure OOB scores are identical when random_state, estimator, and
    # training data are fixed and fitting is done twice
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    bagging = BalancedBaggingClassifier(
        KNeighborsClassifier(),
        max_samples=0.5,
        max_features=0.5,
        oob_score=True,
        random_state=1,
    )
    assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_


def test_estimators_samples():
    # Check that format of estimators_samples_ is correct and that results
    # generated at fit time can be identically reproduced at a later time
    # using data saved in object attributes.
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    # remap the y outside of the BalancedBaggingclassifier
    # _, y = np.unique(y, return_inverse=True)
    bagging = BalancedBaggingClassifier(
        LogisticRegression(solver="lbfgs", multi_class="auto"),
        max_samples=0.5,
        max_features=0.5,
        random_state=1,
        bootstrap=False,
    )
    bagging.fit(X, y)
    # Get relevant attributes
    estimators_samples = bagging.estimators_samples_
    estimators_features = bagging.estimators_features_
    estimators = bagging.estimators_
    # Test for correct formatting
    assert len(estimators_samples) == len(estimators)
    assert len(estimators_samples[0]) == len(X) // 2
    assert estimators_samples[0].dtype.kind == "i"
    # Re-fit single estimator to test for consistent sampling
    estimator_index = 0
    estimator_samples = estimators_samples[estimator_index]
    estimator_features = estimators_features[estimator_index]
    estimator = estimators[estimator_index]
    X_train = (X[estimator_samples])[:, estimator_features]
    y_train = y[estimator_samples]
    orig_coefs = estimator.steps[-1][1].coef_
    estimator.fit(X_train, y_train)
    new_coefs = estimator.steps[-1][1].coef_
    # refit on the recorded subset must reproduce the original coefficients
    assert_allclose(orig_coefs, new_coefs)


def test_max_samples_consistency():
    # Make sure validated max_samples and original max_samples are identical
    # when valid integer max_samples supplied by user
    max_samples = 100
    X, y = make_hastie_10_2(n_samples=2 * max_samples, random_state=1)
    bagging = BalancedBaggingClassifier(
        KNeighborsClassifier(),
        max_samples=max_samples,
        max_features=0.5,
        random_state=1,
    )
    bagging.fit(X, y)
    assert bagging._max_samples == max_samples


class CountDecisionTreeClassifier(DecisionTreeClassifier):
    """DecisionTreeClassifier that will memorize the number of samples seen at fit."""

    def fit(self, X, y, sample_weight=None):
        # record the class distribution actually passed to this estimator
        self.class_counts_ = Counter(y)
        return super().fit(X, y, sample_weight=sample_weight)


@pytest.mark.parametrize(
    "sampler, n_samples_bootstrap",
    [
        (None, 15),
        (RandomUnderSampler(), 15),  # under-sampling with sample_indices_
        (ClusterCentroids(), 15),  # under-sampling without sample_indices_
        (RandomOverSampler(), 40),  # over-sampling with sample_indices_
        (SMOTE(), 40),  # over-sampling without sample_indices_
    ],
)
def test_balanced_bagging_classifier_samplers(sampler, n_samples_bootstrap):
    # check that we can pass any kind of sampler to a bagging classifier
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = BalancedBaggingClassifier(
        base_estimator=CountDecisionTreeClassifier(),
        n_estimators=2,
        sampler=sampler,
        random_state=0,
    )
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    # check that we have balanced class with the right counts of class
    # sample depending on the sampling strategy
    assert_array_equal(
        list(clf.estimators_[0][-1].class_counts_.values()), n_samples_bootstrap
    )


@pytest.mark.parametrize("replace", [True, False])
def test_balanced_bagging_classifier_with_function_sampler(replace):
    # check that we can provide a FunctionSampler in BalancedBaggingClassifier
    X, y = make_classification(
        n_samples=1_000,
        n_features=10,
        n_classes=2,
        weights=[0.3, 0.7],
        random_state=0,
    )

    def roughly_balanced_bagging(X, y, replace=False):
        """Implementation of Roughly Balanced Bagging for binary problem."""
        # find the minority and majority classes
        class_counts = Counter(y)
        majority_class = max(class_counts, key=class_counts.get)
        minority_class = min(class_counts, key=class_counts.get)
        # compute the number of sample to draw from the majority class using
        # a negative binomial distribution
        # NOTE(review): uses the global numpy RNG (unseeded) — draws differ
        # between runs; presumably acceptable for this loose assertion.
        n_minority_class = class_counts[minority_class]
        n_majority_resampled = np.random.negative_binomial(n=n_minority_class, p=0.5)
        # draw randomly with or without replacement
        majority_indices = np.random.choice(
            np.flatnonzero(y == majority_class),
            size=n_majority_resampled,
            replace=replace,
        )
        minority_indices = np.random.choice(
            np.flatnonzero(y == minority_class),
            size=n_minority_class,
            replace=replace,
        )
        indices = np.hstack([majority_indices, minority_indices])
        return X[indices], y[indices]

    # Roughly Balanced Bagging
    rbb = BalancedBaggingClassifier(
        base_estimator=CountDecisionTreeClassifier(),
        n_estimators=2,
        sampler=FunctionSampler(
            func=roughly_balanced_bagging, kw_args={"replace": replace}
        ),
    )
    rbb.fit(X, y)
    for estimator in rbb.estimators_:
        class_counts = estimator[-1].class_counts_
        # resampled class ratio should be roughly balanced
        assert (class_counts[0] / class_counts[1]) > 0.8
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section

T = TypeVar('T')
JSONType = Any
# Optional callback applied to every deserialized response.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False


def build_create_or_update_request_initial(
    resource_group_name: str,
    image_name: str,
    subscription_id: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request for the create-or-update LRO."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]

    api_version = "2016-04-30-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "imageName": _SERIALIZER.url("image_name", image_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )


def build_delete_request_initial(
    resource_group_name: str,
    image_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request for the delete LRO."""
    api_version = "2016-04-30-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "imageName": _SERIALIZER.url("image_name", image_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_get_request(
    resource_group_name: str,
    image_name: str,
    subscription_id: str,
    *,
    expand: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single image; ``expand`` is optional."""
    api_version = "2016-04-30-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "imageName": _SERIALIZER.url("image_name", image_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if expand is not None:
        query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_by_resource_group_request(
    resource_group_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing images within one resource group."""
    api_version = "2016-04-30-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images')
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all images in the subscription."""
    api_version = "2016-04-30-preview"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


class ImagesOperations(object):
    """ImagesOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.compute.v2016_04_30_preview.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def _create_or_update_initial(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: "_models.Image",
        **kwargs: Any
    ) -> "_models.Image":
        # Issue the initial PUT of the create-or-update LRO and deserialize
        # the immediate (200/201) response body.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Image"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'Image')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 and 201 carry an Image body.
        if response.status_code == 200:
            deserialized = self._deserialize('Image', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('Image', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'}  # type: ignore


    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        image_name: str,
        parameters: "_models.Image",
        **kwargs: Any
    ) -> LROPoller["_models.Image"]:
        """Create or update an image.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param image_name: The name of the image.
        :type image_name: str
        :param parameters: Parameters supplied to the Create Image operation.
        :type parameters: ~azure.mgmt.compute.v2016_04_30_preview.models.Image
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either Image or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_04_30_preview.models.Image]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Image"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                image_name=image_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into an Image model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('Image', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'}  # type: ignore

    def _delete_initial(
        self,
        resource_group_name: str,
        image_name: str,
        **kwargs: Any
    ) -> Optional["_models.OperationStatusResponse"]:
        # Issue the initial DELETE of the delete LRO; body is only present on 200.
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.OperationStatusResponse"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        
        request = build_delete_request_initial(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('OperationStatusResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'}  # type: ignore


    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        image_name: str,
        **kwargs: Any
    ) -> LROPoller["_models.OperationStatusResponse"]:
        """Deletes an Image.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param image_name: The name of the image.
        :type image_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either OperationStatusResponse or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2016_04_30_preview.models.OperationStatusResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationStatusResponse"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a token.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                image_name=image_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response.
            response = pipeline_response.http_response
            deserialized = self._deserialize('OperationStatusResponse', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'}  # type: ignore

    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        image_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.Image":
        """Gets an image.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param image_name: The name of the image.
        :type image_name: str
        :param expand: The expand expression to apply on the operation.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Image, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2016_04_30_preview.models.Image
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Image"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        
        request = build_get_request(
            resource_group_name=resource_group_name,
            image_name=image_name,
            subscription_id=self._config.subscription_id,
            expand=expand,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('Image', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images/{imageName}'}  # type: ignore


    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.ImageListResult"]:
        """Gets the list of images under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ImageListResult or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2016_04_30_preview.models.ImageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImageListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages reuse next_link.
            if not next_link:
                
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                
                request = build_list_by_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Return (continuation link, iterator over this page's items).
            deserialized = self._deserialize("ImageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/images'}  # type: ignore

    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.ImageListResult"]:
        """Gets the list of Images in the subscription. Use nextLink property in the response to get
        the next page of Images. Do this till nextLink is null to fetch all the Images.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ImageListResult or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2016_04_30_preview.models.ImageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ImageListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages reuse next_link.
            if not next_link:
                
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)

            else:
                
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Return (continuation link, iterator over this page's items).
            deserialized = self._deserialize("ImageListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/images'}  # type: ignore
"""Manage flooding to ports on VLANs."""

# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from faucet import valve_of
from faucet import valve_packet


class ValveFloodManager(object):
    """Implement dataplane based flooding for standalone dataplanes."""

    # Enumerate possible eth_dst flood destinations.
    # First bool says whether to flood this destination, if the VLAN
    # has unicast flooding enabled (if unicast flooding is enabled,
    # then we flood all destination eth_dsts).
    FLOOD_DSTS = (
        (True, None, None),
        (False, valve_packet.BRIDGE_GROUP_ADDRESS, valve_packet.mac_byte_mask(3)), # 802.x
        (False, '01:00:5E:00:00:00', valve_packet.mac_byte_mask(3)), # IPv4 multicast
        (False, '33:33:00:00:00:00', valve_packet.mac_byte_mask(2)), # IPv6 multicast
        (False, valve_of.mac.BROADCAST_STR, None), # flood on ethernet broadcasts
    )

    def __init__(self, flood_table, flood_priority,
                 use_group_table, groups):
        # flood_table: the OpenFlow table flood rules are installed into.
        # flood_priority: base priority for flood flowmods (incremented per dst).
        # use_group_table: whether to emit group-table based flood rules.
        # groups: group-table allocator (provides get_entry()).
        self.flood_table = flood_table
        self.flood_priority = flood_priority
        self.use_group_table = use_group_table
        self.groups = groups

    @staticmethod
    def _vlan_all_ports(vlan, exclude_unicast):
        """Return list of all ports that should be flooded to on a VLAN."""
        return vlan.flood_ports(vlan.get_ports(), exclude_unicast)

    @staticmethod
    def _build_flood_local_rule_actions(vlan, exclude_unicast, in_port):
        """Return a list of flood actions to flood packets from a port."""
        flood_acts = []
        exclude_ports = []
        # Don't reflect flood traffic back out other members of in_port's LAG.
        if in_port.lacp:
            lags = vlan.lags()
            exclude_ports = lags[in_port.lacp]
        tagged_ports = vlan.tagged_flood_ports(exclude_unicast)
        flood_acts.extend(valve_of.flood_tagged_port_outputs(
            tagged_ports, in_port, exclude_ports=exclude_ports))
        untagged_ports = vlan.untagged_flood_ports(exclude_unicast)
        flood_acts.extend(valve_of.flood_untagged_port_outputs(
            untagged_ports, in_port, exclude_ports=exclude_ports))
        return flood_acts

    def _build_flood_rule_actions(self, vlan, exclude_unicast, in_port):
        # Standalone case: flooding is local-only (subclasses add stack ports).
        return self._build_flood_local_rule_actions(
            vlan, exclude_unicast, in_port)

    def _build_flood_rule_for_port(self, vlan, eth_dst, eth_dst_mask,
                                   exclude_unicast, command, flood_priority,
                                   port, preflood_acts):
        """Build one flood flowmod for traffic arriving on a single port.

        preflood_acts are prepended to the flood actions (e.g. mirror outputs).
        """
        ofmsgs = []
        match = self.flood_table.match(
            vlan=vlan, in_port=port.number,
            eth_dst=eth_dst, eth_dst_mask=eth_dst_mask)
        flood_acts = self._build_flood_rule_actions(
            vlan, exclude_unicast, port)
        ofmsgs.append(self.flood_table.flowmod(
            match=match,
            command=command,
            inst=[valve_of.apply_actions(preflood_acts + flood_acts)],
            priority=flood_priority))
        return ofmsgs

    def _build_unmirrored_flood_rules(self, vlan, eth_dst, eth_dst_mask,
                                      exclude_unicast, command, flood_priority):
        """Build per-port flood rules (no mirroring) for one eth_dst class."""
        ofmsgs = []
        for port in self._vlan_all_ports(vlan, exclude_unicast):
            ofmsgs.extend(self._build_flood_rule_for_port(
                vlan, eth_dst, eth_dst_mask,
                exclude_unicast, command, flood_priority,
                port, []))
        return ofmsgs

    def _build_mirrored_flood_rules(self, vlan, eth_dst, eth_dst_mask,
                                    exclude_unicast, command, flood_priority):
        """Build flood rules for mirrored ports; mirror output happens first."""
        ofmsgs = []
        mirrored_ports = vlan.mirrored_ports()
        for port in mirrored_ports:
            mirror_acts = [valve_of.output_port(port.mirror)]
            ofmsgs.extend(self._build_flood_rule_for_port(
                vlan, eth_dst, eth_dst_mask,
                exclude_unicast, command, flood_priority,
                port, mirror_acts))
        return ofmsgs

    def _build_multiout_flood_rules(self, vlan, command):
        """Build flooding rules for a VLAN without using groups."""
        # flood_priority increments so mirrored rules win over unmirrored,
        # and later FLOOD_DSTS entries over earlier ones.
        flood_priority = self.flood_priority
        ofmsgs = []
        for unicast_eth_dst, eth_dst, eth_dst_mask in self.FLOOD_DSTS:
            if unicast_eth_dst and not vlan.unicast_flood:
                continue
            ofmsgs.extend(self._build_unmirrored_flood_rules(
                vlan, eth_dst, eth_dst_mask,
                unicast_eth_dst, command, flood_priority))
            flood_priority += 1
            ofmsgs.extend(self._build_mirrored_flood_rules(
                vlan, eth_dst, eth_dst_mask,
                unicast_eth_dst, command, flood_priority))
            flood_priority += 1
        return ofmsgs

    @staticmethod
    def _build_group_buckets(vlan, unicast_flood):
        """Return group buckets covering all tagged+untagged flood ports."""
        buckets = []
        tagged_flood_ports = vlan.tagged_flood_ports(unicast_flood)
        buckets.extend(valve_of.group_flood_buckets(tagged_flood_ports, False))
        untagged_flood_ports = vlan.untagged_flood_ports(unicast_flood)
        buckets.extend(valve_of.group_flood_buckets(untagged_flood_ports, True))
        return buckets

    def _build_group_flood_rules(self, vlan, modify, command):
        """Build flooding rules for a VLAN using groups."""
        flood_priority = self.flood_priority
        broadcast_group = self.groups.get_entry(
            vlan.vid,
            self._build_group_buckets(vlan, False))
        # Unicast group uses an offset group id so it never collides with the
        # broadcast group for the same VLAN.
        unicast_group = self.groups.get_entry(
            vlan.vid + valve_of.VLAN_GROUP_OFFSET,
            self._build_group_buckets(vlan, vlan.unicast_flood))
        ofmsgs = []
        if modify:
            ofmsgs.append(broadcast_group.modify())
            ofmsgs.append(unicast_group.modify())
        else:
            ofmsgs.extend(broadcast_group.add())
            ofmsgs.extend(unicast_group.add())
        for unicast_eth_dst, eth_dst, eth_dst_mask in self.FLOOD_DSTS:
            if unicast_eth_dst and not vlan.unicast_flood:
                continue
            group = broadcast_group
            # eth_dst None means the catch-all (unicast) flood entry.
            if not eth_dst:
                group = unicast_group
            match = self.flood_table.match(
                vlan=vlan, eth_dst=eth_dst, eth_dst_mask=eth_dst_mask)
            ofmsgs.append(self.flood_table.flowmod(
                match=match,
                command=command,
                inst=[valve_of.apply_actions([valve_of.group_act(group.group_id)])],
                priority=flood_priority))
            flood_priority += 1
        return ofmsgs

    def build_flood_rules(self, vlan, modify=False):
        """Add flows to flood packets to unknown destinations on a VLAN."""
        # TODO: group table support is still fairly uncommon, so
        # group tables are currently optional.
        command = valve_of.ofp.OFPFC_ADD
        if modify:
            command = valve_of.ofp.OFPFC_MODIFY_STRICT
        if self.use_group_table:
            hairpin_ports = vlan.hairpin_ports()
            # TODO: hairpin flooding modes.
            # TODO: avoid loopback flood on LAG ports
            # Group flooding cannot express hairpin; fall back to multi-output
            # rules when any hairpin port exists on the VLAN.
            if not hairpin_ports:
                return self._build_group_flood_rules(vlan, modify, command)
        return self._build_multiout_flood_rules(vlan, command)

    @staticmethod
    def edge_learn_port(_other_valves, pkt_meta):
        """Possibly learn a host on a port.

        Args:
            other_valves (list): All Valves other than this one.
            pkt_meta (PacketMeta): PacketMeta instance for packet received.
        Returns:
            port to learn host on.
        """
        return pkt_meta.port


class ValveFloodStackManager(ValveFloodManager):
    """Implement dataplane based flooding for stacked dataplanes."""

    def __init__(self, flood_table, flood_priority,
                 use_group_table, groups,
                 stack, stack_ports,
                 dp_shortest_path_to_root, shortest_path_port):
        # stack: this DP's stack configuration dict.
        # stack_ports: ports connecting this DP to other stack members.
        # dp_shortest_path_to_root: callable returning this DP's distance to root.
        # shortest_path_port: callable mapping a DP name to the local port on
        #   the shortest path towards that DP.
        super(ValveFloodStackManager, self).__init__(
            flood_table, flood_priority, use_group_table, groups)
        self.stack = stack
        self.stack_ports = stack_ports
        my_root_distance = dp_shortest_path_to_root()
        self.shortest_path_port = shortest_path_port
        # Partition stack ports by whether the peer DP is closer to or further
        # from the stack root than this DP (equal-distance peers are in neither).
        self.towards_root_stack_ports = []
        self.away_from_root_stack_ports = []
        for port in self.stack_ports:
            peer_dp = port.stack['dp']
            peer_root_distance = peer_dp.shortest_path_to_root()
            if peer_root_distance > my_root_distance:
                self.away_from_root_stack_ports.append(port)
            elif peer_root_distance < my_root_distance:
                self.towards_root_stack_ports.append(port)

    def _build_flood_rule_actions(self, vlan, exclude_unicast, in_port):
        """Calculate flooding destinations based on this DP's position.

        If a standalone switch, then flood to local VLAN ports.
        If a distributed switch, see the following example.

                               Hosts
                               ||||
                               ||||
                 +----+       +----+       +----+
              ---+1   |       |1234|       |   1+---
        Hosts ---+2   |       |    |       |   2+--- Hosts
              ---+3   |       |    |       |   3+---
              ---+4  5+-------+5  6+-------+5  4+---
                 +----+       +----+       +----+

                 Root DP

        The basic strategy is flood-towards-root. The root
        reflects the flood back out. There are no loops and flooding
        is done entirely in the dataplane.

        On the root switch (left), flood destinations are:

        1: 2 3 4 5(s)
        2: 1 3 4 5(s)
        3: 1 2 4 5(s)
        4: 1 2 3 5(s)
        5: 1 2 3 4 5(s, note reflection)

        On the middle switch:

        1: 5(s)
        2: 5(s)
        3: 5(s)
        4: 5(s)
        5: 1 2 3 4 6(s)
        6: 5(s)

        On the rightmost switch:

        1: 5(s)
        2: 5(s)
        3: 5(s)
        4: 5(s)
        5: 1 2 3 4
        """
        local_flood_actions = self._build_flood_local_rule_actions(
            vlan, exclude_unicast, in_port)
        away_flood_actions = valve_of.flood_tagged_port_outputs(
            self.away_from_root_stack_ports, in_port)
        toward_flood_actions = valve_of.flood_tagged_port_outputs(
            self.towards_root_stack_ports, in_port)
        flood_all_except_self = away_flood_actions + local_flood_actions

        # If we're the root of a distributed switch..
        if self._dp_is_root():
            # If the input port was local, then flood local VLAN and stacks.
            if self._port_is_dp_local(in_port):
                return flood_all_except_self
            # If input port non-local, then flood outward again
            return [valve_of.output_in_port()] + flood_all_except_self

        # We are not the root of the distributed switch
        # If input port was connected to a switch closer to the root,
        # then flood outwards (local VLAN and stacks further than us)
        if in_port in self.towards_root_stack_ports:
            return flood_all_except_self

        # If input port local or from a further away switch, flood
        # towards the root.
        return toward_flood_actions

    def build_flood_rules(self, vlan, modify=False):
        """Add flows to flood packets to unknown destinations on a VLAN."""
        command = valve_of.ofp.OFPFC_ADD
        if modify:
            command = valve_of.ofp.OFPFC_MODIFY_STRICT
        # TODO: group tables for stacking
        return self._build_multiout_flood_rules(vlan, command)

    def _vlan_all_ports(self, vlan, exclude_unicast):
        # Stack ports always participate in VLAN flooding.
        vlan_all_ports = super(ValveFloodStackManager, self)._vlan_all_ports(
            vlan, exclude_unicast)
        vlan_all_ports.extend(self.away_from_root_stack_ports)
        vlan_all_ports.extend(self.towards_root_stack_ports)
        return vlan_all_ports

    def _dp_is_root(self):
        """Return True if this datapath is the root of the stack."""
        # Only the stack root's config carries a 'priority' key.
        return 'priority' in self.stack

    def _port_is_dp_local(self, port):
        """Return True if port is on this datapath."""
        if (port in self.away_from_root_stack_ports or
                port in self.towards_root_stack_ports):
            return False
        return True

    @staticmethod
    def _edge_dp_for_host(other_valves, pkt_meta):
        """Simple distributed unicast learning.

        Args:
            other_valves (list): All Valves other than this one.
            pkt_meta (PacketMeta): PacketMeta instance for packet received.
        Returns:
            Valve instance or None (of edge datapath where packet received)
        """
        # TODO: simplest possible unicast learning.
        # We find just one port that is the shortest unicast path to
        # the destination. We could use other factors (eg we could
        # load balance over multiple ports based on destination MAC).
        # TODO: each DP learns independently. An edge DP could
        # call other valves so they learn immediately without waiting
        # for packet in.
        # TODO: edge DPs could use a different forwarding algorithm
        # (for example, just default switch to a neighbor).
        # Find port that forwards closer to destination DP that
        # has already learned this host (if any).
        # TODO: stacking handles failure of redundant links between DPs,
        # but not failure of an entire DP (should be able to find
        # shortest path via alternate DP).
        eth_src = pkt_meta.eth_src
        vlan_vid = pkt_meta.vlan.vid
        for other_valve in other_valves:
            other_dp_host_cache = other_valve.dp.vlans[vlan_vid].host_cache
            if eth_src in other_dp_host_cache:
                host = other_dp_host_cache[eth_src]
                # A host cached against a non-stack port marks its edge DP.
                if host.port.stack is None:
                    return other_valve.dp
        return None

    def edge_learn_port(self, other_valves, pkt_meta):
        """Possibly learn a host on a port.

        Args:
            other_valves (list): All Valves other than this one.
            pkt_meta (PacketMeta): PacketMeta instance for packet received.
        Returns:
            port to learn host on, or None.
        """
        # Packets from non-stack ports are learned locally as usual.
        if pkt_meta.port.stack is None:
            return super(ValveFloodStackManager, self).edge_learn_port(
                other_valves, pkt_meta)
        edge_dp = self._edge_dp_for_host(other_valves, pkt_meta)
        # No edge DP may have learned this host yet.
        if edge_dp is None:
            return None
        return self.shortest_path_port(edge_dp.name)
# Copyright 2016 Joel Dunham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""This module contains some multithreading worker and queue logic plus the
functionality -- related to foma compilation and LM estimation -- that the
worker thread initiates.

The foma worker compiles foma FST phonology, morphology and morphophonology
scripts and estimates morpheme language models.  Having a worker perform these
tasks in a separate thread from that processing the HTTP request allows us to
immediately respond to the user.

The foma worker can only run a callable that is a global in
:mod:`onlinelinguisticdatabase.lib.foma_worker` and which takes keyword
arguments.  Example usage::

    from onlinelinguisticdatabase.lib.foma_worker import foma_worker_q
    foma_worker_q.put({
        'id': h.generate_salt(),
        'func': 'compile_foma_script',
        'args': {'model_name': u'Phonology', 'model_id': phonology.id,
            'script_dir_path': phonology_dir_path, 'user_id': session['user'].id,
            'verification_string': u'defined phonology: ',
            'timeout': h.phonology_compile_timeout}
    })

Cf. http://www.chrismoos.com/2009/03/04/pylons-worker-threads.

For an introduction to Python threading, see
http://www.ibm.com/developerworks/aix/library/au-threadingpython/.

"""

import Queue
import threading
import logging
from uuid import uuid4
import onlinelinguisticdatabase.lib.helpers as h
from onlinelinguisticdatabase.model.meta import Session
import onlinelinguisticdatabase.model as model

log = logging.getLogger(__name__)


################################################################################
# WORKER THREAD & QUEUE
################################################################################

# NOTE: maxsize=1 -- producers block while another job is already queued.
foma_worker_q = Queue.Queue(1)

class FomaWorkerThread(threading.Thread):
    """Define the foma worker.
    """
    def run(self):
        while True:
            msg = foma_worker_q.get()
            try:
                # 'func' names a module-level global in this module; 'args'
                # are passed through as its keyword arguments.
                globals()[msg.get('func')](**msg.get('args'))
            except Exception, e:
                # Log and keep the worker alive -- a failed job must not kill
                # the thread.
                log.warn('Unable to process in worker thread: %s' % e)
            foma_worker_q.task_done()

def start_foma_worker():
    """Called in :mod:`onlinelinguisticdatabase.config.environment.py`.
    """
    # Two daemon workers consume from the shared queue.
    foma_worker = FomaWorkerThread()
    foma_worker.setDaemon(True)
    foma_worker.start()
    foma_worker2 = FomaWorkerThread()
    foma_worker2.setDaemon(True)
    foma_worker2.start()


################################################################################
# PHONOLOGY
################################################################################

def compile_phonology(**kwargs):
    """Compile the foma script of a phonology and save it to the db with values
    that indicate compilation success.

    """
    phonology = Session.query(model.Phonology).get(kwargs['phonology_id'])
    phonology.compile(kwargs['timeout'])
    phonology.datetime_modified = h.now()
    phonology.modifier_id = kwargs['user_id']
    Session.commit()


################################################################################
# MORPHOLOGY
################################################################################

def generate_and_compile_morphology(**kwargs):
    """Generate a foma script for a morphology and (optionally) compile it.

    :param int kwargs['morphology_id']: id of a morphology.
    :param bool kwargs['compile']: if True, the script will be generated *and*
        compiled.
    :param int kwargs['user_id']: id of the user model performing the
        generation/compilation.
    :param float kwargs['timeout']: how many seconds to wait before killing the
        foma compile process.

    """
    morphology = Session.query(model.Morphology).get(kwargs['morphology_id'])
    unknown_category = h.unknown_category
    try:
        morphology.write(unknown_category)
    except Exception, e:
        # Best-effort: record nothing here; generate_attempt below still moves.
        log.warn(e)
        pass
    if kwargs.get('compile', True):
        try:
            morphology.compile(kwargs['timeout'])
        except Exception, e:
            log.warn(e)
            pass
    morphology.generate_attempt = unicode(uuid4())
    morphology.modifier_id = kwargs['user_id']
    morphology.datetime_modified = h.now()
    Session.commit()


################################################################################
# MORPHEME LANGUAGE MODEL
################################################################################

def generate_language_model(**kwargs):
    """Write the requisite files (corpus, vocab, ARPA, LMTrie) of a morpheme LM
    to disk.

    :param str kwargs['morpheme_language_model_id']: ``id`` value of a morpheme
        LM.
    :param int/float kwargs['timeout']: seconds to allow for ARPA file creation.
    :param str kwargs['user_id']: ``id`` value of an OLD user.
    :returns: ``None``; side-effect is to change relevant attributes of LM
        object.

    """
    lm = Session.query(model.MorphemeLanguageModel).get(kwargs['morpheme_language_model_id'])
    trie_path = lm.get_file_path('trie')
    # The trie's mtime is compared before/after to decide whether generation
    # actually produced a new trie.
    trie_mod_time = lm.get_modification_time(trie_path)
    lm.generate_succeeded = False
    try:
        lm.write_corpus()
    except Exception, e:
        lm.generate_message = u'Error writing the corpus file. %s' % e
    try:
        lm.write_vocabulary()
    except Exception, e:
        lm.generate_message = u'Error writing the vocabulary file. %s' % e
    try:
        lm.write_arpa(kwargs['timeout'])
    except Exception, e:
        lm.generate_message = u'Error writing the ARPA file. %s' % e
    try:
        lm.generate_trie()
    except Exception, e:
        lm.generate_message = u'Error generating the LMTrie instance. %s' % e
    else:
        if lm.get_modification_time(trie_path) != trie_mod_time:
            lm.generate_succeeded = True
            lm.generate_message = u'Language model successfully generated.'
        else:
            lm.generate_message = u'Error generating the LMTrie instance.'
    lm.generate_attempt = unicode(uuid4())
    lm.modifier_id = kwargs['user_id']
    lm.datetime_modified = h.now()
    Session.commit()

def compute_perplexity(**kwargs):
    """Evaluate the LM by attempting to calculate its perplexity and changing
    some attribute values to reflect the attempt.

    """
    lm = Session.query(model.MorphemeLanguageModel).get(kwargs['morpheme_language_model_id'])
    timeout = kwargs['timeout']
    iterations = 5
    try:
        lm.perplexity = lm.compute_perplexity(timeout, iterations)
    except Exception:
        lm.perplexity = None
    if lm.perplexity is None:
        lm.perplexity_computed = False
    else:
        lm.perplexity_computed = True
    lm.perplexity_attempt = unicode(uuid4())
    lm.modifier_id = kwargs['user_id']
    lm.datetime_modified = h.now()
    Session.commit()


################################################################################
# MORPHOLOGICAL PARSER (MORPHOPHONOLOGY)
################################################################################

def generate_and_compile_parser(**kwargs):
    """Write the parser's morphophonology FST script to file and compile it if
    ``compile`` is True.  Generate the language model and pickle it.

    """
    parser = Session.query(model.MorphologicalParser).get(kwargs['morphological_parser_id'])
    parser.changed = False
    parser.write()
    if kwargs.get('compile', True):
        parser.compile(kwargs['timeout'])
    parser.modifier_id = kwargs['user_id']
    parser.datetime_modified = h.now()
    # Only clear the cache when (re)generation actually changed the parser.
    if parser.changed:
        parser.cache.clear(persist=True)
    Session.commit()
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""Blobstore support classes.

Classes:

  DownloadRewriter:
    Rewriter responsible for transforming an application response to one
    that serves a blob to the user.

  CreateUploadDispatcher:
    Creates a dispatcher that is added to dispatcher chain.  Handles uploads
    by storing blobs rewriting requests and returning a redirect.
"""

import cgi
import cStringIO
import logging
import mimetools
import re

from google.appengine.api import apiproxy_stub_map
from google.appengine.api import blobstore
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api.files import file_service_stub
from google.appengine.tools import dev_appserver_upload


UPLOAD_URL_PATH = '_ah/upload/'

UPLOAD_URL_PATTERN = '/%s(.*)' % UPLOAD_URL_PATH

AUTO_MIME_TYPE = 'application/vnd.google.appengine.auto'

ERROR_RESPONSE_TEMPLATE = """
<html>
  <head>
    <title>%(response_code)d %(response_string)s</title>
  </head>
  <body text=#000000 bgcolor=#ffffff>
    <h1>Error: %(response_string)s</h1>
    <h2>%(response_text)s</h2>
  </body>
</html>
"""


def GetBlobStorage():
  """Get blob-storage from api-proxy stub map.

  Returns:
    BlobStorage instance as registered with blobstore API in stub map.
  """
  return apiproxy_stub_map.apiproxy.GetStub('blobstore').storage


def ParseRangeHeader(range_header):
  """Parse HTTP Range header.

  Args:
    range_header: A str representing the value of a range header as retrieved
      from Range or X-AppEngine-BlobRange.

  Returns:
    Tuple (start, end):
      start: Start index of blob to retrieve.  May be negative index.
      end: None or end index.  End index is exclusive.
    (None, None) if there is a parse error.
  """
  if not range_header:
    return None, None
  try:
    range_type, ranges = range_header.split('=', 1)
    if range_type != 'bytes':
      return None, None
    ranges = ranges.lstrip()
    # Multiple ranges are not supported.
    if ',' in ranges:
      return None, None
    end = None
    if ranges.startswith('-'):
      # Suffix range, e.g. '-500' means the final 500 bytes; '-0' is invalid.
      start = int(ranges)
      if start == 0:
        return None, None
    else:
      split_range = ranges.split('-', 1)
      start = int(split_range[0])
      if len(split_range) == 2 and split_range[1].strip():
        # Range end is inclusive in the header; convert to exclusive.
        end = int(split_range[1]) + 1
        # NOTE(review): this start/end sanity check is placed inside the
        # explicit-end branch (end is an int here); under Python 2 an
        # `int > None` comparison would otherwise always be True -- confirm
        # against the upstream SDK source.
        if start > end:
          return None, None
    return start, end
  except ValueError:
    return None, None


def _GetGoogleStorageFileMetadata(blob_key):
  """Retrieve metadata about a GS blob from the blob_key.

  Args:
    blob_key: The BlobKey of the blob.

  Returns:
    Tuple (size, content_type, open_key):
      size: The size of the blob.
      content_type: The content type of the blob.
      open_key: The key used as an argument to BlobStorage to open the blob
        for reading.
    (None, None, None) if the blob metadata was not found.
  """
  try:
    gs_info = datastore.Get(
        datastore.Key.from_path(file_service_stub.GS_INFO_KIND,
                                blob_key,
                                namespace=''))
    return gs_info['size'], gs_info['content_type'], gs_info['storage_key']
  except datastore_errors.EntityNotFoundError:
    return None, None, None


def _GetBlobstoreMetadata(blob_key):
  """Retrieve metadata about a blobstore blob from the blob_key.

  Args:
    blob_key: The BlobKey of the blob.

  Returns:
    Tuple (size, content_type, open_key):
      size: The size of the blob.
      content_type: The content type of the blob.
      open_key: The key used as an argument to BlobStorage to open the blob
        for reading.
    (None, None, None) if the blob metadata was not found.
  """
  try:
    blob_info = datastore.Get(
        datastore.Key.from_path(blobstore.BLOB_INFO_KIND,
                                blob_key,
                                namespace=''))
    return blob_info['size'], blob_info['content_type'], blob_key
  except datastore_errors.EntityNotFoundError:
    return None, None, None


def _GetBlobMetadata(blob_key):
  """Retrieve the metadata about a blob from the blob_key.

  Args:
    blob_key: The BlobKey of the blob.

  Returns:
    Tuple (size, content_type, open_key):
      size: The size of the blob.
      content_type: The content type of the blob.
      open_key: The key used as an argument to BlobStorage to open the blob
        for reading.
    (None, None, None) if the blob metadata was not found.
  """
  # Try Google Storage metadata first, then fall back to blobstore metadata.
  size, content_type, open_key = _GetGoogleStorageFileMetadata(blob_key)
  if size is None:
    size, content_type, open_key = _GetBlobstoreMetadata(blob_key)
  return size, content_type, open_key


def _SetRangeRequestNotSatisfiable(response, blob_size):
  """Short circuit response and return 416 error.

  Args:
    response: Response object to be rewritten.
    blob_size: The size of the blob.
  """
  response.status_code = 416
  response.status_message = 'Requested Range Not Satisfiable'
  response.body = cStringIO.StringIO('')
  response.headers['Content-Length'] = '0'
  response.headers['Content-Range'] = '*/%d' % blob_size
  del response.headers['Content-Type']


def DownloadRewriter(response, request_headers):
  """Intercepts blob download key and rewrites response with large download.

  Checks for the X-AppEngine-BlobKey header in the response.  If found, it will
  discard the body of the request and replace it with the blob content
  indicated.

  If a valid blob is not found, it will send a 404 to the client.

  If the application itself provides a content-type header, it will override
  the content-type stored in the action blob.

  If blobstore.BLOB_RANGE_HEADER header is provided, blob will be partially
  served.  If Range is present, and not blobstore.BLOB_RANGE_HEADER, will use
  Range instead.

  Args:
    response: Response object to be rewritten.
    request_headers: Original request headers.  Looks for 'Range' header to
      copy to response.
  """
  blob_key = response.headers.getheader(blobstore.BLOB_KEY_HEADER)
  if blob_key:
    del response.headers[blobstore.BLOB_KEY_HEADER]

    blob_size, blob_content_type, blob_open_key = _GetBlobMetadata(blob_key)

    # The app-provided blob range header wins over the client's Range header.
    range_header = response.headers.getheader(blobstore.BLOB_RANGE_HEADER)
    if range_header is not None:
      del response.headers[blobstore.BLOB_RANGE_HEADER]
    else:
      range_header = request_headers.getheader('Range')

    if (blob_size is not None and blob_content_type is not None and
        response.status_code == 200):
      content_length = blob_size
      start = 0
      end = content_length

      if range_header:
        start, end = ParseRangeHeader(range_header)
        if start is None:
          _SetRangeRequestNotSatisfiable(response, blob_size)
          return
        else:
          # Negative start means a suffix range; clamp into [0, blob_size).
          if start < 0:
            start = max(blob_size + start, 0)
          elif start >= blob_size:
            _SetRangeRequestNotSatisfiable(response, blob_size)
            return
          if end is not None:
            end = min(end, blob_size)
          else:
            end = blob_size
          content_length = min(end, blob_size) - start
          end = start + content_length
          response.status_code = 206
          response.status_message = 'Partial Content'
          response.headers['Content-Range'] = 'bytes %d-%d/%d' % (
              start, end - 1, blob_size)

      blob_stream = GetBlobStorage().OpenBlob(blob_open_key)
      blob_stream.seek(start)
      response.body = cStringIO.StringIO(blob_stream.read(content_length))
      response.headers['Content-Length'] = str(content_length)

      content_type = response.headers.getheader('Content-Type')
      if not content_type or content_type == AUTO_MIME_TYPE:
        response.headers['Content-Type'] = blob_content_type
      response.large_response = True

    else:
      # Either the blob was missing or the app returned a non-200; replace
      # the response with a 500.
      if response.status_code != 200:
        logging.error('Blob-serving response with status %d, expected 200.',
                      response.status_code)
      else:
        logging.error('Could not find blob with key %s.', blob_key)

      response.status_code = 500
      response.status_message = 'Internal Error'
      response.body = cStringIO.StringIO()

      if response.headers.getheader('content-type'):
        del response.headers['content-type']
      response.headers['Content-Length'] = '0'


def CreateUploadDispatcher(get_blob_storage=GetBlobStorage):
  """Function to create upload dispatcher.

  Returns:
    New dispatcher capable of handling large blob uploads.
  """

  # Deferred so this module may be imported without the dev_appserver package.
  from google.appengine.tools import dev_appserver

  class UploadDispatcher(dev_appserver.URLDispatcher):
    """Dispatcher that handles uploads."""

    def __init__(self):
      """Constructor.

      Args:
        blob_storage: A BlobStorage instance.
      """
      self.__cgi_handler = dev_appserver_upload.UploadCGIHandler(
          get_blob_storage())

    def Dispatch(self, request, outfile, base_env_dict=None):
      """Handle post dispatch.

      This dispatcher will handle all uploaded files in the POST request, store
      the results in the blob-storage, close the upload session and transform
      the original request in to one where the uploaded files have external
      bodies.

      Returns:
        New AppServerRequest indicating request forward to upload success
        handler.
      """
      if base_env_dict['REQUEST_METHOD'] != 'POST':
        outfile.write('Status: 400\n\n')
        return

      upload_key = re.match(UPLOAD_URL_PATTERN, request.relative_url).group(1)
      try:
        upload_session = datastore.Get(upload_key)
      except datastore_errors.EntityNotFoundError:
        upload_session = None

      if upload_session:
        success_path = upload_session['success_path']
        max_bytes_per_blob = upload_session['max_bytes_per_blob']
        max_bytes_total = upload_session['max_bytes_total']

        upload_form = cgi.FieldStorage(fp=request.infile,
                                       headers=request.headers,
                                       environ=base_env_dict)

        try:
          # Rewrite the multipart POST into a MIME message whose file parts
          # have been replaced by external blob references.
          mime_message_string = self.__cgi_handler.GenerateMIMEMessageString(
              upload_form,
              max_bytes_per_blob=max_bytes_per_blob,
              max_bytes_total=max_bytes_total)
          # One-shot session: consumed on first successful upload.
          datastore.Delete(upload_session)
          self.current_session = upload_session

          header_end = mime_message_string.find('\n\n') + 1
          content_start = header_end + 1
          header_text = mime_message_string[:header_end].replace('\n', '\r\n')
          content_text = mime_message_string[content_start:].replace('\n',
                                                                     '\r\n')

          complete_headers = ('%s'
                              'Content-Length: %d\r\n'
                              '\r\n') % (header_text, len(content_text))

          return dev_appserver.AppServerRequest(
              success_path,
              None,
              mimetools.Message(cStringIO.StringIO(complete_headers)),
              cStringIO.StringIO(content_text),
              force_admin=True)
        except dev_appserver_upload.InvalidMIMETypeFormatError:
          outfile.write('Status: 400\n\n')
        except dev_appserver_upload.UploadEntityTooLargeError:
          outfile.write('Status: 413\n\n')
          response = ERROR_RESPONSE_TEMPLATE % {
              'response_code': 413,
              'response_string': 'Request Entity Too Large',
              'response_text': 'Your client issued a request that was too '
                               'large.'}
          outfile.write(response)
        except dev_appserver_upload.FilenameOrContentTypeTooLargeError, ex:
          outfile.write('Status: 400\n\n')
          response = ERROR_RESPONSE_TEMPLATE % {
              'response_code': 400,
              'response_string': 'Bad Request',
              'response_text': str(ex)}
          outfile.write(response)
      else:
        logging.error('Could not find session for %s', upload_key)
        outfile.write('Status: 404\n\n')

    def EndRedirect(self, dispatched_output, original_output):
      """Handle the end of upload complete notification.

      Makes sure the application upload handler returned an appropriate status
      code.
      """
      response = dev_appserver.RewriteResponse(dispatched_output)
      logging.info('Upload handler returned %d', response.status_code)

      outfile = cStringIO.StringIO()
      outfile.write('Status: %s\n' % response.status_code)

      if response.body and len(response.body.read()) > 0:
        response.body.seek(0)
        outfile.write(response.body.read())
      else:
        outfile.write(''.join(response.headers.headers))

      outfile.seek(0)
      dev_appserver.URLDispatcher.EndRedirect(self,
                                              outfile,
                                              original_output)

  return UploadDispatcher()
from django.db import models from django.db.models import Q, F, Sum, Case, When, ExpressionWrapper from django.db.models.functions import Coalesce from django.core.validators import MinValueValidator, RegexValidator from django.core.exceptions import ValidationError from django.utils.translation import gettext_lazy as _ from django.utils import timezone from filer.fields.image import FilerImageField from cms.models.pluginmodel import CMSPlugin from datetime import timedelta from danceschool.core.models import Invoice, InvoiceItem from danceschool.core.constants import getConstant from danceschool.register.models import RegisterPaymentMethod from .managers import MerchOrderManager def get_defaultSalesTaxRate(): ''' Callable for default used by MerchItem class ''' return getConstant('registration__merchSalesTaxRate') class MerchItemCategory(models.Model): ''' A category of merchandise product. Used for determining the set of merchandise items to display as options for sale. ''' name = models.CharField( _('Name'), max_length=100, help_text=_('Give a descriptive name for this category.') ) def __str__(self): return self.name class Meta: verbose_name = _('Merchandise category') verbose_name_plural = _('Merchandise category') ordering = ['name', ] class MerchItem(models.Model): ''' A basic product that can be sold. A MerchItem may have one or more MerchItemVariants associated with it, representing things like different sizes or colors. 
''' name = models.CharField( _('Name'), max_length=100, help_text=_('Give a descriptive name for this item.') ) category = models.ForeignKey( MerchItemCategory, help_text=_('Used on product pages to determine what items to list.'), on_delete=models.SET_NULL, null=True ) description = models.TextField( _('Item Description'), null=True, blank=True, help_text=_('Provide a full item description for customers.') ) defaultPrice = models.FloatField( _('Default price'), default=0, validators=[MinValueValidator(0)], help_text=_( 'This price may be overridden by a particular item variant.' ) ) salesTaxRate = models.FloatField( _('Sales tax rate'), default=get_defaultSalesTaxRate, validators=[MinValueValidator(0)], help_text=_( 'The sales tax percentage rate to be applied to this item (e.g. ' + 'enter \'10\' to apply 10 percent sales tax).' ) ) photo = FilerImageField( verbose_name=_('Photo'), on_delete=models.SET_NULL, blank=True, null=True, related_name='item_photo', help_text=_('Individual item variants may have their own photos.') ) disabled = models.BooleanField( _('Item disabled'), default=False, help_text=_( 'If checked, then this item will not be available for purchase, ' 'regardless of current inventory.' ) ) creationDate = models.DateTimeField(_('Creation Date'), auto_now_add=True) @property def fullName(self): return self.name @property def soldOut(self): return self.item_variant.exclude(soldOut=True).exists() @property def numVariants(self): return self.item_variant.count() numVariants.fget.short_description = _('# Variants') def __str__(self): return self.name class Meta: verbose_name = _('Merchandise item') verbose_name_plural = _('Merchandise items') class MerchItemVariant(models.Model): ''' An individual variant of a MerchItem. For example, a particular size or color. 
    '''
    item = models.ForeignKey(
        MerchItem, on_delete=models.CASCADE, related_name='item_variant',
        verbose_name=_('Item'),
    )

    sku = models.CharField(
        _('SKU'), unique=True, max_length=100,
        validators=[RegexValidator(regex=r'^[a-zA-Z\-_0-9]+$')],
        help_text=_(
            'The SKU for this item variant.'
        )
    )

    name = models.CharField(
        _('Name'), max_length=100, null=True, blank=True,
        help_text=_(
            'Give a unique name for this variant (e.g. "Size Medium")'
        )
    )

    price = models.FloatField(
        _('Price'), null=True, blank=True, validators=[MinValueValidator(0)],
        help_text=_(
            'If specified, then price supercedes the item default price.'
        )
    )

    photo = FilerImageField(
        verbose_name=_('Photo'), on_delete=models.SET_NULL, blank=True,
        null=True, related_name='itemvariant_photo',
        help_text=_('A photo specific to this variant, if applicable.')
    )

    originalQuantity = models.PositiveIntegerField(
        _('Original Quantity'),
        help_text=_(
            'For inventory purposes, enter an initial quantity here'
        ),
        validators=[MinValueValidator(0)]
    )

    # This is stored as a database field rather than dynamically generated as
    # a property, so that we can easily construct database queries that only
    # include items/variants that are not sold out.  On MerchItem, this is a
    # property.
    soldOut = models.BooleanField(_('Sold out'), default=False)

    @property
    def fullName(self):
        # e.g. "T-Shirt: Size Medium"
        return '{}: {}'.format(self.item.name, self.name)

    @property
    def currentInventory(self):
        # Inventory = initial stock, plus manual adjustments, minus the
        # quantity in all orders that are neither unsubmitted nor cancelled.
        # Each Sum() can return None on an empty queryset, hence the `or 0`
        # guards.
        return (
            (self.originalQuantity or 0) +
            (self.quantity_adjustments.aggregate(total=Sum('amount')).get('total', 0) or 0) -
            (
                self.orders.exclude(
                    order__status__in=[
                        MerchOrder.OrderStatus.unsubmitted,
                        MerchOrder.OrderStatus.cancelled,
                    ]
                ).aggregate(total=Sum('quantity')).get('total', 0) or 0
            )
        )
    currentInventory.fget.short_description = _('Current inventory')

    def getPrice(self):
        # Variant price wins over the item-level default when set.
        if self.price is not None:
            return self.price
        return self.item.defaultPrice

    def updateSoldOut(self, commit=True):
        '''
        This should be called whenever an order is completed or inventory is
        adjusted.
        '''
        changed = False

        # Sync the cached soldOut flag with the computed inventory; only hit
        # the database when the flag actually flipped.
        if self.currentInventory > 0 and self.soldOut:
            self.soldOut = False
            changed = True
        elif self.currentInventory <= 0 and not self.soldOut:
            self.soldOut = True
            changed = True

        if commit and changed:
            self.save()

    def save(self, *args, **kwargs):
        '''
        Update the sold out status of the item variant.
        '''
        # commit=False because the flag is persisted by this save() itself.
        self.updateSoldOut(commit=False)
        super().save(*args, **kwargs)

    def __str__(self):
        return '{} ({})'.format(self.fullName, self.sku)

    class Meta:
        verbose_name = _('Item variant')
        verbose_name_plural = _('Item variants')
        unique_together = ('item', 'name',)


class MerchQuantityAdjustment(models.Model):
    # Append-only inventory adjustment ledger for a single item variant.

    variant = models.ForeignKey(
        MerchItemVariant, on_delete=models.CASCADE,
        related_name='quantity_adjustments'
    )

    # May be negative (stock removed) or positive (stock added).
    amount = models.IntegerField(
        _('Change in inventory quantity'), default=0
    )

    submissionDate = models.DateTimeField(_('Submission Date'), auto_now_add=True)

    def save(self, *args, **kwargs):
        '''
        Update the sold out status of the associated item variant.
        '''
        super().save(*args, **kwargs)
        self.variant.updateSoldOut()

    def delete(self, *args, **kwargs):
        '''
        Quantity adjustments cannot be deleted.  Another quantity adjustment
        is instead submitted negating the quantity of this adjustment.
        '''
        # NOTE: intentionally does not call super().delete(); the ledger is
        # append-only and this row survives, offset by the new adjustment.
        new_adjustment = MerchQuantityAdjustment(
            variant=self.variant, amount=-1 * self.amount
        )
        new_adjustment.save()

    def __str__(self):
        # NOTE(review): the string is formatted *before* being passed to
        # gettext, so the template itself is what gets translated here.
        return str(_('Inventory adjustment for {itemName}, {submissionDate}'.format(
            itemName=self.variant.fullName, submissionDate=self.submissionDate)
        ))


class MerchOrder(models.Model):
    # Lifecycle states of an order; stored as short codes.
    class OrderStatus(models.TextChoices):
        unsubmitted = ('UN', _('Not yet submitted'))
        submitted = ('SU', _('Submitted'))
        approved = ('AP', _('Approved for fulfillment'))
        fulfilled = ('F', _('Fulfilled'))
        cancelled = ('C', _('Cancelled'))
        fullRefund = ('R', _('Refunded in full'))

    status = models.CharField(
        _('Order status'), max_length=2, choices=OrderStatus.choices,
        default=OrderStatus.unsubmitted,
        help_text=_(
            'Use the order status to keep track of submission, processing, ' +
            'shipment (if applicable), and fulfillment.'
        )
    )

    invoice = models.OneToOneField(
        Invoice, on_delete=models.CASCADE, related_name='merchOrder',
        help_text=_(
            'All merchandise orders must be associated with an invoice.'
        )
    )

    creationDate = models.DateTimeField(_('Creation Date'), auto_now_add=True)
    lastModified = models.DateTimeField(_('Last Modified Date'), auto_now=True)

    # Free-form per-order metadata.
    data = models.JSONField(_('Additional data'), default=dict, blank=True)

    # This custom manager prevents deletion of MerchOrders that are not
    # unsubmitted, even using queryset methods.
    objects = MerchOrderManager()

    @property
    def grossTotal(self):
        # Sum of quantity * unit price over all order items, where the unit
        # price falls back to the item's default price when the variant has
        # no price of its own.  Sum() yields None on an empty queryset,
        # hence the trailing `or 0`.
        return self.items.annotate(
            unitPrice=Case(
                When(item__price__isnull=True, then=F('item__item__defaultPrice')),
                default=F('item__price'),
                output_field=models.FloatField()
            ),
            totalPrice=ExpressionWrapper(
                F('quantity')*F('unitPrice'), output_field=models.FloatField()
            )
        ).aggregate(total=Sum('totalPrice')).get('total', 0) or 0

    @property
    def itemsEditable(self):
        '''
        Only unsubmitted orders can have items added or removed.
        '''
        return self.status == self.OrderStatus.unsubmitted

    def getOtherInvoiceDetails(self):
        '''
        Return a dictionary with details on the sum of totals for non-order
        items on the invoice associated with this order.
        '''
        if not getattr(self, 'invoice', None):
            return {}

        # Everything on the invoice except the items belonging to this order.
        return self.invoice.invoiceitem_set.exclude(
            id__in=self.items.values_list(
                'invoiceItem', flat=True
            )
        ).aggregate(
            grossTotal=Coalesce(Sum('grossTotal'), 0),
            total=Coalesce(Sum('total'), 0),
            adjustments=Coalesce(Sum('adjustments'), 0),
            taxes=Coalesce(Sum('taxes'), 0),
            fees=Coalesce(Sum('fees'), 0),
        )

    def link_invoice(self, update=True, save=True, **kwargs):
        '''
        If an invoice does not already exist for this order, then create one.
        If an update is requested, then ensure that all details of the
        invoice match the order.  Return the linked invoice.
        '''
        submissionUser = kwargs.pop('submissionUser', None)
        collectedByUser = kwargs.pop('collectedByUser', None)
        status = kwargs.pop('status', None)
        expirationDate = kwargs.pop('expirationDate', None)

        # Default expiration window for preliminary invoices.
        default_expiry = timezone.now() + timedelta(minutes=getConstant('registration__sessionExpiryMinutes'))

        if not getattr(self, 'invoice', None):
            # No invoice yet: create one.  Remaining kwargs are stashed in
            # the invoice's data field.
            invoice_kwargs = {
                'firstName': kwargs.pop('firstName', None),
                'lastName': kwargs.pop('lastName', None),
                'email': kwargs.pop('email', None),
                'grossTotal': self.grossTotal,
                'total': self.grossTotal,
                'submissionUser': submissionUser,
                'collectedByUser': collectedByUser,
                'buyerPaysSalesTax': getConstant('registration__buyerPaysSalesTax'),
                'data': kwargs,
            }

            # Unsubmitted orders get a preliminary (expiring) invoice;
            # otherwise the invoice starts as unpaid.
            if (
                (not status or status == Invoice.PaymentStatus.preliminary) and
                (self.status == self.OrderStatus.unsubmitted)
            ):
                invoice_kwargs.update({
                    'status': Invoice.PaymentStatus.preliminary,
                    'expirationDate': expirationDate or default_expiry
                })
            elif not status:
                invoice_kwargs.update({
                    'status': Invoice.PaymentStatus.unpaid,
                })

            new_invoice = Invoice(**invoice_kwargs)
            new_invoice.save()
            self.invoice = new_invoice
        elif update:
            needs_update = False
            other_details = self.getOtherInvoiceDetails()
            # Propagate any explicitly provided contact details to the
            # existing invoice.
            if kwargs.get('firstName', None):
                self.invoice.firstName = kwargs.get('firstName', None)
                needs_update = True
            if kwargs.get('lastName', None):
                self.invoice.lastName = kwargs.get('lastName', None)
                needs_update = True
            if kwargs.get('email', None):
                self.invoice.email = kwargs.get('email', None)
                needs_update = True
            if status and status != self.invoice.status:
                self.invoice.status = status
                needs_update = True

            # Keep the invoice gross total in sync with this order plus any
            # unrelated items already on the invoice.
            if (
                self.invoice.grossTotal != self.grossTotal +
                other_details.get('grossTotal', 0)
            ):
                self.invoice.grossTotal = self.grossTotal + other_details.get('grossTotal', 0)
                needs_update = True

            # Expiration dates only apply to preliminary invoices; they are
            # cleared once the invoice leaves the preliminary state.
            # NOTE(review): the elif branch sets needs_update even when
            # expirationDate is already None — extra no-op saves possible.
            if (
                expirationDate and expirationDate != self.invoice.expirationDate and
                self.invoice.status == Invoice.PaymentStatus.preliminary
            ):
                self.invoice.expirationDate = expirationDate
                needs_update = True
            elif self.invoice.status != Invoice.PaymentStatus.preliminary:
                self.invoice.expirationDate = None
                needs_update = True

            if needs_update and save:
                self.invoice.save()

        return self.invoice

    def save(self, *args, **kwargs):
        '''
        Before saving this order, ensure that an associated invoice exists.
        If an invoice already exists, then update the invoice if anything
        requires updating.
        '''
        link_kwargs = {
            'submissionUser': kwargs.pop('submissionUser', None),
            'collectedByUser': kwargs.pop('collectedByUser', None),
            'status': kwargs.pop('status', None),
            'expirationDate': kwargs.pop('expirationDate', None),
            'update': kwargs.pop('updateInvoice', True),
        }
        self.invoice = self.link_invoice(**link_kwargs)
        super().save(*args, **kwargs)

        # Update the sold out status of the associated item variants if
        # needed (i.e. on transitions into an "active" status).
        if (
            self.status not in [
                self.OrderStatus.unsubmitted, self.OrderStatus.cancelled
            ] and
            self.status != self.__initial_status
        ):
            variants = [x.item for x in self.items.all()]
            for variant in variants:
                variant.updateSoldOut()

    def delete(self, *args, **kwargs):
        '''
        Only unsubmitted orders can be deleted.  Orders can also only be
        cancelled if money has not been collected on the invoice.
        Otherwise, the order status needs to be changed via a refund on the
        invoice.
        '''
        if self.status == self.OrderStatus.unsubmitted:
            super().delete(*args, **kwargs)
        elif getattr(self.invoice, 'status') in [
            Invoice.PaymentStatus.needsCollection,
            Invoice.PaymentStatus.cancelled
        ]:
            # Soft-delete: mark cancelled rather than removing the row.
            self.status = self.OrderStatus.cancelled
            self.save()

    def __init__(self, *args, **kwargs):
        '''
        Keep track of initial status in memory to detect status changes.
        '''
        super().__init__(*args, **kwargs)
        self.__initial_status = self.status

    class Meta:
        verbose_name = _('Merchandise order')
        verbose_name_plural = _('Merchandise orders')


class MerchOrderItem(models.Model):
    '''
    An individual item selected for purchase.  Notice that all of the details
    of pricing are handled by the invoice item it's associated with, not the
    item ordered itself.
    '''

    order = models.ForeignKey(
        MerchOrder, on_delete=models.CASCADE, related_name='items',
    )

    item = models.ForeignKey(
        MerchItemVariant, verbose_name=_('Item'), on_delete=models.CASCADE,
        related_name='orders'
    )

    invoiceItem = models.OneToOneField(
        InvoiceItem, on_delete=models.CASCADE,
        help_text=_(
            'All merchandise orders must be associated with an invoice.'
        )
    )

    quantity = models.PositiveIntegerField(
        _('Quantity'), default=1,
    )

    @property
    def grossTotal(self):
        return self.quantity * self.item.getPrice()

    def link_invoice_item(self, update=True, **kwargs):
        '''
        If an order's contents are created or modified, this method ensures
        that the corresponding invoice items exist and that the totals are
        updated accordingly.
        '''
        newTotal = self.grossTotal
        invoice = self.order.invoice
        invoiceItem = self.invoiceItem

        # An invoice item may only belong to the order's own invoice.
        if (
            invoice and invoiceItem and
            invoiceItem.invoice != invoice
        ):
            raise ValidationError(_('Invoice item does not match order invoice'))

        if (
            update and invoiceItem and
            (
                invoiceItem.grossTotal != newTotal or
                invoiceItem.total != newTotal
            )
        ):
            # Totals drifted: sync price and taxes onto the invoice item.
            invoiceItem.grossTotal = newTotal
            invoiceItem.total = newTotal
            invoiceItem.taxRate = self.item.item.salesTaxRate
            invoiceItem.calculateTaxes()
            invoiceItem.save()
        elif not invoiceItem and invoice:
            # No invoice item yet: create one and attach it to this order
            # item (the self.save() persists the new link).
            new_item = InvoiceItem(
                invoice=invoice,
                description=self.item.fullName,
                grossTotal=newTotal,
                total=newTotal,
                taxRate=self.item.item.salesTaxRate
            )
            new_item.calculateTaxes()
            new_item.save()
            self.invoiceItem = new_item
            self.save()
        return self.invoiceItem

    def save(self, *args, **kwargs):
        # restrictStatus=True (default) limits edits to editable (i.e.
        # unsubmitted) orders.
        restrictStatus = kwargs.pop('restrictStatus', True)
        if self.order.itemsEditable or not restrictStatus:
            self.invoiceItem = self.link_invoice_item(
                update=kwargs.pop('updateInvoiceItem', True)
            )
            super().save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        # Deletion is likewise restricted to editable orders by default.
        restrictStatus = kwargs.pop('restrictStatus', True)
        if self.order.itemsEditable or not restrictStatus:
            super().delete(*args, **kwargs)

    class Meta:
        verbose_name = _('Merchandise order item')
        verbose_name_plural = _('Merchandise order items')
        unique_together = ('item', 'order',)


class RegisterMerchPluginModel(CMSPlugin):
    '''
    This model holds information on a set of merchandise items that are sold
    at the door.
    '''
    title = models.CharField(
        _('Custom list title'), max_length=250, default=_('Merchandise'),
        blank=True
    )

    categories = models.ManyToManyField(
        MerchItemCategory, blank=True,
        verbose_name=_('Limit to merchandise categories'),
        help_text=_('Leave blank for no restriction'),
    )

    separateVariants = models.BooleanField(
        _('Display item variants as separate items'), default=False
    )

    displaySoldOut = models.BooleanField(
        _('Display items and variants that are sold out'), default=False
    )

    requireFullRegistration = models.BooleanField(
        _('Require full registration'), blank=True, default=True,
        help_text=_(
            'If checked, then the user will be sent to the second page of the ' +
            'registration process to provide name and email. Particular payment ' +
            'methods may also require the full registration process.'
        )
    )

    autoFulfill = models.BooleanField(
        _('Automatically mark order as fulfilled upon payment.'),
        default=False,
        help_text=_(
            'If checked, and if all items added to this merch order also ' +
            'this option check, then the order will automatically be marked ' +
            'as fulfilled when the invoice is finalized. Useful for ' +
            'merchandise that is sold immediately at the point-of-sale.'
        )
    )

    paymentMethods = models.ManyToManyField(
        RegisterPaymentMethod,
        verbose_name=_('Payment Methods'),
        help_text=_(
            'If you would like separate buttons for individual payment methods, ' +
            'then select them here. If left blank, a single button will be shown ' +
            'and no payment method will be specified.'
), blank=True, ) template = models.CharField(_('Plugin template'), max_length=250, null=True, blank=True) cssClasses = models.CharField( _('Custom CSS classes'), max_length=250, null=True, blank=True, help_text=_('Classes are applied to surrounding &lt;div&gt;') ) def getMerch(self): filters = Q(disabled=False) categories = self.categories.all() if categories: filters = filters & Q(category__in=categories) if not self.displaySoldOut: filters = filters & Q(item_variant__soldOut=False) return MerchItem.objects.filter(filters).distinct() def copy_relations(self, oldinstance): super().copy_relations(oldinstance) # Delete existing choice instances to avoid duplicates, then duplicate # choice instances from the old plugin instance. Following Django CMS # documentation. self.categories.all().delete() self.paymentMethods.all().delete() for choice in oldinstance.categories.all(): choice.pk = None choice.registermerchpluginmodel = self choice.save() for choice in oldinstance.paymentMethods.all(): choice.pk = None choice.registermerchpluginmodel = self choice.save()
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six.moves.cPickle as pickle
import mock
import os
import unittest
import random
import itertools
from contextlib import closing
from gzip import GzipFile
from tempfile import mkdtemp
from shutil import rmtree
from test import listen_zero
from test.unit import FakeLogger, make_timestamp_iter
from test.unit import debug_logger, patch_policies, mocked_http_conn
from time import time
from distutils.dir_util import mkpath

from eventlet import spawn, Timeout

from swift.obj import updater as object_updater
from swift.obj.diskfile import (ASYNCDIR_BASE, get_async_dir, DiskFileManager,
                                get_tmp_dir)
from swift.common.ring import RingData
from swift.common import utils
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import hash_path, normalize_timestamp, mkdirs, \
    write_pickle
from swift.common.storage_policy import StoragePolicy, POLICIES


_mocked_policies = [StoragePolicy(0, 'zero', False),
                    StoragePolicy(1, 'one', True)]


@patch_policies(_mocked_policies)
class TestObjectUpdater(unittest.TestCase):
    # Unit tests for swift.obj.updater.ObjectUpdater using a fake two-policy
    # configuration and a temporary on-disk layout.

    def setUp(self):
        # Build a minimal device tree plus a serialized container ring with
        # three fake devices, all pointing at 127.0.0.1.
        utils.HASH_PATH_SUFFIX = 'endcap'
        utils.HASH_PATH_PREFIX = ''
        self.testdir = mkdtemp()
        ring_file = os.path.join(self.testdir, 'container.ring.gz')
        with closing(GzipFile(ring_file, 'wb')) as f:
            pickle.dump(
                RingData([[0, 1, 2, 0, 1, 2],
                          [1, 2, 0, 1, 2, 0],
                          [2, 3, 1, 2, 3, 1]],
                         [{'id': 0, 'ip': '127.0.0.1', 'port': 1,
                           'device': 'sda1', 'zone': 0},
                          {'id': 1, 'ip': '127.0.0.1', 'port': 1,
                           'device': 'sda1', 'zone': 2},
                          {'id': 2, 'ip': '127.0.0.1', 'port': 1,
                           'device': 'sda1', 'zone': 4}], 30),
                f)
        self.devices_dir = os.path.join(self.testdir, 'devices')
        os.mkdir(self.devices_dir)
        self.sda1 = os.path.join(self.devices_dir, 'sda1')
        os.mkdir(self.sda1)
        for policy in POLICIES:
            os.mkdir(os.path.join(self.sda1, get_tmp_dir(policy)))
        self.logger = debug_logger()

    def tearDown(self):
        rmtree(self.testdir, ignore_errors=1)

    def test_creation(self):
        # Config values are parsed into typed attributes on construction.
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '2',
            'node_timeout': '5.5'})
        self.assertTrue(hasattr(ou, 'logger'))
        self.assertTrue(ou.logger is not None)
        self.assertEqual(ou.devices, self.devices_dir)
        self.assertEqual(ou.interval, 1)
        self.assertEqual(ou.concurrency, 2)
        self.assertEqual(ou.node_timeout, 5.5)
        self.assertTrue(ou.get_container_ring() is not None)

    @mock.patch('os.listdir')
    def test_listdir_with_exception(self, mock_listdir):
        # _listdir swallows OSError, logs it, and returns an empty list.
        e = OSError('permission_denied')
        mock_listdir.side_effect = e
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf)
        daemon.logger = FakeLogger()
        paths = daemon._listdir('foo/bar')
        self.assertEqual([], paths)
        log_lines = daemon.logger.get_lines_for_level('error')
        msg = ('ERROR: Unable to access foo/bar: permission_denied')
        self.assertEqual(log_lines[0], msg)

    @mock.patch('os.listdir', return_value=['foo', 'bar'])
    def test_listdir_without_exception(self, mock_listdir):
        # Happy path: _listdir passes through os.listdir with no errors.
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf)
        daemon.logger = FakeLogger()
        path = daemon._listdir('foo/bar/')
        log_lines = daemon.logger.get_lines_for_level('error')
        self.assertEqual(len(log_lines), 0)
        self.assertEqual(path, ['foo', 'bar'])

    def test_object_sweep(self):
        def check_with_idx(index, warn, should_skip):
            # Exercise object_sweep against an async dir for the given
            # policy index; `warn` is the expected warning count and
            # `should_skip` says whether the dir should be left untouched.
            if int(index) > 0:
                asyncdir = os.path.join(self.sda1,
                                        ASYNCDIR_BASE + "-" + index)
            else:
                asyncdir = os.path.join(self.sda1, ASYNCDIR_BASE)

            prefix_dir = os.path.join(asyncdir, 'abc')
            mkpath(prefix_dir)

            # A non-directory where directory is expected should just be
            # skipped, but should not stop processing of subsequent
            # directories.
            not_dirs = (
                os.path.join(self.sda1, 'not_a_dir'),
                os.path.join(self.sda1,
                             ASYNCDIR_BASE + '-' + 'twentington'),
                os.path.join(self.sda1,
                             ASYNCDIR_BASE + '-' + str(int(index) + 100)))

            for not_dir in not_dirs:
                with open(not_dir, 'w'):
                    pass

            objects = {
                'a': [1089.3, 18.37, 12.83, 1.3],
                'b': [49.4, 49.3, 49.2, 49.1],
                'c': [109984.123],
            }

            expected = set()
            for o, timestamps in objects.items():
                ohash = hash_path('account', 'container', o)
                for t in timestamps:
                    o_path = os.path.join(prefix_dir, ohash + '-' +
                                          normalize_timestamp(t))
                    # Only the newest async pending for each object should
                    # be processed; older ones are reaped.
                    if t == timestamps[0]:
                        expected.add((o_path, int(index)))
                    write_pickle({}, o_path)

            seen = set()

            class MockObjectUpdater(object_updater.ObjectUpdater):
                def process_object_update(self, update_path, device, policy):
                    seen.add((update_path, int(policy)))
                    os.unlink(update_path)

            ou = MockObjectUpdater({
                'devices': self.devices_dir,
                'mount_check': 'false',
                'swift_dir': self.testdir,
                'interval': '1',
                'concurrency': '1',
                'node_timeout': '5'})
            ou.logger = mock_logger = mock.MagicMock()
            ou.object_sweep(self.sda1)
            self.assertEqual(mock_logger.warning.call_count, warn)
            self.assertTrue(
                os.path.exists(os.path.join(self.sda1, 'not_a_dir')))
            if should_skip:
                # if we were supposed to skip over the dir, we didn't process
                # anything at all
                self.assertTrue(os.path.exists(prefix_dir))
                self.assertEqual(set(), seen)
            else:
                self.assertTrue(not os.path.exists(prefix_dir))
                self.assertEqual(expected, seen)

            # test cleanup: the tempdir gets cleaned up between runs, but this
            # way we can be called multiple times in a single test method
            for not_dir in not_dirs:
                os.unlink(not_dir)

        # first check with valid policies
        for pol in POLICIES:
            check_with_idx(str(pol.idx), 0, should_skip=False)
        # now check with a bogus async dir policy and make sure we get
        # a warning indicating that the '99' policy isn't valid
        check_with_idx('99', 1, should_skip=True)

    @mock.patch.object(object_updater, 'ismount')
    def test_run_once_with_disk_unmounted(self, mock_ismount):
        # With mount_check enabled and ismount False, the device is skipped
        # and an error is counted.
        mock_ismount.return_value = False
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'})
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(os.path.exists(odd_dir))  # skipped - not mounted!
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)
        self.assertEqual(ou.logger.get_increment_counts(), {'errors': 1})

    @mock.patch.object(object_updater, 'ismount')
    def test_run_once(self, mock_ismount):
        # End-to-end: sweep async pendings and send container updates to a
        # real listening socket, checking success/failure accounting.
        mock_ismount.return_value = True
        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        ou.run_once()
        async_dir = os.path.join(self.sda1, get_async_dir(POLICIES[0]))
        os.mkdir(async_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        # mount_check == False means no call to ismount
        self.assertEqual([], mock_ismount.mock_calls)

        ou = object_updater.ObjectUpdater({
            'devices': self.devices_dir,
            'mount_check': 'TrUe',
            'swift_dir': self.testdir,
            'interval': '1',
            'concurrency': '1',
            'node_timeout': '15'}, logger=self.logger)
        odd_dir = os.path.join(async_dir, 'not really supposed '
                               'to be here')
        os.mkdir(odd_dir)
        ou.run_once()
        self.assertTrue(os.path.exists(async_dir))
        self.assertTrue(not os.path.exists(odd_dir))
        # mount_check == True means ismount was checked
        self.assertEqual([
            mock.call(self.sda1),
        ], mock_ismount.mock_calls)

        # Two async pendings for the same object: only the newest survives
        # a failed update pass; the older is unlinked.
        ohash = hash_path('a', 'c', 'o')
        odir = os.path.join(async_dir, ohash[-3:])
        mkdirs(odir)
        older_op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time() - 1)))
        op_path = os.path.join(
            odir,
            '%s-%s' % (ohash, normalize_timestamp(time())))
        for path in (op_path, older_op_path):
            with open(path, 'wb') as async_pending:
                pickle.dump({'op': 'PUT', 'account': 'a',
                             'container': 'c',
                             'obj': 'o', 'headers': {
                                 'X-Container-Timestamp':
                                 normalize_timestamp(0)}},
                            async_pending)
        ou.run_once()
        self.assertTrue(not os.path.exists(older_op_path))
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1, 'unlinks': 1})
        self.assertIsNone(pickle.load(open(op_path)).get('successes'))

        bindsock = listen_zero()

        def accepter(sock, return_code):
            # Serve one container-update request with the given status code
            # and assert on the request line and headers.
            try:
                with Timeout(3):
                    inc = sock.makefile('rb')
                    out = sock.makefile('wb')
                    out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                              return_code)
                    out.flush()
                    self.assertEqual(inc.readline(),
                                     'PUT /sda1/0/a/c/o HTTP/1.1\r\n')
                    headers = HeaderKeyDict()
                    line = inc.readline()
                    while line and line != '\r\n':
                        headers[line.split(':')[0]] = \
                            line.split(':')[1].strip()
                        line = inc.readline()
                    self.assertTrue('x-container-timestamp' in headers)
                    self.assertTrue('X-Backend-Storage-Policy-Index' in
                                    headers)
            except BaseException as err:
                return err
            return None

        def accept(return_codes):
            # Accept one connection per desired status code, each handled in
            # its own greenthread.
            try:
                events = []
                for code in return_codes:
                    with Timeout(3):
                        sock, addr = bindsock.accept()
                        events.append(
                            spawn(accepter, sock, code))
                for event in events:
                    err = event.wait()
                    if err:
                        raise err
            except BaseException as err:
                return err
            return None

        # Partial success (one 201, two 500s): pending file is kept with the
        # successful node index recorded.
        event = spawn(accept, [201, 500, 500])
        for dev in ou.get_container_ring().devs:
            if dev is not None:
                dev['port'] = bindsock.getsockname()[1]

        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0],
                         pickle.load(open(op_path)).get('successes'))

        # Another partial pass adds the newly succeeded node.
        event = spawn(accept, [404, 201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'failures': 1})
        self.assertEqual([0, 2],
                         pickle.load(open(op_path)).get('successes'))

        # Final node succeeds: pending file is unlinked.
        event = spawn(accept, [201])
        ou.logger._clear()
        ou.run_once()
        err = event.wait()
        if err:
            raise err
        self.assertTrue(not os.path.exists(op_path))
        self.assertEqual(ou.logger.get_increment_counts(),
                         {'unlinks': 1, 'successes': 1})

    def test_obj_put_legacy_updates(self):
        # Async pendings written without a storage-policy-index header still
        # get updated with the policy index added on the wire.
        ts = (normalize_timestamp(t) for t in
              itertools.count(int(time())))
        policy = POLICIES.get_by_index(0)
        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        async_dir = os.path.join(self.sda1, get_async_dir(policy))
        os.mkdir(async_dir)

        account, container, obj = 'a', 'c', 'o'
        # write an async
        for op in ('PUT', 'DELETE'):
            self.logger._clear()
            daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
            dfmanager = DiskFileManager(conf, daemon.logger)
            # don't include storage-policy-index in headers_out pickle
            headers_out = HeaderKeyDict({
                'x-size': 0,
                'x-content-type': 'text/plain',
                'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
                'x-timestamp': next(ts),
            })
            data = {'op': op, 'account': account, 'container': container,
                    'obj': obj, 'headers': headers_out}
            dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                          data, next(ts), policy)

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [200, 200, 200]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, op)
                self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
                                 str(int(policy)))
            self.assertEqual(daemon.logger.get_increment_counts(),
                             {'successes': 1, 'unlinks': 1,
                              'async_pendings': 1})

    def test_obj_put_async_updates(self):
        # Header handling on the wire: case-insensitive pickled headers, a
        # missing policy index is filled in, a mismatched one is preserved,
        # and the user agent is rewritten to object-updater.
        ts_iter = make_timestamp_iter()
        policies = list(POLICIES)
        random.shuffle(policies)

        # setup updater
        conf = {
            'devices': self.devices_dir,
            'mount_check': 'false',
            'swift_dir': self.testdir,
        }
        daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
        async_dir = os.path.join(self.sda1, get_async_dir(policies[0]))
        os.mkdir(async_dir)

        def do_test(headers_out, expected):
            # write an async
            dfmanager = DiskFileManager(conf, daemon.logger)
            account, container, obj = 'a', 'c', 'o'
            op = 'PUT'
            data = {'op': op, 'account': account, 'container': container,
                    'obj': obj, 'headers': headers_out}
            dfmanager.pickle_async_update(self.sda1, account, container, obj,
                                          data, next(ts_iter), policies[0])

            request_log = []

            def capture(*args, **kwargs):
                request_log.append((args, kwargs))

            # run once
            fake_status_codes = [
                200,  # object update success
                200,  # object update success
                200,  # object update conflict
            ]
            with mocked_http_conn(*fake_status_codes, give_connect=capture):
                daemon.run_once()
            self.assertEqual(len(fake_status_codes), len(request_log))
            for request_args, request_kwargs in request_log:
                ip, part, method, path, headers, qs, ssl = request_args
                self.assertEqual(method, 'PUT')
                self.assertDictEqual(expected, headers)
            self.assertEqual(
                daemon.logger.get_increment_counts(),
                {'successes': 1, 'unlinks': 1, 'async_pendings': 1})
            self.assertFalse(os.listdir(async_dir))
            daemon.logger.clear()

        ts = next(ts_iter)
        # use a dict rather than HeaderKeyDict so we can vary the case of the
        # pickled headers
        headers_out = {
            'x-size': 0,
            'x-content-type': 'text/plain',
            'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'x-timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': int(policies[0]),
            'User-Agent': 'object-server %s' % os.getpid()
        }
        expected = {
            'X-Size': '0',
            'X-Content-Type': 'text/plain',
            'X-Etag': 'd41d8cd98f00b204e9800998ecf8427e',
            'X-Timestamp': ts.normal,
            'X-Backend-Storage-Policy-Index': str(int(policies[0])),
            'User-Agent': 'object-updater %s' % os.getpid()
        }
        do_test(headers_out, expected)

        # updater should add policy header if missing
        headers_out['X-Backend-Storage-Policy-Index'] = None
        do_test(headers_out, expected)

        # updater should not overwrite a mismatched policy header
        headers_out['X-Backend-Storage-Policy-Index'] = int(policies[1])
        expected['X-Backend-Storage-Policy-Index'] = str(int(policies[1]))
        do_test(headers_out, expected)

        # check for case insensitivity
        headers_out['user-agent'] = headers_out.pop('User-Agent')
        headers_out['x-backend-storage-policy-index'] = headers_out.pop(
            'X-Backend-Storage-Policy-Index')
        do_test(headers_out, expected)


if __name__ == '__main__':
    unittest.main()
""" Methods for identifying space-time interaction in spatio-temporal event data. """ __author__ = "Nicholas Malizia <nmalizia@asu.edu>" import pysal #from pysal.common import * import numpy as np import scipy.stats as stats import pysal.weights.Distance as Distance from pysal import cg from pysal.spatial_dynamics import util __all__ = ['SpaceTimeEvents', 'knox', 'mantel', 'jacquez', 'modified_knox'] class SpaceTimeEvents: """ Method for reformatting event data stored in a shapefile for use in calculating metrics of spatio-temporal interaction. Parameters ---------- path : string the path to the appropriate shapefile, including the file name, but excluding the extension time : string column header in the DBF file indicating the column containing the time stamp Attributes ---------- n : int number of events x : array n x 1 array of the x coordinates for the events y : array n x 1 array of the y coordinates for the events t : array n x 1 array of the temporal coordinates for the events space : array n x 2 array of the spatial coordinates (x,y) for the events time : array n x 2 array of the temporal coordinates (t,1) for the events, the second column is a vector of ones Examples -------- >>> import numpy as np >>> import pysal Read in the example shapefile data, ensuring to omit the file extension. In order to successfully create the event data the .dbf file associated with the shapefile should have a column of values that are a timestamp for the events. There should be a numerical value (not a date) in every field. >>> path = pysal.examples.get_path("burkitt") Create an instance of SpaceTimeEvents from a shapefile, where the temporal information is stored in a column named "T". >>> events = SpaceTimeEvents(path,'T') See how many events are in the instance. >>> events.n 188 Check the spatial coordinates of the first event. >>> events.space[0] array([ 300., 302.]) Check the time of the first event. 
>>> events.t[0] array([413]) """ def __init__(self, path, time_col): shp = pysal.open(path + '.shp') dbf = pysal.open(path + '.dbf') # extract the spatial coordinates from the shapefile x = [] y = [] n = 0 for i in shp: count = 0 for j in i: if count == 0: x.append(j) elif count == 1: y.append(j) count += 1 n += 1 self.n = n x = np.array(x) y = np.array(y) self.x = np.reshape(x, (n, 1)) self.y = np.reshape(y, (n, 1)) self.space = np.hstack((self.x, self.y)) # extract the temporal information from the database t = np.array(dbf.by_col(time_col)) line = np.ones((n, 1)) self.t = np.reshape(t, (n, 1)) self.time = np.hstack((self.t, line)) # close open objects dbf.close() shp.close() def knox(events, delta, tau, permutations=99): """ Knox test for spatio-temporal interaction. [1]_ Parameters ---------- events : space time events object an output instance from the class SpaceTimeEvents delta : float threshold for proximity in space tau : float threshold for proximity in time permutations : int the number of permutations used to establish pseudo- significance (default is 99) Returns ------- knox_result : dictionary contains the statistic (stat) for the test and the associated p-value (pvalue) stat : float value of the knox test for the dataset pvalue : float pseudo p-value associated with the statistic References ---------- .. [1] E. Knox. 1964. The detection of space-time interactions. Journal of the Royal Statistical Society. Series C (Applied Statistics), 13(1):25-30. Examples -------- >>> import numpy as np >>> import pysal Read in the example data and create an instance of SpaceTimeEvents. >>> path = pysal.examples.get_path("burkitt") >>> events = SpaceTimeEvents(path,'T') Set the random seed generator. This is used by the permutation based inference to replicate the pseudo-significance of our example results - the end-user will normally omit this step. >>> np.random.seed(100) Run the Knox test with distance and time thresholds of 20 and 5, respectively. 
This counts the events that are closer than 20 units in space, and 5 units in time. >>> result = knox(events,delta=20,tau=5,permutations=99) Next, we examine the results. First, we call the statistic from the results results dictionary. This reports that there are 13 events close in both space and time, according to our threshold definitions. >>> print(result['stat']) 13.0 Next, we look at the pseudo-significance of this value, calculated by permuting the timestamps and rerunning the statistics. In this case, the results indicate there is likely no space-time interaction between the events. >>> print("%2.2f"%result['pvalue']) 0.18 """ n = events.n s = events.space t = events.t # calculate the spatial and temporal distance matrices for the events sdistmat = cg.distance_matrix(s) tdistmat = cg.distance_matrix(t) # identify events within thresholds spacmat = np.ones((n, n)) test = sdistmat <= delta spacmat = spacmat * test timemat = np.ones((n, n)) test = tdistmat <= tau timemat = timemat * test # calculate the statistic knoxmat = timemat * spacmat stat = (knoxmat.sum() - n) / 2 # return results (if no inference) if permutations == 0: return stat distribution = [] # loop for generating a random distribution to assess significance for p in range(permutations): rtdistmat = util.shuffle_matrix(tdistmat, range(n)) timemat = np.ones((n, n)) test = rtdistmat <= tau timemat = timemat * test knoxmat = timemat * spacmat k = (knoxmat.sum() - n) / 2 distribution.append(k) # establish the pseudo significance of the observed statistic distribution = np.array(distribution) greater = np.ma.masked_greater_equal(distribution, stat) count = np.ma.count_masked(greater) pvalue = (count + 1.0) / (permutations + 1.0) # return results knox_result = {'stat': stat, 'pvalue': pvalue} return knox_result def mantel(events, permutations=99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0): """ Standardized Mantel test for spatio-temporal interaction. 
[2]_ Parameters ---------- events : space time events object an output instance from the class SpaceTimeEvents permutations : int the number of permutations used to establish pseudo- significance (default is 99) scon : float constant added to spatial distances spow : float value for power transformation for spatial distances tcon : float constant added to temporal distances tpow : float value for power transformation for temporal distances Returns ------- mantel_result : dictionary contains the statistic (stat) for the test and the associated p-value (pvalue) stat : float value of the knox test for the dataset pvalue : float pseudo p-value associated with the statistic Reference --------- .. [2] N. Mantel. 1967. The detection of disease clustering and a generalized regression approach. Cancer Research, 27(2):209-220. Examples -------- >>> import numpy as np >>> import pysal Read in the example data and create an instance of SpaceTimeEvents. >>> path = pysal.examples.get_path("burkitt") >>> events = SpaceTimeEvents(path,'T') Set the random seed generator. This is used by the permutation based inference to replicate the pseudo-significance of our example results - the end-user will normally omit this step. >>> np.random.seed(100) The standardized Mantel test is a measure of matrix correlation between the spatial and temporal distance matrices of the event dataset. The following example runs the standardized Mantel test without a constant or transformation; however, as recommended by Mantel (1967) [2]_, these should be added by the user. This can be done by adjusting the constant and power parameters. >>> result = mantel(events, 99, scon=1.0, spow=-1.0, tcon=1.0, tpow=-1.0) Next, we examine the result of the test. >>> print("%6.6f"%result['stat']) 0.048368 Finally, we look at the pseudo-significance of this value, calculated by permuting the timestamps and rerunning the statistic for each of the 99 permutations. 
According to these parameters, the results indicate space-time interaction between the events. >>> print("%2.2f"%result['pvalue']) 0.01 """ n = events.n s = events.space t = events.t # calculate the spatial and temporal distance matrices for the events distmat = cg.distance_matrix(s) timemat = cg.distance_matrix(t) # calculate the transformed standardized statistic timevec = (util.get_lower(timemat) + tcon) ** tpow distvec = (util.get_lower(distmat) + scon) ** spow stat = stats.pearsonr(timevec, distvec)[0].sum() # return the results (if no inference) if permutations == 0: return stat # loop for generating a random distribution to assess significance dist = [] for i in range(permutations): trand = util.shuffle_matrix(timemat, range(n)) timevec = (util.get_lower(trand) + tcon) ** tpow m = stats.pearsonr(timevec, distvec)[0].sum() dist.append(m) ## establish the pseudo significance of the observed statistic distribution = np.array(dist) greater = np.ma.masked_greater_equal(distribution, stat) count = np.ma.count_masked(greater) pvalue = (count + 1.0) / (permutations + 1.0) # report the results mantel_result = {'stat': stat, 'pvalue': pvalue} return mantel_result def jacquez(events, k, permutations=99): """ Jacquez k nearest neighbors test for spatio-temporal interaction. [3]_ Parameters ---------- events : space time events object an output instance from the class SpaceTimeEvents k : int the number of nearest neighbors to be searched permutations : int the number of permutations used to establish pseudo- significance (default is 99) Returns ------- jacquez_result : dictionary contains the statistic (stat) for the test and the associated p-value (pvalue) stat : float value of the Jacquez k nearest neighbors test for the dataset pvalue : float p-value associated with the statistic (normally distributed with k-1 df) References ---------- .. [3] G. Jacquez. 1996. A k nearest neighbour test for space-time interaction. Statistics in Medicine, 15(18):1935-1949. 
Examples -------- >>> import numpy as np >>> import pysal Read in the example data and create an instance of SpaceTimeEvents. >>> path = pysal.examples.get_path("burkitt") >>> events = SpaceTimeEvents(path,'T') The Jacquez test counts the number of events that are k nearest neighbors in both time and space. The following runs the Jacquez test on the example data and reports the resulting statistic. In this case, there are 13 instances where events are nearest neighbors in both space and time. >>> np.random.seed(100) >>> result = jacquez(events,k=3,permutations=99) >>> print result['stat'] 13 The significance of this can be assessed by calling the p- value from the results dictionary, as shown below. Again, no space-time interaction is observed. >>> print("%2.2f"%result['pvalue']) 0.21 """ n = events.n time = events.time space = events.space # calculate the nearest neighbors in space and time separately knnt = Distance.knnW(time, k) knns = Distance.knnW(space, k) nnt = knnt.neighbors nns = knns.neighbors knn_sum = 0 # determine which events are nearest neighbors in both space and time for i in range(n): t_neighbors = nnt[i] s_neighbors = nns[i] check = set(t_neighbors) inter = check.intersection(s_neighbors) count = len(inter) knn_sum += count stat = knn_sum # return the results (if no inference) if permutations == 0: return stat # loop for generating a random distribution to assess significance dist = [] for p in range(permutations): j = 0 trand = np.random.permutation(time) knnt = Distance.knnW(trand, k) nnt = knnt.neighbors for i in range(n): t_neighbors = nnt[i] s_neighbors = nns[i] check = set(t_neighbors) inter = check.intersection(s_neighbors) count = len(inter) j += count dist.append(j) # establish the pseudo significance of the observed statistic distribution = np.array(dist) greater = np.ma.masked_greater_equal(distribution, stat) count = np.ma.count_masked(greater) pvalue = (count + 1.0) / (permutations + 1.0) # report the results jacquez_result = 
{'stat': stat, 'pvalue': pvalue} return jacquez_result def modified_knox(events, delta, tau, permutations=99): """ Baker's modified Knox test for spatio-temporal interaction. [1]_ Parameters ---------- events : space time events object an output instance from the class SpaceTimeEvents delta : float threshold for proximity in space tau : float threshold for proximity in time permutations : int the number of permutations used to establish pseudo- significance (default is 99) Returns ------- modknox_result : dictionary contains the statistic (stat) for the test and the associated p-value (pvalue) stat : float value of the modified knox test for the dataset pvalue : float pseudo p-value associated with the statistic References ---------- .. [1] R.D. Baker. Identifying space-time disease clusters. Acta Tropica, 91(3):291-299, 2004 Examples -------- >>> import numpy as np >>> import pysal Read in the example data and create an instance of SpaceTimeEvents. >>> path = pysal.examples.get_path("burkitt") >>> events = SpaceTimeEvents(path,'T') Set the random seed generator. This is used by the permutation based inference to replicate the pseudo-significance of our example results - the end-user will normally omit this step. >>> np.random.seed(100) Run the modified Knox test with distance and time thresholds of 20 and 5, respectively. This counts the events that are closer than 20 units in space, and 5 units in time. >>> result = modified_knox(events,delta=20,tau=5,permutations=99) Next, we examine the results. First, we call the statistic from the results dictionary. This reports the difference between the observed and expected Knox statistic. >>> print("%2.8f"%result['stat']) 2.81016043 Next, we look at the pseudo-significance of this value, calculated by permuting the timestamps and rerunning the statistics. In this case, the results indicate there is likely no space-time interaction. 
>>> print("%2.2f"%result['pvalue']) 0.11 """ n = events.n s = events.space t = events.t # calculate the spatial and temporal distance matrices for the events sdistmat = cg.distance_matrix(s) tdistmat = cg.distance_matrix(t) # identify events within thresholds spacmat = np.ones((n, n)) spacbin = sdistmat <= delta spacmat = spacmat * spacbin timemat = np.ones((n, n)) timebin = tdistmat <= tau timemat = timemat * timebin # calculate the observed (original) statistic knoxmat = timemat * spacmat obsstat = (knoxmat.sum() - n) # calculate the expectated value ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1)) tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1)) expstat = (ssumvec * tsumvec).sum() # calculate the modified stat stat = (obsstat - (expstat / (n - 1.0))) / 2.0 # return results (if no inference) if permutations == 0: return stat distribution = [] # loop for generating a random distribution to assess significance for p in range(permutations): rtdistmat = util.shuffle_matrix(tdistmat, range(n)) timemat = np.ones((n, n)) timebin = rtdistmat <= tau timemat = timemat * timebin # calculate the observed knox again knoxmat = timemat * spacmat obsstat = (knoxmat.sum() - n) # calculate the expectated value again ssumvec = np.reshape((spacbin.sum(axis=0) - 1), (n, 1)) tsumvec = np.reshape((timebin.sum(axis=0) - 1), (n, 1)) expstat = (ssumvec * tsumvec).sum() # calculate the modified stat tempstat = (obsstat - (expstat / (n - 1.0))) / 2.0 distribution.append(tempstat) # establish the pseudo significance of the observed statistic distribution = np.array(distribution) greater = np.ma.masked_greater_equal(distribution, stat) count = np.ma.count_masked(greater) pvalue = (count + 1.0) / (permutations + 1.0) # return results modknox_result = {'stat': stat, 'pvalue': pvalue} return modknox_result
# The MIT License (MIT) # # Copyright (c) 2015 Brian Wray (brian@wrocket.org) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
import subprocess
import json
import unittest


def call_tulip(args):
    """Invoke the tulip engine binary with *args*; return stdout as UTF-8 text."""
    cmd = ['../../src/tulip']
    cmd.extend(args)
    out = subprocess.check_output(cmd)
    return out.decode('utf-8')


class TestMakeUnmakeMove(unittest.TestCase):
    """End-to-end checks of the engine's make/unmake move logic.

    Each test asks the engine (via -makeunmake) to apply a move to a FEN
    position and immediately retract it, then verifies the reported state
    was fully restored: mailbox board, bitboards, piece counts, castling
    rights, side to move, king squares and en-passant state.
    """

    def make_unmake_move(self, fen, move):
        """Run -makeunmake for (fen, move) and return the parsed JSON state."""
        result = call_tulip(['-makeunmake', move, fen])
        return json.loads(result)

    def test_initial_position_e2e4(self):
        """Make/unmake e2e4 from the start position restores everything."""
        result = self.make_unmake_move('rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1', 'e2e4')
        bitboards = result['bitboards']
        board = result['board']
        self.assertNotIn('e4', board)
        self.assertEqual('P', board['e2'])
        self.assertEqual('000000000000FF00', bitboards['P'])
        self.assertEqual('00FF000000000000', bitboards['p'])
        self.assertEqual('0000000000000042', bitboards['N'])
        self.assertEqual('4200000000000000', bitboards['n'])
        self.assertEqual('0000000000000024', bitboards['B'])
        self.assertEqual('2400000000000000', bitboards['b'])
        self.assertEqual('0000000000000081', bitboards['R'])
        self.assertEqual('8100000000000000', bitboards['r'])
        self.assertEqual('0000000000000008', bitboards['Q'])
        self.assertEqual('0800000000000000', bitboards['q'])
        self.assertEqual('0000000000000010', bitboards['K'])
        self.assertEqual('1000000000000000', bitboards['k'])
        self.assertEqual('0000FFFFFFFF0000', bitboards['-'])
        self.assertEqual('white', result['toMove'])
        self.assertEqual('e8', result['blackKingSquare'])
        self.assertEqual('e1', result['whiteKingSquare'])
        self.assertTrue(result['castleWhiteKingside'])
        self.assertTrue(result['castleBlackKingside'])
        self.assertTrue(result['castleWhiteQueenside'])
        self.assertTrue(result['castleBlackQueenside'])
        self.assertEqual(0, result['fiftyMoveCount'])
        self.assertEqual('none', result['epFile'])
        piece_counts = result['pieceCounts']
        for p in ['P', 'p']:
            self.assertEqual(8, piece_counts[p])
        for p in ['R', 'r', 'N', 'n', 'B', 'b']:
            self.assertEqual(2, piece_counts[p])
        for p in ['K', 'k', 'Q', 'q']:
            self.assertEqual(1, piece_counts[p])
        self.assertEqual(32, piece_counts['-'])

    def test_simple_capture(self):
        """Unmaking a capture restores both the mover and the captured piece."""
        result = self.make_unmake_move('8/4k3/2r5/8/1N2K3/8/8/8 w - - 13 1', 'b4c6')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual(4, len(board))
        self.assertIn('b4', board)
        self.assertEqual('r', board['c6'])
        self.assertEqual('N', board['b4'])
        self.assertEqual(1, piece_counts['r'])
        self.assertEqual(1, piece_counts['N'])
        self.assertEqual(60, piece_counts['-'])
        self.assertEqual('0000000000000000', bitboards['n'])
        self.assertEqual('0000000002000000', bitboards['N'])
        self.assertEqual('FFEFFBFFEDFFFFFF', bitboards['-'])

    def test_castle_white_kingside(self):
        """Unmaking O-O restores the white king and h-rook.

        NOTE: these were previously two-argument assertTrue calls, which
        treat the second argument as a message and always pass.
        """
        result = self.make_unmake_move('4k3/8/8/8/8/8/8/4K2R w K - 0 1', 'e1g1')
        board = result['board']
        bitboards = result['bitboards']
        self.assertEqual('K', board['e1'])
        self.assertEqual('R', board['h1'])
        self.assertEqual('0000000000000080', bitboards['R'])
        self.assertEqual('0000000000000010', bitboards['K'])
        self.assertEqual('EFFFFFFFFFFFFF6F', bitboards['-'])
        self.assertTrue(result['castleWhiteKingside'])
        self.assertFalse(result['castleBlackKingside'])
        self.assertFalse(result['castleWhiteQueenside'])
        self.assertFalse(result['castleBlackQueenside'])
        self.assertEqual('white', result['toMove'])
        self.assertEqual('e8', result['blackKingSquare'])
        self.assertEqual('e1', result['whiteKingSquare'])

    def test_castle_white_queenside(self):
        """Unmaking O-O-O restores the white king and a-rook."""
        result = self.make_unmake_move('4k3/8/8/8/8/8/8/R3K3 w Q - 0 1', 'e1c1')
        board = result['board']
        bitboards = result['bitboards']
        self.assertEqual('K', board['e1'])
        self.assertEqual('R', board['a1'])
        self.assertEqual('0000000000000001', bitboards['R'])
        self.assertEqual('0000000000000010', bitboards['K'])
        self.assertEqual('EFFFFFFFFFFFFFEE', bitboards['-'])
        self.assertFalse(result['castleWhiteKingside'])
        self.assertFalse(result['castleBlackKingside'])
        self.assertTrue(result['castleWhiteQueenside'])
        self.assertFalse(result['castleBlackQueenside'])
        self.assertEqual('white', result['toMove'])
        self.assertEqual('e8', result['blackKingSquare'])
        self.assertEqual('e1', result['whiteKingSquare'])

    def test_castle_black_kingside(self):
        """Unmaking black O-O restores the black king and h-rook."""
        result = self.make_unmake_move('4k2r/8/8/8/8/8/8/4K3 b k - 0 1', 'e8g8')
        board = result['board']
        bitboards = result['bitboards']
        self.assertEqual('k', board['e8'])
        self.assertEqual('r', board['h8'])
        self.assertEqual('8000000000000000', bitboards['r'])
        self.assertEqual('1000000000000000', bitboards['k'])
        self.assertEqual('6FFFFFFFFFFFFFEF', bitboards['-'])
        self.assertFalse(result['castleWhiteKingside'])
        self.assertTrue(result['castleBlackKingside'])
        self.assertFalse(result['castleWhiteQueenside'])
        self.assertFalse(result['castleBlackQueenside'])
        self.assertEqual('black', result['toMove'])
        self.assertEqual('e8', result['blackKingSquare'])
        self.assertEqual('e1', result['whiteKingSquare'])

    def test_castle_black_queenside(self):
        """Unmaking black O-O-O restores the black king and a-rook."""
        result = self.make_unmake_move('r3k3/8/8/8/8/8/8/4K3 b q - 0 1', 'e8c8')
        board = result['board']
        bitboards = result['bitboards']
        self.assertEqual('k', board['e8'])
        self.assertEqual('r', board['a8'])
        self.assertEqual('0100000000000000', bitboards['r'])
        self.assertEqual('1000000000000000', bitboards['k'])
        self.assertEqual('EEFFFFFFFFFFFFEF', bitboards['-'])
        self.assertFalse(result['castleWhiteKingside'])
        self.assertFalse(result['castleBlackKingside'])
        self.assertFalse(result['castleWhiteQueenside'])
        self.assertTrue(result['castleBlackQueenside'])
        self.assertEqual('black', result['toMove'])
        self.assertEqual('e8', result['blackKingSquare'])
        self.assertEqual('e1', result['whiteKingSquare'])

    def test_white_promote_no_captures(self):
        """Unmaking a quiet white promotion restores the pawn, removes the queen."""
        result = self.make_unmake_move('4k3/1P6/8/8/8/8/8/4K3 w - - 0 1', 'b7b8=q')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('P', board['b7'])
        self.assertEqual(0, piece_counts['Q'])
        self.assertEqual(1, piece_counts['P'])
        self.assertEqual('0002000000000000', bitboards['P'])
        self.assertEqual('0000000000000000', bitboards['Q'])
        self.assertEqual('EFFDFFFFFFFFFFEF', bitboards['-'])

    def test_black_promote_no_captures(self):
        """Unmaking a quiet black promotion restores the pawn, removes the queen."""
        result = self.make_unmake_move('4k3/8/8/8/8/8/1p6/4K3 b - - 0 1', 'b2b1=q')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('p', board['b2'])
        self.assertEqual(0, piece_counts['q'])
        self.assertEqual(1, piece_counts['p'])
        self.assertEqual('0000000000000200', bitboards['p'])
        self.assertEqual('0000000000000000', bitboards['q'])
        self.assertEqual('EFFFFFFFFFFFFDEF', bitboards['-'])

    def test_white_promote_with_captures(self):
        """Unmaking a capturing white promotion also restores the captured bishop."""
        result = self.make_unmake_move('2b1k3/1P6/8/8/8/8/8/4K3 w KQkq - 0 1', 'b7c8=q')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('P', board['b7'])
        self.assertEqual(1, piece_counts['b'])
        self.assertEqual(0, piece_counts['Q'])
        self.assertEqual(1, piece_counts['P'])
        self.assertEqual('0400000000000000', bitboards['b'])
        self.assertEqual('0002000000000000', bitboards['P'])
        self.assertEqual('0000000000000000', bitboards['Q'])
        self.assertEqual('EBFDFFFFFFFFFFEF', bitboards['-'])

    def test_black_promote_with_captures(self):
        """Unmaking a capturing black promotion also restores the captured bishop."""
        result = self.make_unmake_move('4k3/8/8/8/8/8/1p6/2B1K3 b - - 0 1', 'b2c1=q')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('p', board['b2'])
        self.assertEqual(1, piece_counts['B'])
        self.assertEqual(0, piece_counts['q'])
        self.assertEqual(1, piece_counts['p'])
        self.assertEqual('0000000000000004', bitboards['B'])
        self.assertEqual('0000000000000200', bitboards['p'])
        self.assertEqual('0000000000000000', bitboards['q'])
        self.assertEqual('EFFFFFFFFFFFFDEB', bitboards['-'])

    def test_white_ep_capture(self):
        """Unmaking a white en-passant capture restores the bypassed black pawn."""
        result = self.make_unmake_move('4k3/8/8/4pP2/8/8/8/4K3 w KQkq e6 0 1', 'f5e6')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('p', board['e5'])
        self.assertEqual('P', board['f5'])
        self.assertNotIn('e6', board)
        self.assertEqual(1, piece_counts['p'])
        self.assertEqual(1, piece_counts['P'])
        self.assertEqual('e', result['epFile'])
        self.assertEqual('0000001000000000', bitboards['p'])
        self.assertEqual('0000002000000000', bitboards['P'])
        self.assertEqual('EFFFFFCFFFFFFFEF', bitboards['-'])

    def test_black_ep_capture(self):
        """Unmaking a black en-passant capture restores the bypassed white pawn."""
        result = self.make_unmake_move('4k3/8/8/8/4pP2/8/8/4K3 b KQkq f3 0 1', 'e4f3')
        board = result['board']
        bitboards = result['bitboards']
        piece_counts = result['pieceCounts']
        self.assertEqual('p', board['e4'])
        self.assertEqual('P', board['f4'])
        self.assertNotIn('f3', board)
        self.assertEqual(1, piece_counts['p'])
        self.assertEqual(1, piece_counts['P'])
        self.assertEqual('f', result['epFile'])
        self.assertEqual('0000000010000000', bitboards['p'])
        self.assertEqual('0000000020000000', bitboards['P'])
        self.assertEqual('EFFFFFFFCFFFFFEF', bitboards['-'])

    def test_black_ep_capture_bug(self):
        """Regression: black en-passant on a full board restores both pawns."""
        result = self.make_unmake_move('rnbqkbnr/1ppppp1p/6p1/8/pP2P3/5QP1/P1PP2PP/RNB1KBNR b KQkq b3 0 1', 'a4b3')
        board = result['board']
        piece_counts = result['pieceCounts']
        self.assertEqual('p', board['a4'])
        self.assertEqual('P', board['b4'])
        self.assertNotIn('b3', board)
        self.assertEqual(8, piece_counts['p'])
        self.assertEqual(8, piece_counts['P'])


if __name__ == '__main__':
    unittest.main()
# Class definition: # RunJobHopper # [Add description here] # Instances are generated with RunJobFactory via pUtil::getRunJob() # Implemented as a singleton class # http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern # Import relevant python/pilot modules from RunJobHPC import RunJobHPC # Parent RunJob class import os, sys, commands, time import traceback import atexit, signal import saga # Pilot modules import Site, pUtil, Job, Node, RunJobUtilities from pUtil import tolog, isAnalysisJob, readpar, getExperiment from FileStateClient import updateFileStates, dumpFileStates from ErrorDiagnosis import ErrorDiagnosis # import here to avoid issues seen at BU with missing module from PilotErrors import PilotErrors from datetime import datetime class RunJobHopper(RunJobHPC): # private data members __runjob = "RunJobHopper" # String defining the sub class __instance = None # Boolean used by subclasses to become a Singleton #__error = PilotErrors() # PilotErrors object # Required methods def __init__(self): """ Default initialization """ # e.g. self.__errorLabel = errorLabel pass def __new__(cls, *args, **kwargs): """ Override the __new__ method to make the class a singleton """ if not cls.__instance: cls.__instance = super(RunJobHPC, cls).__new__(cls, *args, **kwargs) return cls.__instance def getRunJob(self): """ Return a string with the execution class name """ return self.__runjob def getRunJobFileName(self): """ Return the filename of the module """ return super(RunJobHopper, self).getRunJobFileName() # def argumentParser(self): <-- see example in RunJob.py def allowLoopingJobKiller(self): """ Should the pilot search for looping jobs? """ # The pilot has the ability to monitor the payload work directory. If there are no updated files within a certain # time limit, the pilot will consider the as stuck (looping) and will kill it. The looping time limits are set # in environment.py (see e.g. 
loopingLimitDefaultProd) return False def get_backfill(self, partition, max_nodes = None): # Function collect information about current available resources and # return number of nodes with possible maximum value for walltime according Titan policy # cmd = 'showbf -p %s' % partition res_tuple = commands.getstatusoutput(cmd) showbf_str = "" if res_tuple[0] == 0: showbf_str = res_tuple[1] res = {} tolog ("Available resources in %s partition" % partition) tolog (showbf_str) if showbf_str: shobf_out = showbf_str.splitlines() tolog ("Fitted resources") for l in shobf_out[2:]: d = l.split() nodes = int(d[2]) if not d[3] == 'INFINITY': wal_time_arr = d[3].split(":") if len(wal_time_arr) < 4: wal_time_sec = int(wal_time_arr[0])*(60*60) + int(wal_time_arr[1])*60 + int(wal_time_arr[2]) if wal_time_sec > 24 * 3600: wal_time_sec = 24 * 3600 else: wal_time_sec = 24 * 3600 if nodes > 1: nodes = nodes - 1 else: wal_time_sec = 12 * 3600 # Fitting Hopper policy # https://www.nersc.gov/users/computational-systems/hopper/running-jobs/queues-and-policies/ nodes = max_nodes if nodes > max_nodes else nodes if nodes < 65 and wal_time_sec > 96 * 3600: wal_time_sec = 96 * 3600 elif nodes < 683 and wal_time_sec > 48 * 3600: wal_time_sec = 48 * 3600 elif nodes < 4097 and wal_time_sec > 36 * 3600: wal_time_sec = 36 * 3600 elif wal_time_sec > 12 * 3600: wal_time_sec = 12 * 3600 tolog ("Nodes: %s, Walltime (str): %s, Walltime (min) %s" % (nodes, d[3], wal_time_sec/60 )) res.update({nodes:wal_time_sec}) else: tolog ("No availble resources. 
Default values will be used.")
        return res

    def get_hpc_resources(self, partition, max_nodes = None, min_nodes = 1, min_walltime = 30):
        #
        # Function return number of nodes and walltime for submission
        #
        # Picks the largest backfill slot (from self.get_backfill) that can
        # satisfy min_walltime; otherwise falls back to min_nodes/min_walltime.
        # Returns a (nodes, walltime_minutes) tuple.
        nodes = min_nodes
        walltime = min_walltime
        backfill = self.get_backfill(partition, max_nodes)
        if backfill:
            # backfill maps node-count -> available seconds; walk from the
            # biggest node count down and take the first slot that fits.
            for n in sorted(backfill.keys(), reverse=True):
                if min_walltime <= backfill[n] and nodes <= n:
                    nodes = n
                    walltime = backfill[n] / 60
                    # keep a 2 minute safety margin below the backfill window
                    walltime = walltime - 2
                    break
        if walltime <= 0:
            # the safety margin pushed us to/below zero; fall back to minimums
            walltime = min_walltime
            nodes = min_nodes
        return nodes, walltime

    def jobStateChangeNotification(self, src_obj, fire_on, value):
        # SAGA callback fired on job state transitions; returning True keeps
        # the callback registered for subsequent state changes.
        tolog("Job state changed to '%s'" % value)
        return True

    def executePayload(self, thisExperiment, runCommandList, job, repeat_num = 0):
        """ execute the payload """
        # Submits each run command as a PBS batch job through SAGA (aprun on
        # Cray), waiting up to self.waittime minutes for it to leave PENDING;
        # a still-pending job is cancelled and resubmitted (recursively, at
        # most 10 attempts via repeat_num).
        # Returns (res, job, getstatusoutput_was_interrupted, current_job_number).

        t0 = os.times()
        res_tuple = (0, 'Undefined')

        # special setup command. should be placed in queue defenition (or job defenition) ?
        setup_commands = ['source $MODULESHOME/init/bash',
                          'export LD_LIBRARY_PATH=/opt/cray/lib64:$LD_LIBRARY_PATH',
                          'export CRAY_ROOTFS=DSL',
                          'module load python']

        # loop over all run commands (only >1 for multi-trfs)
        current_job_number = 0
        getstatusoutput_was_interrupted = False
        number_of_jobs = len(runCommandList)
        for cmd in runCommandList:
            nodes, walltime = self.get_hpc_resources(self.partition_comp, self.max_nodes, self.nodes, self.min_walltime)
            cpu_number = self.cpu_number_per_node * nodes
            tolog("Launch parameters \nWalltime limit : %s (min)\nRequested nodes (cores): %s (%s)" % (walltime,nodes,cpu_number))
            current_job_number += 1
            try:
                # add the full job command to the job_setup.sh file
                to_script = "\n".join(cmd['environment'])
                to_script = to_script + ("\nexport G4FORCENUMBEROFTHREADS=%s" % self.number_of_threads) # needed for GEANT
                to_script = to_script + "\n" + "\n".join(setup_commands)
                # aprun: -n = number of MPI ranks, -d = threads per rank
                to_script = "%s\naprun -n %s -d %s %s %s" % (to_script, cpu_number/self.number_of_threads, self.number_of_threads ,cmd["payload"], cmd["parameters"])
                thisExperiment.updateJobSetupScript(job.workdir, to_script=to_script)

                # Simple SAGA fork variant
                tolog("******* SAGA call to execute payload *********")
                try:
                    js = saga.job.Service("pbs://localhost")
                    jd = saga.job.Description()
                    if self.project_id:
                        jd.project = self.project_id # should be taken from resourse description (pandaqueue)
                    jd.wall_time_limit = walltime
                    jd.executable = to_script
                    jd.total_cpu_count = cpu_number
                    jd.output = job.stdout
                    jd.error = job.stderr
                    jd.queue = self.executed_queue # should be taken from resourse description (pandaqueue)
                    jd.working_directory = job.workdir
                    fork_job = js.create_job(jd)
                    fork_job.add_callback(saga.STATE, self.jobStateChangeNotification)
                    #tolog("\n(PBS) Command: %s\n" % to_script)
                    fork_job.run()
                    #tolog("\nCommand was started at %s.\nState is: %s" % ( str(datetime.now()), fork_job.state))
                    tolog("Local Job ID: %s" % fork_job.id)
                    # poll once a second for up to self.waittime minutes,
                    # waiting for the batch job to leave the PENDING state
                    for i in range(self.waittime * 60):
                        time.sleep(1)
                        if fork_job.state != saga.job.PENDING:
                            break
                    if fork_job.state == saga.job.PENDING:
                        # never started within the wait window: cancel and retry
                        repeat_num = repeat_num + 1
                        tolog("Wait time (%s minutes) exceed, job will be rescheduled (%s)" % (self.waittime, repeat_num))
                        fork_job.cancel()
                        fork_job.wait()
                        if repeat_num < 10:
                            return self.executePayload(thisExperiment, runCommandList, job, repeat_num)
                    fork_job.wait()
                    tolog("Job State : %s" % (fork_job.state))
                    tolog("Exitcode : %s" % (fork_job.exit_code))
                    tolog("Create time : %s" % (fork_job.created))
                    tolog("Start time : %s" % (fork_job.started))
                    tolog("End time : %s" % (fork_job.finished))
                    tolog("Walltime limit : %s (min)" % (jd.wall_time_limit))
                    tolog("Allocated nodes (cores): %s (%s)" % (nodes,cpu_number))
                    # SAGA reports timestamps as ctime-style strings; '%c' parses them
                    cons_time = datetime.strptime(fork_job.finished, '%c') - datetime.strptime(fork_job.started, '%c')
                    cons_time_sec = (cons_time.microseconds + (cons_time.seconds + cons_time.days * 24 * 3600) * 10**6) / 10**6
                    tolog("Execution time : %s (sec. %s)" % (str(cons_time), cons_time_sec))
                    #job.timeExe = int(fork_job.finished - fork_job.started)
                    res_tuple = (fork_job.exit_code, "Look into: %s" % job.stdout)
                    ####################################################
                except saga.SagaException, ex:
                    # Catch all saga exceptions
                    tolog("An exception occured: (%s) %s " % (ex.type, (str(ex))))
                    # Trace back the exception. That can be helpful for debugging.
                    tolog(" \n*** Backtrace:\n %s" % ex.traceback)
                    break
                tolog("**********************************************")
                tolog("******* SAGA call finished *******************")
                tolog("**********************************************")
            except Exception, e:
                tolog("!!FAILED!!3000!! Failed to run command %s" % str(e))
                getstatusoutput_was_interrupted = True
                if self.getFailureCode():
                    job.result[2] = self.getFailureCode()
                    tolog("!!FAILED!!3000!! Failure code: %d" % (self.getFailureCode()))
                break
            if res_tuple[0] == 0:
                tolog("Job command %d/%d finished" % (current_job_number, number_of_jobs))
            else:
                # one trf failed; do not run the remaining commands
                tolog("Job command %d/%d failed: res = %s" % (current_job_number, number_of_jobs, str(res_tuple)))
                break

        t1 = os.times()
        t = map(lambda x, y:x-y, t1, t0) # get the time consumed
        job.cpuConsumptionUnit, job.cpuConsumptionTime, job.cpuConversionFactor = pUtil.setTimeConsumed(t)
        tolog("Job CPU usage: %s %s" % (job.cpuConsumptionTime, job.cpuConsumptionUnit))
        tolog("Job CPU conversion factor: %1.10f" % (job.cpuConversionFactor))
        job.timeExe = int(round(t1[4] - t0[4]))

        tolog("Original exit code: %s" % (res_tuple[0]))
        if res_tuple[0] != None:
            # NOTE(review): % 255 maps exit code 255 to 0; % 256 (or & 255) is
            # the conventional reduction -- TODO confirm intended behavior
            tolog("Exit code: %s (returned from OS)" % (res_tuple[0]%255))
            res0, exitAcronym, exitMsg = self.getTrfExitInfo(res_tuple[0], job.workdir)
        else:
            tolog("Exit code: None (returned from OS, Job was canceled)")
            res0 = None
            exitMsg = "Job was canceled by internal call"

        # check the job report for any exit code that should replace the res_tuple[0]
        res = (res0, res_tuple[1], exitMsg)

        # dump an extract of the payload output
        if number_of_jobs > 1:
            _stdout = job.stdout
            _stderr = job.stderr
            _stdout = _stdout.replace(".txt", "_N.txt")
            _stderr = _stderr.replace(".txt", "_N.txt")
            tolog("NOTE: For %s output, see files %s, %s (N = [1, %d])" % (job.payload, _stdout, _stderr, number_of_jobs))
        else:
            tolog("NOTE: For %s output, see files %s, %s" % (job.payload, job.stdout, job.stderr))

        # JEM job-end callback
        try:
            from JEMstub import notifyJobEnd2JEM
            notifyJobEnd2JEM(job, tolog)
        except:
            pass # don't care (fire and forget)

        return res, job, getstatusoutput_was_interrupted, current_job_number

if __name__ == "__main__":
    # Pilot driver: set up Edison/Hopper HPC parameters, stage-in, execute
    # the payload through SAGA, stage-out, and report state to the pilot
    # server at each step. Any uncaught exception falls through to the
    # outer handler, which fails the job with ERR_RUNJOBEXC.

    tolog("Starting RunJobHopper")

    # Get error handler
    error = PilotErrors()

    # Get runJob object
    runJob = RunJobHopper()

    # Setup HPC specific parameters for Edison
    runJob.cpu_number_per_node = 24
    runJob.walltime = 120
    runJob.max_nodes = 10
    runJob.number_of_threads = 1
    runJob.min_walltime = 10 # minutes
    runJob.waittime = 15 # minutes
    runJob.nodes = 2
    runJob.partition_comp = 'hopper'
    runJob.project_id = ""
    runJob.executed_queue = readpar('localqueue')

    # Define a new parent group
    os.setpgrp()

    # Protect the runJob code with exception handling
    hP_ret = False
    try:
        # always use this filename as the new jobDef module name
        import newJobDef

        jobSite = Site.Site()
        return_tuple = runJob.argumentParser()
        tolog("argumentParser returned: %s" % str(return_tuple))
        jobSite.setSiteInfo(return_tuple)
        # jobSite.setSiteInfo(argParser(sys.argv[1:]))

        # reassign workdir for this job
        jobSite.workdir = jobSite.wntmpdir

        if runJob.getPilotLogFilename() != "":
            pUtil.setPilotlogFilename(runJob.getPilotLogFilename())

        # set node info
        node = Node.Node()
        node.setNodeName(os.uname()[1])
        node.collectWNInfo(jobSite.workdir)

        # redirect stder
        sys.stderr = open("%s/runjob.stderr" % (jobSite.workdir), "w")

        tolog("Current job workdir is: %s" % os.getcwd())
        tolog("Site workdir is: %s" % jobSite.workdir)

        # get the experiment object
        thisExperiment = getExperiment(runJob.getExperiment())
        tolog("RunJob will serve experiment: %s" % (thisExperiment.getExperiment()))

        # set the cache (used e.g. by LSST)
        #if runJob.getCache():
        #    thisExperiment.setCache(runJob.getCache())

        #JR = JobRecovery()
        try:
            job = Job.Job()
            job.setJobDef(newJobDef.job)
            job.workdir = jobSite.workdir
            job.experiment = runJob.getExperiment()
            # figure out and set payload file names
            job.setPayloadName(thisExperiment.getPayloadName(job))
        except Exception, e:
            pilotErrorDiag = "Failed to process job info: %s" % str(e)
            tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
            runJob.failJob(0, error.ERR_UNKNOWN, job, pilotErrorDiag=pilotErrorDiag)

        # prepare for the output file data directory
        # (will only created for jobs that end up in a 'holding' state)
        job.datadir = runJob.getParentWorkDir() + "/PandaJob_%s_data" % (job.jobId)

        # register cleanup function
        atexit.register(runJob.cleanup, job)

        # to trigger an exception so that the SIGTERM signal can trigger cleanup function to run
        # because by default signal terminates process without cleanup.
        def sig2exc(sig, frm):
            """ signal handler """
            # Maps the received signal to a pilot error code and raises
            # SystemError so the outer except block performs the cleanup.
            error = PilotErrors()
            runJob.setGlobalPilotErrorDiag("!!FAILED!!3000!! SIGTERM Signal %s is caught in child pid=%d!\n" % (sig, os.getpid()))
            tolog(runJob.getGlobalPilotErrorDiag())
            if sig == signal.SIGTERM:
                runJob.setGlobalErrorCode(error.ERR_SIGTERM)
            elif sig == signal.SIGQUIT:
                runJob.setGlobalErrorCode(error.ERR_SIGQUIT)
            elif sig == signal.SIGSEGV:
                runJob.setGlobalErrorCode(error.ERR_SIGSEGV)
            elif sig == signal.SIGXCPU:
                runJob.setGlobalErrorCode(error.ERR_SIGXCPU)
            elif sig == signal.SIGBUS:
                runJob.setGlobalErrorCode(error.ERR_SIGBUS)
            elif sig == signal.SIGUSR1:
                runJob.setGlobalErrorCode(error.ERR_SIGUSR1)
            else:
                runJob.setGlobalErrorCode(error.ERR_KILLSIGNAL)
            # NOTE(review): passes the bound method itself, not its result --
            # looks like a missing call: runJob.getGlobalErrorCode(). TODO confirm.
            runJob.setFailureCode(runJob.getGlobalErrorCode)
            # print to stderr
            print >> sys.stderr, runJob.getGlobalPilotErrorDiag()
            raise SystemError(sig)

        signal.signal(signal.SIGTERM, sig2exc)
        signal.signal(signal.SIGQUIT, sig2exc)
        signal.signal(signal.SIGSEGV, sig2exc)
        signal.signal(signal.SIGXCPU, sig2exc)
        signal.signal(signal.SIGBUS, sig2exc)

        # see if it's an analysis job or not
        analysisJob = isAnalysisJob(job.trf.split(",")[0])
        if analysisJob:
            tolog("User analysis job")
        else:
            tolog("Production job")
        tolog("runJob received a job with prodSourceLabel=%s" % (job.prodSourceLabel))

        # setup starts here ................................................................................

        # update the job state file
        job.jobState = "setup"
        #_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # send [especially] the process group back to the pilot
        job.setState([job.jobState, 0, 0])
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # prepare the setup and get the run command list
        ec, runCommandList, job, multi_trf = runJob.setup(job, jobSite, thisExperiment)
        if ec != 0:
            tolog("!!WARNING!!2999!! runJob setup failed: %s" % (job.pilotErrorDiag))
            runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
        tolog("Setup has finished successfully")

        # job has been updated, display it again
        job.displayJob()

        # (setup ends here) ................................................................................

        tolog("Setting stage-in state until all input files have been copied")
        job.setState(["stagein", 0, 0])
        # send the special setup string back to the pilot (needed for the log transfer on xrdcp systems)
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # stage-in .........................................................................................

        # update the job state file
        job.jobState = "stagein"
        #_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # update copysetup[in] for production jobs if brokerage has decided that remote I/O should be used
        if job.transferType == 'direct':
            tolog('Brokerage has set transfer type to \"%s\" (remote I/O will be attempted for input files, any special access mode will be ignored)' %\
                  (job.transferType))
            RunJobUtilities.updateCopysetups('', transferType=job.transferType)

        # stage-in all input files (if necessary)
        job, ins, statusPFCTurl, usedFAXandDirectIO = runJob.stageIn(job, jobSite, analysisJob)
        if job.result[2] != 0:
            # NOTE(review): logs 'ec' (setup return code) rather than the
            # stage-in error job.result[2] -- TODO confirm intended
            tolog("Failing job with ec: %d" % (ec))
            runJob.failJob(0, job.result[2], job, ins=ins, pilotErrorDiag=job.pilotErrorDiag)

        # after stageIn, all file transfer modes are known (copy_to_scratch, file_stager, remote_io)
        # consult the FileState file dictionary if cmd3 should be updated (--directIn should not be set if all
        # remote_io modes have been changed to copy_to_scratch as can happen with ByteStream files)
        # and update the run command list if necessary.
        # in addition to the above, if FAX is used as a primary site mover and direct access is enabled, then
        # the run command should not contain the --oldPrefix, --newPrefix options but use --usePFCTurl
        hasInput = job.inFiles != ['']
        if hasInput:
            runCommandList = RunJobUtilities.updateRunCommandList(runCommandList, runJob.getParentWorkDir(), job.jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO, hasInput, job.prodDBlockToken)

        # (stage-in ends here) .............................................................................

        # change to running state since all input files have been staged
        tolog("Changing to running state since all input files have been staged")
        job.setState(["running", 0, 0])
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # update the job state file
        job.jobState = "running"
        #_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # run the job(s) ...................................................................................

        # Set ATLAS_CONDDB if necessary, and other env vars
        RunJobUtilities.setEnvVars(jobSite.sitename)

        # execute the payload
        res, job, getstatusoutput_was_interrupted, current_job_number = runJob.executePayload(thisExperiment, runCommandList, job)

        # if payload leaves the input files, delete them explicitly
        if ins:
            ec = pUtil.removeFiles(job.workdir, ins)

        # payload error handling
        ed = ErrorDiagnosis()
        if res[0] == None:
            # a None exit code means the batch job was cancelled (see executePayload)
            job.jobState = "cancelled"
            job.setState(["cancelled", 0, 0])
            rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())
        else:
            job = ed.interpretPayload(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, runJob.getFailureCode())
        if job.result[1] != 0 or job.result[2] != 0:
            runJob.failJob(job.result[1], job.result[2], job, pilotErrorDiag=job.pilotErrorDiag)

        # stage-out ........................................................................................

        # update the job state file
        tolog(runJob.getOutputDir())

        job.jobState = "stageout"
        #_retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # verify and prepare and the output files for transfer
        ec, pilotErrorDiag, outs, outsDict = RunJobUtilities.prepareOutFiles(job.outFiles, job.logFile, job.workdir)
        if ec:
            # missing output file (only error code from prepareOutFiles)
            runJob.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)
        tolog("outsDict: %s" % str(outsDict))

        # update the current file states
        updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="created")
        dumpFileStates(runJob.getParentWorkDir(), job.jobId)

        # create xml string to pass to dispatcher for atlas jobs
        outputFileInfo = {}
        if outs or (job.logFile and job.logFile != ''):
            # get the datasets for the output files
            dsname, datasetDict = runJob.getDatasets(job)

            # re-create the metadata.xml file, putting guids of ALL output files into it.
            # output files that miss guids from the job itself will get guids in PFCxml function

            # first rename and copy the trf metadata file for non-build jobs
            if not pUtil.isBuildJob(outs):
                runJob.moveTrfMetadata(job.workdir, job.jobId)

            # create the metadata for the output + log files
            ec, job, outputFileInfo = runJob.createFileMetadata(list(outs), job, outsDict, dsname, datasetDict, jobSite.sitename, analysisJob=analysisJob)
            if ec:
                runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)

        # move output files from workdir to local DDM area
        finalUpdateDone = False
        if outs:
            tolog("Setting stage-out state until all output files have been copied")
            job.setState(["stageout", 0, 0])
            rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

            # stage-out output files
            ec, job, rf, latereg = runJob.stageOut(job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo)
            # error handling
            if job.result[0] == "finished" or ec == error.ERR_PUTFUNCNOCALL:
                rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
            else:
                rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True, latereg=latereg)
            if ec == error.ERR_NOSTORAGE:
                # update the current file states for all files since nothing could be transferred
                updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="not_transferred")
                dumpFileStates(runJob.getParentWorkDir(), job.jobId)

            finalUpdateDone = True
            if ec != 0:
                runJob.sysExit(job, rf)
            # (stage-out ends here) .......................................................................

        job.setState(["finished", 0, 0])
        if not finalUpdateDone:
            rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
        runJob.sysExit(job)

    except Exception, errorMsg:
        # last-resort handler: build a diagnostic, dump directory listings
        # for debugging, and fail the job with the best available error code

        error = PilotErrors()

        if runJob.getGlobalPilotErrorDiag() != "":
            pilotErrorDiag = "Exception caught in runJobHopper: %s" % (runJob.getGlobalPilotErrorDiag())
        else:
            pilotErrorDiag = "Exception caught in runJobHopper: %s" % str(errorMsg)

        if 'format_exc' in traceback.__all__:
            pilotErrorDiag += ", " + traceback.format_exc()

        try:
            tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
        except Exception, e:
            # tolog itself failed (e.g. message too large); truncate and retry
            if len(pilotErrorDiag) > 10000:
                pilotErrorDiag = pilotErrorDiag[:10000]
                tolog("!!FAILED!!3001!! Truncated (%s): %s" % (e, pilotErrorDiag))
            else:
                pilotErrorDiag = "Exception caught in runJob: %s" % (e)
                tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))

        # # restore the proxy if necessary
        # if hP_ret:
        #     rP_ret = proxyguard.restoreProxy()
        #     if not rP_ret:
        #         tolog("Warning: Problems with storage can occur since proxy could not be restored")
        #     else:
        #         hP_ret = False
        #         tolog("ProxyGuard has finished successfully")

        tolog("sys.path=%s" % str(sys.path))
        cmd = "pwd;ls -lF %s;ls -lF;ls -lF .." % (runJob.getPilotInitDir())
        tolog("Executing command: %s" % (cmd))
        out = commands.getoutput(cmd)
        tolog("%s" % (out))

        job = Job.Job()
        job.setJobDef(newJobDef.job)
        job.pilotErrorDiag = pilotErrorDiag
        job.result[0] = "failed"
        if runJob.getGlobalErrorCode() != 0:
            job.result[2] = runJob.getGlobalErrorCode()
        else:
            job.result[2] = error.ERR_RUNJOBEXC
        tolog("Failing job with error code: %d" % (job.result[2]))
        # fail the job without calling sysExit/cleanup (will be called anyway)
        runJob.failJob(0, job.result[2], job, pilotErrorDiag=pilotErrorDiag, docleanup=False)
import lacuna, lacuna.exceptions, lacuna.binutils.libbin
import argparse, operator, os, sys
from enum import Enum

class BodyCache():
    """ We don't want to use a request cache for any part of send_excavs, but
    we do want to keep track of stations, stars, and bodies that we know are
    no good for whatever reason.
    """
    def __init__(self):
        # name -> True maps; presence of a name means "known bad"
        self.stations = {}
        self.stars = {}
        self.planets = {}

    def is_bad(self, body_name, type = 'planet'):
        """ Checks to see if a given body is known to be bad.

        Arguments:
            - body_name -- Str name of the body to mark as bad.
            - type -- Str type of body. Either 'star', 'station', or 'planet'.
              Defaults to 'planet'.

        Returns a boolean. True if the body is known to be bad.

        NOTE(review): 'type' is ignored here; all three caches are consulted
        regardless, so a star and a planet with the same name collide -- TODO
        confirm that is acceptable.
        """
        if body_name in self.stations:
            return True
        elif body_name in self.stars:
            return True
        elif body_name in self.planets:
            return True
        return False

    def mark_as_bad(self, body_name, type = 'planet'):
        """ Marks a given body as 'bad' in the cache.

        Arguments:
            - body_name -- Str name of the body to mark as bad.
            - type -- Str type of body. Either 'star', 'station', or 'planet'.
              Defaults to 'planet'.

        Raises Exception on an unrecognized type.

        NOTE(review): always returns False on success; no caller in this file
        uses the return value.
        """
        if type == 'planet':
            self.planets[body_name] = True
        elif type == 'station':
            self.stations[body_name] = True
        elif type == 'star':
            self.stars[body_name] = True
        else:
            raise Exception("{}: illegal type.".format(type))
        return False

class SendExcavs(lacuna.binutils.libbin.Script):
    """
    Attributes::

        ally            The user's lacuna.alliance.MyAlliance object, or False if
                        the user is not in an alliance.
        ally_members    List of Strings; the names of the members of my alliance.
                        Used to determine if an inhabited planet is hostile or not.
        arch            The Archaeology Ministry on self.planet.
        args            Command-line arguments set by the user; the result of
                        self.parser.parse_args()
        body_cache      A libsend_excavs.BodyCache object.
        cell_number     The cell we're working on. Starts at 1.
        client          lacuna.clients.Member object
        excav_sites     A list of lacuna.ship.Excavator objects. Starts as an
                        empty list, set to the correct value by set_excav_count().
                        Does not include the current planet in the list.
        map             lacuna.map.Map object
        num_excavs      Number of excavs to be sent from self.planet. Starts
                        at 0, set to the correct value by set_excav_count().
        parser          argparse.ArgumentParser object
        planet          lacuna.body.MyBody object for the planet name passed
                        at the command line.
        ring_offset     The ring offset we're working on. Starts at 0.
        sp              A Space Port on self.planet (doesn't matter which one).
        travelling      Dict of bodies we have excavators on their way to right
                        now. { destination_body_id: 1 }
        version         '0.1'
    """

    def __init__( self, testargs:dict = {} ):
        # NOTE(review): mutable default for 'testargs' -- safe only if the
        # superclass never mutates it; TODO confirm.
        self.version = '0.1'
        parser = argparse.ArgumentParser(
            description = ''' Sends available excavators out to nearby planets of the requested type or types. ''',
            epilog = 'Full docs can be found at http://tmtowtdi.github.io/MontyLacuna/scripts/send_excavs.html',
        )
        parser.add_argument( 'name',
            metavar = '<planet>',
            action = 'store',
            help = "The planet from which to send excavators. Enter 'all' to send from all of your planets."
        )
        parser.add_argument( '-t', '--t',
            dest = 'ptypes',
            metavar = '<ptype>',
            action = 'append',
            choices = [ 'p'+ str(i) for i in range(1,41) ],
            required = 1,
            help = 'The types of planets to send excavators towards. You can include multiple planets by repeating "-t <ptype>" for each type you want to send to. Defaults to any planet type (so you probably want to specify this).'
        )
        parser.add_argument( '--max_ring',
            metavar = '<max_ring>',
            action = 'store',
            type = int,
            default = 3,
            help = "Each 'ring' represents a 54 unit square ring around the original planet. The bigger max_ring is, the farther away we'll send excavators. Defaults to 3."
        )
        parser.add_argument( '--max_send',
            metavar = '<max_send>',
            action = 'store',
            type = int,
            default = 99,
            help = "Will send this number of excavators, maximum. If you want to send an even number of excavators to, say, p11 and p12 planets, run this program once for each type, with a max_send of 10 for each."
        )
        super().__init__( parser, testargs = testargs )

        self.ally         = None
        self.ally_members = []
        self.body_cache   = BodyCache()
        self.excav_sites  = []
        self.num_excavs   = 0
        self.planets      = []
        self.travelling   = {}
        self._set_alliance()
        # NOTE(review): set_planets() is not defined in this part of the file
        # (only set_planet() is visible) -- confirm it exists elsewhere.
        self.set_planets()
        self.map = self.client.get_map()

    def _set_alliance(self):
        # Look up the user's alliance; populate member names if one exists.
        self.client.user_logger.debug( "Getting user's alliance." )
        self.ally = self.client.get_my_alliance()
        if self.ally:
            self._set_alliance_members()

    def _set_alliance_members(self):
        # Record alliance member names for the hostile-colony check.
        for m in self.ally.members:
            self.ally_members.append( m.name )

    def set_planet( self, pname:str ):
        # Switch the script's working planet to 'pname': look up the body,
        # its Arch Min and Space Port, and compute how many excavators we
        # can still send from it.
        self.client.user_logger.info( "Sending excavs from " + pname + "." )
        self.planet = self.client.get_body_byname( pname )
        self.ring   = Ring(self.planet, 0)
        self.client.user_logger.debug( "Getting Arch Min for {}.".format(pname) )
        self.arch = self.planet.get_buildings_bytype( 'archaeology', 1, 1, 100 )[0]

        ### Get the number of excav slots before doing anything else - if we
        ### have no available slots left, there's no need to continue.
        excav_sites, excav_max, num_travelling = self.arch.view_excavators()
        self.excav_sites = excav_sites[1:]  # Omit the first excav site; it's our current planet.
        self.num_excavs = (excav_max - (len(self.excav_sites) + num_travelling) )
        self.client.user_logger.info( "Arch min has {} slots available.".format(self.num_excavs) )
        if self.num_excavs <= 0:
            return
        self.client.user_logger.debug( "Getting Space Port." )
        self.sp = self.planet.get_buildings_bytype( 'spaceport', 1, 1, 100 )[0]
        self.client.user_logger.debug( "Making note of travelling excavs." )
        self.note_travelling_excavators()
        self.client.user_logger.debug( "Getting usable excav count." )
        self.set_excav_count()

    def get_ready_excavators(self):
        """ Returns the number of excavators onsite that have completed building """
        paging = {}
        filter = {'type': 'excavator'}
        ships, excavs = self.sp.view_all_ships( paging, filter )
        excavs_built = []
        for i in ships:
            if i.task == 'Docked':
                excavs_built.append(i)
        return len(excavs_built)

    def note_travelling_excavators(self):
        """ Makes a note of any planets we currently have excavators travelling to.
        Returns nothing, but sets self.travelling.
        """
        paging = {}
        filter = {'task': 'Travelling'}
        travelling_ships, travelling_count = self.sp.view_all_ships( paging, filter )
        for s in travelling_ships:
            if s.type == 'excavator':
                self.travelling[ s.to.id ] = 1

    def set_excav_count( self ):
        """ Set the number of excavs this planet is able to send right now.
        Returns nothing, but sets ``self.num_excavs``.
        """
        self.client.cache_off() # we need fresh data for this method

        ### Get count of built and ready excavators onsite
        num_excavs_ready = self.get_ready_excavators()

        ### If we have fewer excavators ready to go than the Arch Min has
        ### slots available, shorten self.num_excavs.
        self.client.user_logger.debug( "Space port has {} excavators ready.".format(num_excavs_ready) )
        if num_excavs_ready < self.num_excavs:
            self.num_excavs = num_excavs_ready
        self.client.user_logger.info( "We're ready to send out {} more excavators.".format(self.num_excavs) )

        ### Last, if the user specified a max_send, make sure we're limiting
        ### the number to send this run to the user's spec.
        if int(self.num_excavs) > int(self.args.max_send):
            self.client.user_logger.debug( "We have more excavators ready than you wanted to use - limiting to your spec." )
            self.num_excavs = self.args.max_send

    def get_map_square( self ):
        """ Gets a list of stars in the next map square.

        ``self.ring`` keeps track of which map square is "next", so no
        arguments need be passed in.

        Returns a list of :class:`lacuna.map.Star` objects.

        NOTE(review): when max_ring is exceeded this returns None (bare
        return), while an out-of-bounds cell returns [] -- callers must
        handle both; TODO confirm intended.
        """
        ### Get the next cell in our current ring. If we've exhausted our
        ### current ring, move out one more ring.
        req_cell = self.ring.get_next_cell()
        if not req_cell:
            next_offset = self.ring.ring_offset + 1
            if next_offset > int(self.args.max_ring):
                self.client.user_logger.debug( "We've checked out to our max_ring; done." )
                self.num_excavs = 0
                return
            self.client.user_logger.debug( "Moving out to ring number {}.".format(next_offset) )
            self.ring = Ring( self.planet, next_offset )
            req_cell = self.ring.get_next_cell()

        self.client.user_logger.debug( "Checking cell number {}. Top {}, bottom {}, left {}, right {}."
            .format(self.ring.this_cell_number, req_cell.top, req_cell.bottom, req_cell.left, req_cell.right) )

        # NOTE(review): the left/right tests flag partially out-of-bounds
        # cells, unlike the top/bottom tests (which require the whole cell to
        # be past the edge) -- asymmetry may be intentional; TODO confirm.
        if req_cell.top <= -1500 or req_cell.bottom >= 1500 or req_cell.left <= -1500 or req_cell.right >= 1500:
            self.client.user_logger.debug( "This cell is out of bounds." )
            return []

        star_list = self.map.get_star_map({ 'top': req_cell.top, 'right': req_cell.right, 'bottom': req_cell.bottom, 'left': req_cell.left })
        ### The order of the stars returned doesn't really matter too much,
        ### but having them sorted makes it easier to debug.
        return sorted( star_list, key=operator.attrgetter('x', 'y') )

    def star_seizure_forbids_excav( self, star ):
        """ Lets you know if the laws affecting a star forbid you from sending
        excavators to that star's planets.

        Arguments:
            - star -- lacuna.map.Star object

        Returns True if the star's laws forbids you from excavating its
        planets, False otherwise.
        """
        if not hasattr(star, 'station'):
            # unseized star: no laws can apply
            return False
        if self.body_cache.is_bad(star.station.name, 'station'):
            self.client.user_logger.debug("This star's station has already been found to have MO Excav law turned on." )
            return True
        if self.ally:
            if star.station.alliance.id == self.ally.id:
                self.client.user_logger.debug("This star has been seized, but it's by my alliance." )
                return False
        laws = star.view_nonseizure_laws()
        for l in laws:
            if l.name == 'Members Only Excavation':
                self.client.user_logger.debug("Whoops - this star has MO Excav law turned on, and not by my alliance. Skipping." )
                self.body_cache.mark_as_bad(star.station.name, 'station')
                return True
        return False

    def send_excavs_to_bodies_orbiting(self, stars:list):
        """ Sends excavators to the bodies around each star in a list, provided
        each body is of one of the requested types.

        Arguments:
            - stars -- list of lacuna.map.Star objects

        Returns the number of excavators sent.
        """
        cnt = 0
        for s in stars:
            self.client.user_logger.info("Checking on star '{}'." .format(s.name) )
            if self.body_cache.is_bad(s.name, 'star'):
                self.client.user_logger.debug("We've already discovered that the star {} is no good. Skipping." .format(s.name))
                continue
            if self.star_seizure_forbids_excav( s ):
                self.client.user_logger.debug("Station {} has MO Excav law on. Skipping." .format(s.station.name) )
                self.body_cache.mark_as_bad(s.name, 'star')
                continue
            if self.system_contains_hostiles( s ):
                self.client.user_logger.debug("Star {} has at least one hostile colony that would shoot down our excav. Skipping." .format(s.name))
                self.body_cache.mark_as_bad(s.name, 'star')
                continue
            cnt += self.send_excavs_to_bodies( s, s.bodies )
            if self.num_excavs <= 0:
                self.client.user_logger.debug("We're out of excavators (or slots) so can't send out any more. Done on {}." .format(self.planet.name))
                return cnt
        return cnt

    def system_contains_hostiles( self, star ):
        """ Checks if any of the planets orbiting a star is owned by a hostile
        empire.

        Arguments:
            - star -- lacuna.Map.Star object

        Returns True if there's a hostile colony orbiting the star, False
        otherwise. Any inhabited body whose owner is not in ally_members
        counts as hostile, and the whole star is cached as bad.
        """
        if not hasattr(star, 'bodies'):
            # Pretty rare, but possible, I guess.
            return False
        for b in star.bodies:
            if not hasattr(b, 'empire'):
                continue
            if b.empire.name in self.ally_members:
                continue
            else:
                self.body_cache.mark_as_bad(star.name, 'star')
                return True
        return False

    def send_excavs_to_bodies(self, star, bodies:list):
        """ Tries to send an excavator to each body in a list of bodies,
        provided each body is of one of the requested types.

        Arguments:
            - bodies -- list of body.Body objects

        Returns the integer count of excavators sent.
        """
        cnt = 0
        for b in bodies:
            cnt += self.send_excav_to_matching_body(star, b)
            if self.body_cache.is_bad(star.name, 'star'):
                ### send_excav_to_matching_body() found that a planet in this
                ### system is inhabited, so the whole star is bad. No need to
                ### continue checking this system.
                return cnt
            if self.num_excavs <= 0:
                return cnt
        return cnt

    def get_available_excav_for( self, target:dict ):
        """ Finds a single excavator to be sent to target.

        Arguments:
            target (dict): :ref:`gloss_target`

        Returns:
            excavator (lacuna.ship.ExistingShip): Excavator ready to send to
            the target, or False if no available excavators could be found.
        """
        avail = self.sp.get_task_ships_for( target, 'available' )
        for s in avail:
            if s.type == 'excavator':
                return s
        return False

    def send_excav_to_matching_body(self, star, body):
        """ Tries to send an excavator to a body.

        Arguments:
            - body -- A lacuna.body.Body object

        If the body is not one of the requested types, or we already have an
        excavator there, or the body's star is seized by another alliance's
        Space Station and it has Members Only Excavation law set, this will
        fail to send an excavator and return 0.

        If everything works out, this will send an excavator, decrement
        self.num_excavs, and return 1.
        """
        if self.body_cache.is_bad(body.name, 'planet'):
            self.client.user_logger.debug("A previous check showed that {} is no good. Next!" .format(body.name) )
            return 0
        if hasattr(body, 'empire'):
            self.client.user_logger.debug("Planet {} ({},{}) is inhabited. Next!" .format(body.name, body.x, body.y) )
            ### A body in the system is inhabited; this makes the entire
            ### system bad, so mark the star, not just the body.
            self.body_cache.mark_as_bad(star.name, 'star')
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        if body.id in self.travelling:
            self.client.user_logger.info("We already have an excav on the way to {}. Next!" .format(body.name) )
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        if body.type != 'habitable planet':
            self.client.user_logger.debug("Planet {} is not habitable." .format(body.name) )
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        if body.surface_type in self.args.ptypes:
            self.client.user_logger.debug("Planet {} ({},{}) is habitable, uninhabited, and the correct type ({})." .format(body.name, body.x, body.y, body.surface_type) )
        else:
            self.client.user_logger.debug("Planet {} is not the correct type ({})." .format(body.name, body.surface_type) )
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        for e in self.excav_sites:
            if e.body.name == body.name:
                self.client.user_logger.debug("We already have an excav at {}." .format(body.name) )
                self.body_cache.mark_as_bad(body.name, 'planet')
                return 0

        target = { "body_id": body.id } # don't use body_name to avoid unicode names.
        excav = self.get_available_excav_for( target )
        if not excav:
            self.client.user_logger.debug("We can't send an excavator to this target. We probably already have an excav there.")
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        try:
            self.sp.send_ship( excav.id, target )
        except lacuna.exceptions.ServerError as e:
            self.client.user_logger.debug("Encountered ServerError code {}, message {}." .format(e.code, e.text) )
            return 0
        except Exception as e:
            ### Probably either MO excavation is on or if we already have an
            ### excavator en route to this body.
            ### MO Excav laws _should_ have already been checked, but a race
            ### condition exists here; it's possible the law was just now
            ### passed, after the check was performed.
            self.client.user_logger.debug("Encountered {}, ({}) trying to send excav to {} ({}, {})." .format(type(e), e, body.name, body.x, body.y) )
            self.body_cache.mark_as_bad(body.name, 'planet')
            return 0
        else:
            self.client.user_logger.info( "We just sent an excavator to {} ({},{}).".format(body.name, body.x, body.y) )
            self.num_excavs -= 1
        return 1

class Point():
    # A simple immutable-by-convention 2D map coordinate.
    def __init__( self, x, y ):
        self.x = x
        self.y = y

class Ring(): # by Josten's
    """ A ring of cells radiating out from a planet.

    Row and column numbers, as well as ring offsets, start at zero. Cell
    numbers start at 1.

    Attributes::

        cell_size           Integer size of the sides of the cells in the ring.
        cells_per_row       Number of cells per row (3 for a ring_offset of 1)
        cells_this_ring     Total number of cells in this ring only (8 for a
                            ring_offset of 1)
        center_cell         Cell object; the cell in the middle.
        center_cell_number  The number of the center cell. Numbering starts at
                            1 in the ring's NW corner cell.
        center_col          The column occupied by the center cell. 0-based.
        center_row          The row occupied by the center cell. 0-based.
        this_cell_number    Integer number of the cell just returned by
                            get_next_cell(). Will be 0, an invalid cell number,
                            before any calls to get_next_cell().
        planet              lacuna.body.MyBody object everything is relative to.
        ring_offset         Integer offset from the center (center is 0)
        total_cells         Total number of cells in the square, including all
                            rings (9 for a ring_offset of 1)

    cell_size
        The max area that ``lacuna.map.Map.get_star_map()`` will return is
        3001. The closest square to that is 54x54 (giving an area of 2916), so
        the length of any dimension of any cell is hardcoded at 54.

    Rings Diagram
        The diagram below shows ring_offset 0 and, surrounding it,
        ring_offset 1. In the diagram, the center_cell_number is either 1 (at
        ring_offset 0) or 5 (at ring_offset 1). The other cells are numbered
        assuming ring_offset 1, since those other cells don't exist for
        ring_offset 0. Each increase in ring_offset will add another layer of
        cells around the previous layer, just as ring_offset 1 adds a full
        layer of cells wrapped around the single cell at ring_offset 0::

            +----------+ +----------+ +----------+
            | offset 1 | | offset 1 | | offset 1 |
            |          | |          | |          |
            | count 1  | | count 2  | | count 3  |
            +----------+ +----------+ +----------+
            +----------+ +----------+ +----------+
            | offset 1 | |offset 0/1| | offset 1 |
            |          | |    o     | |          |
            | count 4  | |count 1/5 | | count 6  |
            +----------+ +----------+ +----------+
            +----------+ +----------+ +----------+
            | offset 1 | | offset 1 | | offset 1 |
            |          | |          | |          |
            | count 7  | | count 8  | | count 9  |
            +----------+ +----------+ +----------+
    """
    def __init__( self, planet, ring_offset:int = 0 ):
        self.planet             = planet
        self.ring_offset        = ring_offset
        self.cells_this_ring    = int( 8 * self.ring_offset )
        self.cells_per_row      = int( (2 * self.ring_offset + 1) )
        self.total_cells        = int( self.cells_per_row ** 2 )
        self.center_cell_number = int( (self.total_cells + 1) / 2 )
        self.next_cell_number   = 0
        self.cell_size          = 54
        if self.ring_offset == 0:
            # the formula above yields 0 for the degenerate single-cell case
            self.cells_this_ring = 1
        self._set_center_location()
        self._set_center_cell()

    def _set_center_location(self):
        # Derive the (row, col) of the center cell from its 1-based number.
        self.center_cell_number = int( (self.total_cells + 1) / 2 )
        self.center_col = self.center_cell_number - 1 # cell numbers start at 1
        while( self.center_col > self.cells_per_row ):
            self.center_col -= self.cells_per_row
        # the center sits on the diagonal, so row == col
        self.center_row = self.center_col

    def _set_center_cell(self):
        # The center cell is anchored on the planet's own coordinates.
        self.center_cell = Cell( Point(self.planet.x, self.planet.y), self.cell_size )

    def _get_cell_location(self, num):
        """ Find a cell's location in the ring, given its number.

        Arguments:
            - num -- Integer number of the cell. The NW-most cell is 1;
              numbering is left-to-right, top-to-bottom. So, for ring_offset
              of 1, the bottom-right cell's number will be 9.

        Returns a tuple:
            - column (numbering starts at 0)
            - row (numbering starts at 0)
            - Point object (the cell's centerpoint)
        """
        row = 0
        col = num - 1
        x   = 0
        y   = 0
        while( col >= self.cells_per_row ):
            row += 1
            col -= self.cells_per_row

        row_diff = row - self.center_row # 0 for cell 4
        col_diff = col - self.center_col # -1 for cell 4

        ### We have to reverse the signs if we're starting out negative. ie
        ### if our center point's x is negative, to go up in space we have to
        ### subtract.
        if self.center_cell.center_point.x < 0:
            col_diff *= -1
        if self.center_cell.center_point.y < 0:
            row_diff *= -1

        x = self.center_cell.center_point.x + (col_diff * self.cell_size)
        y = self.center_cell.center_point.y + (row_diff * self.cell_size)
        return( col, row, Point(x, y) )

    def get_next_cell(self):
        """ Gets the next cell in the ring.

        Starts at cell 1, the upper-left-most cell on the first call, then
        proceeds left-to-right, top-to-bottom on successive calls. After
        returning the final cell in the ring, the next call will return False
        and then reset the count.
        """
        if not hasattr(self, 'next_cell'):
            self.next_cell = self._gen_next_cell()
        try:
            return next(self.next_cell)
        except StopIteration:
            # exhausted: re-arm the generator and signal the caller with False
            self.next_cell = self._gen_next_cell()
            return False

    def _gen_next_cell(self):
        # NOTE(review): range(1, self.total_cells) never yields the last cell
        # number (e.g. for ring_offset 1, cells 1..8 but not 9) -- looks like
        # an off-by-one; TODO confirm whether range(1, self.total_cells + 1)
        # was intended.
        for i in range(1, self.total_cells):
            self.this_cell_number = i
            col, row, point = self._get_cell_location( i )
            yield Cell( point, self.cell_size )

class Cell():
    """
    Attributes::

        bottom          Y coordinate of the bottom boundary of the cell
        center_point    Point object - the center of the cell.
        left            X coordinate of the left boundary of the cell
        cell_size       Size of the sides of the cell
        right           X coordinate of the right boundary of the cell
        top             Y coordinate of the top boundary of the cell

    Constructor will raise Cell.OutOfBoundsError if the entire cell is out of
    bounds, so be sure to create a new cell in a try block.

    NOTE(review): the visible constructor code never raises OutOfBoundsError
    (the out-of-bounds branch is a pass) -- docstring and code disagree; TODO
    confirm which is current.

    **Cell Diagram**

    In ring.ring_offset == 0::

        +--------+
        | cell 1 |
        | col 0  |
        | row 0  |
        +--------+

    cell 1 from the ring_offset == 0 above becomes cell 5 when
    ring_offset == 1::

        +--------++--------++--------+
        | cell 1 || cell 2 || cell 3 |
        | col 0  || col 1  || col 2  |
        | row 0  || row 0  || row 0  |
        +--------++--------++--------+
        +--------++--------++--------+
        | cell 4 || cell 5 || cell 6 |
        | col 0  || col 1  || col 2  |
        | row 1  || row 1  || row 1  |
        +--------++--------++--------+
        +--------++--------++--------+
        | cell 7 || cell 8 || cell 9 |
        | col 0  || col 1  || col 2  |
        | row 2  || row 2  || row 2  |
        +--------++--------++--------+
    """

    class OutOfBoundsError(Exception):
        """ The entire cell is out of bounds """
        def __init__(self, value):
            self.value = value
        def __str__(self):
            return repr(self.value)

    def __init__( self, point:Point, cell_size ):
        self.center_point = point
        self.cell_size    = cell_size
        self._set_bounding_points()

    def _set_bounding_points(self):
        # Derive the four edges from the center point and the cell size.
        self.left   = int( self.center_point.x - (self.cell_size / 2) )
        self.right  = int( self.center_point.x + (self.cell_size / 2) )
        self.top    = int( self.center_point.y + (self.cell_size / 2) )
        self.bottom = int( self.center_point.y - (self.cell_size / 2) )

        if self.top < -1500 or self.bottom > 1500 or self.left > 1500 or self.right < -1500:
            ### The cell is completely out of bounds. But we don't want to
            ### stop iteration. eg the top row of cells could be out of
            ### bounds, leaving the other rows in bounds. If we stopped
            ### iteration, we'd never process the following in-bounds cells.
            ### So for out-of-bounds cells, just return a single point that's
            ### in bounds, but which won't contain any stars. ie don't do
            ### anything different.
            pass

        ### Limit any part of the cell that laps out of bounds to being just
        ### barely in-bounds. Making TLE Map module requests with
        ### out-of-bounds coords is an error.
self.top = 1500 if self.top > 1500 else self.top self.right = 1500 if self.right > 1500 else self.right self.bottom = -1500 if self.bottom < -1500 else self.bottom self.left = -1500 if self.left < -1500 else self.left
from sympy import Symbol, exp, log, oo, Rational
from sympy.series.limits import compare, mrv, rewrite, mrv_leadterm, limit, \
    sign
from sympy.utilities.pytest import XFAIL

# NOTE(review): this triple-quoted block sits AFTER the imports, so it is a
# bare expression statement rather than the module docstring.
"""
This test suite is testing the limit algoritm using the bottom up approach.
See the documentation in limits2.py. The algorithm itself is highly recursive
by nature, so "compare" is logically the lowest part of the algorithm, yet in
some sense it's the most complex part, because it needs to calculate a limit
to return the result.

Nevertheless the rest of the algorithm depends on compare that it works
correctly.
"""

x = Symbol('x', real=True)
m = Symbol('m', real=True)


# compare(a, b, x) classifies the growth of a vs b as x -> oo:
# "<" slower, ">" faster, "=" same comparability class.
def test_compare1():
    assert compare(2, x, x) == "<"
    assert compare(x, exp(x), x) == "<"
    assert compare(exp(x), exp(x**2), x) == "<"
    assert compare(exp(x**2),exp(exp(x)), x) == "<"
    assert compare(1,exp(exp(x)), x) == "<"

    assert compare(x, 2, x) == ">"
    assert compare(exp(x), x, x) == ">"
    assert compare(exp(x**2), exp(x), x) == ">"
    assert compare(exp(exp(x)), exp(x**2), x) == ">"
    assert compare(exp(exp(x)), 1, x) == ">"

    assert compare(2, 3, x) == "="
    assert compare(3, -5, x) == "="
    assert compare(2, -5, x) == "="
    assert compare(x, x**2, x) == "="
    assert compare(x**2, x**3, x) == "="
    assert compare(x**3, 1/x, x) == "="
    assert compare(1/x, x**m, x) == "="
    assert compare(x**m, -x, x) == "="
    assert compare(exp(x), exp(-x), x) == "="
    assert compare(exp(-x), exp(2*x), x) == "="
    assert compare(exp(2*x), exp(x)**2, x) == "="
    assert compare(exp(x)**2, exp(x+exp(-x)), x) == "="
    assert compare(exp(x), exp(x+exp(-x)), x) == "="
    assert compare(exp(x**2), 1/exp(x**2), x) == "="

def test_compare2():
    assert compare(exp(x),x**5,x) == ">"
    assert compare(exp(x**2),exp(x)**2,x) == ">"
    assert compare(exp(x),exp(x+exp(-x)),x) == "="
    assert compare(exp(x+exp(-x)),exp(x),x) == "="
    assert compare(exp(x+exp(-x)),exp(-x),x) == "="
    assert compare(exp(-x),x,x) == ">"
    assert compare(x,exp(-x),x) == "<"
    assert compare(exp(x+1/x),x,x) == ">"
    assert compare(exp(-exp(x)),exp(x),x) == ">"
    assert compare(exp(exp(-exp(x))+x),exp(-exp(x)),x) == "<"

def test_compare3():
    assert compare(exp(exp(x)),exp(x+exp(-exp(x))),x) == ">"

# sign(e, x) is the sign of e for x -> oo: -1, 0 or 1.
def test_sign1():
    assert sign(Rational(0), x) == 0
    assert sign(Rational(3), x) == 1
    assert sign(Rational(-5), x) == -1
    assert sign(log(x), x) == 1
    assert sign(exp(-x), x) == 1
    assert sign(exp(x), x) == 1
    assert sign(-exp(x), x) == -1
    assert sign(3-1/x, x) == 1
    assert sign(-3-1/x, x) == -1

# mrv(e, x) is the set of most-rapidly-varying subexpressions of e.
def test_mrv1():
    assert mrv(x, x) == set([x])
    assert mrv(x+1/x, x) == set([x])
    assert mrv(x**2, x) == set([x])
    assert mrv(log(x), x) == set([x])
    assert mrv(exp(x), x) == set([exp(x)])
    assert mrv(exp(-x), x) == set([exp(-x)])
    assert mrv(exp(x**2), x) == set([exp(x**2)])
    assert mrv(-exp(1/x), x) == set([x])
    assert mrv(exp(x+1/x), x) == set([exp(x+1/x)])

def test_mrv2a():
    assert mrv(exp(x+exp(-exp(x))), x) == set([exp(-exp(x))])
    assert mrv(exp(x+exp(-x)), x) == set([exp(x+exp(-x)), exp(-x)])
    assert mrv(exp(1/x+exp(-x)), x) == set([exp(-x)])

#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv2b():
    assert mrv(exp(x+exp(-x**2)), x) == set([exp(-x**2)])

#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv2c():
    assert mrv(exp(-x+1/x**2)-exp(x+1/x), x) == set([exp(x+1/x), exp(1/x**2-x)])

#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_mrv3():
    assert mrv(exp(x**2)+x*exp(x)+log(x)**x/x, x) == set([exp(x**2)])
    assert mrv(exp(x)*(exp(1/x+exp(-x))-exp(1/x)), x) == set([exp(x), exp(-x)])
    assert mrv(log(x**2+2*exp(exp(3*x**3*log(x)))), x) == set([exp(exp(3*x**3*log(x)))])
    assert mrv(log(x-log(x))/log(x), x) == set([x])
    assert mrv((exp(1/x-exp(-x))-exp(1/x))*exp(x), x) == set([exp(x), exp(-x)])
    assert mrv(1/exp(-x+exp(-x))-exp(x), x) == set([exp(x), exp(-x), exp(x-exp(-x))])
    assert mrv(log(log(x*exp(x*exp(x))+1)), x) == set([exp(x*exp(x))])
    assert mrv(exp(exp(log(log(x)+1/x))), x) == set([x])

def test_mrv4():
    ln = log
    assert mrv((ln(ln(x)+ln(ln(x)))-ln(ln(x)))/ln(ln(x)+ln(ln(ln(x))))*ln(x), x) == set([x])
    assert mrv(log(log(x*exp(x*exp(x))+1)) - exp(exp(log(log(x)+1/x))), x) == \
        set([exp(x*exp(x))])

# rewrite(e, Omega, x, m) rewrites e in terms of m = the mrv element.
def test_rewrite1():
    e = exp(x)
    assert rewrite(e, mrv(e, x), x, m) == (1/m, -x)
    e = exp(x**2)
    assert rewrite(e, mrv(e, x), x, m) == (1/m, -x**2)
    e = exp(x+1/x)
    assert rewrite(e, mrv(e, x), x, m) == (1/m, -x-1/x)
    e = 1/exp(-x+exp(-x))-exp(x)
    assert rewrite(e, mrv(e, x), x, m) == (1/(m*exp(m))-1/m, -x)

def test_rewrite2():
    e = exp(x)*log(log(exp(x)))
    assert mrv(e, x) == set([exp(x)])
    assert rewrite(e, mrv(e, x), x, m) == (1/m*log(x), -x)

#sometimes infinite recursion due to log(exp(x**2)) not simplifying
def test_rewrite3():
    e = exp(-x+1/x**2)-exp(x+1/x)
    #both of these are correct and should be equivalent:
    assert rewrite(e, mrv(e, x), x, m) in [(-1/m + m*exp(1/x+1/x**2), -x-1/x),
        (m - 1/m*exp(1/x + x**(-2)), x**(-2) - x)]

# mrv_leadterm(e, x) is the (coefficient, exponent) leading term of e in
# terms of the mrv variable.
def test_mrv_leadterm1():
    assert mrv_leadterm(-exp(1/x), x) == (-1, 0)
    assert mrv_leadterm(1/exp(-x+exp(-x))-exp(x), x) == (-1, 0)
    assert mrv_leadterm((exp(1/x-exp(-x))-exp(1/x))*exp(x), x) == (-exp(1/x), 0)

def test_mrv_leadterm2():
    #Gruntz: p51, 3.25
    assert mrv_leadterm((log(exp(x)+x)-x)/log(exp(x)+log(x))*exp(x), x) == \
        (1, 0)

def test_mrv_leadterm3():
    #Gruntz: p56, 3.27
    assert mrv(exp(-x+exp(-x)*exp(-x*log(x))), x) == set([exp(-x-x*log(x))])
    assert mrv_leadterm(exp(-x+exp(-x)*exp(-x*log(x))), x) == (exp(-x), 0)

def test_limit1():
    assert limit(x, x, oo) == oo
    assert limit(x, x, -oo) == -oo
    assert limit(-x, x, oo) == -oo
    assert limit(x**2, x, -oo) == oo
    assert limit(-x**2, x, oo) == -oo
    assert limit(x*log(x), x, 0, dir="+") == 0
    assert limit(1/x,x,oo) == 0
    assert limit(exp(x),x,oo) == oo
    assert limit(-exp(x),x,oo) == -oo
    assert limit(exp(x)/x,x,oo) == oo
    assert limit(1/x-exp(-x),x,oo) == 0
    assert limit(x+1/x,x,oo) == oo

def test_limit2():
    assert limit(x**x, x, 0, dir="+") == 1
    assert limit((exp(x)-1)/x, x, 0) == 1
    assert limit(1+1/x,x,oo) == 1
    assert limit(-exp(1/x),x,oo) == -1
    assert limit(x+exp(-x),x,oo) == oo
    assert limit(x+exp(-x**2),x,oo) == oo
    assert limit(x+exp(-exp(x)),x,oo) == oo
    assert limit(13+1/x-exp(-x),x,oo) == 13

def test_limit3():
    a = Symbol('a')
    assert limit(x-log(1+exp(x)), x, oo) == 0
    assert limit(x-log(a+exp(x)), x, oo) == 0
    assert limit(exp(x)/(1+exp(x)), x, oo) == 1
    assert limit(exp(x)/(a+exp(x)), x, oo) == 1

@XFAIL
def test_limit4():
    #issue 364
    assert limit((3**x+5**x)**(1/x), x, oo) == 5
    #issue 364
    assert limit((3**(1/x)+5**(1/x))**x, x, 0) == 5

#@XFAIL
#def test_MrvTestCase_page47_ex3_21():
#    h = exp(-x/(1+exp(-x)))
#    expr = exp(h)*exp(-x/(1+h))*exp(exp(-x+h))/h**2-exp(x)+x
#    expected = set([1/h,exp(x),exp(x-h),exp(x/(1+h))])
#    # XXX Incorrect result
#    assert mrv(expr,x).difference(expected) == set()
import os
import sys

### Imaging dependencies are wrapped in try/except so this module can still be
### imported (with a printed warning) on hosts without PIL / OpenCV installed.
try:
    from PIL import Image
    from PIL import ImageOps
    from PIL import ImageDraw
    from PIL import ImageEnhance
    import cv2
    import numpy as np
except Exception as e:
    print(str(e))

from stve.log import Log
from stve.exception import *

# Default enhancement factors and the template-match acceptance threshold.
SHARPNESS = 2.0
CONTRAST = 2.0
PMC_THRESHOLD = 0.96


class POINT(object):
    """A rectangle: top-left corner (x, y) plus width and height."""

    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self.width = width
        self.height = height

    def __repr__(self):
        return "POINT()"

    def __str__(self):
        return "(X, Y) = (%s, %s), Width = %s, Height = %s" \
            % (self.x, self.y, self.width, self.height)


class Picture(object):
    """Classmethod helpers around PIL / OpenCV for loading, converting and
    pattern-matching images.  Failures are reported as PictureError."""

    L = Log("Picture.Library.STVE")

    @classmethod
    def exists(cls, filename):
        """Return True if *filename* exists; raise PictureError otherwise."""
        if os.path.exists(filename):
            return True
        cls.L.warning("%s is not exists." % filename)
        raise PictureError("%s is not exists." % filename)

    @classmethod
    def open(cls, filename):
        """Open *filename* with PIL; raise PictureError on a missing or
        unreadable file."""
        if cls.exists(filename):
            try:
                return Image.open(filename, 'r')
            except IOError as e:
                cls.L.warning("I/O Error %s" % str(e))
                raise PictureError("it is not success of loading picture %s" % filename)

    @classmethod
    def to_opencv(cls, pic):
        """Convert a PIL image to a numpy array usable by OpenCV."""
        # `pic == None` in the original; `is None` is the correct identity
        # test (== can invoke elementwise comparison on array-likes).
        if pic is None:
            raise PictureError("it is not create opencv_pic.")
        return np.asarray(pic)

    @classmethod
    def to_pil(cls, opencv_pic):
        """Convert a numpy (OpenCV) array back to a PIL image."""
        try:
            return Image.fromarray(opencv_pic)
        except Exception as e:
            cls.L.warning(str(e))
            raise PictureError("it is not exchange pic.")

    @classmethod
    def get_rgb(cls, pic, point=""):
        """Return the averaged [R, G, B] over *point* (a POINT rectangle;
        defaults to the whole image).  The "" sentinel default is kept for
        backwards compatibility."""
        if point == "":
            point = POINT(0, 0, pic.size[0], pic.size[1])
        box = (point.x, point.y, point.x + point.width, point.y + point.height)
        rgbimg = pic.crop(box).convert("RGB")
        rgb = np.array(rgbimg.getdata())
        return [cls.__round(rgb[:, 0]), cls.__round(rgb[:, 1]), cls.__round(rgb[:, 2])]

    @classmethod
    def __round(cls, array):
        """Average *array* and round to the nearest int."""
        return int(round(np.average(array)))

    @classmethod
    def resize(cls, pic, size):
        """Resize *pic* to a standard vertical resolution ("240P".."1080P"),
        preserving aspect ratio.  Returns None for unknown size labels
        (matching the original behavior)."""
        heights = {"240P": 240, "360P": 360, "480P": 480,
                   "720P": 720, "1080P": 1080}
        if size not in heights:
            return
        sz = heights[size]
        width = float((float(pic.size[0]) * sz)) / float(pic.size[1])
        return pic.resize((int(width), sz))

    @classmethod
    def info(cls, pic):
        """Log format, size and mode of *pic*."""
        cls.L.info("File Format : %s " % pic.format)
        cls.L.info("File Size : %s " % str(pic.size))
        cls.L.info("File Mode : %s " % pic.mode)

    @classmethod
    def convert(cls, from_file, to_file, mode, width, height):
        """Reinterpret a raw byte dump as an image and save it."""
        # Context manager closes the source handle; the original leaked the
        # file object returned by a bare open().
        with open(from_file, 'rb') as f:
            rawdata = f.read()
        img = Image.frombytes(mode, (width, height), rawdata)
        img.save(to_file)

    @classmethod
    def save(cls, pic, filepath, q=100, opt=True):
        """Save *pic* to *filepath*; the parent directory must exist.
        Returns *filepath*."""
        if not os.path.exists(os.path.dirname(filepath)):
            raise PictureError("it is not exists parents directory. : %s"
                               % os.path.dirname(filepath))
        pic.save(filepath, quality=q, optimize=opt)
        return filepath

    @classmethod
    def binary(cls, pic):
        """Adaptive-threshold binarization; returns an RGB PIL image."""
        if pic is None:
            raise PictureError("it is not exists.")
        opencv_pic = cls.to_opencv(pic)
        cv_pic_gray = cv2.cvtColor(opencv_pic, cv2.COLOR_BGR2GRAY)
        cv_pic = cv2.adaptiveThreshold(
            cv_pic_gray, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
        opencv_pic = cv2.cvtColor(cv_pic, cv2.COLOR_GRAY2RGB)
        # NOTE: the original called pic.convert("RGB") and discarded the
        # result; the array produced above is already RGB, so that call was
        # a no-op and has been removed.
        return cls.to_pil(opencv_pic)

    @classmethod
    def grayscale(cls, pic):
        """Return a grayscale copy of *pic*, re-expanded to RGB mode."""
        if pic is None:
            raise PictureError("it is not exists.")
        try:
            return ImageOps.grayscale(pic).convert("RGB")
        except IOError as e:
            cls.L.warning("I/O Error : %s" % str(e))
            raise PictureError("it is not success of converting grayscale. %s" % pic)

    @classmethod
    def brightness(cls, pic, level=1.0):
        """Return *pic* with brightness scaled by *level*."""
        if pic is None:
            raise PictureError("it is not exists.")
        try:
            brightness = ImageEnhance.Brightness(pic)
            return brightness.enhance(level)
        except IOError as e:
            cls.L.warning("I/O Error : %s" % str(e))
            raise PictureError("it is not success of converting brightness. %s" % pic)

    @classmethod
    def contrast(cls, pic, threshold=CONTRAST):
        """Return *pic* with contrast scaled by *threshold*."""
        if pic is None:
            raise PictureError("it is not exists.")
        try:
            contrast_converter = ImageEnhance.Contrast(pic)
            return contrast_converter.enhance(threshold)
        except IOError as e:
            cls.L.warning("I/O Error : %s" % str(e))
            raise PictureError("it is not success of converting contrast. %s" % pic)

    @classmethod
    def sharpness(cls, pic, threshold=SHARPNESS):
        """Return *pic* with sharpness scaled by *threshold*."""
        if pic is None:
            raise PictureError("it is not exists.")
        try:
            sharpness_converter = ImageEnhance.Sharpness(pic)
            return sharpness_converter.enhance(threshold)
        except IOError as e:
            cls.L.warning("I/O Error : %s" % str(e))
            raise PictureError("it is not success of converting sharpness. %s" % pic)

    @classmethod
    def crop(cls, pic, point):
        """Crop *pic* to the POINT rectangle *point*."""
        if point is None:
            raise PictureError("Point object is None.")
        box = (point.x, point.y, point.x + point.width, point.y + point.height)
        try:
            return pic.crop(box)
        except IOError as e:
            cls.L.info("I/O error : %s" % str(e))
            raise PictureError("it is not succes of crop picture. %s" % pic)

    @classmethod
    def reload(cls, filename):
        """Re-open *filename* from disk; raise PictureError on any failure."""
        try:
            cls.exists(filename)
            return Image.open(filename, 'r')
        except Exception as e:
            # BUG FIX: the original caught the undefined name `Error`, which
            # would itself raise NameError the moment any exception occurred
            # (unless an `Error` class happened to arrive via the star import
            # above - TODO confirm against stve.exception).
            cls.L.warning("Error : %s" % str(e))
            raise PictureError("it is not success of loading picture %s" % filename)

    @classmethod
    def point(cls, pic, points, filename):
        """Mark each point with a 2x2 red dot and save to *filename*."""
        draw = ImageDraw.Draw(pic)
        red = (0xff, 0x00, 0x00)
        for point in points:
            # NOTE(review): x and y are swapped here in the original
            # (draws at (point.y, point.x)); preserved as-is - verify
            # against callers before "fixing".
            px, py = float(point.y), float(point.x)
            draw.point((px, py), red)
            draw.point((px + 1.0, py), red)
            draw.point((px, py + 1.0), red)
            draw.point((px + 1.0, py + 1.0), red)
        return cls.save(pic, filename)

    @classmethod
    def __patternmatch(cls, reference, target):
        """Template-match *target* inside *reference*; return the last match
        as a POINT, or None when nothing scores >= PMC_THRESHOLD."""
        if not os.path.exists(reference):
            raise PictureError("it is not exists reference file. : %s" % reference)
        if not os.path.exists(target):
            raise PictureError("it is not exists target file. : %s" % target)
        cls.L.info("target : %s" % target)
        img_rgb = cv2.imread(reference)
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        template = cv2.imread(target, 0)
        w, h = template.shape[::-1]
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= PMC_THRESHOLD)
        result = None
        for pt in zip(*loc[::-1]):
            result = POINT(pt[0], pt[1], w, h)
        return result

    @classmethod
    def is_pattern(cls, reference, target):
        """True when *target* is found somewhere inside *reference*."""
        return cls.__patternmatch(reference, target) is not None

    @classmethod
    def search_pattern(cls, reference, target):
        """Return the match location as a POINT, or None when not found."""
        return cls.__patternmatch(reference, target)
'''
Defines all plugin-related messaging elements.

@author: Eitan Isaacson
@organization: Mozilla Foundation
@copyright: Copyright (c) 2006, 2007 Mozilla Foundation
@license: BSD

All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
import gi
from gi.repository import Gtk as gtk
from gi.repository import GObject
from gi.repository import Pango
from accerciser.i18n import _

class MessageManager(GObject.GObject):
    '''
    Centralizes all plugin message handling. If the plugin is a visible widget,
    it displays the message within the plugin. If not it displays the message in
    a dedicated message tab. This manager also could emit module and plugin
    reload requests from user responses to messages.
    '''
    # Signal signatures:
    #   plugin-reload-request(message, plugin_class)
    #   module-reload-request(message, module_name, module_path)
    __gsignals__ = {'plugin-reload-request' :
                    (GObject.SignalFlags.RUN_FIRST,
                     None,
                     (GObject.TYPE_PYOBJECT, GObject.TYPE_PYOBJECT)),
                    'module-reload-request' :
                    (GObject.SignalFlags.RUN_FIRST,
                     None,
                     (GObject.TYPE_PYOBJECT,
                      GObject.TYPE_STRING, GObject.TYPE_STRING))}

    def __init__(self):
        '''
        Initialize the manager.
        '''
        GObject.GObject.__init__(self)
        # Lazily created by getMessageTab(); None until first requested.
        self.message_tab = None

    def getMessageTab(self):
        '''
        Get the manager's dedicated message tab. Initialize a message tab if
        one does not already exist.

        @return: The message tab.
        @rtype: L{MessageManager.MessageTab}
        '''
        if not self.message_tab:
            self.message_tab = self.MessageTab()
        return self.message_tab

    def newPluginError(self, plugin_instance, plugin_class,
                       error_message, details):
        '''
        Create a new plugin error message, and display it eithe in the plugin
        itself or in the error tab.

        @param plugin_instance: Plugin instance that had the error.
        @type plugin_instance: L{Plugin}
        @param plugin_class: Plugin class.
        @type plugin_class: type
        @param error_message: Principal error message.
        @type error_message: string
        @param details: Detailed error message.
        @type details: string

        @return: The newly created error message.
        @rtype: L{PluginErrorMessage}
        '''
        message = PluginErrorMessage(error_message, details)
        message.connect('response', self._onPluginResponseRefresh, plugin_class)
        # If the plugin is packed in a parent widget, show the message inline;
        # otherwise fall back to the shared message tab.
        # NOTE(review): self.message_tab may still be None here if
        # getMessageTab() was never called - verify callers initialize it.
        if getattr(plugin_instance, 'parent', None):
            plugin_instance.message_area.pack_start(message, True, True, 0)
            message.show_all()
        else:
            self.message_tab.addMessage(message)
        return message

    def _onPluginResponseRefresh(self, message, response_id, plugin_class):
        '''
        Callback for gtk.RESPONSE_APPLY of a plugin error message, emits a
        plugin reload request signal.

        @param message: Error message that emitted response signal.
        @type message: L{PluginErrorMessage}
        @param response_id: The response ID.
        @type response_id: integer
        @param plugin_class: The plugin class of the failed plugin.
        @type plugin_class: type
        '''
        if response_id == gtk.ResponseType.APPLY:
            self.emit('plugin-reload-request', message, plugin_class)

    def newModuleError(self, module, path, error_message, details):
        '''
        Create a new module error dialog. Usually because of a syntax error in a
        module. Put error message in message tab.

        @param module: Failed module name.
        @type module: string
        @param path: Failed module's path.
        @type path: string
        @param error_message: Principal error message.
        @type error_message: string
        @param details: Detailed error message.
        @type details: string

        @return: The newly created error message.
        @rtype: L{PluginErrorMessage}
        '''
        message = PluginErrorMessage(error_message, details)
        message.connect('response', self._onModuleResponseRefresh, module, path)
        self.message_tab.addMessage(message)
        return message

    def _onModuleResponseRefresh(self, message, response_id, module, path):
        '''
        Callback for gtk.RESPONSE_APPLY of a module error message, emits a
        module reload request signal.

        @param message: Error message that emitted response signal.
        @type message: L{PluginErrorMessage}
        @param response_id: The response ID.
        @type response_id: integer
        @param module: Failed module name.
        @type module: string
        @param path: Failed module's path.
        @type path: string
        '''
        if response_id == gtk.ResponseType.APPLY:
            self.emit('module-reload-request', message, module, path)

    class MessageTab(gtk.ScrolledWindow):
        '''
        Implements a scrolled window with a vbox for messages that cannot be
        displayed in their plugins
        '''
        def __init__(self):
            '''
            Initialize tab.
            '''
            gtk.ScrolledWindow.__init__(self)
            self.set_name(_('Plugin Errors'))
            self._vbox = gtk.VBox()
            # Hide the whole tab automatically once the last message is gone.
            self._vbox.connect('remove', self._onMessageRemove)
            self.add_with_viewport(self._vbox)
            self.set_no_show_all(True)

        def addMessage(self, message):
            '''
            Add a message to the tab.

            @param message: The message to be added.
            @type message: L{PluginMessage}
            '''
            self._vbox.pack_start(message, False, True, 0)
            self.show()
            self._vbox.show_all()

        def removeMessage(self, message):
            '''
            Remove a message from the tab. Destroys it.

            @param message: The message to be removed.
            @type message: L{PluginMessage}
            '''
            message.destroy()

        def _onMessageRemove(self, vbox, message):
            '''
            Callback for removal of children. If there are no messages
            displayed, hide this widget.

            @param vbox: Vbox that had a child removed.
            @type vbox: gtk.VBox
            @param message: The message that was removed.
            @type message: L{PluginMessage}
            '''
            if len(vbox.get_children()) == 0:
                self.hide()

class PluginMessage(gtk.Frame):
    '''
    Pretty plugin message area that appears either above the plugin if the
    plugin is realized or in a seperate view.

    @ivar vbox: Main contents container.
    @type vbox: gtk.VBox
    @ivar action_area: Area used mainly for response buttons.
    @type action_area: gtk.VBox
    @ivar message_style: Tooltip style used for mesages.
    @type message_style: gtk.Style
    '''
    # Single 'response' signal carrying the integer response ID, mirroring
    # the gtk.Dialog response protocol.
    __gsignals__ = {'response' :
                    (GObject.SignalFlags.RUN_FIRST,
                     None,
                     (GObject.TYPE_INT,))}

    def __init__(self):
        '''
        Initialize the message object.
        '''
        gtk.Frame.__init__(self)
        self.vbox = gtk.VBox()
        self.vbox.set_spacing(3)
        self.action_area = gtk.VBox()
        self.action_area.set_homogeneous(True)
        # Get the tooltip style, for use with the message background color.
        # A throwaway tooltip-named window is created solely to resolve the
        # theme's tooltip style for this message's background.
        w = gtk.Window()
        w.set_name('gtk-tooltip')
        w.ensure_style()
        #self.message_style = w.rc_get_style()
        self.message_style = gtk.rc_get_style(w)
        event_box = gtk.EventBox()
        event_box.set_style(self.message_style)
        self.add(event_box)
        hbox = gtk.HBox()
        event_box.add(hbox)
        hbox.pack_start(self.vbox, True, True, 3)
        hbox.pack_start(self.action_area, False, False, 3)

    def add_button(self, button_text, response_id):
        '''
        Add a button to the action area that emits a response when clicked.

        @param button_text: The button text, or a stock ID.
        @type button_text: string
        @param response_id: The response emitted when the button is pressed.
        @type response_id: integer

        @return: Return the created button.
        @rtype: gtk.Button
        '''
        button = gtk.Button()
        button.set_use_stock(True)
        button.set_label(button_text)
        button.connect('clicked', self._onActionActivated, response_id)
        self.action_area.pack_start(button, False, False, 0)
        return button

    def _onActionActivated(self, button, response_id):
        '''
        Callback for button presses that emit the correct response.

        @param button: The button that was clicked.
        @type button: gtk.Button
        @param response_id: The response ID to emit a response with.
        @type response_id: integer
        '''
        self.emit('response', response_id)

class PluginErrorMessage(PluginMessage):
    '''
    Standard error message.
    '''
    def __init__(self, error_message, details):
        '''
        Plugin error message.

        @param error_message: The error message.
        @type error_message: string
        @param details: Further details about the error.
        @type details: string
        '''
        PluginMessage.__init__(self)
        hbox = gtk.HBox()
        hbox.set_spacing(6)
        self.vbox.pack_start(hbox, False, False, 0)
        # NOTE(review): stock icons/buttons (set_from_stock, STOCK_*) are
        # deprecated in GTK 3.10+; kept as-is for behavior parity.
        image = gtk.Image()
        image.set_from_stock(gtk.STOCK_DIALOG_WARNING,
                             gtk.IconSize.SMALL_TOOLBAR)
        hbox.pack_start(image, False, False, 0)
        label = gtk.Label()
        label.set_ellipsize(Pango.EllipsizeMode.END)
        label.set_selectable(True)
        label.set_markup('<b>%s</b>' % error_message)
        hbox.pack_start(label, True, True, 0)
        label = gtk.Label(details)
        label.set_ellipsize(Pango.EllipsizeMode.END)
        label.set_selectable(True)
        self.vbox.add(label)
        self.add_button(gtk.STOCK_CLEAR, gtk.ResponseType.CLOSE)
        self.add_button(gtk.STOCK_REFRESH, gtk.ResponseType.APPLY)
        self.connect('response', self._onResponse)

    def _onResponse(self, plugin_message, response_id):
        '''
        Destroy the message when the "clear" button is clicked.

        @param plugin_message: Message that emitted this signal.
        @type plugin_message: L{PluginErrorMessage}
        @param response_id: The response ID
        @type response_id: integer
        '''
        if response_id == gtk.ResponseType.CLOSE:
            plugin_message.destroy()
#!/usr/bin/env python
# encoding: utf-8

# NOTE: this module is Python 2 (`unicode`, `iteritems`, `e.message`); it has
# deliberately NOT been ported.

import ast
import docutils
import docutils.examples
import inspect
import logging
import os
import re
import sys

logging.basicConfig(level=logging.DEBUG)

from talus.fileset import FileSet


class TalusError(Exception):
    """Base error for talus job/param parsing problems."""
    pass


class PyFuncTypeComponent(object):
    """Parameter type ``Component(Name)`` - resolves to a talus component."""

    def __init__(self, raw):
        self.raw = raw
        self.type = "component"
        match = re.match(r'^Component\(([a-zA-Z0-9_]+)\)$', raw)
        if match is None:
            raise TalusError("Could not determine the component name from: {!r}".format(raw))
        self.name = match.group(1)


class PyFuncTypeFileSet(object):
    """Parameter type ``FileSet`` - resolves to a talus FileSet id."""

    def __init__(self):
        self.type = "fileset"
        self.name = "FileSet"


class PyFuncTypeNative(object):
    """Parameter of a plain Python type (str/list/tuple/dict/int/float/unicode/bool)."""

    def __init__(self, native_type):
        self.type = "native"
        if native_type not in ["str", "list", "tuple", "dict", "int", "float",
                               "unicode", "bool"]:
            raise TalusError("Unsupported native type specified for parameter: {!r}".format(native_type))
        self.name = native_type


class PyFuncParam(object):
    """One ``:param <type> <name>: <desc>`` declaration from a docstring."""

    def __init__(self, unparsed_name, desc):
        self.desc = desc
        self.type, self.name = self.get_type_and_name(unparsed_name)

    def get_type_and_name(self, data):
        """Split a field name like ``param Component(X) foo`` into (type, name).

        Raises TalusError when the declaration does not have exactly three
        whitespace-separated parts.
        """
        parts = data.split()
        if len(parts) != 3:
            raise TalusError("Error! Param declarations need to be of the form" +
                             "\n\n\t:param <type> <param-name>: <desc>" +
                             "\n\nBut you gave:\n\n\t{}".format(data))
        param, type, name = parts
        if type.startswith("Component"):
            type = PyFuncTypeComponent(type)
        elif type == "FileSet":
            type = PyFuncTypeFileSet()
        else:
            type = PyFuncTypeNative(type)
        return type, name


class PyFunc(object):
    """A function/method parsed out of a module's AST, with its docstring params."""

    def __init__(self, log, filename, func_node):
        self.filename = filename
        self.node = func_node
        self.name = func_node.name
        self._log = log.getChild(self.name)

        # The docstring, if present, is the first statement of the body.
        self.doc = ""
        if hasattr(func_node.body[0], "value") and isinstance(func_node.body[0].value, ast.Str):
            self.doc = func_node.body[0].value.s

        try:
            self.params = self.get_params(self.doc)
        except TalusError as e:
            # Re-raise with the source location appended for debuggability.
            raise TalusError(e.message + "\n\nError at {}:{}".format(
                self.filename, self.node.lineno))

    def get_params(self, docstring):
        """Extract the ``:param ...:`` fields from *docstring*, in order."""
        self._log.debug("determining params")

        # these need to be IN ORDER!!!!
        params = []

        doc, _ = docutils.examples.internals(unicode(docstring))
        if len(doc.children) == 0:
            return params

        for quote in doc.children:
            if not isinstance(quote, docutils.nodes.block_quote):
                continue
            for field in quote:
                if not isinstance(field, docutils.nodes.field_list):
                    continue
                for f in field:
                    name = str(f[0][0])
                    desc = str(f[1][0][0])
                    # simple test to avoid :returns: and such
                    if "param" in name:
                        params.append(PyFuncParam(name, desc))
        return params


class PyClass(object):
    """A component/tool class parsed out of a module's AST."""

    def __init__(self, log, filename, cls_node):
        self.filename = filename
        self._log = log.getChild(cls_node.name)

        # The param-bearing method differs by kind: components declare their
        # params on init(), tools on run().
        # NOTE(review): if the path contains neither "components" nor
        # "tools", self.type/self.param_method are left unset and a later
        # AttributeError will result - verify all call sites.
        if "components" in self.filename:
            self.type = "component"
            self.param_method = "init"
        elif "tools" in self.filename:
            self.type = "tool"
            self.param_method = "run"

        self.node = cls_node
        self.desc = ""
        self.name = cls_node.name
        self.bases = self.get_bases()

        self.methods = {}
        for idx, node in enumerate(cls_node.body):
            if idx == 0 and isinstance(node, ast.Expr) and isinstance(node.value, ast.Str):
                self.desc = node.value.s
            elif isinstance(node, ast.FunctionDef):
                method = PyFunc(self._log, filename, node)
                self.methods[method.name] = method

    def get_bases(self):
        """Return the names of this class's base classes."""
        res = []
        for x in self.node.bases:
            if isinstance(x, ast.Attribute):
                res.append(x.attr)
            elif isinstance(x, ast.Name):
                res.append(x.id)
        return res

    def get_run_params(self, query_func):
        """Return a dict describing the params of the param-bearing method.

        *query_func* is called with each component type name and must return
        True for it to be accepted.
        """
        self._log.debug("getting run params")
        params = {}
        if self.param_method in self.methods:
            for param in self.methods[self.param_method].params:
                self._log.debug("    param: {} ({} - {})".format(
                    param.name, param.type.type, param.type.name))
                if param.type.type == "component" and not query_func(param.type.name):
                    raise TalusError("Invalid component specified ({}) in {}:{}".format(
                        param.type.name,
                        self.filename,
                        self.name
                    ))
                params[param.name] = dict(
                    name=param.name,
                    type=dict(
                        type=param.type.type,  # native or component
                        name=param.type.name   # str/list/etc or component name
                    ),
                    desc=param.desc
                )
        else:
            self._log.debug("no {} method was specified?".format(self.param_method))
        return params


class Job(object):
    """This is the class that will run a task."""

    def __init__(self, id, idx, params, tool, fileset_id, progress_callback,
                 results_callback):
        """Create a runnable job.

        :id: id of this job (used for logging)
        :idx: index of this job within its batch
        :params: raw (JSON-ish) parameter dict for the tool
        :tool: name of the tool class to run
        :fileset_id: The id of the default fileset that files should be
            added to
        :progress_callback: callable invoked by the tool to report progress
        :results_callback: callable invoked by the tool to report results
        """
        self._id = id
        self._idx = idx
        self._params = params
        self._tool = tool
        self._fileset_id = fileset_id
        self._fileset = FileSet(self._fileset_id)
        self._progress_callback = progress_callback
        self._results_callback = results_callback

        self._log = logging.getLogger("JOB:{}".format(self._id))

    def add_file(self, contents, content_type="application/octet-stream",
                 filename=None, **metadata):
        """Add a file to the default result fileset
        """
        return self._fileset.add(
            contents=contents,
            filename=filename,
            content_type=content_type,
            **metadata
        )

    def run(self):
        """Resolve the tool class, convert the raw params, and run the tool."""
        self._log.debug("preparing to run job")

        try:
            tool_cls = self._get_tool_cls()
            real_params = self._convert_params(self._params, tool_cls)
            tool = tool_cls(
                idx=self._idx,
                progress_cb=self._progress_callback,
                results_cb=self._results_callback,
                parent_log=self._log,
                job=self
            )
            self._log.debug("RUNNING TOOL")
            tool.run(**real_params)
        except TalusError as e:
            self._log.error(e.message)

        self._log.debug("FINISHED RUNNING TOOL")

    def _camel_to_under(self, name):
        """CamelCase -> camel_case (module naming convention)."""
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    def _get_tool_cls(self):
        """Import talus.tools.<tool_module> and return the tool class."""
        mod_name = self._camel_to_under(self._tool)
        mod = __import__("talus.tools." + mod_name, globals(), locals(),
                         fromlist=[str(self._tool)])
        return getattr(mod, self._tool)

    def _get_component_cls(self, cls_name):
        """Import talus.components.<module> and return the component class."""
        mod_name = self._camel_to_under(cls_name)
        mod_base = __import__("talus.components", globals(), locals(),
                              fromlist=[str(mod_name)])
        mod = getattr(mod_base, mod_name)
        return getattr(mod, cls_name)

    def _convert_params(self, params, code_cls):
        """Convert raw param values to the types declared by *code_cls*."""
        param_types = self._get_param_types(code_cls)
        real_params = {}
        for name, val in params.iteritems():
            if name not in param_types:
                raise TalusError("unmapped argument: {!r}".format(name))
            real_params[name] = self._convert_val(param_types[name]["type"], val)
        return real_params

    def _to_bool(self, val):
        """Coerce bools, the string "true" (case-insensitive), or truthiness."""
        if type(val) is bool:
            return val
        if type(val) is str:
            return val.lower() == "true"
        return not (not val)

    def _convert_val(self, param_type, val):
        """Convert one raw value per its declared param type."""
        if param_type["type"] == "native":
            switch = {
                "str": lambda x: str(x),
                "list": lambda x: list(x),
                "tuple": lambda x: tuple(x),
                "dict": lambda x: dict(x),
                "int": lambda x: int(x),
                "float": lambda x: float(x),
                # BUG FIX: the original was missing the comma after this
                # entry ("unicode": ... "bool": ...), a SyntaxError that
                # prevented the module from importing at all.
                "unicode": lambda x: unicode(x),
                "bool": self._to_bool,
            }
            return switch[param_type["name"]](val)

        elif param_type["type"] == "fileset":
            val = FileSet(val)
            return val

        elif param_type["type"] == "component":
            # val should be like this:
            # { "class": "SpecificComponent", "params": {} }
            #
            # allow for inheritance by letting the json specify the
            # specific class that will be used
            component_cls = self._get_component_cls(val["class"])
            component_args = self._convert_params(val["params"], component_cls)
            val = component_cls(parent_log=self._log, job=self)
            val.init(**component_args)
            return val

    def _get_param_types(self, cls):
        """Parse *cls*'s source file and return its declared param types."""
        cls_name = cls.__name__
        filename = inspect.getfile(cls).replace(".pyc", ".py")
        with open(filename, "r") as f:
            source = f.read()

        pyclass = None
        mod = ast.parse(source)
        for node in mod.body:
            if isinstance(node, ast.ClassDef):
                # (renamed from `cls` in the original, which shadowed the
                # parameter inside the loop)
                candidate = PyClass(self._log, filename, node)
                if candidate.name == cls_name:
                    pyclass = candidate
                    break

        # make the query func always return true - this is assuming the
        # pre-receive hook has done its job and prevented invalid components
        # from slipping in
        return pyclass.get_run_params(lambda x: True)
""" A module for dealing with the polylines used throughout matplotlib. The primary class for polyline handling in matplotlib is :class:`Path`. Almost all vector drawing makes use of Paths somewhere in the drawing pipeline. Whilst a :class:`Path` instance itself cannot be drawn, there exists :class:`~matplotlib.artist.Artist` subclasses which can be used for convenient Path visualisation - the two most frequently used of these are :class:`~matplotlib.patches.PathPatch` and :class:`~matplotlib.collections.PathCollection`. """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six import math from weakref import WeakValueDictionary import numpy as np from numpy import ma from matplotlib import _path from matplotlib.cbook import simple_linear_interpolation, maxdict from matplotlib import rcParams class Path(object): """ :class:`Path` represents a series of possibly disconnected, possibly closed, line and curve segments. The underlying storage is made up of two parallel numpy arrays: - *vertices*: an Nx2 float array of vertices - *codes*: an N-length uint8 array of vertex types These two arrays always have the same length in the first dimension. For example, to represent a cubic curve, you must provide three vertices as well as three codes ``CURVE3``. The code types are: - ``STOP`` : 1 vertex (ignored) A marker for the end of the entire path (currently not required and ignored) - ``MOVETO`` : 1 vertex Pick up the pen and move to the given vertex. - ``LINETO`` : 1 vertex Draw a line from the current position to the given vertex. - ``CURVE3`` : 1 control point, 1 endpoint Draw a quadratic Bezier curve from the current position, with the given control point, to the given end point. - ``CURVE4`` : 2 control points, 1 endpoint Draw a cubic Bezier curve from the current position, with the given control points, to the given end point. - ``CLOSEPOLY`` : 1 vertex (ignored) Draw a line segment to the start point of the current polyline. 
Users of Path objects should not access the vertices and codes arrays directly. Instead, they should use :meth:`iter_segments` or :meth:`cleaned` to get the vertex/code pairs. This is important, since many :class:`Path` objects, as an optimization, do not store a *codes* at all, but have a default one provided for them by :meth:`iter_segments`. .. note:: The vertices and codes arrays should be treated as immutable -- there are a number of optimizations and assumptions made up front in the constructor that will not change when the data changes. """ # Path codes STOP = 0 # 1 vertex MOVETO = 1 # 1 vertex LINETO = 2 # 1 vertex CURVE3 = 3 # 2 vertices CURVE4 = 4 # 3 vertices CLOSEPOLY = 79 # 1 vertex #: A dictionary mapping Path codes to the number of vertices that the #: code expects. NUM_VERTICES_FOR_CODE = {STOP: 1, MOVETO: 1, LINETO: 1, CURVE3: 2, CURVE4: 3, CLOSEPOLY: 1} code_type = np.uint8 def __init__(self, vertices, codes=None, _interpolation_steps=1, closed=False, readonly=False): """ Create a new path with the given vertices and codes. Parameters ---------- vertices : array_like The ``(n, 2)`` float array, masked array or sequence of pairs representing the vertices of the path. If *vertices* contains masked values, they will be converted to NaNs which are then handled correctly by the Agg PathIterator and other consumers of path data, such as :meth:`iter_segments`. codes : {None, array_like}, optional n-length array integers representing the codes of the path. If not None, codes must be the same length as vertices. If None, *vertices* will be treated as a series of line segments. _interpolation_steps : int, optional Used as a hint to certain projections, such as Polar, that this path should be linearly interpolated immediately before drawing. This attribute is primarily an implementation detail and is not intended for public use. closed : bool, optional If *codes* is None and closed is True, vertices will be treated as line segments of a closed polygon. 
readonly : bool, optional Makes the path behave in an immutable way and sets the vertices and codes as read-only arrays. """ if ma.isMaskedArray(vertices): vertices = vertices.astype(np.float_).filled(np.nan) else: vertices = np.asarray(vertices, np.float_) if codes is not None: codes = np.asarray(codes, self.code_type) assert codes.ndim == 1 assert len(codes) == len(vertices) if len(codes): assert codes[0] == self.MOVETO elif closed: codes = np.empty(len(vertices), dtype=self.code_type) codes[0] = self.MOVETO codes[1:-1] = self.LINETO codes[-1] = self.CLOSEPOLY assert vertices.ndim == 2 assert vertices.shape[1] == 2 self._vertices = vertices self._codes = codes self._interpolation_steps = _interpolation_steps self._update_values() if readonly: self._vertices.flags.writeable = False if self._codes is not None: self._codes.flags.writeable = False self._readonly = True else: self._readonly = False @classmethod def _fast_from_codes_and_verts(cls, verts, codes, internals=None): """ Creates a Path instance without the expense of calling the constructor Parameters ---------- verts : numpy array codes : numpy array (may not be None) internals : dict or None The attributes that the resulting path should have. Allowed keys are ``readonly``, ``should_simplify``, ``simplify_threshold``, ``has_nonfinite`` and ``interpolation_steps``. 
""" internals = internals or {} pth = cls.__new__(cls) pth._vertices = verts pth._codes = codes pth._readonly = internals.pop('readonly', False) pth.should_simplify = internals.pop('should_simplify', True) pth.simplify_threshold = internals.pop('simplify_threshold', rcParams['path.simplify_threshold']) pth._has_nonfinite = internals.pop('has_nonfinite', False) pth._interpolation_steps = internals.pop('interpolation_steps', 1) if internals: raise ValueError('Unexpected internals provided to ' '_fast_from_codes_and_verts: ' '{0}'.format('\n *'.join(six.iterkeys(internals)))) return pth def _update_values(self): self._should_simplify = ( rcParams['path.simplify'] and (len(self._vertices) >= 128 and (self._codes is None or np.all(self._codes <= Path.LINETO)))) self._simplify_threshold = rcParams['path.simplify_threshold'] self._has_nonfinite = not np.isfinite(self._vertices).all() @property def vertices(self): """ The list of vertices in the `Path` as an Nx2 numpy array. """ return self._vertices @vertices.setter def vertices(self, vertices): if self._readonly: raise AttributeError("Can't set vertices on a readonly Path") self._vertices = vertices self._update_values() @property def codes(self): """ The list of codes in the `Path` as a 1-D numpy array. Each code is one of `STOP`, `MOVETO`, `LINETO`, `CURVE3`, `CURVE4` or `CLOSEPOLY`. For codes that correspond to more than one vertex (`CURVE3` and `CURVE4`), that code will be repeated so that the length of `self.vertices` and `self.codes` is always the same. """ return self._codes @codes.setter def codes(self, codes): if self._readonly: raise AttributeError("Can't set codes on a readonly Path") self._codes = codes self._update_values() @property def simplify_threshold(self): """ The fraction of a pixel difference below which vertices will be simplified out. 
""" return self._simplify_threshold @simplify_threshold.setter def simplify_threshold(self, threshold): self._simplify_threshold = threshold @property def has_nonfinite(self): """ `True` if the vertices array has nonfinite values. """ return self._has_nonfinite @property def should_simplify(self): """ `True` if the vertices array should be simplified. """ return self._should_simplify @should_simplify.setter def should_simplify(self, should_simplify): self._should_simplify = should_simplify @property def readonly(self): """ `True` if the `Path` is read-only. """ return self._readonly def __copy__(self): """ Returns a shallow copy of the `Path`, which will share the vertices and codes with the source `Path`. """ import copy return copy.copy(self) copy = __copy__ def __deepcopy__(self): """ Returns a deepcopy of the `Path`. The `Path` will not be readonly, even if the source `Path` is. """ return self.__class__( self.vertices.copy(), self.codes.copy(), _interpolation_steps=self._interpolation_steps) deepcopy = __deepcopy__ @classmethod def make_compound_path_from_polys(cls, XY): """ Make a compound path object to draw a number of polygons with equal numbers of sides XY is a (numpolys x numsides x 2) numpy array of vertices. Return object is a :class:`Path` .. 
plot:: mpl_examples/api/histogram_path_demo.py """ # for each poly: 1 for the MOVETO, (numsides-1) for the LINETO, 1 for the # CLOSEPOLY; the vert for the closepoly is ignored but we still need # it to keep the codes aligned with the vertices numpolys, numsides, two = XY.shape assert(two==2) stride = numsides + 1 nverts = numpolys * stride verts = np.zeros((nverts, 2)) codes = np.ones(nverts, int) * cls.LINETO codes[0::stride] = cls.MOVETO codes[numsides::stride] = cls.CLOSEPOLY for i in range(numsides): verts[i::stride] = XY[:,i] return cls(verts, codes) @classmethod def make_compound_path(cls, *args): """Make a compound path from a list of Path objects.""" lengths = [len(x) for x in args] total_length = sum(lengths) vertices = np.vstack([x.vertices for x in args]) vertices.reshape((total_length, 2)) codes = np.empty(total_length, dtype=cls.code_type) i = 0 for path in args: if path.codes is None: codes[i] = cls.MOVETO codes[i + 1:i + len(path.vertices)] = cls.LINETO else: codes[i:i + len(path.codes)] = path.codes i += len(path.vertices) return cls(vertices, codes) def __repr__(self): return "Path(%r, %r)" % (self.vertices, self.codes) def __len__(self): return len(self.vertices) def iter_segments(self, transform=None, remove_nans=True, clip=None, snap=False, stroke_width=1.0, simplify=None, curves=True, sketch=None): """ Iterates over all of the curve segments in the path. Each iteration returns a 2-tuple (*vertices*, *code*), where *vertices* is a sequence of 1 - 3 coordinate pairs, and *code* is one of the :class:`Path` codes. Additionally, this method can provide a number of standard cleanups and conversions to the path. Parameters ---------- transform : None or :class:`~matplotlib.transforms.Transform` instance If not None, the given affine transformation will be applied to the path. remove_nans : {False, True}, optional If True, will remove all NaNs from the path and insert MOVETO commands to skip over them. 
clip : None or sequence, optional If not None, must be a four-tuple (x1, y1, x2, y2) defining a rectangle in which to clip the path. snap : None or bool, optional If None, auto-snap to pixels, to reduce fuzziness of rectilinear lines. If True, force snapping, and if False, don't snap. stroke_width : float, optional The width of the stroke being drawn. Needed as a hint for the snapping algorithm. simplify : None or bool, optional If True, perform simplification, to remove vertices that do not affect the appearance of the path. If False, perform no simplification. If None, use the should_simplify member variable. curves : {True, False}, optional If True, curve segments will be returned as curve segments. If False, all curves will be converted to line segments. sketch : None or sequence, optional If not None, must be a 3-tuple of the form (scale, length, randomness), representing the sketch parameters. """ if not len(self): return cleaned = self.cleaned(transform=transform, remove_nans=remove_nans, clip=clip, snap=snap, stroke_width=stroke_width, simplify=simplify, curves=curves, sketch=sketch) vertices = cleaned.vertices codes = cleaned.codes len_vertices = vertices.shape[0] # Cache these object lookups for performance in the loop. NUM_VERTICES_FOR_CODE = self.NUM_VERTICES_FOR_CODE STOP = self.STOP i = 0 while i < len_vertices: code = codes[i] if code == STOP: return else: num_vertices = NUM_VERTICES_FOR_CODE[code] curr_vertices = vertices[i:i+num_vertices].flatten() yield curr_vertices, code i += num_vertices def cleaned(self, transform=None, remove_nans=False, clip=None, quantize=False, simplify=False, curves=False, stroke_width=1.0, snap=False, sketch=None): """ Cleans up the path according to the parameters returning a new Path instance. .. seealso:: See :meth:`iter_segments` for details of the keyword arguments. Returns ------- Path instance with cleaned up vertices and codes. 
""" vertices, codes = _path.cleanup_path(self, transform, remove_nans, clip, snap, stroke_width, simplify, curves, sketch) internals = {'should_simplify': self.should_simplify and not simplify, 'has_nonfinite': self.has_nonfinite and not remove_nans, 'simplify_threshold': self.simplify_threshold, 'interpolation_steps': self._interpolation_steps} return Path._fast_from_codes_and_verts(vertices, codes, internals) def transformed(self, transform): """ Return a transformed copy of the path. .. seealso:: :class:`matplotlib.transforms.TransformedPath` A specialized path class that will cache the transformed result and automatically update when the transform changes. """ return Path(transform.transform(self.vertices), self.codes, self._interpolation_steps) def contains_point(self, point, transform=None, radius=0.0): """ Returns *True* if the path contains the given point. If *transform* is not *None*, the path will be transformed before performing the test. *radius* allows the path to be made slightly larger or smaller. """ if transform is not None: transform = transform.frozen() result = _path.point_in_path(point[0], point[1], radius, self, transform) return result def contains_points(self, points, transform=None, radius=0.0): """ Returns a bool array which is *True* if the path contains the corresponding point. If *transform* is not *None*, the path will be transformed before performing the test. *radius* allows the path to be made slightly larger or smaller. """ if transform is not None: transform = transform.frozen() result = _path.points_in_path(points, radius, self, transform) return result def contains_path(self, path, transform=None): """ Returns *True* if this path completely contains the given path. If *transform* is not *None*, the path will be transformed before performing the test. 
""" if transform is not None: transform = transform.frozen() return _path.path_in_path(self, None, path, transform) def get_extents(self, transform=None): """ Returns the extents (*xmin*, *ymin*, *xmax*, *ymax*) of the path. Unlike computing the extents on the *vertices* alone, this algorithm will take into account the curves and deal with control points appropriately. """ from .transforms import Bbox path = self if transform is not None: transform = transform.frozen() if not transform.is_affine: path = self.transformed(transform) transform = None return Bbox(_path.get_path_extents(path, transform)) def intersects_path(self, other, filled=True): """ Returns *True* if this path intersects another given path. *filled*, when True, treats the paths as if they were filled. That is, if one path completely encloses the other, :meth:`intersects_path` will return True. """ return _path.path_intersects_path(self, other, filled) def intersects_bbox(self, bbox, filled=True): """ Returns *True* if this path intersects a given :class:`~matplotlib.transforms.Bbox`. *filled*, when True, treats the path as if it was filled. That is, if one path completely encloses the other, :meth:`intersects_path` will return True. """ from .transforms import BboxTransformTo rectangle = self.unit_rectangle().transformed( BboxTransformTo(bbox)) result = self.intersects_path(rectangle, filled) return result def interpolated(self, steps): """ Returns a new path resampled to length N x steps. Does not currently handle interpolating curves. """ if steps == 1: return self vertices = simple_linear_interpolation(self.vertices, steps) codes = self.codes if codes is not None: new_codes = Path.LINETO * np.ones(((len(codes) - 1) * steps + 1, )) new_codes[0::steps] = codes else: new_codes = None return Path(vertices, new_codes) def to_polygons(self, transform=None, width=0, height=0): """ Convert this path to a list of polygons. Each polygon is an Nx2 array of vertices. 
In other words, each polygon has no ``MOVETO`` instructions or curves. This is useful for displaying in backends that do not support compound paths or Bezier curves, such as GDK. If *width* and *height* are both non-zero then the lines will be simplified so that vertices outside of (0, 0), (width, height) will be clipped. """ if len(self.vertices) == 0: return [] if transform is not None: transform = transform.frozen() if self.codes is None and (width == 0 or height == 0): if transform is None: return [self.vertices] else: return [transform.transform(self.vertices)] # Deal with the case where there are curves and/or multiple # subpaths (using extension code) return _path.convert_path_to_polygons(self, transform, width, height) _unit_rectangle = None @classmethod def unit_rectangle(cls): """ Return a :class:`Path` instance of the unit rectangle from (0, 0) to (1, 1). """ if cls._unit_rectangle is None: cls._unit_rectangle = \ cls([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]], [cls.MOVETO, cls.LINETO, cls.LINETO, cls.LINETO, cls.CLOSEPOLY], readonly=True) return cls._unit_rectangle _unit_regular_polygons = WeakValueDictionary() @classmethod def unit_regular_polygon(cls, numVertices): """ Return a :class:`Path` instance for a unit regular polygon with the given *numVertices* and radius of 1.0, centered at (0, 0). 
""" if numVertices <= 16: path = cls._unit_regular_polygons.get(numVertices) else: path = None if path is None: theta = (2*np.pi/numVertices * np.arange(numVertices + 1).reshape((numVertices + 1, 1))) # This initial rotation is to make sure the polygon always # "points-up" theta += np.pi / 2.0 verts = np.concatenate((np.cos(theta), np.sin(theta)), 1) codes = np.empty((numVertices + 1,)) codes[0] = cls.MOVETO codes[1:-1] = cls.LINETO codes[-1] = cls.CLOSEPOLY path = cls(verts, codes, readonly=True) if numVertices <= 16: cls._unit_regular_polygons[numVertices] = path return path _unit_regular_stars = WeakValueDictionary() @classmethod def unit_regular_star(cls, numVertices, innerCircle=0.5): """ Return a :class:`Path` for a unit regular star with the given numVertices and radius of 1.0, centered at (0, 0). """ if numVertices <= 16: path = cls._unit_regular_stars.get((numVertices, innerCircle)) else: path = None if path is None: ns2 = numVertices * 2 theta = (2*np.pi/ns2 * np.arange(ns2 + 1)) # This initial rotation is to make sure the polygon always # "points-up" theta += np.pi / 2.0 r = np.ones(ns2 + 1) r[1::2] = innerCircle verts = np.vstack((r*np.cos(theta), r*np.sin(theta))).transpose() codes = np.empty((ns2 + 1,)) codes[0] = cls.MOVETO codes[1:-1] = cls.LINETO codes[-1] = cls.CLOSEPOLY path = cls(verts, codes, readonly=True) if numVertices <= 16: cls._unit_regular_polygons[(numVertices, innerCircle)] = path return path @classmethod def unit_regular_asterisk(cls, numVertices): """ Return a :class:`Path` for a unit regular asterisk with the given numVertices and radius of 1.0, centered at (0, 0). """ return cls.unit_regular_star(numVertices, 0.0) _unit_circle = None @classmethod def unit_circle(cls): """ Return the readonly :class:`Path` of the unit circle. For most cases, :func:`Path.circle` will be what you want. 
""" if cls._unit_circle is None: cls._unit_circle = cls.circle(center=(0, 0), radius=1, readonly=True) return cls._unit_circle @classmethod def circle(cls, center=(0., 0.), radius=1., readonly=False): """ Return a Path representing a circle of a given radius and center. Parameters ---------- center : pair of floats The center of the circle. Default ``(0, 0)``. radius : float The radius of the circle. Default is 1. readonly : bool Whether the created path should have the "readonly" argument set when creating the Path instance. Notes ----- The circle is approximated using cubic Bezier curves. This uses 8 splines around the circle using the approach presented here: Lancaster, Don. `Approximating a Circle or an Ellipse Using Four Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_. """ MAGIC = 0.2652031 SQRTHALF = np.sqrt(0.5) MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0) vertices = np.array([[0.0, -1.0], [MAGIC, -1.0], [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45], [SQRTHALF, -SQRTHALF], [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45], [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC], [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45], [SQRTHALF, SQRTHALF], [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45], [MAGIC, 1.0], [0.0, 1.0], [-MAGIC, 1.0], [-SQRTHALF+MAGIC45, SQRTHALF+MAGIC45], [-SQRTHALF, SQRTHALF], [-SQRTHALF-MAGIC45, SQRTHALF-MAGIC45], [-1.0, MAGIC], [-1.0, 0.0], [-1.0, -MAGIC], [-SQRTHALF-MAGIC45, -SQRTHALF+MAGIC45], [-SQRTHALF, -SQRTHALF], [-SQRTHALF+MAGIC45, -SQRTHALF-MAGIC45], [-MAGIC, -1.0], [0.0, -1.0], [0.0, -1.0]], dtype=np.float_) codes = [cls.CURVE4] * 26 codes[0] = cls.MOVETO codes[-1] = cls.CLOSEPOLY return Path(vertices * radius + center, codes, readonly=readonly) _unit_circle_righthalf = None @classmethod def unit_circle_righthalf(cls): """ Return a :class:`Path` of the right half of a unit circle. The circle is approximated using cubic Bezier curves. This uses 4 splines around the circle using the approach presented here: Lancaster, Don. 
`Approximating a Circle or an Ellipse Using Four Bezier Cubic Splines <http://www.tinaja.com/glib/ellipse4.pdf>`_. """ if cls._unit_circle_righthalf is None: MAGIC = 0.2652031 SQRTHALF = np.sqrt(0.5) MAGIC45 = np.sqrt((MAGIC*MAGIC) / 2.0) vertices = np.array( [[0.0, -1.0], [MAGIC, -1.0], [SQRTHALF-MAGIC45, -SQRTHALF-MAGIC45], [SQRTHALF, -SQRTHALF], [SQRTHALF+MAGIC45, -SQRTHALF+MAGIC45], [1.0, -MAGIC], [1.0, 0.0], [1.0, MAGIC], [SQRTHALF+MAGIC45, SQRTHALF-MAGIC45], [SQRTHALF, SQRTHALF], [SQRTHALF-MAGIC45, SQRTHALF+MAGIC45], [MAGIC, 1.0], [0.0, 1.0], [0.0, -1.0]], np.float_) codes = cls.CURVE4 * np.ones(14) codes[0] = cls.MOVETO codes[-1] = cls.CLOSEPOLY cls._unit_circle_righthalf = cls(vertices, codes, readonly=True) return cls._unit_circle_righthalf @classmethod def arc(cls, theta1, theta2, n=None, is_wedge=False): """ Return an arc on the unit circle from angle *theta1* to angle *theta2* (in degrees). If *n* is provided, it is the number of spline segments to make. If *n* is not provided, the number of spline segments is determined based on the delta between *theta1* and *theta2*. Masionobe, L. 2003. `Drawing an elliptical arc using polylines, quadratic or cubic Bezier curves <http://www.spaceroots.org/documents/ellipse/index.html>`_. 
""" # degrees to radians theta1 *= np.pi / 180.0 theta2 *= np.pi / 180.0 twopi = np.pi * 2.0 halfpi = np.pi * 0.5 eta1 = np.arctan2(np.sin(theta1), np.cos(theta1)) eta2 = np.arctan2(np.sin(theta2), np.cos(theta2)) eta2 -= twopi * np.floor((eta2 - eta1) / twopi) if (theta2 - theta1 > np.pi) and (eta2 - eta1 < np.pi): eta2 += twopi # number of curve segments to make if n is None: n = int(2 ** np.ceil((eta2 - eta1) / halfpi)) if n < 1: raise ValueError("n must be >= 1 or None") deta = (eta2 - eta1) / n t = np.tan(0.5 * deta) alpha = np.sin(deta) * (np.sqrt(4.0 + 3.0 * t * t) - 1) / 3.0 steps = np.linspace(eta1, eta2, n + 1, True) cos_eta = np.cos(steps) sin_eta = np.sin(steps) xA = cos_eta[:-1] yA = sin_eta[:-1] xA_dot = -yA yA_dot = xA xB = cos_eta[1:] yB = sin_eta[1:] xB_dot = -yB yB_dot = xB if is_wedge: length = n * 3 + 4 vertices = np.zeros((length, 2), np.float_) codes = cls.CURVE4 * np.ones((length, ), cls.code_type) vertices[1] = [xA[0], yA[0]] codes[0:2] = [cls.MOVETO, cls.LINETO] codes[-2:] = [cls.LINETO, cls.CLOSEPOLY] vertex_offset = 2 end = length - 2 else: length = n * 3 + 1 vertices = np.empty((length, 2), np.float_) codes = cls.CURVE4 * np.ones((length, ), cls.code_type) vertices[0] = [xA[0], yA[0]] codes[0] = cls.MOVETO vertex_offset = 1 end = length vertices[vertex_offset :end:3, 0] = xA + alpha * xA_dot vertices[vertex_offset :end:3, 1] = yA + alpha * yA_dot vertices[vertex_offset+1:end:3, 0] = xB - alpha * xB_dot vertices[vertex_offset+1:end:3, 1] = yB - alpha * yB_dot vertices[vertex_offset+2:end:3, 0] = xB vertices[vertex_offset+2:end:3, 1] = yB return cls(vertices, codes, readonly=True) @classmethod def wedge(cls, theta1, theta2, n=None): """ Return a wedge of the unit circle from angle *theta1* to angle *theta2* (in degrees). If *n* is provided, it is the number of spline segments to make. If *n* is not provided, the number of spline segments is determined based on the delta between *theta1* and *theta2*. 
""" return cls.arc(theta1, theta2, n, True) _hatch_dict = maxdict(8) @classmethod def hatch(cls, hatchpattern, density=6): """ Given a hatch specifier, *hatchpattern*, generates a Path that can be used in a repeated hatching pattern. *density* is the number of lines per unit square. """ from matplotlib.hatch import get_path if hatchpattern is None: return None hatch_path = cls._hatch_dict.get((hatchpattern, density)) if hatch_path is not None: return hatch_path hatch_path = get_path(hatchpattern, density) cls._hatch_dict[(hatchpattern, density)] = hatch_path return hatch_path def clip_to_bbox(self, bbox, inside=True): """ Clip the path to the given bounding box. The path must be made up of one or more closed polygons. This algorithm will not behave correctly for unclosed paths. If *inside* is `True`, clip to the inside of the box, otherwise to the outside of the box. """ # Use make_compound_path_from_polys verts = _path.clip_path_to_rect(self, bbox, inside) paths = [Path(poly) for poly in verts] return self.make_compound_path(*paths) def get_path_collection_extents( master_transform, paths, transforms, offsets, offset_transform): """ Given a sequence of :class:`Path` objects, :class:`~matplotlib.transforms.Transform` objects and offsets, as found in a :class:`~matplotlib.collections.PathCollection`, returns the bounding box that encapsulates all of them. *master_transform* is a global transformation to apply to all paths *paths* is a sequence of :class:`Path` instances. *transforms* is a sequence of :class:`~matplotlib.transforms.Affine2D` instances. *offsets* is a sequence of (x, y) offsets (or an Nx2 array) *offset_transform* is a :class:`~matplotlib.transforms.Affine2D` to apply to the offsets before applying the offset to the path. The way that *paths*, *transforms* and *offsets* are combined follows the same method as for collections. 
Each is iterated over independently, so if you have 3 paths, 2 transforms and 1 offset, their combinations are as follows: (A, A, A), (B, B, A), (C, A, A) """ from .transforms import Bbox if len(paths) == 0: raise ValueError("No paths provided") return Bbox.from_extents(*_path.get_path_collection_extents( master_transform, paths, transforms, offsets, offset_transform)) def get_paths_extents(paths, transforms=[]): """ Given a sequence of :class:`Path` objects and optional :class:`~matplotlib.transforms.Transform` objects, returns the bounding box that encapsulates all of them. *paths* is a sequence of :class:`Path` instances. *transforms* is an optional sequence of :class:`~matplotlib.transforms.Affine2D` instances to apply to each path. """ from .transforms import Bbox, Affine2D if len(paths) == 0: raise ValueError("No paths provided") return Bbox.from_extents(*_path.get_path_collection_extents( Affine2D(), paths, transforms, [], Affine2D())) def _define_deprecated_functions(ns): from .cbook import deprecated # The C++ functions are not meant to be used directly. # Users should use the more pythonic wrappers in the Path # class instead. for func, alternative in [ ('point_in_path', 'path.Path.contains_point'), ('get_path_extents', 'path.Path.get_extents'), ('point_in_path_collection', 'collection.Collection.contains'), ('path_in_path', 'path.Path.contains_path'), ('path_intersects_path', 'path.Path.intersects_path'), ('convert_path_to_polygons', 'path.Path.to_polygons'), ('cleanup_path', 'path.Path.cleaned'), ('points_in_path', 'path.Path.contains_points'), ('clip_path_to_rect', 'path.Path.clip_to_bbox')]: ns[func] = deprecated( since='1.3', alternative=alternative)(getattr(_path, func)) _define_deprecated_functions(locals())
#!/usr/bin/python # # This file is a modification of jsparser.py from pynarcissus ( https://code.google.com/p/pynarcissus/ ) # # ***** BEGIN LICENSE BLOCK ***** # Version: MPL 1.1/GPL 2.0/LGPL 2.1 # # The contents of this file are subject to the Mozilla Public License Version # 1.1 (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # http://www.mozilla.org/MPL/ # # Software distributed under the License is distributed on an "AS IS" basis, # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License # for the specific language governing rights and limitations under the # License. # # The Original Code is the Narcissus JavaScript engine, written in Javascript. # # The Initial Developer of the Original Code is # Brendan Eich <brendan@mozilla.org>. # Portions created by the Initial Developer are Copyright (C) 2004 # the Initial Developer. All Rights Reserved. # # The Python version of the code was created by JT Olds <jtolds@xnet5.com>, # and is a direct translation from the Javascript version. # # Alternatively, the contents of this file may be used under the terms of # either the GNU General Public License Version 2 or later (the "GPL"), or # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), # in which case the provisions of the GPL or the LGPL are applicable instead # of those above. If you wish to allow use of your version of this file only # under the terms of either the GPL or the LGPL, and not to allow others to # use your version of this file under the terms of the MPL, indicate your # decision by deleting the provisions above and replace them with the notice # and other provisions required by the GPL or the LGPL. If you do not delete # the provisions above, a recipient may use your version of this file under # the terms of any one of the MPL, the GPL or the LGPL. # # ***** END LICENSE BLOCK ***** */ """ PyNarcissus A lexical scanner and parser. 
JS implemented in JS, ported to Python. """ import re import sys class Object: pass class Error_(Exception): pass class ParseError(Error_): pass GLOBALS = {} tokens = {0: 'END', 1: '\n', 2: ';', 3: ',', 4: '=', 5: '?', 6: ':', 7: 'CONDITIONAL', 8: '||', 9: '&&', 10: '|', 11: '^', 12: '&', 13: '==', 14: '!=', 15: '===', 16: '!==', 17: '<', 18: '<=', 19: '>=', 20: '>', 21: '<<', 22: '>>', 23: '>>>', 24: '+', 25: '-', 26: '*', 27: '/', 28: '%', 29: '!', 30: '~', 31: 'UNARY_PLUS', 32: 'UNARY_MINUS', 33: '++', 34: '--', 35: '.', 36: '[', 37: ']', 38: '{', 39: '}', 40: '(', 41: ')', 42: 'SCRIPT', 43: 'BLOCK', 44: 'LABEL', 45: 'FOR_IN', 46: 'CALL', 47: 'NEW_WITH_ARGS', 48: 'INDEX', 49: 'ARRAY_INIT', 50: 'OBJECT_INIT', 51: 'PROPERTY_INIT', 52: 'GETTER', 53: 'SETTER', 54: 'GROUP', 55: 'LIST', 56: 'IDENTIFIER', 57: 'NUMBER', 58: 'STRING', 59: 'REGEXP', 60: 'break', 61: 'case', 62: 'catch', 63: 'const', 64: 'continue', 65: 'debugger', 66: 'default', 67: 'delete', 68: 'do', 69: 'else', 70: 'enum', 71: 'false', 72: 'finally', 73: 'for', 74: 'function', 75: 'if', 76: 'in', 77: 'instanceof', 78: 'new', 79: 'null', 80: 'return', 81: 'switch', 82: 'this', 83: 'throw', 84: 'true', 85: 'try', 86: 'typeof', 87: 'var', 88: 'void', 89: 'while', 90: 'with'} opTypeNames = [('\n', "NEWLINE"), (';', "SEMICOLON"), (',', "COMMA"), ('?', "HOOK"), (':', "COLON"), ('||', "OR"), ('&&', "AND"), ('|', "BITWISE_OR"), ('^', "BITWISE_XOR"), ('&', "BITWISE_AND"), ('===', "STRICT_EQ"), ('==', "EQ"), ('=', "ASSIGN"), ('!==', "STRICT_NE"), ('!=', "NE"), ('<<', "LSH"), ('<=', "LE"), ('<', "LT"), ('>>>', "URSH"), ('>>', "RSH"), ('>=', "GE"), ('>', "GT"), ('++', "INCREMENT"), ('--', "DECREMENT"), ('+', "PLUS"), ('-', "MINUS"), ('*', "MUL"), ('/', "DIV"), ('%', "MOD"), ('!', "NOT"), ('~', "BITWISE_NOT"), ('.', "DOT"), ('[', "LEFT_BRACKET"), (']', "RIGHT_BRACKET"), ('{', "LEFT_CURLY"), ('}', "RIGHT_CURLY"), ('(', "LEFT_PAREN"), (')', "RIGHT_PAREN")] opTypeNames = { '>=': 'GE', '>>': 'RSH', '<<': 'LSH', '<=': 
'LE', '!=': 'NE', '!': 'NOT', '%': 'MOD', '&': 'BITWISE_AND', ')': 'RIGHT_PAREN', '(': 'LEFT_PAREN', '+': 'PLUS', '*': 'MUL', '-': 'MINUS', ',': 'COMMA', '/': 'DIV', '.': 'DOT', '>>>': 'URSH', ';': 'SEMICOLON', ':': 'COLON', '=': 'ASSIGN', '||': 'OR', '?': 'HOOK', '>': 'GT', '\n': 'NEWLINE', '==': 'EQ', '&&': 'AND', '[': 'LEFT_BRACKET', ']': 'RIGHT_BRACKET', '^': 'BITWISE_XOR', '===': 'STRICT_EQ', '!==': 'STRICT_NE', '++': 'INCREMENT', '<': 'LT', '--': 'DECREMENT', '{': 'LEFT_CURLY', '}': 'RIGHT_CURLY', '|': 'BITWISE_OR', '~': 'BITWISE_NOT' } GLOBALS = {'VOID': 88, 'RIGHT_BRACKET': 37, 'UNARY_MINUS': 32, 'RIGHT_PAREN': 41, 'STRICT_EQ': 15, 'TRUE': 84, 'MINUS': 25, 'NEWLINE': 1, 'PLUS': 24, 'GT': 20, 'DEBUGGER': 65, 'ENUM': 70, 'GE': 19, 'VAR': 87, 'ARRAY_INIT': 49, 'BITWISE_XOR': 11, 'RETURN': 80, 'BITWISE_NOT': 30, 'THIS': 82, 'TYPEOF': 86, 'OR': 8, 'DELETE': 67, 'INDEX': 48, 'GROUP': 54, 'NEW_WITH_ARGS': 47, 'LABEL': 44, 'BITWISE_AND': 12, 'NEW': 78, 'BLOCK': 43, 'SETTER': 53, 'WITH': 90, 'LSH': 21, 'COLON': 6, 'UNARY_PLUS': 31, 'FUNCTION': 74, 'END': 0, 'FOR': 73, 'ELSE': 69, 'TRY': 85, 'GETTER': 52, 'REGEXP': 59, 'EQ': 13, 'DECREMENT': 34, 'AND': 9, 'CONTINUE': 64, 'NOT': 29, 'LEFT_CURLY': 38, 'RIGHT_CURLY': 39, 'DEFAULT': 66, 'STRICT_NE': 16, 'WHILE': 89, 'MUL': 26, 'DOT': 35, 'CASE': 61, 'SEMICOLON': 2, 'SCRIPT': 42, 'CONDITIONAL': 7, 'LEFT_PAREN': 40, 'NE': 14, 'SWITCH': 81, 'INCREMENT': 33, 'CATCH': 62, 'IDENTIFIER': 56, 'INSTANCEOF': 77, 'FALSE': 71, 'LIST': 55, 'BREAK': 60, 'BITWISE_OR': 10, 'LEFT_BRACKET': 36, 'DO': 68, 'CONST': 63, 'NUMBER': 57, 'HOOK': 5, 'DIV': 27, 'NULL': 79, 'LE': 18, 'URSH': 23, 'LT': 17, 'COMMA': 3, 'ASSIGN': 4, 'STRING': 58, 'FINALLY': 72, 'FOR_IN': 45, 'IN': 76, 'IF': 75, 'RSH': 22, 'PROPERTY_INIT': 51, 'CALL': 46, 'OBJECT_INIT': 50, 'MOD': 28, 'THROW': 83} tokens = {0: 'END', 1: '\n', 2: ';', 3: ',', 4: '=', 5: '?', 6: ':', 7: 'CONDITIONAL', 8: '||', 9: '&&', 10: '|', 11: '^', 12: '&', 13: '==', 14: '!=', 15: '===', 16: '!==', 
17: '<', 18: '<=', 19: '>=', 20: '>', 21: '<<', 22: '>>', 23: '>>>', 24: '+', 25: '-', 26: '*', 27: '/', 28: '%', 29: '!', 30: '~', 31: 'UNARY_PLUS', 32: 'UNARY_MINUS', 33: '++', 34: '--', 35: '.', 36: '[', 37: ']', 38: '{', 39: '}', 40: '(', 41: ')', 42: 'SCRIPT', 43: 'BLOCK', 44: 'LABEL', 45: 'FOR_IN', 46: 'CALL', 47: 'NEW_WITH_ARGS', 48: 'INDEX', 49: 'ARRAY_INIT', 50: 'OBJECT_INIT', 51: 'PROPERTY_INIT', 52: 'GETTER', 53: 'SETTER', 54: 'GROUP', 55: 'LIST', 56: 'IDENTIFIER', 57: 'NUMBER', 58: 'STRING', 59: 'REGEXP', 60: 'break', 61: 'case', 62: 'catch', 63: 'const', 64: 'continue', 65: 'debugger', 66: 'default', 67: 'delete', 68: 'do', 69: 'else', 70: 'enum', 71: 'false', 72: 'finally', 73: 'for', 74: 'function', 75: 'if', 76: 'in', 77: 'instanceof', 78: 'new', 79: 'null', 80: 'return', 81: 'switch', 82: 'this', 83: 'throw', 84: 'true', 85: 'try', 86: 'typeof', 87: 'var', 88: 'void', 89: 'while', 90: 'with', 'UNARY_PLUS': 31, 'ARRAY_INIT': 49, 'instanceof': 77, '--': 34, 'try': 85, 'this': 82, 'UNARY_MINUS': 32, '|': 10, 'INDEX': 48, 'GROUP': 54, 'NEW_WITH_ARGS': 47, 'LABEL': 44, 'BLOCK': 43, 'SETTER': 53, 'const': 63, 'for': 73, '+': 24, '/': 27, 'case': 61, 'continue': 64, 'new': 78, ';': 2, '?': 5, 'END': 0, 'enum': 70, 'GETTER': 52, '&&': 9, 'REGEXP': 59, '[': 36, 'throw': 83, '!==': 16, '++': 33, 'SCRIPT': 42, '(': 40, '{': 38, 'delete': 67, '*': 26, '>=': 19, '>>': 22, '\n': 1, ',': 3, '!=': 14, 'debugger': 65, '&': 12, 'CONDITIONAL': 7, 'finally': 72, '.': 35, 'var': 87, ':': 6, '>': 20, 'function': 74, 'with': 90, 'else': 69, '>>>': 23, 'catch': 62, 'true': 84, '^': 11, '===': 15, '||': 8, 'IDENTIFIER': 56, 'default': 66, 'LIST': 55, '<': 17, 'while': 89, 'typeof': 86, '~': 30, 'false': 71, 'do': 68, '<<': 21, '<=': 18, 'NUMBER': 57, 'in': 76, 'return': 80, 'null': 79, 'if': 75, '!': 29, 'FOR_IN': 45, '%': 28, ')': 41, '-': 25, '==': 13, '=': 4, 'void': 88, 'STRING': 58, ']': 37, 'break': 60, 'PROPERTY_INIT': 51, 'switch': 81, 'CALL': 46, 'OBJECT_INIT': 
50, '}': 39} keywords = {'false': 71, 'debugger': 65, 'in': 76, 'null': 79, 'if': 75, 'const': 63, 'for': 73, 'with': 90, 'while': 89, 'finally': 72, 'var': 87, 'new': 78, 'function': 74, 'do': 68, 'return': 80, 'void': 88, 'enum': 70, 'else': 69, 'break': 60, 'catch': 62, 'instanceof': 77, 'true': 84, 'throw': 83, 'case': 61, 'default': 66, 'try': 85, 'this': 82, 'switch': 81, 'continue': 64, 'typeof': 86, 'delete': 67} assignOps = {0: '|', 1: '^', 2: '&', 3: '<<', 4: '>>', 5: '>>>', '>>': 22, 7: '-', 8: '*', 9: '/', '<<': 21, '%': 28, 6: '+', '&': 12, '+': 24, '*': 26, '-': 25, '/': 27, '>>>': 23, 10: '%', '^': 11, '|': 10} opRegExp = re.compile(r"^;|^,|^\?|^:|^\|\||^\&\&|^\||^\^|^\&|^===|^==|^=|^!==|^!=|^<<|^<=|^<|^>>>|^>>|^>=|^>|^\+\+|^\-\-|^\+|^\-|^\*|^\/|^%|^!|^~|^\.|^\[|^\]|^\{|^\}|^\(|^\)") # A regexp to match floating point literals (but not integer literals). fpRegExp = re.compile(r'^\d+\.\d*(?:[eE][-+]?\d+)?|^\d+(?:\.\d*)?[eE][-+]?\d+|^\.\d+(?:[eE][-+]?\d+)?') # A regexp to match regexp literals. 
reRegExp = re.compile(r'^\/((?:\\.|\[(?:\\.|[^\]])*\]|[^\/])+)\/([gimy]*)')


class SyntaxError_(ParseError):
    """A ParseError carrying the offending filename and line number."""

    def __init__(self, message, filename, lineno):
        ParseError.__init__(self, "Syntax error: %s\n%s:%s" %
                            (message, filename, lineno))


class Tokenizer(object):
    """Lexer over Javascript source with a 4-slot token ring buffer.

    `s` is the source text, `f` the filename used in error messages and
    `l` the line number of the first line of `s`.
    """

    def __init__(self, s, f, l):
        self.cursor = 0               # byte offset of the next unread char
        self.source = str(s)
        self.tokens = {}              # ring buffer slots, keyed 0..3
        self.tokenIndex = 0           # slot of the current token
        self.lookahead = 0            # number of tokens pushed back via unget
        self.scanNewlines = False     # treat '\n' as a token (ASI support)
        self.scanOperand = True       # expecting an operand (affects / and +-)
        self.filename = f
        self.lineno = l

    # Remaining unread source.
    input_ = property(lambda self: self.source[self.cursor:])
    # True once the next token is END.
    done = property(lambda self: self.peek() == GLOBALS['END'])
    # The most recently consumed token record (an Object), or None.
    token = property(lambda self: self.tokens.get(self.tokenIndex))

    def match(self, tt):
        """Consume the next token if it has type tt; otherwise push it back.

        Returns a truthy value on a match (unget() returns None, which is
        falsy, so callers can use the result as a boolean).
        """
        return self.get() == tt or self.unget()

    def mustMatch(self, tt):
        """Like match() but raises a syntax error when tt is absent."""
        if not self.match(tt):
            raise self.newSyntaxError("Missing " + tokens.get(tt).lower())
        return self.token

    def peek(self):
        """Return the type of the next token without consuming it."""
        if self.lookahead:
            next_token = self.tokens.get(
                (self.tokenIndex + self.lookahead) & 3)
            if self.scanNewlines and (getattr(next_token, "lineno", None) !=
                                      getattr(self, "lineno", None)):
                tt = GLOBALS['NEWLINE']
            else:
                tt = getattr(next_token, "type_", None)
        else:
            tt = self.get()
            self.unget()
        return tt

    def peekOnSameLine(self):
        """peek() with newlines significant — used for semicolon insertion."""
        self.scanNewlines = True
        tt = self.peek()
        self.scanNewlines = False
        return tt

    def get(self):
        """Consume and return the type code of the next token."""
        # First serve any tokens previously pushed back with unget().
        while self.lookahead:
            self.lookahead -= 1
            self.tokenIndex = (self.tokenIndex + 1) & 3
            token = self.tokens.get(self.tokenIndex)
            if (getattr(token, "type_", None) != GLOBALS['NEWLINE'] or
                    self.scanNewlines):
                return getattr(token, "type_", None)
        # Skip whitespace and comments, tracking line numbers.
        while True:
            input__ = self.input_
            if self.scanNewlines:
                match = re.match(r'^[ \t]+', input__)   # keep '\n' visible
            else:
                match = re.match(r'^\s+', input__)
            if match:
                spaces = match.group(0)
                self.cursor += len(spaces)
                newlines = re.findall(r'\n', spaces)
                if newlines:
                    self.lineno += len(newlines)
                input__ = self.input_
            # Block (/* */) or line (//) comment.
            match = re.match(r'^\/(?:\*(?:.|\n)*?\*\/|\/.*)', input__)
            if not match:
                break
            comment = match.group(0)
            self.cursor += len(comment)
            newlines = re.findall(r'\n', comment)
            if newlines:
                self.lineno += len(newlines)
        self.tokenIndex = (self.tokenIndex + 1) & 3
        token = self.tokens.get(self.tokenIndex)
        if not token:
            token = Object()
            self.tokens[self.tokenIndex] = token
        if not input__:
            token.type_ = GLOBALS['END']
            return GLOBALS['END']

        def matchInput():
            # Try each literal/operator class in order; fills in token.type_
            # and token.value and returns the matched text.
            match = fpRegExp.match(input__)
            if match:
                token.type_ = GLOBALS['NUMBER']
                token.value = float(match.group(0))
                return match.group(0)
            match = re.match(r'^0[xX][\da-fA-F]+|^0[0-7]*|^\d+', input__)
            if match:
                token.type_ = GLOBALS['NUMBER']
                # NOTE(review): eval() on source text — acceptable only for
                # trusted input; a literal parser would be safer.
                token.value = eval(match.group(0))
                return match.group(0)
            match = re.match(r'^[$_\w]+', input__)  # FIXME no ES3 unicode
            if match:
                id_ = match.group(0)
                token.type_ = keywords.get(id_, GLOBALS['IDENTIFIER'])
                token.value = id_
                return match.group(0)
            match = re.match(r'^"(?:\\.|[^"])*"|^\'(?:\\.|[^\'])*\'', input__)
            if match:
                token.type_ = GLOBALS['STRING']
                # NOTE(review): same eval() caveat as above.
                token.value = eval(match.group(0))
                return match.group(0)
            if self.scanOperand:
                # A '/' here starts a regexp literal, not division.
                match = reRegExp.match(input__)
                if match:
                    token.type_ = GLOBALS['REGEXP']
                    token.value = {"regexp": match.group(1),
                                   "modifiers": match.group(2)}
                    return match.group(0)
            match = opRegExp.match(input__)
            if match:
                op = match.group(0)
                # NOTE(review): input__[len(op)] raises IndexError when the
                # source ends right after `op` — TODO confirm intended.
                if op in assignOps and input__[len(op)] == '=':
                    token.type_ = GLOBALS['ASSIGN']
                    token.assignOp = GLOBALS[opTypeNames[op]]
                    token.value = op
                    return match.group(0) + "="
                token.type_ = GLOBALS[opTypeNames[op]]
                if self.scanOperand and (token.type_ in (GLOBALS['PLUS'],
                                                         GLOBALS['MINUS'])):
                    # In operand position +/- are unary; shift the code over.
                    token.type_ += GLOBALS['UNARY_PLUS'] - GLOBALS['PLUS']
                token.assignOp = None
                token.value = op
                return match.group(0)
            if self.scanNewlines:
                match = re.match(r'^\n', input__)
                if match:
                    token.type_ = GLOBALS['NEWLINE']
                    return match.group(0)
            raise self.newSyntaxError("Illegal token")

        token.start = self.cursor
        self.cursor += len(matchInput())
        token.end = self.cursor
        token.lineno = self.lineno
        return getattr(token, "type_", None)

    def unget(self):
        """Push the current token back; at most 3 tokens of lookahead."""
        self.lookahead += 1
        if self.lookahead == 4:
            # BUG FIX: this was `raise "PANIC: ..."`; string exceptions are
            # invalid (TypeError) — raise a real exception instead.
            raise ParseError("PANIC: too much lookahead!")
        self.tokenIndex = (self.tokenIndex - 1) & 3

    def newSyntaxError(self, m):
        """Build (not raise) a SyntaxError_ at the current position."""
        return SyntaxError_(m, self.filename, self.lineno)


class CompilerContext(object):
    """Per-function parsing state: statement stack, declarations, nesting."""

    def __init__(self, inFunction):
        self.inFunction = inFunction
        self.stmtStack = []
        self.funDecls = []
        self.varDecls = []
        self.bracketLevel = 0
        self.curlyLevel = 0
        self.parenLevel = 0
        self.hookLevel = 0
        self.ecmaStrictMode = False
        self.inForLoopInit = False


def Script(t, x):
    """Parse a whole script: a SCRIPT node with fun/var declaration lists."""
    n = Statements(t, x)
    n.type_ = GLOBALS['SCRIPT']
    n.funDecls = x.funDecls
    n.varDecls = x.varDecls
    return n


class Node(list):
    """A parse-tree node: a list of children plus token-derived attributes."""

    def __init__(self, t, type_=None, args=()):
        # BUG FIX: default was a shared mutable list (args=[]); it is only
        # iterated, so an immutable tuple is safe and backward compatible.
        list.__init__(self)
        token = t.token
        if token:
            if type_:
                self.type_ = type_
            else:
                self.type_ = getattr(token, "type_", None)
            self.value = token.value
            self.lineno = token.lineno
            self.start = token.start
            self.end = token.end
        else:
            self.type_ = type_
            self.lineno = t.lineno
        self.tokenizer = t
        for arg in args:
            self.append(arg)

    # Human-readable type name (shadows builtin `type` — kept for interface).
    type = property(lambda self: tokenstr(self.type_))

    # Always use append to add operands to an expression, to update start
    # and end.  (`numbers` is unused; kept for interface compatibility.)
    def append(self, kid, numbers=()):
        if kid:
            if hasattr(self, "start") and kid.start < self.start:
                self.start = kid.start
            if hasattr(self, "end") and self.end < kid.end:
                self.end = kid.end
        return list.append(self, kid)

    indentLevel = 0  # class-wide indent depth used by __str__

    def __str__(self):
        # Render the node and its attributes in a JS-object-like layout.
        a = list((str(i), v) for i, v in enumerate(self))
        for attr in dir(self):
            if attr[0] == "_":
                continue
            elif attr == "tokenizer":
                a.append((attr, "[object Object]"))
            elif attr in ("append", "count", "extend", "getSource", "index",
                          "insert", "pop", "remove", "reverse", "sort",
                          "type_", "target", "filename", "indentLevel",
                          "type"):
                continue
            else:
                a.append((attr, getattr(self, attr)))
        if len(self):
            a.append(("length", len(self)))
        # Python 2 only: cmp() and list.sort(cmp_function).
        a.sort(lambda a, b: cmp(a[0], b[0]))
        INDENTATION = "    "
        Node.indentLevel += 1
        n = Node.indentLevel
        s = "{\n%stype: %s" % ((INDENTATION * n), tokenstr(self.type_))
        for i, value in a:
            s += ",\n%s%s: " % ((INDENTATION * n), i)
            if i == "value" and self.type_ == GLOBALS['REGEXP']:
                s += "/%s/%s" % (value["regexp"], value["modifiers"])
            elif value is None:
                s += "null"
            elif value is False:
                s += "false"
            elif value is True:
                s += "true"
            elif type(value) == list:
                s += ','.join((str(x) for x in value))
            else:
                s += str(value)
        Node.indentLevel -= 1
        n = Node.indentLevel
        s += "\n%s}" % (INDENTATION * n)
        return s
    __repr__ = __str__

    def getSource(self):
        """Return the slice of source text this node was parsed from."""
        if getattr(self, "start", None) is not None:
            if getattr(self, "end", None) is not None:
                return self.tokenizer.source[self.start:self.end]
            return self.tokenizer.source[self.start:]
        if getattr(self, "end", None) is not None:
            return self.tokenizer.source[:self.end]
        return self.tokenizer.source[:]

    filename = property(lambda self: self.tokenizer.filename)

    def __nonzero__(self):
        # Python 2 truth protocol: a Node is truthy even when childless.
        return True

# Statement stack and nested statement handler.
def nest(t, x, node, func, end=None):
    """Run the sub-parser `func` with `node` pushed on the statement stack,
    optionally requiring token type `end` afterwards."""
    x.stmtStack.append(node)
    n = func(t, x)
    x.stmtStack.pop()
    if end:
        t.mustMatch(end)
    return n


def tokenstr(tt):
    """Human-readable name for token-type code tt: the symbolic operator
    name for punctuators, the uppercased text otherwise."""
    t = tokens[tt]
    if re.match(r'^\W', t):
        return opTypeNames[t]
    return t.upper()


def Statements(t, x):
    """Parse statements into a BLOCK node until end of input or '}'."""
    n = Node(t, GLOBALS['BLOCK'])
    x.stmtStack.append(n)
    while not t.done and t.peek() != GLOBALS['RIGHT_CURLY']:
        n.append(Statement(t, x))
    x.stmtStack.pop()
    return n


def Block(t, x):
    """Parse a braced '{ ... }' statement block."""
    t.mustMatch(GLOBALS['LEFT_CURLY'])
    n = Statements(t, x)
    t.mustMatch(GLOBALS['RIGHT_CURLY'])
    return n


# The three syntactic positions a function definition can occupy.
DECLARED_FORM = 0
EXPRESSED_FORM = 1
STATEMENT_FORM = 2


def Statement(t, x):
    """Parse one statement and return its parse node.

    Branches that end in a right curly return early; all other branches
    fall through to the automatic-semicolon-insertion check at the bottom.
    """
    tt = t.get()
    # Cases for statements ending in a right curly return early, avoiding the
    # common semicolon insertion magic after this switch.
    if tt == GLOBALS['FUNCTION']:
        # A function at statement depth > 1 is STATEMENT_FORM, else DECLARED.
        if len(x.stmtStack) > 1:
            type_ = STATEMENT_FORM
        else:
            type_ = DECLARED_FORM
        return FunctionDefinition(t, x, True, type_)
    elif tt == GLOBALS['LEFT_CURLY']:
        n = Statements(t, x)
        t.mustMatch(GLOBALS['RIGHT_CURLY'])
        return n
    elif tt == GLOBALS['IF']:
        n = Node(t)
        n.condition = ParenExpression(t, x)
        x.stmtStack.append(n)
        n.thenPart = Statement(t, x)
        if t.match(GLOBALS['ELSE']):
            n.elsePart = Statement(t, x)
        else:
            n.elsePart = None
        x.stmtStack.pop()
        return n
    elif tt == GLOBALS['SWITCH']:
        n = Node(t)
        t.mustMatch(GLOBALS['LEFT_PAREN'])
        n.discriminant = Expression(t, x)
        t.mustMatch(GLOBALS['RIGHT_PAREN'])
        n.cases = []
        n.defaultIndex = -1  # index into n.cases of the default clause
        x.stmtStack.append(n)
        t.mustMatch(GLOBALS['LEFT_CURLY'])
        while True:
            tt = t.get()
            if tt == GLOBALS['RIGHT_CURLY']:
                break
            if tt in (GLOBALS['DEFAULT'], GLOBALS['CASE']):
                if tt == GLOBALS['DEFAULT'] and n.defaultIndex >= 0:
                    raise t.newSyntaxError("More than one switch default")
                n2 = Node(t)
                if tt == GLOBALS['DEFAULT']:
                    n.defaultIndex = len(n.cases)
                else:
                    n2.caseLabel = Expression(t, x, GLOBALS['COLON'])
            else:
                raise t.newSyntaxError("Invalid switch case")
            t.mustMatch(GLOBALS['COLON'])
            n2.statements = Node(t, GLOBALS['BLOCK'])
            # Collect this clause's statements until the next case/default/}.
            while True:
                tt = t.peek()
                if (tt == GLOBALS['CASE'] or tt == GLOBALS['DEFAULT'] or
                        tt == GLOBALS['RIGHT_CURLY']):
                    break
                n2.statements.append(Statement(t, x))
            n.cases.append(n2)
        x.stmtStack.pop()
        return n
    elif tt == GLOBALS['FOR']:
        n = Node(t)
        n2 = None
        n.isLoop = True
        t.mustMatch(GLOBALS['LEFT_PAREN'])
        tt = t.peek()
        if tt != GLOBALS['SEMICOLON']:
            # Parse the loop head; `in` must not be treated as an operator
            # here (see the inForLoopInit check in Expression).
            x.inForLoopInit = True
            if tt == GLOBALS['VAR'] or tt == GLOBALS['CONST']:
                t.get()
                n2 = Variables(t, x)
            else:
                n2 = Expression(t, x)
            x.inForLoopInit = False
        if n2 and t.match(GLOBALS['IN']):
            # for (x in obj) form.
            n.type_ = GLOBALS['FOR_IN']
            if n2.type_ == GLOBALS['VAR']:
                if len(n2) != 1:
                    # NOTE(review): this calls the *builtin* SyntaxError with
                    # three args instead of t.newSyntaxError(...) — almost
                    # certainly a bug; confirm and fix separately.
                    raise SyntaxError("Invalid for..in left-hand side",
                            t.filename, n2.lineno)
                # NB: n2[0].type_ == IDENTIFIER and n2[0].value == n2[0].name
                n.iterator = n2[0]
                n.varDecl = n2
            else:
                n.iterator = n2
                n.varDecl = None
            n.object = Expression(t, x)
        else:
            # Classic for (setup; condition; update) form.
            if n2:
                n.setup = n2
            else:
                n.setup = None
            t.mustMatch(GLOBALS['SEMICOLON'])
            if t.peek() == GLOBALS['SEMICOLON']:
                n.condition = None
            else:
                n.condition = Expression(t, x)
            t.mustMatch(GLOBALS['SEMICOLON'])
            if t.peek() == GLOBALS['RIGHT_PAREN']:
                n.update = None
            else:
                n.update = Expression(t, x)
            t.mustMatch(GLOBALS['RIGHT_PAREN'])
        n.body = nest(t, x, n, Statement)
        return n
    elif tt == GLOBALS['WHILE']:
        n = Node(t)
        n.isLoop = True
        n.condition = ParenExpression(t, x)
        n.body = nest(t, x, n, Statement)
        return n
    elif tt == GLOBALS['DO']:
        n = Node(t)
        n.isLoop = True
        n.body = nest(t, x, n, Statement, GLOBALS['WHILE'])
        n.condition = ParenExpression(t, x)
        if not x.ecmaStrictMode:
            # <script language="JavaScript"> (without version hints) may need
            # automatic semicolon insertion without a newline after do-while.
            # See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
            t.match(GLOBALS['SEMICOLON'])
        return n
    elif tt in (GLOBALS['BREAK'], GLOBALS['CONTINUE']):
        n = Node(t)
        # Optional label must be on the same line as break/continue.
        if t.peekOnSameLine() == GLOBALS['IDENTIFIER']:
            t.get()
            n.label = t.token.value
        ss = x.stmtStack
        i = len(ss)
        label = getattr(n, "label", None)
        if label:
            # Labeled: search the statement stack for the matching label.
            while True:
                i -= 1
                if i < 0:
                    raise t.newSyntaxError("Label not found")
                if getattr(ss[i], "label", None) == label:
                    break
        else:
            # Unlabeled: target the nearest loop (or switch, for break).
            while True:
                i -= 1
                if i < 0:
                    if tt == GLOBALS['BREAK']:
                        raise t.newSyntaxError("Invalid break")
                    else:
                        raise t.newSyntaxError("Invalid continue")
                if (getattr(ss[i], "isLoop", None) or
                        (tt == GLOBALS['BREAK'] and
                         ss[i].type_ == GLOBALS['SWITCH'])):
                    break
        n.target = ss[i]
        # Falls through to semicolon insertion below.
    elif tt == GLOBALS['TRY']:
        n = Node(t)
        n.tryBlock = Block(t, x)
        n.catchClauses = []
        while t.match(GLOBALS['CATCH']):
            n2 = Node(t)
            t.mustMatch(GLOBALS['LEFT_PAREN'])
            n2.varName = t.mustMatch(GLOBALS['IDENTIFIER']).value
            if t.match(GLOBALS['IF']):
                # SpiderMonkey extension: catch (e if cond).
                if x.ecmaStrictMode:
                    raise t.newSyntaxError("Illegal catch guard")
                if n.catchClauses and not n.catchClauses[-1].guard:
                    # NOTE(review): "Gaurded" is a typo for "Guarded" in this
                    # runtime message (left byte-identical here).
                    raise t.newSyntaxError("Gaurded catch after unguarded")
                n2.guard = Expression(t, x)
            else:
                n2.guard = None
            t.mustMatch(GLOBALS['RIGHT_PAREN'])
            n2.block = Block(t, x)
            n.catchClauses.append(n2)
        if t.match(GLOBALS['FINALLY']):
            n.finallyBlock = Block(t, x)
        if not n.catchClauses and not getattr(n, "finallyBlock", None):
            raise t.newSyntaxError("Invalid try statement")
        return n
    elif tt in (GLOBALS['CATCH'], GLOBALS['FINALLY']):
        raise t.newSyntaxError(tokens[tt] + " without preceding try")
    elif tt == GLOBALS['THROW']:
        n = Node(t)
        n.exception = Expression(t, x)
    elif tt == GLOBALS['RETURN']:
        if not x.inFunction:
            raise t.newSyntaxError("Invalid return")
        n = Node(t)
        tt = t.peekOnSameLine()
        # A return value must start on the same line (ASI rule).
        if tt not in (GLOBALS['END'], GLOBALS['NEWLINE'],
                      GLOBALS['SEMICOLON'], GLOBALS['RIGHT_CURLY']):
            n.value = Expression(t, x)
    elif tt == GLOBALS['WITH']:
        n = Node(t)
        n.object = ParenExpression(t, x)
        n.body = nest(t, x, n, Statement)
        return n
    elif tt in (GLOBALS['VAR'], GLOBALS['CONST']):
        n = Variables(t, x)
    elif tt == GLOBALS['DEBUGGER']:
        n = Node(t)
    elif tt in (GLOBALS['NEWLINE'], GLOBALS['SEMICOLON']):
        # Empty statement.
        n = Node(t, GLOBALS['SEMICOLON'])
        n.expression = None
        return n
    else:
        if tt == GLOBALS['IDENTIFIER']:
            # Peek (in operator context) for ':' to detect a labeled stmt.
            t.scanOperand = False
            tt = t.peek()
            t.scanOperand = True
            if tt == GLOBALS['COLON']:
                label = t.token.value
                ss = x.stmtStack
                i = len(ss) - 1
                while i >= 0:
                    if getattr(ss[i], "label", None) == label:
                        raise t.newSyntaxError("Duplicate label")
                    i -= 1
                t.get()
                n = Node(t, GLOBALS['LABEL'])
                n.label = label
                n.statement = nest(t, x, n, Statement)
                return n
        # Plain expression statement.
        n = Node(t, GLOBALS['SEMICOLON'])
        t.unget()
        n.expression = Expression(t, x)
        n.end = n.expression.end
    # Automatic semicolon insertion for all fall-through branches.
    if t.lineno == t.token.lineno:
        tt = t.peekOnSameLine()
        if tt not in (GLOBALS['END'], GLOBALS['NEWLINE'],
                      GLOBALS['SEMICOLON'], GLOBALS['RIGHT_CURLY']):
            raise t.newSyntaxError("Missing ; before statement")
    t.match(GLOBALS['SEMICOLON'])
    return n


def FunctionDefinition(t, x, requireName, functionForm):
    """Parse a function definition (or SpiderMonkey getter/setter).

    requireName forces a function name; functionForm is one of
    DECLARED_FORM / EXPRESSED_FORM / STATEMENT_FORM.
    """
    f = Node(t)
    if f.type_ != GLOBALS['FUNCTION']:
        # Entered via a 'get'/'set' property accessor.
        if f.value == "get":
            f.type_ = GLOBALS['GETTER']
        else:
            f.type_ = GLOBALS['SETTER']
    if t.match(GLOBALS['IDENTIFIER']):
        f.name = t.token.value
    elif requireName:
        raise t.newSyntaxError("Missing function identifier")
    t.mustMatch(GLOBALS['LEFT_PAREN'])
    f.params = []
    while True:
        tt = t.get()
        if tt == GLOBALS['RIGHT_PAREN']:
            break
        if tt != GLOBALS['IDENTIFIER']:
            raise t.newSyntaxError("Missing formal parameter")
        f.params.append(t.token.value)
        if t.peek() != GLOBALS['RIGHT_PAREN']:
            t.mustMatch(GLOBALS['COMMA'])
    t.mustMatch(GLOBALS['LEFT_CURLY'])
    # The body gets a fresh context with inFunction=True.
    x2 = CompilerContext(True)
    f.body = Script(t, x2)
    t.mustMatch(GLOBALS['RIGHT_CURLY'])
    f.end = t.token.end
    f.functionForm = functionForm
    if functionForm == DECLARED_FORM:
        x.funDecls.append(f)
    return f


def Variables(t, x):
    """Parse a comma-separated var/const declaration list."""
    n = Node(t)
    while True:
        t.mustMatch(GLOBALS['IDENTIFIER'])
        n2 = Node(t)
        n2.name = n2.value
        if t.match(GLOBALS['ASSIGN']):
            if t.token.assignOp:
                # '+=' etc. are not allowed as initializers.
                raise t.newSyntaxError("Invalid variable initialization")
            n2.initializer = Expression(t, x, GLOBALS['COMMA'])
        n2.readOnly = not not (n.type_ == GLOBALS['CONST'])
        n.append(n2)
        x.varDecls.append(n2)
        if not t.match(GLOBALS['COMMA']):
            break
    return n


def ParenExpression(t, x):
    """Parse a parenthesized expression '( expr )'."""
    t.mustMatch(GLOBALS['LEFT_PAREN'])
    n = Expression(t, x)
    t.mustMatch(GLOBALS['RIGHT_PAREN'])
    return n


# Binding strength of each operator (higher binds tighter).
opPrecedence = {
    "SEMICOLON": 0,
    "COMMA": 1,
    "ASSIGN": 2, "HOOK": 2, "COLON": 2,
    # The above all have to have the same precedence, see bug 330975.
    "OR": 4,
    "AND": 5,
    "BITWISE_OR": 6,
    "BITWISE_XOR": 7,
    "BITWISE_AND": 8,
    "EQ": 9, "NE": 9, "STRICT_EQ": 9, "STRICT_NE": 9,
    "LT": 10, "LE": 10, "GE": 10, "GT": 10, "IN": 10, "INSTANCEOF": 10,
    "LSH": 11, "RSH": 11, "URSH": 11,
    "PLUS": 12, "MINUS": 12,
    "MUL": 13, "DIV": 13, "MOD": 13,
    "DELETE": 14, "VOID": 14, "TYPEOF": 14,
    # "PRE_INCREMENT": 14, "PRE_DECREMENT": 14,
    "NOT": 14, "BITWISE_NOT": 14, "UNARY_PLUS": 14, "UNARY_MINUS": 14,
    "INCREMENT": 15, "DECREMENT": 15,  # postfix
    "NEW": 16,
    "DOT": 17
}

# Map operator type code to precedence (codes alias the symbolic names).
for i in opPrecedence.copy():
    opPrecedence[GLOBALS[i]] = opPrecedence[i]

# Operand count per operator; -2 marks COMMA as a flattenable
# left-associative n-ary operator.
opArity = {
    "COMMA": -2,
    "ASSIGN": 2,
    "HOOK": 3,
    "OR": 2,
    "AND": 2,
    "BITWISE_OR": 2,
    "BITWISE_XOR": 2,
    "BITWISE_AND": 2,
    "EQ": 2, "NE": 2, "STRICT_EQ": 2, "STRICT_NE": 2,
    "LT": 2, "LE": 2, "GE": 2, "GT": 2, "IN": 2, "INSTANCEOF": 2,
    "LSH": 2, "RSH": 2, "URSH": 2,
    "PLUS": 2, "MINUS": 2,
    "MUL": 2, "DIV": 2, "MOD": 2,
    "DELETE": 1, "VOID": 1, "TYPEOF": 1,
    # "PRE_INCREMENT": 1, "PRE_DECREMENT": 1,
    "NOT": 1, "BITWISE_NOT": 1, "UNARY_PLUS": 1, "UNARY_MINUS": 1,
    "INCREMENT": 1, "DECREMENT": 1,  # postfix
    "NEW": 1, "NEW_WITH_ARGS": 2, "DOT": 2, "INDEX": 2, "CALL": 2,
    "ARRAY_INIT": 1, "OBJECT_INIT": 1, "GROUP": 1
}
# Map operator type code to arity.
# Alias each arity entry under its numeric token code as well.
for i in opArity.copy():
    opArity[GLOBALS[i]] = opArity[i]


def Expression(t, x, stop=None):
    """Parse one expression with an operator-precedence (shift/reduce) loop.

    `stop` is an optional token type at which to stop, honored only when no
    bracket/curly/paren/hook nesting has been opened inside this call.
    Returns the root parse node of the expression.
    """
    operators = []  # pending operator Nodes (the operator stack)
    operands = []   # finished operand Nodes (the operand stack)
    # Nesting levels on entry; used to detect unbalanced brackets on exit.
    bl = x.bracketLevel
    cl = x.curlyLevel
    pl = x.parenLevel
    hl = x.hookLevel

    def reduce_():
        # Pop one operator and attach its operands; push the result node.
        n = operators.pop()
        op = n.type_
        arity = opArity[op]
        if arity == -2:
            # Flatten left-associative trees.
            left = (len(operands) >= 2 and operands[-2])
            if left.type_ == op:
                right = operands.pop()
                left.append(right)
                return left
            arity = 2
        # Always use append to add operands to n, to update start and end.
        a = operands[-arity:]
        del operands[-arity:]
        for operand in a:
            n.append(operand)
        # Include closing bracket or postfix operator in [start,end).
        if n.end < t.token.end:
            n.end = t.token.end
        operands.append(n)
        return n

    # Local exception used to break out of the nested scan loop.
    class BreakOutOfLoops(Exception):
        pass
    try:
        while True:
            tt = t.get()
            if tt == GLOBALS['END']:
                break
            if (tt == stop and x.bracketLevel == bl and x.curlyLevel == cl and
                    x.parenLevel == pl and x.hookLevel == hl):
                # Stop only if tt matches the optional stop parameter, and
                # that token is not quoted by some kind of bracket.
                break
            if tt == GLOBALS['SEMICOLON']:
                # NB: cannot be empty, Statement handled that.
                raise BreakOutOfLoops
            elif tt in (GLOBALS['ASSIGN'], GLOBALS['HOOK'], GLOBALS['COLON']):
                if t.scanOperand:
                    raise BreakOutOfLoops
                # Reduce while the stack top binds tighter; ':' also closes
                # any pending '=' (right-associativity handling).
                while ((operators and
                        opPrecedence.get(operators[-1].type_, None) >
                        opPrecedence.get(tt)) or
                       (tt == GLOBALS['COLON'] and operators and
                        operators[-1].type_ == GLOBALS['ASSIGN'])):
                    reduce_()
                if tt == GLOBALS['COLON']:
                    if operators:
                        n = operators[-1]
                    if not operators or n.type_ != GLOBALS['HOOK']:
                        raise t.newSyntaxError("Invalid label")
                    x.hookLevel -= 1
                else:
                    operators.append(Node(t))
                    if tt == GLOBALS['ASSIGN']:
                        operands[-1].assignOp = t.token.assignOp
                    else:
                        x.hookLevel += 1  # HOOK ('?') opens a conditional
                t.scanOperand = True
            elif tt in (GLOBALS['IN'], GLOBALS['COMMA'], GLOBALS['OR'],
                        GLOBALS['AND'], GLOBALS['BITWISE_OR'],
                        GLOBALS['BITWISE_XOR'], GLOBALS['BITWISE_AND'],
                        GLOBALS['EQ'], GLOBALS['NE'], GLOBALS['STRICT_EQ'],
                        GLOBALS['STRICT_NE'], GLOBALS['LT'], GLOBALS['LE'],
                        GLOBALS['GE'], GLOBALS['GT'], GLOBALS['INSTANCEOF'],
                        GLOBALS['LSH'], GLOBALS['RSH'], GLOBALS['URSH'],
                        GLOBALS['PLUS'], GLOBALS['MINUS'], GLOBALS['MUL'],
                        GLOBALS['DIV'], GLOBALS['MOD'], GLOBALS['DOT']):
                # We're treating comma as left-associative so reduce can fold
                # left-heavy COMMA trees into a single array.
                if tt == GLOBALS['IN']:
                    # An in operator should not be parsed if we're parsing the
                    # head of a for (...) loop, unless it is in the then part
                    # of a conditional expression, or parenthesized somehow.
                    if (x.inForLoopInit and not x.hookLevel and
                            not x.bracketLevel and not x.curlyLevel and
                            not x.parenLevel):
                        raise BreakOutOfLoops
                if t.scanOperand:
                    raise BreakOutOfLoops
                while (operators and
                        opPrecedence.get(operators[-1].type_) >=
                        opPrecedence.get(tt)):
                    reduce_()
                if tt == GLOBALS['DOT']:
                    # Member access binds immediately: a DOT node of
                    # (object, property-name).
                    t.mustMatch(GLOBALS['IDENTIFIER'])
                    operands.append(Node(t, GLOBALS['DOT'],
                                         [operands.pop(), Node(t)]))
                else:
                    operators.append(Node(t))
                    t.scanOperand = True
            elif tt in (GLOBALS['DELETE'], GLOBALS['VOID'], GLOBALS['TYPEOF'],
                        GLOBALS['NOT'], GLOBALS['BITWISE_NOT'],
                        GLOBALS['UNARY_PLUS'], GLOBALS['UNARY_MINUS'],
                        GLOBALS['NEW']):
                if not t.scanOperand:
                    raise BreakOutOfLoops
                operators.append(Node(t))
            elif tt in (GLOBALS['INCREMENT'], GLOBALS['DECREMENT']):
                if t.scanOperand:
                    operators.append(Node(t))  # prefix increment or decrement
                else:
                    # Don't cross a line boundary for postfix {in,de}crement.
                    if (t.tokens.get((t.tokenIndex + t.lookahead - 1) &
                                     3).lineno != t.lineno):
                        raise BreakOutOfLoops
                    # Use >, not >=, so postfix has higher precedence than
                    # prefix.
                    while (operators and
                            opPrecedence.get(operators[-1].type_, None) >
                            opPrecedence.get(tt)):
                        reduce_()
                    n = Node(t, tt, [operands.pop()])
                    n.postfix = True
                    operands.append(n)
            elif tt == GLOBALS['FUNCTION']:
                if not t.scanOperand:
                    raise BreakOutOfLoops
                operands.append(FunctionDefinition(t, x, False,
                                                   EXPRESSED_FORM))
                t.scanOperand = False
            elif tt in (GLOBALS['NULL'], GLOBALS['THIS'], GLOBALS['TRUE'],
                        GLOBALS['FALSE'], GLOBALS['IDENTIFIER'],
                        GLOBALS['NUMBER'], GLOBALS['STRING'],
                        GLOBALS['REGEXP']):
                if not t.scanOperand:
                    raise BreakOutOfLoops
                operands.append(Node(t))
                t.scanOperand = False
            elif tt == GLOBALS['LEFT_BRACKET']:
                if t.scanOperand:
                    # Array initializer.  Parse using recursive descent, as
                    # the sub-grammar here is not an operator grammar.
                    n = Node(t, GLOBALS['ARRAY_INIT'])
                    while True:
                        tt = t.peek()
                        if tt == GLOBALS['RIGHT_BRACKET']:
                            break
                        if tt == GLOBALS['COMMA']:
                            # Elision: a hole in the array.
                            t.get()
                            n.append(None)
                            continue
                        n.append(Expression(t, x, GLOBALS['COMMA']))
                        if not t.match(GLOBALS['COMMA']):
                            break
                    t.mustMatch(GLOBALS['RIGHT_BRACKET'])
                    operands.append(n)
                    t.scanOperand = False
                else:
                    # Property indexing operator.
                    operators.append(Node(t, GLOBALS['INDEX']))
                    t.scanOperand = True
                    x.bracketLevel += 1
            elif tt == GLOBALS['RIGHT_BRACKET']:
                if t.scanOperand or x.bracketLevel == bl:
                    raise BreakOutOfLoops
                while reduce_().type_ != GLOBALS['INDEX']:
                    continue
                x.bracketLevel -= 1
            elif tt == GLOBALS['LEFT_CURLY']:
                if not t.scanOperand:
                    raise BreakOutOfLoops
                # Object initializer.  As for array initializers (see above),
                # parse using recursive descent.
                x.curlyLevel += 1
                n = Node(t, GLOBALS['OBJECT_INIT'])

                class BreakOutOfObjectInit(Exception):
                    pass
                try:
                    if not t.match(GLOBALS['RIGHT_CURLY']):
                        while True:
                            tt = t.get()
                            # NOTE(review): `t.peek ==` compares the bound
                            # method (not its result) to an int, so this
                            # getter/setter branch is always False — likely
                            # should be `t.peek() ==`; confirm upstream.
                            if ((t.token.value == "get" or
                                    t.token.value == "set") and
                                    t.peek == GLOBALS['IDENTIFIER']):
                                if x.ecmaStrictMode:
                                    raise t.newSyntaxError(
                                        "Illegal property accessor")
                                n.append(FunctionDefinition(t, x, True,
                                                            EXPRESSED_FORM))
                            else:
                                if tt in (GLOBALS['IDENTIFIER'],
                                          GLOBALS['NUMBER'],
                                          GLOBALS['STRING']):
                                    id_ = Node(t)
                                elif tt == GLOBALS['RIGHT_CURLY']:
                                    if x.ecmaStrictMode:
                                        raise t.newSyntaxError(
                                            "Illegal trailing ,")
                                    raise BreakOutOfObjectInit
                                else:
                                    raise t.newSyntaxError(
                                        "Invalid property name")
                                t.mustMatch(GLOBALS['COLON'])
                                n.append(Node(t, GLOBALS['PROPERTY_INIT'],
                                              [id_, Expression(
                                                  t, x, GLOBALS['COMMA'])]))
                            if not t.match(GLOBALS['COMMA']):
                                break
                        t.mustMatch(GLOBALS['RIGHT_CURLY'])
                except BreakOutOfObjectInit:
                    pass
                operands.append(n)
                t.scanOperand = False
                x.curlyLevel -= 1
            elif tt == GLOBALS['RIGHT_CURLY']:
                if not t.scanOperand and x.curlyLevel != cl:
                    raise ParseError("PANIC: right curly botch")
                raise BreakOutOfLoops
            elif tt == GLOBALS['LEFT_PAREN']:
                if t.scanOperand:
                    operators.append(Node(t, GLOBALS['GROUP']))
                    x.parenLevel += 1
                else:
                    while (operators and
                            opPrecedence.get(operators[-1].type_) >
                            opPrecedence[GLOBALS['NEW']]):
                        reduce_()
                    # Handle () now, to regularize the n-ary case for n > 0.
                    # We must set scanOperand in case there are arguments and
                    # the first one is a regexp or unary+/-.
                    if operators:
                        n = operators[-1]
                    else:
                        n = Object()
                        n.type_ = None
                    t.scanOperand = True
                    if t.match(GLOBALS['RIGHT_PAREN']):
                        # Empty argument list.
                        if n.type_ == GLOBALS['NEW']:
                            operators.pop()
                            n.append(operands.pop())
                        else:
                            n = Node(t, GLOBALS['CALL'],
                                     [operands.pop(),
                                      Node(t, GLOBALS['LIST'])])
                        operands.append(n)
                        t.scanOperand = False
                    else:
                        if n.type_ == GLOBALS['NEW']:
                            n.type_ = GLOBALS['NEW_WITH_ARGS']
                        else:
                            operators.append(Node(t, GLOBALS['CALL']))
                        x.parenLevel += 1
            elif tt == GLOBALS['RIGHT_PAREN']:
                if t.scanOperand or x.parenLevel == pl:
                    raise BreakOutOfLoops
                while True:
                    tt = reduce_().type_
                    if tt in (GLOBALS['GROUP'], GLOBALS['CALL'],
                              GLOBALS['NEW_WITH_ARGS']):
                        break
                if tt != GLOBALS['GROUP']:
                    # Normalize the argument expression into a LIST node.
                    if operands:
                        n = operands[-1]
                        if n[1].type_ != GLOBALS['COMMA']:
                            n[1] = Node(t, GLOBALS['LIST'], [n[1]])
                        else:
                            n[1].type_ = GLOBALS['LIST']
                    else:
                        raise ParseError("Unexpected amount of operands")
                x.parenLevel -= 1
            # Automatic semicolon insertion means we may scan across a newline
            # and into the beginning of another statement.  If so, break out
            # of the while loop and let the t.scanOperand logic handle errors.
            else:
                raise BreakOutOfLoops
    except BreakOutOfLoops:
        pass
    # Sanity checks: every bracket kind opened inside must have been closed.
    if x.hookLevel != hl:
        raise t.newSyntaxError("Missing : after ?")
    if x.parenLevel != pl:
        raise t.newSyntaxError("Missing ) in parenthetical")
    if x.bracketLevel != bl:
        raise t.newSyntaxError("Missing ] in index expression")
    if t.scanOperand:
        raise t.newSyntaxError("Missing operand")
    # Resume default mode, scanning for operands, not operators.
    t.scanOperand = True
    t.unget()
    while operators:
        reduce_()
    return operands.pop()


def parse(source, filename=None, starting_line_number=1):
    """Parse some Javascript

    Args:
        source: the Javascript source, as a string
        filename: the filename to include in messages
        starting_line_number: the line number of the first line of the
            passed in source, for output messages
    Returns:
        the parsed source code data structure
    Raises:
        ParseError
    """
    t = Tokenizer(source, filename, starting_line_number)
    x = CompilerContext(False)
    n = Script(t, x)
    if not t.done:
        raise t.newSyntaxError("Syntax error")
    return n


if __name__ == "__main__":
    # Python 2 CLI entry point: parse the file named by argv[1] and dump
    # the resulting tree (`print` statement and file() are py2-only).
    print str(parse(file(sys.argv[1]).read(), sys.argv[1]))
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase.ToonBaseGlobal import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.toonbase import ToontownGlobals
from direct.showbase import DirectObject
from toontown.toon import ToonDNA
from direct.fsm import ClassicFSM, State, StateData
import ClosetGUI
from direct.task.Task import Task
import ClosetGlobals
import DistributedFurnitureItem
from toontown.toonbase import TTLocalizer


class DistributedCloset(DistributedFurnitureItem.DistributedFurnitureItem):
    """Client-side closet furniture item.

    Lets the local toon open the closet, preview/swap clothing via a
    ClosetGUI, delete items, and commit or revert the changes by sending
    DNA updates to the server.  Driven by a four-state FSM
    (off/ready/closed/open) that mirrors server-sent state updates.
    """
    notify = directNotify.newCategory('DistributedCloset')

    def __init__(self, cr):
        """Set up default state; real scene setup happens in generate/load."""
        DistributedFurnitureItem.DistributedFurnitureItem.__init__(self, cr)
        self.notify.debug('__init__')
        self.lastAvId = 0
        self.hasLocalAvatar = 0
        self.lastTime = 0
        self.av = None
        self.closetGUI = None
        self.closetModel = None
        self.closetSphere = None
        self.closetSphereNode = None
        self.closetSphereNodePath = None
        self.topList = []
        self.botList = []
        # Snapshots taken when the closet opens, so cancel/reset can restore.
        self.oldTopList = []
        self.oldBotList = []
        self.oldStyle = None
        self.button = None
        self.topTrashButton = None
        self.bottomTrashButton = None
        self.isLocalToon = None
        self.popupInfo = None
        self.isOwner = 0
        self.ownerId = 0
        self.customerId = 0
        self.purchaseDoneEvent = ''
        self.swapEvent = ''
        self.locked = 0
        self.gender = None
        # Bit flags: set once the user deletes any top/bottom this session.
        self.topDeleted = 0
        self.bottomDeleted = 0
        self.closetTrack = None
        self.avMoveTrack = None
        self.scale = 1.0
        self.fsm = ClassicFSM.ClassicFSM('Closet', [State.State('off', self.enterOff, self.exitOff, ['ready', 'open', 'closed']),
         State.State('ready', self.enterReady, self.exitReady, ['open', 'closed']),
         State.State('closed', self.enterClosed, self.exitClosed, ['open', 'off']),
         State.State('open', self.enterOpen, self.exitOpen, ['closed', 'off'])], 'off', 'off')
        self.fsm.enterInitialState()
        return

    def generate(self):
        """Standard distributed-object generate; no closet-specific work."""
        DistributedFurnitureItem.DistributedFurnitureItem.generate(self)

    def announceGenerate(self):
        """Finish setup once all required fields have arrived."""
        self.notify.debug('announceGenerate')
        DistributedFurnitureItem.DistributedFurnitureItem.announceGenerate(self)
        self.load()
        self.setupCollisionSphere()
        self.fsm.request('ready')

    def load(self):
        """Find the door nodes in the model and parent each door under its
        hinge node so the doors can be rotated open/closed."""
        self.setTwoSided(1)
        lNode = self.find('**/door_rotate_L')
        lDoor = self.find('**/closetdoor_L')
        if lNode.isEmpty() or lDoor.isEmpty():
            self.leftDoor = None
        else:
            lDoor.wrtReparentTo(lNode)
            self.leftDoor = lNode
        rNode = self.find('**/door_rotate_R')
        rDoor = self.find('**/closetdoor_R')
        if rNode.isEmpty() or rDoor.isEmpty():
            self.rightDoor = None
        else:
            rDoor.wrtReparentTo(rNode)
            self.rightDoor = rNode
        # Use the hinge's scale so the trigger sphere matches the model size.
        if not lNode.isEmpty():
            self.scale = lNode.getScale()[0]
        return

    def setupCollisionSphere(self):
        """Create the proximity trigger sphere (only if an owner is known)."""
        if self.ownerId:
            self.closetSphereEvent = self.uniqueName('closetSphere')
            self.closetSphereEnterEvent = 'enter' + self.closetSphereEvent
            self.closetSphere = CollisionSphere(0, 0, 0, self.scale * 2.125)
            self.closetSphere.setTangible(0)
            self.closetSphereNode = CollisionNode(self.closetSphereEvent)
            self.closetSphereNode.setIntoCollideMask(WallBitmask)
            self.closetSphereNode.addSolid(self.closetSphere)
            self.closetSphereNodePath = self.attachNewNode(self.closetSphereNode)

    def disable(self):
        """Tear down all transient state (tasks, tracks, GUI, avatar lock)."""
        self.notify.debug('disable')
        self.ignore(self.closetSphereEnterEvent)
        self.ignoreAll()
        taskMgr.remove(self.uniqueName('popupChangeClothesGUI'))
        taskMgr.remove(self.uniqueName('lerpCamera'))
        taskMgr.remove(self.uniqueName('lerpToon'))
        if self.closetTrack:
            self.closetTrack.finish()
            self.closetTrack = None
        if self.closetGUI:
            self.closetGUI.resetClothes(self.oldStyle)
        self.resetCloset()
        if self.hasLocalAvatar:
            self.freeAvatar()
        self.ignoreAll()
        DistributedFurnitureItem.DistributedFurnitureItem.disable(self)
        return

    def delete(self):
        """Final cleanup; drop references so the object can be collected."""
        self.notify.debug('delete')
        DistributedFurnitureItem.DistributedFurnitureItem.delete(self)
        if self.popupInfo:
            self.popupInfo.destroy()
            self.popupInfo = None
        if self.av:
            del self.av
        del self.gender
        del self.closetSphere
        del self.closetSphereNode
        del self.closetSphereNodePath
        del self.closetGUI
        del self.fsm
        return

    def enterOff(self):
        pass

    def exitOff(self):
        pass

    def enterReady(self):
        # Only the owner's closet listens for the proximity trigger.
        if self.ownerId:
            self.accept(self.closetSphereEnterEvent, self.handleEnterSphere)

    def exitReady(self):
        pass

    def enterOpen(self):
        """Open the doors; if the local toon is the customer, move the camera
        and walk the avatar into position in front of the closet."""
        if self.ownerId:
            self.ignore(self.closetSphereEnterEvent)
        self._openDoors()
        if self.customerId == base.localAvatar.doId:
            camera.wrtReparentTo(self)
            camera.posQuatInterval(1, (-7.58, -6.02, 6.9), (286.3, 336.8, 0), other=self, blendType='easeOut').start()
            camera.setPosHpr(self, -7.58, -6.02, 6.9, 286.3, 336.8, 0)
        if self.av:
            if self.avMoveTrack:
                self.avMoveTrack.finish()
            self.av.stopSmooth()
            self.avMoveTrack = Sequence(Parallel(Func(self.av.play, 'walk'), LerpPosHprInterval(nodePath=self.av, other=self, duration=1.0, pos=Vec3(1.67, -3.29, 0.025), hpr=Vec3(112, 0, 0), blendType='easeOut')), Func(self.av.loop, 'neutral'), Func(self.av.startSmooth))
            self.avMoveTrack.start()

    def exitOpen(self):
        if self.ownerId:
            self._closeDoors()

    def enterClosed(self):
        if self.ownerId:
            self.accept(self.closetSphereEnterEvent, self.handleEnterSphere)

    def exitClosed(self):
        pass

    def handleEnterSphere(self, collEntry):
        """Local toon walked into the trigger sphere: request closet use."""
        if self.smoothStarted:
            return
        # Debounce: ignore a re-entry by the same avatar within half a second.
        if base.localAvatar.doId == self.lastAvId and globalClock.getFrameTime() <= self.lastTime + 0.5:
            self.notify.info('Ignoring duplicate entry for avatar.')
            return
        if self.hasLocalAvatar:
            self.freeAvatar()
        self.notify.debug('Entering Closet Sphere....%s' % self.closetSphereEnterEvent)
        if self.cr.playGame.getPlace() == None:
            self.notify.info('Not opening closet before place is defined.')
            return
        self.ignore(self.closetSphereEnterEvent)
        if not self.locked:
            self.cr.playGame.getPlace().fsm.request('closet')
            self.accept('closetAsleep', self._handleCancel)
        # NOTE(review): enterAvatar is sent even when locked — presumably the
        # server replies with the denied/locked handling; confirm.
        self.sendUpdate('enterAvatar', [])
        self.hasLocalAvatar = 1
        return

    def setState(self, mode, avId, ownerId, gender, topList, botList):
        """Server-driven state update: close the closet, or open it for the
        given customer with the given wardrobe contents."""
        self.notify.debug('setState, mode=%s, avId=%s, ownerId=%d' % (mode, avId, ownerId))
        self.isOwner = avId == ownerId
        self.ownerGender = gender
        if mode == ClosetGlobals.CLOSED:
            self.fsm.request('closed')
            return
        elif mode == ClosetGlobals.OPEN:
            self.customerId = avId
            self.av = self.cr.doId2do.get(self.customerId, None)
            if self.av:
                if base.localAvatar.getDoId() == self.customerId:
                    self.gender = self.av.style.gender
                    self.topList = topList
                    self.botList = botList
                    # Keep copies so cancel can restore the original wardrobe.
                    self.oldTopList = self.topList[0:]
                    self.oldBotList = self.botList[0:]
                    print '-----------Starting closet interaction-----------'
                    self.printInfo()
                    print '-------------------------------------------------'
                    if not self.isOwner:
                        self.__popupNotOwnerPanel()
                    else:
                        taskMgr.doMethodLater(0.5, self.popupChangeClothesGUI, self.uniqueName('popupChangeClothesGUI'))
            self.fsm.request('open')
        return

    def _revertGender(self):
        """Restore the customer's real gender (a borrower may have previewed
        the owner's gender-specific clothing)."""
        if self.gender:
            self.av.style.gender = self.gender
            self.av.loop('neutral')

    def popupChangeClothesGUI(self, task):
        """Build and show the clothing-selection GUI; snapshot current DNA so
        cancel can restore it."""
        self.notify.debug('popupChangeClothesGUI')
        self.purchaseDoneEvent = self.uniqueName('purchaseDone')
        self.swapEvent = self.uniqueName('swap')
        self.cancelEvent = self.uniqueName('cancel')
        self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)
        self.accept(self.swapEvent, self.__handleSwap)
        self.accept(self.cancelEvent, self._handleCancel)
        self.deleteEvent = self.uniqueName('delete')
        if self.isOwner:
            self.accept(self.deleteEvent, self.__handleDelete)
        if not self.closetGUI:
            self.closetGUI = ClosetGUI.ClosetGUI(self.isOwner, self.purchaseDoneEvent, self.cancelEvent, self.swapEvent, self.deleteEvent, self.topList, self.botList)
            self.closetGUI.load()
            if self.gender != self.ownerGender:
                self.closetGUI.setGender(self.ownerGender)
            self.closetGUI.enter(base.localAvatar)
            self.closetGUI.showButtons()
            style = self.av.getStyle()
            self.oldStyle = ToonDNA.ToonDNA()
            self.oldStyle.makeFromNetString(style.makeNetString())
        return Task.done

    def resetCloset(self):
        """Dismiss the GUI and re-snapshot the local avatar's current DNA."""
        self.ignoreAll()
        taskMgr.remove(self.uniqueName('popupChangeClothesGUI'))
        taskMgr.remove(self.uniqueName('lerpCamera'))
        taskMgr.remove(self.uniqueName('lerpToon'))
        if self.closetGUI:
            self.closetGUI.hideButtons()
            self.closetGUI.exit()
            self.closetGUI.unload()
            self.closetGUI = None
        del self.av
        self.av = base.localAvatar
        style = self.av.getStyle()
        self.oldStyle = ToonDNA.ToonDNA()
        self.oldStyle.makeFromNetString(style.makeNetString())
        self.topDeleted = 0
        self.bottomDeleted = 0
        return Task.done

    def __handleButton(self):
        messenger.send('next')

    def _handleCancel(self):
        """Revert to the snapshot DNA and end the interaction."""
        if self.oldStyle:
            self.d_setDNA(self.oldStyle.makeNetString(), 1)
        else:
            self.notify.info('avoided crash in handleCancel')
            self._handlePurchaseDone()
        if self.closetGUI:
            self.closetGUI.resetClothes(self.oldStyle)
        if self.popupInfo != None:
            self.popupInfo.destroy()
            self.popupInfo = None
        return

    def __handleSwap(self):
        # finished=0: preview only, do not commit yet.
        self.d_setDNA(self.av.getStyle().makeNetString(), 0)

    def __handleDelete(self, t_or_b):
        """Delete the currently-worn top or bottom from the wardrobe.

        Refuses when only one item of that type remains, since the avatar
        must always have something to wear.
        """
        if t_or_b == ClosetGlobals.SHIRT:
            itemList = self.closetGUI.tops
            trashIndex = self.closetGUI.topChoice
            swapFunc = self.closetGUI.swapTop
            removeFunc = self.closetGUI.removeTop
            self.topDeleted = self.topDeleted | 1

            def setItemChoice(i):
                self.closetGUI.topChoice = i

        else:
            itemList = self.closetGUI.bottoms
            trashIndex = self.closetGUI.bottomChoice
            swapFunc = self.closetGUI.swapBottom
            removeFunc = self.closetGUI.removeBottom
            self.bottomDeleted = self.bottomDeleted | 1

            def setItemChoice(i):
                self.closetGUI.bottomChoice = i

        if len(itemList) > 1:
            trashDNA = ToonDNA.ToonDNA()
            trashItem = self.av.getStyle().makeNetString()
            trashDNA.makeFromNetString(trashItem)
            # Step off the doomed item before removing it from the list.
            if trashIndex == 0:
                swapFunc(1)
            else:
                swapFunc(-1)
            removeFunc(trashIndex)
            self.sendUpdate('removeItem', [trashItem, t_or_b])
            swapFunc(0)
            self.closetGUI.updateTrashButtons()
        else:
            self.notify.warning("cant delete this item(type = %s), since we don't have a replacement" % t_or_b)

    def resetItemLists(self):
        """Restore the wardrobe lists from the open-time snapshots."""
        self.topList = self.oldTopList[0:]
        self.botList = self.oldBotList[0:]
        self.closetGUI.tops = self.topList
        self.closetGUI.bottoms = self.botList
        self.topDeleted = 0
        self.bottomDeleted = 0

    def __proceedToCheckout(self):
        # Deleting items is irreversible, so confirm first.
        if self.topDeleted or self.bottomDeleted:
            self.__popupAreYouSurePanel()
        else:
            self._handlePurchaseDone()

    def _handlePurchaseDone(self, timeout = 0):
        """Commit the final DNA.  On timeout, commit the old snapshot instead.

        'which' is a bitmask: bit 1 = top changed, bit 2 = bottom changed.
        """
        if timeout == 1:
            self.d_setDNA(self.oldStyle.makeNetString(), 1)
        else:
            which = 0
            if hasattr(self.closetGUI, 'topChoice') and hasattr(self.closetGUI, 'bottomChoice'):
                if self.closetGUI.topChoice != 0 or self.topDeleted:
                    which = which | 1
                if self.closetGUI.bottomChoice != 0 or self.bottomDeleted:
                    which = which | 2
            self.d_setDNA(self.av.getStyle().makeNetString(), 2, which)

    def d_setDNA(self, dnaString, finished, whichItems = 3):
        """Send a DNA update; finished: 0=preview, 1=cancel, 2=commit."""
        self.sendUpdate('setDNA', [dnaString, finished, whichItems])

    def setCustomerDNA(self, avId, dnaString):
        """Apply another toon's in-progress clothing change locally so
        bystanders see the preview."""
        if avId and avId != base.localAvatar.doId:
            av = base.cr.doId2do.get(avId, None)
            if av:
                if self.av == base.cr.doId2do[avId]:
                    oldTorso = self.av.style.torso
                    self.av.style.makeFromNetString(dnaString)
                    # A torso-type change requires regenerating the torso model.
                    if len(oldTorso) == 2 and len(self.av.style.torso) == 2 and self.av.style.torso[1] != oldTorso[1]:
                        self.av.swapToonTorso(self.av.style.torso, genClothes=0)
                        self.av.loop('neutral', 0)
                    self.av.generateToonClothes()
        return

    def printInfo(self):
        """Debug dump of the customer's current clothing and wardrobe."""
        print 'avid: %s, gender: %s' % (self.av.doId, self.av.style.gender)
        print 'current top = %s,%s,%s,%s and bot = %s,%s,' % (self.av.style.topTex, self.av.style.topTexColor, self.av.style.sleeveTex, self.av.style.sleeveTexColor, self.av.style.botTex, self.av.style.botTexColor)
        print 'topsList = %s' % self.av.getClothesTopsList()
        print 'bottomsList = %s' % self.av.getClothesBottomsList()

    def setMovie(self, mode, avId, timestamp):
        """Server-driven interaction result: clear, complete, or timeout."""
        self.isLocalToon = avId == base.localAvatar.doId
        if avId != 0:
            self.lastAvId = avId
        self.lastTime = globalClock.getFrameTime()
        if mode == ClosetGlobals.CLOSET_MOVIE_CLEAR:
            return
        elif mode == ClosetGlobals.CLOSET_MOVIE_COMPLETE:
            if self.isLocalToon:
                self._revertGender()
                print '-----------ending trunk interaction-----------'
                self.printInfo()
                print '-------------------------------------------------'
                self.resetCloset()
            # freeAvatar is a no-op unless we hold the local avatar.
            self.freeAvatar()
            return
        elif mode == ClosetGlobals.CLOSET_MOVIE_TIMEOUT:
            taskMgr.remove(self.uniqueName('lerpCamera'))
            taskMgr.remove(self.uniqueName('lerpToon'))
            if self.isLocalToon:
                self.ignore(self.purchaseDoneEvent)
                self.ignore(self.swapEvent)
                if self.closetGUI:
                    self.closetGUI.resetClothes(self.oldStyle)
                self._handlePurchaseDone(timeout=1)
                self.resetCloset()
                self._popupTimeoutPanel()
            self.freeAvatar()

    def freeAvatar(self):
        """Return control of the local avatar to the player."""
        self.notify.debug('freeAvatar()')
        if self.hasLocalAvatar:
            base.localAvatar.posCamera(0, 0)
            place = base.cr.playGame.getPlace()
            if place:
                place.setState('walk')
            self.ignore('closetAsleep')
            base.localAvatar.startLookAround()
            self.hasLocalAvatar = 0
        self.lastTime = globalClock.getFrameTime()

    def setOwnerId(self, avId):
        self.ownerId = avId

    def _popupTimeoutPanel(self):
        """Modal 'you took too long' dialog with a single OK button."""
        if self.popupInfo != None:
            self.popupInfo.destroy()
            self.popupInfo = None
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.ClosetTimeoutMessage, frameSize=(-1, 1, -1, 1), geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88, 1, 0.45), geom_pos=(0, 0, -.08), text_scale=0.08)
        DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.0, 0.0, -0.16), command=self.__handleTimeoutMessageOK)
        buttons.removeNode()
        self.popupInfo.reparentTo(aspect2d)
        return

    def __handleTimeoutMessageOK(self):
        self.popupInfo.reparentTo(hidden)

    def __popupNotOwnerPanel(self):
        """Tell a non-owner they may try clothes but not keep/delete them,
        then (on OK) open the GUI anyway."""
        if self.popupInfo != None:
            self.popupInfo.destroy()
            self.popupInfo = None
        self.purchaseDoneEvent = self.uniqueName('purchaseDone')
        self.swapEvent = self.uniqueName('swap')
        self.cancelEvent = self.uniqueName('cancel')
        self.accept(self.purchaseDoneEvent, self.__proceedToCheckout)
        self.accept(self.swapEvent, self.__handleSwap)
        self.accept(self.cancelEvent, self._handleCancel)
        self.deleteEvent = self.uniqueName('delete')
        if self.isOwner:
            self.accept(self.deleteEvent, self.__handleDelete)
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.ClosetNotOwnerMessage, frameSize=(-1, 1, -1, 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88, 1, 0.55), geom_pos=(0, 0, -.08), text_scale=0.08, text_pos=(0, 0.06))
        DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.0, 0.0, -0.21), command=self._handleNotOwnerMessageOK)
        buttons.removeNode()
        self.popupInfo.reparentTo(aspect2d)
        return

    def _handleNotOwnerMessageOK(self):
        self.popupInfo.reparentTo(hidden)
        taskMgr.doMethodLater(0.1, self.popupChangeClothesGUI, self.uniqueName('popupChangeClothesGUI'))

    def __popupAreYouSurePanel(self):
        """Confirm checkout when items were deleted (deletion is permanent)."""
        if self.popupInfo != None:
            self.popupInfo.destroy()
            self.popupInfo = None
        buttons = loader.loadModel('phase_3/models/gui/dialog_box_buttons_gui')
        okButtonImage = (buttons.find('**/ChtBx_OKBtn_UP'), buttons.find('**/ChtBx_OKBtn_DN'), buttons.find('**/ChtBx_OKBtn_Rllvr'))
        cancelButtonImage = (buttons.find('**/CloseBtn_UP'), buttons.find('**/CloseBtn_DN'), buttons.find('**/CloseBtn_Rllvr'))
        self.popupInfo = DirectFrame(parent=hidden, relief=None, state='normal', text=TTLocalizer.ClosetAreYouSureMessage, frameSize=(-1, 1, -1, 1), text_wordwrap=10, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.88, 1, 0.55), geom_pos=(0, 0, -.08), text_scale=0.08, text_pos=(0, 0.08))
        DirectButton(self.popupInfo, image=okButtonImage, relief=None, text=TTLocalizer.ClosetPopupOK, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(-0.1, 0.0, -0.21), command=self._handleYesImSure)
        DirectButton(self.popupInfo, image=cancelButtonImage, relief=None, text=TTLocalizer.ClosetPopupCancel, text_scale=0.05, text_pos=(0.0, -0.1), textMayChange=0, pos=(0.1, 0.0, -0.21), command=self._handleNotSure)
        buttons.removeNode()
        self.popupInfo.reparentTo(aspect2d)
        return

    def _handleYesImSure(self):
        self.popupInfo.reparentTo(hidden)
        self._handlePurchaseDone()

    def _handleNotSure(self):
        self.popupInfo.reparentTo(hidden)

    def _openDoors(self):
        """Animate both doors swinging open over half a second."""
        if self.closetTrack:
            self.closetTrack.finish()
        leftHpr = Vec3(-110, 0, 0)
        rightHpr = Vec3(110, 0, 0)
        self.closetTrack = Parallel()
        if self.rightDoor:
            self.closetTrack.append(self.rightDoor.hprInterval(0.5, rightHpr))
        if self.leftDoor:
            self.closetTrack.append(self.leftDoor.hprInterval(0.5, leftHpr))
        self.closetTrack.start()

    def _closeDoors(self):
        """Animate both doors swinging shut over half a second."""
        if self.closetTrack:
            self.closetTrack.finish()
        leftHpr = Vec3(0, 0, 0)
        rightHpr = Vec3(0, 0, 0)
        self.closetTrack = Parallel()
        if self.rightDoor:
            self.closetTrack.append(self.rightDoor.hprInterval(0.5, rightHpr))
        if self.leftDoor:
            self.closetTrack.append(self.leftDoor.hprInterval(0.5, leftHpr))
        self.closetTrack.start()
"""Contains knowledge to build a COM object definition. This module is used by both the @dynamic@ and @makepy@ modules to build all knowledge of a COM object. This module contains classes which contain the actual knowledge of the object. This include parameter and return type information, the COM dispid and CLSID, etc. Other modules may use this information to generate .py files, use the information dynamically, or possibly even generate .html documentation for objects. """ # # NOTES: DispatchItem and MapEntry used by dynamic.py. # the rest is used by makepy.py # # OleItem, DispatchItem, MapEntry, BuildCallList() is used by makepy import sys import string from keyword import iskeyword import pythoncom from pywintypes import TimeType import winerror import datetime # A string ending with a quote can not be safely triple-quoted. def _safeQuotedString(s): if s[-1]=='"': s = s[:-1]+'\\"' return '"""%s"""' % s error = "PythonCOM.Client.Build error" class NotSupportedException(Exception): pass # Raised when we cant support a param type. DropIndirection="DropIndirection" NoTranslateTypes = [ pythoncom.VT_BOOL, pythoncom.VT_CLSID, pythoncom.VT_CY, pythoncom.VT_DATE, pythoncom.VT_DECIMAL, pythoncom.VT_EMPTY, pythoncom.VT_ERROR, pythoncom.VT_FILETIME, pythoncom.VT_HRESULT, pythoncom.VT_I1, pythoncom.VT_I2, pythoncom.VT_I4, pythoncom.VT_I8, pythoncom.VT_INT, pythoncom.VT_NULL, pythoncom.VT_R4, pythoncom.VT_R8, pythoncom.VT_NULL, pythoncom.VT_STREAM, pythoncom.VT_UI1, pythoncom.VT_UI2, pythoncom.VT_UI4, pythoncom.VT_UI8, pythoncom.VT_UINT, pythoncom.VT_VOID, ] NoTranslateMap = {} for v in NoTranslateTypes: NoTranslateMap[v] = None class MapEntry: "Simple holder for named attibutes - items in a map." 
def __init__(self, desc_or_id, names=None, doc=None, resultCLSID=pythoncom.IID_NULL, resultDoc = None, hidden=0): if type(desc_or_id)==type(0): self.dispid = desc_or_id self.desc = None else: self.dispid = desc_or_id[0] self.desc = desc_or_id self.names = names self.doc = doc self.resultCLSID = resultCLSID self.resultDocumentation = resultDoc self.wasProperty = 0 # Have I been transformed into a function so I can pass args? self.hidden = hidden def GetResultCLSID(self): rc = self.resultCLSID if rc == pythoncom.IID_NULL: return None return rc # Return a string, suitable for output - either "'{...}'" or "None" def GetResultCLSIDStr(self): rc = self.GetResultCLSID() if rc is None: return "None" return repr(str(rc)) # Convert the IID object to a string, then to a string in a string. def GetResultName(self): if self.resultDocumentation is None: return None return self.resultDocumentation[0] class OleItem: typename = "OleItem" def __init__(self, doc=None): self.doc = doc if self.doc: self.python_name = MakePublicAttributeName(self.doc[0]) else: self.python_name = None self.bWritten = 0 self.bIsDispatch = 0 self.bIsSink = 0 self.clsid = None self.co_class = None class DispatchItem(OleItem): typename = "DispatchItem" def __init__(self, typeinfo=None, attr=None, doc=None, bForUser=1): OleItem.__init__(self,doc) self.propMap = {} self.propMapGet = {} self.propMapPut = {} self.mapFuncs = {} self.defaultDispatchName = None self.hidden = 0 if typeinfo: self.Build(typeinfo, attr, bForUser) def _propMapPutCheck_(self,key,item): ins, outs, opts = self.CountInOutOptArgs(item.desc[2]) if ins>1: # if a Put property takes more than 1 arg: if opts+1==ins or ins==item.desc[6]+1: newKey = "Set" + key deleteExisting = 0 # This one is still OK else: deleteExisting = 1 # No good to us if key in self.mapFuncs or key in self.propMapGet: newKey = "Set" + key else: newKey = key item.wasProperty = 1 self.mapFuncs[newKey] = item if deleteExisting: del self.propMapPut[key] def 
_propMapGetCheck_(self,key,item): ins, outs, opts = self.CountInOutOptArgs(item.desc[2]) if ins > 0: # if a Get property takes _any_ in args: if item.desc[6]==ins or ins==opts: newKey = "Get" + key deleteExisting = 0 # This one is still OK else: deleteExisting = 1 # No good to us if key in self.mapFuncs: newKey = "Get" + key else: newKey = key item.wasProperty = 1 self.mapFuncs[newKey] = item if deleteExisting: del self.propMapGet[key] def _AddFunc_(self,typeinfo,fdesc,bForUser): id = fdesc.memid funcflags = fdesc.wFuncFlags try: names = typeinfo.GetNames(id) name=names[0] except pythoncom.ole_error: name = "" names = None doc = None try: if bForUser: doc = typeinfo.GetDocumentation(id) except pythoncom.ole_error: pass if id==0 and name: self.defaultDispatchName = name invkind = fdesc.invkind # We need to translate any Alias', Enums, structs etc in result and args typerepr, flag, defval = fdesc.rettype # sys.stderr.write("%s result - %s -> " % (name, typerepr)) typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo) # sys.stderr.write("%s\n" % (typerepr,)) fdesc.rettype = typerepr, flag, defval, resultCLSID # Translate any Alias or Enums in argument list. argList = [] for argDesc in fdesc.args: typerepr, flag, defval = argDesc # sys.stderr.write("%s arg - %s -> " % (name, typerepr)) arg_type, arg_clsid, arg_doc = _ResolveType(typerepr, typeinfo) argDesc = arg_type, flag, defval, arg_clsid # sys.stderr.write("%s\n" % (argDesc[0],)) argList.append(argDesc) fdesc.args = tuple(argList) hidden = (funcflags & pythoncom.FUNCFLAG_FHIDDEN) != 0 if invkind == pythoncom.INVOKE_PROPERTYGET: map = self.propMapGet # This is not the best solution, but I dont think there is # one without specific "set" syntax. # If there is a single PUT or PUTREF, it will function as a property. # If there are both, then the PUT remains a property, and the PUTREF # gets transformed into a function. 
# (in vb, PUT=="obj=other_obj", PUTREF="set obj=other_obj elif invkind in (pythoncom.INVOKE_PROPERTYPUT, pythoncom.INVOKE_PROPERTYPUTREF): # Special case existing = self.propMapPut.get(name, None) if existing is not None: if existing.desc[4]==pythoncom.INVOKE_PROPERTYPUT: # Keep this one map = self.mapFuncs name = "Set"+name else: # Existing becomes a func. existing.wasProperty = 1 self.mapFuncs["Set"+name]=existing map = self.propMapPut # existing gets overwritten below. else: map = self.propMapPut # first time weve seen it. elif invkind == pythoncom.INVOKE_FUNC: map = self.mapFuncs else: map = None if not map is None: # if map.has_key(name): # sys.stderr.write("Warning - overwriting existing method/attribute %s\n" % name) map[name] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden) # any methods that can't be reached via DISPATCH we return None # for, so dynamic dispatch doesnt see it. if fdesc.funckind != pythoncom.FUNC_DISPATCH: return None return (name,map) return None def _AddVar_(self,typeinfo,fdesc,bForUser): ### need pythoncom.VARFLAG_FRESTRICTED ... ### then check it if fdesc.varkind == pythoncom.VAR_DISPATCH: id = fdesc.memid names = typeinfo.GetNames(id) # Translate any Alias or Enums in result. typerepr, flags, defval = fdesc.elemdescVar typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo) fdesc.elemdescVar = typerepr, flags, defval doc = None try: if bForUser: doc = typeinfo.GetDocumentation(id) except pythoncom.ole_error: pass # handle the enumerator specially map = self.propMap # Check if the element is hidden. 
hidden = 0 if hasattr(fdesc,"wVarFlags"): hidden = (fdesc.wVarFlags & 0x40) != 0 # VARFLAG_FHIDDEN map[names[0]] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden) return (names[0],map) else: return None def Build(self, typeinfo, attr, bForUser = 1): self.clsid = attr[0] self.bIsDispatch = (attr.wTypeFlags & pythoncom.TYPEFLAG_FDISPATCHABLE) != 0 if typeinfo is None: return # Loop over all methods for j in range(attr[6]): fdesc = typeinfo.GetFuncDesc(j) self._AddFunc_(typeinfo,fdesc,bForUser) # Loop over all variables (ie, properties) for j in range(attr[7]): fdesc = typeinfo.GetVarDesc(j) self._AddVar_(typeinfo,fdesc,bForUser) # Now post-process the maps. For any "Get" or "Set" properties # that have arguments, we must turn them into methods. If a method # of the same name already exists, change the name. for key, item in list(self.propMapGet.items()): self._propMapGetCheck_(key,item) for key, item in list(self.propMapPut.items()): self._propMapPutCheck_(key,item) def CountInOutOptArgs(self, argTuple): "Return tuple counting in/outs/OPTS. Sum of result may not be len(argTuple), as some args may be in/out." ins = out = opts = 0 for argCheck in argTuple: inOut = argCheck[1] if inOut==0: ins = ins + 1 out = out + 1 else: if inOut & pythoncom.PARAMFLAG_FIN: ins = ins + 1 if inOut & pythoncom.PARAMFLAG_FOPT: opts = opts + 1 if inOut & pythoncom.PARAMFLAG_FOUT: out = out + 1 return ins, out, opts def MakeFuncMethod(self, entry, name, bMakeClass = 1): # If we have a type description, and not varargs... 
if entry.desc is not None and (len(entry.desc) < 6 or entry.desc[6]!=-1): return self.MakeDispatchFuncMethod(entry, name, bMakeClass) else: return self.MakeVarArgsFuncMethod(entry, name, bMakeClass) def MakeDispatchFuncMethod(self, entry, name, bMakeClass = 1): fdesc = entry.desc doc = entry.doc names = entry.names ret = [] if bMakeClass: linePrefix = "\t" defNamedOptArg = "defaultNamedOptArg" defNamedNotOptArg = "defaultNamedNotOptArg" defUnnamedArg = "defaultUnnamedArg" else: linePrefix = "" defNamedOptArg = "pythoncom.Missing" defNamedNotOptArg = "pythoncom.Missing" defUnnamedArg = "pythoncom.Missing" defOutArg = "pythoncom.Missing" id = fdesc[0] s = linePrefix + 'def ' + name + '(self' + BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg) + '):' ret.append(s) if doc and doc[1]: ret.append(linePrefix + '\t' + _safeQuotedString(doc[1])) # print "fdesc is ", fdesc resclsid = entry.GetResultCLSID() if resclsid: resclsid = "'%s'" % resclsid else: resclsid = 'None' # Strip the default values from the arg desc retDesc = fdesc[8][:2] argsDesc = tuple([what[:2] for what in fdesc[2]]) # The runtime translation of the return types is expensive, so when we know the # return type of the function, there is no need to check the type at runtime. # To qualify, this function must return a "simple" type, and have no byref args. # Check if we have byrefs or anything in the args which mean we still need a translate. 
param_flags = [what[1] for what in fdesc[2]] bad_params = [flag for flag in param_flags if flag & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FRETVAL)!=0] s = None if len(bad_params)==0 and len(retDesc)==2 and retDesc[1]==0: rd = retDesc[0] if rd in NoTranslateMap: s = '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, _BuildArgList(fdesc, names)) elif rd in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN]: s = '%s\tret = self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)\n' % (linePrefix, id, fdesc[4], retDesc, repr(argsDesc), _BuildArgList(fdesc, names)) s = s + '%s\tif ret is not None:\n' % (linePrefix,) if rd == pythoncom.VT_UNKNOWN: s = s + "%s\t\t# See if this IUnknown is really an IDispatch\n" % (linePrefix,) s = s + "%s\t\ttry:\n" % (linePrefix,) s = s + "%s\t\t\tret = ret.QueryInterface(pythoncom.IID_IDispatch)\n" % (linePrefix,) s = s + "%s\t\texcept pythoncom.error:\n" % (linePrefix,) s = s + "%s\t\t\treturn ret\n" % (linePrefix,) s = s + '%s\t\tret = Dispatch(ret, %s, %s)\n' % (linePrefix,repr(name), resclsid) s = s + '%s\treturn ret' % (linePrefix) elif rd == pythoncom.VT_BSTR: s = "%s\t# Result is a Unicode object\n" % (linePrefix,) s = s + '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, repr(argsDesc), _BuildArgList(fdesc, names)) # else s remains None if s is None: s = '%s\treturn self._ApplyTypes_(%d, %s, %s, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, repr(name), resclsid, _BuildArgList(fdesc, names)) ret.append(s) ret.append("") return ret def MakeVarArgsFuncMethod(self, entry, name, bMakeClass = 1): fdesc = entry.desc names = entry.names doc = entry.doc ret = [] argPrefix = "self" if bMakeClass: linePrefix = "\t" else: linePrefix = "" ret.append(linePrefix + 'def ' + name + '(' + argPrefix + ', *args):') if doc and doc[1]: ret.append(linePrefix + '\t' + _safeQuotedString(doc[1])) if fdesc: invoketype = fdesc[4] 
# NOTE(review): the lines below are the tail of a DispatchItem method whose
# "def" is outside this view; reproduced unchanged.
		else:
			invoketype = pythoncom.DISPATCH_METHOD
		s = linePrefix + '\treturn self._get_good_object_(self._oleobj_.Invoke(*(('
		ret.append(s + str(entry.dispid) + ",0,%d,1)+args)),'%s')" % (invoketype, names[0]))
		ret.append("")
		return ret

# Note - "DispatchItem" poorly named - need a new intermediate class.
class VTableItem(DispatchItem):
	def Build(self, typeinfo, attr, bForUser = 1):
		# Build the dispatch-based info first, then derive the vtable layout
		# (method list ordered by vtable offset, desc[7]).
		DispatchItem.Build(self, typeinfo, attr, bForUser)
		assert typeinfo is not None, "Cant build vtables without type info!"

		meth_list = list(self.mapFuncs.values()) + list(self.propMapGet.values()) + list(self.propMapPut.values())

		# Sort by vtable offset; the cmp-based branch exists only for Python < 2.4.
		if sys.version_info < (2,4):
			def cmp_vtable_off(m1, m2):
				return cmp(m1.desc[7], m2.desc[7])
			meth_list.sort(cmp_vtable_off)
		else:
			meth_list.sort(key=lambda m: m.desc[7])
		# Now turn this list into the run-time representation
		# (ready for immediate use or writing to gencache)
		self.vtableFuncs = []
		for entry in meth_list:
			self.vtableFuncs.append( (entry.names, entry.dispid, entry.desc) )

# A Lazy dispatch item - builds an item on request using info from
# an ITypeComp.  The dynamic module makes the called to build each item,
# and also holds the references to the typeinfo and typecomp.
class LazyDispatchItem(DispatchItem):
	typename = "LazyDispatchItem"
	def __init__(self, attr, doc):
		# No typeinfo is passed up - it is resolved lazily via the ITypeComp.
		self.clsid = attr[0]
		DispatchItem.__init__(self, None, attr, doc, 0)

# Map VARIANT types the runtime has no distinct support for onto VT_I4.
typeSubstMap = {
	pythoncom.VT_INT: pythoncom.VT_I4,
	pythoncom.VT_UINT: pythoncom.VT_I4,
	pythoncom.VT_HRESULT: pythoncom.VT_I4,
}

def _ResolveType(typerepr, itypeinfo):
	# Resolve a raw TYPEDESC repr to a (vt, clsid, doc) triple, following
	# VT_PTR / VT_SAFEARRAY / VT_CARRAY indirections and VT_USERDEFINED
	# aliases via the supplied ITypeInfo.
	# Resolve VT_USERDEFINED (often aliases or typed IDispatches)
	if type(typerepr)==tuple:
		indir_vt, subrepr = typerepr
		if indir_vt == pythoncom.VT_PTR:
			# If it is a VT_PTR to a VT_USERDEFINED that is an IDispatch/IUnknown,
			# then it resolves to simply the object.
			# Otherwise, it becomes a ByRef of the resolved type
			# We need to drop an indirection level on pointer to user defined interfaces.
			# eg, (VT_PTR, (VT_USERDEFINED, somehandle)) needs to become VT_DISPATCH
			# only when "somehandle" is an object.
			# but (VT_PTR, (VT_USERDEFINED, otherhandle)) doesnt get the indirection dropped.
			was_user = type(subrepr)==tuple and subrepr[0]==pythoncom.VT_USERDEFINED
			subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
			if was_user and subrepr in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN, pythoncom.VT_RECORD]:
				# Drop the VT_PTR indirection
				return subrepr, sub_clsid, sub_doc
			# Change PTR indirection to byref
			return subrepr | pythoncom.VT_BYREF, sub_clsid, sub_doc
		if indir_vt == pythoncom.VT_SAFEARRAY:
			# resolve the array element, and convert to VT_ARRAY
			subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
			return pythoncom.VT_ARRAY | subrepr, sub_clsid, sub_doc
		if indir_vt == pythoncom.VT_CARRAY: # runtime has no support for this yet.
			# resolve the array element, and convert to VT_CARRAY
			# sheesh - return _something_
			return pythoncom.VT_CARRAY, None, None
		if indir_vt == pythoncom.VT_USERDEFINED:
			try:
				resultTypeInfo = itypeinfo.GetRefTypeInfo(subrepr)
			# Python 2 except syntax - this module is py2-only as written.
			except pythoncom.com_error, details:
				if details.hresult in [winerror.TYPE_E_CANTLOADLIBRARY,
									   winerror.TYPE_E_LIBNOTREGISTERED]:
					# an unregistered interface
					return pythoncom.VT_UNKNOWN, None, None
				raise

			resultAttr = resultTypeInfo.GetTypeAttr()
			typeKind = resultAttr.typekind
			if typeKind == pythoncom.TKIND_ALIAS:
				# An alias - recurse on the aliased type.
				tdesc = resultAttr.tdescAlias
				return _ResolveType(tdesc, resultTypeInfo)
			elif typeKind in [pythoncom.TKIND_ENUM, pythoncom.TKIND_MODULE]:
				# For now, assume Long
				return pythoncom.VT_I4, None, None

			elif typeKind == pythoncom.TKIND_DISPATCH:
				clsid = resultTypeInfo.GetTypeAttr()[0]
				retdoc = resultTypeInfo.GetDocumentation(-1)
				return pythoncom.VT_DISPATCH, clsid, retdoc

			elif typeKind in [pythoncom.TKIND_INTERFACE,
							  pythoncom.TKIND_COCLASS]:
				# XXX - should probably get default interface for CO_CLASS???
				clsid = resultTypeInfo.GetTypeAttr()[0]
				retdoc = resultTypeInfo.GetDocumentation(-1)
				return pythoncom.VT_UNKNOWN, clsid, retdoc

			elif typeKind == pythoncom.TKIND_RECORD:
				return pythoncom.VT_RECORD, None, None
			raise NotSupportedException("Can not resolve alias or user-defined type")
	# A plain VT - substitute unsupported VTs (see typeSubstMap) or pass through.
	return typeSubstMap.get(typerepr,typerepr), None, None

def _BuildArgList(fdesc, names):
	"Builds list of args to the underlying Invoke method."
	# Word has TypeInfo for Insert() method, but says "no args"
	numArgs = max(fdesc[6], len(fdesc[2]))
	names = list(names)
	while None in names:
		i = names.index(None)
		names[i] = "arg%d" % (i,)
	# We've seen 'source safe' libraries offer the name of 'ret' params in
	# 'names' - although we can't reproduce this, it would be insane to offer
	# more args than we have arg infos for - hence the upper limit on names...
	names = list(map(MakePublicAttributeName, names[1:(numArgs + 1)]))
	name_num = 0  # NOTE(review): unused - left in place for byte-stability.
	while len(names) < numArgs:
		names.append("arg%d" % (len(names),))
	# As per BuildCallList(), avoid huge lines.
	# Hack a "\n" at the end of every 5th name - "strides" would be handy
	# here but don't exist in 2.2
	for i in range(0, len(names), 5):
		names[i] = names[i] + "\n\t\t\t"
	return "," + ", ".join(names)

valid_identifier_chars = string.ascii_letters + string.digits + "_"

def demunge_leading_underscores(className):
	# Rotate the leading "__" run to the end, keeping one "_" at the front
	# (eg "__foo" -> "_foo__"), so the name is public but still unique.
	i = 0
	while className[i] == "_":
		i += 1
	assert i >= 2, "Should only be here with names starting with '__'"
	return className[i-1:] + className[:i-1]

# Given a "public name" (eg, the name of a class, function, etc)
# make sure it is a legal (and reasonable!) Python name.
def MakePublicAttributeName(className, is_global = False):
	# Given a class attribute that needs to be public, convert it to a
	# reasonable name.
	# Also need to be careful that the munging doesnt
	# create duplicates - eg, just removing a leading "_" is likely to cause
	# a clash.
	# if is_global is True, then the name is a global variable that may
	# overwrite a builtin - eg, "None"
	if className[:2]=='__':
		return demunge_leading_underscores(className)
	elif className == 'None':
		# assign to None is evil (and SyntaxError in 2.4, even though
		# iskeyword says False there) - note that if it was a global
		# it would get picked up below
		className = 'NONE'
	elif iskeyword(className):
		# most keywords are lower case (except True, False etc in py3k)
		ret = className.capitalize()
		# but those which aren't get forced upper.
		if ret == className:
			ret = ret.upper()
		return ret
	elif is_global and hasattr(__builtins__, className):
		# builtins may be mixed case. If capitalizing it doesn't change it,
		# force to all uppercase (eg, "None", "True" become "NONE", "TRUE"
		ret = className.capitalize()
		if ret==className:    # didn't change - force all uppercase.
			ret = ret.upper()
		return ret
	# Strip non printable chars
	return ''.join([char for char in className if char in valid_identifier_chars])

# Given a default value passed by a type library, return a string with
# an appropriate repr() for the type.
# Takes a raw ELEMDESC and returns a repr string, or None
# (NOTE: The string itself may be '"None"', which is valid, and different to None.
# XXX - To do: Dates are probably screwed, but can they come in?
def MakeDefaultArgRepr(defArgVal):
	try:
		inOut = defArgVal[1]
	except IndexError:
		# something strange - assume is in param.
		inOut = pythoncom.PARAMFLAG_FIN
	if inOut & pythoncom.PARAMFLAG_FHASDEFAULT:
		# times need special handling...
		val = defArgVal[2]
		if isinstance(val, datetime.datetime):
			# VARIANT <-> SYSTEMTIME conversions always lose any sub-second
			# resolution, so just use a 'timetuple' here.
			return repr(tuple(val.utctimetuple()))
		if type(val) is TimeType:
			# must be the 'old' pywintypes time object...
			year=val.year; month=val.month; day=val.day; hour=val.hour; minute=val.minute; second=val.second; msec=val.msec
			return "pywintypes.Time((%(year)d, %(month)d, %(day)d, %(hour)d, %(minute)d, %(second)d,0,0,0,%(msec)d))" % locals()
		return repr(val)
	return None

def BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg, is_comment = False):
	"Builds a Python declaration for a method."
	# Names[0] is the func name - param names are from 1.
	numArgs = len(fdesc[2])
	numOptArgs = fdesc[6]
	strval = ''
	if numOptArgs==-1:	# Special value that says "var args after here"
		firstOptArg = numArgs
		numArgs = numArgs - 1
	else:
		firstOptArg = numArgs - numOptArgs
	for arg in xrange(numArgs):
		try:
			argName = names[arg+1]
			namedArg = argName is not None
		except IndexError:
			namedArg = 0
		if not namedArg: argName = "arg%d" % (arg)
		thisdesc = fdesc[2][arg]
		# See if the IDL specified a default value
		defArgVal = MakeDefaultArgRepr(thisdesc)
		if defArgVal is None:
			# Out params always get their special default
			if thisdesc[1] & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FIN) == pythoncom.PARAMFLAG_FOUT:
				defArgVal = defOutArg
			else:
				# Unnamed arg - always allow default values.
				if namedArg:
					# Is a named argument
					if arg >= firstOptArg:
						defArgVal = defNamedOptArg
					else:
						defArgVal = defNamedNotOptArg
				else:
					defArgVal = defUnnamedArg
		argName = MakePublicAttributeName(argName)
		# insanely long lines with an 'encoding' flag crashes python 2.4.0
		# keep 5 args per line
		# This may still fail if the arg names are insane, but that seems
		# unlikely.  See also _BuildArgList()
		if (arg+1) % 5 == 0:
			strval = strval + "\n"
			if is_comment:
				strval = strval + "#"
			strval = strval + "\t\t\t"
		strval = strval + ", " + argName
		if defArgVal:
			strval = strval + "=" + defArgVal
	if numOptArgs==-1:
		strval = strval + ", *" + names[-1]
	return strval

if __name__=='__main__':
	# Python 2 print statement - this module targets Python 2 throughout.
	print "Use 'makepy.py' to generate Python code - this module is just a helper"
"""Builds a PDF progress/issues report for a running MC production campaign."""

import operator
import time
# BUG FIX: PDF output is binary; the original imported StringIO from io but
# then called StringIO.StringIO() (AttributeError). BytesIO is correct here.
from io import BytesIO

# BUG FIX: the original did "import core.reports.ReportsDataSource" but then
# referenced the bare name "ReportsDataSource" (NameError).
from core.reports import ReportsDataSource

from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.pdfgen import canvas

from django.http import HttpResponse


def _acc(mapping, key, value):
    """Accumulate value into mapping[key]; None values count as 0."""
    mapping[key] = mapping.get(key, 0) + (value or 0)


def _top_n(mapping, n):
    """Return a dict of the n entries of mapping with the largest values."""
    ranked = sorted(mapping.items(), key=operator.itemgetter(1), reverse=True)
    return dict(ranked[:n])


class RunningMCProdTasks:
    """Aggregates campaign task/job statistics and renders them as a PDF."""

    def __init__(self):
        pass

    def prepareReport(self, campaigname, depthhours, mcordpd=True):
        """Aggregate statistics for a campaign and return a PDF HTTP response.

        Args:
            campaigname: campaign identifier passed to the data source.
            depthhours: look-back window in hours for the "recent" statistics.
            mcordpd: flag forwarded to the data source (MC vs. DPD selection
                presumably - confirm against ReportsDataSource).

        Returns:
            django HttpResponse carrying the rendered PDF (via renderPDF).
        """
        lenlists = 10  # length of every "top N" table in the report
        rp = ReportsDataSource.ReportsDataSource()
        dataSetWholeCompaing = rp.getCompaingTasksJobsStats(
            campaigname, depthhours, mcordpd)
        eventsSetWholeCompaing = rp.getCompaingTasksEventsStats(
            campaigname, mcordpd)

        # Scalar accumulators.
        totEvDone = 0
        totEvWereSched = 0
        totEvDoneHours = 0
        totEvRemaining = 0
        totNJobsFailed = 0
        totNJobsSucc = 0
        totWallTimeFailed = 0
        totWallTimeSucc = 0
        totWallTime = 0
        totWallTimeH = 0

        # Task-state sets (deduplicate task ids across rows).
        taskFinishedSet = set()
        totTasksRegisteredSet = set()
        totTasksRunningSet = set()

        # Per-site / per-task accumulators.
        sitesDoneEvents = {}
        sitesSuccWall = {}
        tasksWallTime = {}
        taskSuccEvH = {}
        tasksSuccJobH = {}
        tasksFailedJobsH = {}
        tasksFailedWallH = {}
        sitesFailedWH = {}
        sitesActivateJobs = {}
        sitesRunningJobs = {}
        sitesAssignedJobs = {}
        taskSuccWallH = {}

        for row in eventsSetWholeCompaing:
            if row[3] > 0:
                totEvWereSched += row[3]
            if row[2] > 0:
                # NOTE(review): original also added row[3] (not row[2]) here -
                # i.e. "remaining" counts the scheduled events of any task that
                # still has events pending. Confirm against the data source.
                totEvRemaining += row[3]

        # Row layout (inferred from usage; confirm against ReportsDataSource):
        # 0=taskid 1=site 2=status 3=succ jobs 4=failed jobs 5=running jobs
        # 6=succ walltime 7=failed walltime 8=events done (window) 9=events done
        # 10=failed walltime (window) 11=succ jobs (window) 12=failed jobs
        # (window) 13=activated jobs 14=assigned jobs 15=succ walltime (window)
        for row in dataSetWholeCompaing:
            totNJobsFailed += row[4]
            totNJobsSucc += row[3]
            totWallTimeFailed += row[7] if row[7] is not None else 0
            totWallTimeSucc += row[6] if row[6] is not None else 0

            if row[2] == 'finished':
                taskFinishedSet.add(row[0])
                totWallTime += row[6] or 0  # guard: walltime may be NULL
            elif row[2] in ('registered', 'submitting'):
                totTasksRegisteredSet.add(row[0])
            elif row[2] == 'running':
                totTasksRunningSet.add(row[0])

            totEvDoneHours += row[8]
            totEvDone += row[9]

            _acc(sitesDoneEvents, row[1], row[9])
            _acc(sitesSuccWall, row[1], row[6])
            _acc(taskSuccWallH, row[0], row[15])
            totWallTimeH += row[15]
            _acc(tasksWallTime, row[0], (row[6] or 0) + (row[7] or 0))
            _acc(taskSuccEvH, row[0], row[9])
            _acc(tasksSuccJobH, row[0], row[11])
            _acc(tasksFailedJobsH, row[0], row[12])
            _acc(tasksFailedWallH, row[0], row[10])
            if row[10] is not None:  # keep original semantics: NULLs create no key
                _acc(sitesFailedWH, row[1], row[10])
            _acc(sitesActivateJobs, row[1], row[13])
            _acc(sitesAssignedJobs, row[1], row[14])
            _acc(sitesRunningJobs, row[1], row[5])

        totTasksFinished = len(taskFinishedSet)
        totTasksRegistered = len(totTasksRegisteredSet)
        totTasksRunning = len(totTasksRunningSet)

        aveEvThrHours = totEvDoneHours / depthhours
        # Sentinel meaning "throughput is zero, processing time unbounded".
        estProcTime = 100000000000000000
        if aveEvThrHours > 0:
            estProcTime = totEvRemaining / aveEvThrHours

        # BUG FIX: this dict's initialization was commented out in the
        # original, causing a NameError on first assignment below.
        topNSitesWorstPerf = {}
        for site, doneEv in sitesDoneEvents.items():
            if sitesSuccWall[site] > 0:
                topNSitesWorstPerf[site] = doneEv / sitesSuccWall[site]
        # Worst = lowest events/walltime, so ascending sort; keep list of pairs
        # because renderPDF iterates (site, perf) tuples.
        topNSitesWorstPerf = sorted(
            topNSitesWorstPerf.items(), key=operator.itemgetter(1))[0:lenlists]

        # Guard ratios against empty/zero denominators (empty campaign, no
        # finished jobs yet, ...) - the original raised ZeroDivisionError.
        avWallPerEv = totEvDone / totWallTime if totWallTime else 0
        avWallPerEvH = totEvDoneHours / totWallTimeH if totWallTimeH else 0

        topNTasksWorstWPerEvH = {}
        for taskid, succEv in taskSuccEvH.items():
            if succEv > 0:
                topNTasksWorstWPerEvH[taskid] = taskSuccWallH[taskid] / succEv
        topNTasksWorstWPerEvH = sorted(
            topNTasksWorstWPerEvH.items(),
            key=operator.itemgetter(1), reverse=True)[0:lenlists]

        failureRate = {}
        for taskid, succJobs in tasksSuccJobH.items():
            if succJobs > 0:
                failureRate[taskid] = tasksFailedJobsH[taskid] / succJobs
        topNTasksFailureRateHours = _top_n(failureRate, lenlists)

        topNTasksFailureWallTimeHours = _top_n(tasksFailedWallH, lenlists)

        topNSitesWithHighestFailureHours = _top_n(sitesFailedWH, lenlists)

        # BUG FIX: the original reset sitesActivateJobs / sitesRunningJobs /
        # sitesAssignedJobs to {} at this point, so both ratio tables below
        # were always empty. The accumulated dicts are used instead.
        activatedRunning = {}
        for site in sitesActivateJobs:
            if sitesRunningJobs.get(site):
                activatedRunning[site] = (
                    sitesActivateJobs[site] / sitesRunningJobs[site])
        topNSitesWithHighestActivatedRunningRat = _top_n(
            activatedRunning, lenlists)

        assignedRunning = {}
        for site in sitesActivateJobs:
            if sitesRunningJobs.get(site, 0) > 0:
                assignedRunning[site] = (
                    sitesAssignedJobs[site] / sitesRunningJobs[site])
        topNSitesWithHighestAssignedRunningRat = _top_n(
            assignedRunning, lenlists)

        data = {
            'topNSitesWithHighestAssignedRunningRat': topNSitesWithHighestAssignedRunningRat,
            'topNSitesWithHighestActivatedRunningRat': topNSitesWithHighestActivatedRunningRat,
            'topNSitesWithHighestFailureHours': topNSitesWithHighestFailureHours,
            'topNTasksFailureWallTimeHours': topNTasksFailureWallTimeHours,
            'topNTasksFailureRateHours': topNTasksFailureRateHours,
            'topNTasksWorstWPerEvH': topNTasksWorstWPerEvH,
            'avWallPerEv': avWallPerEv,
            'topNSitesWorstPerf': topNSitesWorstPerf,
            'campaign': campaigname,
            'totEvDone': totEvDone,
            'totEvWereSched': totEvWereSched,
            'totTasksFinishedOrRunnung': totTasksFinished + totTasksRegistered,
            'totTasksRegistered': totTasksRegistered,
            'totTasksFinished': totTasksFinished,
            'totTasksRunning': totTasksRunning,
            'aveEvThrHours': aveEvThrHours,
            'estProcTime': estProcTime,
            'avWallPerEvH': avWallPerEvH,
            # Guarded: no successful jobs / walltime yields ratio 0.
            'totJobsFailedSucc': totNJobsFailed / totNJobsSucc if totNJobsSucc else 0,
            'totWallTimeFailedSucc': totWallTimeFailed / totWallTimeSucc if totWallTimeSucc else 0,
        }
        return self.renderPDF(data)

    def renderPDF(self, data):
        """Render the prepared statistics dict into a PDF HTTP response."""
        # BytesIO, not StringIO: reportlab writes binary PDF bytes.
        buff = BytesIO()
        doc = SimpleDocTemplate(buff, pagesize=letter,
                                rightMargin=72, leftMargin=72,
                                topMargin=72, bottomMargin=18)
        compaign = data['campaign']
        totEvDone = data['totEvDone']
        totEvWasScheduled = data['totEvWereSched']
        totTasksFinishedOrRunnung = data['totTasksFinishedOrRunnung']
        totTasksRegistered = data['totTasksRegistered']
        aveEvThrHours = data['aveEvThrHours']
        estProcTime = data['estProcTime']
        totTasksFinished = data['totTasksFinished']
        totTasksRunning = data['totTasksRunning']
        topNSitesWorstPerf = data['topNSitesWorstPerf']
        avWallPerEvH = data['avWallPerEvH']
        avWallPerEv = data['avWallPerEv']
        totJobsFailedSucc = data['totJobsFailedSucc']
        totWallTimeFailedSucc = data['totWallTimeFailedSucc']
        topNTasksWorstWPerEvH = data['topNTasksWorstWPerEvH']

        Report = []
        styles = getSampleStyleSheet()
        style = getSampleStyleSheet()['Normal']
        style.leading = 24

        Report.append(Paragraph('Report on campaign: ' + compaign, styles["Heading1"]))
        Report.append(Paragraph('Build on ' + time.ctime(), styles["Bullet"]))
        Report.append(Paragraph('Progress and loads', styles["Heading2"]))
        Report.append(Paragraph('Total events done: ' + str(round(totEvDone / 1000000, 2)) + 'M of ' + str(
            round(totEvWasScheduled / 1000000, 2)) + 'M in ' + str(totTasksFinishedOrRunnung) + ' tasks',
                                styles["Normal"]))
        Report.append(Paragraph('Total tasks in the queue: ' + str(totTasksRegistered), styles["Normal"]))
        Report.append(Paragraph('Total tasks running: ' + str(totTasksRunning), styles["Normal"]))
        Report.append(Paragraph('Total tasks done: ' + str(totTasksFinished), styles["Normal"]))
        Report.append(
            Paragraph('Average (of last 12 hours) events throughput: ' + str(round(aveEvThrHours, 2)) + '/h',
                      styles["Normal"]))
        Report.append(Paragraph('Estimated processing time of queued events: ' + str(estProcTime/24) + 'd',
                                styles["Normal"]))
        Report.append(Paragraph('Average (of jobs finished in last 12 hours) events/walltime sec: ' + str(round(avWallPerEvH)),
                                styles["Normal"]))
        Report.append(Paragraph('Average events/walltime sec: ' + str(round(avWallPerEv)), styles["Normal"]))
        Report.append(Paragraph('Issues: ', styles["Heading2"]))

        strTopNSitesWorstPerf = ""
        for (site, perf) in topNSitesWorstPerf:
            strTopNSitesWorstPerf += site + "(" + str(round(perf)) + ") "
        Report.append(Paragraph('Top 3 sites with worst events throughput: ' + strTopNSitesWorstPerf,
                                styles["Normal"]))
        Report.append(Paragraph('Average rates for the whole campaign:', styles["Normal"]))
        Report.append(Paragraph('Number of jobs failed/finished:' + str(round(totJobsFailedSucc, 4)),
                                styles["Normal"], bulletText='-'))
        Report.append(Paragraph('Number of jobs walltime failed / finished:' + str(round(totWallTimeFailedSucc, 4)),
                                styles["Normal"]))

        strtopNTasksWorstWPerEvH = ""
        for (task, perf) in topNTasksWorstWPerEvH:
            strtopNTasksWorstWPerEvH += str(task) + "(" + str(round(perf, 4)) + ") "
        Report.append(Paragraph('Top 10 running tasks (of jobs finished in last 12 hours) with worst walltime per events:'
                                + strtopNTasksWorstWPerEvH, styles["Normal"], bulletText='-'))
        Report.append(Paragraph('List of top sites with highest failure rate and the type of failures',
                                styles["Normal"]))
        doc.build(Report)

        response = HttpResponse(content_type='application/pdf')
        response['Content-Disposition'] = 'attachment; filename="report.pdf"'
        response.write(buff.getvalue())
        buff.close()
        return response


"""
Top 10 tasks (of jobs finished in last 12 hours) with highest failure rate: taskid(failure rate, site, error, count of error),
Top 10 tasks (of jobs finished in last 12 hours) with highest failured walltime: taskid(failure walltime, site, error, count of error),
Top 10 errors of campaign (of jobs finished in last 12 hours): error, description (affected tasks) corresponding number of errors
and the walltime spent for them (to evaluate relative importance of the errors)
List of top (whatever number) sites with highest failure rate and the type of failures
List of the top (whatever number) sites with the highest activated/running ratio (shows if we have jobs that are queued on a site but are not run)
List of the top (whatever number) sites with the highest assigned/running ratio (shows if there are hanging transfers)
"""
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError

from .. import models


class SuppressionsOperations(object):
    """SuppressionsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The version of the API to be used with the client
     request. Constant value: "2017-04-19".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned REST API version for every request issued by this class.
        self.api_version = "2017-04-19"

        self.config = config

    def get(
            self, resource_uri, recommendation_id, name, custom_headers=None, raw=False, **operation_config):
        """Obtains the details of a suppression.

        :param resource_uri: The fully qualified Azure Resource Manager
         identifier of the resource to which the recommendation applies.
        :type resource_uri: str
        :param recommendation_id: The recommendation ID.
        :type recommendation_id: str
        :param name: The name of the suppression.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SuppressionContract or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.advisor.models.SuppressionContract or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/{resourceUri}/providers/Microsoft.Advisor/recommendations/{recommendationId}/suppressions/{name}'
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str'),
            'recommendationId': self._serialize.url("recommendation_id", recommendation_id, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            # Per-request correlation id for service-side tracing.
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SuppressionContract', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create(
            self, resource_uri, recommendation_id, name, suppression_id=None, ttl=None, custom_headers=None, raw=False, **operation_config):
        """Enables the snoozed or dismissed attribute of a recommendation.
        The snoozed or dismissed attribute is referred to as a suppression.
        Use this API to create or update the snoozed or dismissed status of a
        recommendation.

        :param resource_uri: The fully qualified Azure Resource Manager
         identifier of the resource to which the recommendation applies.
        :type resource_uri: str
        :param recommendation_id: The recommendation ID.
        :type recommendation_id: str
        :param name: The name of the suppression.
        :type name: str
        :param suppression_id: The GUID of the suppression.
        :type suppression_id: str
        :param ttl: The duration for which the suppression is valid.
        :type ttl: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SuppressionContract or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.advisor.models.SuppressionContract or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Flat parameters are folded into the request-body model.
        suppression_contract = models.SuppressionContract(suppression_id=suppression_id, ttl=ttl)

        # Construct URL
        url = '/{resourceUri}/providers/Microsoft.Advisor/recommendations/{recommendationId}/suppressions/{name}'
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str'),
            'recommendationId': self._serialize.url("recommendation_id", recommendation_id, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(suppression_contract, 'SuppressionContract')

        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('SuppressionContract', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def delete(
            self, resource_uri, recommendation_id, name, custom_headers=None, raw=False, **operation_config):
        """Enables the activation of a snoozed or dismissed recommendation.
        The snoozed or dismissed attribute of a recommendation is referred to
        as a suppression.

        :param resource_uri: The fully qualified Azure Resource Manager
         identifier of the resource to which the recommendation applies.
        :type resource_uri: str
        :param recommendation_id: The recommendation ID.
        :type recommendation_id: str
        :param name: The name of the suppression.
        :type name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/{resourceUri}/providers/Microsoft.Advisor/recommendations/{recommendationId}/suppressions/{name}'
        path_format_arguments = {
            'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str'),
            'recommendationId': self._serialize.url("recommendation_id", recommendation_id, 'str'),
            'name': self._serialize.url("name", name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        # Successful delete returns 204 No Content only.
        if response.status_code not in [204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def list(
            self, top=None, skip_token=None, custom_headers=None, raw=False, **operation_config):
        """Retrieves the list of snoozed or dismissed suppressions for a
        subscription. The snoozed or dismissed attribute of a recommendation
        is referred to as a suppression.

        :param top: The number of suppressions per page if a paged version of
         this API is being used.
        :type top: int
        :param skip_token: The page-continuation token to use with a paged
         version of this API.
        :type skip_token: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of SuppressionContract
        :rtype:
         ~azure.mgmt.advisor.models.SuppressionContractPaged[~azure.mgmt.advisor.models.SuppressionContract]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the list URL; later pages: follow next_link.
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Advisor/suppressions'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                if skip_token is not None:
                    query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.SuppressionContractPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.SuppressionContractPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
"""Provides some utilities widely used by other modules""" import bisect import collections import collections.abc import operator import os.path import random import math # ______________________________________________________________________________ # Functions on Sequences and Iterables def sequence(iterable): """Coerce iterable to sequence, if it is not already one.""" return (iterable if isinstance(iterable, collections.abc.Sequence) else tuple(iterable)) def removeall(item, seq): """Return a copy of seq (or string) with all occurences of item removed.""" if isinstance(seq, str): return seq.replace(item, '') else: return [x for x in seq if x != item] def unique(seq): # TODO: replace with set """Remove duplicate elements from seq. Assumes hashable elements.""" return list(set(seq)) def count(seq): """Count the number of items in sequence that are interpreted as true.""" return sum(bool(x) for x in seq) def product(numbers): """Return the product of the numbers, e.g. product([2, 3, 10]) == 60""" result = 1 for x in numbers: result *= x return result def first(iterable, default=None): """Return the first element of an iterable or the next element of a generator; or default.""" try: return iterable[0] except IndexError: return default except TypeError: return next(iterable, default) def is_in(elt, seq): """Similar to (elt in seq), but compares with 'is', not '=='.""" return any(x is elt for x in seq) def mode(data): """Return the most common data item. 
If there are ties, return any one of them.""" [(item, count)] = collections.Counter(data).most_common(1) return item # ______________________________________________________________________________ # argmin and argmax identity = lambda x: x argmin = min argmax = max def argmin_random_tie(seq, key=identity): """Return a minimum element of seq; break ties at random.""" return argmin(shuffled(seq), key=key) def argmax_random_tie(seq, key=identity): """Return an element with highest fn(seq[i]) score; break ties at random.""" return argmax(shuffled(seq), key=key) def shuffled(iterable): """Randomly shuffle a copy of iterable.""" items = list(iterable) random.shuffle(items) return items # ______________________________________________________________________________ # Statistical and mathematical functions def histogram(values, mode=0, bin_function=None): """Return a list of (value, count) pairs, summarizing the input values. Sorted by increasing value, or if mode=1, by decreasing count. If bin_function is given, map it over values first.""" if bin_function: values = map(bin_function, values) bins = {} for val in values: bins[val] = bins.get(val, 0) + 1 if mode: return sorted(list(bins.items()), key=lambda x: (x[1], x[0]), reverse=True) else: return sorted(bins.items()) def dotproduct(X, Y): """Return the sum of the element-wise product of vectors X and Y.""" return sum(x * y for x, y in zip(X, Y)) def element_wise_product(X, Y): """Return vector as an element-wise product of vectors X and Y""" assert len(X) == len(Y) return [x * y for x, y in zip(X, Y)] def matrix_multiplication(X_M, *Y_M): """Return a matrix as a matrix-multiplication of X_M and arbitary number of matrices *Y_M""" def _mat_mult(X_M, Y_M): """Return a matrix as a matrix-multiplication of two matrices X_M and Y_M >>> matrix_multiplication([[1, 2, 3], [2, 3, 4]], [[3, 4], [1, 2], [1, 0]]) [[8, 8],[13, 14]] """ assert len(X_M[0]) == len(Y_M) result = [[0 for i in range(len(Y_M[0]))] for j in 
range(len(X_M))] for i in range(len(X_M)): for j in range(len(Y_M[0])): for k in range(len(Y_M)): result[i][j] += X_M[i][k] * Y_M[k][j] return result result = X_M for Y in Y_M: result = _mat_mult(result, Y) return result def vector_to_diagonal(v): """Converts a vector to a diagonal matrix with vector elements as the diagonal elements of the matrix""" diag_matrix = [[0 for i in range(len(v))] for j in range(len(v))] for i in range(len(v)): diag_matrix[i][i] = v[i] return diag_matrix def vector_add(a, b): """Component-wise addition of two vectors.""" return tuple(map(operator.add, a, b)) def scalar_vector_product(X, Y): """Return vector as a product of a scalar and a vector""" return [X * y for y in Y] def scalar_matrix_product(X, Y): """Return matrix as a product of a scalar and a matrix""" return [scalar_vector_product(X, y) for y in Y] def inverse_matrix(X): """Inverse a given square matrix of size 2x2""" assert len(X) == 2 assert len(X[0]) == 2 det = X[0][0] * X[1][1] - X[0][1] * X[1][0] assert det != 0 inv_mat = scalar_matrix_product(1.0/det, [[X[1][1], -X[0][1]], [-X[1][0], X[0][0]]]) return inv_mat def probability(p): """Return true with probability p.""" return p > random.uniform(0.0, 1.0) def weighted_sample_with_replacement(n, seq, weights): """Pick n samples from seq at random, with replacement, with the probability of each element in proportion to its corresponding weight.""" sample = weighted_sampler(seq, weights) return [sample() for _ in range(n)] def weighted_sampler(seq, weights): """Return a random-sample function that picks from seq weighted by weights.""" totals = [] for w in weights: totals.append(w + totals[-1] if totals else w) return lambda: seq[bisect.bisect(totals, random.uniform(0, totals[-1]))] def rounder(numbers, d=4): """Round a single number, or sequence of numbers, to d decimal places.""" if isinstance(numbers, (int, float)): return round(numbers, d) else: constructor = type(numbers) # Can be list, set, tuple, etc. 
return constructor(rounder(n, d) for n in numbers) def num_or_str(x): """The argument is a string; convert to a number if possible, or strip it.""" try: return int(x) except ValueError: try: return float(x) except ValueError: return str(x).strip() def normalize(dist): """Multiply each number by a constant such that the sum is 1.0""" if isinstance(dist, dict): total = sum(dist.values()) for key in dist: dist[key] = dist[key] / total assert 0 <= dist[key] <= 1, "Probabilities must be between 0 and 1." return dist total = sum(dist) return [(n / total) for n in dist] def clip(x, lowest, highest): """Return x clipped to the range [lowest..highest].""" return max(lowest, min(x, highest)) def sigmoid(x): """Return activation value of x with sigmoid function""" return 1/(1 + math.exp(-x)) def step(x): """Return activation value of x with sign function""" return 1 if x >= 0 else 0 try: # math.isclose was added in Python 3.5; but we might be in 3.4 from math import isclose except ImportError: def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): """Return true if numbers a and b are close to each other.""" return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) # ______________________________________________________________________________ # Misc Functions def memoize(fn, slot=None, maxsize=32): """Memoize fn: make it remember the computed value for any argument list. If slot is specified, store result in that slot of first argument. 
If slot is false, use lru_cache for caching the values.""" if slot: def memoized_fn(obj, *args): if hasattr(obj, slot): return getattr(obj, slot) else: val = fn(obj, *args) setattr(obj, slot, val) return val else: @functools.lru_cache(maxsize=maxsize) def memoized_fn(*args): return fn(*args) return memoized_fn def name(obj): """Try to find some reasonable name for the object.""" return (getattr(obj, 'name', 0) or getattr(obj, '__name__', 0) or getattr(getattr(obj, '__class__', 0), '__name__', 0) or str(obj)) def isnumber(x): """Is x a number?""" return hasattr(x, '__int__') def issequence(x): """Is x a sequence?""" return isinstance(x, collections.abc.Sequence) def print_table(table, header=None, sep=' ', numfmt='%g'): """Print a list of lists as a table, so that columns line up nicely. header, if specified, will be printed as the first row. numfmt is the format for all numbers; you might want e.g. '%6.2f'. (If you want different formats in different columns, don't use print_table.) sep is the separator between columns.""" justs = ['rjust' if isnumber(x) else 'ljust' for x in table[0]] if header: table.insert(0, header) table = [[numfmt.format(x) if isnumber(x) else x for x in row] for row in table] sizes = list( map(lambda seq: max(map(len, seq)), list(zip(*[map(str, row) for row in table])))) for row in table: print(sep.join(getattr( str(x), j)(size) for (j, size, x) in zip(justs, sizes, row))) def AIMAFile(components, mode='r'): """Open a file based at the AIMA root directory.""" aima_root = os.path.dirname(__file__) aima_file = os.path.join(aima_root, *components) return open(aima_file) def DataFile(name, mode='r'): "Return a file in the AIMA /aima-data directory." 
return AIMAFile(['aima-data', name], mode) # ______________________________________________________________________________ # Expressions # See https://docs.python.org/3/reference/expressions.html#operator-precedence # See https://docs.python.org/3/reference/datamodel.html#special-method-names class Expr(object): """A mathematical expression with an operator and 0 or more arguments. op is a str like '+' or 'sin'; args are Expressions. Expr('x') or Symbol('x') creates a symbol (a nullary Expr). Expr('-', x) creates a unary; Expr('+', x, 1) creates a binary.""" def __init__(self, op, *args): self.op = str(op) self.args = args # Operator overloads def __neg__(self): return Expr('-', self) def __pos__(self): return Expr('+', self) def __invert__(self): return Expr('~', self) def __add__(self, rhs): return Expr('+', self, rhs) def __sub__(self, rhs): return Expr('-', self, rhs) def __mul__(self, rhs): return Expr('*', self, rhs) def __pow__(self, rhs): return Expr('**', self, rhs) def __mod__(self, rhs): return Expr('%', self, rhs) def __and__(self, rhs): return Expr('&', self, rhs) def __xor__(self, rhs): return Expr('^', self, rhs) def __rshift__(self, rhs): return Expr('>>', self, rhs) def __lshift__(self, rhs): return Expr('<<', self, rhs) def __truediv__(self, rhs): return Expr('/', self, rhs) def __floordiv__(self, rhs): return Expr('//', self, rhs) def __matmul__(self, rhs): return Expr('@', self, rhs) def __or__(self, rhs): """Allow both P | Q, and P |'==>'| Q.""" if isinstance(rhs, Expression): return Expr('|', self, rhs) else: return PartialExpr(rhs, self) # Reverse operator overloads def __radd__(self, lhs): return Expr('+', lhs, self) def __rsub__(self, lhs): return Expr('-', lhs, self) def __rmul__(self, lhs): return Expr('*', lhs, self) def __rdiv__(self, lhs): return Expr('/', lhs, self) def __rpow__(self, lhs): return Expr('**', lhs, self) def __rmod__(self, lhs): return Expr('%', lhs, self) def __rand__(self, lhs): return Expr('&', lhs, self) def 
__rxor__(self, lhs): return Expr('^', lhs, self) def __ror__(self, lhs): return Expr('|', lhs, self) def __rrshift__(self, lhs): return Expr('>>', lhs, self) def __rlshift__(self, lhs): return Expr('<<', lhs, self) def __rtruediv__(self, lhs): return Expr('/', lhs, self) def __rfloordiv__(self, lhs): return Expr('//', lhs, self) def __rmatmul__(self, lhs): return Expr('@', lhs, self) def __call__(self, *args): "Call: if 'f' is a Symbol, then f(0) == Expr('f', 0)." if self.args: raise ValueError('can only do a call for a Symbol, not an Expr') else: return Expr(self.op, *args) # Equality and repr def __eq__(self, other): "'x == y' evaluates to True or False; does not build an Expr." return (isinstance(other, Expr) and self.op == other.op and self.args == other.args) def __hash__(self): return hash(self.op) ^ hash(self.args) def __repr__(self): op = self.op args = [str(arg) for arg in self.args] if op.isidentifier(): # f(x) or f(x, y) return '{}({})'.format(op, ', '.join(args)) if args else op elif len(args) == 1: # -x or -(x + 1) return op + args[0] else: # (x - y) opp = (' ' + op + ' ') return '(' + opp.join(args) + ')' # An 'Expression' is either an Expr or a Number. # Symbol is not an explicit type; it is any Expr with 0 args. 
Number = (int, float, complex) Expression = (Expr, Number) def Symbol(name): """A Symbol is just an Expr with no args.""" return Expr(name) def symbols(names): """Return a tuple of Symbols; names is a comma/whitespace delimited str.""" return tuple(Symbol(name) for name in names.replace(',', ' ').split()) def subexpressions(x): """Yield the subexpressions of an Expression (including x itself).""" yield x if isinstance(x, Expr): for arg in x.args: yield from subexpressions(arg) def arity(expression): """The number of sub-expressions in this expression.""" if isinstance(expression, Expr): return len(expression.args) else: # expression is a number return 0 # For operators that are not defined in Python, we allow new InfixOps: class PartialExpr: """Given 'P |'==>'| Q, first form PartialExpr('==>', P), then combine with Q.""" def __init__(self, op, lhs): self.op, self.lhs = op, lhs def __or__(self, rhs): return Expr(self.op, self.lhs, rhs) def __repr__(self): return "PartialExpr('{}', {})".format(self.op, self.lhs) def expr(x): """Shortcut to create an Expression. x is a str in which: - identifiers are automatically defined as Symbols. - ==> is treated as an infix |'==>'|, as are <== and <=>. If x is already an Expression, it is returned unchanged. Example: >>> expr('P & Q ==> Q') ((P & Q) ==> Q) """ if isinstance(x, str): return eval(expr_handle_infix_ops(x), defaultkeydict(Symbol)) else: return x infix_ops = '==> <== <=>'.split() def expr_handle_infix_ops(x): """Given a str, return a new str with ==> replaced by |'==>'|, etc. >>> expr_handle_infix_ops('P ==> Q') "P |'==>'| Q" """ for op in infix_ops: x = x.replace(op, '|' + repr(op) + '|') return x class defaultkeydict(collections.defaultdict): """Like defaultdict, but the default_factory is a function of the key. 
>>> d = defaultkeydict(len); d['four'] 4 """ def __missing__(self, key): self[key] = result = self.default_factory(key) return result # ______________________________________________________________________________ # Queues: Stack, FIFOQueue, PriorityQueue # TODO: Possibly use queue.Queue, queue.PriorityQueue # TODO: Priority queues may not belong here -- see treatment in search.py class Queue: """Queue is an abstract class/interface. There are three types: Stack(): A Last In First Out Queue. FIFOQueue(): A First In First Out Queue. PriorityQueue(order, f): Queue in sorted order (default min-first). Each type supports the following methods and functions: q.append(item) -- add an item to the queue q.extend(items) -- equivalent to: for item in items: q.append(item) q.pop() -- return the top item from the queue len(q) -- number of items in q (also q.__len()) item in q -- does q contain item? Note that isinstance(Stack(), Queue) is false, because we implement stacks as lists. If Python ever gets interfaces, Queue will be an interface.""" def __init__(self): raise NotImplementedError def extend(self, items): for item in items: self.append(item) def Stack(): """Return an empty list, suitable as a Last-In-First-Out Queue.""" return [] class FIFOQueue(Queue): """A First-In-First-Out Queue.""" def __init__(self): self.A = [] self.start = 0 def append(self, item): self.A.append(item) def __len__(self): return len(self.A) - self.start def extend(self, items): self.A.extend(items) def pop(self): e = self.A[self.start] self.start += 1 if self.start > 5 and self.start > len(self.A) / 2: self.A = self.A[self.start:] self.start = 0 return e def __contains__(self, item): return item in self.A[self.start:] class PriorityQueue(Queue): """A queue in which the minimum (or maximum) element (as determined by f and order) is returned first. If order is min, the item with minimum f(x) is returned first; if order is max, then it is the item with maximum f(x). 
Also supports dict-like lookup.""" def __init__(self, order=min, f=lambda x: x): self.A = [] self.order = order self.f = f def append(self, item): bisect.insort(self.A, (self.f(item), item)) def __len__(self): return len(self.A) def pop(self): if self.order == min: return self.A.pop(0)[1] else: return self.A.pop()[1] def __contains__(self, item): return any(item == pair[1] for pair in self.A) def __getitem__(self, key): for _, item in self.A: if item == key: return item def __delitem__(self, key): for i, (value, item) in enumerate(self.A): if item == key: self.A.pop(i) # ______________________________________________________________________________ # Useful Shorthands class Bool(int): """Just like `bool`, except values display as 'T' and 'F' instead of 'True' and 'False'""" __str__ = __repr__ = lambda self: 'T' if self else 'F' T = Bool(True) F = Bool(False)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import os import six from tensorflow.contrib.eager.python import checkpointable_utils from tensorflow.python.client import session as session_lib from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.eager import test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import test_util from tensorflow.python.keras._impl.keras.engine import training from tensorflow.python.layers import core from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import template from tensorflow.python.ops import variable_scope from tensorflow.python.training import adam from tensorflow.python.training import checkpointable from tensorflow.python.training import saver as core_saver from tensorflow.python.training import training_util class NonLayerCheckpointable(checkpointable.Checkpointable): def __init__(self): super(NonLayerCheckpointable, 
self).__init__() self.a_variable = checkpointable_utils.add_variable( self, name="a_variable", shape=[]) # pylint: disable=not-callable class MyModel(training.Model): """A concrete Model for testing.""" def __init__(self): super(MyModel, self).__init__() self._named_dense = core.Dense(1, use_bias=True) self._second = core.Dense(1, use_bias=False) # We can still track Checkpointables which aren't Layers. self._non_layer = NonLayerCheckpointable() def call(self, values): ret = self._second(self._named_dense(values)) return ret class InterfaceTests(test.TestCase): @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testAddVariable(self): obj = NonLayerCheckpointable() with self.assertRaisesRegexp(ValueError, "do not specify shape"): checkpointable_utils.add_variable( obj, name="shape_specified_twice", shape=[], initializer=1) constant_initializer = checkpointable_utils.add_variable( obj, name="constant_initializer", initializer=1) with variable_scope.variable_scope("some_variable_scope"): ones_initializer = checkpointable_utils.add_variable( obj, name="ones_initializer", shape=[2], initializer=init_ops.ones_initializer(dtype=dtypes.float32)) bare_initializer = checkpointable_utils.add_variable( obj, name="bare_initializer", shape=[2, 2], dtype=dtypes.float64, initializer=init_ops.zeros_initializer) # Even in graph mode, there are no naming conflicts between objects, only # naming conflicts within an object. other_duplicate = resource_variable_ops.ResourceVariable( name="duplicate", initial_value=1.) 
duplicate = checkpointable_utils.add_variable( obj, name="duplicate", shape=[]) with self.assertRaisesRegexp(ValueError, "'duplicate' already exists"): checkpointable_utils.add_variable(obj, name="duplicate", shape=[]) self.evaluate(checkpointable_utils.gather_initializers(obj)) self.assertEqual("constant_initializer:0", constant_initializer.name) self.assertEqual(1, self.evaluate(constant_initializer)) self.assertEqual("some_variable_scope/ones_initializer:0", ones_initializer.name) self.assertAllEqual([1, 1], self.evaluate(ones_initializer)) self.assertAllEqual([[0., 0.], [0., 0.]], self.evaluate(bare_initializer)) self.assertEqual("a_variable:0", obj.a_variable.name) self.assertEqual("duplicate:0", other_duplicate.name) if context.executing_eagerly(): # When executing eagerly, there's no uniquification of variable names. The # checkpoint name will be the same. self.assertEqual("duplicate:0", duplicate.name) else: # The .name attribute may be globally influenced, but the checkpoint name # won't be (tested below). self.assertEqual("duplicate_1:0", duplicate.name) named_variables, _ = checkpointable_utils._serialize_object_graph(obj) expected_checkpoint_names = ( "a_variable/.ATTRIBUTES/VARIABLE_VALUE", "bare_initializer/.ATTRIBUTES/VARIABLE_VALUE", "constant_initializer/.ATTRIBUTES/VARIABLE_VALUE", "duplicate/.ATTRIBUTES/VARIABLE_VALUE", "ones_initializer/.ATTRIBUTES/VARIABLE_VALUE", ) six.assertCountEqual( self, expected_checkpoint_names, named_variables.keys()) def testInitNotCalled(self): class NoInit(checkpointable.Checkpointable): def __init__(self): pass # __init__ for Checkpointable will be called implicitly. 
checkpointable_utils.add_variable(NoInit(), "var", shape=[]) def testShapeDtype(self): root = checkpointable.Checkpointable() v1 = checkpointable_utils.add_variable( root, name="v1", initializer=3., dtype=dtypes.float64) self.assertEqual(dtypes.float64, v1.dtype) v2 = checkpointable_utils.add_variable( root, name="v2", shape=[3], initializer=init_ops.ones_initializer, dtype=dtypes.float64) self.assertEqual(dtypes.float64, v2.dtype) self.assertAllEqual([1., 1., 1.], self.evaluate(v2)) class _MirroringSaveable(core_saver.BaseSaverBuilder.SaveableObject): def __init__(self, primary_variable, mirrored_variable, name): self._primary_variable = primary_variable self._mirrored_variable = mirrored_variable tensor = self._primary_variable.read_value() spec = core_saver.BaseSaverBuilder.SaveSpec( tensor=tensor, slice_spec="", name=name) super(_MirroringSaveable, self).__init__( tensor, [spec], name) def restore(self, restored_tensors, restored_shapes): """Restore the same value into both variables.""" tensor, = restored_tensors return control_flow_ops.group( self._primary_variable.assign(tensor), self._mirrored_variable.assign(tensor)) class _OwnsMirroredVariables(checkpointable.CheckpointableBase): """A Checkpointable object which returns a more complex SaveableObject.""" def __init__(self): self.non_dep_variable = variable_scope.get_variable( name="non_dep_variable", initializer=6., use_resource=True) self.mirrored = variable_scope.get_variable( name="mirrored", initializer=15., use_resource=True) def _gather_saveables_for_checkpoint(self): def _saveable_factory(name=self.non_dep_variable.name): return _MirroringSaveable( primary_variable=self.non_dep_variable, mirrored_variable=self.mirrored, name=name) return {checkpointable.VARIABLE_VALUE_KEY: _saveable_factory} # The Saver sorts by name before parsing, so we need a name property. 
@property def name(self): return self.non_dep_variable.name class CheckpointingTests(test.TestCase): @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testNamingWithOptimizer(self): input_value = constant_op.constant([[3.]]) model = MyModel() # A nuisance Model using the same optimizer. Its slot variables should not # go in the checkpoint, since it is never depended on. other_model = MyModel() optimizer = adam.AdamOptimizer(0.001) optimizer_step = training_util.get_or_create_global_step() root_checkpointable = checkpointable_utils.Checkpoint( optimizer=optimizer, model=model, optimizer_step=optimizer_step) if context.executing_eagerly(): optimizer.minimize( lambda: model(input_value), global_step=optimizer_step) optimizer.minimize( lambda: other_model(input_value), global_step=optimizer_step) else: train_op = optimizer.minimize( model(input_value), global_step=optimizer_step) optimizer.minimize( other_model(input_value), global_step=optimizer_step) self.evaluate(checkpointable_utils.gather_initializers( root_checkpointable)) self.evaluate(train_op) named_variables, serialized_graph = ( checkpointable_utils._serialize_object_graph(root_checkpointable)) expected_checkpoint_names = ( # Created in the root node, so no prefix. 
"optimizer_step", "model/_second/kernel", "model/_named_dense/kernel", "model/_named_dense/bias", # non-Layer dependency of the model "model/_non_layer/a_variable", # The optimizer creates two non-slot variables "optimizer/beta1_power", "optimizer/beta2_power", # Slot variables "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/m", "model/_second/kernel/.OPTIMIZER_SLOT/optimizer/v", "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m", "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/v", "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/m", "model/_named_dense/bias/.OPTIMIZER_SLOT/optimizer/v", ) suffix = "/.ATTRIBUTES/VARIABLE_VALUE" expected_checkpoint_names = [ name + suffix for name in expected_checkpoint_names] six.assertCountEqual(self, expected_checkpoint_names, named_variables.keys()) # Check that we've mapped to the right variable objects (not exhaustive) self.assertEqual( "global_step:0", named_variables["optimizer_step" + suffix].name) self.assertEqual( "my_model/dense_1/kernel:0", named_variables["model/_second/kernel" + suffix].name) self.assertEqual( "my_model/dense/kernel:0", named_variables["model/_named_dense/kernel" + suffix].name) self.assertEqual( "beta1_power:0", named_variables["optimizer/beta1_power" + suffix].name) self.assertEqual( "beta2_power:0", named_variables["optimizer/beta2_power" + suffix].name) # Spot check the generated protocol buffers. self.assertEqual("optimizer", serialized_graph.nodes[0].children[1].local_name) optimizer_node = serialized_graph.nodes[serialized_graph.nodes[0].children[ 1].node_id] self.assertEqual("beta1_power", optimizer_node.children[0].local_name) self.assertEqual("beta1_power", serialized_graph.nodes[optimizer_node.children[0].node_id] .attributes[0].full_name) self.assertEqual( "my_model/dense/kernel", serialized_graph.nodes[optimizer_node.slot_variables[0] .original_variable_node_id] .attributes[0].full_name) # We strip off the :0 suffix, as variable.name-based saving does. 
self.assertEqual( "my_model/dense/kernel/Adam", serialized_graph.nodes[optimizer_node.slot_variables[0] .slot_variable_node_id] .attributes[0].full_name) self.assertEqual( "my_model/dense/kernel/Adam:0", optimizer.get_slot( var=named_variables["model/_named_dense/kernel" + suffix], name="m").name) self.assertEqual( "model/_named_dense/kernel" + suffix, serialized_graph.nodes[ optimizer_node.slot_variables[0] .original_variable_node_id].attributes[0].checkpoint_key) self.assertEqual("m", optimizer_node.slot_variables[0].slot_name) self.assertEqual( "model/_named_dense/kernel/.OPTIMIZER_SLOT/optimizer/m" + suffix, serialized_graph.nodes[ optimizer_node.slot_variables[0] .slot_variable_node_id].attributes[0].checkpoint_key) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testMoreComplexSaveableReturned(self): v = _OwnsMirroredVariables() checkpoint = checkpointable_utils.Checkpoint(v=v) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") self.evaluate(v.non_dep_variable.assign(42.)) save_path = checkpoint.save(prefix) self.evaluate(v.non_dep_variable.assign(43.)) self.evaluate(v.mirrored.assign(44.)) checkpoint.restore(save_path).assert_consumed().initialize_or_restore() self.assertEqual(42., self.evaluate(v.non_dep_variable)) self.assertEqual(42., self.evaluate(v.mirrored)) self.evaluate(v.non_dep_variable.assign(44.)) save_path = checkpoint.save(prefix) self.evaluate(v.non_dep_variable.assign(45.)) checkpoint.restore(save_path).assert_consumed().initialize_or_restore() self.assertEqual(44., self.evaluate(v.non_dep_variable)) self.assertEqual(44., self.evaluate(v.mirrored)) @test_util.run_in_graph_and_eager_modes() def testMoreComplexSaveableReturnedWithGlobalName(self): # The same object can also be saved using the name-based saver. 
v = _OwnsMirroredVariables() saver = core_saver.Saver(var_list=[v]) test_dir = self.get_temp_dir() prefix = os.path.join(test_dir, "ckpt") self.evaluate(v.non_dep_variable.assign(42.)) with self.test_session() as sess: save_path = saver.save(sess, prefix) self.evaluate(v.non_dep_variable.assign(43.)) self.evaluate(v.mirrored.assign(44.)) saver.restore(sess, save_path) self.assertEqual(42., self.evaluate(v.non_dep_variable)) self.assertEqual(42., self.evaluate(v.mirrored)) @test_util.run_in_graph_and_eager_modes() def testSaveRestore(self): model = MyModel() optimizer = adam.AdamOptimizer(0.001) root_checkpointable = checkpointable_utils.Checkpoint( optimizer=optimizer, model=model) input_value = constant_op.constant([[3.]]) if context.executing_eagerly(): optimizer.minimize( lambda: model(input_value)) else: train_op = optimizer.minimize(model(input_value)) # TODO(allenl): Make initialization more pleasant when graph building. root_checkpointable.save_counter # pylint: disable=pointless-statement self.evaluate(checkpointable_utils.gather_initializers( root_checkpointable)) self.evaluate(train_op) prefix = os.path.join(self.get_temp_dir(), "ckpt") self.evaluate(state_ops.assign(model._named_dense.variables[1], [42.])) m_bias_slot = optimizer.get_slot(model._named_dense.variables[1], "m") self.evaluate(state_ops.assign(m_bias_slot, [1.5])) save_path = root_checkpointable.save(file_prefix=prefix) self.evaluate(state_ops.assign(model._named_dense.variables[1], [43.])) self.evaluate(state_ops.assign(root_checkpointable.save_counter, 3)) optimizer_variables = self.evaluate(optimizer.variables()) self.evaluate(state_ops.assign(m_bias_slot, [-2.])) # Immediate restoration status = root_checkpointable.restore(save_path=save_path).assert_consumed() status.run_restore_ops() self.assertAllEqual([42.], self.evaluate(model._named_dense.variables[1])) self.assertAllEqual(1, self.evaluate(root_checkpointable.save_counter)) self.assertAllEqual([1.5], self.evaluate(m_bias_slot)) if 
not context.executing_eagerly(): return # Restore-on-create is only supported when executing eagerly on_create_model = MyModel() on_create_optimizer = adam.AdamOptimizer( 0.001, # Preserve beta1_power and beta2_power when appying gradients so we can # test that they've been restored correctly. beta1=1.0, beta2=1.0) on_create_root = checkpointable_utils.Checkpoint( optimizer=on_create_optimizer, model=on_create_model) # Deferred restoration status = on_create_root.restore(save_path=save_path) on_create_model(constant_op.constant([[3.]])) # create variables self.assertAllEqual(1, self.evaluate(on_create_root.save_counter)) self.assertAllEqual([42.], self.evaluate( on_create_model._named_dense.variables[1])) on_create_m_bias_slot = on_create_optimizer.get_slot( on_create_model._named_dense.variables[1], "m") # Optimizer slot variables are created when the original variable is # restored. self.assertAllEqual([1.5], self.evaluate(on_create_m_bias_slot)) self.assertAllEqual(optimizer_variables[2:], self.evaluate(on_create_optimizer.variables())) dummy_var = resource_variable_ops.ResourceVariable([1.]) on_create_optimizer.minimize(loss=dummy_var.read_value) status.assert_consumed() beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators() self.assertAllEqual(optimizer_variables[0], self.evaluate(beta1_power)) self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power)) # TODO(allenl): Debug garbage created by this test in python3. 
  def testDeferredRestorationUsageEager(self):
    """An idiomatic eager execution example."""
    num_training_steps = 10
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    for training_continuation in range(3):
      model = MyModel()
      optimizer = adam.AdamOptimizer(0.001)
      root = checkpointable_utils.Checkpoint(
          optimizer=optimizer, model=model,
          optimizer_step=training_util.get_or_create_global_step())
      # latest_checkpoint returns None on the first iteration, in which case
      # restore() presumably acts as plain initialization — TODO confirm.
      root.restore(core_saver.latest_checkpoint(checkpoint_directory))
      for _ in range(num_training_steps):
        # TODO(allenl): Use a Dataset and serialize/checkpoint it.
        input_value = constant_op.constant([[3.]])
        optimizer.minimize(
            lambda: model(input_value),  # pylint: disable=cell-var-from-loop
            global_step=root.optimizer_step)
      root.save(file_prefix=checkpoint_prefix)
      # The global step must survive across continuations for this to hold.
      self.assertEqual((training_continuation + 1) * num_training_steps,
                       root.optimizer_step.numpy())

  def testUsageGraph(self):
    """Expected usage when graph building."""
    with context.graph_mode():
      num_training_steps = 10
      checkpoint_directory = self.get_temp_dir()
      checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
      for training_continuation in range(3):
        with ops.Graph().as_default():
          model = MyModel()
          optimizer = adam.AdamOptimizer(0.001)
          root = checkpointable_utils.Checkpoint(
              optimizer=optimizer, model=model,
              global_step=training_util.get_or_create_global_step())
          input_value = constant_op.constant([[3.]])
          train_op = optimizer.minimize(
              model(input_value),
              global_step=root.global_step)
          checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory)
          with self.test_session(graph=ops.get_default_graph()) as session:
            status = root.restore(save_path=checkpoint_path)
            status.initialize_or_restore(session=session)
            if checkpoint_path is None:
              # First continuation: nothing saved yet, so there is nothing for
              # the restore status to consume.
              self.assertEqual(0, training_continuation)
              with self.assertRaises(AssertionError):
                status.assert_consumed()
            else:
              status.assert_consumed()
            for _ in range(num_training_steps):
              session.run(train_op)
root.save(file_prefix=checkpoint_prefix, session=session) self.assertEqual((training_continuation + 1) * num_training_steps, session.run(root.global_step)) self.assertEqual(training_continuation + 1, session.run(root.save_counter)) @test_util.run_in_graph_and_eager_modes() def testAgnosticUsage(self): """Graph/eager agnostic usage.""" # Does create garbage when executing eagerly due to ops.Graph() creation. num_training_steps = 10 checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") for training_continuation in range(3): with ops.Graph().as_default(), self.test_session( graph=ops.get_default_graph()), test_util.device(use_gpu=True): model = MyModel() optimizer = adam.AdamOptimizer(0.001) root = checkpointable_utils.Checkpoint( optimizer=optimizer, model=model, global_step=training_util.get_or_create_global_step()) checkpoint_path = core_saver.latest_checkpoint(checkpoint_directory) status = root.restore(save_path=checkpoint_path) input_value = constant_op.constant([[3.]]) train_fn = functools.partial( optimizer.minimize, functools.partial(model, input_value), global_step=root.global_step) if not context.executing_eagerly(): train_fn = functools.partial(self.evaluate, train_fn()) status.initialize_or_restore() for _ in range(num_training_steps): train_fn() root.save(file_prefix=checkpoint_prefix) self.assertEqual((training_continuation + 1) * num_training_steps, self.evaluate(root.global_step)) self.assertEqual(training_continuation + 1, self.evaluate(root.save_counter)) def _get_checkpoint_name(self, name): root = checkpointable.Checkpointable() checkpointable_utils.add_variable( root, name=name, shape=[1, 2], dtype=dtypes.float64) named_variables, _ = checkpointable_utils._serialize_object_graph(root) checkpoint_name, = named_variables.keys() with ops.name_scope("root/" + checkpoint_name): pass # Make sure we can use this as an op name if we prefix it. 
    return checkpoint_name

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testVariableNameEscaping(self):
    # Per the assertions below, "/" in a user-supplied name is escaped to ".S"
    # and "." to "..", so user names can never collide with the reserved
    # ".ATTRIBUTES" path component used in checkpoint keys.
    suffix = "/.ATTRIBUTES/VARIABLE_VALUE"
    self.assertEqual(r"a.Sb.Sc" + suffix, self._get_checkpoint_name(r"a/b/c"))
    self.assertEqual(r"b" + suffix, self._get_checkpoint_name(r"b"))
    self.assertEqual(r"c.S" + suffix, self._get_checkpoint_name(r"c/"))
    self.assertEqual(r"d.S..S" + suffix, self._get_checkpoint_name(r"d/.S"))
    self.assertEqual(r"d.S..ATTRIBUTES.Sf" + suffix,
                     self._get_checkpoint_name(r"d/.ATTRIBUTES/f"))

  @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
  def testNumberedPath(self):
    # Checkpoint keys for nested objects are the attribute path from the root.
    root = checkpointable.Checkpointable()
    leaf = checkpointable.Checkpointable()
    root.leaf = leaf
    checkpointable_utils.add_variable(leaf, name="v", shape=[])
    named_variables, _ = checkpointable_utils._serialize_object_graph(root)
    variable_name, = named_variables.keys()
    self.assertEqual(r"leaf/v/.ATTRIBUTES/VARIABLE_VALUE", variable_name)

  @test_util.run_in_graph_and_eager_modes()
  def testLocalNameValidation(self):
    root = checkpointable.Checkpointable()
    leaf = checkpointable.Checkpointable()
    # Dots are escaped, which avoids conflicts with reserved names.
root._track_checkpointable(leaf, name=".ATTRIBUTES") checkpointable_utils.add_variable(checkpointable=leaf, name="a", shape=[]) named_variables, _ = checkpointable_utils._serialize_object_graph(root) name, = named_variables.keys() self.assertEqual(name, "..ATTRIBUTES/a/.ATTRIBUTES/VARIABLE_VALUE") def testAnonymousVarsInInit(self): class Model(training.Model): def __init__(self): super(Model, self).__init__() self.w = resource_variable_ops.ResourceVariable(0.0) self.b = resource_variable_ops.ResourceVariable(0.0) self.vars = [self.w, self.b] def call(self, x): return x * self.w + self.b with context.eager_mode(): model = Model() optimizer = adam.AdamOptimizer(learning_rate=0.05) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") checkpoint = checkpointable_utils.Checkpoint( model=model, optimizer=optimizer) for _ in range(2): checkpoint.save(checkpoint_prefix) with backprop.GradientTape() as tape: loss = (constant_op.constant(1.) - model(constant_op.constant(1.))) ** 2 grad = tape.gradient(loss, model.vars) optimizer.apply_gradients( [(g, v) for g, v in zip(grad, model.vars)]) @test_util.run_in_graph_and_eager_modes() def testLateDependencyTracking(self): class Dependency(checkpointable.Checkpointable): def build(self): self.var = checkpointable_utils.add_variable( self, "var", initializer=0.) 
class LateDependencies(checkpointable.Checkpointable): def add_dep(self): self.dep = Dependency() self.dep.build() original = LateDependencies() original.add_dep() self.evaluate(state_ops.assign(original.dep.var, 123.)) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpointable_utils.CheckpointableSaver( original).save(checkpoint_prefix) load_into = LateDependencies() status = checkpointable_utils.CheckpointableSaver( load_into).restore(save_path) with self.assertRaises(AssertionError): status.assert_consumed() load_into.add_dep() status.assert_consumed() status.run_restore_ops() self.assertEqual(123., self.evaluate(load_into.dep.var)) @test_util.run_in_graph_and_eager_modes() def testDepAfterVar(self): class Dependency(checkpointable.Checkpointable): def build(self): self.var = checkpointable_utils.add_variable( self, "var", initializer=0.) class DepAfterVar(checkpointable.Checkpointable): def add_dep(self): dep = Dependency() dep.build() self.dep = dep dep_after_var = DepAfterVar() dep_after_var.add_dep() self.evaluate(state_ops.assign(dep_after_var.dep.var, -14.)) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = checkpointable_utils.CheckpointableSaver(dep_after_var).save( checkpoint_prefix) loaded_dep_after_var = DepAfterVar() status = checkpointable_utils.CheckpointableSaver( loaded_dep_after_var).restore(save_path) loaded_dep_after_var.add_dep() status.assert_consumed() status.run_restore_ops() self.assertEqual(-14., self.evaluate(loaded_dep_after_var.dep.var)) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testDeferredSlotRestoration(self): checkpoint_directory = self.get_temp_dir() root = checkpointable.Checkpointable() root.var = checkpointable_utils.add_variable( root, name="var", initializer=0.) 
optimizer = adam.AdamOptimizer(0.1) if context.executing_eagerly(): optimizer.minimize(root.var.read_value) else: train_op = optimizer.minimize(root.var) # Note that `optimizer` has not been added as a dependency of # `root`. Create a one-off grouping so that slot variables for `root.var` # get initialized too. self.evaluate(checkpointable_utils.gather_initializers( checkpointable_utils.Checkpoint(root=root, optimizer=optimizer))) self.evaluate(train_op) self.evaluate(state_ops.assign(root.var, 12.)) no_slots_path = checkpointable_utils.CheckpointableSaver(root).save( os.path.join(checkpoint_directory, "no_slots")) root.optimizer = optimizer self.evaluate(state_ops.assign(root.var, 13.)) self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var), 14.)) slots_path = checkpointable_utils.CheckpointableSaver(root).save( os.path.join(checkpoint_directory, "with_slots")) new_root = checkpointable.Checkpointable() # Load the slot-containing checkpoint (deferred), then immediately overwrite # the non-slot variable (also deferred). slot_status = checkpointable_utils.CheckpointableSaver( new_root).restore(slots_path) no_slot_status = checkpointable_utils.CheckpointableSaver( new_root).restore(no_slots_path) with self.assertRaises(AssertionError): no_slot_status.assert_consumed() new_root.var = checkpointable_utils.add_variable( new_root, name="var", shape=[]) no_slot_status.assert_consumed() no_slot_status.run_restore_ops() self.assertEqual(12., self.evaluate(new_root.var)) new_root.optimizer = adam.AdamOptimizer(0.1) with self.assertRaisesRegexp(AssertionError, "beta1_power"): slot_status.assert_consumed() self.assertEqual(12., self.evaluate(new_root.var)) if context.executing_eagerly(): # Slot variables are only created with restoring initializers when # executing eagerly. 
self.assertEqual(14., self.evaluate( new_root.optimizer.get_slot(name="m", var=new_root.var))) else: self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var), None) if context.executing_eagerly(): new_root.optimizer.minimize(new_root.var.read_value) else: train_op = new_root.optimizer.minimize(new_root.var) # The slot variable now exists; restore() didn't create it, but we should # now have a restore op for it. slot_status.run_restore_ops() self.assertEqual(14., self.evaluate( new_root.optimizer.get_slot(name="m", var=new_root.var))) self.evaluate(train_op) slot_status.assert_consumed() @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testOverlappingRestores(self): checkpoint_directory = self.get_temp_dir() save_root = checkpointable.Checkpointable() save_root.dep = checkpointable.Checkpointable() save_root.dep.var = checkpointable_utils.add_variable( save_root.dep, name="var", initializer=0.) self.evaluate(state_ops.assign(save_root.dep.var, 12.)) saver = checkpointable_utils.CheckpointableSaver(save_root) first_path = saver.save(os.path.join(checkpoint_directory, "first")) self.evaluate(state_ops.assign(save_root.dep.var, 13.)) second_path = saver.save(os.path.join(checkpoint_directory, "second")) first_root = checkpointable.Checkpointable() second_root = checkpointable.Checkpointable() first_status = checkpointable_utils.CheckpointableSaver( first_root).restore(first_path) second_status = checkpointable_utils.CheckpointableSaver( second_root).restore(second_path) load_dep = checkpointable.Checkpointable() load_dep.var = checkpointable_utils.add_variable( load_dep, name="var", shape=[]) first_root.dep = load_dep first_status.assert_consumed() first_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) second_root.dep = load_dep second_status.assert_consumed() second_status.run_restore_ops() self.assertEqual(13., self.evaluate(load_dep.var)) # Try again with the order of the restore() reversed. 
The last restore # determines the final value. first_root = checkpointable.Checkpointable() second_root = checkpointable.Checkpointable() second_status = checkpointable_utils.CheckpointableSaver( second_root).restore(second_path) first_status = checkpointable_utils.CheckpointableSaver( first_root).restore(first_path) load_dep = checkpointable.Checkpointable() load_dep.var = checkpointable_utils.add_variable( load_dep, name="var", shape=[]) first_root.dep = load_dep first_status.assert_consumed() first_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) second_root.dep = load_dep second_status.assert_consumed() second_status.run_restore_ops() self.assertEqual(12., self.evaluate(load_dep.var)) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testAmbiguousLoad(self): # Not OK to split one checkpoint object into two checkpoint_directory = self.get_temp_dir() save_root = checkpointable.Checkpointable() save_root.dep_one = checkpointable.Checkpointable() save_root.dep_two = checkpointable.Checkpointable() dep_three = checkpointable.Checkpointable() save_root.dep_one.dep_three = dep_three save_root.dep_two.dep_three = dep_three checkpointable_utils.add_variable(dep_three, name="var", initializer=0.) 
self.evaluate(checkpointable_utils.gather_initializers(save_root)) save_path = checkpointable_utils.CheckpointableSaver(save_root).save( os.path.join(checkpoint_directory, "ckpt")) load_root = checkpointable.Checkpointable() checkpointable_utils.CheckpointableSaver(load_root).restore(save_path) load_root.dep_one = checkpointable.Checkpointable() load_root.dep_two = checkpointable.Checkpointable() load_root.dep_one.dep_three = checkpointable.Checkpointable() with self.assertRaisesRegexp(AssertionError, "resolved to different objects"): load_root.dep_two.dep_three = checkpointable.Checkpointable() @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testObjectsCombined(self): # Currently fine to load two checkpoint objects into one Python object checkpoint_directory = self.get_temp_dir() save_root = checkpointable.Checkpointable() save_root.dep_one = checkpointable.Checkpointable() save_root.dep_two = checkpointable.Checkpointable() checkpointable_utils.add_variable( save_root.dep_one, name="var1", initializer=32., dtype=dtypes.float64) checkpointable_utils.add_variable( save_root.dep_two, name="var2", initializer=64., dtype=dtypes.float64) self.evaluate(checkpointable_utils.gather_initializers(save_root)) save_path = checkpointable_utils.CheckpointableSaver(save_root).save( os.path.join(checkpoint_directory, "ckpt")) load_root = checkpointable.Checkpointable() load_root.dep_one = checkpointable.Checkpointable() load_root.dep_two = load_root.dep_one v1 = checkpointable_utils.add_variable( load_root.dep_one, name="var1", shape=[], dtype=dtypes.float64) v2 = checkpointable_utils.add_variable( load_root.dep_one, name="var2", shape=[], dtype=dtypes.float64) status = checkpointable_utils.CheckpointableSaver(load_root).restore( save_path).assert_consumed() status.run_restore_ops() self.assertEqual(32., self.evaluate(v1)) self.assertEqual(64., self.evaluate(v2)) @test_util.run_in_graph_and_eager_modes() def testDependencyLoop(self): # Note: this test 
creates garbage during eager execution because it # purposefully creates a reference cycle. first = checkpointable.Checkpointable() second = checkpointable.Checkpointable() first.second = second second.first = first first.v = checkpointable_utils.add_variable( first, "v1", initializer=[3., 1., 4.]) second.v = checkpointable_utils.add_variable( second, "v2", initializer=[1., 1., 2., 3.]) self.evaluate(checkpointable_utils.gather_initializers(first)) checkpoint_directory = self.get_temp_dir() save_path = checkpointable_utils.CheckpointableSaver(first).save( os.path.join(checkpoint_directory, "ckpt")) # Test deferred loading first_load = checkpointable.Checkpointable() status = checkpointable_utils.CheckpointableSaver( first_load).restore(save_path) second_load = checkpointable.Checkpointable() first_load.second = second_load second_load.first = first_load with self.assertRaises(AssertionError): status.assert_consumed() first_load.v = checkpointable_utils.add_variable( first_load, "v1", shape=[3]) second_load.v = checkpointable_utils.add_variable( second_load, "v2", shape=[4]) status.assert_consumed() status.run_restore_ops() self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v)) self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v)) # Test loading when variables have already been created self.evaluate(first_load.v.assign([2., 7., 1.])) self.assertAllEqual([2., 7., 1.], self.evaluate(first_load.v)) self.evaluate(second_load.v.assign([2., 7., 1., 8.])) self.assertAllEqual([2., 7., 1., 8.], self.evaluate(second_load.v)) status = checkpointable_utils.CheckpointableSaver(first_load).restore( save_path).assert_consumed() status.run_restore_ops() self.assertAllEqual([3., 1., 4.], self.evaluate(first_load.v)) self.assertAllEqual([1., 1., 2., 3.], self.evaluate(second_load.v)) @test_util.run_in_graph_and_eager_modes() def testRestoreOnAssign(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") 
save_graph = ops.Graph() with save_graph.as_default(), self.test_session(save_graph): first = checkpointable.Checkpointable() first.var1 = variable_scope.get_variable( name="outside_var", initializer=0.) first.var2 = variable_scope.get_variable( name="blah", initializer=0.) self.evaluate(first.var1.assign(4.)) self.evaluate(first.var2.assign(8.)) save_path = checkpointable_utils.CheckpointableSaver(first).save( checkpoint_prefix) restore_graph = ops.Graph() with restore_graph.as_default(), self.test_session(restore_graph): second = checkpointable.Checkpointable() second.var2 = variable_scope.get_variable( name="blah", initializer=0.) status = checkpointable_utils.CheckpointableSaver( second).restore(save_path) recreated_var1 = variable_scope.get_variable( name="outside_var", initializer=0.) status.run_restore_ops() self.assertEqual(8., self.evaluate(second.var2)) self.evaluate(recreated_var1.assign(-2.)) self.assertEqual(-2., self.evaluate(recreated_var1)) second.var1 = recreated_var1 status.run_restore_ops() self.assertEqual(4., self.evaluate(recreated_var1)) def testManySavesGraph(self): """Saves after the first should not modify the graph.""" with context.graph_mode(): graph = ops.Graph() with graph.as_default(), self.test_session(graph): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = checkpointable.Checkpointable() obj.var = variable_scope.get_variable(name="v", initializer=0.) 
obj.opt = adam.AdamOptimizer(0.1) obj.opt.minimize(obj.var.read_value()) self.evaluate(checkpointable_utils.gather_initializers(obj)) saver = checkpointable_utils.CheckpointableSaver(obj) saver.save(checkpoint_prefix) before_ops = graph.get_operations() saver.save(checkpoint_prefix) self.assertEqual(before_ops, graph.get_operations()) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testCheckpointCleanup(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = checkpointable.Checkpointable() obj.var = variable_scope.get_variable(name="v", initializer=0.) self.evaluate(checkpointable_utils.gather_initializers(obj)) saver = checkpointable_utils.Checkpoint(obj=obj) for _ in range(10): saver.save(checkpoint_prefix) expected_filenames = ["checkpoint"] for checkpoint_number in range(6, 11): expected_filenames.append("ckpt-%d.index" % (checkpoint_number,)) expected_filenames.append( "ckpt-%d.data-00000-of-00001" % (checkpoint_number,)) six.assertCountEqual( self, expected_filenames, os.listdir(checkpoint_directory)) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def testCheckpointCleanupChangingVarList(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = checkpointable.Checkpointable() obj.var = variable_scope.get_variable(name="v", initializer=0.) self.evaluate(checkpointable_utils.gather_initializers(obj)) checkpoint = checkpointable_utils.Checkpoint(obj=obj) looped_variables = [] for iteration in range(10): new_variable = resource_variable_ops.ResourceVariable(iteration) self.evaluate(new_variable.initializer) setattr(checkpoint, "var_%d" % iteration, new_variable) checkpoint.save(checkpoint_prefix) looped_variables.append(new_variable) expected_filenames = ["checkpoint"] # We've copied the saver each time, but checkpoint management should still # be consistent. 
for checkpoint_number in range(6, 11): expected_filenames.append("ckpt-%d.index" % (checkpoint_number,)) expected_filenames.append( "ckpt-%d.data-00000-of-00001" % (checkpoint_number,)) six.assertCountEqual( self, expected_filenames, os.listdir(checkpoint_directory)) for v in looped_variables: self.evaluate(v.assign(314)) checkpoint.restore(checkpoint_prefix + "-6").run_restore_ops() self.assertEqual(314, self.evaluate(checkpoint.var_9)) self.assertEqual(314, self.evaluate(checkpoint.var_8)) self.assertEqual(314, self.evaluate(checkpoint.var_6)) self.assertEqual(5, self.evaluate(checkpoint.var_5)) self.assertEqual(1, self.evaluate(checkpoint.var_1)) self.assertEqual(0, self.evaluate(checkpoint.var_0)) if context.executing_eagerly(): checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops() self.assertEqual(9, self.evaluate(checkpoint.var_9)) self.assertEqual(8, self.evaluate(checkpoint.var_8)) self.assertEqual(1, self.evaluate(checkpoint.var_1)) self.assertEqual(0, self.evaluate(checkpoint.var_0)) else: # Restoring into modified graphs is an error while graph building. with self.assertRaises(NotImplementedError): checkpoint.restore(checkpoint_prefix + "-10").run_restore_ops() def testManyRestoresGraph(self): """Restores after the first should not modify the graph.""" with context.graph_mode(): graph = ops.Graph() with graph.as_default(), self.test_session(graph): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") obj = checkpointable.Checkpointable() obj.var = variable_scope.get_variable(name="v", initializer=0.) 
obj.opt = adam.AdamOptimizer(0.1) obj.opt.minimize(obj.var.read_value()) self.evaluate(checkpointable_utils.gather_initializers(obj)) saver = checkpointable_utils.CheckpointableSaver(obj) save_path = saver.save(checkpoint_prefix) saver.restore(save_path) before_ops = graph.get_operations() saver.restore(save_path) self.assertEqual(before_ops, graph.get_operations()) def testMultipleGraphsNonSlotVariables(self): with context.graph_mode(): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") optimizer = adam.AdamOptimizer(0.001) # Construct a model in one graph first_graph = ops.Graph() first_session = session_lib.Session(graph=first_graph) with first_graph.as_default(), first_session.as_default(): first_variable = resource_variable_ops.ResourceVariable([1.]) first_root_checkpointable = checkpointable_utils.Checkpoint( optimizer=optimizer, variable=first_variable) train_op = optimizer.minimize(first_variable.read_value) self.evaluate(checkpointable_utils.gather_initializers( first_root_checkpointable)) self.evaluate(train_op) self.evaluate(first_variable.assign([1.])) self.evaluate(optimizer.get_slot( var=first_variable, name="m").assign([2.])) beta1_power, _ = optimizer._get_beta_accumulators() self.evaluate(beta1_power.assign(3.)) # Save and load in a second graph second_graph = ops.Graph() with second_graph.as_default(), session_lib.Session(graph=second_graph): second_variable = resource_variable_ops.ResourceVariable([1.]) second_root_checkpointable = checkpointable_utils.Checkpoint( optimizer=optimizer, variable=second_variable) train_op = optimizer.minimize(second_variable.read_value) second_root_checkpointable.restore(None).initialize_or_restore() self.evaluate(train_op) self.evaluate(second_variable.assign([4.])) self.evaluate(optimizer.get_slot( var=second_variable, name="m").assign([5.])) beta1_power, _ = optimizer._get_beta_accumulators() self.evaluate(beta1_power.assign(6.)) save_path = 
second_root_checkpointable.save(checkpoint_prefix) self.evaluate(second_variable.assign([7.])) self.evaluate(optimizer.get_slot( var=second_variable, name="m").assign([8.])) beta1_power, _ = optimizer._get_beta_accumulators() self.assertAllEqual(6., self.evaluate(beta1_power)) status = second_root_checkpointable.restore(save_path) status.assert_consumed().run_restore_ops() self.assertAllEqual([4.], self.evaluate(second_variable)) self.assertAllEqual([5.], self.evaluate(optimizer.get_slot( var=second_variable, name="m"))) beta1_power, _ = optimizer._get_beta_accumulators() self.assertAllEqual(6., self.evaluate(beta1_power)) # Check that the first graph is unmolested with first_graph.as_default(), first_session.as_default(): self.assertAllEqual([1.], self.evaluate(first_variable)) self.assertAllEqual([2.], self.evaluate(optimizer.get_slot( var=first_variable, name="m"))) beta1_power, _ = optimizer._get_beta_accumulators() self.assertAllEqual(3., self.evaluate(beta1_power)) class TemplateTests(test.TestCase): @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def test_checkpointable_save_restore(self): def _templated(): v = variable_scope.get_variable( "v", shape=[1], initializer=init_ops.zeros_initializer()) v2 = variable_scope.get_variable( "v2", shape=[1], initializer=init_ops.zeros_initializer()) return v, v + 1., v2 save_template = template.make_template("s1", _templated) save_root = checkpointable_utils.Checkpoint(my_template=save_template) v1_save, _, v2_save = save_template() self.evaluate(v1_save.assign([12.])) self.evaluate(v2_save.assign([14.])) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = save_root.save(checkpoint_prefix) load_template = template.make_template("s2", _templated) load_root = checkpointable_utils.Checkpoint(my_template=load_template) status = load_root.restore(save_path) var, var_plus_one, var2 = load_template() self.assertEqual(2, 
len(load_template._checkpoint_dependencies)) self.assertEqual("v", load_template._checkpoint_dependencies[0].name) self.assertEqual("v2", load_template._checkpoint_dependencies[1].name) status.assert_consumed().run_restore_ops() self.assertAllEqual([12.], self.evaluate(var)) self.assertAllEqual([13.], self.evaluate(var_plus_one)) self.assertAllEqual([14.], self.evaluate(var2)) @test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True) def test_checkpointable_save_restore_nested(self): def _inner_template(): v = variable_scope.get_variable( "v", shape=[1], initializer=init_ops.zeros_initializer()) return v def _outer_template(): first_inner = template.make_template("i1", _inner_template) second_inner = template.make_template("i2", _inner_template) v1 = first_inner() v2 = second_inner() v3 = second_inner() return (first_inner, second_inner), (v1, v2, v3) with variable_scope.variable_scope("ignored"): save_template = template.make_template("s1", _outer_template) save_root = checkpointable_utils.Checkpoint(my_template=save_template) (inner_template_one, inner_template_two), _ = save_template() self.evaluate(inner_template_one.variables[0].assign([20.])) self.evaluate(inner_template_two.variables[0].assign([25.])) checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") save_path = save_root.save(checkpoint_prefix) load_template = template.make_template("s2", _outer_template) load_root = checkpointable_utils.Checkpoint(my_template=load_template) status = load_root.restore(save_path) (inner_template_one, inner_template_two), (v1, v2, v3) = load_template() outer_template_dependencies = load_root.my_template._checkpoint_dependencies self.assertEqual(2, len(outer_template_dependencies)) self.assertEqual("i1", outer_template_dependencies[0].name) self.assertIs(inner_template_one, outer_template_dependencies[0].ref) self.assertEqual("i2", outer_template_dependencies[1].name) self.assertIs(inner_template_two, 
outer_template_dependencies[1].ref) self.assertEqual(1, len(inner_template_one._checkpoint_dependencies)) self.assertEqual("v", inner_template_one._checkpoint_dependencies[0].name) self.assertEqual(1, len(inner_template_two._checkpoint_dependencies)) self.assertEqual("v", inner_template_two._checkpoint_dependencies[0].name) status.assert_consumed().run_restore_ops() self.assertAllEqual([20.], self.evaluate(v1)) self.assertAllEqual([25.], self.evaluate(v2)) self.assertAllEqual([25.], self.evaluate(v3)) class CheckpointCompatibilityTests(test.TestCase): def _initialized_model(self): input_value = constant_op.constant([[3.]]) model = MyModel() optimizer = adam.AdamOptimizer(0.001) optimizer_step = training_util.get_or_create_global_step() root_checkpointable = checkpointable_utils.Checkpoint( optimizer=optimizer, model=model, optimizer_step=optimizer_step) train_op = optimizer.minimize( functools.partial(model, input_value), global_step=optimizer_step) self.evaluate(checkpointable_utils.gather_initializers( root_checkpointable)) self.evaluate(train_op) # A regular variable, a slot variable, and a non-slot Optimizer variable # with known values to check when loading. 
self.evaluate(model._named_dense.bias.assign([1.])) self.evaluate(optimizer.get_slot( var=model._named_dense.bias, name="m").assign([2.])) beta1_power, _ = optimizer._get_beta_accumulators() self.evaluate(beta1_power.assign(3.)) return root_checkpointable def _set_sentinels(self, root_checkpointable): self.evaluate(root_checkpointable.model._named_dense.bias.assign([101.])) self.evaluate( root_checkpointable.optimizer.get_slot( var=root_checkpointable.model._named_dense.bias, name="m") .assign([102.])) beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators() self.evaluate(beta1_power.assign(103.)) def _check_sentinels(self, root_checkpointable): self.assertAllEqual( [1.], self.evaluate(root_checkpointable.model._named_dense.bias)) self.assertAllEqual([2.], self.evaluate( root_checkpointable.optimizer.get_slot( var=root_checkpointable.model._named_dense.bias, name="m"))) beta1_power, _ = root_checkpointable.optimizer._get_beta_accumulators() self.assertAllEqual(3., self.evaluate(beta1_power)) def _write_name_based_checkpoint(self): checkpoint_directory = self.get_temp_dir() checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt") with context.graph_mode(): save_graph = ops.Graph() with save_graph.as_default(), self.test_session( graph=save_graph) as session: root = self._initialized_model() name_saver = core_saver.Saver() return name_saver.save( sess=session, save_path=checkpoint_prefix, global_step=root.optimizer_step) @test_util.run_in_graph_and_eager_modes() def testLoadFromNameBasedSaver(self): """Save a name-based checkpoint, load it using the object-based API.""" with test_util.device(use_gpu=True): save_path = self._write_name_based_checkpoint() root = self._initialized_model() self._set_sentinels(root) with self.assertRaises(AssertionError): self._check_sentinels(root) object_saver = checkpointable_utils.CheckpointableSaver(root) status = object_saver.restore(save_path) with self.assertRaises(AssertionError): status.assert_consumed() 
      status.run_restore_ops()
      self._check_sentinels(root)
      self._set_sentinels(root)
      status.initialize_or_restore()
      self._check_sentinels(root)

  # TODO(allenl): Test for the core name-based saver loading object-based
  # checkpoints once object-based checkpointing is in core.

  def testSaveGraphLoadEager(self):
    """Save with graph building, then restore under eager execution."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    with context.graph_mode():
      save_graph = ops.Graph()
      with save_graph.as_default(), self.test_session(
          graph=save_graph) as session:
        root = self._initialized_model()
        object_saver = checkpointable_utils.CheckpointableSaver(root)
        save_path = object_saver.save(
            session=session, file_prefix=checkpoint_prefix)
    with context.eager_mode():
      root = self._initialized_model()
      # Overwrite the known values, then verify restore() brings them back.
      self._set_sentinels(root)
      root.restore(save_path).assert_consumed()
      self._check_sentinels(root)

  def testSaveEagerLoadGraph(self):
    """Save under eager execution, then restore while graph building."""
    checkpoint_directory = self.get_temp_dir()
    checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
    with context.eager_mode():
      root = self._initialized_model()
      object_saver = checkpointable_utils.CheckpointableSaver(root)
      save_path = object_saver.save(file_prefix=checkpoint_prefix)
    with context.graph_mode():
      save_graph = ops.Graph()
      with save_graph.as_default(), self.test_session(
          graph=save_graph):
        root = self._initialized_model()
        # Overwrite the known values, then verify restore() brings them back.
        self._set_sentinels(root)
        root.restore(save_path).assert_consumed().run_restore_ops()
        self._check_sentinels(root)


if __name__ == "__main__":
  test.main()
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Inception model configuration.

Includes multiple models: inception3, inception4, inception-resnet2.

References:
  Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
  Inception-v4, Inception-ResNet and the Impact of Residual Connections on
  Learning

  Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
  Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich
  Going Deeper with Convolutions
  http://arxiv.org/pdf/1409.4842v1.pdf

  Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
  Zbigniew Wojna
  Rethinking the Inception Architecture for Computer Vision
  arXiv preprint arXiv:1512.00567 (2015)

  Inception v3 model: http://arxiv.org/abs/1512.00567

  Inception v4 and Resnet V2 architectures: http://arxiv.org/abs/1602.07261
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin
from cnn_quantization.tf_cnn_benchmarks.models import model


class Inceptionv3Model(model.CNNModel):
  """InceptionV3."""

  def __init__(self, auxiliary=False, params=None):
    # auxiliary: when True, add_inference attaches the auxiliary
    # classification head after the mixed_7 block.
    self._auxiliary = auxiliary
    super(Inceptionv3Model, self).__init__(
        'inception3', 299, 32, 0.005, params=params)

  def add_inference(self, cnn):
    # Each inception_v3_* helper adds one inception module; ``cols`` is a
    # list of parallel column specs, each column a sequence of layer tuples
    # (op, out_channels, kh, kw[, stride_h, stride_w, padding]).
    # ('share',) reuses the corresponding layer of the previous column.
    def inception_v3_a(cnn, n):
      cols = [[('conv', 64, 1, 1)], [('conv', 48, 1, 1), ('conv', 64, 5, 5)],
              [('conv', 64, 1, 1), ('conv', 96, 3, 3), ('conv', 96, 3, 3)],
              [('apool', 3, 3, 1, 1, 'SAME'), ('conv', n, 1, 1)]]
      cnn.inception_module('incept_v3_a', cols)

    def inception_v3_b(cnn):
      # Grid-size reduction module (stride-2 columns + max-pool column).
      cols = [[('conv', 384, 3, 3, 2, 2, 'VALID')],
              [('conv', 64, 1, 1), ('conv', 96, 3, 3),
               ('conv', 96, 3, 3, 2, 2, 'VALID')],
              [('mpool', 3, 3, 2, 2, 'VALID')]]
      cnn.inception_module('incept_v3_b', cols)

    def inception_v3_c(cnn, n):
      # Factorized 7x7 convolutions (1x7 followed by 7x1).
      cols = [[('conv', 192, 1, 1)],
              [('conv', n, 1, 1), ('conv', n, 1, 7), ('conv', 192, 7, 1)],
              [('conv', n, 1, 1), ('conv', n, 7, 1), ('conv', n, 1, 7),
               ('conv', n, 7, 1), ('conv', 192, 1, 7)],
              [('apool', 3, 3, 1, 1, 'SAME'), ('conv', 192, 1, 1)]]
      cnn.inception_module('incept_v3_c', cols)

    def inception_v3_d(cnn):
      # Second grid-size reduction module.
      cols = [[('conv', 192, 1, 1), ('conv', 320, 3, 3, 2, 2, 'VALID')],
              [('conv', 192, 1, 1), ('conv', 192, 1, 7), ('conv', 192, 7, 1),
               ('conv', 192, 3, 3, 2, 2, 'VALID')],
              [('mpool', 3, 3, 2, 2, 'VALID')]]
      cnn.inception_module('incept_v3_d', cols)

    def inception_v3_e(cnn, pooltype):
      # Expanded-filter-bank module; pooling column is max or average
      # depending on ``pooltype``.
      cols = [[('conv', 320, 1, 1)], [('conv', 384, 1, 1),
                                      ('conv', 384, 1, 3)],
              [('share',), ('conv', 384, 3, 1)],
              [('conv', 448, 1, 1), ('conv', 384, 3, 3), ('conv', 384, 1, 3)],
              [('share',), ('share',), ('conv', 384, 3, 1)],
              [('mpool' if pooltype == 'max' else 'apool', 3, 3, 1, 1, 'SAME'),
               ('conv', 192, 1, 1)]]
      cnn.inception_module('incept_v3_e', cols)

    def incept_v3_aux(cnn):
      # Auxiliary classifier branch; builds layers on cnn.aux_top_layer
      # without disturbing the main tower.
      assert cnn.aux_top_layer is None
      cnn.aux_top_layer = cnn.top_layer
      cnn.aux_top_size = cnn.top_size
      with cnn.switch_to_aux_top_layer():
        cnn.apool(5, 5, 3, 3, mode='VALID')
        cnn.conv(128, 1, 1, mode='SAME')
        cnn.conv(768, 5, 5, mode='VALID', stddev=0.01)
        cnn.reshape([-1, 768])

    cnn.use_batch_norm = True
    cnn.conv(32, 3, 3, 2, 2, mode='VALID')  # 299 x 299 x 3
    cnn.conv(32, 3, 3, 1, 1, mode='VALID')  # 149 x 149 x 32
    cnn.conv(64, 3, 3, 1, 1, mode='SAME')  # 147 x 147 x 64
    cnn.mpool(3, 3, 2, 2, mode='VALID')  # 147 x 147 x 64
    cnn.conv(80, 1, 1, 1, 1, mode='VALID')  # 73 x 73 x 80
    cnn.conv(192, 3, 3, 1, 1, mode='VALID')  # 71 x 71 x 192
    cnn.mpool(3, 3, 2, 2, 'VALID')  # 35 x 35 x 192
    inception_v3_a(cnn, 32)  # 35 x 35 x 256 mixed.
    inception_v3_a(cnn, 64)  # 35 x 35 x 288 mixed_1.
    inception_v3_a(cnn, 64)  # 35 x 35 x 288 mixed_2
    inception_v3_b(cnn)  # 17 x 17 x 768 mixed_3
    inception_v3_c(cnn, 128)  # 17 x 17 x 768 mixed_4
    inception_v3_c(cnn, 160)  # 17 x 17 x 768 mixed_5
    inception_v3_c(cnn, 160)  # 17 x 17 x 768 mixed_6
    inception_v3_c(cnn, 192)  # 17 x 17 x 768 mixed_7
    if self._auxiliary:
      incept_v3_aux(cnn)  # Auxiliary Head logits
    inception_v3_d(cnn)  # 17 x 17 x 1280 mixed_8
    inception_v3_e(cnn, 'avg')  # 8 x 8 x 2048 mixed_9
    inception_v3_e(cnn, 'max')  # 8 x 8 x 2048 mixed_10
    cnn.apool(8, 8, 1, 1, 'VALID')  # 8 x 8 x 2048
    cnn.reshape([-1, 2048])  # 1 x 1 x 2048


# Stem functions
def inception_v4_sa(cnn):
  cols = [[('mpool', 3, 3, 2, 2, 'VALID')], [('conv', 96, 3, 3, 2, 2,
                                              'VALID')]]
  cnn.inception_module('incept_v4_sa', cols)


def inception_v4_sb(cnn):
  cols = [[('conv', 64, 1, 1), ('conv', 96, 3, 3, 1, 1, 'VALID')],
          [('conv', 64, 1, 1), ('conv', 64, 7, 1), ('conv', 64, 1, 7),
           ('conv', 96, 3, 3, 1, 1, 'VALID')]]
  cnn.inception_module('incept_v4_sb', cols)


def inception_v4_sc(cnn):
  cols = [[('conv', 192, 3, 3, 2, 2, 'VALID')],
          [('mpool', 3, 3, 2, 2, 'VALID')]]
  cnn.inception_module('incept_v4_sc', cols)


# Reduction functions
def inception_v4_ra(cnn, k, l, m, n):
  # 35x35 -> 17x17 reduction; k, l, m, n are the channel counts from the
  # Inception-v4 paper's Table 1.
  cols = [
      [('mpool', 3, 3, 2, 2, 'VALID')], [('conv', n, 3, 3, 2, 2, 'VALID')],
      [('conv', k, 1, 1), ('conv', l, 3, 3), ('conv', m, 3, 3, 2, 2, 'VALID')]
  ]
  cnn.inception_module('incept_v4_ra', cols)


def inception_v4_rb(cnn):
  # 17x17 -> 8x8 reduction.
  cols = [[('mpool', 3, 3, 2, 2, 'VALID')],
          [('conv', 192, 1, 1), ('conv', 192, 3, 3, 2, 2, 'VALID')],
          [('conv', 256, 1, 1), ('conv', 256, 1, 7), ('conv', 320, 7, 1),
           ('conv', 320, 3, 3, 2, 2, 'VALID')]]
  cnn.inception_module('incept_v4_rb', cols)


class Inceptionv4Model(model.CNNModel):
  """Inceptionv4."""

  def __init__(self, params=None):
    super(Inceptionv4Model, self).__init__(
        'inception4', 299, 32, 0.005, params=params)

  def add_inference(self, cnn):
    def inception_v4_a(cnn):
      cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 96, 1, 1)],
              [('conv', 96, 1, 1)], [('conv', 64, 1, 1), ('conv', 96, 3, 3)],
              [('conv', 64, 1, 1), ('conv', 96, 3, 3), ('conv', 96, 3, 3)]]
      cnn.inception_module('incept_v4_a', cols)

    def inception_v4_b(cnn):
      cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 128, 1, 1)],
              [('conv', 384, 1, 1)],
              [('conv', 192, 1, 1), ('conv', 224, 1, 7), ('conv', 256, 7, 1)],
              [('conv', 192, 1, 1), ('conv', 192, 1, 7), ('conv', 224, 7, 1),
               ('conv', 224, 1, 7), ('conv', 256, 7, 1)]]
      cnn.inception_module('incept_v4_b', cols)

    def inception_v4_c(cnn):
      cols = [[('apool', 3, 3, 1, 1, 'SAME'), ('conv', 256, 1, 1)],
              [('conv', 256, 1, 1)], [('conv', 384, 1, 1),
                                      ('conv', 256, 1, 3)],
              [('share',), ('conv', 256, 3, 1)],
              [('conv', 384, 1, 1), ('conv', 448, 1, 3), ('conv', 512, 3, 1),
               ('conv', 256, 3, 1)],
              [('share',), ('share',), ('share',), ('conv', 256, 1, 3)]]
      cnn.inception_module('incept_v4_c', cols)

    cnn.use_batch_norm = True
    # Stem.
    cnn.conv(32, 3, 3, 2, 2, mode='VALID')
    cnn.conv(32, 3, 3, 1, 1, mode='VALID')
    cnn.conv(64, 3, 3)
    inception_v4_sa(cnn)
    inception_v4_sb(cnn)
    inception_v4_sc(cnn)
    # 4 x Inception-A, reduction, 7 x Inception-B, reduction, 3 x Inception-C.
    for _ in xrange(4):
      inception_v4_a(cnn)
    inception_v4_ra(cnn, 192, 224, 256, 384)
    for _ in xrange(7):
      inception_v4_b(cnn)
    inception_v4_rb(cnn)
    for _ in xrange(3):
      inception_v4_c(cnn)
    cnn.spatial_mean()
    cnn.dropout(0.8)
from troposphere import (
    Parameter,
    Ref,
    Output,
    Tags,
    GetAtt,
    Base64,
    Join,
    Equals,
    ec2,
    elasticloadbalancing as elb,
    autoscaling as asg,
    route53 as r53
)

from cfn.utils.cfn import get_recent_ami
from cfn.utils.constants import (
    ALLOW_ALL_CIDR,
    EC2_INSTANCE_TYPES,
    HTTP,
    HTTPS,
    POSTGRESQL,
    REDIS,
    SSH,
    VPC_CIDR
)

from majorkirby import StackNode, MKUnresolvableInputError


class Worker(StackNode):
    """CloudFormation stack for the MMW worker tier.

    Builds the worker security groups, an ELB, an auto scaling group with
    scheduled capacity changes, and blue/green Route 53 records.
    """

    # Input values may come from the global config or from sibling stacks
    # (VPC, DataPlane); first resolvable source wins.
    INPUTS = {
        'Tags': ['global:Tags'],
        'Region': ['global:Region'],
        'StackType': ['global:StackType'],
        'StackColor': ['global:StackColor'],
        'KeyName': ['global:KeyName'],
        'IPAccess': ['global:IPAccess'],
        'AvailabilityZones': ['global:AvailabilityZones',
                              'VPC:AvailabilityZones'],
        'RDSPassword': ['global:RDSPassword', 'DataPlane:RDSPassword'],
        'WorkerInstanceType': ['global:WorkerInstanceType'],
        'WorkerAMI': ['global:WorkerAMI'],
        'WorkerInstanceProfile': ['global:WorkerInstanceProfile'],
        'WorkerAutoScalingDesired': ['global:WorkerAutoScalingDesired'],  # NOQA
        'WorkerAutoScalingMin': ['global:WorkerAutoScalingMin'],
        'WorkerAutoScalingMax': ['global:WorkerAutoScalingMax'],
        'WorkerAutoScalingScheduleStartCapacity': ['global:WorkerAutoScalingScheduleStartCapacity'],  # NOQA
        'WorkerAutoScalingScheduleStartRecurrence': ['global:WorkerAutoScalingScheduleStartRecurrence'],  # NOQA
        'WorkerAutoScalingScheduleEndCapacity': ['global:WorkerAutoScalingScheduleEndCapacity'],  # NOQA
        'WorkerAutoScalingScheduleEndRecurrence': ['global:WorkerAutoScalingScheduleEndRecurrence'],  # NOQA
        'PublicSubnets': ['global:PublicSubnets', 'VPC:PublicSubnets'],
        'PrivateSubnets': ['global:PrivateSubnets', 'VPC:PrivateSubnets'],
        'PublicHostedZoneName': ['global:PublicHostedZoneName'],
        'VpcId': ['global:VpcId', 'VPC:VpcId'],
        'GlobalNotificationsARN': ['global:GlobalNotificationsARN'],
        'HydroShareBaseURL': ['global:HydroShareBaseURL'],
        'HydroShareSecretKey': ['global:HydroShareSecretKey'],
        'SRATCatchmentAPIURL': ['global:SRATCatchmentAPIURL'],
        'SRATCatchmentAPIKey': ['global:SRATCatchmentAPIKey'],
        'RollbarServerSideAccessToken':
        ['global:RollbarServerSideAccessToken'],
        'PapertrailHost': ['global:PapertrailHost'],
        'PapertrailPort': ['global:PapertrailPort'],
    }

    DEFAULTS = {
        'Tags': {},
        'Region': 'us-east-1',
        'StackType': 'Staging',
        'StackColor': 'Green',
        'KeyName': 'mmw-stg',
        'IPAccess': ALLOW_ALL_CIDR,
        'WorkerInstanceType': 't2.micro',
        'WorkerInstanceProfile': 'WorkerInstanceProfile',
        'WorkerAutoScalingDesired': '1',
        'WorkerAutoScalingMin': '1',
        'WorkerAutoScalingMax': '1',
    }

    ATTRIBUTES = {
        'StackType': 'StackType',
        'StackColor': 'StackColor',
    }

    def set_up_stack(self):
        """Declare all parameters, then create the stack's resources."""
        super(Worker, self).set_up_stack()

        self.default_tags = self.get_input('Tags').copy()
        self.region = self.get_input('Region')

        self.add_description('Worker stack for MMW')

        # Parameters
        self.color = self.add_parameter(Parameter(
            'StackColor', Type='String',
            Description='Stack color', AllowedValues=['Blue', 'Green']
        ), 'StackColor')

        self.keyname = self.add_parameter(Parameter(
            'KeyName', Type='String',
            Description='Name of an existing EC2 key pair'
        ), 'KeyName')

        self.ip_access = self.add_parameter(Parameter(
            'IPAccess', Type='String', Default=self.get_input('IPAccess'),
            Description='CIDR for allowing SSH access'
        ), 'IPAccess')

        self.availability_zones = self.add_parameter(Parameter(
            'AvailabilityZones', Type='CommaDelimitedList',
            Description='Comma delimited list of availability zones'
        ), 'AvailabilityZones')

        self.rds_password = self.add_parameter(Parameter(
            'RDSPassword', Type='String', NoEcho=True,
            Description='Database password',
        ), 'RDSPassword')

        self.worker_instance_type = self.add_parameter(Parameter(
            'WorkerInstanceType', Type='String', Default='t2.micro',
            Description='Worker EC2 instance type',
            AllowedValues=EC2_INSTANCE_TYPES,
            ConstraintDescription='must be a valid EC2 instance type.'
        ), 'WorkerInstanceType')

        self.worker_ami = self.add_parameter(Parameter(
            'WorkerAMI', Type='String',
            Default=self.get_recent_worker_ami(),
            Description='Worker AMI'
        ), 'WorkerAMI')

        self.worker_instance_profile = self.add_parameter(Parameter(
            'WorkerInstanceProfile', Type='String',
            Default='WorkerInstanceProfile',
            Description='Worker instance profile'
        ), 'WorkerInstanceProfile')

        self.worker_auto_scaling_desired = self.add_parameter(Parameter(
            'WorkerAutoScalingDesired', Type='String', Default='2',
            Description='Worker AutoScalingGroup desired'
        ), 'WorkerAutoScalingDesired')

        self.worker_auto_scaling_min = self.add_parameter(Parameter(
            'WorkerAutoScalingMin', Type='String', Default='0',
            Description='Worker AutoScalingGroup minimum'
        ), 'WorkerAutoScalingMin')

        self.worker_auto_scaling_max = self.add_parameter(Parameter(
            'WorkerAutoScalingMax', Type='String', Default='2',
            Description='Worker AutoScalingGroup maximum'
        ), 'WorkerAutoScalingMax')

        # Scheduled scaling: bring workers up on a recurrence and back down
        # on another (cron syntax, UTC per AWS scheduled actions).
        self.worker_auto_scaling_schedule_start_recurrence = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleStartRecurrence', Type='String',
                Default='0 12 * * 1-5',
                Description='Worker ASG schedule start recurrence'
            ), 'WorkerAutoScalingScheduleStartRecurrence')

        self.worker_auto_scaling_schedule_start_capacity = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleStartCapacity', Type='String',
                Default='2',
                Description='Worker ASG schedule start capacity'
            ), 'WorkerAutoScalingScheduleStartCapacity')

        self.worker_auto_scaling_schedule_end_recurrence = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleEndRecurrence', Type='String',
                Default='0 0 * * *',
                Description='Worker ASG schedule end recurrence'
            ), 'WorkerAutoScalingScheduleEndRecurrence')

        self.worker_auto_scaling_schedule_end_capacity = self.add_parameter(  # NOQA
            Parameter(
                'WorkerAutoScalingScheduleEndCapacity', Type='String',
                Default='0',
                Description='Worker ASG schedule end capacity'
            ), 'WorkerAutoScalingScheduleEndCapacity')

        self.public_subnets = self.add_parameter(Parameter(
            'PublicSubnets', Type='CommaDelimitedList',
            Description='A list of public subnets'
        ), 'PublicSubnets')

        self.private_subnets = self.add_parameter(Parameter(
            'PrivateSubnets', Type='CommaDelimitedList',
            Description='A list of private subnets'
        ), 'PrivateSubnets')

        self.public_hosted_zone_name = self.add_parameter(Parameter(
            'PublicHostedZoneName', Type='String',
            Description='Route 53 public hosted zone name'
        ), 'PublicHostedZoneName')

        self.vpc_id = self.add_parameter(Parameter(
            'VpcId', Type='String',
            Description='VPC ID'
        ), 'VpcId')

        self.notification_topic_arn = self.add_parameter(Parameter(
            'GlobalNotificationsARN', Type='String',
            Description='ARN for an SNS topic to broadcast notifications'
        ), 'GlobalNotificationsARN')

        self.hydroshare_base_url = self.add_parameter(Parameter(
            'HydroShareBaseURL', Type='String',
            Description='Base URL for HydroShare portal'
        ), 'HydroShareBaseURL')

        self.hydroshare_secret_key = self.add_parameter(Parameter(
            'HydroShareSecretKey', Type='String', NoEcho=True,
            Description='Secret key for HydroShare portal integration'
        ), 'HydroShareSecretKey')

        self.srat_catchment_api_url = self.add_parameter(Parameter(
            'SRATCatchmentAPIURL', Type='String',
            Description='URL for the SRAT Catchment API'
        ), 'SRATCatchmentAPIURL')

        self.srat_catchment_api_key = self.add_parameter(Parameter(
            'SRATCatchmentAPIKey', Type='String', NoEcho=True,
            Description='API key for the SRAT Catchment API'
        ), 'SRATCatchmentAPIKey')

        self.papertrail_host = self.add_parameter(Parameter(
            'PapertrailHost', Type='String',
            Description='Hostname for Papertrail log destination',
        ), 'PapertrailHost')

        self.papertrail_port = self.add_parameter(Parameter(
            'PapertrailPort', Type='String',
            Description='Port for Papertrail log destination',
        ), 'PapertrailPort')

        # Resources: security groups -> load balancer -> ASG -> DNS.
        worker_lb_security_group, \
            worker_security_group = self.create_security_groups()
        worker_lb = self.create_load_balancer(worker_lb_security_group)

        self.create_auto_scaling_resources(worker_security_group, worker_lb)

        self.create_dns_records(worker_lb)

        self.add_output(Output('WorkerLoadBalancerEndpoint',
                               Value=GetAtt(worker_lb, 'DNSName')))
        self.add_output(Output('WorkerLoadBalancerHostedZoneNameID',
                               Value=GetAtt(worker_lb,
                                            'CanonicalHostedZoneNameID')))

    def get_recent_worker_ami(self):
        """Return the configured worker AMI, or look up the most recent
        'mmw-worker-*' AMI when no explicit AMI input was provided."""
        try:
            worker_ami_id = self.get_input('WorkerAMI')
        except MKUnresolvableInputError:
            filters = {'name': 'mmw-worker-*'}

            worker_ami_id = get_recent_ami(self.aws_profile, filters=filters,
                                           region=self.region)

        return worker_ami_id

    def create_security_groups(self):
        """Create the load balancer and worker security groups.

        Returns:
            Tuple of (worker_lb_security_group, worker_security_group).
        """
        worker_lb_security_group_name = 'sgWorkerLoadBalancer'

        # LB: HTTP in from the allowed CIDR, HTTP out to the VPC only.
        worker_lb_security_group = self.add_resource(ec2.SecurityGroup(
            worker_lb_security_group_name,
            GroupDescription='Enables access to workers via a load balancer',
            VpcId=Ref(self.vpc_id),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', CidrIp=Ref(self.ip_access), FromPort=p,
                    ToPort=p
                )
                for p in [HTTP]
            ],
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
                )
                for p in [HTTP]
            ],
            Tags=self.get_tags(Name=worker_lb_security_group_name)
        ))

        worker_security_group_name = 'sgWorker'

        # Workers: SSH/HTTP in from the VPC plus HTTP from the LB group;
        # egress to Postgres/Redis in the VPC and HTTP(S)/Papertrail out.
        worker_security_group = self.add_resource(ec2.SecurityGroup(
            worker_security_group_name,
            GroupDescription='Enables access to workers',
            VpcId=Ref(self.vpc_id),
            SecurityGroupIngress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
                )
                for p in [SSH, HTTP]
            ] + [
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', SourceSecurityGroupId=Ref(sg),
                    FromPort=HTTP, ToPort=HTTP
                )
                for sg in [worker_lb_security_group]
            ],
            SecurityGroupEgress=[
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', CidrIp=VPC_CIDR, FromPort=p, ToPort=p
                )
                for p in [POSTGRESQL, REDIS]
            ] + [
                ec2.SecurityGroupRule(
                    IpProtocol='tcp', CidrIp=ALLOW_ALL_CIDR, FromPort=p,
                    ToPort=p
                )
                for p in [HTTP, HTTPS, self.get_input('PapertrailPort')]
            ],
            Tags=self.get_tags(Name=worker_security_group_name)
        ))

        return worker_lb_security_group, worker_security_group

    def create_load_balancer(self, worker_lb_security_group):
        """Create the worker ELB (HTTP:80 with a root-path health check)."""
        worker_lb_name = 'elbWorker'

        return self.add_resource(elb.LoadBalancer(
            worker_lb_name,
            ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
                Enabled=True,
                Timeout=300,
            ),
            CrossZone=True,
            SecurityGroups=[Ref(worker_lb_security_group)],
            Listeners=[
                elb.Listener(
                    LoadBalancerPort='80',
                    InstancePort='80',
                    Protocol='HTTP',
                )
            ],
            HealthCheck=elb.HealthCheck(
                Target='HTTP:80/',
                HealthyThreshold='3',
                UnhealthyThreshold='2',
                Interval='60',
                Timeout='10',
            ),
            Subnets=Ref(self.public_subnets),
            Tags=self.get_tags(Name=worker_lb_name)
        ))

    def create_auto_scaling_resources(self, worker_security_group, worker_lb):
        """Create the launch configuration, ASG, and scheduled actions."""
        worker_launch_config_name = 'lcWorker'

        worker_launch_config = self.add_resource(
            asg.LaunchConfiguration(
                worker_launch_config_name,
                EbsOptimized=True,
                ImageId=Ref(self.worker_ami),
                IamInstanceProfile=Ref(self.worker_instance_profile),
                InstanceType=Ref(self.worker_instance_type),
                KeyName=Ref(self.keyname),
                SecurityGroups=[Ref(worker_security_group)],
                UserData=Base64(
                    Join('', self.get_cloud_config()))
            ))

        worker_auto_scaling_group_name = 'asgWorker'

        worker_asg = self.add_resource(
            asg.AutoScalingGroup(
                worker_auto_scaling_group_name,
                AvailabilityZones=Ref(self.availability_zones),
                Cooldown=300,
                DesiredCapacity=Ref(self.worker_auto_scaling_desired),
                HealthCheckGracePeriod=600,
                HealthCheckType='ELB',
                LaunchConfigurationName=Ref(worker_launch_config),
                LoadBalancerNames=[Ref(worker_lb)],
                MaxSize=Ref(self.worker_auto_scaling_max),
                MinSize=Ref(self.worker_auto_scaling_min),
                NotificationConfigurations=[
                    asg.NotificationConfigurations(
                        TopicARN=Ref(self.notification_topic_arn),
                        NotificationTypes=[
                            asg.EC2_INSTANCE_LAUNCH,
                            asg.EC2_INSTANCE_LAUNCH_ERROR,
                            asg.EC2_INSTANCE_TERMINATE,
                            asg.EC2_INSTANCE_TERMINATE_ERROR
                        ]
                    )
                ],
                VPCZoneIdentifier=Ref(self.private_subnets),
                Tags=[asg.Tag('Name', 'Worker', True)]
            )
        )

        self.add_resource(
            asg.ScheduledAction(
                'schedWorkerAutoScalingStart',
                AutoScalingGroupName=Ref(worker_asg),
                DesiredCapacity=Ref(
                    self.worker_auto_scaling_schedule_start_capacity),
                Recurrence=Ref(
                    self.worker_auto_scaling_schedule_start_recurrence)
            )
        )

        self.add_resource(
            asg.ScheduledAction(
                'schedWorkerAutoScalingEnd',
                AutoScalingGroupName=Ref(worker_asg),
                DesiredCapacity=Ref(
                    self.worker_auto_scaling_schedule_end_capacity),
                Recurrence=Ref(
                    self.worker_auto_scaling_schedule_end_recurrence)
            )
        )

    def get_cloud_config(self):
        """Return the cloud-config user data as a list of Join fragments.

        The fragments mix literal strings with CloudFormation Refs so the
        rendered template substitutes parameter values at stack creation.
        """
        return ['#cloud-config\n',
                '\n',
                'write_files:\n',
                ' - path: /etc/mmw.d/env/MMW_STACK_COLOR\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.color), '\n',
                ' - path: /etc/mmw.d/env/MMW_STACK_TYPE\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', self.get_input('StackType'), '\n',
                ' - path: /etc/mmw.d/env/MMW_DB_PASSWORD\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.rds_password), '\n',
                ' - path: /etc/mmw.d/env/ROLLBAR_SERVER_SIDE_ACCESS_TOKEN\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', self.get_input('RollbarServerSideAccessToken'), '\n',  # NOQA
                ' - path: /etc/mmw.d/env/MMW_HYDROSHARE_BASE_URL\n',
                # NOTE(review): the two HydroShare entries use 0750 while all
                # other env files use 0440 — confirm this is intentional.
                ' permissions: 0750\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.hydroshare_base_url), '\n',
                ' - path: /etc/mmw.d/env/MMW_HYDROSHARE_SECRET_KEY\n',
                ' permissions: 0750\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.hydroshare_secret_key), '\n',
                ' - path: /etc/mmw.d/env/MMW_SRAT_CATCHMENT_API_URL\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.srat_catchment_api_url), '\n',
                ' - path: /etc/mmw.d/env/MMW_SRAT_CATCHMENT_API_KEY\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: ', Ref(self.srat_catchment_api_key), '\n',
                ' - path: /etc/fstab.rwd-data\n',
                ' permissions: 0440\n',
                ' owner: root:mmw\n',
                ' content: |\n',
                ' /dev/xvdf /opt/rwd-data\text4\tdefaults,nofail,discard\t0 2\n',  # NOQA
                '\n',
                'rsyslog:\n',
                ' - $DefaultNetstreamDriverCAFile /etc/papertrail-bundle.pem # trust these CAs\n',
                ' - $PreserveFQDN off\n',
                ' - $ActionSendStreamDriver gtls # use gtls netstream driver\n',
                ' - $ActionSendStreamDriverMode 1 # require TLS\n',
                ' - $ActionSendStreamDriverAuthMode x509/name # authenticate by hostname\n',
                ' - $ActionSendStreamDriverPermittedPeer *.papertrailapp.com\n',
                ' - $ActionResumeInterval 10\n',
                ' - $ActionQueueSize 100000\n',
                ' - $ActionQueueDiscardMark 97500\n',
                ' - $ActionQueueHighWaterMark 80000\n',
                ' - $ActionQueueType LinkedList\n',
                ' - $ActionQueueFileName papertrailqueue\n',
                ' - $ActionQueueCheckpointInterval 100\n',
                ' - $ActionQueueMaxDiskSpace 2g\n',
                ' - $ActionResumeRetryCount -1\n',
                ' - $ActionQueueSaveOnShutdown on\n',
                ' - $ActionQueueTimeoutEnqueue 2\n',
                ' - $ActionQueueDiscardSeverity 0\n',
                ' - "*.* @@', Ref(self.papertrail_host), ':', Ref(
                    self.papertrail_port), '"\n',
                'rsyslog_filename: 22-mmw-papertrail.conf\n',
                '\n',
                'runcmd:\n',
                ' - cat /etc/fstab.rwd-data >> /etc/fstab\n',
                ' - mount -t ext4 /dev/xvdf /opt/rwd-data && docker restart mmw_mmw-rwd_1\n',  # NOQA
                ' - /opt/model-my-watershed/scripts/aws/ebs-warmer.sh']

    def create_dns_records(self, worker_lb):
        """Create blue/green alias records pointing at the worker ELB.

        Only the record group matching the stack's color condition is
        created in a given stack.
        """
        self.add_condition('BlueCondition', Equals('Blue', Ref(self.color)))
        self.add_condition('GreenCondition', Equals('Green', Ref(self.color)))

        self.add_resource(r53.RecordSetGroup(
            'dnsPublicRecordsBlue',
            Condition='BlueCondition',
            HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
            RecordSets=[
                r53.RecordSet(
                    'dnsTileServersBlue',
                    AliasTarget=r53.AliasTarget(
                        GetAtt(worker_lb, 'CanonicalHostedZoneNameID'),
                        GetAtt(worker_lb, 'DNSName'),
                        True
                    ),
                    Name=Join('', ['blue-workers.',
                                   Ref(self.public_hosted_zone_name), '.']),
                    Type='A'
                )
            ]
        ))

        self.add_resource(r53.RecordSetGroup(
            'dnsPublicRecordsGreen',
            Condition='GreenCondition',
            HostedZoneName=Join('', [Ref(self.public_hosted_zone_name), '.']),
            RecordSets=[
                r53.RecordSet(
                    'dnsTileServersGreen',
                    AliasTarget=r53.AliasTarget(
                        GetAtt(worker_lb, 'CanonicalHostedZoneNameID'),
                        GetAtt(worker_lb, 'DNSName'),
                        True
                    ),
                    Name=Join('', ['green-workers.',
                                   Ref(self.public_hosted_zone_name), '.']),
                    Type='A'
                )
            ]
        ))

    def get_tags(self, **kwargs):
        """Helper method to return Troposphere tags + default tags

        Args:
          **kwargs: arbitrary keyword arguments to be used as tags
        """
        kwargs.update(self.default_tags)
        return Tags(**kwargs)
#!/usr/bin/env python
"""Merge per-chunk gemini SQLite databases into a single main database.

Each chunk database has the same schema as the main database; merging
consists of attaching a chunk and copying its tables over, plus summing
the per-sample genotype counts across chunks.
"""
import os
import shutil
import sqlite3
import sys
import uuid

import database as gemini_db


def _copy_chunk_tables(main_curr, chunk_db, commands):
    """Attach chunk_db to the main connection as ``toMerge``, run the
    given SQL commands against it, and always detach afterwards.

    Args:
        main_curr: sqlite3 cursor on the main database.
        chunk_db: path of the chunk database to attach.
        commands: iterable of SQL statements to execute while attached.
    """
    main_curr.execute("attach ? as toMerge", (chunk_db, ))
    try:
        for command in commands:
            main_curr.execute(command)
    finally:
        # Detach even on failure so the main connection stays usable.
        main_curr.execute("detach toMerge")


def append_variant_info(main_curr, chunk_db):
    """
    Append the variant and variant_info data from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "BEGIN TRANSACTION",
        "INSERT INTO variants SELECT * FROM toMerge.variants",
        "INSERT INTO variant_impacts SELECT * FROM toMerge.variant_impacts",
        "END TRANSACTION",
    ])


def append_sample_genotype_counts(main_curr, chunk_db):
    """
    Append the sample_genotype_counts from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO sample_genotype_counts "
        "SELECT * FROM toMerge.sample_genotype_counts",
    ])


def append_sample_info(main_curr, chunk_db):
    """
    Append the sample info from a chunk_db
    to the main database.
    """
    # The samples table is created here (empty clone of the chunk's table)
    # rather than by gemini_db.create_tables, then populated.
    _copy_chunk_tables(main_curr, chunk_db, [
        "create table samples as select * from toMerge.samples where 1=0",
        "INSERT INTO samples SELECT * FROM toMerge.samples",
    ])


def append_resource_info(main_curr, chunk_db):
    """
    Append the resource info from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO resources SELECT * FROM toMerge.resources",
    ])


def append_version_info(main_curr, chunk_db):
    """
    Append the version info from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO version SELECT * FROM toMerge.version",
    ])


def append_vcf_header(main_curr, chunk_db):
    """
    Append the vcf_header from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO vcf_header SELECT * FROM toMerge.vcf_header",
    ])


def append_gene_summary(main_curr, chunk_db):
    """
    Append the gene_summary from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO gene_summary SELECT * FROM toMerge.gene_summary",
    ])


def append_gene_detailed(main_curr, chunk_db):
    """
    Append the gene_detailed from a chunk_db
    to the main database.
    """
    _copy_chunk_tables(main_curr, chunk_db, [
        "INSERT INTO gene_detailed SELECT * FROM toMerge.gene_detailed",
    ])


def update_sample_genotype_counts(main_curr, chunk_db):
    """
    Update the main sample_genotype_counts table with the counts
    observed in one of the chunked databases (chunk_db)
    """
    curr_db_conn = sqlite3.connect(chunk_db)
    try:
        curr_db_conn.isolation_level = None
        curr_db_conn.row_factory = sqlite3.Row
        curr_db_curr = curr_db_conn.cursor()

        cmd = "SELECT sample_id, num_hom_ref, \
                      num_het, num_hom_alt, \
                      num_unknown FROM sample_genotype_counts"
        curr_db_curr.execute(cmd)

        # Accumulate each chunk's per-sample counts into the main table.
        for row in curr_db_curr:
            main_curr.execute("""UPDATE sample_genotype_counts
                              SET num_hom_ref = num_hom_ref + ?,
                                  num_het = num_het + ?,
                                  num_hom_alt = num_hom_alt + ?,
                                  num_unknown = num_unknown + ?
                              WHERE sample_id= ? """,
                              (row['num_hom_ref'],
                               row['num_het'],
                               row['num_hom_alt'],
                               row['num_unknown'],
                               row['sample_id']))
        curr_db_curr.close()
    finally:
        # Close the chunk connection; the original leaked it.
        curr_db_conn.close()


def merge_db_chunks(args):
    """Build args.db from scratch and fold every chunk database into it."""
    # open up a new database
    if os.path.exists(args.db):
        os.remove(args.db)

    main_conn = sqlite3.connect(args.db)
    main_conn.isolation_level = None
    main_curr = main_conn.cursor()
    main_curr.execute('PRAGMA synchronous = OFF')
    main_curr.execute('PRAGMA journal_mode=MEMORY')

    # create the gemini database tables for the new DB
    gemini_db.create_tables(main_curr)

    databases = list(args.chunkdbs)

    for idx, database in enumerate(databases):
        db = database[0]

        append_variant_info(main_curr, db)

        # Tables that are identical across chunks are copied only from the
        # first chunk; genotype counts are summed across the rest.
        if idx == 0:
            append_sample_genotype_counts(main_curr, db)
            append_sample_info(main_curr, db)
            append_resource_info(main_curr, db)
            append_version_info(main_curr, db)
            append_vcf_header(main_curr, db)
            append_gene_summary(main_curr, db)
            append_gene_detailed(main_curr, db)
        else:
            update_sample_genotype_counts(main_curr, db)

    if args.index:
        gemini_db.create_indices(main_curr)

    main_conn.commit()
    main_curr.close()


def merge_chunks(parser, args):
    """Merge chunk databases, retrying once via tempdir copies.

    The second attempt works around SQLite locking failures on NFS by
    copying the chunks (and writing the output) in args.tempdir, then
    moving the result into place.
    """
    for try_count in range(2):
        try:
            if try_count > 0:
                tmp_dbs = [os.path.join(args.tempdir, "%s.db" % uuid.uuid4())
                           for _ in args.chunkdbs]
                for chunk_db, tmp_db in zip(args.chunkdbs, tmp_dbs):
                    shutil.copyfile(chunk_db[0], tmp_db)
                    chunk_db[0] = tmp_db
                output_db = args.db
                args.db = os.path.join(args.tempdir, "%s.db" % uuid.uuid4())

            merge_db_chunks(args)

            if try_count > 0:
                shutil.move(args.db, output_db)
                for tmp_db in tmp_dbs:
                    os.remove(tmp_db)
            break
        # BUGFIX: the original used the Python-2-only ``except X, e`` form,
        # which is a SyntaxError on Python 3; ``except X as e`` works on both.
        except sqlite3.OperationalError as e:
            sys.stderr.write("sqlite3.OperationalError: %s\n" % e)
    else:
        raise Exception(("Attempted workaround for SQLite locking issue on NFS "
                         "drives has failed. One possible reason is that the temp directory "
                         "%s is also on an NFS drive.") % args.tempdir)
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import logging
import time
import threading

from botocore.vendored.requests.sessions import Session
from botocore.vendored.requests.utils import get_environ_proxies
from botocore.vendored.requests.exceptions import ConnectionError
from botocore.vendored import six

from botocore.awsrequest import create_request_object
from botocore.exceptions import UnknownEndpointError
from botocore.exceptions import EndpointConnectionError
from botocore.exceptions import ConnectionClosedError
from botocore.compat import filter_ssl_warnings
from botocore.utils import is_valid_endpoint_url
from botocore.hooks import first_non_none_response
from botocore.response import StreamingBody
from botocore import parsers


logger = logging.getLogger(__name__)
# Default per-request timeout in seconds for the underlying HTTP session.
DEFAULT_TIMEOUT = 60

filter_ssl_warnings()

try:
    # Optional: route urllib3 through pyOpenSSL when available (older
    # Pythons without SNI support); silently skipped otherwise.
    from botocore.vendored.requests.packages.urllib3.contrib import pyopenssl
    pyopenssl.extract_from_urllib3()
except ImportError:
    pass


def convert_to_response_dict(http_response, operation_model):
    """Convert an HTTP response object to a request dict.

    This converts the requests library's HTTP response object to
    a dictionary.

    :type http_response: botocore.vendored.requests.model.Response
    :param http_response: The HTTP response from an AWS service
        request.

    :rtype: dict
    :return: A response dictionary which will contain the following keys:
        * headers (dict)
        * status_code (int)
        * body (string or file-like object)

    """
    response_dict = {
        'headers': http_response.headers,
        'status_code': http_response.status_code,
    }
    if response_dict['status_code'] >= 300:
        # Error responses: read the whole body so the error parser can use it.
        response_dict['body'] = http_response.content
    elif operation_model.has_streaming_output:
        # Streaming operations: hand the caller a lazily-read body wrapper.
        response_dict['body'] = StreamingBody(
            http_response.raw,
            response_dict['headers'].get('content-length'))
    else:
        response_dict['body'] = http_response.content
    return response_dict


class PreserveAuthSession(Session):
    # Overrides requests' default behavior of stripping auth headers on
    # redirect, so signed requests survive redirects unchanged.
    def rebuild_auth(self, prepared_request, response):
        pass


class Endpoint(object):
    """
    Represents an endpoint for a particular service in a specific
    region.  Only an endpoint can make requests.

    :ivar service: The Service object that describes this endpoints
        service.
    :ivar host: The fully qualified endpoint hostname.
    :ivar session: The session object.
    """

    def __init__(self, host, endpoint_prefix, event_emitter, proxies=None,
                 verify=True, timeout=DEFAULT_TIMEOUT,
                 response_parser_factory=None):
        self._endpoint_prefix = endpoint_prefix
        self._event_emitter = event_emitter
        self.host = host
        self.verify = verify
        if proxies is None:
            proxies = {}
        self.proxies = proxies
        self.http_session = PreserveAuthSession()
        self.timeout = timeout
        logger.debug('Setting %s timeout as %s', endpoint_prefix, self.timeout)
        self._lock = threading.Lock()
        if response_parser_factory is None:
            response_parser_factory = parsers.ResponseParserFactory()
        self._response_parser_factory = response_parser_factory

    def __repr__(self):
        return '%s(%s)' % (self._endpoint_prefix, self.host)

    def make_request(self, operation_model, request_dict):
        """Send the request described by request_dict for the given operation,
        retrying as needed, and return the (http_response, parsed) result."""
        logger.debug("Making request for %s (verify_ssl=%s) with params: %s",
                     operation_model, self.verify, request_dict)
        return self._send_request(request_dict, operation_model)

    def create_request(self, params, operation_model=None):
        """Build and prepare an AWSRequest from ``params``, emitting the
        request-created event (which is where signing hooks run)."""
        request = create_request_object(params)
        if operation_model:
            event_name = 'request-created.{endpoint_prefix}.{op_name}'.format(
                endpoint_prefix=self._endpoint_prefix,
                op_name=operation_model.name)
            self._event_emitter.emit(event_name, request=request,
                                     operation_name=operation_model.name)
        prepared_request = self.prepare_request(request)
        return prepared_request

    def _encode_headers(self, headers):
        # In place encoding of headers to utf-8 if they are unicode.
        for key, value in headers.items():
            if isinstance(value, six.text_type):
                # We have to do this because requests will
                # place the x-amz-content-sha256 in the headers
                # of a prepared request as unicode, and httplib
                # on py2 will fail on it.
                headers[key] = value.encode('utf-8')

    def prepare_request(self, request):
        self._encode_headers(request.headers)
        return request.prepare()

    def _send_request(self, request_dict, operation_model):
        """Send with retries: each retry resets the body stream and rebuilds
        the request so a fresh signature is computed."""
        attempts = 1
        request = self.create_request(request_dict, operation_model)
        success_response, exception = self._get_response(
            request, operation_model, attempts)
        while self._needs_retry(attempts, operation_model,
                                request_dict, success_response, exception):
            attempts += 1
            # If there is a stream associated with the request, we need
            # to reset it before attempting to send the request again.
            # This will ensure that we resend the entire contents of the
            # body.
            request.reset_stream()
            # Create a new request when retried (including a new signature).
            request = self.create_request(
                request_dict, operation_model)
            success_response, exception = self._get_response(
                request, operation_model, attempts)
        if exception is not None:
            # Retries exhausted and still failing: surface the last error.
            raise exception
        else:
            return success_response

    def _get_response(self, request, operation_model, attempts):
        # This will return a tuple of (success_response, exception)
        # and success_response is itself a tuple of
        # (http_response, parsed_dict).
        # If an exception occurs then the success_response is None.
        # If no exception occurs then exception is None.
        try:
            logger.debug("Sending http request: %s", request)
            http_response = self.http_session.send(
                request, verify=self.verify,
                stream=operation_model.has_streaming_output,
                proxies=self.proxies, timeout=self.timeout)
        except ConnectionError as e:
            # For a connection error, if it looks like it's a DNS
            # lookup issue, 99% of the time this is due to a misconfigured
            # region/endpoint so we'll raise a more specific error message
            # to help users.
            logger.debug("ConnectionError received when sending HTTP request.",
                         exc_info=True)
            if self._looks_like_dns_error(e):
                endpoint_url = e.request.url
                better_exception = EndpointConnectionError(
                    endpoint_url=endpoint_url, error=e)
                return (None, better_exception)
            elif self._looks_like_bad_status_line(e):
                better_exception = ConnectionClosedError(
                    endpoint_url=e.request.url, request=e.request)
                return (None, better_exception)
            else:
                return (None, e)
        except Exception as e:
            # Any other failure is captured and returned so the retry
            # machinery in _send_request can decide what to do with it.
            logger.debug("Exception received when sending HTTP request.",
                         exc_info=True)
            return (None, e)
        # This returns the http_response and the parsed_data.
response_dict = convert_to_response_dict(http_response, operation_model) parser = self._response_parser_factory.create_parser( operation_model.metadata['protocol']) parsed_response = parser.parse( response_dict, operation_model.output_shape) return (http_response, parsed_response), None def _looks_like_dns_error(self, e): return 'gaierror' in str(e) and e.request is not None def _looks_like_bad_status_line(self, e): return 'BadStatusLine' in str(e) and e.request is not None def _needs_retry(self, attempts, operation_model, request_dict, response=None, caught_exception=None): event_name = 'needs-retry.%s.%s' % (self._endpoint_prefix, operation_model.name) responses = self._event_emitter.emit( event_name, response=response, endpoint=self, operation=operation_model, attempts=attempts, caught_exception=caught_exception, request_dict=request_dict) handler_response = first_non_none_response(responses) if handler_response is None: return False else: # Request needs to be retried, and we need to sleep # for the specified number of times. logger.debug("Response received to retry, sleeping for " "%s seconds", handler_response) time.sleep(handler_response) return True class EndpointCreator(object): def __init__(self, event_emitter): self._event_emitter = event_emitter def create_endpoint(self, service_model, region_name, endpoint_url, verify=None, response_parser_factory=None, timeout=DEFAULT_TIMEOUT): if not is_valid_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) return Endpoint( endpoint_url, endpoint_prefix=service_model.endpoint_prefix, event_emitter=self._event_emitter, proxies=self._get_proxies(endpoint_url), verify=self._get_verify_value(verify), timeout=timeout, response_parser_factory=response_parser_factory) def _get_proxies(self, url): # We could also support getting proxies from a config file, # but for now proxy support is taken from the environment. 
return get_environ_proxies(url) def _get_verify_value(self, verify): # This is to account for: # https://github.com/kennethreitz/requests/issues/1436 # where we need to honor REQUESTS_CA_BUNDLE because we're creating our # own request objects. # First, if verify is not None, then the user explicitly specified # a value so this automatically wins. if verify is not None: return verify # Otherwise use the value from REQUESTS_CA_BUNDLE, or default to # True if the env var does not exist. return os.environ.get('REQUESTS_CA_BUNDLE', True)
"""File format classes for the games The Lost Vikings (1992) and Blackthorne (1994). """ import logging logger = logging.getLogger( __name__ ) from mrcrowbar import models as mrc from mrcrowbar import utils class Config( mrc.Block ): pad1 = mrc.UInt16_LE( 0x00 ) sound_type = mrc.UInt16_LE( 0x02 ) sound_port = mrc.UInt16_LE( 0x04 ) sound_irq = mrc.UInt16_LE( 0x06 ) sound_dma = mrc.UInt16_LE( 0x08 ) pad2 = mrc.UInt16_LE( 0x0a ) unk1 = mrc.UInt16_LE( 0x0c ) pad3 = mrc.UInt16_LE( 0x0e ) pad4 = mrc.UInt16_LE( 0x10 ) music_type = mrc.UInt16_LE( 0x12 ) music_port = mrc.UInt16_LE( 0x14 ) pad5 = mrc.UInt16_LE( 0x16 ) pad6 = mrc.UInt16_LE( 0x18 ) pad7 = mrc.UInt16_LE( 0x1a ) pad8 = mrc.UInt16_LE( 0x1c ) pad9 = mrc.UInt16_LE( 0x1e ) class Data( mrc.Block ): count = mrc.UInt32_LE( 0x00 ) offsets = mrc.UInt32_LE( 0x04, bitmask=b"\xff\xff\xff\x3f", count=mrc.Ref( "count" ) ) data_raw = mrc.Bytes( mrc.EndOffset( "offsets" ) ) def __init__( self, *args, **kwargs ): super().__init__( *args, **kwargs ) self.data = mrc.LinearStore( parent=self, source=mrc.Ref( "data_raw" ), block_klass=mrc.Unknown, offsets=mrc.Ref( "offsets" ), base_offset=mrc.EndOffset( "offsets", neg=True ), ) class Tile( mrc.Block ): index = mrc.UInt16_LE( 0x00 ) class TileMap( mrc.Block ): tiles = mrc.BlockField( Tile, 0x00, stream=True ) class TileQuad( mrc.Block ): index = mrc.Bits16( 0x00, 0xffc0, endian="little" ) flip_v = mrc.Bits16( 0x00, 0x0020, endian="little" ) flip_h = mrc.Bits16( 0x00, 0x0010, endian="little" ) unk1 = mrc.Bits16( 0x00, 0x000f, endian="little" ) @property def repr( self ): return "index: {}, flip_h: {}, flip_v: {}".format( self.index, self.flip_h, self.flip_v ) class MetaTile( mrc.Block ): top_left = mrc.BlockField( TileQuad, 0x00 ) top_right = mrc.BlockField( TileQuad, 0x02 ) bottom_left = mrc.BlockField( TileQuad, 0x04 ) bottom_right = mrc.BlockField( TileQuad, 0x06 ) @property def repr( self ): return "top_left: {}, top_right: {}, bottom_left: {}, bottom_right: {}".format( 
self.top_left, self.top_right, self.bottom_left, self.bottom_right ) class MetaTileMap( mrc.Block ): metatiles = mrc.BlockField( MetaTile, 0x00, stream=True ) class LZSS( mrc.Transform ): def import_data( self, buffer ): output_size = utils.from_uint32_le( buffer[:4] ) edx = output_size data_p = 4 bx = 0 cx = 0 work_ram = bytearray( 0x1000 ) output = bytearray() while True: cx >>= 1 if cx < 0x100: logger.debug( "@ new pattern: {:08b}".format( buffer[data_p] ) ) cx = buffer[data_p] + 0xff00 data_p += 1 if not (cx & 1): info = buffer[data_p] + (buffer[data_p + 1] << 8) data_p += 2 work_p = info & 0xfff count = (info >> 12) + 3 logger.debug( "# work_ram[0x{:04x}:0x{:04x}] = work_ram[0x{:04x}:0x{:04x}]".format( bx, (bx + count) & 0xfff, work_p, (work_p + count) & 0xfff ) ) logger.debug( "! output[0x{:04x}:0x{:04x}] = work_ram[0x{:04x}:0x{:04x}]".format( len( output ), len( output ) + count, work_p, (work_p + count) & 0xfff, ) ) for i in range( count ): # loc_103C4 dat = work_ram[work_p] work_ram[bx] = dat work_p += 1 work_p &= 0xfff bx += 1 bx &= 0xfff output.append( dat ) edx -= 1 if edx == 0: break if edx == 0: break else: logger.debug( "# work_ram[0x{:04x}] = buffer[0x{:04x}]".format( bx, data_p ) ) logger.debug( "! 
output[0x{:04x}] = buffer[0x{:04x}]".format( len( output ), data_p ) ) dat = buffer[data_p] work_ram[bx] = dat data_p += 1 bx += 1 bx &= 0xfff output.append( dat ) edx -= 1 if edx == 0: break logger.info( "{} - output_size: {:08x}, output_end: {:08x}, input_size: {:08x}, input_end: {:08x}".format( self, output_size, len( output ), len( buffer ), data_p ) ) return mrc.TransformResult( payload=bytes( output ), end_offset=data_p ) class Interlace( mrc.Transform ): def import_data( self, buffer ): assert len( buffer ) % 64 == 0 result = bytearray( len( buffer ) ) for i in range( 0, len( buffer ), 64 ): deint = buffer[i : i + 64 : 2] + buffer[i + 1 : i + 64 : 2] result[i : i + 64] = bytes( [deint[8 * (j % 8) + (j // 8)] for j in range( 64 )] ) return mrc.TransformResult( payload=bytes( result ), end_offset=len( result ) ) # in BLACK.EXE at 0x14ef8. BLACKTHORNE_PALETTE_ENTRIES = [ 0x4f, 0x8c, 0x9d, 0x54, 0x109, 0x108, 0x10a, 0x55, 0x57, 0x52, 0x4e, 0x58, 0x116, 0x50, 0x48, 0x124, 0x7c, 0x53, 0x56, 0x51, 0x10b, 0x128, 0x129, 0x125, 0x12d, 0x12e, 0x126, 0x127, 0x12f, 0x12b, 0x12a, 0x12c, 0x130, ]
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Yang Gao <younggao1994@gmail.com>
#
'''
Analytical electron-phonon matrix for restricted Kohn-Sham DFT.
'''
import numpy as np
from pyscf import lib
from pyscf.hessian import rks as rks_hess
from pyscf.hessian import rhf as rhf_hess
from pyscf.grad import rks as rks_grad
from pyscf.dft import numint
from pyscf.eph import rhf as rhf_eph
from pyscf.data.nist import MP_ME

CUTOFF_FREQUENCY = rhf_eph.CUTOFF_FREQUENCY
KEEP_IMAG_FREQUENCY = rhf_eph.KEEP_IMAG_FREQUENCY

def _get_vxc_deriv1(hessobj, mo_coeff, mo_occ, max_memory):
    """First-order XC potential derivative w.r.t. nuclear displacement.

    This function is slightly different from hessian.rks._get_vxc_deriv1
    in that the <\\nabla u|Vxc|v> term is removed.

    Returns an array of shape (natm, 3, nao, nao).
    """
    mol = hessobj.mol
    mf = hessobj.base
    if hessobj.grids is not None:
        grids = hessobj.grids
    else:
        grids = mf.grids
    if grids.coords is None:
        grids.build(with_non0tab=True)

    nao, nmo = mo_coeff.shape
    ni = mf._numint
    xctype = ni._xc_type(mf.xc)
    aoslices = mol.aoslice_by_atom()
    shls_slice = (0, mol.nbas)
    ao_loc = mol.ao_loc_nr()
    dm0 = mf.make_rdm1(mo_coeff, mo_occ)

    vmat = np.zeros((mol.natm,3,nao,nao))
    # Leave room for vmat itself when sizing the numint batches.
    max_memory = max(2000, max_memory-vmat.size*8/1e6)
    if xctype == 'LDA':
        ao_deriv = 1
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
            rho = ni.eval_rho2(mol, ao[0], mo_coeff, mo_occ, mask, 'LDA')
            vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
            frr = fxc[0]
            ao_dm0 = numint._dot_ao_dm(mol, ao[0], dm0, mask, shls_slice, ao_loc)
            for ia in range(mol.natm):
                p0, p1 = aoslices[ia][2:]
                # First-order density response from moving atom ia.
                rho1 = np.einsum('xpi,pi->xp', ao[1:,:,p0:p1], ao_dm0[:,p0:p1])
                aow = np.einsum('pi,xp->xpi', ao[0], weight*frr*rho1)
                rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)
            ao_dm0 = aow = None
        for ia in range(mol.natm):
            # Symmetrize (the grid integration only fills one triangle).
            vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)

    elif xctype == 'GGA':
        ao_deriv = 2
        # v_ip = np.zeros((3,nao,nao))
        for ao, mask, weight, coords \
                in ni.block_loop(mol, grids, nao, ao_deriv, max_memory):
            rho = ni.eval_rho2(mol, ao[:4], mo_coeff, mo_occ, mask, 'GGA')
            vxc, fxc = ni.eval_xc(mf.xc, rho, 0, deriv=2)[1:3]
            wv = numint._rks_gga_wv0(rho, vxc, weight)
            # rks_grad._gga_grad_sum_(v_ip, mol, ao, wv, mask, ao_loc)
            ao_dm0 = [numint._dot_ao_dm(mol, ao[i], dm0, mask, shls_slice, ao_loc)
                      for i in range(4)]
            for ia in range(mol.natm):
                # dR_rho1 is overwritten in place with the GGA weights.
                wv = dR_rho1 = rks_hess._make_dR_rho1(ao, ao_dm0, ia, aoslices)
                wv[0] = numint._rks_gga_wv1(rho, dR_rho1[0], vxc, fxc, weight)
                wv[1] = numint._rks_gga_wv1(rho, dR_rho1[1], vxc, fxc, weight)
                wv[2] = numint._rks_gga_wv1(rho, dR_rho1[2], vxc, fxc, weight)
                aow = np.einsum('npi,Xnp->Xpi', ao[:4], wv)
                rks_grad._d1_dot_(vmat[ia], mol, aow, ao[0], mask, ao_loc, True)
            ao_dm0 = aow = None
        for ia in range(mol.natm):
            vmat[ia] = -vmat[ia] - vmat[ia].transpose(0,2,1)
    elif xctype == 'MGGA':
        raise NotImplementedError('meta-GGA')
    return vmat

def get_eph(ephobj, mo1, omega, vec, mo_rep):
    """Assemble the electron-phonon coupling matrix for RKS.

    mo1 may be the CPHF solution itself or a chkfile path to load it from.
    omega/vec are the phonon frequencies and polarization vectors; when
    mo_rep is True the result is rotated into the MO basis.
    Returns mat[J, a, b] with J running over phonon modes.
    """
    if isinstance(mo1, str):
        mo1 = lib.chkfile.load(mo1, 'scf_mo1')
        mo1 = dict([(int(k), mo1[k]) for k in mo1])

    mol = ephobj.mol
    mf = ephobj.base
    ni = mf._numint
    ni.libxc.test_deriv_order(mf.xc, 2, raise_error=True)
    omg, alpha, hyb = ni.rsh_and_hybrid_coeff(mf.xc, spin=mol.spin)

    vnuc_deriv = ephobj.vnuc_generator(mol)
    aoslices = mol.aoslice_by_atom()

    vind = rhf_eph.rhf_deriv_generator(mf, mf.mo_coeff, mf.mo_occ)
    mocc = mf.mo_coeff[:,mf.mo_occ>0]
    dm0 = np.dot(mocc, mocc.T) * 2

    natoms = mol.natm
    nao = mol.nao_nr()

    mem_now = lib.current_memory()[0]
    max_memory = max(2000, mf.max_memory*.9-mem_now)
    vxc1ao = _get_vxc_deriv1(ephobj, mf.mo_coeff, mf.mo_occ, max_memory)

    vcore = []
    for ia in range(natoms):
        h1 = vnuc_deriv(ia)
        v1 = vind(mo1[ia])
        shl0, shl1, p0, p1 = aoslices[ia]
        shls_slice = (shl0, shl1) + (0, mol.nbas)*3
        if abs(hyb)>1e-10:
            # Hybrid functional: include HF exchange contribution.
            vj1, vk1 = \
                rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                 ['ji->s2kl', -dm0[:,p0:p1],  #vj1
                                  'li->s1kj', -dm0[:,p0:p1]], #vk1
                                 shls_slice=shls_slice)
            veff = vj1 - hyb * .5 * vk1
            if abs(omg) > 1e-10:
                # Range-separated hybrid: long-range exchange correction.
                with mol.with_range_coulomb(omg):
                    vk1 = \
                        rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                         ['li->s1kj', -dm0[:,p0:p1]], # vk1
                                         shls_slice=shls_slice)
                veff -= (alpha-hyb) * .5 * vk1
        else:
            # Pure functional: Coulomb only.
            vj1 = rhf_hess._get_jk(mol, 'int2e_ip1', 3, 's2kl',
                                   ['ji->s2kl', -dm0[:,p0:p1]], # vj1
                                   shls_slice=shls_slice)
            veff = vj1[0]
        vtot = h1 + v1 + veff + vxc1ao[ia] + veff.transpose(0,2,1)
        vcore.append(vtot)
    vcore = np.asarray(vcore).reshape(-1,nao,nao)

    # Contract with the frequency/mass-weighted polarization vectors.
    mass = mol.atom_mass_list() * MP_ME
    vec = rhf_eph._freq_mass_weighted_vec(vec, omega, mass)
    mat = np.einsum('xJ,xuv->Juv', vec, vcore, optimize=True)
    if mo_rep:
        mat = np.einsum('Juv,up,vq->Jpq', mat, mf.mo_coeff.conj(), mf.mo_coeff, optimize=True)
    return mat


class EPH(rks_hess.Hessian):
    '''EPH for restricted DFT

    Attributes:
        cutoff_frequency : float or int
            cutoff frequency in cm-1. Default is 80
        keep_imag_frequency : bool
            Whether to keep imaginary frequencies in the output.  Default is False

    Saved results

        omega : numpy.ndarray
            Vibrational frequencies in au.
        vec : numpy.ndarray
            Polarization vectors of the vibration modes
        eph : numpy.ndarray
            Electron phonon matrix eph[j,a,b] (j in nmodes, a,b in norbs)
    '''
    def __init__(self, scf_method, cutoff_frequency=CUTOFF_FREQUENCY,
                 keep_imag_frequency=KEEP_IMAG_FREQUENCY):
        rks_hess.Hessian.__init__(self, scf_method)
        self.cutoff_frequency = cutoff_frequency
        self.keep_imag_frequency = keep_imag_frequency

    # Shared machinery lives in the RHF implementation; only get_eph is
    # DFT-specific (XC derivative term).
    get_mode = rhf_eph.get_mode
    get_eph = get_eph
    vnuc_generator = rhf_eph.vnuc_generator
    kernel = rhf_eph.kernel

if __name__ == '__main__':
    from pyscf import gto, dft
    mol = gto.M()
    mol.atom = [['O', [0.000000000000,  0.000000002577, 0.868557119905]],
                ['H', [0.000000000000, -1.456050381698, 2.152719488376]],
                ['H', [0.000000000000,  1.456050379121, 2.152719486067]]]
    mol.unit = 'Bohr'
    mol.basis = 'sto3g'
    mol.verbose=4
    mol.build() # this is a pre-computed relaxed geometry

    mf = dft.RKS(mol)
    mf.grids.level=6
    mf.xc = 'b3lyp'
    mf.conv_tol = 1e-16
    mf.conv_tol_grad = 1e-10
    mf.kernel()

    grad = mf.nuc_grad_method().kernel()
    print("Force on the atoms/au:")
    print(grad)

    myeph = EPH(mf)
    eph, omega = myeph.kernel(mo_rep=True)
    print(np.amax(eph))
from __future__ import unicode_literals, division, absolute_import
from builtins import *  # pylint: disable=unused-import, redefined-builtin

import logging
import re

from sqlalchemy import desc

from flexget import plugin
from flexget.event import event
from flexget.entry import Entry
from flexget.manager import Session
from flexget.plugins.filter.series import SeriesTask, Series, Episode, Release, get_latest_release

log = logging.getLogger('next_series_episodes')


class NextSeriesEpisodes(object):
    """
    Emit next episode number from all series configured in this task.

    Supports only 'ep' and 'sequence' mode series.
    """

    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'from_start': {'type': 'boolean', 'default': False},
                    'backfill': {'type': 'boolean', 'default': False}
                },
                'additionalProperties': False
            }
        ]
    }

    def __init__(self):
        # Entries queued for emission on the next task rerun.
        self.rerun_entries = []

    def ep_identifiers(self, season, episode):
        """Return the common textual identifiers for an ep-mode episode."""
        return ['S%02dE%02d' % (season, episode),
                '%dx%02d' % (season, episode)]

    def sequence_identifiers(self, episode):
        """Return textual identifiers (varying zero-padding) for a sequence episode."""
        # Use a set to remove doubles, which will happen depending on number of digits in episode
        return set(['%d' % episode, '%02d' % episode, '%03d' % episode])

    def search_entry(self, series, season, episode, task, rerun=True):
        """Build a search entry for one episode of `series`.

        When `rerun` is True, registers on_search_complete so the task can
        chain to the following episode/season once this one resolves.
        """
        # Extract the alternate names for the series
        alts = [alt.alt_name for alt in series.alternate_names]
        # Also consider series name without parenthetical (year, country) an alternate name
        paren_match = re.match(r'(.+?)( \(.+\))?$', series.name)
        if paren_match.group(2):
            alts.append(paren_match.group(1))
        if series.identified_by == 'ep':
            search_strings = ['%s %s' % (series.name, id) for id in self.ep_identifiers(season, episode)]
            series_id = 'S%02dE%02d' % (season, episode)
            for alt in alts:
                search_strings.extend(['%s %s' % (alt, id) for id in self.ep_identifiers(season, episode)])
        else:
            search_strings = ['%s %s' % (series.name, id) for id in self.sequence_identifiers(episode)]
            series_id = episode
            for alt in alts:
                search_strings.extend(['%s %s' % (alt, id) for id in self.sequence_identifiers(episode)])
        entry = Entry(title=search_strings[0], url='',
                      search_strings=search_strings,
                      series_name=series.name,
                      series_alternate_names=alts,  # Not sure if this field is useful down the road.
                      series_season=season,
                      series_episode=episode,
                      series_id=series_id,
                      series_id_type=series.identified_by)
        if rerun:
            entry.on_complete(self.on_search_complete, task=task,
                              identified_by=series.identified_by)
        return entry

    def on_task_input(self, task, config):
        """Emit one entry per next-wanted episode for every series in this task.

        On reruns the entries queued by on_search_complete are returned
        instead of recomputing from the database.
        """
        if not config:
            return
        if isinstance(config, bool):
            config = {}

        if task.is_rerun:
            # Just return calculated next eps on reruns
            entries = self.rerun_entries
            self.rerun_entries = []
            return entries
        else:
            self.rerun_entries = []

        entries = []
        impossible = {}
        with Session() as session:
            for seriestask in session.query(SeriesTask).filter(SeriesTask.name == task.name).all():
                series = seriestask.series
                if not series:
                    # TODO: How can this happen?
                    log.debug('Found SeriesTask item without series specified. Cleaning up.')
                    session.delete(seriestask)
                    continue

                if series.identified_by not in ['ep', 'sequence']:
                    # Only ep/sequence numbering can be predicted forward.
                    reason = series.identified_by or 'auto'
                    impossible.setdefault(reason, []).append(series.name)
                    continue

                low_season = 0 if series.identified_by == 'ep' else -1

                check_downloaded = not config.get('backfill')
                latest_season = get_latest_release(series, downloaded=check_downloaded)
                if latest_season:
                    latest_season = latest_season.season
                else:
                    latest_season = low_season + 1

                # Walk seasons newest-first; older seasons only matter in backfill mode.
                for season in range(latest_season, low_season, -1):
                    log.trace('Adding episodes for series %s season %d', series.name, season)
                    latest = get_latest_release(series, season=season, downloaded=check_downloaded)
                    if series.begin and (not latest or latest < series.begin):
                        # Nothing seen yet at/after the begin point: start there.
                        entries.append(self.search_entry(series, series.begin.season, series.begin.number, task))
                    elif latest and not config.get('backfill'):
                        entries.append(self.search_entry(series, latest.season, latest.number + 1, task))
                    elif latest:
                        # Backfill: emit every episode of the season we are missing.
                        start_at_ep = 1
                        episodes_this_season = (session.query(Episode).
                                                filter(Episode.series_id == series.id).
                                                filter(Episode.season == season))
                        if series.identified_by == 'sequence':
                            # Don't look for missing too far back with sequence shows
                            start_at_ep = max(latest.number - 10, 1)
                            episodes_this_season = episodes_this_season.filter(Episode.number >= start_at_ep)
                        latest_ep_this_season = episodes_this_season.order_by(desc(Episode.number)).first()
                        downloaded_this_season = (episodes_this_season.join(Episode.releases).
                                                  filter(Release.downloaded == True).all())
                        # Calculate the episodes we still need to get from this season
                        if series.begin and series.begin.season == season:
                            start_at_ep = max(start_at_ep, series.begin.number)
                        eps_to_get = list(range(start_at_ep, latest_ep_this_season.number + 1))
                        for ep in downloaded_this_season:
                            try:
                                eps_to_get.remove(ep.number)
                            except ValueError:
                                pass
                        entries.extend(self.search_entry(series, season, x, task, rerun=False) for x in eps_to_get)
                        # If we have already downloaded the latest known episode, try the next episode
                        if latest_ep_this_season.releases:
                            entries.append(self.search_entry(series, season, latest_ep_this_season.number + 1, task))
                    else:
                        if config.get('from_start') or config.get('backfill'):
                            entries.append(self.search_entry(series, season, 1, task))
                        else:
                            log.verbose('Series `%s` has no history. Set begin option, '
                                        'or use CLI `series begin` '
                                        'subcommand to set first episode to emit' % series.name)
                            break
                    # Skip older seasons if we are not in backfill mode
                    if not config.get('backfill'):
                        break
                    # Don't look for seasons older than begin ep
                    if series.begin and series.begin.season >= season:
                        break

        for reason, series in impossible.items():
            log.verbose('Series `%s` with identified_by value `%s` are not supported. ',
                        ', '.join(series), reason)

        return entries

    def on_search_complete(self, entry, task=None, identified_by=None, **kwargs):
        """Decides whether we should look for next ep/season based on whether
        we found/accepted any episodes."""
        with Session() as session:
            series = session.query(Series).filter(Series.name == entry['series_name']).first()
            latest = get_latest_release(series)
            db_release = (session.query(Release).join(Release.episode).join(Episode.series).
                          filter(Series.name == entry['series_name']).
                          filter(Episode.season == entry['series_season']).
                          filter(Episode.number == entry['series_episode']).first())

            if entry.accepted:
                log.debug('%s %s was accepted, rerunning to look for next ep.' %
                          (entry['series_name'], entry['series_id']))
                self.rerun_entries.append(self.search_entry(series,
                                                            entry['series_season'],
                                                            entry['series_episode'] + 1,
                                                            task))
                # Increase rerun limit by one if we have matches, this way
                # we keep searching as long as matches are found!
                # TODO: this should ideally be in discover so it would be more generic
                task.max_reruns += 1
                task.rerun(plugin='next_series_episodes', reason='Look for next episode')
            elif db_release:
                # There are known releases of this episode, but none were accepted
                return
            elif latest and identified_by == 'ep' and (
                            entry['series_season'] == latest.season and
                            entry['series_episode'] == latest.number + 1):
                # We searched for next predicted episode of this season unsuccessfully, try the next season
                self.rerun_entries.append(self.search_entry(series, latest.season + 1, 1, task))
                log.debug('%s %s not found, rerunning to look for next season' %
                          (entry['series_name'], entry['series_id']))
                task.rerun(plugin='next_series_episodes', reason='Look for next season')


@event('plugin.register')
def register_plugin():
    plugin.register(NextSeriesEpisodes, 'next_series_episodes', api_ver=2)
# -*- coding: utf-8 -*- # Copyright 2015 Metaswitch Networks # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import subprocess from datetime import datetime import json import logging from calico.felix.selectors import parse_selector from etcd import EtcdResult, EtcdException import etcd from gevent.event import Event import gevent from mock import Mock, call, patch, ANY from calico.datamodel_v1 import EndpointId, TieredPolicyId from calico.etcddriver.protocol import MessageReader, MessageWriter, \ MSG_TYPE_CONFIG_LOADED, MSG_TYPE_STATUS, STATUS_RESYNC, MSG_KEY_STATUS, \ MSG_TYPE_UPDATE, MSG_KEY_KEY, MSG_KEY_VALUE, MSG_KEY_TYPE, \ MSG_KEY_HOST_CONFIG, MSG_KEY_GLOBAL_CONFIG, MSG_TYPE_CONFIG, \ MSG_KEY_LOG_FILE, MSG_KEY_SEV_FILE, MSG_KEY_SEV_SCREEN, MSG_KEY_SEV_SYSLOG, \ STATUS_IN_SYNC, SocketClosed from calico.felix.config import Config from calico.felix.futils import IPV4, IPV6 from calico.felix.ipsets import IpsetActor from calico.felix.fetcd import (_FelixEtcdWatcher, EtcdAPI, die_and_restart, EtcdStatusReporter, combine_statuses) from calico.felix.splitter import UpdateSplitter from calico.felix.test.base import BaseTestCase, JSONString _log = logging.getLogger(__name__) patch.object = getattr(patch, "object") # Keep PyCharm linter happy. 
VALID_ENDPOINT = { "state": "active", "name": "tap1234", "mac": "aa:bb:cc:dd:ee:ff", "profile_ids": ["prof1"], "ipv4_nets": [ "10.0.0.1/32", ], "ipv6_nets": [ "dead::beef/128" ] } ENDPOINT_STR = json.dumps(VALID_ENDPOINT) RULES = { "inbound_rules": [], "outbound_rules": [], } RULES_STR = json.dumps(RULES) TAGS = ["a", "b"] TAGS_STR = json.dumps(TAGS) ETCD_ADDRESS = 'localhost:4001' POLICY_ID = TieredPolicyId("tiername", "polname") POLICY = { "selector": "a == 'b'", "inbound_rules": [], "outbound_rules": [], "order": 10, } SELECTOR = parse_selector(POLICY["selector"]) POLICY_PARSED = { "inbound_rules": [], "outbound_rules": [], } POLICY_STR = json.dumps(POLICY) class TestEtcdAPI(BaseTestCase): def setUp(self): super(TestEtcdAPI, self).setUp() self.m_config = Mock(spec=Config) self.m_config.ETCD_ADDRS = [ETCD_ADDRESS] self.m_config.ETCD_SCHEME = "http" self.m_config.ETCD_KEY_FILE = None self.m_config.ETCD_CERT_FILE = None self.m_config.ETCD_CA_FILE = None self.m_hosts_ipset = Mock(spec=IpsetActor) with patch("calico.felix.fetcd._FelixEtcdWatcher", autospec=True) as m_etcd_watcher: with patch("gevent.spawn", autospec=True) as m_spawn: self.api = EtcdAPI(self.m_config, self.m_hosts_ipset) self.m_spawn = m_spawn self.m_etcd_watcher = m_etcd_watcher.return_value self.m_etcd_watcher.load_config = Mock(spec=Event) self.m_etcd_watcher.begin_polling = Mock(spec=Event) self.m_etcd_watcher.configured = Mock(spec=Event) def test_create(self): self.m_etcd_watcher.assert_has_calls([ call.link(self.api._on_worker_died), call.start(), ]) self.m_spawn.assert_has_calls([ call(self.api._periodically_resync), call(self.api._periodically_resync).link_exception( self.api._on_worker_died) ]) @patch("gevent.sleep", autospec=True) def test_periodic_resync_mainline(self, m_sleep): self.m_config.RESYNC_INTERVAL = 10 m_configured = Mock(spec=Event) self.m_etcd_watcher.configured = m_configured with patch.object(self.api, "force_resync") as m_force_resync: m_force_resync.side_effect = 
ExpectedException() self.assertRaises(ExpectedException, self.api._periodically_resync) m_configured.wait.assert_called_once_with() m_sleep.assert_called_once_with(ANY) sleep_time = m_sleep.call_args[0][0] self.assertTrue(sleep_time >= 10) self.assertTrue(sleep_time <= 12) @patch("gevent.sleep", autospec=True) def test_periodic_resync_disabled(self, m_sleep): self.m_config.RESYNC_INTERVAL = 0 self.m_etcd_watcher.configured = Mock(spec=Event) with patch.object(self.api, "force_resync") as m_force_resync: m_force_resync.side_effect = Exception() self.api._periodically_resync() def test_force_resync(self): self.m_config.REPORT_ENDPOINT_STATUS = True with patch.object(self.api, "status_reporter") as m_status_rep: self.api.force_resync(async=True) self.step_actor(self.api) m_status_rep.resync.assert_called_once_with(async=True) self.assertTrue(self.m_etcd_watcher.resync_requested) def test_load_config(self): result = self.api.load_config(async=True) self.step_actor(self.api) conf = result.get() self.assertEqual(conf, self.m_etcd_watcher.configured) self.m_etcd_watcher.load_config.set.assert_called_once_with() def test_start_watch(self): m_splitter = Mock() self.api.load_config(async=True) result = self.api.start_watch(m_splitter, async=True) self.step_actor(self.api) self.m_etcd_watcher.load_config.set.assert_called_once_with() self.assertEqual(self.m_etcd_watcher.splitter, m_splitter) self.m_etcd_watcher.begin_polling.set.assert_called_once_with() @patch("sys.exit", autospec=True) def test_on_worker_died(self, m_exit): glet = gevent.spawn(lambda: None) glet.link(self.api._on_worker_died) glet.join(1) m_exit.assert_called_once_with(1) class ExpectedException(Exception): pass class TestEtcdWatcher(BaseTestCase): def setUp(self): super(TestEtcdWatcher, self).setUp() self.m_config = Mock() self.m_config.HOSTNAME = "hostname" self.m_config.IFACE_PREFIX = "tap" self.m_config.ETCD_ADDRS = [ETCD_ADDRESS] self.m_config.ETCD_SCHEME = "http" self.m_config.ETCD_KEY_FILE = None 
self.m_config.ETCD_CERT_FILE = None self.m_config.ETCD_CA_FILE = None self.m_hosts_ipset = Mock(spec=IpsetActor) self.m_api = Mock(spec=EtcdAPI) self.m_status_rep = Mock(spec=EtcdStatusReporter) self.watcher = _FelixEtcdWatcher(self.m_config, self.m_api, self.m_status_rep, self.m_hosts_ipset) self.m_splitter = Mock(spec=UpdateSplitter) self.watcher.splitter = self.m_splitter self.m_reader = Mock(spec=MessageReader) self.m_writer = Mock(spec=MessageWriter) self.watcher._msg_reader = self.m_reader self.watcher._msg_writer = self.m_writer self.m_driver_proc = Mock(spec=subprocess.Popen) self.watcher._driver_process = self.m_driver_proc def test_run(self): with patch.object(self.watcher.load_config, "wait") as m_wait: with patch.object(self.watcher, "_start_driver") as m_start: m_reader = Mock() m_writer = Mock() m_start.return_value = (m_reader, m_writer) m_reader.new_messages.side_effect = ExpectedException() self.assertRaises(ExpectedException, self.watcher._run) self.assertEqual(m_wait.mock_calls, [call()]) @patch("calico.felix.fetcd.die_and_restart", autospec=True) def test_read_loop(self, m_die): self.m_reader.new_messages.side_effect = iter([ iter([]), iter([(MSG_TYPE_STATUS, {MSG_KEY_STATUS: STATUS_RESYNC})]) ]) self.m_driver_proc.poll.side_effect = iter([ None, 1 ]) m_die.side_effect = ExpectedException() with patch.object(self.watcher, "_dispatch_msg_from_driver") as m_disp: self.assertRaises(ExpectedException, self.watcher._loop_reading_from_driver) self.assertEqual(m_disp.mock_calls, [call(MSG_TYPE_STATUS, {MSG_KEY_STATUS: STATUS_RESYNC})]) @patch("calico.felix.fetcd.die_and_restart", autospec=True) def test_read_loop_socket_error(self, m_die): self.m_reader.new_messages.side_effect = SocketClosed() m_die.side_effect = ExpectedException self.assertRaises(ExpectedException, self.watcher._loop_reading_from_driver) self.assertEqual(m_die.mock_calls, [call()]) @patch("calico.felix.fetcd.die_and_restart", autospec=True) def test_read_loop_resync(self, m_die): 
self.m_reader.new_messages.side_effect = iter([iter([]), iter([])]) self.m_driver_proc.poll.side_effect = iter([None, 1]) self.watcher.resync_requested = True m_die.side_effect = ExpectedException() self.assertRaises(ExpectedException, self.watcher._loop_reading_from_driver) def test_dispatch_from_driver(self): for msg_type, expected_method in [ (MSG_TYPE_UPDATE, "_on_update_from_driver"), (MSG_TYPE_CONFIG_LOADED, "_on_config_loaded_from_driver"), (MSG_TYPE_STATUS, "_on_status_from_driver"),]: with patch.object(self.watcher, expected_method) as m_meth: msg = Mock() self.watcher._dispatch_msg_from_driver(msg_type, msg) self.assertEqual(m_meth.mock_calls, [call(msg)]) def test_dispatch_from_driver_unexpected(self): self.assertRaises(RuntimeError, self.watcher._dispatch_msg_from_driver, "unknown", {}) @patch("gevent.sleep") def test_dispatch_yield(self, m_sleep): for _ in xrange(399): with patch.object(self.watcher, "_on_update_from_driver") as m_upd: msg = Mock() self.watcher._dispatch_msg_from_driver(MSG_TYPE_UPDATE, msg) self.assertEqual(m_sleep.mock_calls, [call(0.000001)]) def test_on_update_from_driver(self): self.watcher.read_count = 999 self.watcher.configured.set() with patch.object(self.watcher, "begin_polling") as m_begin: self.watcher._on_update_from_driver({ MSG_KEY_TYPE: MSG_TYPE_UPDATE, MSG_KEY_KEY: "/calico/v1/Ready", MSG_KEY_VALUE: "true", }) m_begin.wait.assert_called_once_with() @patch("calico.felix.fetcd.die_and_restart", autospec=True) def test_on_config_loaded(self, m_die): self.m_config.DRIVERLOGFILE = "/tmp/driver.log" global_config = {"InterfacePrefix": "tap"} local_config = {"LogSeverityFile": "DEBUG"} self.watcher._on_config_loaded_from_driver({ MSG_KEY_GLOBAL_CONFIG: global_config, MSG_KEY_HOST_CONFIG: local_config, }) self.assertTrue(self.watcher.configured.is_set()) self.assertEqual( self.m_config.report_etcd_config.mock_calls, [call(local_config, global_config)] ) self.assertEqual( self.m_writer.send_message.mock_calls, 
[call(MSG_TYPE_CONFIG, { MSG_KEY_LOG_FILE: "/tmp/driver.log", MSG_KEY_SEV_FILE: self.m_config.LOGLEVFILE, MSG_KEY_SEV_SCREEN: self.m_config.LOGLEVSCR, MSG_KEY_SEV_SYSLOG: self.m_config.LOGLEVSYS, })] ) self.assertEqual(m_die.mock_calls, []) # Check a subsequent config change results in Felix dying. global_config = {"InterfacePrefix": "not!tap"} local_config = {"LogSeverityFile": "not!DEBUG"} self.watcher._on_config_loaded_from_driver({ MSG_KEY_GLOBAL_CONFIG: global_config, MSG_KEY_HOST_CONFIG: local_config, }) self.assertEqual(m_die.mock_calls, [call()]) def test_on_status_from_driver(self): self.watcher._on_status_from_driver({ MSG_KEY_STATUS: STATUS_RESYNC }) self.assertFalse(self.watcher._been_in_sync) with patch.object(self.watcher, "begin_polling") as m_begin: # Two calls but second should be ignored... self.watcher._on_status_from_driver({ MSG_KEY_STATUS: STATUS_IN_SYNC }) self.watcher._on_status_from_driver({ MSG_KEY_STATUS: STATUS_IN_SYNC }) m_begin.wait.assert_called_once_with() self.assertTrue(self.watcher._been_in_sync) self.assertEqual(self.m_splitter.on_datamodel_in_sync.mock_calls, [call()]) self.assertEqual(self.m_hosts_ipset.replace_members.mock_calls, [call(frozenset([]), async=True)]) @patch("subprocess.Popen") @patch("socket.socket") @patch("os.unlink") def test_start_driver(self, m_unlink, m_socket, m_popen): m_sck = Mock() m_socket.return_value = m_sck m_conn = Mock() m_sck.accept.return_value = m_conn, None reader, writer = self.watcher._start_driver() self.assertEqual(m_socket.mock_calls[0], call(socket.AF_UNIX, socket.SOCK_STREAM)) self.assertEqual(m_sck.bind.mock_calls, [call("/run/felix-driver.sck")]) self.assertEqual(m_sck.listen.mock_calls, [call(1)]) self.assertEqual(m_popen.mock_calls[0], call([ANY, "-m", "calico.etcddriver", "/run/felix-driver.sck"])) self.assertEqual(m_unlink.mock_calls, [call("/run/felix-driver.sck")] * 2) self.assertTrue(isinstance(reader, MessageReader)) self.assertTrue(isinstance(writer, MessageWriter)) 
    @patch("subprocess.Popen")
    @patch("socket.socket")
    @patch("os.unlink")
    def test_start_driver_unlink_fail(self, m_unlink, m_socket, m_popen):
        """A failed unlink of a stale socket file is tolerated."""
        m_unlink.side_effect = OSError()
        m_sck = Mock()
        m_socket.return_value = m_sck
        m_conn = Mock()
        m_sck.accept.return_value = m_conn, None
        reader, writer = self.watcher._start_driver()
        self.assertTrue(isinstance(reader, MessageReader))
        self.assertTrue(isinstance(writer, MessageWriter))

    def test_update_hosts_ipset_not_in_sync(self):
        """Before first sync, the hosts ipset must not be touched."""
        self.watcher._update_hosts_ipset()
        self.assertEqual(self.m_hosts_ipset.mock_calls, [])

    @patch("calico.felix.fetcd.die_and_restart", autospec=True)
    def test_config_set(self, m_die):
        """A change to a watched global config key forces a restart."""
        self.watcher.last_global_config = {}
        self.dispatch("/calico/v1/config/InterfacePrefix",
                      "set", value="foo")
        self.assertEqual(m_die.mock_calls, [call()])

    @patch("calico.felix.fetcd.die_and_restart", autospec=True)
    def test_host_config_set(self, m_die):
        """Only our own host's config change forces a restart."""
        self.watcher.last_host_config = {}
        # Another host's config is ignored; only "hostname" (ours) matters.
        self.dispatch("/calico/v1/host/notourhostname/config/InterfacePrefix",
                      "set", value="foo")
        self.dispatch("/calico/v1/host/hostname/config/InterfacePrefix",
                      "set", value="foo")
        self.assertEqual(m_die.mock_calls, [call()])

    def test_endpoint_set(self):
        """A valid endpoint write reaches the splitter parsed."""
        self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                      "set", value=ENDPOINT_STR)
        self.m_splitter.on_endpoint_update.assert_called_once_with(
            EndpointId("h1", "o1", "w1", "e1"),
            VALID_ENDPOINT,
        )

    def test_endpoint_set_bad_json(self):
        """Unparseable endpoint JSON is reported to the splitter as None."""
        self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                      "set", value="{")
        self.m_splitter.on_endpoint_update.assert_called_once_with(
            EndpointId("h1", "o1", "w1", "e1"),
            None,
        )

    def test_endpoint_set_invalid(self):
        """Well-formed but invalid endpoint data is reported as None."""
        self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                      "set", value="{}")
        self.m_splitter.on_endpoint_update.assert_called_once_with(
            EndpointId("h1", "o1", "w1", "e1"),
            None,
        )

    def test_prof_labels_set(self):
        """Profile labels are parsed and passed to the splitter."""
        self.dispatch("/calico/v1/policy/profile/prof1/labels",
                      "set", value='{"a": "b"}')
        self.m_splitter.on_prof_labels_set.assert_called_once_with(
            "prof1", {"a": "b"})

    def test_prof_labels_set_bad_data(self):
        """Broken label JSON maps to a None labels update."""
        self.dispatch("/calico/v1/policy/profile/prof1/labels",
                      "set", value='{"a": "b}')
        self.m_splitter.on_prof_labels_set.assert_called_once_with(
            "prof1", None)

    def test_prof_labels_del(self):
        """Deleting a profile's labels maps to a None labels update."""
        self.dispatch("/calico/v1/policy/profile/prof1/labels", "delete")
        self.m_splitter.on_prof_labels_set.assert_called_once_with(
            "prof1", None)

    def test_on_tiered_policy_set(self):
        """A tiered policy set updates both rules and selector."""
        self.dispatch("/calico/v1/policy/tier/tiername/policy/polname",
                      "set", value=POLICY_STR)
        self.m_splitter.on_rules_update.assert_called_once_with(
            POLICY_ID, POLICY_PARSED
        )
        self.m_splitter.on_policy_selector_update.assert_called_once_with(
            POLICY_ID, SELECTOR, 10
        )

    def test_on_tiered_policy_set_bad_data(self):
        """Truncated policy data maps to None rules and selector."""
        self.dispatch("/calico/v1/policy/tier/tiername/policy/polname",
                      "set", value=POLICY_STR[:10])
        self.m_splitter.on_rules_update.assert_called_once_with(
            POLICY_ID, None
        )
        self.m_splitter.on_policy_selector_update.assert_called_once_with(
            POLICY_ID, None, None
        )

    def test_on_tiered_policy_del(self):
        """Deleting a tiered policy clears rules and selector."""
        self.dispatch("/calico/v1/policy/tier/tiername/policy/polname",
                      "delete")
        self.m_splitter.on_rules_update.assert_called_once_with(
            POLICY_ID, None
        )
        self.m_splitter.on_policy_selector_update.assert_called_once_with(
            POLICY_ID, None, None
        )

    def test_on_tier_data_set(self):
        """Tier metadata is parsed and passed to the splitter."""
        self.dispatch("/calico/v1/policy/tier/tiername/metadata",
                      "set", value='{"order": 10}')
        self.m_splitter.on_tier_data_update.assert_called_once_with(
            "tiername", {"order": 10}
        )

    def test_on_tier_data_set_bad_data(self):
        """Broken tier metadata JSON maps to a None update."""
        self.dispatch("/calico/v1/policy/tier/tiername/metadata",
                      "set", value='{"order": 10')
        self.m_splitter.on_tier_data_update.assert_called_once_with(
            "tiername", None
        )

    def test_on_tier_data_del(self):
        """Deleting tier metadata maps to a None update."""
        self.dispatch("/calico/v1/policy/tier/tiername/metadata", "delete")
        self.m_splitter.on_tier_data_update.assert_called_once_with(
            "tiername", None
        )

    def test_rules_set(self):
        # Body of test_rules_set (its "def" line closes the previous chunk):
        # valid rules JSON is parsed and handed to the splitter.
        self.dispatch("/calico/v1/policy/profile/prof1/rules",
                      "set", value=RULES_STR)
        self.m_splitter.on_rules_update.assert_called_once_with("prof1", RULES)

    def test_rules_set_bad_json(self):
        """Broken rules JSON maps to a None rules update."""
        self.dispatch("/calico/v1/policy/profile/prof1/rules",
                      "set", value="{")
        self.m_splitter.on_rules_update.assert_called_once_with("prof1", None,)

    def test_rules_set_invalid(self):
        """Well-formed but invalid rules map to a None rules update."""
        self.dispatch("/calico/v1/policy/profile/prof1/rules",
                      "set", value='{}')
        self.m_splitter.on_rules_update.assert_called_once_with("prof1", None,)

    def test_tags_set(self):
        """Valid tags JSON is parsed and handed to the splitter."""
        self.dispatch("/calico/v1/policy/profile/prof1/tags",
                      "set", value=TAGS_STR)
        self.m_splitter.on_tags_update.assert_called_once_with("prof1", TAGS)

    def test_tags_set_bad_json(self):
        """Broken tags JSON maps to a None tags update."""
        self.dispatch("/calico/v1/policy/profile/prof1/tags",
                      "set", value="{")
        self.m_splitter.on_tags_update.assert_called_once_with("prof1", None)

    def test_tags_set_invalid(self):
        """Tags of the wrong shape map to a None tags update."""
        self.dispatch("/calico/v1/policy/profile/prof1/tags",
                      "set", value="[{}]")
        self.m_splitter.on_tags_update.assert_called_once_with("prof1", None)

    def test_tags_del(self):
        """
        Test tag-only deletion.
        """
        self.dispatch("/calico/v1/policy/profile/profA/tags",
                      action="delete")
        self.m_splitter.on_tags_update.assert_called_once_with("profA", None)
        self.assertFalse(self.m_splitter.on_rules_update.called)

    def test_rules_del(self):
        """
        Test rules-only deletion.
        """
        self.dispatch("/calico/v1/policy/profile/profA/rules",
                      action="delete")
        self.m_splitter.on_rules_update.assert_called_once_with("profA", None)
        self.assertFalse(self.m_splitter.on_tags_update.called)

    def test_endpoint_del(self):
        """
        Test endpoint-only deletion.
        """
        self.dispatch("/calico/v1/host/h1/workload/o1/w1/endpoint/e1",
                      action="delete")
        self.m_splitter.on_endpoint_update.assert_called_once_with(
            EndpointId("h1", "o1", "w1", "e1"),
            None,
        )

    def test_host_ip_set(self):
        """
        Test set for the IP of a host.
        """
        self.watcher._been_in_sync = True
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="set", value="10.0.0.1")
        self.m_hosts_ipset.replace_members.assert_called_once_with(
            frozenset(["10.0.0.1"]),
            async=True,
        )

    def test_host_ip_ipip_disabled(self):
        """
        Test set for the IP of a host.
        """
        # With IP-in-IP disabled, host IP changes never touch the ipset.
        self.m_config.IP_IN_IP_ENABLED = False
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="set", value="10.0.0.1")
        self.assertFalse(self.m_hosts_ipset.replace_members.called)
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="delete")
        self.assertFalse(self.m_hosts_ipset.replace_members.called)

    def test_host_ip_del(self):
        """
        Test set for the IP of a host.
        """
        self.watcher._been_in_sync = True
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="set", value="10.0.0.1")
        self.m_hosts_ipset.reset_mock()
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="delete")
        # Deleting the only host IP empties the ipset.
        self.m_hosts_ipset.replace_members.assert_called_once_with(
            frozenset([]),
            async=True,
        )

    def test_host_ip_invalid(self):
        """
        Test set for the IP of a host.
        """
        self.watcher._been_in_sync = True
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="set", value="10.0.0.1")
        self.m_hosts_ipset.reset_mock()
        # An unparseable IP behaves like a delete.
        self.dispatch("/calico/v1/host/foo/bird_ip",
                      action="set", value="gibberish")
        self.m_hosts_ipset.replace_members.assert_called_once_with(
            frozenset([]),
            async=True,
        )

    def test_ipam_pool_set(self):
        """An empty-dict pool (no cidr) is reported to the splitter as None."""
        self.dispatch("/calico/v1/ipam/v4/pool/1234", action="set",
                      value="{}")
        self.assertEqual(self.m_splitter.on_ipam_pool_updated.mock_calls,
                         [call("1234", None)])

    def test_ipam_pool_del(self):
        """Deleting a pool is reported to the splitter as None."""
        self.dispatch("/calico/v1/ipam/v4/pool/1234", action="delete")
        self.assertEqual(self.m_splitter.on_ipam_pool_updated.mock_calls,
                         [call("1234", None)])

    @patch("os._exit", autospec=True)
    @patch("gevent.sleep", autospec=True)
    def test_die_and_restart(self, m_sleep, m_exit):
        """die_and_restart sleeps briefly then hard-exits the process."""
        die_and_restart()
        m_sleep.assert_called_once_with(2)
        m_exit.assert_called_once_with(1)

    def dispatch(self, key, action, value=None):
        """
        Send an EtcdResult to the watcher's dispatcher.
        """
        m_response = Mock(spec=EtcdResult)
        m_response.key = key
        m_response.action = action
        m_response.value = value
        self.watcher.dispatcher.handle_event(m_response)


class TestEtcdReporting(BaseTestCase):
    """Tests for the EtcdAPI periodic status-reporting loop."""

    def setUp(self):
        super(TestEtcdReporting, self).setUp()
        # Minimal config double; values mirror what EtcdAPI reads.
        self.m_config = Mock()
        self.m_config.IFACE_PREFIX = "tap"
        self.m_config.ETCD_ADDRS = ["localhost:4001"]
        self.m_config.ETCD_SCHEME = "http"
        self.m_config.ETCD_KEY_FILE = None
        self.m_config.ETCD_CERT_FILE = None
        self.m_config.ETCD_CA_FILE = None
        self.m_config.HOSTNAME = "hostname"
        self.m_config.RESYNC_INTERVAL = 0
        self.m_config.REPORTING_INTERVAL_SECS = 1
        self.m_config.REPORTING_TTL_SECS = 10
        self.m_hosts_ipset = Mock(spec=IpsetActor)
        # Patch out greenlet spawning, the watcher and the clock so that
        # constructing EtcdAPI has no side effects and a fixed start time.
        with patch("gevent.spawn", autospec=True):
            with patch("calico.felix.fetcd._FelixEtcdWatcher", autospec=True):
                with patch("calico.felix.fetcd.monotonic_time",
                           return_value=100):
                    self.api = EtcdAPI(self.m_config, self.m_hosts_ipset)
        self.api._watcher.configured = Mock()

    @patch("gevent.sleep", autospec=True)
    def test_reporting_loop_mainline(self, m_sleep):
        """
        Test the mainline function of the status reporting loop.

        It should repeatedly call the _update_felix_status method,
        retrying on various exceptions.
        """
        with patch.object(self.api, "_update_felix_status") as m_update:
            # EtcdException is retried; RuntimeError escapes and ends the test.
            m_update.side_effect = [EtcdException, None, RuntimeError]
            self.assertRaises(RuntimeError,
                              self.api._periodically_report_status)
        self.assertEqual(m_update.mock_calls, [call(10)] * 3)
        # One fixed back-off sleep after the error, one jittered interval.
        retry_call, jittered_call = m_sleep.mock_calls
        self.assertEqual(retry_call, call(5))
        _, (delay,), _ = jittered_call
        self.assertTrue(delay >= 1)
        self.assertTrue(delay <= 1.1005)

    def test_reporting_loop_disabled(self):
        """With a zero interval, the loop returns without reporting."""
        self.m_config.REPORTING_INTERVAL_SECS = 0
        with patch.object(self.api, "_update_felix_status") as m_update:
            m_update.side_effect = RuntimeError
            self.api._periodically_report_status()

    @patch("calico.felix.futils.datetime", autospec=True)
    @patch("calico.felix.fetcd.monotonic_time", return_value=200)
    def test_update_felix_status(self, m_monotime, m_datetime):
        """Status writes carry uptime, timestamp and first_update flag."""
        m_datetime.utcnow.return_value = datetime(2015, 9, 10, 2, 1, 53, 1234)
        with patch.object(self.api.client, "set") as m_set:
            self.api._update_felix_status(10)
            self.api._update_felix_status(10)
        # Should write two keys into etcd, one with a TTL and another with
        # richer status.
        # Continuation of the mainline test: the rate-limit timer was
        # scheduled by the write that happened just above this chunk.
        self.assertEqual(
            m_spawn.mock_calls,
            [call(ANY, self.rep._on_timer_pop, async=True)]
        )
        self.assertTrue(self.rep._timer_scheduled)
        self.assertFalse(self.rep._reporting_allowed)

        # Send in another update, shouldn't get written until we pop the timer.
        self.m_client.reset_mock()
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.rep.on_endpoint_status_changed(endpoint_id, IPV4, None,
                                                async=True)
            self.step_actor(self.rep)
        self.assertFalse(self.m_client.set.called)
        # Timer already scheduled, shouldn't get rescheduled.
        self.assertFalse(m_spawn.called)

        # Pop the timer, should trigger write and reschedule.
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.rep._on_timer_pop(async=True)
            self.step_actor(self.rep)
        self.maxDiff = 10000
        # The None status becomes a delete of the endpoint key followed by
        # best-effort cleanup of the now-empty parent directories.
        self.assertEqual(
            self.m_client.delete.mock_calls,
            [
                call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/"
                     "biff"),
                call("calico/felix/v1/host/foo/workload/bar/baz/endpoint",
                     dir=True, timeout=5),
                call("calico/felix/v1/host/foo/workload/bar/baz",
                     dir=True, timeout=5),
                call("calico/felix/v1/host/foo/workload/bar",
                     dir=True, timeout=5),
                call("calico/felix/v1/host/foo/workload",
                     dir=True, timeout=5),
            ]
        )
        # Rate limit timer should be scheduled.
        self.assertEqual(
            m_spawn.mock_calls,
            [call(ANY, self.rep._on_timer_pop, async=True)]
        )
        # Delay is jittered around the 1s ENDPOINT_REPORT_DELAY.
        spawn_delay = m_spawn.call_args[0][0]
        self.assertTrue(spawn_delay >= 0.89999)
        self.assertTrue(spawn_delay <= 1.10001)
        self.assertTrue(self.rep._timer_scheduled)
        self.assertFalse(self.rep._reporting_allowed)

        # Cache should be cleaned up.
        self.assertEqual(self.rep._endpoint_status[IPV4], {})
        # Nothing queued.
        self.assertEqual(self.rep._newer_dirty_endpoints, set())
        self.assertEqual(self.rep._older_dirty_endpoints, set())

    def test_mark_endpoint_dirty_already_dirty(self):
        """An endpoint already queued in the older set is not re-queued."""
        endpoint_id = EndpointId("a", "b", "c", "d")
        self.rep._older_dirty_endpoints.add(endpoint_id)
        self.rep._mark_endpoint_dirty(endpoint_id)
        self.assertFalse(endpoint_id in self.rep._newer_dirty_endpoints)

    def test_on_endpoint_status_failure(self):
        """A failed etcd write re-queues the endpoint for retry."""
        # Send in an endpoint status update.
        endpoint_id = EndpointId("foo", "bar", "baz", "biff")
        self.m_client.set.side_effect = EtcdException()
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.rep.on_endpoint_status_changed(endpoint_id, IPV4,
                                                {"status": "up"}, async=True)
            self.step_actor(self.rep)
        # Should do the write.
        self.assertEqual(
            self.m_client.set.mock_calls,
            [call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/biff",
                  JSONString({"status": "up"}))]
        )
        # But endpoint should be re-queued in the newer set.
        self.assertEqual(self.rep._newer_dirty_endpoints, set([endpoint_id]))
        self.assertEqual(self.rep._older_dirty_endpoints, set())

    def test_on_endpoint_status_changed_disabled(self):
        """With reporting disabled, updates are dropped on the floor."""
        self.m_config.REPORT_ENDPOINT_STATUS = False
        endpoint_id = EndpointId("foo", "bar", "baz", "biff")
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.rep.on_endpoint_status_changed(endpoint_id, IPV4,
                                                {"status": "up"}, async=True)
            self.step_actor(self.rep)
        self.assertFalse(m_spawn.called)
        self.assertEqual(self.rep._endpoint_status[IPV4], {})
        # Nothing queued.
        self.assertEqual(self.rep._newer_dirty_endpoints, set())
        self.assertEqual(self.rep._older_dirty_endpoints, set())

    def test_on_endpoint_status_v4_v6(self):
        # Send in endpoint status updates for v4 and v6.
        # Body of test_on_endpoint_status_v4_v6 (its "def" line closes the
        # previous chunk): v4 and v6 statuses are tracked separately but
        # combined into a single write for the endpoint key.
        endpoint_id = EndpointId("foo", "bar", "baz", "biff")
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.rep.on_endpoint_status_changed(endpoint_id, IPV4,
                                                {"status": "up"}, async=True)
            self.rep.on_endpoint_status_changed(endpoint_id, IPV6,
                                                {"status": "down"}, async=True)
            self.step_actor(self.rep)
        # Should record the status.
        self.assertEqual(
            self.rep._endpoint_status,
            {
                IPV4: {endpoint_id: {"status": "up"}},
                IPV6: {endpoint_id: {"status": "down"}},
            }
        )
        # And do a write.
        self.assertEqual(
            self.m_client.set.mock_calls,
            [call("/calico/felix/v1/host/foo/workload/bar/baz/endpoint/biff",
                  JSONString({"status": "down"}))]
        )

    def test_resync(self):
        """resync() re-queues every known endpoint as dirty."""
        endpoint_id = EndpointId("foo", "bar", "baz", "biff")
        self.rep.on_endpoint_status_changed(endpoint_id, IPV4,
                                            {"status": "up"}, async=True)
        endpoint_id_2 = EndpointId("foo", "bar", "baz", "boff")
        self.rep.on_endpoint_status_changed(endpoint_id_2, IPV6,
                                            {"status": "up"}, async=True)
        # Drain the queues first so we can see resync's effect in isolation.
        with patch("gevent.spawn_later", autospec=True) as m_spawn:
            self.step_actor(self.rep)
            self.rep._on_timer_pop(async=True)
            self.step_actor(self.rep)
        self.assertEqual(self.rep._older_dirty_endpoints, set())
        self.assertEqual(self.rep._newer_dirty_endpoints, set())
        self.rep.resync(async=True)
        self.step_actor(self.rep)
        self.assertEqual(self.rep._older_dirty_endpoints, set())
        self.assertEqual(self.rep._newer_dirty_endpoints,
                         set([endpoint_id, endpoint_id_2]))

    def test_combine_statuses(self):
        """
        Test the "truth table" for combining status reports.
        """
        self.assert_combined_status(None, None, None)
        self.assert_combined_status({"status": "up"}, None, {"status": "up"})
        self.assert_combined_status({"status": "up"}, {"status": "up"},
                                    {"status": "up"})
        self.assert_combined_status({"status": "down"}, {"status": "up"},
                                    {"status": "down"})
        self.assert_combined_status({"status": "error"}, {"status": "up"},
                                    {"status": "error"})

    def assert_combined_status(self, a, b, expected):
        """Assert combine_statuses(a, b) == expected, in both orders."""
        # Should be symmetric so check the arguments both ways round.
        for lhs, rhs in [(a, b), (b, a)]:
            result = combine_statuses(lhs, rhs)
            self.assertEqual(result, expected,
                             "Expected %r and %r to combine to %s but got %r" %
                             (lhs, rhs, expected, result))

    def test_clean_up_endpoint_status(self):
        """Stale endpoint status keys found in etcd are marked dirty."""
        self.m_config.REPORT_ENDPOINT_STATUS = True
        # NOTE(review): ep_id is never used below — looks like dead setup.
        ep_id = EndpointId("foo",
                           "openstack",
                           "workloadid",
                           "endpointid")

        # An empty directory leaf: not an endpoint key, should be skipped.
        empty_dir = Mock()
        empty_dir.key = ("/calico/felix/v1/host/foo/workload/"
                         "openstack/foobar")
        empty_dir.dir = True

        # A status key for an endpoint we don't know about.
        missing_ep = Mock()
        missing_ep.key = ("/calico/felix/v1/host/foo/workload/"
                          "openstack/aworkload/endpoint/anendpoint")

        self.m_client.read.return_value.leaves = [
            empty_dir,
            missing_ep,
        ]
        with patch.object(self.rep, "_mark_endpoint_dirty") as m_mark:
            self.rep.clean_up_endpoint_statuses(async=True)
            self.step_actor(self.rep)

            # Missing endpoint should have been marked for cleanup.
            m_mark.assert_called_once_with(
                EndpointId("foo",
                           "openstack",
                           "aworkload",
                           "anendpoint")
            )

    def test_clean_up_endpoint_status_etcd_error(self):
        """An etcd error during cleanup leaves cleanup pending for retry."""
        self.m_config.REPORT_ENDPOINT_STATUS = True
        with patch.object(self.rep, "_attempt_cleanup") as m_clean:
            m_clean.side_effect = EtcdException()
            self.rep.clean_up_endpoint_statuses(async=True)
            self.step_actor(self.rep)
        self.assertTrue(self.rep._cleanup_pending)

    def test_clean_up_endpoint_status_not_found(self):
        """A missing status directory means nothing to clean up."""
        self.m_config.REPORT_ENDPOINT_STATUS = True
        self.m_client.read.side_effect = etcd.EtcdKeyNotFound()
        with patch.object(self.rep, "_mark_endpoint_dirty") as m_mark:
            self.rep.clean_up_endpoint_statuses(async=True)
            self.step_actor(self.rep)
        self.assertFalse(m_mark.called)

    def test_clean_up_endpoint_status_disabled(self):
        """With reporting disabled, cleanup must never read etcd."""
        self.m_config.REPORT_ENDPOINT_STATUS = False
        # Any read would fail the test outright.
        self.m_client.read.side_effect = self.failureException
        self.rep.clean_up_endpoint_statuses(async=True)
        self.step_actor(self.rep)
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example Generator Pipeline.

The run method implements a Beam pipeline that generates TFRecord files for
training and evaluation by joining audio files, using the full filename, to
annotations from CSV files, reading the audio files into short clips, and
serializing the labeled clips as TensorFlow Examples.
"""
import csv
import datetime
import functools
import io
import itertools
import os
from typing import BinaryIO, Dict, Iterable, Optional, Tuple, TypeVar, Union

import apache_beam as beam
from apache_beam.io import fileio
from dataclasses import dataclass
from dateutil import parser as date_parser
import intervaltree
from multispecies_whale_detection import dataset
from multispecies_whale_detection import xwav
import numpy as np
import resampy
import soundfile
import tensorflow as tf

T = TypeVar('T')


def _only_element(iterable: Iterable[T], default_value: T = None) -> T:
  """Unwraps an iterator that is expected to have at most one element.

  Args:
    iterable: An Iterable with at most one element.
    default_value: The value to return when the Iterable has no elements.

  Returns:
    The unique element of the Iterable or the default value when the Iterable
    is empty.

  Raises:
    ValueError: if the Iterable has more than one element.
  """
  iterator = iter(iterable)
  element = next(iterator, default_value)
  # A second successful next() means the input had more than one element;
  # the ValueError is not caught by the StopIteration handler below.
  try:
    _ = next(iterator)
    raise ValueError('Iterable had more than one element')
  except StopIteration:
    pass
  return element


def _parse_utc(text: str) -> datetime.datetime:
  """Leniently parses a datetime, defaulting to UTC if no zone is provided."""
  parsed = date_parser.parse(text)
  if not parsed.tzinfo:
    parsed = parsed.replace(tzinfo=datetime.timezone.utc)
  return parsed


@dataclass
class Configuration:
  """Application settings for the pipeline."""
  input_directory: str
  output_directory: str
  clip_duration_seconds: float = 10
  resample_rate: int = 16000


@dataclass
class ClipMetadata:
  """Description of a clip of audio from a longer file."""
  filename: str
  sample_rate: int
  duration: datetime.timedelta
  index_in_file: int
  start_relative_to_file: datetime.timedelta
  # None when the source audio carries no absolute timestamps (non-XWAV).
  start_utc: Optional[datetime.datetime]


# to annotate a factory method
AnnotationType = TypeVar('AnnotationType', bound='Annotation')


@dataclass
class Annotation:
  """Value object for a labeled time interval within some audio.

  Depending on context, begin and end may be interpreted as relative to the
  start of an audio file, a clip taken from a longer file, or the UNIX epoch.
  """
  label: str

  @classmethod
  def parse_csv_row(cls, row: Dict[str, str]) -> AnnotationType:
    """Parses an annotation from a CSV row.

    Args:
      row: Dictionary mapping CSV headers to field values. The headers must
        include "label" and one of ("begin" / "end" floating-point endpoints
        in seconds relative to the start of the file) or ("begin_utc" /
        "end_utc" endpoints as absolute times as strings in any format that
        dateutil.parser will convert to an aware datetime). When both are
        present, only the endpoints relative to the start of the file are
        used.

    Returns:
      Annotation parsed from the CSV row.

    Raises:
      ValueError if the given row does not include all the required fields.
""" label = row.get('label', None) if not label: raise ValueError('label was empty or not provided') begin_rel_file = row.get('begin', None) end_rel_file = row.get('end', None) if begin_rel_file and end_rel_file: return FileAnnotation( label=row['label'], begin=datetime.timedelta(seconds=float(begin_rel_file)), end=datetime.timedelta(seconds=float(end_rel_file)), ) begin_text = row.get('begin_utc', None) end_text = row.get('end_utc', None) if begin_text and end_text: return UTCAnnotation( label=row['label'], begin=_parse_utc(begin_text), end=_parse_utc(end_text), ) raise ValueError('row should have either both "begin" and "end" fields ' 'or both "begin_utc" and "end_utc" fields') @dataclass class ClipAnnotation(Annotation): """Annotation as time differences from the start of a clip. This type is used for annotations relative to an extracted clip of audio intended to be included in its entirety into a TensorFlow Example. This is in contrast to FileAnnotation (below). There, begin / end are in a timeline whose zero point is at the start of the file. Here, the zero point is at the beginning of a clip which often has been extracted from a longer file. audio_example packs these ClipAnnotations into parallel "annotation" fields of a TensorFlow Example. """ begin: datetime.timedelta end: datetime.timedelta def _restrict_to_clip( begin: datetime.timedelta, end: datetime.timedelta, clip_metadata: ClipMetadata, label: str, ) -> Optional[ClipAnnotation]: """Restricts an interval to the duration from ClipMetadata. Args: begin: The start of the interval, relative to the clip. May be negative. end: The end of the interval, relative to the clip. May be negative. clip_metadata: Description of the clip. label: Label to set in the returned ClipAnnotation. Returns: ClipAnnotation with the intersection of (begin, end) with the clip described by clip_metadata or None if that interstion is empty. 
""" assert end > begin begin = max(begin, datetime.timedelta(seconds=0)) end = min(end, clip_metadata.duration) if begin < clip_metadata.duration and end > datetime.timedelta(seconds=0): return ClipAnnotation(begin=begin, end=end, label=label) else: return None @dataclass class FileAnnotation(Annotation): """Annotation as time differences from the start of a file.""" begin: datetime.timedelta end: datetime.timedelta def make_relative(self, clip_metadata: ClipMetadata) -> Optional[ClipAnnotation]: """Expresses this annotation as an interval within a given clip. Args: clip_metadata: Description of the clip, including its position relative to the file it came from. Returns: An annotation relative to the given clip or None if there is no overlap. """ return _restrict_to_clip( self.begin - clip_metadata.start_relative_to_file, self.end - clip_metadata.start_relative_to_file, clip_metadata, self.label, ) @dataclass class UTCAnnotation(Annotation): """Annotation whose endpoints are absolute datetimes. To avoid ambiguity, the datetimes must be time zone aware. """ begin: datetime.datetime end: datetime.datetime def __init__(self, label, begin, end): if not (begin.tzinfo and end.tzinfo): raise ValueError('endpoint datetimes must be time zone aware') self.label = label self.begin = begin self.end = end def make_relative(self, clip_metadata: ClipMetadata) -> Optional[ClipAnnotation]: """Expresses this annotation as an interval within a given clip. Args: clip_metadata: Description of the clip. start_utc must be set. Returns: An annotation relative to the given clip or None if there is no overlap. Raises: ValueError if clip_metadata.start_utc is None. """ return _restrict_to_clip( self.begin - clip_metadata.start_utc, self.end - clip_metadata.start_utc, clip_metadata, self.label, ) # Type of the values for the CoGroupByKey (filename) done by this pipeline. # See later make_audio_examples, which processes this JoinResult. 
JoinResult = Dict[str, Union[Iterable[fileio.ReadableFile],
                             Iterable[Annotation]]]


class TimedeltaCoder(beam.coders.Coder):
  """Compact Beam Coder for datetime.timedelta."""

  def __init__(self):
    int_coder = beam.coders.VarIntCoder()
    # A timedelta normalizes to (days, seconds, microseconds).
    self._tuple_coder = beam.coders.TupleCoder(
        (int_coder, int_coder, int_coder))

  def encode(self, instance):
    return self._tuple_coder.encode(
        (instance.days, instance.seconds, instance.microseconds))

  def decode(self, encoded):
    days, seconds, microseconds = self._tuple_coder.decode(encoded)
    return datetime.timedelta(
        days=days, seconds=seconds, microseconds=microseconds)

  def is_deterministic(self):
    return True


class UTCDatetimeCoder(beam.coders.Coder):
  """Beam Coder that codes aware datetimes as UTC tuples."""

  def __init__(self):
    int_coder = beam.coders.VarIntCoder()
    # (year, month, day, hour, minute, second, microsecond), all in UTC.
    self._tuple_coder = beam.coders.TupleCoder(
        (int_coder, int_coder, int_coder, int_coder, int_coder, int_coder,
         int_coder))

  def encode(self, instance):
    # Normalizing to UTC before encoding makes decode unambiguous.
    utc = instance.astimezone(datetime.timezone.utc)
    return self._tuple_coder.encode((utc.year, utc.month, utc.day, utc.hour,
                                     utc.minute, utc.second, utc.microsecond))

  def decode(self, encoded):
    return datetime.datetime(
        *self._tuple_coder.decode(encoded), tzinfo=datetime.timezone.utc)


class AnnotationCoder(beam.coders.Coder):
  """Compact Beam Coder for Annotations."""
  # Tag bytes distinguishing the two concrete Annotation subtypes.
  FILE_TYPE_CODE = 1
  UTC_TYPE_CODE = 2

  def __init__(self):
    int_coder = beam.coders.VarIntCoder()  # type code
    bytes_coder = beam.coders.BytesCoder()
    self._base_coder = beam.coders.TupleCoder((int_coder, bytes_coder))
    timedelta_coder = TimedeltaCoder()
    self._file_coder = beam.coders.TupleCoder(
        (timedelta_coder, timedelta_coder, beam.coders.StrUtf8Coder()))
    datetime_coder = UTCDatetimeCoder()
    self._utc_coder = beam.coders.TupleCoder(
        (datetime_coder, datetime_coder, beam.coders.StrUtf8Coder()))

  def encode(self, annotation):
    # Dispatch on the concrete subtype, tagging the payload so that decode
    # can reconstruct the right class.
    if isinstance(annotation, FileAnnotation):
      type_code = self.FILE_TYPE_CODE
      sub_coder = self._file_coder
    elif isinstance(annotation, UTCAnnotation):
      type_code = self.UTC_TYPE_CODE
      sub_coder = self._utc_coder
    else:
      raise TypeError('unknown annotation type')
    sub_encoded = sub_coder.encode(
        (annotation.begin, annotation.end, annotation.label))
    return self._base_coder.encode((type_code, sub_encoded))

  def decode(self, encoded):
    # NOTE(review): an unrecognized type code silently decodes to None;
    # encode() can only produce the two codes below, so this is unreachable
    # for data this coder wrote.
    type_code, sub_encoded = self._base_coder.decode(encoded)
    if type_code == self.FILE_TYPE_CODE:
      begin, end, label = self._file_coder.decode(sub_encoded)
      return FileAnnotation(begin=begin, end=end, label=label)
    elif type_code == self.UTC_TYPE_CODE:
      begin, end, label = self._utc_coder.decode(sub_encoded)
      return UTCAnnotation(begin=begin, end=end, label=label)

  def is_deterministic(self):
    return True


beam.coders.registry.register_coder(Annotation, AnnotationCoder)


def read_annotations(infile: BinaryIO) -> Iterable[Tuple[str, Annotation]]:
  """Parses an annotations CSV file.

  See py:meth:Annotation.parse_csv_row for a description of the format.

  Args:
    infile: Binary file-like object positioned at the beginning of the CSV.

  Yields:
    Pairs of filename and parsed Annotation.
  """
  reader = csv.DictReader(io.TextIOWrapper(infile))
  for row in reader:
    yield (row['filename'], Annotation.parse_csv_row(row))


def beam_read_annotations(readable_file: fileio.ReadableFile):
  """Opens the file and calls read_annotations."""
  return read_annotations(readable_file.open())


def generate_clips(
    filename: str, infile: BinaryIO, clip_duration: datetime.timedelta
) -> Iterable[Tuple[ClipMetadata, np.array]]:
  """Reads a file and generates equal-length clips and metadata.

  In general the file may be much longer than the requested clip duration.
  The start of the clip advances by the clip duration (disjoint tiling) until
  the file (or subchunk, in the XWAV case) is exhausted. This allows both
  XWAV and non-XWAV files to be treated as the same type by calling code,
  despite the fact that XWAVs are in effect a collection of shorter (~75s)
  files.

  Args:
    filename: Passed through to ClipMetadata.
    infile: Seekable file-like object in any audio format supported by
      soundfile. Optional XWAV headers will be used to populate
      ClipMetadata.start_utc.
    clip_duration: The desired length of each clip. When this does not evenly
      divide the duration of a subchunk (or whole file in the non-XWAV case),
      the remaining audio will be discarded.

  Yields:
    Pairs of clip metadata and NumPy arrays of audio of shape
    (samples, channels).
  """
  # First try to read the stream as an XWAV; fall back to plain soundfile
  # decoding when the XWAV header is absent (xwav.Error below).
  try:
    infile.seek(0)
    xwav_reader = xwav.Reader(infile)
    sample_rate = xwav_reader.header.fmt_chunk.sample_rate
    clip_duration_samples = np.round(clip_duration.total_seconds() *
                                     sample_rate).astype(int)
    # TODO(mattharvey): Add the ability to specify hop size as a field of
    # examplegen.Configuration and pass that field, or perhaps the whole
    # Configuration, through to here.
    hop = clip_duration_samples

    # For ClipMetadata.start_relative_to_file, because clips may not exactly
    # fill the subchunk, we need to increment the subchunk start relative to
    # the file in an outer loop over subchunks.
    subchunk_rel_file = datetime.timedelta(seconds=0)
    clip_index_in_file = 0
    for subchunk, samples in xwav_reader:
      subchunk_duration_samples = samples.shape[0]
      # The zip of the two ranges tiles full clips only; a partial clip at
      # the end of the subchunk is dropped.
      for begin, end in zip(
          range(0, subchunk_duration_samples, hop),
          range(clip_duration_samples, subchunk_duration_samples, hop)):
        clip_rel_subchunk = datetime.timedelta(seconds=begin / sample_rate)
        clip_metadata = ClipMetadata(
            filename=filename,
            sample_rate=sample_rate,
            duration=clip_duration,
            index_in_file=clip_index_in_file,
            start_relative_to_file=(subchunk_rel_file + clip_rel_subchunk),
            start_utc=(subchunk.time + clip_rel_subchunk),
        )
        clip_samples = samples[begin:end, :]
        yield (clip_metadata, clip_samples)
        clip_index_in_file += 1
      subchunk_rel_file += datetime.timedelta(
          seconds=subchunk_duration_samples / sample_rate)
  except xwav.Error:
    # TODO(matharvey): Consider refactoring this by adding an abstraction
    # layer around both SoundFile and xwav.Reader, which would present a
    # single API here and give an extension point for new reader
    # implementations. Do not forget that the non-XWAV branch may need to
    # work with hours-long files that are too big to pre-read into memory,
    # nor that the non-XWAV branch does not implement hop size yet.
    infile.seek(0)
    reader = soundfile.SoundFile(infile)
    sample_rate = reader.samplerate
    clip_duration_samples = np.round(clip_duration.total_seconds() *
                                     sample_rate).astype(int)
    # SoundFile read defaults to continuing where it left off, implying that
    # the hop is always exactly the duration of the context window in this,
    # the non-XWAV case.
    clip_index_in_file = 0
    while reader.tell() + clip_duration_samples < reader.frames:
      clip_rel_file = datetime.timedelta(seconds=reader.tell() /
                                         reader.samplerate)
      clip_metadata = ClipMetadata(
          filename=filename,
          sample_rate=sample_rate,
          duration=clip_duration,
          index_in_file=clip_index_in_file,
          start_relative_to_file=clip_rel_file,
          # Plain audio files carry no absolute timestamps.
          start_utc=None,
      )
      clip_samples = reader.read(
          clip_duration_samples, dtype='int16', always_2d=True)
      yield (clip_metadata, clip_samples)
      clip_index_in_file += 1


def audio_example(clip_metadata: ClipMetadata, waveform: np.array,
                  sample_rate: int, channel: int,
                  annotations: Iterable[ClipAnnotation]) -> tf.train.Example:
  """Constructs a TensorFlow Example with labeled audio.

  Args:
    clip_metadata: Passed through to multiple features: 'filename' bytes
      feature with the full path to the source audio file, for reference;
      'start_relative_to_file' float feature scalar with the offset of
      waveform from the start of the file; 'start_utc' float feature with
      seconds since the UNIX epoch until the start of waveform, missing when
      the original data does not provide timestamps.
    waveform: 'audio_raw_pcm16' bytes feature with this raw, 16-bit,
      little-endian PCM audio.
    sample_rate: 'sample_rate' int64 feature scalar with the sample rate for
      waveform. When the waveform has been resampled, this will not match
      clip_metadata.sample_rate, which pertains to the original file.
    channel: 'channel' int64 feature indicates the channel index from the
      source audio.
    annotations: 'annotation_begin', 'annotation_end', and 'annotation_label'
      features are parallel arrays, with each entry corresponding to one of
      these given annotations.

  Returns:
    A TensorFlow Example with features as documented in the Args section.
""" example = tf.train.Example() features = example.features.feature features[dataset.Features.AUDIO.value.name].bytes_list.value.append( waveform.astype('<i2').tobytes()) features[dataset.Features.SAMPLE_RATE.value.name].int64_list.value.append( sample_rate) features[dataset.Features.CHANNEL.value.name].int64_list.value.append(channel) features[dataset.Features.FILENAME.value.name].bytes_list.value.append( clip_metadata.filename.encode()) features[dataset.Features.START_RELATIVE_TO_FILE.value .name].float_list.value.append( clip_metadata.start_relative_to_file.total_seconds()) if clip_metadata.start_utc: features[dataset.Features.START_UTC.value.name].float_list.value.append( clip_metadata.start_utc.timestamp()) for annotation in annotations: features[ dataset.Features.ANNOTATION_BEGIN.value.name].float_list.value.append( annotation.begin.total_seconds()) features[ dataset.Features.ANNOTATION_END.value.name].float_list.value.append( annotation.end.total_seconds()) features[ dataset.Features.ANNOTATION_LABEL.value.name].bytes_list.value.append( annotation.label.encode()) return example class AnnotationTrees: def __init__(self, annotations: Iterable[Annotation]): self._file_tree = intervaltree.IntervalTree() self._utc_tree = intervaltree.IntervalTree() self._empty_count = 0 for annotation in annotations: if annotation.end <= annotation.begin: self._empty_count += 1 continue is_utc = isinstance(annotation, UTCAnnotation) is_file = isinstance(annotation, FileAnnotation) if is_utc: self._utc_tree[annotation.begin:annotation.end] = annotation elif is_file: self._file_tree[annotation.begin:annotation.end] = annotation else: assert is_utc or is_file def annotate_clip(self, clip_metadata: ClipMetadata) -> Iterable[ClipAnnotation]: file_intervals = self._file_tree[clip_metadata.start_relative_to_file:( clip_metadata.start_relative_to_file + clip_metadata.duration)] if clip_metadata.start_utc: utc_intervals = self._utc_tree[clip_metadata.start_utc:( clip_metadata.start_utc + 
clip_metadata.duration)] else: utc_intervals = [] for interval in itertools.chain(iter(file_intervals), iter(utc_intervals)): annotation = interval.data clip_annotation = annotation.make_relative(clip_metadata) if clip_annotation: yield clip_annotation def make_audio_examples( keyed_join_result: Tuple[str, JoinResult], clip_duration: datetime.timedelta, resample_rate: int = 16000) -> Iterable[tf.train.Example]: """Converts audio/annotation join to TensorFlow Examples. This is the core method of this pipeline. Given a join of exactly one audio stream to zero or more annotations, it reads the audio stream one clip at a time, expresses the endpoints of the annotations for that clip as seconds relative to the clip start, and emits the labeled clip as a TensorFlow Example. Args: keyed_join_result: A pair of a fully-qualified path to an audio file and a JoinResult. The JoinResult is a dict with keys 'audio' and 'annotations'. The 'audio' key maps to at most one file reader to be handled by :py:mod:`soundfile`. The 'annotations' key maps to zero or more Annotation objects corresponding to the same fully-qualified path as the audio stream. clip_duration: The intended duration of the audio clip in each emitted Example. resample_rate: Sample rate for the audio in the emitted Examples. The input audio stream will be resampled if the sample rate does not match. Yields: tf.train.Example with annotated PCM audio. For the feature specification of these Examples, see :py:func:`audio_example`. """ filename, join_result = keyed_join_result del filename # Trust readable_file more. 
readable_file = _only_element(join_result['audio']) if not readable_file: beam.metrics.Metrics.counter('examplegen', 'audio_file_not_found').inc() return filename = readable_file.metadata.path annotation_trees = AnnotationTrees(join_result['annotations']) with readable_file.open() as infile: for clip_metadata, clip_samples in generate_clips(filename, infile, clip_duration): if np.round(clip_metadata.sample_rate) == np.round(resample_rate): pcm_audio = clip_samples else: pcm_audio = resampy.resample( clip_samples, clip_metadata.sample_rate, resample_rate, axis=0, ) clip_annotations = annotation_trees.annotate_clip(clip_metadata) for channel, waveform in enumerate(pcm_audio.T): # TODO(mattharvey): Option for annotations to pertain to either or all # channels or a specific channel. beam.metrics.Metrics.counter('examplegen', 'examples-generated').inc() yield audio_example( clip_metadata=clip_metadata, waveform=waveform, sample_rate=resample_rate, channel=channel, annotations=clip_annotations, ) def extension_filter(kept_extensions: Iterable[str]) -> beam.PTransform: """Returns a Beam filter that keeps strs with given file extensions.""" def keep_fn(file_metadata: beam.io.filesystem.FileMetadata) -> bool: _, extension = os.path.splitext(file_metadata.path) return extension in kept_extensions return beam.Filter(keep_fn) def run(configuration: Configuration, options: beam.options.pipeline_options.PipelineOptions) -> None: """Runs the examplegen Beam pipeline. Args: configuration: Input and output paths and settings related to feature extraction. options: Settings related to the Beam runner. (See beam.apache.org.) 
Returns: None """ bind_make_audio_examples = functools.partial( make_audio_examples, clip_duration=datetime.timedelta( seconds=configuration.clip_duration_seconds), resample_rate=configuration.resample_rate, ) with beam.Pipeline(options=options) as pipeline: all_files = pipeline | 'ListFiles' >> fileio.MatchFiles( configuration.input_directory + '/**') audio_files = all_files | 'MatchAudio' >> extension_filter( {'.wav', '.flac'}) csv_files = all_files | 'MatchCsv' >> extension_filter({'.csv'}) audio_streams = ( audio_files | 'ReadAudio' >> fileio.ReadMatches() | 'KeyAudioByFilename' >> beam.Map(lambda r: (r.metadata.path, r))) annotations = ( csv_files | 'ReadCsv' >> fileio.ReadMatches() | 'ParseCsv' >> beam.ParDo(beam_read_annotations)) labeled_streams = ({ 'audio': audio_streams, 'annotations': annotations, } | 'JoinOnFilename' >> beam.CoGroupByKey()) examples = labeled_streams | 'MakeExample' >> beam.FlatMap( bind_make_audio_examples) # To make sure training examples within a batch are as close as possible to # being independent, shuffle at the level of the entire pipeline run. examples = examples | beam.Reshuffle() _ = examples | 'WriteRecords' >> beam.io.tfrecordio.WriteToTFRecord( os.path.join(configuration.output_directory, 'tfrecords'), coder=beam.coders.ProtoCoder(tf.train.Example)) # TODO(mattharvey): Implement customized text formatting for metadata.csv. _ = audio_files | 'WriteListing' >> beam.io.textio.WriteToText( os.path.join(configuration.output_directory, 'audio_files')) return pipeline.run()
import base64
import json
import os
import posixpath
import re
from time import time
from wsgiref.handlers import format_date_time

from olympia.constants import base

from services.utils import log_configure, log_exception, mypool
from services.utils import settings, user_media_path, user_media_url

# This has to be imported after the settings (utils).
from django_statsd.clients import statsd

# Configure the log.
log_configure()


class ThemeUpdate(object):
    """Builds the JSON payload for a single theme's update-check request."""

    def __init__(self, locale, id_, qs=None):
        self.conn, self.cursor = None, None
        self.from_gp = qs == 'src=gp'
        self.data = {
            'locale': locale,
            'id': id_,
            # If we came from getpersonas.com, then look up by `persona_id`.
            # Otherwise, look up `addon_id`.
            'primary_key': 'persona_id' if self.from_gp else 'addon_id',
            'atype': base.ADDON_PERSONA,
            'row': {}
        }
        if not self.cursor:
            self.conn = mypool.connect()
            self.cursor = self.conn.cursor()

    def base64_icon(self, addon_id):
        """Return the theme icon base64-encoded, or '' when unavailable."""
        path = self.image_path('icon.jpg')
        if not os.path.isfile(path):
            return ''

        try:
            # BUG FIX: the icon is a JPEG; it must be read in binary mode
            # ('rb'), not text mode, or the payload can be corrupted.
            with open(path, 'rb') as f:
                return base64.b64encode(f.read())
        # `except ... as` is valid on Python 2.6+ and 3.x, unlike the old
        # comma form; e.args works identically on both.
        except IOError as e:
            if len(e.args) == 1:
                log_exception('I/O error: {0}'.format(e.args[0]))
            else:
                log_exception('I/O error({0}): {1}'.format(e.args[0],
                                                           e.args[1]))
            return ''

    def get_headers(self, length):
        """Response headers: JSON content, cacheable for one hour."""
        return [('Cache-Control', 'public, max-age=3600'),
                ('Content-Length', str(length)),
                ('Content-Type', 'application/json'),
                ('Expires', format_date_time(time() + 3600)),
                ('Last-Modified', format_date_time(time()))]

    def get_update(self):
        """Fetch the theme row for self.data['id'] into self.data['row'].

        Returns True when a row was found, False otherwise.

        TODO:

        * When themes have versions let's not use `personas.approve` as a
          `modified` timestamp. Either set this during theme approval, or
          let's keep a hash of the header and footer.

        * Do a join on `addons_users` to get the actual correct user. We're
          cheating and setting `personas.display_username` during
          submission/management heh. But `personas.author` and
          `personas.display_username` are not what we want.
        """
        # The str.format below only interpolates the column name, which is
        # chosen internally in __init__ ('persona_id' or 'addon_id'), so it
        # is not an injection vector; user data goes through %(...)s params.
        sql = """
        SELECT p.persona_id, a.id, a.slug, v.version,
            t_name.localized_string AS name,
            t_desc.localized_string AS description,
            p.display_username, p.header,
            p.footer, p.accentcolor, p.textcolor,
            UNIX_TIMESTAMP(a.modified) AS modified
        FROM addons AS a
        LEFT JOIN personas AS p ON p.addon_id=a.id
        LEFT JOIN versions AS v ON a.current_version=v.id
        LEFT JOIN translations AS t_name
            ON t_name.id=a.name AND t_name.locale=%(locale)s
        LEFT JOIN translations AS t_desc
            ON t_desc.id=a.summary AND t_desc.locale=%(locale)s
        WHERE p.{primary_key}=%(id)s
        AND a.addontype_id=%(atype)s
        AND a.status=4
        AND a.inactive=0
        """.format(primary_key=self.data['primary_key'])

        self.cursor.execute(sql, self.data)
        row = self.cursor.fetchone()

        def row_to_dict(row):
            return dict(zip((
                'persona_id', 'addon_id', 'slug', 'current_version', 'name',
                'description', 'username', 'header', 'footer', 'accentcolor',
                'textcolor', 'modified'),
                list(row)))

        if row:
            self.data['row'] = row_to_dict(row)

            # Fall back to `en-US` if the name was null for our locale.
            # TODO: Write smarter SQL and don't rerun the whole query.
            if not self.data['row']['name']:
                self.data['locale'] = 'en-US'
                self.cursor.execute(sql, self.data)
                row = self.cursor.fetchone()
                if row:
                    self.data['row'] = row_to_dict(row)

            return True

        return False

    # TODO: Cache on row['modified']
    def get_json(self):
        """Return the JSON update document, or None when the theme is gone."""
        if not self.get_update():
            # Persona not found.
            return

        row = self.data['row']
        accent = row.get('accentcolor')
        text = row.get('textcolor')
        id_ = str(row[self.data['primary_key']])

        data = {
            'id': id_,
            'name': row.get('name'),
            'description': row.get('description'),
            # TODO: Change this to be `addons_users.user.username`.
            'author': row.get('username'),
            # TODO: Change this to be `addons_users.user.display_name`.
            'username': row.get('username'),
            'headerURL': self.image_url(row['header']),
            'footerURL': self.image_url(row['footer']),
            'detailURL': self.locale_url(settings.SITE_URL,
                                         '/addon/%s/' % row['slug']),
            'previewURL': self.image_url('preview.png'),
            'iconURL': self.image_url('icon.png'),
            'dataurl': self.base64_icon(row['addon_id']),
            'accentcolor': '#%s' % accent if accent else None,
            'textcolor': '#%s' % text if text else None,
            'updateURL': self.locale_url(settings.VAMO_URL,
                                         '/themes/update-check/' + id_),
            # 04-25-2013: Bumped for GP migration so we get new `updateURL`s.
            # NOTE(review): the key is always present from row_to_dict, so
            # this .get default of 0 only matters if the value is falsy.
            'version': row.get('current_version', 0)
        }

        # If this theme was originally installed from getpersonas.com,
        # we have to use the `<persona_id>?src=gp` version of the
        # `updateURL`.
        if self.from_gp:
            data['updateURL'] += '?src=gp'

        return json.dumps(data)

    def image_path(self, filename):
        """Filesystem path of an image asset for the current theme row."""
        row = self.data['row']

        # Special cased for non-AMO-uploaded themes imported from
        # getpersonas.
        if row['persona_id'] != 0:
            if filename == 'preview.png':
                filename = 'preview.jpg'
            elif filename == 'icon.png':
                filename = 'preview_small.jpg'

        return os.path.join(user_media_path('addons'),
                            str(row['addon_id']), filename)

    def image_url(self, filename):
        """Public URL of an image asset, cache-busted on `modified`."""
        row = self.data['row']

        # Special cased for non-AMO-uploaded themes imported from
        # getpersonas.
        if row['persona_id'] != 0:
            if filename == 'preview.png':
                filename = 'preview.jpg'
            elif filename == 'icon.png':
                filename = 'preview_small.jpg'

        image_url = posixpath.join(user_media_url('addons'),
                                   str(row['addon_id']), filename or '')
        modified = int(row['modified']) if row['modified'] else 0

        return '%s?%s' % (image_url, modified)

    def locale_url(self, domain, url):
        """Prefix `url` with the domain and the request locale."""
        return '%s/%s%s' % (domain, self.data.get('locale', 'en-US'), url)


# Raw string so `\d` is a regex escape, not a (deprecated) string escape.
url_re = re.compile(r'(?P<locale>.+)?/themes/update-check/(?P<id>\d+)$')


def application(environ, start_response):
    """
    Developing locally?

        gunicorn -b 0.0.0.0:7000 -w 12 -k sync -t 90 --max-requests 5000 \
            -n gunicorn-theme_update services.wsgi.theme_update:application
    """
    status = '200 OK'
    with statsd.timer('services.theme_update'):
        try:
            locale, id_ = url_re.match(environ['PATH_INFO']).groups()
            locale = (locale or 'en-US').lstrip('/')
            id_ = int(id_)
        except AttributeError:  # URL path incorrect.
            start_response('404 Not Found', [])
            return ['']

        try:
            update = ThemeUpdate(locale, id_, environ.get('QUERY_STRING'))
            output = update.get_json()
            if not output:
                start_response('404 Not Found', [])
                return ['']
            start_response(status, update.get_headers(len(output)))
        except Exception:
            log_exception(environ['PATH_INFO'])
            raise

    return [output]
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from .model import Model from .parameterization.variational import VariationalPosterior from .mapping import Mapping from .. import likelihoods from .. import kern from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation from ..util.normalizer import Standardize from paramz import ObsAr import logging import warnings logger = logging.getLogger("GP") class GP(Model): """ General purpose Gaussian process model :param X: input observations :param Y: output observations :param kernel: a GPy kernel :param likelihood: a GPy likelihood :param inference_method: The :class:`~GPy.inference.latent_function_inference.LatentFunctionInference` inference method to use for this GP :rtype: model object :param Norm normalizer: normalize the outputs Y. Prediction will be un-normalized using this normalizer. If normalizer is True, we will normalize using Standardize. If normalizer is False, no normalization will be done. .. 
Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, Y, kernel, likelihood, mean_function=None, inference_method=None, name='gp', Y_metadata=None, normalizer=False): super(GP, self).__init__(name) assert X.ndim == 2 if isinstance(X, (ObsAr, VariationalPosterior)): self.X = X.copy() else: self.X = ObsAr(X) assert Y.ndim == 2 logger.info("initializing Y") if normalizer is True: self.normalizer = Standardize() elif normalizer is False: self.normalizer = None else: self.normalizer = normalizer if self.normalizer is not None: self.normalizer.scale_by(Y) self.Y_normalized = ObsAr(self.normalizer.normalize(Y)) self.Y = Y elif isinstance(Y, np.ndarray): self.Y = ObsAr(Y) self.Y_normalized = self.Y else: self.Y = Y self.Y_normalized = self.Y if Y.shape[0] != self.num_data: #There can be cases where we want inputs than outputs, for example if we have multiple latent #function values warnings.warn("There are more rows in your input data X, \ than in your output data Y, be VERY sure this is what you want") _, self.output_dim = self.Y.shape assert ((Y_metadata is None) or isinstance(Y_metadata, dict)) self.Y_metadata = Y_metadata assert isinstance(kernel, kern.Kern) #assert self.input_dim == kernel.input_dim self.kern = kernel assert isinstance(likelihood, likelihoods.Likelihood) self.likelihood = likelihood if self.kern._effective_input_dim != self.X.shape[1]: warnings.warn("Your kernel has a different input dimension {} then the given X dimension {}. 
Be very sure this is what you want and you have not forgotten to set the right input dimenion in your kernel".format(self.kern._effective_input_dim, self.X.shape[1])) #handle the mean function self.mean_function = mean_function if mean_function is not None: assert isinstance(self.mean_function, Mapping) assert mean_function.input_dim == self.input_dim assert mean_function.output_dim == self.output_dim self.link_parameter(mean_function) #find a sensible inference method logger.info("initializing inference method") if inference_method is None: if isinstance(likelihood, likelihoods.Gaussian) or isinstance(likelihood, likelihoods.MixedNoise): inference_method = exact_gaussian_inference.ExactGaussianInference() else: inference_method = expectation_propagation.EP() print("defaulting to " + str(inference_method) + " for latent function inference") self.inference_method = inference_method logger.info("adding kernel and likelihood as parameters") self.link_parameter(self.kern) self.link_parameter(self.likelihood) self.posterior = None def to_dict(self, save_data=True): """ Convert the object into a json serializable dictionary. Note: It uses the private method _save_to_input_dict of the parent. 
:param boolean save_data: if true, it adds the training data self.X and self.Y to the dictionary :return dict: json serializable dictionary containing the needed information to instantiate the object """ input_dict = super(GP, self)._save_to_input_dict() input_dict["class"] = "GPy.core.GP" if not save_data: input_dict["X"] = None input_dict["Y"] = None else: try: input_dict["X"] = self.X.values.tolist() except: input_dict["X"] = self.X.tolist() try: input_dict["Y"] = self.Y.values.tolist() except: input_dict["Y"] = self.Y.tolist() input_dict["kernel"] = self.kern.to_dict() input_dict["likelihood"] = self.likelihood.to_dict() if self.mean_function is not None: input_dict["mean_function"] = self.mean_function.to_dict() input_dict["inference_method"] = self.inference_method.to_dict() # TODO: We should create a Metadata class if self.Y_metadata is not None: # make Y_metadata serializable input_dict["Y_metadata"] = {k: self.Y_metadata[k].tolist() for k in self.Y_metadata.keys()} if self.normalizer is not None: input_dict["normalizer"] = self.normalizer.to_dict() return input_dict @staticmethod def _format_input_dict(input_dict, data=None): import GPy import numpy as np if (input_dict['X'] is None) or (input_dict['Y'] is None): assert(data is not None) input_dict["X"], input_dict["Y"] = np.array(data[0]), np.array(data[1]) elif data is not None: warnings.warn("WARNING: The model has been saved with X,Y! 
The original values are being overridden!") input_dict["X"], input_dict["Y"] = np.array(data[0]), np.array(data[1]) else: input_dict["X"], input_dict["Y"] = np.array(input_dict['X']), np.array(input_dict['Y']) input_dict["kernel"] = GPy.kern.Kern.from_dict(input_dict["kernel"]) input_dict["likelihood"] = GPy.likelihoods.likelihood.Likelihood.from_dict(input_dict["likelihood"]) mean_function = input_dict.get("mean_function") if mean_function is not None: input_dict["mean_function"] = GPy.core.mapping.Mapping.from_dict(mean_function) else: input_dict["mean_function"] = mean_function input_dict["inference_method"] = GPy.inference.latent_function_inference.LatentFunctionInference.from_dict(input_dict["inference_method"]) # converts Y_metadata from serializable to array. We should create a Metadata class Y_metadata = input_dict.get("Y_metadata") if isinstance(Y_metadata, dict): input_dict["Y_metadata"] = {k: np.array(Y_metadata[k]) for k in Y_metadata.keys()} else: input_dict["Y_metadata"] = Y_metadata normalizer = input_dict.get("normalizer") if normalizer is not None: input_dict["normalizer"] = GPy.util.normalizer._Norm.from_dict(normalizer) else: input_dict["normalizer"] = normalizer return input_dict @staticmethod def _build_from_input_dict(input_dict, data=None): input_dict = GP._format_input_dict(input_dict, data) return GP(**input_dict) def save_model(self, output_filename, compress=True, save_data=True): self._save_model(output_filename, compress=True, save_data=True) # The predictive variable to be used to predict using the posterior object's # woodbury_vector and woodbury_inv is defined as predictive_variable # as long as the posterior has the right woodbury entries. # It is the input variable used for the covariance between # X_star and the posterior of the GP. # This is usually just a link to self.X (full GP) or self.Z (sparse GP). 
# Make sure to name this variable and the predict functions will "just work" # In maths the predictive variable is: # K_{xx} - K_{xp}W_{pp}^{-1}K_{px} # W_{pp} := \texttt{Woodbury inv} # p := _predictive_variable @property def _predictive_variable(self): return self.X @property def num_data(self): return self.X.shape[0] @property def input_dim(self): return self.X.shape[1] def set_XY(self, X=None, Y=None): """ Set the input / output data of the model This is useful if we wish to change our existing data but maintain the same model :param X: input observations :type X: np.ndarray :param Y: output observations :type Y: np.ndarray """ self.update_model(False) if Y is not None: if self.normalizer is not None: self.normalizer.scale_by(Y) self.Y_normalized = ObsAr(self.normalizer.normalize(Y)) self.Y = Y else: self.Y = ObsAr(Y) self.Y_normalized = self.Y if X is not None: if self.X in self.parameters: # LVM models if isinstance(self.X, VariationalPosterior): assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!" index = self.X._parent_index_ self.unlink_parameter(self.X) self.X = X self.link_parameter(self.X, index=index) else: index = self.X._parent_index_ self.unlink_parameter(self.X) from ..core import Param self.X = Param('latent mean', X) self.link_parameter(self.X, index=index) else: self.X = ObsAr(X) self.update_model(True) def set_X(self,X): """ Set the input data of the model :param X: input observations :type X: np.ndarray """ self.set_XY(X=X) def set_Y(self,Y): """ Set the output data of the model :param X: output observations :type X: np.ndarray """ self.set_XY(Y=Y) def parameters_changed(self): """ Method that is called upon any changes to :class:`~GPy.core.parameterization.param.Param` variables within the model. In particular in the GP class this method re-performs inference, recalculating the posterior and log marginal likelihood and gradients of the model .. 
warning:: This method is not designed to be called manually, the framework is set up to automatically call this method upon changes to parameters, if you call this method yourself, there may be unexpected consequences. """ self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.mean_function, self.Y_metadata) self.likelihood.update_gradients(self.grad_dict['dL_dthetaL']) self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X) if self.mean_function is not None: self.mean_function.update_gradients(self.grad_dict['dL_dm'], self.X) def log_likelihood(self): """ The log marginal likelihood of the model, :math:`p(\mathbf{y})`, this is the objective function of the model being optimised """ return self._log_marginal_likelihood def _raw_predict(self, Xnew, full_cov=False, kern=None): """ For making predictions, does not account for normalization or likelihood full_cov is a boolean which defines whether the full covariance matrix of the prediction is computed. If full_cov is False (default), only the diagonal of the covariance is returned. .. math:: p(f*|X*, X, Y) = \int^{\inf}_{\inf} p(f*|f,X*)p(f|X,Y) df = N(f*| K_{x*x}(K_{xx} + \Sigma)^{-1}Y, K_{x*x*} - K_{xx*}(K_{xx} + \Sigma)^{-1}K_{xx*} \Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance} """ mu, var = self.posterior._raw_predict(kern=self.kern if kern is None else kern, Xnew=Xnew, pred_var=self._predictive_variable, full_cov=full_cov) if self.mean_function is not None: mu += self.mean_function.f(Xnew) return mu, var def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None, likelihood=None, include_likelihood=True): """ Predict the function(s) at the new point(s) Xnew. This includes the likelihood variance added to the predicted underlying function (usually referred to as f). 
In order to predict without adding in the likelihood give `include_likelihood=False`, or refer to self.predict_noiseless(). :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray (Nnew x self.input_dim) :param full_cov: whether to return the full covariance matrix, or just the diagonal :type full_cov: bool :param Y_metadata: metadata about the predicting point to pass to the likelihood :param kern: The kernel to use for prediction (defaults to the model kern). this is useful for examining e.g. subprocesses. :param include_likelihood: Whether or not to add likelihood noise to the predicted underlying latent function f. :type include_likelihood: bool :returns: (mean, var): mean: posterior mean, a Numpy array, Nnew x self.input_dim var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise If full_cov and self.input_dim > 1, the return shape of var is Nnew x Nnew x self.input_dim. If self.input_dim == 1, the return shape is Nnew x Nnew. This is to allow for different normalizations of the output dimensions. Note: If you want the predictive quantiles (e.g. 95% confidence interval) use :py:func:`~GPy.core.gp.GP.predict_quantiles`. """ # Predict the latent function values mean, var = self._raw_predict(Xnew, full_cov=full_cov, kern=kern) if include_likelihood: # now push through likelihood if likelihood is None: likelihood = self.likelihood mean, var = likelihood.predictive_values(mean, var, full_cov, Y_metadata=Y_metadata) if self.normalizer is not None: mean = self.normalizer.inverse_mean(mean) # We need to create 3d array for the full covariance matrix with # multiple outputs. 
if full_cov & (mean.shape[1] > 1): var = self.normalizer.inverse_covariance(var) else: var = self.normalizer.inverse_variance(var) return mean, var def predict_noiseless(self, Xnew, full_cov=False, Y_metadata=None, kern=None): """ Convenience function to predict the underlying function of the GP (often referred to as f) without adding the likelihood variance on the prediction function. This is most likely what you want to use for your predictions. :param Xnew: The points at which to make a prediction :type Xnew: np.ndarray (Nnew x self.input_dim) :param full_cov: whether to return the full covariance matrix, or just the diagonal :type full_cov: bool :param Y_metadata: metadata about the predicting point to pass to the likelihood :param kern: The kernel to use for prediction (defaults to the model kern). this is useful for examining e.g. subprocesses. :returns: (mean, var): mean: posterior mean, a Numpy array, Nnew x self.input_dim var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise If full_cov and self.input_dim > 1, the return shape of var is Nnew x Nnew x self.input_dim. If self.input_dim == 1, the return shape is Nnew x Nnew. This is to allow for different normalizations of the output dimensions. Note: If you want the predictive quantiles (e.g. 95% confidence interval) use :py:func:`~GPy.core.gp.GP.predict_quantiles`. 
""" return self.predict(Xnew, full_cov, Y_metadata, kern, None, False) def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, kern=None, likelihood=None): """ Get the predictive quantiles around the prediction at X :param X: The points at which to make a prediction :type X: np.ndarray (Xnew x self.input_dim) :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval :type quantiles: tuple :param kern: optional kernel to use for prediction :type predict_kw: dict :returns: list of quantiles for each X and predictive quantiles for interval combination :rtype: [np.ndarray (Xnew x self.output_dim), np.ndarray (Xnew x self.output_dim)] """ m, v = self._raw_predict(X, full_cov=False, kern=kern) if likelihood is None: likelihood = self.likelihood quantiles = likelihood.predictive_quantiles(m, v, quantiles, Y_metadata=Y_metadata) if self.normalizer is not None: quantiles = [self.normalizer.inverse_mean(q) for q in quantiles] return quantiles def predictive_gradients(self, Xnew, kern=None): """ Compute the derivatives of the predicted latent function with respect to X* Given a set of points at which to predict X* (size [N*,Q]), compute the derivatives of the mean and variance. Resulting arrays are sized: dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one). Note that this is not the same as computing the mean and variance of the derivative of the function! 
dv_dX* -- [N*, Q], (since all outputs have the same variance) :param X: The points at which to get the predictive gradients :type X: np.ndarray (Xnew x self.input_dim) :returns: dmu_dX, dv_dX :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q) ] """ if kern is None: kern = self.kern mean_jac = np.empty((Xnew.shape[0], Xnew.shape[1], self.output_dim)) for i in range(self.output_dim): mean_jac[:, :, i] = kern.gradients_X( self.posterior.woodbury_vector[:, i:i+1].T, Xnew, self._predictive_variable) # Gradients wrt the diagonal part k_{xx} dv_dX = kern.gradients_X_diag(np.ones(Xnew.shape[0]), Xnew) # Grads wrt 'Schur' part K_{xf}K_{ff}^{-1}K_{fx} if self.posterior.woodbury_inv.ndim == 3: var_jac = np.empty(dv_dX.shape + (self.posterior.woodbury_inv.shape[2],)) var_jac[:] = dv_dX[:, :, None] for i in range(self.posterior.woodbury_inv.shape[2]): alpha = -2.*np.dot(kern.K(Xnew, self._predictive_variable), self.posterior.woodbury_inv[:, :, i]) var_jac[:, :, i] += kern.gradients_X(alpha, Xnew, self._predictive_variable) else: var_jac = dv_dX alpha = -2.*np.dot(kern.K(Xnew, self._predictive_variable), self.posterior.woodbury_inv) var_jac += kern.gradients_X(alpha, Xnew, self._predictive_variable) if self.normalizer is not None: mean_jac = self.normalizer.inverse_mean(mean_jac) \ - self.normalizer.inverse_mean(0.) if self.output_dim > 1: var_jac = self.normalizer.inverse_covariance(var_jac) else: var_jac = self.normalizer.inverse_variance(var_jac) return mean_jac, var_jac def predict_jacobian(self, Xnew, kern=None, full_cov=False): """ Compute the derivatives of the posterior of the GP. Given a set of points at which to predict X* (size [N*,Q]), compute the mean and variance of the derivative. Resulting arrays are sized: dL_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one). Note that this is the mean and variance of the derivative, not the derivative of the mean and variance! 
(See predictive_gradients for that) dv_dX* -- [N*, Q], (since all outputs have the same variance) If there is missing data, it is not implemented for now, but there will be one output variance per output dimension. :param X: The points at which to get the predictive gradients. :type X: np.ndarray (Xnew x self.input_dim) :param kern: The kernel to compute the jacobian for. :param boolean full_cov: whether to return the cross-covariance terms between the N* Jacobian vectors :returns: dmu_dX, dv_dX :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q,(D)) ] """ if kern is None: kern = self.kern mean_jac = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim)) for i in range(self.output_dim): mean_jac[:,:,i] = kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self._predictive_variable) dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1])) one = np.ones((1,1)) for i in range(self._predictive_variable.shape[0]): dK_dXnew_full[i] = kern.gradients_X(one, Xnew, self._predictive_variable[[i]]) if full_cov: dK2_dXdX = kern.gradients_XX(one, Xnew) else: dK2_dXdX = kern.gradients_XX_diag(one, Xnew) #dK2_dXdX = np.zeros((Xnew.shape[0], Xnew.shape[1], Xnew.shape[1])) #for i in range(Xnew.shape[0]): # dK2_dXdX[i:i+1,:,:] = kern.gradients_XX(one, Xnew[i:i+1,:]) def compute_cov_inner(wi): if full_cov: var_jac = dK2_dXdX - np.einsum('qnm,msr->nsqr', dK_dXnew_full.T.dot(wi), dK_dXnew_full) # n,s = Xnew.shape[0], m = pred_var.shape[0] else: var_jac = dK2_dXdX - np.einsum('qnm,mnr->nqr', dK_dXnew_full.T.dot(wi), dK_dXnew_full) return var_jac if self.posterior.woodbury_inv.ndim == 3: # Missing data: if full_cov: var_jac = np.empty((Xnew.shape[0],Xnew.shape[0],Xnew.shape[1],Xnew.shape[1],self.output_dim)) for d in range(self.posterior.woodbury_inv.shape[2]): var_jac[:, :, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d]) else: var_jac = np.empty((Xnew.shape[0],Xnew.shape[1],Xnew.shape[1],self.output_dim)) for d in 
range(self.posterior.woodbury_inv.shape[2]): var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d]) else: var_jac = compute_cov_inner(self.posterior.woodbury_inv) return mean_jac, var_jac def predict_wishart_embedding(self, Xnew, kern=None, mean=True, covariance=True): """ Predict the wishart embedding G of the GP. This is the density of the input of the GP defined by the probabilistic function mapping f. G = J_mean.T*J_mean + output_dim*J_cov. :param array-like Xnew: The points at which to evaluate the magnification. :param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification. Supplying only a part of the learning kernel gives insights into the density of the specific kernel part of the input function. E.g. one can see how dense the linear part of a kernel is compared to the non-linear part etc. """ if kern is None: kern = self.kern mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False) mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac) Sigma = np.zeros(mumuT.shape) if var_jac.ndim == 4: # Missing data Sigma = var_jac.sum(-1) else: Sigma = self.output_dim*var_jac G = 0. if mean: G += mumuT if covariance: G += Sigma return G def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True): warnings.warn("Wrong naming, use predict_wishart_embedding instead. Will be removed in future versions!", DeprecationWarning) return self.predict_wishart_embedding(Xnew, kern, mean, covariance) def predict_magnification(self, Xnew, kern=None, mean=True, covariance=True, dimensions=None): """ Predict the magnification factor as sqrt(det(G)) for each point N in Xnew. :param bool mean: whether to include the mean of the wishart embedding. :param bool covariance: whether to include the covariance of the wishart embedding. 
:param array-like dimensions: which dimensions of the input space to use [defaults to self.get_most_significant_input_dimensions()[:2]] """ G = self.predict_wishart_embedding(Xnew, kern, mean, covariance) if dimensions is None: dimensions = self.get_most_significant_input_dimensions()[:2] G = G[:, dimensions][:,:,dimensions] from ..util.linalg import jitchol mag = np.empty(Xnew.shape[0]) for n in range(Xnew.shape[0]): try: mag[n] = np.sqrt(np.exp(2*np.sum(np.log(np.diag(jitchol(G[n, :, :])))))) except: mag[n] = np.sqrt(np.linalg.det(G[n, :, :])) return mag def posterior_samples_f(self,X, size=10, **predict_kwargs): """ Samples the posterior GP at the points X. :param X: The points at which to take the samples. :type X: np.ndarray (Nnew x self.input_dim) :param size: the number of a posteriori samples. :type size: int. :returns: set of simulations :rtype: np.ndarray (Nnew x D x samples) """ predict_kwargs["full_cov"] = True # Always use the full covariance for posterior samples. m, v = self._raw_predict(X, **predict_kwargs) if self.normalizer is not None: m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v) def sim_one_dim(m, v): return np.random.multivariate_normal(m, v, size).T if self.output_dim == 1: return sim_one_dim(m.flatten(), v)[:, np.newaxis, :] else: fsim = np.empty((X.shape[0], self.output_dim, size)) for d in range(self.output_dim): if v.ndim == 3: fsim[:, d, :] = sim_one_dim(m[:, d], v[:, :, d]) else: fsim[:, d, :] = sim_one_dim(m[:, d], v) return fsim def posterior_samples(self, X, size=10, Y_metadata=None, likelihood=None, **predict_kwargs): """ Samples the posterior GP at the points X. :param X: the points at which to take the samples. :type X: np.ndarray (Nnew x self.input_dim.) :param size: the number of a posteriori samples. :type size: int. :param noise_model: for mixed noise likelihood, the noise model to use in the samples. :type noise_model: integer. 
:returns: Ysim: set of simulations, :rtype: np.ndarray (D x N x samples) (if D==1 we flatten out the first dimension) """ fsim = self.posterior_samples_f(X, size, **predict_kwargs) if likelihood is None: likelihood = self.likelihood if fsim.ndim == 3: for d in range(fsim.shape[1]): fsim[:, d] = likelihood.samples(fsim[:, d], Y_metadata=Y_metadata) else: fsim = likelihood.samples(fsim, Y_metadata=Y_metadata) return fsim def input_sensitivity(self, summarize=True): """ Returns the sensitivity for each dimension of this model """ return self.kern.input_sensitivity(summarize=summarize) def get_most_significant_input_dimensions(self, which_indices=None): return self.kern.get_most_significant_input_dimensions(which_indices) def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs): """ Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors. kwargs are passed to the optimizer. They can be: :param max_iters: maximum number of function evaluations :type max_iters: int :param messages: whether to display during optimisation :type messages: bool :param optimizer: which optimizer to use (defaults to self.preferred optimizer), a range of optimisers can be found in :module:`~GPy.inference.optimization`, they include 'scg', 'lbfgs', 'tnc'. :type optimizer: string :param bool ipython_notebook: whether to use ipython notebook widgets or not. :param bool clear_after_finish: if in ipython notebook, we can clear the widgets after optimization. 
""" self.inference_method.on_optimization_start() try: ret = super(GP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs) except KeyboardInterrupt: print("KeyboardInterrupt caught, calling on_optimization_end() to round things up") self.inference_method.on_optimization_end() raise return ret def infer_newX(self, Y_new, optimize=True): """ Infer X for the new observed data *Y_new*. :param Y_new: the new observed data for inference :type Y_new: numpy.ndarray :param optimize: whether to optimize the location of new X (True by default) :type optimize: boolean :return: a tuple containing the posterior estimation of X and the model that optimize X :rtype: (:class:`~GPy.core.parameterization.variational.VariationalPosterior` and numpy.ndarray, :class:`~GPy.core.model.Model`) """ from ..inference.latent_function_inference.inferenceX import infer_newX return infer_newX(self, Y_new, optimize=optimize) def log_predictive_density(self, x_test, y_test, Y_metadata=None): """ Calculation of the log predictive density .. math: p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*}) :param x_test: test locations (x_{*}) :type x_test: (Nx1) array :param y_test: test observations (y_{*}) :type y_test: (Nx1) array :param Y_metadata: metadata associated with the test points """ mu_star, var_star = self._raw_predict(x_test) return self.likelihood.log_predictive_density(y_test, mu_star, var_star, Y_metadata=Y_metadata) def log_predictive_density_sampling(self, x_test, y_test, Y_metadata=None, num_samples=1000): """ Calculation of the log predictive density by sampling .. 
math: p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*}) :param x_test: test locations (x_{*}) :type x_test: (Nx1) array :param y_test: test observations (y_{*}) :type y_test: (Nx1) array :param Y_metadata: metadata associated with the test points :param num_samples: number of samples to use in monte carlo integration :type num_samples: int """ mu_star, var_star = self._raw_predict(x_test) return self.likelihood.log_predictive_density_sampling(y_test, mu_star, var_star, Y_metadata=Y_metadata, num_samples=num_samples) def _raw_posterior_covariance_between_points(self, X1, X2): """ Computes the posterior covariance between points. Does not account for normalization or likelihood :param X1: some input observations :param X2: other input observations :returns: cov: raw posterior covariance: k(X1,X2) - k(X1,X) G^{-1} K(X,X2) """ return self.posterior.covariance_between_points(self.kern, self.X, X1, X2) def posterior_covariance_between_points(self, X1, X2, Y_metadata=None, likelihood=None, include_likelihood=True): """ Computes the posterior covariance between points. Includes likelihood variance as well as normalization so that evaluation at (x,x) is consistent with model.predict :param X1: some input observations :param X2: other input observations :param Y_metadata: metadata about the predicting point to pass to the likelihood :param include_likelihood: Whether or not to add likelihood noise to the predicted underlying latent function f. :type include_likelihood: bool :returns: cov: posterior covariance, a Numpy array, Nnew x Nnew if self.output_dim == 1, and Nnew x Nnew x self.output_dim otherwise. 
""" cov = self._raw_posterior_covariance_between_points(X1, X2) if include_likelihood: # Predict latent mean and push through likelihood mean, _ = self._raw_predict(X1, full_cov=True) if likelihood is None: likelihood = self.likelihood _, cov = likelihood.predictive_values(mean, cov, full_cov=True, Y_metadata=Y_metadata) if self.normalizer is not None: if self.output_dim > 1: cov = self.normalizer.inverse_covariance(cov) else: cov = self.normalizer.inverse_variance(cov) return cov
from flask import Flask, request, session, jsonify, current_app, url_for, redirect
from flask.ext.pymongo import PyMongo
from flask.sessions import SessionInterface, SessionMixin
from pymongo import Connection, MongoClient
import os
from random import random
from uuid import uuid4
from datetime import datetime, timedelta
from werkzeug.datastructures import CallbackDict
import cgi
import hashlib

# By default, Flask stores sessions on the client's side in cookies
# It uses cryptography to prevent them from tampering with session data
# However, it doesn't encrypt the data, so the user can see what's being stored
# In this example I will show you how to store the session in the database


# This is a session object. It is nothing more than a dict with some extra methods
class MongoSession(CallbackDict, SessionMixin):
    def __init__(self, initial=None, sid=None):
        CallbackDict.__init__(self, initial)
        # sid is the random session id stored in the cookie and in Mongo.
        self.sid = sid
        self.modified = False


# Session interface is responsible for handling logic related to sessions
# i.e. storing, saving, etc
class MongoSessionInterface(SessionInterface):

    # Init connection
    def __init__(self, host='localhost', port=27017, db='', collection='sessions'):
        client = MongoClient(host, port)
        # Handle to the sessions collection used by open/save below.
        self.store = client[db][collection]

    def open_session(self, app, request):
        """Load the session identified by the request cookie, or start a new one."""
        # Get session id from the cookie
        sid = request.cookies.get(app.session_cookie_name)
        # If id is given (session was created)
        if sid:
            # Try to load a session from mongodb
            stored_session = self.store.find_one({'sid': sid})
            if stored_session:
                # Check if the session isn't expired
                if stored_session.get('expiration') > datetime.utcnow():
                    return MongoSession(initial=stored_session['data'],
                                        sid=stored_session['sid'])
        # If there was no session or it was expired...
        # Generate a random id and create an empty session
        sid = str(uuid4())
        return MongoSession(sid=sid)

    def save_session(self, app, session, response):
        """Persist the session to Mongo and refresh the session cookie."""
        domain = self.get_cookie_domain(app)
        # We're requested to delete the session
        if not session:
            response.delete_cookie(app.session_cookie_name, domain=domain)
            return
        # Refresh the session expiration time
        # First, use get_expiration_time from SessionInterface
        # If it fails, add 1 hour to current time
        if self.get_expiration_time(app, session):
            expiration = self.get_expiration_time(app, session)
        else:
            expiration = datetime.utcnow() + timedelta(hours=1)
        # Update the mongo document, where sid equals to session.sid
        # NOTE(review): Collection.update with upsert=True is the legacy
        # pymongo API; update_one(..., upsert=True) is its modern equivalent.
        self.store.update({'sid': session.sid},
                          {'sid': session.sid,
                           'data': session,
                           'expiration': expiration}, True)
        # Refresh the cookie
        response.set_cookie(app.session_cookie_name, session.sid,
                            expires=self.get_expiration_time(app, session),
                            httponly=True, domain=domain)


app = Flask(__name__)
app.session_interface = MongoSessionInterface(db='cah')
mongo = PyMongo(app)
# NOTE(review): pymongo.Connection is long-deprecated in favor of
# MongoClient (already imported above) -- confirm pymongo version pin.
connection = Connection()
db = connection.cah
black = db.black       # black (question) cards
white = db.white       # white (answer) cards
users = db.users
# NOTE(review): static, source-visible salts with unsalted-per-user sha512
# are weak for password storage; a per-user salt / KDF would be safer.
presalt = "MJoIcUvAbl6eWfR"
postsalt = "YkzU7VMRx3jAgHQ"


def resetDeck():
    """Drop both card collections and reload them from ./cards/*.txt files.

    Files ending in B.txt load into the black deck, W.txt into the white
    deck; each card gets a random 'rand' key used for random draws.
    """
    deckColl = 0
    carddir = "./cards/"
    black.remove()
    white.remove()
    for deckName in os.listdir(carddir):
        if deckName[-4:] == ".txt":
            # The character before ".txt" selects the target deck.
            if deckName[-5] == "B":
                deckColl = black
            if deckName[-5] == "W":
                deckColl = white
            deck = open(carddir + deckName)
            card = deck.readline()
            while(card):
                cardData = {}
                # Literal "\n" sequences in the file become real newlines.
                cardData['contents'] = card.strip("\n").replace("\\n", "\n");
                cardData['rand'] = random()
                if deckColl == black:
                    # Black cards record how many blanks they contain.
                    cardData['blanks'] = card.count('_')
                if cardData != {}:
                    deckColl.insert(cardData)
                card = deck.readline()
    return 1


def resetSessions():
    """Clear the current session and wipe all stored sessions."""
    session.clear()
    db.sessions.remove()
    return 1


def resetUsers():
    """Wipe the users collection."""
    users.remove()
    return 1


def drawCard(deck):
    """Draw one random card from `deck` using the precomputed 'rand' keys."""
    # NOTE(review): the bare except retries on ANY failure, and on an empty
    # collection this recurses forever (RecursionError); an explicit
    # IndexError catch with a bounded retry would be safer -- confirm.
    try:
        return deck.find({'rand': {'$gte': random()}},{'_id':0,'rand':0}).sort('rand')[0]
    except:
        return drawCard(deck)


def login(username, password):
    """Check credentials; on success populate the session. Returns 1/0."""
    # NOTE(review): cgi.escape on the raw password before hashing is unusual
    # (it HTML-escapes &, <, >) -- presumably mirrored in register(); any
    # change must be made in both places or existing logins break.
    password=hashlib.sha512(presalt+cgi.escape(password)+postsalt).hexdigest()
    user = users.find({'username':username, 'password':password})
    if(user.count()==1):
        session.clear()
        session['username'] = username
        session['userId'] = user[0]['_id']
        return 1
    else:
        return 0


def register(username, password):
    """Create a user if that username/password pair does not exist. Returns 1/0."""
    password=hashlib.sha512(presalt+cgi.escape(password)+postsalt).hexdigest()
    user = users.find({'username':username, 'password':password})
    if user.count()==0:
        users.insert({'username':username, 'password':password})
        return 1
    else:
        return 0


def startRound():
    # Placeholder -- game-round logic not implemented yet.
    return 1


@app.route('/draw/black')
def drawBlack():
    # Render a black card, converting newlines to <br> and blanks to lines.
    return drawCard(black)['contents'].replace("\n", "</br>").replace("_", "________")


@app.route('/draw/white')
def drawWhite():
    return drawCard(white)['contents']


@app.route('/api/reset')
def apiReset():
    # NOTE(review): unauthenticated endpoint that wipes decks, sessions and
    # users -- presumably development-only; confirm before deploying.
    resetDeck()
    resetSessions()
    resetUsers()
    return jsonify({'status':"success"})


@app.route('/api/register', methods=['POST'])
def apiRegister():
    # Register then immediately log the new user in; the register() result
    # itself is discarded.
    result = {}
    register(request.form['username'], request.form['password'])
    result['token'] = login(request.form['username'], request.form['password'])
    return jsonify(result)


@app.route('/api/login', methods=['POST'])
def apiLogin():
    result = {}
    result['token'] = login(request.form['username'], request.form['password'])
    return jsonify(result)


@app.route("/")
def indexPage():
    if 'username' in session:
        return session['username']
    else:
        return redirect(url_for('loginPage'))


@app.route("/logout")
def logoutPage():
    session.clear()
    return redirect(url_for('indexPage'))


@app.route("/login")
def loginPage():
    # Inline HTML login/register forms; note the password inputs are
    # type="text" in the original markup.
    if 'username' in session:
        return redirect(url_for('indexPage'))
    else:
        return '\
Login:\
<form name="login" action="/api/login" method="POST">\
Username: <input type="text" name="username">\
Password: <input type="text" name="password">\
<input type="submit" value="Submit">\
</form>\
Register:\
<form name="register" action="/api/register" method="POST">\
Username: <input type="text" name="username">\
Password: <input type="text" name="password">\
<input type="submit" value="Submit">\
</form>\
'

app.debug = True

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=90)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""
Starting point for routing EC2 requests.

"""

import urlparse

from eventlet.green import httplib
import webob
import webob.dec
import webob.exc

from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
from nova.auth import manager
from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova.openstack.common import cfg
from nova import utils
from nova import wsgi


LOG = logging.getLogger(__name__)

# Config options controlling auth lockout and keystone integration.
ec2_opts = [
    cfg.IntOpt('lockout_attempts',
               default=5,
               help='Number of failed auths before lockout.'),
    cfg.IntOpt('lockout_minutes',
               default=15,
               help='Number of minutes to lockout if triggered.'),
    cfg.IntOpt('lockout_window',
               default=15,
               help='Number of minutes for lockout window.'),
    cfg.StrOpt('keystone_ec2_url',
               default='http://localhost:5000/v2.0/ec2tokens',
               help='URL to get token from ec2 request.'),
    cfg.BoolOpt('ec2_private_dns_show_ip',
                default=False,
                help='Return the IP address as private dns hostname in '
                     'describe instances'),
    ]

FLAGS = flags.FLAGS
FLAGS.register_opts(ec2_opts)

flags.DECLARE('use_forwarded_for', 'nova.api.auth')


def ec2_error(req, request_id, code, message):
    """Build a 400 webob Response carrying an EC2-compatible XML error body."""
    LOG.error(_('%(code)s: %(message)s') % locals())
    resp = webob.Response()
    resp.status = 400
    resp.headers['Content-Type'] = 'text/xml'
    resp.body = str('<?xml version="1.0"?>\n'
                    '<Response><Errors><Error><Code>%s</Code>'
                    '<Message>%s</Message></Error></Errors>'
                    '<RequestID>%s</RequestID></Response>' %
                    (utils.utf8(code), utils.utf8(message),
                     utils.utf8(request_id)))
    return resp


## Fault Wrapper around all EC2 requests ##
class FaultWrapper(wsgi.Middleware):
    """Calls the middleware stack, captures any exceptions into faults."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        try:
            return req.get_response(self.application)
        except Exception as ex:
            # Any unhandled exception becomes a generic 500 fault.
            LOG.exception(_("FaultWrapper: %s"), unicode(ex))
            return faults.Fault(webob.exc.HTTPInternalServerError())


class RequestLogging(wsgi.Middleware):
    """Access-Log akin logging for all EC2 API requests."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        start = utils.utcnow()
        rv = req.get_response(self.application)
        self.log_request_completion(rv, req, start)
        return rv

    def log_request_completion(self, response, request, start):
        """Log one access-log line with timing, route, and result status."""
        apireq = request.environ.get('ec2.request', None)
        if apireq:
            controller = apireq.controller
            action = apireq.action
        else:
            controller = None
            action = None
        ctxt = request.environ.get('nova.context', None)
        delta = utils.utcnow() - start
        seconds = delta.seconds
        microseconds = delta.microseconds
        LOG.info(
            "%s.%ss %s %s %s %s:%s %s [%s] %s %s",
            seconds,
            microseconds,
            request.remote_addr,
            request.method,
            "%s%s" % (request.script_name, request.path_info),
            controller,
            action,
            response.status_int,
            request.user_agent,
            request.content_type,
            response.content_type,
            context=ctxt)


class Lockout(wsgi.Middleware):
    """Lockout for x minutes on y failed auths in a z minute period.

    x = lockout_timeout flag
    y = lockout_window flag
    z = lockout_attempts flag

    Uses memcached if lockout_memcached_servers flag is set, otherwise it
    uses a very simple in-process cache. Due to the simplicity of the
    implementation, the timeout window is started with the first failed
    request, so it will block if there are x failed logins within that
    period.

    There is a possible race condition where simultaneous requests could
    sneak in before the lockout hits, but this is extremely rare and would
    only result in a couple of extra failed attempts.
    """

    def __init__(self, application):
        """middleware can use fake for testing."""
        # Choose real memcached or the in-process stand-in with the same API.
        if FLAGS.memcached_servers:
            import memcache
        else:
            from nova.common import memorycache as memcache
        self.mc = memcache.Client(FLAGS.memcached_servers,
                                  debug=0)
        super(Lockout, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        access_key = str(req.params['AWSAccessKeyId'])
        failures_key = "authfailures-%s" % access_key
        failures = int(self.mc.get(failures_key) or 0)
        if failures >= FLAGS.lockout_attempts:
            detail = _("Too many failed authentications.")
            raise webob.exc.HTTPForbidden(detail=detail)
        res = req.get_response(self.application)
        if res.status_int == 403:
            failures = self.mc.incr(failures_key)
            if failures is None:
                # NOTE(vish): To use incr, failures has to be a string.
                self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
            elif failures >= FLAGS.lockout_attempts:
                lock_mins = FLAGS.lockout_minutes
                msg = _('Access key %(access_key)s has had %(failures)d'
                        ' failed authentications and will be locked out'
                        ' for %(lock_mins)d minutes.') % locals()
                LOG.warn(msg)
                self.mc.set(failures_key, str(failures),
                            time=FLAGS.lockout_minutes * 60)
        return res


class EC2Token(wsgi.Middleware):
    """Deprecated, only here to make merging easier."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Read request signature and access id.
        try:
            signature = req.params['Signature']
            access = req.params['AWSAccessKeyId']
        except KeyError, e:
            LOG.exception(e)
            raise webob.exc.HTTPBadRequest()

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        if "ec2" in FLAGS.keystone_ec2_url:
            LOG.warning("Configuration setting for keystone_ec2_url needs "
                        "to be updated to /tokens only. The /ec2 prefix is "
                        "being deprecated")
            # Authenticate the request.
            creds = {'ec2Credentials': {'access': access,
                                        'signature': signature,
                                        'host': req.host,
                                        'verb': req.method,
                                        'path': req.path,
                                        'params': auth_params,
                                       }}
        else:
            # Authenticate the request.
            creds = {'auth': {'OS-KSEC2:ec2Credentials': {'access': access,
                                                          'signature': signature,
                                                          'host': req.host,
                                                          'verb': req.method,
                                                          'path': req.path,
                                                          'params': auth_params,
                                                         }}}
        creds_json = utils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        # Disable "has no x member" pylint error
        # for httplib and urlparse
        # pylint: disable-msg=E1101
        o = urlparse.urlparse(FLAGS.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse().read()
        conn.close()

        # NOTE(vish): We could save a call to keystone by
        #             having keystone return token, tenant,
        #             user, and roles from this call.
        result = utils.loads(response)
        try:
            token_id = result['access']['token']['id']
        except (AttributeError, KeyError), e:
            LOG.exception(e)
            raise webob.exc.HTTPBadRequest()

        # Authenticated!
        req.headers['X-Auth-Token'] = token_id
        return self.application


class EC2KeystoneAuth(wsgi.Middleware):
    """Authenticate an EC2 request with keystone and convert to context."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        request_id = context.generate_request_id()
        signature = req.params.get('Signature')
        if not signature:
            msg = _("Signature not provided")
            return ec2_error(req, request_id, "Unauthorized", msg)
        access = req.params.get('AWSAccessKeyId')
        if not access:
            msg = _("Access key not provided")
            return ec2_error(req, request_id, "Unauthorized", msg)

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        cred_dict = {
            'access': access,
            'signature': signature,
            'host': req.host,
            'verb': req.method,
            'path': req.path,
            'params': auth_params,
        }
        # Legacy /ec2 endpoints take the bare credential dict; newer
        # /tokens endpoints wrap it in an 'auth' envelope.
        if "ec2" in FLAGS.keystone_ec2_url:
            creds = {'ec2Credentials': cred_dict}
        else:
            creds = {'auth': {'OS-KSEC2:ec2Credentials': cred_dict}}
        creds_json = utils.dumps(creds)
        headers = {'Content-Type': 'application/json'}

        o = urlparse.urlparse(FLAGS.keystone_ec2_url)
        if o.scheme == "http":
            conn = httplib.HTTPConnection(o.netloc)
        else:
            conn = httplib.HTTPSConnection(o.netloc)
        conn.request('POST', o.path, body=creds_json, headers=headers)
        response = conn.getresponse()
        data = response.read()
        if response.status != 200:
            if response.status == 401:
                msg = response.reason
            else:
                msg = _("Failure communicating with keystone")
            return ec2_error(req, request_id, "Unauthorized", msg)
        result = utils.loads(data)
        conn.close()

        try:
            token_id = result['access']['token']['id']
            user_id = result['access']['user']['id']
            project_id = result['access']['token']['tenant']['id']
            roles = [role['name'] for role
                     in result['access']['user']['roles']]
        except (AttributeError, KeyError), e:
            LOG.exception("Keystone failure: %s" % e)
            msg = _("Failure communicating with keystone")
            return ec2_error(req, request_id, "Unauthorized", msg)

        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        ctxt = context.RequestContext(user_id,
                                      project_id,
                                      roles=roles,
                                      auth_token=token_id,
                                      remote_address=remote_address)

        req.environ['nova.context'] = ctxt

        return self.application


class NoAuth(wsgi.Middleware):
    """Add user:project as 'nova.context' to WSGI environ."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if 'AWSAccessKeyId' not in req.params:
            raise webob.exc.HTTPBadRequest()
        # The access key encodes "user:project"; project falls back to user.
        user_id, _sep, project_id = req.params['AWSAccessKeyId'].partition(':')
        project_id = project_id or user_id
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For', remote_address)
        ctx = context.RequestContext(user_id,
                                     project_id,
                                     is_admin=True,
                                     remote_address=remote_address)

        req.environ['nova.context'] = ctx
        return self.application


class Authenticate(wsgi.Middleware):
    """Authenticate an EC2 request and add 'nova.context' to WSGI environ."""

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Read request signature and access id.
        try:
            signature = req.params['Signature']
            access = req.params['AWSAccessKeyId']
        except KeyError:
            raise webob.exc.HTTPBadRequest()

        # Make a copy of args for authentication and signature verification.
        auth_params = dict(req.params)
        # Not part of authentication args
        auth_params.pop('Signature')

        # Authenticate the request.
        authman = manager.AuthManager()
        try:
            (user, project) = authman.authenticate(
                access,
                signature,
                auth_params,
                req.method,
                req.host,
                req.path)
        # Be explicit for what exceptions are 403, the rest bubble as 500
        except (exception.NotFound, exception.NotAuthorized,
                exception.InvalidSignature) as ex:
            LOG.audit(_("Authentication Failure: %s"), unicode(ex))
            raise webob.exc.HTTPForbidden()

        # Authenticated!
        remote_address = req.remote_addr
        if FLAGS.use_forwarded_for:
            remote_address = req.headers.get('X-Forwarded-For',
                                             remote_address)
        roles = authman.get_active_roles(user, project)
        ctxt = context.RequestContext(user_id=user.id,
                                      project_id=project.id,
                                      is_admin=user.is_admin(),
                                      roles=roles,
                                      remote_address=remote_address)
        req.environ['nova.context'] = ctxt
        uname = user.name
        pname = project.name
        msg = _('Authenticated Request For %(uname)s:%(pname)s)') % locals()
        LOG.audit(msg, context=req.environ['nova.context'])
        return self.application


class Requestify(wsgi.Middleware):
    """Turn the raw EC2 query parameters into an APIRequest in the environ."""

    def __init__(self, app, controller):
        super(Requestify, self).__init__(app)
        self.controller = utils.import_class(controller)()

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Protocol-level params that are not arguments to the API action.
        non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
                    'SignatureVersion', 'Version', 'Timestamp']
        args = dict(req.params)
        try:
            # Raise KeyError if omitted
            action = req.params['Action']
            # Fix bug lp:720157 for older (version 1) clients
            version = req.params['SignatureVersion']
            if int(version) == 1:
                non_args.remove('SignatureMethod')
                if 'SignatureMethod' in args:
                    args.pop('SignatureMethod')
            for non_arg in non_args:
                # Remove, but raise KeyError if omitted
                args.pop(non_arg)
        except KeyError, e:
            raise webob.exc.HTTPBadRequest()

        LOG.debug(_('action: %s'), action)
        for key, value in args.items():
            LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())

        # Success!
        api_request = apirequest.APIRequest(self.controller, action,
                                            req.params['Version'], args)
        req.environ['ec2.request'] = api_request
        return self.application


class Authorizer(wsgi.Middleware):
    """Authorize an EC2 API request.

    Return a 401 if ec2.controller and ec2.action in WSGI environ may not be
    executed in nova.context.
    """

    def __init__(self, application):
        super(Authorizer, self).__init__(application)
        # Role(s) required per controller/action; actions missing from the
        # map default to ['none'] (admin only) in __call__.
        self.action_roles = {
            'CloudController': {
                'DescribeAvailabilityZones': ['all'],
                'DescribeRegions': ['all'],
                'DescribeSnapshots': ['all'],
                'DescribeKeyPairs': ['all'],
                'CreateKeyPair': ['all'],
                'DeleteKeyPair': ['all'],
                'DescribeSecurityGroups': ['all'],
                'ImportKeyPair': ['all'],
                'AuthorizeSecurityGroupIngress': ['netadmin'],
                'RevokeSecurityGroupIngress': ['netadmin'],
                'CreateSecurityGroup': ['netadmin'],
                'DeleteSecurityGroup': ['netadmin'],
                'GetConsoleOutput': ['projectmanager', 'sysadmin'],
                'DescribeVolumes': ['projectmanager', 'sysadmin'],
                'CreateVolume': ['projectmanager', 'sysadmin'],
                'AttachVolume': ['projectmanager', 'sysadmin'],
                'DetachVolume': ['projectmanager', 'sysadmin'],
                'DescribeInstances': ['all'],
                'DescribeAddresses': ['all'],
                'AllocateAddress': ['netadmin'],
                'ReleaseAddress': ['netadmin'],
                'AssociateAddress': ['netadmin'],
                'DisassociateAddress': ['netadmin'],
                'RunInstances': ['projectmanager', 'sysadmin'],
                'TerminateInstances': ['projectmanager', 'sysadmin'],
                'RebootInstances': ['projectmanager', 'sysadmin'],
                'UpdateInstance': ['projectmanager', 'sysadmin'],
                'StartInstances': ['projectmanager', 'sysadmin'],
                'StopInstances': ['projectmanager', 'sysadmin'],
                'DeleteVolume': ['projectmanager', 'sysadmin'],
                'DescribeImages': ['all'],
                'DeregisterImage': ['projectmanager', 'sysadmin'],
                'RegisterImage': ['projectmanager', 'sysadmin'],
                'DescribeImageAttribute': ['all'],
                'ModifyImageAttribute': ['projectmanager', 'sysadmin'],
                'UpdateImage': ['projectmanager', 'sysadmin'],
                'CreateImage': ['projectmanager', 'sysadmin'],
            },
            'AdminController': {
                # All actions have the same permission: ['none'] (the default)
                # superusers will be allowed to run them
                # all others will get HTTPUnauthorized.
            },
        }

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        controller = req.environ['ec2.request'].controller.__class__.__name__
        action = req.environ['ec2.request'].action
        allowed_roles = self.action_roles[controller].get(action, ['none'])
        if self._matches_any_role(context, allowed_roles):
            return self.application
        else:
            LOG.audit(_('Unauthorized request for controller=%(controller)s '
                        'and action=%(action)s') % locals(), context=context)
            raise webob.exc.HTTPUnauthorized()

    def _matches_any_role(self, context, roles):
        """Return True if any role in roles is allowed in context."""
        if context.is_admin:
            return True
        if 'all' in roles:
            return True
        if 'none' in roles:
            return False
        return any(role in context.roles for role in roles)


class Validator(wsgi.Middleware):
    """Validate EC2 request arguments against per-field validators."""

    def validate_ec2_id(val):
        # A valid EC2 id is a non-empty string that ec2utils can decode.
        if not validator.validate_str()(val):
            return False
        try:
            ec2utils.ec2_id_to_id(val)
        except exception.InvalidEc2Id:
            return False
        return True

    # Executed at class-definition time: publish the helper and the default
    # validator map onto the validator module.
    validator.validate_ec2_id = validate_ec2_id

    validator.DEFAULT_VALIDATOR = {'instance_id': validator.validate_ec2_id,
                                   'volume_id': validator.validate_ec2_id,
                                   'image_id': validator.validate_ec2_id,
                                   'attribute': validator.validate_str(),
                                   'image_location': validator.validate_image_path,
                                   'public_ip': validator.validate_ipv4,
                                   'region_name': validator.validate_str(),
                                   'group_name': validator.validate_str(max_length=255),
                                   'group_description': validator.validate_str(max_length=255),
                                   'size': validator.validate_int(),
                                   'user_data': validator.validate_user_data}

    def __init__(self, application):
        super(Validator, self).__init__(application)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        if validator.validate(req.environ['ec2.request'].args,
                              validator.DEFAULT_VALIDATOR):
            return self.application
        else:
            raise webob.exc.HTTPBadRequest()


class Executor(wsgi.Application):
    """Execute an EC2 API request.

    Executes 'ec2.action' upon 'ec2.controller', passing 'nova.context' and
    'ec2.action_args' (all variables in WSGI environ.)

    Returns an XML response, or a 400 upon failure.
    """

    # NOTE(review): this method continues past the end of this chunk; the
    # trailing exception handler is intentionally left incomplete here.
    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        context = req.environ['nova.context']
        request_id = context.request_id
        api_request = req.environ['ec2.request']
        result = None
        try:
            result = api_request.invoke(context)
        except exception.InstanceNotFound as ex:
            LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_id(ex.kwargs['instance_id'])
            message = ex.message % {'instance_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.VolumeNotFound as ex:
            LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_vol_id(ex.kwargs['volume_id'])
            message = ex.message % {'volume_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.SnapshotNotFound as ex:
            LOG.info(_('SnapshotNotFound raised: %s'), unicode(ex),
                     context=context)
            ec2_id = ec2utils.id_to_ec2_snap_id(ex.kwargs['snapshot_id'])
            message = ex.message % {'snapshot_id': ec2_id}
            return ec2_error(req, request_id, type(ex).__name__, message)
        except exception.NotFound as ex:
            LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.EC2APIError as ex:
            LOG.exception(_('EC2APIError raised: %s'), unicode(ex),
                          context=context)
            if ex.code:
                return ec2_error(req, request_id, ex.code, unicode(ex))
            else:
                return ec2_error(req, request_id, type(ex).__name__,
                                 unicode(ex))
        except exception.KeyPairExists as ex:
            LOG.debug(_('KeyPairExists raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id, type(ex).__name__, unicode(ex))
        except exception.InvalidParameterValue as ex:
            LOG.debug(_('InvalidParameterValue raised: %s'), unicode(ex),
                      context=context)
            return ec2_error(req, request_id,
type(ex).__name__, unicode(ex)) except exception.InvalidPortRange as ex: LOG.debug(_('InvalidPortRange raised: %s'), unicode(ex), context=context) return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) except exception.NotAuthorized as ex: LOG.info(_('NotAuthorized raised: %s'), unicode(ex), context=context) return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) except exception.InvalidRequest as ex: LOG.debug(_('InvalidRequest raised: %s'), unicode(ex), context=context) return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) except exception.QuotaError as ex: LOG.debug(_('QuotaError raised: %s'), unicode(ex), context=context) return ec2_error(req, request_id, type(ex).__name__, unicode(ex)) except exception.InvalidInstanceIDMalformed as ex: LOG.debug(_('ValidatorError raised: %s'), unicode(ex), context=context) #EC2 Compatibility return self._error(req, context, "InvalidInstanceID.Malformed", unicode(ex)) except Exception as ex: env = req.environ.copy() for k in env.keys(): if not isinstance(env[k], basestring): env.pop(k) LOG.exception(_('Unexpected error raised: %s'), unicode(ex)) LOG.error(_('Environment: %s') % utils.dumps(env)) return ec2_error(req, request_id, 'UnknownError', _('An unknown error has occurred. ' 'Please try your request again.')) else: resp = webob.Response() resp.status = 200 resp.headers['Content-Type'] = 'text/xml' resp.body = str(result) return resp
#!/usr/bin/env python
# Copyright (c) 2016 Forrest Pieper

"""
Tool for managing and pushing local git branches to various git remotes and branches on those remotes
"""

import argparse
import logging
import os
import threading
import subprocess

# Valid sub-commands accepted on the command line.
command_list = ('list', 'delete', 'push', 'add')
# All tool state (config + push logs) lives under ./.deploy in the repo.
BASE_DIR = './.deploy'
TARGET_CONF = os.path.join(BASE_DIR, 'deploy_targets.conf')


def parse_file(target_file_path):
    """
    parse conf file and return a dictionary of targets where the keys are the
    local branch name and the values are a list of <remote>:<remote_branch> strings
    """
    # Conf format: one target per line, space-separated:
    #   <branch> <remote>[:<remote_branch>] [<remote>...]
    with open(target_file_path, 'r') as target_file:
        targets = {}
        for line in target_file:
            entries = [x.strip() for x in line.split(' ') if x.strip() != '']
            if len(entries) < 2:
                # A line needs at least a branch and one remote.
                logging.debug('skipping line in target conf file. Not enough values to unpack: {0}'.format(entries))
                continue
            branch = entries[0]
            remotes = entries[1:]
            targets[branch] = remotes
    return targets


def write_file(target_file_path, targets):
    """
    write dictionary of targets to conf file
    """
    # Inverse of parse_file: one space-separated line per target.
    with open(target_file_path, 'w') as target_file:
        for branch, remotes in targets.iteritems():
            target_file.write(branch)
            for remote in remotes:
                target_file.write(" ")
                target_file.write(remote)
            target_file.write("\n")


def call(cmd, output=False, *args, **kwargs):
    """
    call system command, optionally returning output
    """
    # Returns captured stdout when output=True, otherwise the exit code
    # (0 on success).  NOTE(review): shell=True with a formatted string --
    # branch/remote names are interpolated into the command line.
    logging.debug('calling command: {0}'.format(cmd))
    if output:
        return subprocess.check_output(cmd, shell=True, *args, **kwargs)
    else:
        return subprocess.call(cmd, shell=True, *args, **kwargs)


def print_target(branch, remotes):
    """
    pretty print a target
    """
    print "{0}{1}".format(branch.ljust(26, ' '), [r.strip() for r in remotes])


def find_missing_git_remotes(remotes_to_check):
    """
    check that each value in a list of strings is an existing git remote
    returns the name of the first missing remote (if any), or None if all remotes exist
    """
    git_remotes = call('git remote', output=True).split('\n')
    for remote in remotes_to_check:
        # Entries may be '<remote>' or '<remote>:<remote_branch>'; only the
        # remote name is checked against `git remote`.
        if ':' in remote:
            remote_name, remote_branch = remote.split(':')
        else:
            remote_name = remote
        if remote_name not in git_remotes:
            return remote
    return None


def list_cmd(targets):
    """
    list command: list all targets
    Returns True if successful, otherwise False
    """
    if len(targets) < 1:
        logging.error('No targets exist')
        return False
    print 'BRANCH (local_branch)'.ljust(25, ' '), 'REMOTES [<remote>:<remote_branch>]'
    for branch, remotes in targets.iteritems():
        print_target(branch, remotes)
    return True


def delete(targets, branch_name):
    """
    delete command: delete target associated with <branch_name> local branch
    Returns the updated targets dict
    """
    if branch_name not in targets:
        logging.error('No such deploy target: {0}'.format(branch_name))
        return targets
    print 'You are about to delete the following deploy target:'
    print_target(branch_name, targets[branch_name])
    # Interactive confirmation; anything without a 'y' cancels.
    x = raw_input('Continue? (y/N): ')
    if 'y' not in x.lower():
        print 'Cancelling...'
        return targets
    del targets[branch_name]
    logging.debug('{0} target deleted'.format(branch_name))
    return targets


def push(targets, branch_name, multithread=False, force=False):
    """
    push command: push local branch <branch_name> to all associated remotes
    Returns True if successful, otherwise False
    """
    # Check that branch_name target exists in target file
    if branch_name not in targets:
        logging.error('No such deploy target: {0}'.format(branch_name))
        return False

    # Check that working tree has no modifications
    # (non-zero exit from `git diff --quiet` means there are changes)
    if call('git diff --quiet && git diff --cached --quiet'):
        logging.error('working tree has modifications')
        return False

    # Check that branch_name git branch exists
    branches = call('git branch --list', output=True).split("\n")
    # Strip the '* ' marker git puts on the current branch.
    branches = [x.strip('* ') for x in branches if x != '']
    if branch_name not in branches:
        logging.error('no such local branch')
        return False

    # Checkout branch
    logging.info('checking out {0} branch'.format(branch_name))
    if call('git checkout {0} --quiet'.format(branch_name)):
        logging.error('failed to checkout branch')
        return False

    # TODO:
should we fetch/pull here? # push to each remote... remote_log_dir = os.path.join(BASE_DIR, branch_name.replace('/', '-')) if not os.path.exists(remote_log_dir): os.mkdir(remote_log_dir) open_threads = [] results = [] for remote in targets[branch_name]: pid = targets[branch_name].index(remote) if ":" in remote: remote_name, remote_branch = remote.split(':') else: remote_name = remote remote_branch = branch_name branch_log = os.path.join(remote_log_dir, remote_name) + '.out' def push_remote(_pid, _remote_name, _branch_name, _remote_branch, _force, _branch_log): with open(_branch_log, 'w+') as target_log: cmd = 'git push {0} {1}:{2} {3}'.format( _remote_name, _branch_name, _remote_branch, '--force' if _force else '' ) res = call(cmd, output=False, stdout=target_log, stderr=target_log) logging.info('process {0} finished with exit code {1}'.format(_pid, res)) results.append(res) t = threading.Thread( target=push_remote, args=(pid, remote_name, branch_name, remote_branch, force, branch_log) ) t.start() logging.info('process {4}: Pushing {0} local branch to {1}:{2}. Logging output to {3}'.format(branch_name, remote_name, remote_branch, branch_log, pid)) if not multithread: t.join() else: open_threads.append(t) if multithread: for t in open_threads: t.join() for result in results: if results: return False return True def add(targets, branch_name, remotes): """ add command: add a target associating local branch 'branch_name' to a list of 'remotes' 'remotes' is a list of <remote>:<remote_branch> strings if the :<remote_branch> is omitted, 'branch_name' will be used for both the local and remote branch Returns the updated targets dict """ if branch_name in targets: print 'target branch already exists. Continuing will overwrite existing target' x = raw_input('Continue? (y/N)') if 'y' not in x.lower(): print 'Cancelling...' 
            return targets
    # Refuse to save a target that references a nonexistent git remote.
    missing_remote = find_missing_git_remotes(remotes)
    if missing_remote:
        logging.error('git remote does not exist: {0}'.format(missing_remote))
        return targets
    targets[branch_name] = remotes
    return targets


def main(args, loglevel):
    """Dispatch the parsed command-line arguments to the matching
    sub-command, persisting target changes back to the conf file.
    Exits the process with 0 on success, 1 on failure."""
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)

    # Make sure the state directory and conf file exist before use.
    if not os.path.exists(BASE_DIR):
        os.mkdir(BASE_DIR)
    if not os.path.exists(TARGET_CONF):
        open(TARGET_CONF, 'w+').close()

    command = args.command.lower()
    if command not in command_list:
        logging.error('invalid command. choices are {0}'.format(command_list))
        exit(1)

    targets = parse_file(TARGET_CONF)

    if command == 'list':
        if list_cmd(targets):
            exit(0)
        exit(1)

    # Every command other than 'list' needs a branch argument.
    if not args.branch:
        logging.error('This command requires a target branch')
        exit(1)

    branch_name = args.branch.lower()

    if command == 'delete':
        targets = delete(targets, branch_name)
        write_file(TARGET_CONF, targets)
        exit(0)
    elif command == 'push':
        if push(targets, branch_name, multithread=args.multithread, force=args.force):
            exit(0)
        exit(1)

    remotes = args.remotes
    if not remotes:
        logging.error('This command requires at least one remote')
        exit(1)

    if command == 'add':
        targets = add(targets, branch_name, remotes)
        write_file(TARGET_CONF, targets)
        exit(0)

    # Unreachable unless command_list and the dispatch above disagree.
    logging.error('unknown error')
    exit(1)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Tool for managing and pushing local git branches to various git remotes and branches on those remotes",
        epilog="""
This tool allows users to associate a local branch with a list of git remotes (and branches on those remotes) \
and quickly push the local branch to all associated remotes.

A target includes a single local branch plus one or more remote:remote_branch values. If the :remote_branch portion is omitted,\
 the name of the local branch will be used. Targets are identified by the name of the local branch.

The list, add, and delete commands allow users to manage their list of targets
The push command pushes the target's local branch to each of the associated remote:remote_branch values.

The list of targets is stored in a configuration file located at {target_conf}
During a push command, output from the git push call is directed to a file located at {base_dir}/<target>/<remote>.out
If a target includes multiple remotes, the push calls are run simultaneously in separate processes
""".format(target_conf=TARGET_CONF, base_dir=BASE_DIR),
        fromfile_prefix_chars='@',
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument(
        "command",
        help="""options:\ncommand required args:\n  list \n  delete <branch>\n  push <branch>\n  add <branch> <remote> [<remote>...]""",
        metavar="command"
    )
    parser.add_argument(
        "branch",
        help="target (local) branch",
        metavar="branch",
        nargs="?"
    )
    parser.add_argument(
        "remotes",
        metavar='remotes',
        type=str,
        nargs='*',
        help='list of git remotes in the format <remote_name>:<remote_branch_name>.\
 If the :<remote_branch_name> is omitted, the associated local branch will be used',
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="increase output verbosity",
        action="store_true"
    )
    parser.add_argument(
        "--force",
        help="force push to remotes",
        action='store_true'
    )
    parser.add_argument(
        "-m",
        "--multithread",
        help="push to all target remotes in concurrent threads",
        action='store_true',
    )
    args = parser.parse_args()

    # Setup logging
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO

    main(args, loglevel)
# -*- coding: iso-8859-1 -*- """Get useful information from live Python objects. This module encapsulates the interface provided by the internal special attributes (func_*, co_*, im_*, tb_*, etc.) in a friendlier fashion. It also provides some help for examining source code and class layout. Here are some of the useful functions provided by this module: ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(), isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(), isroutine() - check object types getmembers() - get members of an object that satisfy a given condition getfile(), getsourcefile(), getsource() - find an object's source code getdoc(), getcomments() - get documentation on an object getmodule() - determine the module that an object came from getclasstree() - arrange classes so as to represent their hierarchy getargspec(), getargvalues(), getcallargs() - get info about function arguments formatargspec(), formatargvalues() - format an argument spec getouterframes(), getinnerframes() - get info about frames currentframe() - get the current stack frame stack(), trace() - get info about frames on the stack or in a traceback """ # This module is in the public domain. No warranties. __author__ = 'Ka-Ping Yee <ping@lfw.org>' __date__ = '1 Jan 2001' import sys import os import types import string import re import dis import imp import tokenize import linecache from operator import attrgetter from collections import namedtuple # These constants are from Include/code.h. CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 0x1, 0x2, 0x4, 0x8 CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40 # See Include/object.h TPFLAGS_IS_ABSTRACT = 1 << 20 # ----------------------------------------------------------- type-checking def ismodule(object): """Return true if the object is a module. 
    Module objects provide these attributes:
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)

def isclass(object):
    """Return true if the object is a class.

    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    # types.ClassType covers Python 2 old-style classes; type covers
    # new-style classes.
    return isinstance(object, (type, types.ClassType))

def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None"""
    return isinstance(object, types.MethodType)

def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    im_func attribute (etc) when an object passes ismethod()."""
    return (hasattr(object, "__get__")
            and not hasattr(object, "__set__")  # else it's a data descriptor
            and not ismethod(object)            # mutual exclusion
            and not isfunction(object)
            and not isclass(object))

def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples are
    properties (defined in Python) and getsets and members (defined in C).
    Typically, data descriptors will also have __name__ and __doc__ attributes
    (properties, getsets, and members have both of these attributes), but this
    is not guaranteed."""
    return (hasattr(object, "__set__") and hasattr(object, "__get__"))

# The following two predicates depend on descriptor types that only some
# implementations expose; each gets a real test or a constant-False stub.
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False

if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False

def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)"""
    return isinstance(object, types.FunctionType)

def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provides same attributes as functions.
    See help(isfunction) for attributes listing."""
    # The compiler sets CO_GENERATOR on code objects that contain a yield.
    return bool((isfunction(object) or ismethod(object)) and
                object.func_code.co_flags & CO_GENERATOR)

def isgenerator(object):
    """Return true if the object is a generator.

    Generator objects provide these attributes:
        __iter__        defined to support interation over container
        close           raises a new GeneratorExit exception inside the
                        generator to terminate the iteration
        gi_code         code object
        gi_frame        frame object or possibly None once the generator has
                        been exhausted
        gi_running      set to 1 when generator is executing, 0 otherwise
        next            return the next item from the container
        send            resumes the generator and "sends" a value that becomes
                        the result of the current yield-expression
        throw           used to raise an exception inside the generator"""
    return isinstance(object, types.GeneratorType)

def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects provide these attributes:
        tb_frame        frame object at this level
        tb_lasti        index of last attempted instruction in bytecode
        tb_lineno       current line number in Python source code
        tb_next         next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)

def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide these attributes:
        f_back          next outer frame object (this frame's caller)
        f_builtins      built-in namespace seen by this frame
        f_code          code object being executed in this frame
        f_exc_traceback traceback if raised in this frame, or None
        f_exc_type      exception type if raised in this frame, or None
        f_exc_value     exception value if raised in this frame, or None
        f_globals       global namespace seen by this frame
        f_lasti         index of last attempted instruction in bytecode
        f_lineno        current line number in Python source code
        f_locals        local namespace seen by this frame
        f_restricted    0 or 1 if frame is in restricted execution mode
        f_trace         tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)

def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)

def isbuiltin(object):
    """Return true if the object is a built-in function or method.
    Built-in functions and methods provide these attributes:
        __doc__         documentation string
        __name__        original name of this function or method
        __self__        instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)

def isroutine(object):
    """Return true if the object is any kind of function or method."""
    return (isbuiltin(object)
            or isfunction(object)
            or ismethod(object)
            or ismethoddescriptor(object))

def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    # TPFLAGS_IS_ABSTRACT is set on classes with unimplemented
    # abstractmethods (see Include/object.h).
    return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)

def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    results = []
    for key in dir(object):
        try:
            value = getattr(object, key)
        except AttributeError:
            # dir() can report names whose lookup raises; skip them.
            continue
        if not predicate or predicate(value):
            results.append((key, value))
    results.sort()
    return results

Attribute = namedtuple('Attribute', 'name kind defining_class object')

def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).

        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method

        2. The class which defined this attribute (a class).

        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes:  C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """

    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)

        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            obj_via_getattr = getattr(cls, name)
            if (ismethod(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr

        result.append(Attribute(name, kind, homecls, obj))

    return result

# ----------------------------------------------------------- class helpers
def _searchbases(cls, accum):
    # Simulate the "classic class" search order.
    if cls in accum:
        return
    accum.append(cls)
    for base in cls.__bases__:
        _searchbases(base, accum)

def getmro(cls):
    "Return tuple of base classes (including cls) in method resolution order."
    if hasattr(cls, "__mro__"):
        # New-style classes carry a precomputed MRO.
        return cls.__mro__
    else:
        # Old-style classes: depth-first, left-to-right, first occurrence.
        result = []
        _searchbases(cls, result)
        return tuple(result)

# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expline = string.expandtabs(line)
    return len(expline) - len(string.lstrip(expline))

def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.
    To clean up docstrings that are
    indented to line up with blocks of code, any whitespace than can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if not isinstance(doc, types.StringTypes):
        return None
    return cleandoc(doc)

def cleandoc(doc):
    """Clean up indentation from docstrings.

    Any whitespace that can be uniformly removed from the second line
    onwards is removed."""
    try:
        lines = string.split(string.expandtabs(doc), '\n')
    except UnicodeError:
        return None
    else:
        # Find minimum indentation of any non-blank lines after first line.
        # sys.maxint acts as the "no indented line seen yet" sentinel.
        margin = sys.maxint
        for line in lines[1:]:
            content = len(string.lstrip(line))
            if content:
                indent = len(line) - content
                margin = min(margin, indent)
        # Remove indentation.
        if lines:
            lines[0] = lines[0].lstrip()
        if margin < sys.maxint:
            for i in range(1, len(lines)):
                lines[i] = lines[i][margin:]
        # Remove any trailing or leading blank lines.
        while lines and not lines[-1]:
            lines.pop()
        while lines and not lines[0]:
            lines.pop(0)
        return string.join(lines, '\n')

def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    # Successively unwrap the object down to a code object, whose
    # co_filename records its origin.
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))

ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')

def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    filename = os.path.basename(path)
    suffixes = map(lambda info:
                   (-len(info[0]), info[0], info[1], info[2]),
                   imp.get_suffixes())
    suffixes.sort()  # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)

def getmodulename(path):
    """Return the module name for a given file, or None."""
    info = getmoduleinfo(path)
    if info:
        return info[0]

def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    # Map compiled files (.pyc/.pyo) back to their .py source.
    if string.lower(filename[-4:]) in ('.pyc', '.pyo'):
        filename = filename[:-4] + '.py'
    for suffix, mode, kind in imp.get_suffixes():
        if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:
            # Looks like a binary file.  We want to only return a text file.
            return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename

def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.
    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    if _filename is None:
        _filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(_filename))

# Module-level caches mapping filenames <-> module names, filled lazily
# by getmodule() below.
modulesbyfile = {}
_filesbymodname = {}

def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.items():
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['__builtin__']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin

def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""

    file = getfile(object)
    sourcefile = getsourcefile(object)
    # '<...>' pseudo-filenames (e.g. '<stdin>') may still be in linecache.
    if not sourcefile and file[0] + file[-1] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file

    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')

    if ismethod(object):
        object = object.im_func
    if isfunction(object):
        object = object.func_code
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Walk backwards from co_firstlineno to include any decorators.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]):
                break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')

def getcomments(object):
    """Get lines of comments immediately
preceding an object's source code. Returns None when source can't be found. """ try: lines, lnum = findsource(object) except (IOError, TypeError): return None if ismodule(object): # Look for a comment block at the top of the file. start = 0 if lines and lines[0][:2] == '#!': start = 1 while start < len(lines) and string.strip(lines[start]) in ('', '#'): start = start + 1 if start < len(lines) and lines[start][:1] == '#': comments = [] end = start while end < len(lines) and lines[end][:1] == '#': comments.append(string.expandtabs(lines[end])) end = end + 1 return string.join(comments, '') # Look for a preceding block of comments at the same indentation. elif lnum > 0: indent = indentsize(lines[lnum]) end = lnum - 1 if end >= 0 and string.lstrip(lines[end])[:1] == '#' and \ indentsize(lines[end]) == indent: comments = [string.lstrip(string.expandtabs(lines[end]))] if end > 0: end = end - 1 comment = string.lstrip(string.expandtabs(lines[end])) while comment[:1] == '#' and indentsize(lines[end]) == indent: comments[:0] = [comment] end = end - 1 if end < 0: break comment = string.lstrip(string.expandtabs(lines[end])) while comments and string.strip(comments[0]) == '#': comments[:1] = [] while comments and string.strip(comments[-1]) == '#': comments[-1:] = [] return string.join(comments, '') class EndOfBlock(Exception): pass class BlockFinder: """Provide a tokeneater() method to detect the end of a code block.""" def __init__(self): self.indent = 0 self.islambda = False self.started = False self.passline = False self.last = 1 def tokeneater(self, type, token, srow_scol, erow_ecol, line): srow, scol = srow_scol erow, ecol = erow_ecol if not self.started: # look for the first "def", "class" or "lambda" if token in ("def", "class", "lambda"): if token == "lambda": self.islambda = True self.started = True self.passline = True # skip to the end of the line elif type == tokenize.NEWLINE: self.passline = False # stop skipping when a NEWLINE is seen self.last = srow if 
self.islambda: # lambdas always end at the first NEWLINE raise EndOfBlock elif self.passline: pass elif type == tokenize.INDENT: self.indent = self.indent + 1 self.passline = True elif type == tokenize.DEDENT: self.indent = self.indent - 1 # the end of matching indent/dedent pairs end a block # (note that this only works for "def"/"class" blocks, # not e.g. for "if: else:" or "try: finally:" blocks) if self.indent <= 0: raise EndOfBlock elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL): # any other token on the same indentation level end the previous # block as well, except the pseudo-tokens COMMENT and NL. raise EndOfBlock def getblock(lines): """Extract the block of code at the top of the given list of lines.""" blockfinder = BlockFinder() try: tokenize.tokenize(iter(lines).next, blockfinder.tokeneater) except (EndOfBlock, IndentationError): pass return lines[:blockfinder.last] def getsourcelines(object): """Return a list of source lines and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of the lines corresponding to the object and the line number indicates where in the original source file the first line of code was found. An IOError is raised if the source code cannot be retrieved.""" lines, lnum = findsource(object) if ismodule(object): return lines, 0 else: return getblock(lines[lnum:]), lnum + 1 def getsource(object): """Return the text of the source code for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a single string. 
An IOError is raised if the source code cannot be retrieved.""" lines, lnum = getsourcelines(object) return string.join(lines, '') # --------------------------------------------------- class tree extraction def walktree(classes, children, parent): """Recursive helper function for getclasstree().""" results = [] classes.sort(key=attrgetter('__module__', '__name__')) for c in classes: results.append((c, c.__bases__)) if c in children: results.append(walktree(children[c], children, c)) return results def getclasstree(classes, unique=0): """Arrange the given list of classes into a hierarchy of nested lists. Where a nested list appears, it contains classes derived from the class whose entry immediately precedes the list. Each entry is a 2-tuple containing a class and a tuple of its base classes. If the 'unique' argument is true, exactly one entry appears in the returned structure for each class in the given list. Otherwise, classes using multiple inheritance and their descendants will appear multiple times.""" children = {} roots = [] for c in classes: if c.__bases__: for parent in c.__bases__: if not parent in children: children[parent] = [] children[parent].append(c) if unique and parent in classes: break elif c not in roots: roots.append(c) for parent in children: if parent not in classes: roots.append(parent) return walktree(roots, children, None) # ------------------------------------------------ argument list extraction Arguments = namedtuple('Arguments', 'args varargs keywords') def getargs(co): """Get information about the arguments accepted by a code object. Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None.""" if not iscode(co): if hasattr(len, 'func_code') and type(co) is type(len.func_code): # PyPy extension: built-in function objects have a func_code too. 
# There is no co_code on it, but co_argcount and co_varnames and # co_flags are present. pass else: raise TypeError('{!r} is not a code object'.format(co)) code = getattr(co, 'co_code', '') nargs = co.co_argcount names = co.co_varnames args = list(names[:nargs]) step = 0 # The following acrobatics are for anonymous (tuple) arguments. for i in range(nargs): if args[i][:1] in ('', '.'): stack, remain, count = [], [], [] while step < len(code): op = ord(code[step]) step = step + 1 if op >= dis.HAVE_ARGUMENT: opname = dis.opname[op] value = ord(code[step]) + ord(code[step+1])*256 step = step + 2 if opname in ('UNPACK_TUPLE', 'UNPACK_SEQUENCE'): remain.append(value) count.append(value) elif opname == 'STORE_FAST': stack.append(names[value]) # Special case for sublists of length 1: def foo((bar)) # doesn't generate the UNPACK_TUPLE bytecode, so if # `remain` is empty here, we have such a sublist. if not remain: stack[0] = [stack[0]] break else: remain[-1] = remain[-1] - 1 while remain[-1] == 0: remain.pop() size = count.pop() stack[-size:] = [stack[-size:]] if not remain: break remain[-1] = remain[-1] - 1 if not remain: break args[i] = stack[0] varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return Arguments(args, varargs, varkw) ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults') def getargspec(func): """Get the names and default values of a function's arguments. A tuple of four things is returned: (args, varargs, varkw, defaults). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'defaults' is an n-tuple of the default values of the last n arguments. 
""" if ismethod(func): func = func.im_func if not (isfunction(func) or isbuiltin(func) and hasattr(func, 'func_code')): # PyPy extension: this works for built-in functions too raise TypeError('{!r} is not a Python function'.format(func)) args, varargs, varkw = getargs(func.func_code) return ArgSpec(args, varargs, varkw, func.func_defaults) ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') def getargvalues(frame): """Get information about arguments passed into a particular frame. A tuple of four things is returned: (args, varargs, varkw, locals). 'args' is a list of the argument names (it may contain nested lists). 'varargs' and 'varkw' are the names of the * and ** arguments or None. 'locals' is the locals dictionary of the given frame.""" args, varargs, varkw = getargs(frame.f_code) return ArgInfo(args, varargs, varkw, frame.f_locals) def joinseq(seq): if len(seq) == 1: return '(' + seq[0] + ',)' else: return '(' + string.join(seq, ', ') + ')' def strseq(object, convert, join=joinseq): """Recursively walk a sequence, stringifying each element.""" if type(object) in (list, tuple): return join(map(lambda o, c=convert, j=join: strseq(o, c, j), object)) else: return convert(object) def formatargspec(args, varargs=None, varkw=None, defaults=None, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargspec. The first four arguments are (args, varargs, varkw, defaults). The other four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. 
The ninth argument is an optional function to format the sequence of arguments.""" specs = [] if defaults: firstdefault = len(args) - len(defaults) for i, arg in enumerate(args): spec = strseq(arg, formatarg, join) if defaults and i >= firstdefault: spec = spec + formatvalue(defaults[i - firstdefault]) specs.append(spec) if varargs is not None: specs.append(formatvarargs(varargs)) if varkw is not None: specs.append(formatvarkw(varkw)) return '(' + string.join(specs, ', ') + ')' def formatargvalues(args, varargs, varkw, locals, formatarg=str, formatvarargs=lambda name: '*' + name, formatvarkw=lambda name: '**' + name, formatvalue=lambda value: '=' + repr(value), join=joinseq): """Format an argument spec from the 4 values returned by getargvalues. The first four arguments are (args, varargs, varkw, locals). The next four arguments are the corresponding optional formatting functions that are called to turn names and values into strings. The ninth argument is an optional function to format the sequence of arguments.""" def convert(name, locals=locals, formatarg=formatarg, formatvalue=formatvalue): return formatarg(name) + formatvalue(locals[name]) specs = [] for i in range(len(args)): specs.append(strseq(args[i], convert, join)) if varargs: specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) if varkw: specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) return '(' + string.join(specs, ', ') + ')' def getcallargs(func, *positional, **named): """Get the mapping of arguments to values. A dict is returned, with keys the function argument names (including the names of the * and ** arguments, if any), and values the respective bound values from 'positional' and 'named'.""" args, varargs, varkw, defaults = getargspec(func) f_name = func.__name__ arg2value = {} # The following closures are basically because of tuple parameter unpacking. 
assigned_tuple_params = [] def assign(arg, value): if isinstance(arg, str): arg2value[arg] = value else: assigned_tuple_params.append(arg) value = iter(value) for i, subarg in enumerate(arg): try: subvalue = next(value) except StopIteration: raise ValueError('need more than %d %s to unpack' % (i, 'values' if i > 1 else 'value')) assign(subarg,subvalue) try: next(value) except StopIteration: pass else: raise ValueError('too many values to unpack') def is_assigned(arg): if isinstance(arg,str): return arg in arg2value return arg in assigned_tuple_params if ismethod(func) and func.im_self is not None: # implicit 'self' (or 'cls' for classmethods) argument positional = (func.im_self,) + positional num_pos = len(positional) num_total = num_pos + len(named) num_args = len(args) num_defaults = len(defaults) if defaults else 0 for arg, value in zip(args, positional): assign(arg, value) if varargs: if num_pos > num_args: assign(varargs, positional[-(num_pos-num_args):]) else: assign(varargs, ()) elif 0 < num_args < num_pos: raise TypeError('%s() takes %s %d %s (%d given)' % ( f_name, 'at most' if defaults else 'exactly', num_args, 'arguments' if num_args > 1 else 'argument', num_total)) elif num_args == 0 and num_total: if varkw: if num_pos: # XXX: We should use num_pos, but Python also uses num_total: raise TypeError('%s() takes exactly 0 arguments ' '(%d given)' % (f_name, num_total)) else: raise TypeError('%s() takes no argument (%d given)' % (f_name, num_total)) for arg in args: if isinstance(arg, str) and arg in named: if is_assigned(arg): raise TypeError("%s() got multiple values for keyword " "argument '%s'" % (f_name, arg)) else: assign(arg, named.pop(arg)) if defaults: # fill in any missing values with the defaults for arg, value in zip(args[-num_defaults:], defaults): if not is_assigned(arg): assign(arg, value) if varkw: assign(varkw, named) elif named: unexpected = next(iter(named)) if isinstance(unexpected, unicode): unexpected = 
unexpected.encode(sys.getdefaultencoding(), 'replace') raise TypeError("%s() got an unexpected keyword argument '%s'" % (f_name, unexpected)) unassigned = num_args - len([arg for arg in args if is_assigned(arg)]) if unassigned: num_required = num_args - num_defaults raise TypeError('%s() takes %s %d %s (%d given)' % ( f_name, 'at least' if defaults else 'exactly', num_required, 'arguments' if num_required > 1 else 'argument', num_total)) return arg2value # -------------------------------------------------- stack frame extraction Traceback = namedtuple('Traceback', 'filename lineno function code_context index') def getframeinfo(frame, context=1): """Get information about a frame or traceback object. A tuple of five things is returned: the filename, the line number of the current line, the function name, a list of lines of context from the source code, and the index of the current line within that list. The optional second argument specifies the number of lines of context to return, which are centered around the current line.""" if istraceback(frame): lineno = frame.tb_lineno frame = frame.tb_frame else: lineno = frame.f_lineno if not isframe(frame): raise TypeError('{!r} is not a frame or traceback object'.format(frame)) filename = getsourcefile(frame) or getfile(frame) if context > 0: start = lineno - 1 - context//2 try: lines, lnum = findsource(frame) except IOError: lines = index = None else: start = max(start, 1) start = max(0, min(start, len(lines) - context)) lines = lines[start:start+context] index = lineno - 1 - start else: lines = index = None return Traceback(filename, lineno, frame.f_code.co_name, lines, index) def getlineno(frame): """Get the line number from a frame object, allowing for optimization.""" # FrameType.f_lineno is now a descriptor that grovels co_lnotab return frame.f_lineno def getouterframes(frame, context=1): """Get a list of records for a frame and all higher (calling) frames. 
Each record contains a frame object, filename, line number, function name, a list of lines of context, and index within the context.""" framelist = [] while frame: framelist.append((frame,) + getframeinfo(frame, context)) frame = frame.f_back return framelist def getinnerframes(tb, context=1): """Get a list of records for a traceback's frame and all lower frames. Each record contains a frame object, filename, line number, function name, a list of lines of context, and index within the context.""" framelist = [] while tb: framelist.append((tb.tb_frame,) + getframeinfo(tb, context)) tb = tb.tb_next return framelist if hasattr(sys, '_getframe'): currentframe = sys._getframe else: currentframe = lambda _=None: None def stack(context=1): """Return a list of records for the stack above the caller's frame.""" return getouterframes(sys._getframe(1), context) def trace(context=1): """Return a list of records for the stack below the current exception.""" return getinnerframes(sys.exc_info()[2], context)
import torch.utils.data as data
import torch
import h5py
from bisect import bisect_right
import numpy as np
from src.data import load_data
import src.data.stats as stats
import dill
from tqdm import tqdm


def _load_speedup_pickle(filename):
    """Load and return the dill-serialized dataset dict from `filename`."""
    with open(filename, 'rb') as f:
        return dill.load(f)


def _capped_len(y, maxsize):
    """Dataset length: capped at `maxsize`, never more than the data held.

    Bug fix: the original __len__ implementations returned `maxsize`
    unconditionally, which makes a DataLoader index past the end whenever
    maxsize > len(y).
    """
    if maxsize is None:
        return len(y)
    return min(maxsize, len(y))


class DatasetFromHdf5(data.Dataset):
    """Speedup-regression dataset backed by an HDF5 file.

    X is the concatenation of the program and schedule feature vectors;
    Y is the (optionally log-transformed) standardized speedup, shape (N, 1).
    """

    def __init__(self, filename, normalized=True, log=False, maxsize=30000):
        # NOTE(review): `normalized` is accepted for interface compatibility
        # but unused here; normalization is done explicitly via
        # normalize_dataset().
        super().__init__()
        self.maxsize = maxsize
        # SWMR read-only handle; normalize_dataset() reopens in write mode.
        self.f = h5py.File(filename, mode='r', swmr=True)
        self.schedules = self.f.get('schedules')
        self.programs = self.f.get('programs')
        self.speedups = self.f.get('speedup')
        self.times = self.f.get('times')
        self.prog_names = self.f.get('programs_names')
        self.sched_names = self.f.get('schedules_names')
        self.X = np.concatenate(
            (np.array(self.programs), np.array(self.schedules)),
            axis=1).astype('float32')
        self.Y = np.array(self.speedups, dtype='float32').reshape(-1, 1)
        if log:
            self.Y = np.log(self.Y)
        self.mean = np.mean(self.Y)
        self.std = np.std(self.Y)
        self.Y = (self.Y - self.mean) / self.std

    def __len__(self):
        return _capped_len(self.Y, self.maxsize)

    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    def get_prog_name(self, index):
        """Return the stored program name for row `index`."""
        return self.prog_names[index]

    def get_sched_name(self, index):
        """Return the stored schedule name for row `index`."""
        return self.sched_names[index]

    def normalize_min_max(self, data):
        """Column-wise min-max normalization; constant columns map to 0."""
        values = np.array(data)
        denominator = values.max(axis=0) - values.min(axis=0)
        denominator[denominator == 0] = 1  # avoid division by zero
        return (values - values.min(axis=0)) / denominator

    def normalize_dataset(self):
        """Write normalized copies of programs/schedules into the HDF5 file.

        Reopens the file in append mode, adds 'normalized_programs' and
        'normalized_schedules' datasets, then re-initializes in read mode.
        """
        # Reopen file in write mode.
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, mode='a')
        self.programs = self.f.get('programs')
        self.schedules = self.f.get('schedules')
        # Normalize programs.
        normalized_progs = self.normalize_min_max(self.programs)
        self.f.create_dataset('normalized_programs', data=normalized_progs,
                              dtype="float32")
        # Normalize schedules.
        normalized_scheds = self.normalize_min_max(self.schedules)
        self.f.create_dataset('normalized_schedules', data=normalized_scheds,
                              dtype="float32")
        # Go back to read mode.
        self.f.close()
        self.__init__(filename)


class DatasetFromPkl(data.Dataset):
    """Speedup dataset loaded from a dill pickle produced by pickle_data()."""

    def __init__(self, filename, normalized=False, log=False, maxsize=100000):
        super().__init__()
        self.maxsize = maxsize
        self.dataset = filename
        dataset_dict = _load_speedup_pickle(filename)
        self.programs = dataset_dict['programs']
        self.program_indexes = dataset_dict['program_indexes']
        self.schedules = dataset_dict['schedules']
        self.exec_times = dataset_dict['exec_times']
        self.speedups = dataset_dict['speedup']

        self.X = []
        self.Y = []
        self.restricted_program_indexes = []
        self.restricted_schedules = []
        for i in tqdm(range(len(self.schedules))):
            program = self.programs[self.program_indexes[i]]
            self.X.append(program.add_schedule(self.schedules[i]).__array__())
            self.Y.append(self.speedups[i])
            self.restricted_program_indexes.append(self.program_indexes[i])
            self.restricted_schedules.append(self.schedules[i])

        self.X = np.array(self.X).astype('float32')
        self.Y = np.array(self.Y, dtype='float32').reshape(-1, 1)
        # Raw speedups (shape (N, 1)), kept before log/standardization.
        self.Y_speedups = np.array(self.Y, dtype='float32')
        if log:
            self.Y = np.log(self.Y)
        self.mean = np.mean(self.Y)
        self.std = np.std(self.Y)
        self.Y = (self.Y - self.mean) / self.std

    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    def __len__(self):
        return _capped_len(self.Y, self.maxsize)

    @staticmethod
    def pickle_data(data_path='data/training_data/',
                    dataset_path='data/speedup_dataset.pkl'):
        """Read raw training data and serialize it to `dataset_path`."""
        st = stats.Stats(data_path)
        print("Reading data")
        programs, schedules, exec_times = st.load_data()
        print("data loaded")
        print("Serializing")
        load_data.serialize(programs, schedules, exec_times,
                            filename=dataset_path)
        print("done")


# loads data using filter on speedup and excluding some functions
class DatasetFromPkl_Filter(data.Dataset):
    """Like DatasetFromPkl, but keeps only samples whose speedup lies in
    [speedup_lo_bound, speedup_up_bound] and whose program name is not in
    `exlude_funcs`."""

    def __init__(self, filename, normalized=False, log=False, maxsize=100000,
                 speedup_lo_bound=0, speedup_up_bound=np.inf,
                 exlude_funcs=None):
        super().__init__()
        # Bug fix: mutable default argument ([]) replaced with None sentinel.
        if exlude_funcs is None:
            exlude_funcs = []
        self.maxsize = maxsize
        self.dataset = filename
        dataset_dict = _load_speedup_pickle(filename)
        self.programs = dataset_dict['programs']
        self.program_indexes = dataset_dict['program_indexes']
        self.schedules = dataset_dict['schedules']
        self.exec_times = dataset_dict['exec_times']
        self.speedups = dataset_dict['speedup']

        self.X = []
        self.Y = []
        self.restricted_program_indexes = []
        self.restricted_schedules = []
        for i in tqdm(range(len(self.schedules))):
            in_bounds = (self.speedups[i] >= speedup_lo_bound) & \
                        (self.speedups[i] <= speedup_up_bound)
            name_ok = (self.programs[self.program_indexes[i]].name
                       not in exlude_funcs)
            if in_bounds and name_ok:
                program = self.programs[self.program_indexes[i]]
                self.X.append(
                    program.add_schedule(self.schedules[i]).__array__())
                self.Y.append(self.speedups[i])
                self.restricted_program_indexes.append(self.program_indexes[i])
                self.restricted_schedules.append(self.schedules[i])

        self.X = np.array(self.X).astype('float32')
        self.Y = np.array(self.Y, dtype='float32').reshape(-1, 1)
        # Raw filtered speedups, kept before log/standardization.
        self.Y_speedups = np.array(self.Y, dtype='float32')
        if log:
            self.Y = np.log(self.Y)
        self.mean = np.mean(self.Y)
        self.std = np.std(self.Y)
        self.Y = (self.Y - self.mean) / self.std

    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    def __len__(self):
        return _capped_len(self.Y, self.maxsize)


# loads data using filter function and transformation on speedups
class DatasetFromPkl_Transform(data.Dataset):
    """Like DatasetFromPkl, with a sample filter and a transformation
    applied to the target speedups before (optional) log/standardization."""

    def __init__(self, filename, normalized=False, log=False, maxsize=100000,
                 filter_func=None, transform_func=None):
        super().__init__()
        self.maxsize = maxsize
        self.dataset = filename
        dataset_dict = _load_speedup_pickle(filename)
        self.programs = dataset_dict['programs']
        self.program_indexes = dataset_dict['program_indexes']
        self.schedules = dataset_dict['schedules']
        self.exec_times = dataset_dict['exec_times']
        self.speedups = dataset_dict['speedup']

        self.X = []
        self.Y = []
        self.restricted_program_indexes = []
        self.restricted_schedules = []
        if filter_func is None:
            filter_func = lambda x: True
        if transform_func is None:
            transform_func = lambda x: x
        for i in tqdm(range(len(self.schedules))):
            # NOTE(review): the filter receives the dataset object itself,
            # not the sample index — preserved as-is, but looks suspicious;
            # confirm intended semantics with callers.
            if filter_func(self):
                program = self.programs[self.program_indexes[i]]
                self.X.append(
                    program.add_schedule(self.schedules[i]).__array__())
                self.Y.append(self.speedups[i])
                self.restricted_program_indexes.append(self.program_indexes[i])
                self.restricted_schedules.append(self.schedules[i])

        self.X = np.array(self.X).astype('float32')
        self.Y = np.array(self.Y, dtype='float32').reshape(-1, 1)
        # Raw kept speedups, before transform/log/standardization.
        self.Y_speedups = np.array(self.Y, dtype='float32')
        self.Y = transform_func(self.Y)
        self.Y = np.array(self.Y, dtype='float32')
        if log:
            self.Y = np.log(self.Y)
        self.mean = np.mean(self.Y)
        self.std = np.std(self.Y)
        self.Y = (self.Y - self.mean) / self.std

    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    def __len__(self):
        return _capped_len(self.Y, self.maxsize)


class DatasetFromPkl_old(data.Dataset):
    """Legacy loader: features are program and schedule arrays concatenated
    directly (no per-sample add_schedule call)."""

    def __init__(self, filename, normalized=False, log=False, maxsize=100000):
        super().__init__()
        self.maxsize = maxsize
        self.dataset = filename
        dataset_dict = _load_speedup_pickle(filename)
        self.programs = dataset_dict['programs']
        self.program_indexes = dataset_dict['program_indexes']
        self.schedules = dataset_dict['schedules']
        self.exec_times = dataset_dict['exec_times']
        self.speedups = dataset_dict['speedup']

        programs = [program.__array__() for program in self.programs]
        schedules = [schedule.__array__() for schedule in self.schedules]
        self.X = np.concatenate(
            (np.array(programs)[self.program_indexes], np.array(schedules)),
            axis=1).astype('float32')
        self.Y = np.array(self.speedups, dtype='float32').reshape(-1, 1)
        if log:
            self.Y = np.log(self.Y)
        self.mean = np.mean(self.Y)
        self.std = np.std(self.Y)
        self.Y = (self.Y - self.mean) / self.std

    def __getitem__(self, index):
        return self.X[index], self.Y[index]

    def __len__(self):
        return _capped_len(self.Y, self.maxsize)

    @staticmethod
    def pickle_data(data_path='data/training_data/',
                    dataset_path='data/speedup_dataset.pkl'):
        """Read raw training data and serialize it to `dataset_path`."""
        st = stats.Stats(data_path)
        print("Reading data")
        programs, schedules, exec_times = st.load_data()
        print("data loaded")
        print("Serializing")
        load_data.serialize(programs, schedules, exec_times,
                            filename=dataset_path)
        print("done")
def get_injured_sharks():
    """Build an IBEIS database ('WS_Injury') of injured-shark images from
    whaleshark.org keyword indexes.

    NOTE(review): exploratory script — performs live network requests,
    downloads files to ~/tmpsharks and writes an IBEIS database as side
    effects; the `if False:` blocks and trailing commented code are
    intentionally dead and kept as-is.

    >>> from ibeis.scripts.getshark import * # NOQA
    """
    import requests
    url = 'http://www.whaleshark.org/getKeywordImages.jsp'
    resp = requests.get(url)
    assert resp.status_code == 200
    keywords = resp.json()['keywords']
    key_list = ut.take_column(keywords, 'indexName')
    key_to_nice = {k['indexName']: k['readableName'] for k in keywords}

    injury_patterns = [
        'injury', 'net', 'hook', 'trunc', 'damage', 'scar', 'nicks', 'bite',
    ]
    # Keep only keywords whose name matches an injury pattern.
    injury_keys = [key for key in key_list
                   if any([pat in key for pat in injury_patterns])]
    noninjury_keys = ut.setdiff(key_list, injury_keys)
    injury_nice = ut.lmap(lambda k: key_to_nice[k], injury_keys)  # NOQA
    noninjury_nice = ut.lmap(lambda k: key_to_nice[k], noninjury_keys)  # NOQA
    key_list = injury_keys

    # Fetch the image list for every injury keyword.
    keyed_images = {}
    for key in ut.ProgIter(key_list, lbl='reading index', bs=True):
        key_url = url + '?indexName={indexName}'.format(indexName=key)
        key_resp = requests.get(key_url)
        assert key_resp.status_code == 200
        key_imgs = key_resp.json()['images']
        keyed_images[key] = key_imgs

    key_hist = {key: len(imgs) for key, imgs in keyed_images.items()}
    key_hist = ut.sort_dict(key_hist, ut.identity)
    print(ut.repr3(key_hist))
    nice_key_hist = ut.map_dict_keys(lambda k: key_to_nice[k], key_hist)
    nice_key_hist = ut.sort_dict(nice_key_hist, ut.identity)
    print(ut.repr3(nice_key_hist))

    key_to_urls = {key: ut.take_column(vals, 'url')
                   for key, vals in keyed_images.items()}

    # Count pairwise URL overlap between keywords.
    overlaps = {}
    import itertools
    overlap_img_list = []
    for k1, k2 in itertools.combinations(key_to_urls.keys(), 2):
        overlap_imgs = ut.isect(key_to_urls[k1], key_to_urls[k2])
        num_overlap = len(overlap_imgs)
        overlaps[(k1, k2)] = num_overlap
        overlaps[(k1, k1)] = len(key_to_urls[k1])
        if num_overlap > 0:
            #print('[%s][%s], overlap=%r' % (k1, k2, num_overlap))
            overlap_img_list.extend(overlap_imgs)

    all_img_urls = list(set(ut.flatten(key_to_urls.values())))
    num_all = len(all_img_urls)  # NOQA
    print('num_all = %r' % (num_all,))

    # Determine super-categories
    categories = ['nicks', 'scar', 'trunc']

    # Force these keys into these categories
    key_to_cat = {'scarbite': 'other_injury'}

    cat_to_keys = ut.ddict(list)

    for key in key_to_urls.keys():
        flag = 1
        if key in key_to_cat:
            cat = key_to_cat[key]
            cat_to_keys[cat].append(key)
            continue
        for cat in categories:
            if cat in key:
                cat_to_keys[cat].append(key)
                flag = 0
        if flag:
            # No category substring matched: bucket as generic injury.
            cat = 'other_injury'
            cat_to_keys[cat].append(key)

    cat_urls = ut.ddict(list)
    for cat, keys in cat_to_keys.items():
        for key in keys:
            cat_urls[cat].extend(key_to_urls[key])

    cat_hist = {}
    for cat in list(cat_urls.keys()):
        cat_urls[cat] = list(set(cat_urls[cat]))
        cat_hist[cat] = len(cat_urls[cat])

    print(ut.repr3(cat_to_keys))
    print(ut.repr3(cat_hist))

    # Invert cat_to_keys into a key -> category mapping.
    key_to_cat = dict([(val, key) for key, vals in cat_to_keys.items()
                       for val in vals])

    #ingestset = {
    #    '__class__': 'ImageSet',
    #    'images': ut.ddict(dict)
    #}
    #for key, key_imgs in keyed_images.items():
    #    for imgdict in key_imgs:
    #        url = imgdict['url']
    #        encid = imgdict['correspondingEncounterNumber']
    #        # Make structure
    #        encdict = encounters[encid]
    #        encdict['__class__'] = 'Encounter'
    #        imgdict = ut.delete_keys(imgdict.copy(), ['correspondingEncounterNumber'])
    #        imgdict['__class__'] = 'Image'
    #        cat = key_to_cat[key]
    #        annotdict = {'relative_bbox': [.01, .01, .98, .98], 'tags': [cat, key]}
    #        annotdict['__class__'] = 'Annotation'
    #        # Ensure structures exist
    #        encdict['images'] = encdict.get('images', [])
    #        imgdict['annots'] = imgdict.get('annots', [])
    #        # Add an image to this encounter
    #        encdict['images'].append(imgdict)
    #        # Add an annotation to this image
    #        imgdict['annots'].append(annotdict)

    ##http://springbreak.wildbook.org/rest/org.ecocean.Encounter/1111
    #get_enc_url = 'http://www.whaleshark.org/rest/org.ecocean.Encounter/%s' % (encid,)
    #resp = requests.get(get_enc_url)
    #print(ut.repr3(encdict))
    #print(ut.repr3(encounters))

    # Download the files to the local disk
    #fpath_list =
    all_urls = ut.unique(ut.take_column(
        ut.flatten(
            ut.dict_subset(keyed_images, ut.flatten(cat_to_keys.values())).values()
        ),
        'url'))

    dldir = ut.truepath('~/tmpsharks')
    from os.path import commonprefix, basename  # NOQA
    prefix = commonprefix(all_urls)
    suffix_list = [url_[len(prefix):] for url_ in all_urls]
    fname_list = [suffix.replace('/', '--') for suffix in suffix_list]

    fpath_list = []
    for url, fname in ut.ProgIter(zip(all_urls, fname_list),
                                  lbl='downloading imgs', freq=1):
        fpath = ut.grab_file_url(url, download_dir=dldir, fname=fname,
                                 verbose=False)
        fpath_list.append(fpath)

    # Make sure we keep orig info
    #url_to_keys = ut.ddict(list)
    url_to_info = ut.ddict(dict)
    for key, imgdict_list in keyed_images.items():
        for imgdict in imgdict_list:
            url = imgdict['url']
            info = url_to_info[url]
            for k, v in imgdict.items():
                info[k] = info.get(k, [])
                info[k].append(v)
            info['keys'] = info.get('keys', [])
            info['keys'].append(key)
            #url_to_keys[url].append(key)

    info_list = ut.take(url_to_info, all_urls)
    for info in info_list:
        if len(set(info['correspondingEncounterNumber'])) > 1:
            assert False, 'url with two different encounter nums'
    # Combine duplicate tags

    hashid_list = [ut.get_file_uuid(fpath_, stride=8)
                   for fpath_ in ut.ProgIter(fpath_list, bs=True)]
    groupxs = ut.group_indices(hashid_list)[1]

    # Group properties by duplicate images
    #groupxs = [g for g in groupxs if len(g) > 1]
    fpath_list_ = ut.take_column(ut.apply_grouping(fpath_list, groupxs), 0)
    url_list_ = ut.take_column(ut.apply_grouping(all_urls, groupxs), 0)
    info_list_ = [ut.map_dict_vals(ut.flatten, ut.dict_accum(*info_))
                  for info_ in ut.apply_grouping(info_list, groupxs)]

    encid_list_ = [ut.unique(info_['correspondingEncounterNumber'])[0]
                   for info_ in info_list_]
    keys_list_ = [ut.unique(info_['keys']) for info_ in info_list_]
    cats_list_ = [ut.unique(ut.take(key_to_cat, keys)) for keys in keys_list_]

    clist = ut.ColumnLists({
        'gpath': fpath_list_,
        'url': url_list_,
        'encid': encid_list_,
        'key': keys_list_,
        'cat': cats_list_,
    })

    #for info_ in ut.apply_grouping(info_list, groupxs):
    #    info = ut.dict_accum(*info_)
    #    info = ut.map_dict_vals(ut.flatten, info)
    #    x = ut.unique(ut.flatten(ut.dict_accum(*info_)['correspondingEncounterNumber']))
    #    if len(x) > 1:
    #        info = info.copy()
    #        del info['keys']
    #        print(ut.repr3(info))

    flags = ut.lmap(ut.fpath_has_imgext, clist['gpath'])
    clist = clist.compress(flags)

    import ibeis
    ibs = ibeis.opendb('WS_Injury', allow_newdir=True)
    gid_list = ibs.add_images(clist['gpath'])
    clist['gid'] = gid_list

    failed_flags = ut.flag_None_items(clist['gid'])
    print('# failed %s' % (sum(failed_flags)),)
    passed_flags = ut.not_list(failed_flags)
    clist = clist.compress(passed_flags)
    ut.assert_all_not_None(clist['gid'])
    #ibs.get_image_uris_original(clist['gid'])
    ibs.set_image_uris_original(clist['gid'], clist['url'], overwrite=True)

    #ut.zipflat(clist['cat'], clist['key'])
    if False:
        # Can run detection instead
        clist['tags'] = ut.zipflat(clist['cat'])
        aid_list = ibs.use_images_as_annotations(clist['gid'],
                                                 adjust_percent=0.01,
                                                 tags_list=clist['tags'])
        aid_list

    import plottool as pt
    from ibeis import core_annots
    pt.qt4ensure()
    #annots = ibs.annots()
    #aids = [1, 2]
    #ibs.depc_annot.get('hog', aids , 'hog')
    #ibs.depc_annot.get('chip', aids, 'img')
    # Interactive visualization: HOG image next to the chip for each annot.
    for aid in ut.InteractiveIter(ibs.get_valid_aids()):
        hogs = ibs.depc_annot.d.get_hog_hog([aid])
        chips = ibs.depc_annot.d.get_chips_img([aid])
        chip = chips[0]
        hogimg = core_annots.make_hog_block_image(hogs[0])
        pt.clf()
        pt.imshow(hogimg, pnum=(1, 2, 1))
        pt.imshow(chip, pnum=(1, 2, 2))
        fig = pt.gcf()
        fig.show()
        fig.canvas.draw()

    #print(len(groupxs))
    #if False:
    #groupxs = ut.find_duplicate_items(ut.lmap(basename, suffix_list)).values()
    #print(ut.repr3(ut.apply_grouping(all_urls, groupxs)))
    #    # FIX
    #    for fpath, fname in zip(fpath_list, fname_list):
    #        if ut.checkpath(fpath):
    #            ut.move(fpath, join(dirname(fpath), fname))
    #            print('fpath = %r' % (fpath,))

    #import ibeis
    #from ibeis.dbio import ingest_dataset
    #dbdir = ibeis.sysres.lookup_dbdir('WS_ALL')
    #self = ingest_dataset.Ingestable2(dbdir)

    if False:
        # Show overlap matrix
        import plottool as pt
        import pandas as pd
        import numpy as np
        dict_ = overlaps
        s = pd.Series(dict_, index=pd.MultiIndex.from_tuples(overlaps))
        df = s.unstack()
        lhs, rhs = df.align(df.T)
        df = lhs.add(rhs, fill_value=0).fillna(0)

        label_texts = df.columns.values

        def label_ticks(label_texts):
            import plottool as pt
            truncated_labels = [repr(lbl[0:100]) for lbl in label_texts]
            ax = pt.gca()
            ax.set_xticks(list(range(len(label_texts))))
            ax.set_xticklabels(truncated_labels)
            [lbl.set_rotation(-55) for lbl in ax.get_xticklabels()]
            [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]

            #xgrid, ygrid = np.meshgrid(range(len(label_texts)), range(len(label_texts)))
            #pt.plot_surface3d(xgrid, ygrid, disjoint_mat)
            ax.set_yticks(list(range(len(label_texts))))
            ax.set_yticklabels(truncated_labels)
            [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
            [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]
            #[lbl.set_rotation(20) for lbl in ax.get_yticklabels()]

        #df = df.sort(axis=0)
        #df = df.sort(axis=1)

        sortx = np.argsort(df.sum(axis=1).values)[::-1]
        df = df.take(sortx, axis=0)
        df = df.take(sortx, axis=1)

        fig = pt.figure(fnum=1)
        fig.clf()
        mat = df.values.astype(np.int32)
        mat[np.diag_indices(len(mat))] = 0
        vmax = mat[(1 - np.eye(len(mat))).astype(np.bool)].max()
        import matplotlib.colors
        norm = matplotlib.colors.Normalize(vmin=0, vmax=vmax, clip=True)
        pt.plt.imshow(mat, cmap='hot', norm=norm, interpolation='none')
        pt.plt.colorbar()
        pt.plt.grid('off')
        label_ticks(label_texts)
        fig.tight_layout()

    #overlap_df = pd.DataFrame.from_dict(overlap_img_list)
    class TmpImage(ut.NiceRepr):
        pass

    from skimage.feature import hog
    from skimage import data, color, exposure
    import plottool as pt
    image2 = color.rgb2gray(data.astronaut())  # NOQA

    fpath = './GOPR1120.JPG'

    import vtool as vt
    for fpath in [fpath]:
        """
        http://scikit-image.org/docs/dev/auto_examples/plot_hog.html
        """
        image = vt.imread(fpath, grayscale=True)
        image = pt.color_funcs.to_base01(image)

        fig = pt.figure(fnum=2)
        fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                            cells_per_block=(1, 1), visualise=True)

        fig, (ax1, ax2) = pt.plt.subplots(1, 2, figsize=(8, 4),
                                          sharex=True, sharey=True)

        ax1.axis('off')
        ax1.imshow(image, cmap=pt.plt.cm.gray)
        ax1.set_title('Input image')
        ax1.set_adjustable('box-forced')

        # Rescale histogram for better display
        hog_image_rescaled = exposure.rescale_intensity(hog_image,
                                                        in_range=(0, 0.02))

        ax2.axis('off')
        ax2.imshow(hog_image_rescaled, cmap=pt.plt.cm.gray)
        ax2.set_title('Histogram of Oriented Gradients')
        ax1.set_adjustable('box-forced')
        pt.plt.show()

    #for


def detect_sharks(ibs, gids):
    """Run the YOLO localizer over `gids` and collect the best box per image.

    NOTE(review): depends on pretrained weights under ~/work/WS_ALL; also
    note `set_image_imagesettext(multi_gids, ...)` is called BEFORE the loop
    that populates multi_gids, so it receives an empty list — looks
    misplaced; confirm intent.
    """
    #import ibeis
    #ibs = ibeis.opendb('WS_ALL')
    config = {
        'algo'            : 'yolo',
        'sensitivity'     : 0.2,
        'config_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg'),
        'weight_filepath' : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.39000.weights'),
        'class_filepath'  : ut.truepath('~/work/WS_ALL/localizer_backup/detect.yolo.2.cfg.classes'),
    }
    depc = ibs.depc_image

    #imgsets = ibs.imagesets(text='Injured Sharks')
    #images = ibs.images(imgsets.gids[0])
    images = ibs.images(gids)
    images = images.compress([ext not in ['.gif'] for ext in images.exts])
    gid_list = images.gids

    # result is a tuple:
    # (score, bbox_list, theta_list, conf_list, class_list)
    results_list = depc.get_property('localizations', gid_list, None,
                                     config=config)

    results_list2 = []
    multi_gids = []
    failed_gids = []

    #ibs.set_image_imagesettext(failed_gids, ['Fixme'] * len(failed_gids))
    ibs.set_image_imagesettext(multi_gids, ['Fixme2'] * len(multi_gids))
    failed_gids

    for gid, res in zip(gid_list, results_list):
        score, bbox_list, theta_list, conf_list, class_list = res
        if len(bbox_list) == 0:
            failed_gids.append(gid)
        elif len(bbox_list) == 1:
            results_list2.append((gid, bbox_list, theta_list))
        elif len(bbox_list) > 1:
            # Multiple detections: keep only the highest-confidence box.
            multi_gids.append(gid)
            idx = conf_list.argmax()
            res2 = (gid, bbox_list[idx:idx + 1], theta_list[idx:idx + 1])
            results_list2.append(res2)

    ut.dict_hist(([t[1].shape[0] for t in results_list]))
localized_imgs = ibs.images(ut.take_column(results_list2, 0)) assert all([len(a) == 1 for a in localized_imgs.aids]) old_annots = ibs.annots(ut.flatten(localized_imgs.aids)) #old_tags = old_annots.case_tags # Override old bboxes import numpy as np bboxes = np.array(ut.take_column(results_list2, 1))[:, 0, :] ibs.set_annot_bboxes(old_annots.aids, bboxes) if False: import plottool as pt pt.qt4ensure() inter = pt.MultiImageInteraction( ibs.get_image_paths(ut.take_column(results_list2, 0)), bboxes_list=ut.take_column(results_list2, 1) ) inter.dump_to_disk('shark_loc', num=50, prefix='shark_loc') inter.start() inter = pt.MultiImageInteraction(ibs.get_image_paths(failed_gids)) inter.start() inter = pt.MultiImageInteraction(ibs.get_image_paths(multi_gids)) inter.start() def train_part_detector(): """ Problem: healthy sharks usually have a mostly whole body shot injured sharks usually have a close up shot. This distribution of images is likely what the injur-shark net is picking up on. The goal is to train a detector that looks for things that look like the distribution of injured sharks. 
We will run this on healthy sharks to find the parts of """ import ibeis ibs = ibeis.opendb('WS_ALL') imgset = ibs.imagesets(text='Injured Sharks') injured_annots = imgset.annots[0] # NOQA #config = { # 'dim_size': (224, 224), # 'resize_dim': 'wh' #} from pydarknet import Darknet_YOLO_Detector data_path = ibs.export_to_xml() output_path = join(ibs.get_cachedir(), 'training', 'localizer') ut.ensuredir(output_path) dark = Darknet_YOLO_Detector() results = dark.train(data_path, output_path) del dark localizer_weight_path, localizer_config_path, localizer_class_path = results classifier_model_path = ibs.classifier_train() labeler_model_path = ibs.labeler_train() output_path = join(ibs.get_cachedir(), 'training', 'detector') ut.ensuredir(output_path) ut.copy(localizer_weight_path, join(output_path, 'localizer.weights')) ut.copy(localizer_config_path, join(output_path, 'localizer.config')) ut.copy(localizer_class_path, join(output_path, 'localizer.classes')) ut.copy(classifier_model_path, join(output_path, 'classifier.npy')) ut.copy(labeler_model_path, join(output_path, 'labeler.npy')) # ibs.detector_train() def purge_ensure_one_annot_per_images(ibs): """ pip install Pipe """ # Purge all but one annotation images = ibs.images() #images.aids groups = images._annot_groups import numpy as np # Take all but the largest annotations per images large_masks = [ut.index_to_boolmask([np.argmax(x)], len(x)) for x in groups.bbox_area] small_masks = ut.lmap(ut.not_list, large_masks) # Remove all but the largets annotation small_aids = ut.zipcompress(groups.aid, small_masks) small_aids = ut.flatten(small_aids) # Fix any empty images images = ibs.images() empty_images = ut.where(np.array(images.num_annotations) == 0) print('empty_images = %r' % (empty_images,)) #list(map(basename, map(dirname, images.uris_original))) def VecPipe(func): import pipe @pipe.Pipe def wrapped(sequence): return map(func, sequence) #return (None if item is None else func(item) for item in sequence) return 
wrapped name_list = list(images.uris_original | VecPipe(dirname) | VecPipe(basename)) aids_list = images.aids ut.assert_all_eq(list(aids_list | VecPipe(len))) annots = ibs.annots(ut.flatten(aids_list)) annots.names = name_list def shark_misc(): import ibeis ibs = ibeis.opendb('WS_ALL') aid_list = ibs.get_valid_aids() flag_list = ibs.get_annot_been_adjusted(aid_list) adjusted_aids = ut.compress(aid_list, flag_list) return adjusted_aids #if False: # # TRY TO FIGURE OUT WHY URLS ARE MISSING IN STEP 1 # encounter_to_parsed1 = parsed1.group_items('encounter') # encounter_to_parsed2 = parsed2.group_items('encounter') # url_to_parsed1 = parsed1.group_items('img_url') # url_to_parsed2 = parsed2.group_items('img_url') # def set_overlap(set1, set2): # set1 = set(set1) # set2 = set(set2) # return ut.odict([ # ('s1', len(set1)), # ('s2', len(set2)), # ('isect', len(set1.intersection(set2))), # ('union', len(set1.union(set2))), # ('s1 - s2', len(set1.difference(set2))), # ('s2 - s1', len(set2.difference(set1))), # ]) # print('encounter overlap: ' + ut.repr3(set_overlap(encounter_to_parsed1, encounter_to_parsed2))) # print('url overlap: ' + ut.repr3(set_overlap(url_to_parsed1, url_to_parsed2))) # url1 = list(url_to_parsed1.keys()) # url2 = list(url_to_parsed2.keys()) # # remove common prefixes # from os.path import commonprefix, basename # NOQA # cp1 = commonprefix(url1) # cp2 = commonprefix(url2) # #suffix1 = sorted([u[len(cp1):].lower() for u in url1]) # #suffix2 = sorted([u[len(cp2):].lower() for u in url2]) # suffix1 = sorted([u[len(cp1):] for u in url1]) # suffix2 = sorted([u[len(cp2):] for u in url2]) # print('suffix overlap: ' + ut.repr3(set_overlap(suffix1, suffix2))) # set1 = set(suffix1) # set2 = set(suffix2) # only1 = list(set1 - set1.intersection(set2)) # only2 = list(set2 - set1.intersection(set2)) # import numpy as np # for suf in ut.ProgIter(only2, bs=True): # dist = np.array(ut.edit_distance(suf, only1)) # idx = ut.argsort(dist)[0:3] # if dist[idx][0] < 3: # 
close = ut.take(only1, idx) # print('---') # print('suf = %r' % (join(cp2, suf),)) # print('close = %s' % (ut.repr3([join(cp1, c) for c in close]),)) # print('---') # break # # Associate keywords with original images # #lower_urls = [x.lower() for x in parsed['img_url']] # url_to_idx = ut.make_index_lookup(parsed1['img_url']) # parsed1['keywords'] = [[] for _ in range(len(parsed1))] # for url, keys in url_to_keys.items(): # # hack because urls are note in the same format # url = url.replace('wildbook_data_dir', 'shepherd_data_dir') # url = url.lower() # if url in url_to_idx: # idx = url_to_idx[url] # parsed1['keywords'][idx].extend(keys) #healthy_annots = ibs.annots(ibs.imagesets(text='Non-Injured Sharks').aids[0]) #ibs.set_annot_prop('healthy', healthy_annots.aids, [True] * len(healthy_annots)) #['healthy' in t and len(t) > 0 for t in single_annots.case_tags] #healthy_tags = [] #ut.find_duplicate_items(cur_img_uuids) #ut.find_duplicate_items(new_img_uuids) #cur_uuids = set(cur_img_uuids) #new_uuids = set(new_img_uuids) #both_uuids = new_uuids.intersection(cur_uuids) #only_cur = cur_uuids - both_uuids #only_new = new_uuids - both_uuids #print('len(cur_uuids) = %r' % (len(cur_uuids))) #print('len(new_uuids) = %r' % (len(new_uuids))) #print('len(both_uuids) = %r' % (len(both_uuids))) #print('len(only_cur) = %r' % (len(only_cur))) #print('len(only_new) = %r' % (len(only_new))) # Ensure that data in both sets are syncronized #images_both = [] #if False: # print('Removing small images') # import numpy as np # import vtool as vt # imgsize_list = np.array([vt.open_image_size(gpath) for gpath in parsed['new_fpath']]) # sqrt_area_list = np.sqrt(np.prod(imgsize_list, axis=1)) # areq_flags_list = sqrt_area_list >= 750 # parsed = parsed.compress(areq_flags_list)
"""Regression tests for Django generic relations (GenericForeignKey / GenericRelation)."""
from django.db.models import Q, Sum
from django.db.models.deletion import ProtectedError
from django.db.utils import IntegrityError
from django.forms.models import modelform_factory
from django.test import TestCase, skipIfDBFeature

from .models import (
    A, Address, B, Board, C, CharLink, Company, Contact, Content, D,
    Developer, Guild, HasLinkThing, Link, Node, Note, OddRelation1,
    OddRelation2, Organization, Person, Place, Related, Restaurant, Tag,
    Team, TextLink,
)


class GenericRelationTests(TestCase):
    """Ticket-driven regression tests exercising queries, deletion cascades and
    forms over models linked through generic relations."""

    def test_inherited_models_content_type(self):
        """
        Test that GenericRelations on inherited classes use the correct content
        type.
        """
        p = Place.objects.create(name="South Park")
        r = Restaurant.objects.create(name="Chubby's")
        l1 = Link.objects.create(content_object=p)
        l2 = Link.objects.create(content_object=r)
        # Each object must only see the link created against its own
        # (concrete) content type, not its parent's.
        self.assertEqual(list(p.links.all()), [l1])
        self.assertEqual(list(r.links.all()), [l2])

    def test_reverse_relation_pk(self):
        """
        Test that the correct column name is used for the primary key on the
        originating model of a query.  See #12664.
        """
        p = Person.objects.create(account=23, name='Chef')
        Address.objects.create(street='123 Anywhere Place',
                               city='Conifer', state='CO',
                               zipcode='80433', content_object=p)
        qs = Person.objects.filter(addresses__zipcode='80433')
        self.assertEqual(1, qs.count())
        self.assertEqual('Chef', qs[0].name)

    def test_charlink_delete(self):
        # Deleting the target of a CharField-keyed generic FK must not raise.
        oddrel = OddRelation1.objects.create(name='clink')
        CharLink.objects.create(content_object=oddrel)
        oddrel.delete()

    def test_textlink_delete(self):
        # Deleting the target of a TextField-keyed generic FK must not raise.
        oddrel = OddRelation2.objects.create(name='tlink')
        TextLink.objects.create(content_object=oddrel)
        oddrel.delete()

    def test_q_object_or(self):
        """
        Tests that SQL query parameters for generic relations are properly
        grouped when OR is used.

        Test for bug http://code.djangoproject.com/ticket/11535

        In this bug the first query (below) works while the second, with the
        query parameters the same but in reverse order, does not.

        The issue is that the generic relation conditions do not get properly
        grouped in parentheses.
        """
        note_contact = Contact.objects.create()
        org_contact = Contact.objects.create()
        Note.objects.create(note='note', content_object=note_contact)
        org = Organization.objects.create(name='org name')
        org.contacts.add(org_contact)
        # search with a non-matching note and a matching org name
        qs = Contact.objects.filter(Q(notes__note__icontains=r'other note') |
                                    Q(organizations__name__icontains=r'org name'))
        self.assertIn(org_contact, qs)
        # search again, with the same query parameters, in reverse order
        qs = Contact.objects.filter(
            Q(organizations__name__icontains=r'org name') |
            Q(notes__note__icontains=r'other note'))
        self.assertIn(org_contact, qs)

    def test_join_reuse(self):
        # Two chained filter() calls over the same generic relation must
        # produce two separate JOINs, not reuse one.
        qs = Person.objects.filter(
            addresses__street='foo'
        ).filter(
            addresses__street='bar'
        )
        self.assertEqual(str(qs.query).count('JOIN'), 2)

    def test_generic_relation_ordering(self):
        """
        Test that ordering over a generic relation does not include extraneous
        duplicate results, nor excludes rows not participating in the relation.
        """
        p1 = Place.objects.create(name="South Park")
        p2 = Place.objects.create(name="The City")
        c = Company.objects.create(name="Chubby's Intl.")
        Link.objects.create(content_object=p1)
        Link.objects.create(content_object=c)

        places = list(Place.objects.order_by('links__id'))

        def count_places(place):
            return len([p for p in places if p.id == place.id])

        # p2 has no links but must still appear exactly once.
        self.assertEqual(len(places), 2)
        self.assertEqual(count_places(p1), 1)
        self.assertEqual(count_places(p2), 1)

    def test_target_model_is_unsaved(self):
        """Test related to #13085"""
        # Fails with another, ORM-level error
        dev1 = Developer(name='Joe')
        note = Note(note='Deserves promotion', content_object=dev1)
        with self.assertRaises(IntegrityError):
            note.save()

    def test_target_model_len_zero(self):
        """
        Saving a model with a GenericForeignKey to a model instance whose
        __len__ method returns 0 (Team.__len__() here) shouldn't fail (#13085).
        """
        team1 = Team.objects.create(name='Backend devs')
        note = Note(note='Deserve a bonus', content_object=team1)
        note.save()

    def test_target_model_nonzero_false(self):
        """Test related to #13085"""
        # __nonzero__() returns False -- This actually doesn't currently fail.
        # This test validates that
        g1 = Guild.objects.create(name='First guild')
        note = Note(note='Note for guild', content_object=g1)
        note.save()

    @skipIfDBFeature('interprets_empty_strings_as_nulls')
    def test_gfk_to_model_with_empty_pk(self):
        """Test related to #13085"""
        # Saving model with GenericForeignKey to model instance with an
        # empty CharField PK
        b1 = Board.objects.create(name='')
        tag = Tag(label='VP', content_object=b1)
        tag.save()

    def test_ticket_20378(self):
        # Create a couple of extra HasLinkThing so that the autopk value
        # isn't the same for Link and HasLinkThing.
        hs1 = HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        hs3 = HasLinkThing.objects.create()
        hs4 = HasLinkThing.objects.create()
        l1 = Link.objects.create(content_object=hs3)
        l2 = Link.objects.create(content_object=hs4)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l1),
            [hs3], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l2),
            [hs4], lambda x: x)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l2),
            [hs1, hs2, hs3], lambda x: x, ordered=False)
        self.assertQuerysetEqual(
            HasLinkThing.objects.exclude(links=l1),
            [hs1, hs2, hs4], lambda x: x, ordered=False)

    def test_ticket_20564(self):
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        c1 = C.objects.create(b=b1)
        c2 = C.objects.create(b=b2)
        c3 = C.objects.create(b=b3)
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        self.assertQuerysetEqual(
            C.objects.filter(b__a__flag=None),
            [c1, c3], lambda x: x
        )
        self.assertQuerysetEqual(
            C.objects.exclude(b__a__flag=None),
            [c2], lambda x: x
        )

    def test_ticket_20564_nullable_fk(self):
        b1 = B.objects.create()
        b2 = B.objects.create()
        b3 = B.objects.create()
        d1 = D.objects.create(b=b1)
        d2 = D.objects.create(b=b2)
        d3 = D.objects.create(b=b3)
        d4 = D.objects.create()
        A.objects.create(flag=None, content_object=b1)
        A.objects.create(flag=True, content_object=b1)
        A.objects.create(flag=True, content_object=b2)
        self.assertQuerysetEqual(
            D.objects.exclude(b__a__flag=None),
            [d2], lambda x: x
        )
        self.assertQuerysetEqual(
            D.objects.filter(b__a__flag=None),
            [d1, d3, d4], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.filter(a__flag=None),
            [b1, b3], lambda x: x
        )
        self.assertQuerysetEqual(
            B.objects.exclude(a__flag=None),
            [b2], lambda x: x
        )

    def test_extra_join_condition(self):
        # A crude check that content_type_id is taken in account in the
        # join/subquery condition.
        self.assertIn("content_type_id", str(B.objects.exclude(a__flag=None).query).lower())
        # No need for any joins - the join from inner query can be trimmed in
        # this case (but not in the above case as no a objects at all for given
        # B would then fail).
        self.assertNotIn(" join ", str(B.objects.exclude(a__flag=True).query).lower())
        self.assertIn("content_type_id", str(B.objects.exclude(a__flag=True).query).lower())

    def test_annotate(self):
        hs1 = HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        HasLinkThing.objects.create()
        b = Board.objects.create(name=str(hs1.pk))
        Link.objects.create(content_object=hs2)
        l = Link.objects.create(content_object=hs1)
        Link.objects.create(content_object=b)
        qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
        # If content_type restriction isn't in the query's join condition,
        # then wrong results are produced here as the link to b will also match
        # (b and hs1 have equal pks).
        self.assertEqual(qs.count(), 1)
        self.assertEqual(qs[0].links__sum, l.id)
        l.delete()
        # Now if we don't have proper left join, we will not produce any
        # results at all here.
        # clear cached results
        qs = qs.all()
        self.assertEqual(qs.count(), 1)
        # Note - 0 here would be a nicer result...
        self.assertIs(qs[0].links__sum, None)
        # Finally test that filtering works.
        self.assertEqual(qs.filter(links__sum__isnull=True).count(), 1)
        self.assertEqual(qs.filter(links__sum__isnull=False).count(), 0)

    def test_filter_targets_related_pk(self):
        # Filtering by a raw pk value must match against Link.pk, not
        # Link.object_id (the two differ here by construction).
        HasLinkThing.objects.create()
        hs2 = HasLinkThing.objects.create()
        l = Link.objects.create(content_object=hs2)
        self.assertNotEqual(l.object_id, l.pk)
        self.assertQuerysetEqual(
            HasLinkThing.objects.filter(links=l.pk),
            [hs2], lambda x: x)

    def test_editable_generic_rel(self):
        GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
        form = GenericRelationForm()
        self.assertIn('links', form.fields)
        form = GenericRelationForm({'links': None})
        self.assertTrue(form.is_valid())
        form.save()
        links = HasLinkThing._meta.get_field('links')
        self.assertEqual(links.save_form_data_calls, 1)

    def test_ticket_22998(self):
        related = Related.objects.create()
        content = Content.objects.create(related_obj=related)
        Node.objects.create(content=content)

        # deleting the Related cascades to the Content cascades to the Node,
        # where the pre_delete signal should fire and prevent deletion.
        with self.assertRaises(ProtectedError):
            related.delete()

    def test_ticket_22982(self):
        place = Place.objects.create(name='My Place')
        self.assertIn('GenericRelatedObjectManager', str(place.links))
"""Command-line utilities for experiments subsystem.""" import argparse import datetime import collections import yaml import dateutil.tz from jacquard.utils import is_recursive from jacquard.buckets import NotEnoughBucketsException, close, release from jacquard.storage import retrying from jacquard.commands import BaseCommand, CommandError from jacquard.constraints import ConstraintContext from jacquard.experiments.experiment import Experiment class Launch(BaseCommand): """ Launch a given experiment. This is one of the main user commands. It promotes an experiment to being live, which effectively locks it out from being changed and starts putting users on its branches. """ help = "start an experiment running" def add_arguments(self, parser): """Add argparse arguments.""" parser.add_argument("experiment", help="experiment to launch") parser.add_argument( "--relaunch", action="store_true", help=( "re-launch a previously concluded test, " "discarding previous results" ), ) @retrying def handle(self, config, options): """Run command.""" with config.storage.transaction() as store: try: experiment = Experiment.from_store(store, options.experiment) except LookupError: raise CommandError( 'No such experiment: "{id}"'.format(id=options.experiment) ) current_experiments = store.get("active-experiments", []) if experiment.id in current_experiments: raise CommandError( "Experiment '{experiment_id}' already launched!".format( experiment_id=experiment.id ) ) if experiment.concluded is not None: if options.relaunch: experiment.concluded = None experiment.launched = None else: raise CommandError( "Experiment '{id}' already concluded!".format(id=experiment.id) ) experiment.launched = datetime.datetime.now(dateutil.tz.tzutc()) specialised_constraints = experiment.constraints.specialise( ConstraintContext(era_start_date=experiment.launched) ) try: release( store, experiment.id, specialised_constraints, experiment.branch_launch_configuration(), ) except NotEnoughBucketsException as 
e: raise CommandError( "Conflicts: {conflicts}".format( conflicts=e.human_readable_conflicts() ) ) store["active-experiments"] = (current_experiments + [options.experiment]) experiment.save(store) class Conclude(BaseCommand): """ Conclude a given experiment. This is one of the main user commands. It demotes an experiment to no longer being live, records a conclusion date, and (optionally but strongly advised) promotes the settings from one of its branches into the defaults. """ help = "finish an experiment" def add_arguments(self, parser): """Add argparse arguments.""" parser.add_argument("experiment", help="experiment to conclude") mutex_group = parser.add_mutually_exclusive_group(required=True) mutex_group.add_argument( "branch", help="branch to promote to default", nargs="?" ) mutex_group.add_argument( "--no-promote-branch", help="do not promote a branch to default", action="store_false", dest="promote_branch", ) @retrying def handle(self, config, options): """Run command.""" with config.storage.transaction() as store: try: experiment = Experiment.from_store(store, options.experiment) except LookupError: raise CommandError( 'No such experiment: "{id}"'.format(id=options.experiment) ) current_experiments = store.get("active-experiments", []) concluded_experiments = store.get("concluded-experiments", []) if options.experiment not in current_experiments: if experiment.concluded is None: message = ("Experiment '{experiment_id}' not launched!").format( experiment_id=options.experiment ) else: message = ( "Experiment '{experiment_id}' already concluded (at " "{concluded})!" 
).format( experiment_id=options.experiment, concluded=experiment.concluded ) raise CommandError(message) current_experiments.remove(options.experiment) concluded_experiments.append(options.experiment) close( store, experiment.id, experiment.constraints, experiment.branch_launch_configuration(), ) if options.promote_branch: defaults = store.get("defaults", {}) # Find branch matching ID try: branch_configuration = experiment.branch(options.branch) except LookupError: raise CommandError( "Experiment '{experiment_id}' has no branch '{branch_name}'".format( experiment_id=options.experiment, branch_name=options.branch ) ) defaults.update(branch_configuration["settings"]) store["defaults"] = defaults experiment.concluded = datetime.datetime.now(dateutil.tz.tzutc()) experiment.save(store) store["active-experiments"] = current_experiments store["concluded-experiments"] = concluded_experiments class Load(BaseCommand): """ Load an experiment definition from a file. This is obviously a pretty awful interface which will only do for this MVP state of the project, but currently this is the mechanism for loading an experiment definition. 
""" help = "load an experiment definition from a file" def add_arguments(self, parser): """Add argparse arguments.""" parser.add_argument( "files", nargs="+", type=argparse.FileType("r"), metavar="file", help="experiment definition", ) parser.add_argument( "--skip-launched", action="store_true", help="do not load or error on launched experiments", ) @retrying def handle(self, config, options): """Run command.""" with config.storage.transaction() as store: live_experiments = store.get("active-experiments", ()) concluded_experiments = store.get("concluded-experiments", ()) for file in options.files: try: definition = yaml.safe_load(file) except (yaml.YAMLError, UnicodeError) as e: raise CommandError(str(e)) if is_recursive(definition): raise CommandError("Recursive structure in experiment definition") try: experiment = Experiment.from_json(definition) except ValueError as e: raise CommandError(str(e)) from None if experiment.id in live_experiments: if options.skip_launched: continue else: raise CommandError( "Experiment '{experiment_id}' is live, " "refusing to edit".format(experiment_id=experiment.id) ) elif experiment.id in concluded_experiments: if options.skip_launched: continue else: raise CommandError( "Experiment '{experiment_id}' has concluded, " "refusing to edit".format(experiment_id=experiment.id) ) experiment.save(store) class ListExperiments(BaseCommand): """ List all experiments. Mostly useful in practice when one cannot remember the ID of an experiment. 
""" help = "list all experiments" def add_arguments(self, parser): """Add argparse arguments.""" parser.add_argument( "--detailed", action="store_true", help="whether to show experiment details in the listing", ) parser.add_argument( "--active", action="store_true", help="only show active experiments" ) def handle(self, config, options): """Run command.""" with config.storage.transaction(read_only=True) as store: for experiment in Experiment.enumerate(store): if options.active and not experiment.is_live(): continue Show.show_experiment(experiment, options.detailed) class Show(BaseCommand): """Show a given experiment.""" help = "show details about an experiment" @staticmethod def show_experiment(experiment, detailed=True, with_settings=False): """Print information about the given experiment.""" if experiment.name == experiment.id: title = experiment.id else: title = "{experiment_id}: {name}".format( experiment_id=experiment.id, name=experiment.name ) print(title) if detailed: print("=" * len(title)) print() if experiment.launched: print("Launched: {launch_date}".format(launch_date=experiment.launched)) if experiment.concluded: print( "Concluded: {concluded_date}".format( concluded_date=experiment.concluded ) ) else: print("In progress") else: print("Not yet launched") print() if with_settings: settings = set() for branch in experiment.branches: settings.update(branch["settings"].keys()) print("Settings") print("--------") for setting in sorted(settings): print(" * {setting}".format(setting=setting)) print() def add_arguments(self, parser): """Add argparse arguments.""" parser.add_argument("experiment", help="experiment to show") parser.add_argument( "--settings", action="store_true", help="include which settings this experiment will cover", ) def handle(self, config, options): """Run command.""" with config.storage.transaction(read_only=True) as store: try: experiment = Experiment.from_store(store, options.experiment) except LookupError: raise CommandError( 'No such 
experiment: "{id}"'.format(id=options.experiment) ) self.show_experiment(experiment, with_settings=options.settings) class SettingsUnderActiveExperiments(BaseCommand): """Show all settings which are covered under active experiments.""" help = "show settings under active experimentation" def handle(self, config, options): """Run command.""" all_settings = set() experimental_settings = collections.defaultdict(set) with config.storage.transaction(read_only=True) as store: all_settings.update(store.get("defaults", {}).keys()) active_experiments = list(store.get("active-experiments", ())) for experiment in active_experiments: experiment_config = store["experiments/{slug}".format(slug=experiment)] for branch in experiment_config["branches"]: all_settings.update(branch["settings"].keys()) for setting in branch["settings"].keys(): experimental_settings[setting].add(experiment) for setting in sorted(all_settings): relevant_experiments = list(experimental_settings[setting]) relevant_experiments.sort() if relevant_experiments: print( "{setting}: {experiments}".format( setting=setting, experiments=", ".join(relevant_experiments) ) ) else: print("{setting}: NOT UNDER EXPERIMENT".format(setting=setting))
""" Copyright 2016 Brian Quach Licensed under MIT (https://github.com/brianquach/udacity-nano-fullstack-catalog/blob/master/LICENSE) # noqa """ import httplib2 import json import os import random import string from apiclient import discovery from apiclient import errors as gErrors from dicttoxml import dicttoxml from flask import flash from flask import Flask from flask import g from flask import jsonify from flask import make_response from flask import Markup from flask import redirect from flask import render_template from flask import request from flask import session from flask import url_for from functools import wraps from oauth2client import client from oauth2client.file import Storage from werkzeug import secure_filename from catalog import app from catalog import CLIENT_ID from catalog import db from catalog import G_CREDENTIAL_STORAGE from catalog import UPLOAD_PATH from catalog.forms import CreateCatalogItemForm from catalog.forms import EditCatalogItemForm from catalog.models import Catagory from catalog.models import CatagoryItem from catalog.models import User def login_required(f): @wraps(f) def decorated_function(*args, **kwargs): if 'user_id' not in session: return redirect(url_for('dashboard')) return f(*args, **kwargs) return decorated_function def owner_required(f): @wraps(f) def decorated_function(*args, **kwargs): catagory_item = CatagoryItem.\ query.\ filter_by( id=kwargs['catagory_item_id'], user_id=session['user_id'] ).\ first() if catagory_item is None: return redirect( url_for( 'view_catagory_item', catagory_item_id=kwargs['catagory_item_id'] ) ) return f(catagory_item) return decorated_function @app.context_processor def inject_oauth(): """Inject Catalog application oauth information for use by all templates. Creates and adds an anti-forgery state token to the session for oauth login authentication. This is because the login button will be displayed on all public pages. 
Returns: A dictionary that will contain information for user authentication. """ if 'state' in session: state = session['state'] else: state = ''.join( random.choice( string.ascii_uppercase + string.digits ) for x in xrange(32) ) session['state'] = state username = '' picture = '' is_logged_in = 'user_id' in session if is_logged_in: username = session['username'] picture = session['picture'] information = dict( client_id=CLIENT_ID, state=state, user_name=username, picture=picture, is_logged_in=is_logged_in ) return information @app.route('/') @app.route('/catalog') def dashboard(): """Catalog main page. Returns: A HTML page representing the index page. """ catagories = Catagory.query.all() catagory_items = CatagoryItem.\ query.\ order_by(CatagoryItem.id.desc()).\ limit(9) for catagory_item in catagory_items: if catagory_item.user_id is not None and catagory_item.picture: catagory_item.picture = url_for( 'static', filename='uploads/{0}'.format(catagory_item.picture) ) return render_template( 'index.html', catagory_items=catagory_items, catagories=catagories ) @app.route('/catagory/<int:catagory_id>/item') def view_catagory(catagory_id): """Catagory page. Lists items belonging to the catagory for users to view. Allow a user who is logged in to create an item. Returns: A HTML page representing a catagory. """ catagories = Catagory.query.all() catagory = Catagory.query.filter_by(id=catagory_id).one() catagory_items = CatagoryItem.\ query.\ filter_by(catagory_id=catagory_id).\ all() return render_template( 'catagory.html', catagories=catagories, name=catagory.name, items=catagory_items ) @app.route('/book/<int:catagory_item_id>') def view_catagory_item(catagory_item_id): """Catagory iem page. Display catagory item information. If user is authorized, allow user to edit or delete item. Returns: A HTML page representing a catagory item. 
""" is_authorized = False catagory_item = CatagoryItem.\ query.\ filter_by(id=catagory_item_id).\ one() # If there is no user_id associated with catagory_id, then it was created # by the system as test data. Test data hot links to images found on the # internet; otherwise, if there is an image uploaded by a user it will be # in the uploads folder. if catagory_item.user_id is not None and catagory_item.picture: catagory_item.picture = url_for( 'static', filename='uploads/{0}'.format(catagory_item.picture) ) if 'user_id' in session: is_authorized = catagory_item.user_id == session['user_id'] return render_template( 'view_item.html', item=catagory_item, is_authorized=is_authorized ) @app.route('/item/create', methods=['GET', 'POST']) @login_required def create_catagory_item(): """Serve create catagory item page, otherwise create new catagory item. Image file data is stored to server and saved in the database as a path to the stored image on disk. User must be logged in to use this function. Returns: HTML page, otherwise redirect. """ catagories = Catagory.query.order_by('name').all() form = CreateCatalogItemForm(obj=catagories) form.catagory_id.choices = [(c.id, c.name) for c in catagories] if (request.method == 'POST'): if form.validate_on_submit(): user_id = session['user_id'] catagory_item = CatagoryItem( name=form.name.data, author=form.author.data, description=form.description.data, catagory_id=form.catagory_id.data, user_id=user_id ) db.session.add(catagory_item) db.session.commit() # Filepath structure saves filename under catagory item ID # directory so there will be no conflict between uploaded pictures # with the same name. 
filename = secure_filename(form.image.data.filename) if filename: relative_path = '{0}/{1}'.format( catagory_item.id, filename ) image_file_path = UPLOAD_PATH + relative_path os.makedirs(os.path.join(UPLOAD_PATH + str(catagory_item.id))) form.image.data.save(image_file_path) catagory_item.picture = relative_path db.session.commit() flash('{0} successfully added!'.format(catagory_item.name)) return redirect(url_for('dashboard')) else: flash(Markup(''' <span class="error"> Please make sure to input a name and author! </span> ''')) return render_template('create_item.html', form=form) @app.route('/book/<int:catagory_item_id>/edit', methods=['GET', 'POST']) @login_required @owner_required def edit_catagory_item(catagory_item): """Serve edit catagory item page, otherwise edie catagory item. User must be logged in to use this function.. Returns: HTML page, otherwise redirect. """ catagories = Catagory.query.order_by('name').all() form = EditCatalogItemForm(obj=catagories) form.catagory_id.choices = [(c.id, c.name) for c in catagories] if request.method == 'POST': if form.validate_on_submit(): filename = secure_filename(form.image.data.filename) if filename: if catagory_item.picture: os.remove(os.path.join(UPLOAD_PATH, catagory_item.picture)) item_file_path = os.path.join( UPLOAD_PATH + str(catagory_item.id) ) if not os.path.isdir(item_file_path): os.makedirs(item_file_path) relative_path = '{0}/{1}'.format( catagory_item.id, filename ) image_file_path = UPLOAD_PATH + relative_path form.image.data.save(image_file_path) catagory_item.picture = relative_path catagory_item.name = form.name.data catagory_item.author = form.author.data catagory_item.description = form.description.data catagory_item.catagory_id = form.catagory_id.data db.session.commit() flash('{0} successfully edited!'.format(catagory_item.name)) else: flash(Markup(''' <span class="error"> Please make sure to input a name and author! 
</span> ''')) return render_template( 'edit_item.html', form=form, item=catagory_item, catagory_item_id=catagory_item_id ) return redirect( url_for('view_catagory_item', catagory_item_id=catagory_item.id) ) else: form.name.data = catagory_item.name form.author.data = catagory_item.author form.description.data = catagory_item.description form.catagory_id.data = catagory_item.catagory_id return render_template( 'edit_item.html', form=form, item=catagory_item, catagory_item_id=catagory_item.id ) @app.route('/book/<int:catagory_item_id>/delete', methods=['GET', 'POST']) @login_required @owner_required def delete_catagory_item(catagory_item): """Delete catagory iem page. If user is authorized, confirm delete book intention. Returns: HTML page, otherwise redirect. If POST respond with JSON object. """ if request.method == 'POST': if catagory_item.picture: relative_path = catagory_item.picture image_file_path = UPLOAD_PATH + relative_path os.remove(image_file_path) os.rmdir(os.path.join(UPLOAD_PATH + str(catagory_item.id))) db.session.delete(catagory_item) db.session.commit() flash('{0} successfully deleted!'.format(catagory_item.name)) return jsonify(isDeleted=True) return render_template( 'delete_item.html', catagory_id=catagory_item.catagory_id, catagory_item_id=catagory_item.id ) @app.route('/login', methods=['POST']) def server_oauth_login(): """Authenticate a user using OAuth through Google's API. Verifies that the user is the one actually making the call and that the access token return is intended for this application. Returns: A userinfo object: family_name: "A String", The user's last name. name: String, The user's full name. picture: String, URL of the user's picture image. locale: String, The user's preferred locale. gender: String, The user's gender. id: String, The obfuscated ID of the user. link: String, URL of the profile page. given_name: String, The user's first name. email: String, The user's email address. hd: String, The hosted domain e.g. 
example.com if the user is Google apps user. verified_email: Boolean, true if the email address is verified. Always verified because we only return the user's primary email address. """ authorization_token = request.data state_token = request.args.get('state') # Ensure anti-forgery state token is from the expected user making # this call if state_token != session.get('state'): response = jsonify(json.dumps('Invalid state parameter.'), 401) response.status_code = 401 return response try: # Create flow object to help aquire user credentials flow = client.flow_from_clientsecrets( 'client_secrets.json', scope='', redirect_uri='postmessage' ) # Exchange authorization token for access code credentials = flow.step2_exchange(authorization_token) except client.FlowExchangeError: response = jsonify('Failed to upgrade the authorization code.') response.status_code = 401 return response # Apply acess token to http object http_auth = credentials.authorize(httplib2.Http()) access_token = credentials.access_token # Build service call to google for user profile information oauth_service = discovery.build('oauth2', 'v2', http_auth) http_request = oauth_service.tokeninfo(access_token=access_token) try: token_info = http_request.execute() except gErrors.HttpError, err: error = json.loads(err.content) response = jsonify(error.get('error_description')) response.status_code = 400 return response # Verify that the access token is used for the intended user. gplus_id = credentials.id_token['sub'] if token_info['user_id'] != gplus_id: response = jsonify('Token\'s user ID doesn\'t match given user ID.') response.status_code = 401 return response # Verify that the access token is valid for this app. 
if token_info['issued_to'] != CLIENT_ID: response = jsonify('Token\'s client ID does not match app\'s.') response.status_code = 401 return response # stored_credentials = session.get('credentials') # stored_gplus_id = session.get('gplus_id') # if stored_credentials is not None and gplus_id == stored_gplus_id: # response = make_response( # json.dumps('Current user is already connected.'), # 200 # ) # response.headers['Content-Type'] = 'application/json' # return response userinfo = oauth_service.userinfo().get().execute() email = userinfo['email'] username = userinfo['name'] session['username'] = username session['picture'] = userinfo['picture'] session['email'] = email session['gplus_id'] = gplus_id # create a user account if none associated with email user_id = get_user_id(email) if user_id is None: user_id = create_user(session) session['user_id'] = user_id G_CREDENTIAL_STORAGE.put(credentials) flash('You\'re now logged in as {0}'.format(username)) return jsonify(userinfo) @app.route('/logout') def server_oauth_logout(): """Revoke user authentication and deletes user's session information. Returns: JSON response detailing a success or failed logout. """ try: credentials = G_CREDENTIAL_STORAGE.get() if credentials is None: response = jsonify(message='Current user not connected.') response.status_code = 401 return response credentials.revoke(httplib2.Http()) except err: print err finally: if G_CREDENTIAL_STORAGE.get() is not None: G_CREDENTIAL_STORAGE.delete() del session['gplus_id'] del session['username'] del session['email'] del session['picture'] del session['state'] del session['user_id'] flash('Successfully logged out') return jsonify(message='Successfully disconnected.') # User Helper Functions. Code taken and modified from Udacity Authentication & # Authorization: OAuth course. def create_user(session): """Add a new user into the database. Args: session: object containing user's information from oauth provider. Returns: Newly added user object. 
""" newUser = User( name=session['username'], email=session['email'], picture=session['picture'] ) db.session.add(newUser) db.session.commit() user = User.query.filter_by(email=session['email']).one() return user.id def get_userinfo(user_id): """Fetches user information. Args: user_id: user's id. Returns: A user object matching given user id. """ user = User.query.filter_by(id=user_id).one() return user def get_user_id(email): """Fetches user's id. Args: email: user's email address. Returns: User ID of user with given email address. """ try: user = User.query.filter_by(email=email).one() return user.id except: return None # JSON Endpoints @app.route('/me.json') def user_json(): """Fetches JSON representation of logged in user. Returns: JSON object of currently logged in user, if user is logged in. Otherwise an empty JSON object. """ if 'user_id' in session: user_id = session['user_id'] user = User.query.filter_by(id=user_id).one() return jsonify(user=user.serialize) return jsonify([]) @app.route('/catagories.json') def catagory_json(): """Fetches JSON representation of all catagories and their items. Returns: JSON object of all catagories and their items. """ catagories = Catagory.query.all() return jsonify(catagories=[c.serialize for c in catagories]) @app.route('/catagory/<int:catagory_id>/items.json') def catagory_item_json(catagory_id): """Fetches JSON representation of all items in a given catagory. Args: catagory_id: Id of catagory to fetch items from. Returns: JSON object of a catagory and its items. """ catagory = Catagory.query.filter_by(id=catagory_id).one() catagory_items = CatagoryItem.\ query.\ filter_by(catagory_id=catagory_id).\ all() return jsonify( catagory=catagory.name, catagory_items=[ci.serialize for ci in catagory_items] ) # XML Endpoints @app.route('/me.xml') def user_xml(): """Fetches XML representation of logged in user. Returns: XML file of currently logged in user, if user is logged in. Otherwise an empty XML file. 
""" if 'user_id' in session: user_id = session['user_id'] user = User.query.filter_by(id=user_id).one() response = make_response( dicttoxml(user.serialize, custom_root='me', attr_type=False), 200 ) else: response = make_response( '<?xml version="1.0" encoding="UTF-8"?><me></me>', 401 ) response.headers['Content-Type'] = 'text/xml' return response @app.route('/catagories.xml') def catagory_xml(): """Fetches XML representation of all catagories and their items. Returns: XML file of all catagories and their items. """ catagories = Catagory.query.all() response = make_response( dicttoxml( [c.serialize for c in catagories], custom_root='catagories', attr_type=False ), 200 ) response.headers['Content-Type'] = 'text/xml' return response @app.route('/catagory/<int:catagory_id>/items.xml') def catagory_item_xml(catagory_id): """Fetches XML representation of all items in a given catagory. Args: catagory_id: Id of catagory to fetch items from. Returns: XML file of a catagory and its items. """ catagory = Catagory.query.filter_by(id=catagory_id).one() catagory_items = CatagoryItem.\ query.\ filter_by(catagory_id=catagory_id).\ all() response = make_response( dicttoxml({ 'name': catagory.name, 'items': [ci.serialize for ci in catagory_items] }, custom_root='catagory', attr_type=False), 200 ) response.headers['Content-Type'] = 'text/xml' return response
#!/usr/bin/env python # Copyright 2014 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import ast import importlib import inspect import os import sys import unittest import urllib import uuid DECORATOR_MODULE = 'test' DECORATOR_NAME = 'idempotent_id' DECORATOR_IMPORT = 'tempest.%s' % DECORATOR_MODULE IMPORT_LINE = 'from tempest import %s' % DECORATOR_MODULE DECORATOR_TEMPLATE = "@%s.%s('%%s')" % (DECORATOR_MODULE, DECORATOR_NAME) UNIT_TESTS_EXCLUDE = 'tempest.tests' class SourcePatcher(object): """"Lazy patcher for python source files""" def __init__(self): self.source_files = None self.patches = None self.clear() def clear(self): """Clear inner state""" self.source_files = {} self.patches = {} @staticmethod def _quote(s): return urllib.quote(s) @staticmethod def _unquote(s): return urllib.unquote(s) def add_patch(self, filename, patch, line_no): """Add lazy patch""" if filename not in self.source_files: with open(filename) as f: self.source_files[filename] = self._quote(f.read()) patch_id = str(uuid.uuid4()) if not patch.endswith('\n'): patch += '\n' self.patches[patch_id] = self._quote(patch) lines = self.source_files[filename].split(self._quote('\n')) lines[line_no - 1] = ''.join(('{%s:s}' % patch_id, lines[line_no - 1])) self.source_files[filename] = self._quote('\n').join(lines) def _save_changes(self, filename, source): print('%s fixed' % filename) with open(filename, 'w') as f: f.write(source) def apply_patches(self): """Apply all patches""" 
for filename in self.source_files: patched_source = self._unquote( self.source_files[filename].format(**self.patches) ) self._save_changes(filename, patched_source) self.clear() class TestChecker(object): def __init__(self, package): self.package = package self.base_path = os.path.abspath(os.path.dirname(package.__file__)) def _path_to_package(self, path): relative_path = path[len(self.base_path) + 1:] if relative_path: return '.'.join((self.package.__name__,) + tuple(relative_path.split('/'))) else: return self.package.__name__ def _modules_search(self): """Recursive search for python modules in base package""" modules = [] for root, dirs, files in os.walk(self.base_path): if not os.path.exists(os.path.join(root, '__init__.py')): continue root_package = self._path_to_package(root) for item in files: if item.endswith('.py'): module_name = '.'.join((root_package, os.path.splitext(item)[0])) if not module_name.startswith(UNIT_TESTS_EXCLUDE): modules.append(module_name) return modules @staticmethod def _get_idempotent_id(test_node): """ Return key-value dict with all metadata from @test.idempotent_id decorators for test method """ idempotent_id = None for decorator in test_node.decorator_list: if (hasattr(decorator, 'func') and hasattr(decorator.func, 'attr') and decorator.func.attr == DECORATOR_NAME and hasattr(decorator.func, 'value') and decorator.func.value.id == DECORATOR_MODULE): for arg in decorator.args: idempotent_id = ast.literal_eval(arg) return idempotent_id @staticmethod def _is_decorator(line): return line.strip().startswith('@') @staticmethod def _is_def(line): return line.strip().startswith('def ') def _add_uuid_to_test(self, patcher, test_node, source_path): with open(source_path) as src: src_lines = src.read().split('\n') lineno = test_node.lineno insert_position = lineno while True: if (self._is_def(src_lines[lineno - 1]) or (self._is_decorator(src_lines[lineno - 1]) and (DECORATOR_TEMPLATE.split('(')[0] <= src_lines[lineno - 
1].strip().split('(')[0]))): insert_position = lineno break lineno += 1 patcher.add_patch( source_path, ' ' * test_node.col_offset + DECORATOR_TEMPLATE % uuid.uuid4(), insert_position ) @staticmethod def _is_test_case(module, node): if (node.__class__ is ast.ClassDef and hasattr(module, node.name) and inspect.isclass(getattr(module, node.name))): return issubclass(getattr(module, node.name), unittest.TestCase) @staticmethod def _is_test_method(node): return (node.__class__ is ast.FunctionDef and node.name.startswith('test_')) @staticmethod def _next_node(body, node): if body.index(node) < len(body): return body[body.index(node) + 1] @staticmethod def _import_name(node): if type(node) == ast.Import: return node.names[0].name elif type(node) == ast.ImportFrom: return '%s.%s' % (node.module, node.names[0].name) def _add_import_for_test_uuid(self, patcher, src_parsed, source_path): with open(source_path) as f: src_lines = f.read().split('\n') line_no = 0 tempest_imports = [node for node in src_parsed.body if self._import_name(node) and 'tempest.' 
in self._import_name(node)] if not tempest_imports: import_snippet = '\n'.join(('', IMPORT_LINE, '')) else: for node in tempest_imports: if self._import_name(node) < DECORATOR_IMPORT: continue else: line_no = node.lineno import_snippet = IMPORT_LINE break else: line_no = tempest_imports[-1].lineno while True: if (not src_lines[line_no - 1] or getattr(self._next_node(src_parsed.body, tempest_imports[-1]), 'lineno') == line_no or line_no == len(src_lines)): break line_no += 1 import_snippet = '\n'.join((IMPORT_LINE, '')) patcher.add_patch(source_path, import_snippet, line_no) def get_tests(self): """Get test methods with sources from base package with metadata""" tests = {} for module_name in self._modules_search(): tests[module_name] = {} module = importlib.import_module(module_name) source_path = '.'.join( (os.path.splitext(module.__file__)[0], 'py') ) with open(source_path, 'r') as f: source = f.read() tests[module_name]['source_path'] = source_path tests[module_name]['tests'] = {} source_parsed = ast.parse(source) tests[module_name]['ast'] = source_parsed tests[module_name]['import_valid'] = ( hasattr(module, DECORATOR_MODULE) and inspect.ismodule(getattr(module, DECORATOR_MODULE)) ) test_cases = (node for node in source_parsed.body if self._is_test_case(module, node)) for node in test_cases: for subnode in filter(self._is_test_method, node.body): test_name = '%s.%s' % (node.name, subnode.name) tests[module_name]['tests'][test_name] = subnode return tests @staticmethod def _filter_tests(function, tests): """Filter tests with condition 'function(test_node) == True'""" result = {} for module_name in tests: for test_name in tests[module_name]['tests']: if function(module_name, test_name, tests): if module_name not in result: result[module_name] = { 'ast': tests[module_name]['ast'], 'source_path': tests[module_name]['source_path'], 'import_valid': tests[module_name]['import_valid'], 'tests': {} } result[module_name]['tests'][test_name] = \ 
tests[module_name]['tests'][test_name] return result def find_untagged(self, tests): """Filter all tests without uuid in metadata""" def check_uuid_in_meta(module_name, test_name, tests): idempotent_id = self._get_idempotent_id( tests[module_name]['tests'][test_name]) return not idempotent_id return self._filter_tests(check_uuid_in_meta, tests) def report_collisions(self, tests): """Reports collisions if there are any. Returns true if collisions exist. """ uuids = {} def report(module_name, test_name, tests): test_uuid = self._get_idempotent_id( tests[module_name]['tests'][test_name]) if not test_uuid: return if test_uuid in uuids: error_str = "%s:%s\n uuid %s collision: %s<->%s\n%s:%s" % ( tests[module_name]['source_path'], tests[module_name]['tests'][test_name].lineno, test_uuid, test_name, uuids[test_uuid]['test_name'], uuids[test_uuid]['source_path'], uuids[test_uuid]['test_node'].lineno, ) print(error_str) print("cannot automatically resolve the collision, please " "manually remove the duplicate value on the new test.") return True else: uuids[test_uuid] = { 'module': module_name, 'test_name': test_name, 'test_node': tests[module_name]['tests'][test_name], 'source_path': tests[module_name]['source_path'] } return bool(self._filter_tests(report, tests)) def report_untagged(self, tests): """Reports untagged tests if there are any. Returns true if untagged tests exist. 
""" def report(module_name, test_name, tests): error_str = "%s:%s\nmissing @test.idempotent_id('...')\n%s\n" % ( tests[module_name]['source_path'], tests[module_name]['tests'][test_name].lineno, test_name ) print(error_str) return True return bool(self._filter_tests(report, tests)) def fix_tests(self, tests): """Add uuids to all tests specified in tests and fix it in source files """ patcher = SourcePatcher() for module_name in tests: add_import_once = True for test_name in tests[module_name]['tests']: if not tests[module_name]['import_valid'] and add_import_once: self._add_import_for_test_uuid( patcher, tests[module_name]['ast'], tests[module_name]['source_path'] ) add_import_once = False self._add_uuid_to_test( patcher, tests[module_name]['tests'][test_name], tests[module_name]['source_path']) patcher.apply_patches() def run(): parser = argparse.ArgumentParser() parser.add_argument('--package', action='store', dest='package', default='tempest', type=str, help='Package with tests') parser.add_argument('--fix', action='store_true', dest='fix_tests', help='Attempt to fix tests without UUIDs') args = parser.parse_args() sys.path.append(os.path.join(os.path.dirname(__file__), '..')) pkg = importlib.import_module(args.package) checker = TestChecker(pkg) errors = False tests = checker.get_tests() untagged = checker.find_untagged(tests) errors = checker.report_collisions(tests) or errors if args.fix_tests and untagged: checker.fix_tests(untagged) else: errors = checker.report_untagged(untagged) or errors if errors: sys.exit("@test.idempotent_id existence and uniqueness checks failed\n" "Run 'tox -v -euuidgen' to automatically fix tests with\n" "missing @test.idempotent_id decorators.") if __name__ == '__main__': run()
# Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Analytics for extracting facts based on StudentAnswerEntity entries.""" __author__ = 'Mike Gainer (mgainer@google.com)' import collections import logging import courses from common import tags import models from tools import verify QuestionAnswerInfo = collections.namedtuple( 'QuestionAnswerInfo', ['unit_id', 'lesson_id', 'sequence', # 0-based index of the question within the lesson/assessment. 'question_id', # ID of the QuestionEntity to which this is an answer. 'question_type', # McQuestion, SaQuestion, etc. 'timestamp', # Timestamp from the event. 'answers', # The answer (or answers, if multiple-answer multiple-choice). 'score', # Unweighted score for the answer. 'weighted_score', # Score fully weighted by question instance in HTML # or question usage in group and assessment (if in # assessment) 'tallied', # Boolean: False for lessons where questions are not scored. 
]) def _unpack_single_question_answer_1_5( info, types, score, assessment_weight, timestamp, answers, valid_question_ids): if info['id'] not in valid_question_ids: logging.info('Question with ID "%s" is no longer present; ' 'ignoring data for it.', info['id']) return [] weighted_score = score * info['weight'] * assessment_weight return [QuestionAnswerInfo( info['unit'], info['lesson'], info['sequence'], info['id'], types, timestamp, answers, score, weighted_score, tallied=True)] def _unpack_question_group_answer_1_5( info, types, scores, assessment_weight, timestamp, answers, usage_id, unit_responses, group_to_questions, valid_question_ids): # Sometimes the event contains enough information to get the # question IDs in the question group directly; this happens for # assessments. For graded lessons, we don't have that luxury, and # we need to (attempt to) rediscover the question IDs from # information gathered at map/reduce time. ret = [] seqs = [] q_ids = [] if info['id'] not in group_to_questions: logging.info( 'Question group with ID %s is referenced in an event, but ' 'is no longer present in the course. Ignoring the ' 'question group answer.', info['id']) return [] if usage_id in unit_responses: # Assessment events contain packed strings of the form # <question-usage-id-string>.<sequence#>.<QuestionEntity id> # keyed by the usage-ID string for the question group. # Unpack these into arrays for use below. 
packed_ids = unit_responses[usage_id].keys() packed_ids.sort(key=lambda packed: int(packed.split('.')[1])) for packed_id in packed_ids: _, seq, q_id = packed_id.split('.') seqs.append(seq) if q_id not in valid_question_ids: logging.info('Question with ID "%s" is no longer present; ' 'ignoring it and the question group containing ' 'it.', info['id']) return [] q_ids.append(q_id) else: for seq, q_info in enumerate(group_to_questions[info['id']]): seqs.append(seq) q_id = q_info['question'] if q_id not in valid_question_ids: logging.info('Question with ID "%s" is no longer present; ' 'ignoring it and the question group containing ' 'it.', info['id']) return [] q_ids.append(q_id) if (len(q_ids) != len(answers) or len(q_ids) != len(group_to_questions[info['id']])): logging.info( 'Question group usage "%s" in location "%s" has ' 'changed length since an older event was recorded; ' 'ignoring the unusable group answer.', usage_id, unit_responses.get('location', '')) return [] for q_id, seq, answer, q_type, q_score in zip( q_ids, seqs, answers, types, scores): # Here, we are guessing at the actual weight, since the # weight for each question is not supplied in the event. # We do, however, have the question ID, so if that question # is still part of the question group, we can use the current # weight value. # TODO(mgainer): When we get server-side grading, this mess # can finally get ripped out. weight_in_group = 1.0 for item in group_to_questions.get(info['id'], []): if item['question'] == q_id: weight_in_group = item['weight'] weighted_score = q_score * weight_in_group * assessment_weight ret.append(QuestionAnswerInfo( info['unit'], info['lesson'], info['sequence'] + int(seq), q_id, q_type, timestamp, answer, q_score, weighted_score, tallied=True)) return ret def unpack_student_answer_1_5(questions_info, valid_question_ids, assessment_weights, group_to_questions, unit_responses, timestamp): """Unpack JSON from event; convert to QuestionAnswerInfo objects. 
The JSON for events is unusually shaped; function regularizes it into plain-old-data objects. Also translates from question-usage ID to unit_id/lesson_id/question_id. (Note that this makes the reasonable assumption that a question will only be used once per lesson). Note that this function flattens question groups, unpacking everything as a single array of questions. Args: questions_info: A map from question usage ID to unit/lesson/question IDs. Generate this by calling get_questions_by_usage_id(). assessment_weights: Map from assessment ID to weight for that assessment. group_to_questions: Map from question group ID to list of dicts holding question ID as 'question' and weight as 'weight'. unit_responses: The user's responses. Obtain this by unpacking an event of type 'submit-assessment' and picking out the 'values' item, or an event of type 'attempt-lesson' and picking out 'answers'. timestamp: Timestamp from the event. Value is copied into the results, but not otherwise used. Returns: An array of QuestionAnswerInfo corresponding to the answers given by the student, as recorded in the submitted event. """ ret = [] contained_types = unit_responses['containedTypes'] for usage_id, answers in unit_responses['answers'].items(): if usage_id == 'version': # Found in graded assessments. continue if usage_id not in questions_info: logging.info('Question or question-group ID "%s" in event is ' 'no longer present', usage_id) continue # Skip items from no-longer-present questions. # Note: The variable names here are in plural, but for single # questions, 'types', 'score' and 'answers' contain just one # item. (whereas for question groups, these are all arrays) info = questions_info[usage_id] types = contained_types[usage_id] score = unit_responses['individualScores'][usage_id] unit_id = info['unit'] assessment_weight = assessment_weights.get(str(unit_id), 1.0) # Single question - give its answer. 
    # Single question: delegate to the single-question unpacker.
    if types == 'McQuestion' or types == 'SaQuestion':
        ret.extend(_unpack_single_question_answer_1_5(
            info, types, score, assessment_weight, timestamp, answers,
            valid_question_ids))

    # Question group. Fetch IDs of sub-questions, which are packed as
    # <group-usage-id>.<sequence>.<question-id>.
    # Order by <sequence>, which is 0-based within question-group.
    elif isinstance(types, list):
        ret.extend(_unpack_question_group_answer_1_5(
            info, types, score, assessment_weight, timestamp, answers,
            usage_id, unit_responses, group_to_questions,
            valid_question_ids))
    return ret


def unpack_check_answers(
        content, questions_info, valid_question_ids, assessment_weights,
        group_to_questions, timestamp):
    """Parse check-answers submissions for ungraded questions.

    The JSON for events is unusually shaped; function regularizes it into
    plain-old-data objects.  Also translates from question-usage ID to
    unit_id/lesson_id/question_id.  (Note that this makes the reasonable
    assumption that a question will only be used once per lesson).

    Note that this function flattens question groups, unpacking everything
    as a single array of questions.

    Args:
        content: The dict unpacked from a JSON string for an event with a
            source of 'tag-assessment'.
        questions_info: A map from question usage ID to unit/lesson/question
            IDs.  Generate this by calling get_questions_by_usage_id().
        valid_question_ids: Collection of question IDs still present in the
            course; answers to questions no longer present are dropped.
            Generate this by calling get_valid_question_ids(), below.
        assessment_weights: Map from assessment ID to weight for that
            assessment.
        group_to_questions: A map of group ID to dicts, as follows.
            Generate this by calling group_to_questions(), below.
            'question' -> string containing question ID.
            'weight' -> float representing the weight of that question in
                this group
        timestamp: Timestamp from the event.  Value is copied into the
            results, but not otherwise used.
    Returns:
        An array of QuestionAnswerInfo corresponding to the answers given
        by the student, as recorded in the submitted event.
    """
    qtype = content.get('type')
    usage_id = content['instanceid']
    # The usage ID ties the event back to a specific placement of a question
    # (or group) on a page; if that placement is gone, the event is unusable.
    if usage_id not in questions_info:
        logging.info('Question or question-group ID "%s" in event is '
                     'no longer present', usage_id)
        return []
    info = questions_info[usage_id]
    # Assessments not in the map default to a neutral weight of 1.0.
    assessment_weight = assessment_weights.get(info['unit'], 1.0)
    if qtype == 'SaQuestion' or qtype == 'McQuestion':
        if info['id'] not in valid_question_ids:
            logging.info('Question with ID "%s" is no longer present; '
                         'ignoring data for it.', info['id'])
            return []
        score = content.get('score', 0.0)
        # Effective score scales by both per-question and per-assessment
        # weights.
        weighted_score = score * info['weight'] * assessment_weight
        answers = [QuestionAnswerInfo(
            info['unit'], info['lesson'], info['sequence'], info['id'],
            qtype, timestamp, content['answer'], score, weighted_score,
            tallied=False)]
    elif qtype == 'QuestionGroup':
        values = content.get('answer')
        scores = content.get('individualScores')
        types = content.get('containedTypes')

        # Here, we have to hope that the length and order of questions within
        # a group has not changed since the event was recorded, as the events
        # do not record the question ID within the group.  We just assume that
        # the index at the time the check-answer event was recorded is the
        # same as in the question group currently.
        # TODO(mgainer): When we get server-side grading, buff this up.
        group_id = questions_info[usage_id]['id']
        if group_id not in group_to_questions:
            logging.info(
                'Question group with ID %s is referenced in an event, but '
                'is no longer present in the course. Ignoring the unusable '
                'question group answer.', group_id)
            return []
        q_infos = group_to_questions.get(group_id, [])
        # Length mismatch means the group changed since the event was
        # recorded; positional matching would attribute answers to the
        # wrong questions, so drop the event entirely.
        if len(q_infos) != len(values):
            logging.info('Ignoring event for question group "%s": '
                         'This group currently has length %d, '
                         'but event has length %d.',
                         usage_id, len(q_infos), len(values))
            return []
        answers = []
        i = 0
        # Flatten the group: each sub-question gets its own record, with
        # 'sequence' offset by its position within the group.
        for q_info, val, score, qtype in zip(q_infos, values, scores, types):
            weighted_score = score * q_info['weight'] * assessment_weight
            answers.append(QuestionAnswerInfo(
                info['unit'], info['lesson'], info['sequence'] + i,
                q_info['question'], qtype, timestamp, val, score,
                weighted_score, tallied=False))
            i += 1
    else:
        logging.warning('Not handling unknown question or group type "%s"',
                        qtype)
        answers = []
    return answers


def _add_questions_from_html(
        questions_by_usage_id, unit_id, lesson_id, html,
        question_group_lengths):
    """Parse rich-text HTML and add questions found to map by ID."""
    # 'sequence' records the 0-based position of each question on the page;
    # a question group advances the counter by the number of questions it
    # contains so that group members get distinct positions.
    sequence_counter = 0
    for component in tags.get_components_from_html(html):
        if component['cpt_name'] == 'question':
            questions_by_usage_id[component['instanceid']] = {
                'unit': unit_id,
                'lesson': lesson_id,
                'sequence': sequence_counter,
                'id': component['quid'],
                # Missing 'weight' attribute defaults to 1.0.
                'weight': float(component.get('weight', 1.0)),
            }
            sequence_counter += 1
        elif component['cpt_name'] == 'question-group':
            questions_by_usage_id[component['instanceid']] = {
                'unit': unit_id,
                'lesson': lesson_id,
                'sequence': sequence_counter,
                'id': component['qgid'],
            }
            if component['qgid'] in question_group_lengths:
                sequence_counter += (
                    question_group_lengths[component['qgid']])


def get_questions_by_usage_id(app_context):
    """Build map: question-usage-ID to {question ID, unit ID, sequence}.

    When a question or question-group is mentioned on a CourseBuilder
    HTML page, it is identified by a unique opaque ID which indicates
    *that usage* of a particular question.

    Args:
        app_context: Normal context object giving namespace, etc.
    Returns:
        A map of precalculated facts to be made available to
        mapper workerbee instances.
    """
    questions_by_usage_id = {}

    # To know a question's sequence number within an assessment, we need
    # to know how many questions a question group contains.
    question_group_lengths = {}
    for group in models.QuestionGroupDAO.get_all():
        question_group_lengths[str(group.id)] = (
            len(group.question_ids))

    # Run through course.  For each assessment, parse the HTML content
    # looking for questions and question groups.  For each of those,
    # record the unit ID, use-of-item-on-page-instance-ID (a string
    # like 'RK3q5H2dS7So'), and the sequence on the page.  Questions
    # count as one position.  Question groups increase the sequence
    # count by the number of questions they contain.
    course = courses.Course(None, app_context)
    for unit in course.get_units():
        _add_questions_from_html(questions_by_usage_id, unit.unit_id, None,
                                 unit.html_content, question_group_lengths)
        for lesson in course.get_lessons(unit.unit_id):
            _add_questions_from_html(questions_by_usage_id, unit.unit_id,
                                     lesson.lesson_id, lesson.objectives,
                                     question_group_lengths)
    return questions_by_usage_id


def get_assessment_weights(app_context):
    """Build map: assessment unit ID (as str) to its float weight."""
    ret = {}
    course = courses.Course(None, app_context)
    for unit in course.get_units():
        if unit.type == verify.UNIT_TYPE_ASSESSMENT:
            ret[str(unit.unit_id)] = float(unit.weight)
    return ret


def get_group_to_questions():
    """Build map: question-group ID (as str) to its list of item dicts.

    Each item dict has 'question' normalized to a str ID and 'weight'
    normalized to a float, matching what unpack_check_answers() expects.
    """
    ret = {}
    for group in models.QuestionGroupDAO.get_all():
        items = group.items
        # Normalize types in place so downstream consumers can rely on them.
        for element in items:
            element['question'] = str(element['question'])
            element['weight'] = float(element['weight'])
        ret[str(group.id)] = items
    return ret


def get_unscored_lesson_ids(app_context):
    """Return the IDs of all lessons in the course that are not scored."""
    ret = []
    for lesson in courses.Course(None, app_context).get_lessons_for_all_units():
        if not lesson.scored:
            ret.append(lesson.lesson_id)
    return ret


def get_valid_question_ids():
    """Return IDs (as str) of all questions currently in the course."""
    ret = []
    for question in models.QuestionDAO.get_all():
        ret.append(str(question.id))
    return ret
#!/usr/bin/env python # # Electrum - lightweight Bitcoin client # Copyright (C) 2011 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals # list of words from http://en.wiktionary.org/wiki/Wiktionary:Frequency_lists/Contemporary_poetry words = [ "like", "just", "love", "know", "never", "want", "time", "out", "there", "make", "look", "eye", "down", "only", "think", "heart", "back", "then", "into", "about", "more", "away", "still", "them", "take", "thing", "even", "through", "long", "always", "world", "too", "friend", "tell", "try", "hand", "thought", "over", "here", "other", "need", "smile", "again", "much", "cry", "been", "night", "ever", "little", "said", "end", "some", "those", "around", "mind", "people", "girl", "leave", "dream", "left", "turn", "myself", "give", "nothing", "really", "off", "before", "something", "find", "walk", "wish", "good", "once", "place", "ask", "stop", "keep", "watch", "seem", "everything", "wait", "got", "yet", "made", "remember", "start", "alone", "run", "hope", "maybe", "believe", "body", "hate", "after", "close", "talk", "stand", "own", "each", "hurt", "help", "home", "god", "soul", "new", "many", "two", "inside", "should", "true", "first", "fear", "mean", "better", "play", "another", "gone", "change", "use", "wonder", "someone", "hair", "cold", "open", "best", "any", "behind", "happen", "water", "dark", "laugh", "stay", "forever", "name", "work", "show", "sky", "break", "came", "deep", "door", "put", "black", "together", "upon", "happy", "such", "great", "white", "matter", "fill", "past", "please", "burn", "cause", "enough", "touch", "moment", "soon", "voice", "scream", "anything", "stare", "sound", "red", "everyone", "hide", "kiss", "truth", "death", "beautiful", "mine", "blood", "broken", "very", "pass", "next", "forget", "tree", "wrong", "air", "mother", "understand", "lip", "hit", "wall", "memory", "sleep", "free", "high", "realize", "school", "might", "skin", "sweet", "perfect", "blue", "kill", "breath", "dance", 
"against", "fly", "between", "grow", "strong", "under", "listen", "bring", "sometimes", "speak", "pull", "person", "become", "family", "begin", "ground", "real", "small", "father", "sure", "feet", "rest", "young", "finally", "land", "across", "today", "different", "guy", "line", "fire", "reason", "reach", "second", "slowly", "write", "eat", "smell", "mouth", "step", "learn", "three", "floor", "promise", "breathe", "darkness", "push", "earth", "guess", "save", "song", "above", "along", "both", "color", "house", "almost", "sorry", "anymore", "brother", "okay", "dear", "game", "fade", "already", "apart", "warm", "beauty", "heard", "notice", "question", "shine", "began", "piece", "whole", "shadow", "secret", "street", "within", "finger", "point", "morning", "whisper", "child", "moon", "green", "story", "glass", "kid", "silence", "since", "soft", "yourself", "empty", "shall", "angel", "answer", "baby", "bright", "dad", "path", "worry", "hour", "drop", "follow", "power", "war", "half", "flow", "heaven", "act", "chance", "fact", "least", "tired", "children", "near", "quite", "afraid", "rise", "sea", "taste", "window", "cover", "nice", "trust", "lot", "sad", "cool", "force", "peace", "return", "blind", "easy", "ready", "roll", "rose", "drive", "held", "music", "beneath", "hang", "mom", "paint", "emotion", "quiet", "clear", "cloud", "few", "pretty", "bird", "outside", "paper", "picture", "front", "rock", "simple", "anyone", "meant", "reality", "road", "sense", "waste", "bit", "leaf", "thank", "happiness", "meet", "men", "smoke", "truly", "decide", "self", "age", "book", "form", "alive", "carry", "escape", "damn", "instead", "able", "ice", "minute", "throw", "catch", "leg", "ring", "course", "goodbye", "lead", "poem", "sick", "corner", "desire", "known", "problem", "remind", "shoulder", "suppose", "toward", "wave", "drink", "jump", "woman", "pretend", "sister", "week", "human", "joy", "crack", "grey", "pray", "surprise", "dry", "knee", "less", "search", "bleed", "caught", 
"clean", "embrace", "future", "king", "son", "sorrow", "chest", "hug", "remain", "sat", "worth", "blow", "daddy", "final", "parent", "tight", "also", "create", "lonely", "safe", "cross", "dress", "evil", "silent", "bone", "fate", "perhaps", "anger", "class", "scar", "snow", "tiny", "tonight", "continue", "control", "dog", "edge", "mirror", "month", "suddenly", "comfort", "given", "loud", "quickly", "gaze", "plan", "rush", "stone", "town", "battle", "ignore", "spirit", "stood", "stupid", "yours", "brown", "build", "dust", "hey", "kept", "pay", "phone", "twist", "although", "ball", "beyond", "hidden", "nose", "taken", "fail", "float", "pure", "somehow", "wash", "wrap", "angry", "cheek", "creature", "forgotten", "heat", "rip", "single", "space", "special", "weak", "whatever", "yell", "anyway", "blame", "job", "choose", "country", "curse", "drift", "echo", "figure", "grew", "laughter", "neck", "suffer", "worse", "yeah", "disappear", "foot", "forward", "knife", "mess", "somewhere", "stomach", "storm", "beg", "idea", "lift", "offer", "breeze", "field", "five", "often", "simply", "stuck", "win", "allow", "confuse", "enjoy", "except", "flower", "seek", "strength", "calm", "grin", "gun", "heavy", "hill", "large", "ocean", "shoe", "sigh", "straight", "summer", "tongue", "accept", "crazy", "everyday", "exist", "grass", "mistake", "sent", "shut", "surround", "table", "ache", "brain", "destroy", "heal", "nature", "shout", "sign", "stain", "choice", "doubt", "glance", "glow", "mountain", "queen", "stranger", "throat", "tomorrow", "city", "either", "fish", "flame", "rather", "shape", "spin", "spread", "ash", "distance", "finish", "image", "imagine", "important", "nobody", "shatter", "warmth", "became", "feed", "flesh", "funny", "lust", "shirt", "trouble", "yellow", "attention", "bare", "bite", "money", "protect", "amaze", "appear", "born", "choke", "completely", "daughter", "fresh", "friendship", "gentle", "probably", "six", "deserve", "expect", "grab", "middle", "nightmare", 
"river", "thousand", "weight", "worst", "wound", "barely", "bottle", "cream", "regret", "relationship", "stick", "test", "crush", "endless", "fault", "itself", "rule", "spill", "art", "circle", "join", "kick", "mask", "master", "passion", "quick", "raise", "smooth", "unless", "wander", "actually", "broke", "chair", "deal", "favorite", "gift", "note", "number", "sweat", "box", "chill", "clothes", "lady", "mark", "park", "poor", "sadness", "tie", "animal", "belong", "brush", "consume", "dawn", "forest", "innocent", "pen", "pride", "stream", "thick", "clay", "complete", "count", "draw", "faith", "press", "silver", "struggle", "surface", "taught", "teach", "wet", "bless", "chase", "climb", "enter", "letter", "melt", "metal", "movie", "stretch", "swing", "vision", "wife", "beside", "crash", "forgot", "guide", "haunt", "joke", "knock", "plant", "pour", "prove", "reveal", "steal", "stuff", "trip", "wood", "wrist", "bother", "bottom", "crawl", "crowd", "fix", "forgive", "frown", "grace", "loose", "lucky", "party", "release", "surely", "survive", "teacher", "gently", "grip", "speed", "suicide", "travel", "treat", "vein", "written", "cage", "chain", "conversation", "date", "enemy", "however", "interest", "million", "page", "pink", "proud", "sway", "themselves", "winter", "church", "cruel", "cup", "demon", "experience", "freedom", "pair", "pop", "purpose", "respect", "shoot", "softly", "state", "strange", "bar", "birth", "curl", "dirt", "excuse", "lord", "lovely", "monster", "order", "pack", "pants", "pool", "scene", "seven", "shame", "slide", "ugly", "among", "blade", "blonde", "closet", "creek", "deny", "drug", "eternity", "gain", "grade", "handle", "key", "linger", "pale", "prepare", "swallow", "swim", "tremble", "wheel", "won", "cast", "cigarette", "claim", "college", "direction", "dirty", "gather", "ghost", "hundred", "loss", "lung", "orange", "present", "swear", "swirl", "twice", "wild", "bitter", "blanket", "doctor", "everywhere", "flash", "grown", "knowledge", "numb", 
"pressure", "radio", "repeat", "ruin", "spend", "unknown", "buy", "clock", "devil", "early", "false", "fantasy", "pound", "precious", "refuse", "sheet", "teeth", "welcome", "add", "ahead", "block", "bury", "caress", "content", "depth", "despite", "distant", "marry", "purple", "threw", "whenever", "bomb", "dull", "easily", "grasp", "hospital", "innocence", "normal", "receive", "reply", "rhyme", "shade", "someday", "sword", "toe", "visit", "asleep", "bought", "center", "consider", "flat", "hero", "history", "ink", "insane", "muscle", "mystery", "pocket", "reflection", "shove", "silently", "smart", "soldier", "spot", "stress", "train", "type", "view", "whether", "bus", "energy", "explain", "holy", "hunger", "inch", "magic", "mix", "noise", "nowhere", "prayer", "presence", "shock", "snap", "spider", "study", "thunder", "trail", "admit", "agree", "bag", "bang", "bound", "butterfly", "cute", "exactly", "explode", "familiar", "fold", "further", "pierce", "reflect", "scent", "selfish", "sharp", "sink", "spring", "stumble", "universe", "weep", "women", "wonderful", "action", "ancient", "attempt", "avoid", "birthday", "branch", "chocolate", "core", "depress", "drunk", "especially", "focus", "fruit", "honest", "match", "palm", "perfectly", "pillow", "pity", "poison", "roar", "shift", "slightly", "thump", "truck", "tune", "twenty", "unable", "wipe", "wrote", "coat", "constant", "dinner", "drove", "egg", "eternal", "flight", "flood", "frame", "freak", "gasp", "glad", "hollow", "motion", "peer", "plastic", "root", "screen", "season", "sting", "strike", "team", "unlike", "victim", "volume", "warn", "weird", "attack", "await", "awake", "built", "charm", "crave", "despair", "fought", "grant", "grief", "horse", "limit", "message", "ripple", "sanity", "scatter", "serve", "split", "string", "trick", "annoy", "blur", "boat", "brave", "clearly", "cling", "connect", "fist", "forth", "imagination", "iron", "jock", "judge", "lesson", "milk", "misery", "nail", "naked", "ourselves", "poet", 
"possible", "princess", "sail", "size", "snake", "society", "stroke", "torture", "toss", "trace", "wise", "bloom", "bullet", "cell", "check", "cost", "darling", "during", "footstep", "fragile", "hallway", "hardly", "horizon", "invisible", "journey", "midnight", "mud", "nod", "pause", "relax", "shiver", "sudden", "value", "youth", "abuse", "admire", "blink", "breast", "bruise", "constantly", "couple", "creep", "curve", "difference", "dumb", "emptiness", "gotta", "honor", "plain", "planet", "recall", "rub", "ship", "slam", "soar", "somebody", "tightly", "weather", "adore", "approach", "bond", "bread", "burst", "candle", "coffee", "cousin", "crime", "desert", "flutter", "frozen", "grand", "heel", "hello", "language", "level", "movement", "pleasure", "powerful", "random", "rhythm", "settle", "silly", "slap", "sort", "spoken", "steel", "threaten", "tumble", "upset", "aside", "awkward", "bee", "blank", "board", "button", "card", "carefully", "complain", "crap", "deeply", "discover", "drag", "dread", "effort", "entire", "fairy", "giant", "gotten", "greet", "illusion", "jeans", "leap", "liquid", "march", "mend", "nervous", "nine", "replace", "rope", "spine", "stole", "terror", "accident", "apple", "balance", "boom", "childhood", "collect", "demand", "depression", "eventually", "faint", "glare", "goal", "group", "honey", "kitchen", "laid", "limb", "machine", "mere", "mold", "murder", "nerve", "painful", "poetry", "prince", "rabbit", "shelter", "shore", "shower", "soothe", "stair", "steady", "sunlight", "tangle", "tease", "treasure", "uncle", "begun", "bliss", "canvas", "cheer", "claw", "clutch", "commit", "crimson", "crystal", "delight", "doll", "existence", "express", "fog", "football", "gay", "goose", "guard", "hatred", "illuminate", "mass", "math", "mourn", "rich", "rough", "skip", "stir", "student", "style", "support", "thorn", "tough", "yard", "yearn", "yesterday", "advice", "appreciate", "autumn", "bank", "beam", "bowl", "capture", "carve", "collapse", "confusion", 
"creation", "dove", "feather", "girlfriend", "glory", "government", "harsh", "hop", "inner", "loser", "moonlight", "neighbor", "neither", "peach", "pig", "praise", "screw", "shield", "shimmer", "sneak", "stab", "subject", "throughout", "thrown", "tower", "twirl", "wow", "army", "arrive", "bathroom", "bump", "cease", "cookie", "couch", "courage", "dim", "guilt", "howl", "hum", "husband", "insult", "led", "lunch", "mock", "mostly", "natural", "nearly", "needle", "nerd", "peaceful", "perfection", "pile", "price", "remove", "roam", "sanctuary", "serious", "shiny", "shook", "sob", "stolen", "tap", "vain", "void", "warrior", "wrinkle", "affection", "apologize", "blossom", "bounce", "bridge", "cheap", "crumble", "decision", "descend", "desperately", "dig", "dot", "flip", "frighten", "heartbeat", "huge", "lazy", "lick", "odd", "opinion", "process", "puzzle", "quietly", "retreat", "score", "sentence", "separate", "situation", "skill", "soak", "square", "stray", "taint", "task", "tide", "underneath", "veil", "whistle", "anywhere", "bedroom", "bid", "bloody", "burden", "careful", "compare", "concern", "curtain", "decay", "defeat", "describe", "double", "dreamer", "driver", "dwell", "evening", "flare", "flicker", "grandma", "guitar", "harm", "horrible", "hungry", "indeed", "lace", "melody", "monkey", "nation", "object", "obviously", "rainbow", "salt", "scratch", "shown", "shy", "stage", "stun", "third", "tickle", "useless", "weakness", "worship", "worthless", "afternoon", "beard", "boyfriend", "bubble", "busy", "certain", "chin", "concrete", "desk", "diamond", "doom", "drawn", "due", "felicity", "freeze", "frost", "garden", "glide", "harmony", "hopefully", "hunt", "jealous", "lightning", "mama", "mercy", "peel", "physical", "position", "pulse", "punch", "quit", "rant", "respond", "salty", "sane", "satisfy", "savior", "sheep", "slept", "social", "sport", "tuck", "utter", "valley", "wolf", "aim", "alas", "alter", "arrow", "awaken", "beaten", "belief", "brand", "ceiling", 
"cheese", "clue", "confidence", "connection", "daily", "disguise", "eager", "erase", "essence", "everytime", "expression", "fan", "flag", "flirt", "foul", "fur", "giggle", "glorious", "ignorance", "law", "lifeless", "measure", "mighty", "muse", "north", "opposite", "paradise", "patience", "patient", "pencil", "petal", "plate", "ponder", "possibly", "practice", "slice", "spell", "stock", "strife", "strip", "suffocate", "suit", "tender", "tool", "trade", "velvet", "verse", "waist", "witch", "aunt", "bench", "bold", "cap", "certainly", "click", "companion", "creator", "dart", "delicate", "determine", "dish", "dragon", "drama", "drum", "dude", "everybody", "feast", "forehead", "former", "fright", "fully", "gas", "hook", "hurl", "invite", "juice", "manage", "moral", "possess", "raw", "rebel", "royal", "scale", "scary", "several", "slight", "stubborn", "swell", "talent", "tea", "terrible", "thread", "torment", "trickle", "usually", "vast", "violence", "weave", "acid", "agony", "ashamed", "awe", "belly", "blend", "blush", "character", "cheat", "common", "company", "coward", "creak", "danger", "deadly", "defense", "define", "depend", "desperate", "destination", "dew", "duck", "dusty", "embarrass", "engine", "example", "explore", "foe", "freely", "frustrate", "generation", "glove", "guilty", "health", "hurry", "idiot", "impossible", "inhale", "jaw", "kingdom", "mention", "mist", "moan", "mumble", "mutter", "observe", "ode", "pathetic", "pattern", "pie", "prefer", "puff", "rape", "rare", "revenge", "rude", "scrape", "spiral", "squeeze", "strain", "sunset", "suspend", "sympathy", "thigh", "throne", "total", "unseen", "weapon", "weary" ] n = 1626 # Note about US patent no 5892470: Here each word does not represent a given digit. # Instead, the digit represented by a word is variable, it depends on the previous word. 
def mn_encode(message):
    """Encode a hex string into a mnemonic word list (3 words per 32 bits).

    Implements the variable-digit scheme described in the note above
    (cf. US patent 5892470): for each 32-bit chunk, the second and third
    word indices are stored as offsets relative to the previous index,
    modulo n.

    Args:
        message: hex string whose length is a multiple of 8 (whole
            32-bit words).

    Returns:
        List of mnemonic words, three per 8 hex digits.

    Raises:
        ValueError: if the message length is not a multiple of 8, or the
            message is not valid hexadecimal.
    """
    # Explicit check instead of `assert`: asserts are stripped when Python
    # runs with -O, which would silently mis-slice malformed input.
    if len(message) % 8 != 0:
        raise ValueError('message length must be a multiple of 8 hex digits')
    out = []
    for i in range(len(message) // 8):
        word = message[8 * i:8 * i + 8]
        x = int(word, 16)
        w1 = x % n
        w2 = ((x // n) + w1) % n
        w3 = ((x // n // n) + w2) % n
        out += [words[w1], words[w2], words[w3]]
    return out


def mn_decode(wlist):
    """Decode a mnemonic word list back into the original hex string.

    Inverse of mn_encode().  Trailing words beyond a multiple of three are
    ignored, matching the historical behavior.

    Args:
        wlist: sequence of mnemonic words, three per 32-bit chunk.

    Returns:
        Hex string, 8 lowercase digits per word triple.

    Raises:
        ValueError: if a word is not in the word list.
    """
    # Build a word -> first-index map once: the previous implementation
    # called list.index() (O(len(words))) three times per triple, making
    # decoding accidentally quadratic.
    index_of = {}
    for i, w in enumerate(words):
        index_of.setdefault(w, i)  # keep FIRST occurrence, like list.index

    def lookup(word):
        # Preserve the ValueError that list.index() used to raise for
        # words missing from the word list.
        try:
            return index_of[word]
        except KeyError:
            raise ValueError('unknown mnemonic word: %r' % word)

    chunks = []
    for i in range(len(wlist) // 3):
        word1, word2, word3 = wlist[3 * i:3 * i + 3]
        w1 = lookup(word1)
        w2 = lookup(word2) % n
        w3 = lookup(word3) % n
        x = w1 + n * ((w2 - w1) % n) + n * n * ((w3 - w2) % n)
        chunks.append('%08x' % x)
    # Join once rather than growing a string in the loop.
    return ''.join(chunks)


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        print('I need arguments: a hex string to encode, or a list of words to decode')
    elif len(sys.argv) == 2:
        print(' '.join(mn_encode(sys.argv[1])))
    else:
        print(mn_decode(sys.argv[1:]))
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Classes for handling events."""

from __future__ import annotations

import logging

from core import feconf
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import feedback_services
from core.domain import stats_domain
from core.domain import stats_services
from core.domain import taskqueue_services
from core.platform import models

(feedback_models, stats_models, user_models) = models.Registry.import_models([
    models.NAMES.feedback, models.NAMES.statistics, models.NAMES.user])

transaction_services = models.Registry.import_transaction_services()


class BaseEventHandler:
    """Base class for event dispatchers."""

    # A string denoting the type of the event. Should be specified by
    # subclasses and considered immutable.
    EVENT_TYPE = None

    @classmethod
    def _handle_event(cls, *args, **kwargs):
        """Perform in-request processing of an incoming event."""
        raise NotImplementedError(
            'Subclasses of BaseEventHandler should implement the '
            '_handle_event() method, using explicit arguments '
            '(no *args or **kwargs).')

    @classmethod
    def record(cls, *args, **kwargs):
        """Process incoming events.

        Callers of event handlers should call this method, not
        _handle_event().
        """
        cls._handle_event(*args, **kwargs)


class StatsEventsHandler(BaseEventHandler):
    """Event handler for incremental update of analytics model using
    aggregated stats data.
    """

    EVENT_TYPE = feconf.EVENT_TYPE_ALL_STATS

    @classmethod
    def _is_latest_version(cls, exp_id, exp_version):
        """Verifies whether the exploration version for the stats to be
        stored corresponds to the latest version of the exploration.
        """
        exploration = exp_fetchers.get_exploration_by_id(exp_id)
        return exploration.version == exp_version

    @classmethod
    def _handle_event(cls, exploration_id, exp_version, aggregated_stats):
        """Defers an async stats update, discarding malformed or stale data.

        Stats are only enqueued when exp_version is still the latest
        version of the exploration; events for older versions are dropped
        silently.
        """
        # The literal key 'undefined' indicates a client-side bug (a JS
        # `undefined` serialized into the payload); log and drop the event.
        if 'undefined' in aggregated_stats['state_stats_mapping']:
            logging.error(
                'Aggregated stats contains an undefined state name: %s'
                % list(aggregated_stats['state_stats_mapping'].keys()))
            return
        if cls._is_latest_version(exploration_id, exp_version):
            taskqueue_services.defer(
                taskqueue_services.FUNCTION_ID_UPDATE_STATS,
                taskqueue_services.QUEUE_NAME_STATS, exploration_id,
                exp_version, aggregated_stats)


class AnswerSubmissionEventHandler(BaseEventHandler):
    """Event handler for recording answer submissions."""

    EVENT_TYPE = feconf.EVENT_TYPE_ANSWER_SUBMITTED

    @classmethod
    def _handle_event(
            cls, exploration_id, exploration_version, state_name,
            interaction_id, answer_group_index, rule_spec_index,
            classification_categorization, session_id, time_spent_in_secs,
            params, normalized_answer):
        """Records an event when an answer triggers a rule. The answer
        recorded here is a Python-representation of the actual answer
        submitted by the user.
        """
        # TODO(sll): Escape these args?
        stats_services.record_answer(
            exploration_id, exploration_version, state_name, interaction_id,
            stats_domain.SubmittedAnswer(
                normalized_answer, interaction_id, answer_group_index,
                rule_spec_index, classification_categorization, params,
                session_id, time_spent_in_secs))

        # An answer classified by the default outcome got no targeted
        # feedback, so the feedback is only "useful" for any other
        # classification.
        feedback_is_useful = (
            classification_categorization != (
                exp_domain.DEFAULT_OUTCOME_CLASSIFICATION))

        stats_models.AnswerSubmittedEventLogEntryModel.create(
            exploration_id, exploration_version, state_name, session_id,
            time_spent_in_secs, feedback_is_useful)


class ExplorationActualStartEventHandler(BaseEventHandler):
    """Event handler for recording exploration actual start events."""

    EVENT_TYPE = feconf.EVENT_TYPE_ACTUAL_START_EXPLORATION

    @classmethod
    def _handle_event(cls, exp_id, exp_version, state_name, session_id):
        """Logs that a learner genuinely engaged with the exploration."""
        stats_models.ExplorationActualStartEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id)


class SolutionHitEventHandler(BaseEventHandler):
    """Event handler for recording solution hit events."""

    EVENT_TYPE = feconf.EVENT_TYPE_SOLUTION_HIT

    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        """Logs that the learner viewed the solution for a state."""
        stats_models.SolutionHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)


class StartExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration start events."""

    EVENT_TYPE = feconf.EVENT_TYPE_START_EXPLORATION

    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, params,
            play_type):
        """Logs an exploration start and bumps the owners' play counts."""
        stats_models.StartExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, params,
            play_type)
        handle_exploration_start(exp_id)


class MaybeLeaveExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration leave events."""

    EVENT_TYPE = feconf.EVENT_TYPE_MAYBE_LEAVE_EXPLORATION

    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        """Logs that the learner may have left the exploration."""
        stats_models.MaybeLeaveExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)


class CompleteExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration completion events."""

    EVENT_TYPE = feconf.EVENT_TYPE_COMPLETE_EXPLORATION

    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type):
        """Logs that the learner completed the exploration."""
        stats_models.CompleteExplorationEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id, time_spent,
            params, play_type)


class RateExplorationEventHandler(BaseEventHandler):
    """Event handler for recording exploration rating events."""

    EVENT_TYPE = feconf.EVENT_TYPE_RATE_EXPLORATION

    @classmethod
    def _handle_event(cls, exp_id, user_id, rating, old_rating):
        """Logs a rating event and refreshes the owners' average ratings."""
        stats_models.RateExplorationEventLogEntryModel.create(
            exp_id, user_id, rating, old_rating)
        handle_exploration_rating(exp_id, rating, old_rating)


class StateHitEventHandler(BaseEventHandler):
    """Event handler for recording state hit events."""

    EVENT_TYPE = feconf.EVENT_TYPE_STATE_HIT

    # TODO(sll): Remove params before sending this event to the jobs
    # taskqueue.
    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            params, play_type):
        """Logs that the learner reached a state in the exploration."""
        stats_models.StateHitEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            params, play_type)


class StateCompleteEventHandler(BaseEventHandler):
    """Event handler for recording state complete events."""

    EVENT_TYPE = feconf.EVENT_TYPE_STATE_COMPLETED

    @classmethod
    def _handle_event(
            cls, exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs):
        """Logs that the learner completed a state."""
        stats_models.StateCompleteEventLogEntryModel.create(
            exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)


class LeaveForRefresherExpEventHandler(BaseEventHandler):
    """Event handler for recording "leave for refresher exploration"
    events.
    """

    EVENT_TYPE = feconf.EVENT_TYPE_LEAVE_FOR_REFRESHER_EXP

    @classmethod
    def _handle_event(
            cls, exp_id, refresher_exp_id, exp_version, state_name,
            session_id, time_spent_in_state_secs):
        """Logs that the learner left for a refresher exploration."""
        stats_models.LeaveForRefresherExplorationEventLogEntryModel.create(
            exp_id, refresher_exp_id, exp_version, state_name, session_id,
            time_spent_in_state_secs)


class FeedbackThreadCreatedEventHandler(BaseEventHandler):
    """Event handler for recording new feedback thread creation events."""

    EVENT_TYPE = feconf.EVENT_TYPE_NEW_THREAD_CREATED

    @classmethod
    def _handle_event(cls, exp_id):
        """Delegates feedback-thread-creation handling to
        feedback_services.
        """
        feedback_services.handle_new_thread_created(exp_id)


class FeedbackThreadStatusChangedEventHandler(BaseEventHandler):
    """Event handler for recording reopening feedback thread events."""

    EVENT_TYPE = feconf.EVENT_TYPE_THREAD_STATUS_CHANGED

    @classmethod
    def _handle_event(cls, exp_id, old_status, new_status):
        """Delegates thread-status-change handling to feedback_services."""
        feedback_services.handle_thread_status_changed(
            exp_id, old_status, new_status)


def handle_exploration_start(exp_id):
    """Handles a user's start of an exploration.

    Args:
        exp_id: str. The exploration which has been started.
    """
    exp_summary = exp_fetchers.get_exploration_summary_by_id(exp_id)
    # No summary (e.g. exploration deleted) means there is nothing to update.
    if exp_summary:
        for user_id in exp_summary.owner_ids:
            _increment_total_plays_count_transactional(user_id)


def handle_exploration_rating(exp_id, rating, old_rating):
    """Handles a new rating for an exploration.

    Args:
        exp_id: str. The exploration which has been rated.
        rating: int. The new rating of the exploration.
        old_rating: int. The old rating of the exploration before
            refreshing.
    """
    exp_summary = exp_fetchers.get_exploration_summary_by_id(exp_id)
    # No summary (e.g. exploration deleted) means there is nothing to update.
    if exp_summary:
        for user_id in exp_summary.owner_ids:
            _refresh_average_ratings_transactional(user_id, rating, old_rating)


@transaction_services.run_in_transaction_wrapper
def _refresh_average_ratings_transactional(user_id, new_rating, old_rating):
    """Refreshes the average rating for a user.

    Runs inside a transaction so that concurrent rating events do not
    clobber each other's read-modify-write of the stats model.

    Args:
        user_id: str. The id of the user.
        new_rating: int. The new rating of the exploration.
        old_rating: int|None. The old rating of the exploration before
            refreshing, or None if the exploration hasn't been rated by the
            user yet.
    """
    user_stats_model = user_models.UserStatsModel.get(user_id, strict=False)
    # First rating ever recorded for this user: create the model and stop.
    if user_stats_model is None:
        user_models.UserStatsModel(
            id=user_id, average_ratings=new_rating, num_ratings=1).put()
        return

    num_ratings = user_stats_model.num_ratings
    average_ratings = user_stats_model.average_ratings
    if average_ratings is None:
        average_ratings = new_rating
        num_ratings += 1
    else:
        # Reconstruct the running sum, swap out the old rating if this is a
        # re-rating, then recompute the mean.
        sum_of_ratings = (average_ratings * num_ratings) + new_rating
        if old_rating is None:
            num_ratings += 1
        else:
            sum_of_ratings -= old_rating
        average_ratings = sum_of_ratings / float(num_ratings)

    user_stats_model.average_ratings = average_ratings
    user_stats_model.num_ratings = num_ratings
    user_stats_model.update_timestamps()
    user_stats_model.put()


@transaction_services.run_in_transaction_wrapper
def _increment_total_plays_count_transactional(user_id):
    """Increments the total plays count of the exploration.

    Args:
        user_id: str. The id of the user.
""" user_stats_model = user_models.UserStatsModel.get(user_id, strict=False) if user_stats_model is None: user_models.UserStatsModel(id=user_id, total_plays=1).put() else: user_stats_model.total_plays += 1 user_stats_model.update_timestamps() user_stats_model.put()
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.analytics.data_v1beta.types import analytics_data_api
from google.analytics.data_v1beta.types import data
from .transports.base import BetaAnalyticsDataTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import BetaAnalyticsDataGrpcAsyncIOTransport
from .client import BetaAnalyticsDataClient


class BetaAnalyticsDataAsyncClient:
    """Google Analytics reporting data service."""

    # All heavy lifting is delegated to the synchronous client; this class is
    # a thin async facade over it.
    _client: BetaAnalyticsDataClient

    DEFAULT_ENDPOINT = BetaAnalyticsDataClient.DEFAULT_ENDPOINT
    DEFAULT_MTLS_ENDPOINT = BetaAnalyticsDataClient.DEFAULT_MTLS_ENDPOINT

    metadata_path = staticmethod(BetaAnalyticsDataClient.metadata_path)
    parse_metadata_path = staticmethod(BetaAnalyticsDataClient.parse_metadata_path)
    common_billing_account_path = staticmethod(
        BetaAnalyticsDataClient.common_billing_account_path
    )
    parse_common_billing_account_path = staticmethod(
        BetaAnalyticsDataClient.parse_common_billing_account_path
    )
    common_folder_path = staticmethod(BetaAnalyticsDataClient.common_folder_path)
    parse_common_folder_path = staticmethod(
        BetaAnalyticsDataClient.parse_common_folder_path
    )
    common_organization_path = staticmethod(
        BetaAnalyticsDataClient.common_organization_path
    )
    parse_common_organization_path = staticmethod(
        BetaAnalyticsDataClient.parse_common_organization_path
    )
    common_project_path = staticmethod(BetaAnalyticsDataClient.common_project_path)
    parse_common_project_path = staticmethod(
        BetaAnalyticsDataClient.parse_common_project_path
    )
    common_location_path = staticmethod(BetaAnalyticsDataClient.common_location_path)
    parse_common_location_path = staticmethod(
        BetaAnalyticsDataClient.parse_common_location_path
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            BetaAnalyticsDataAsyncClient: The constructed client.
        """
        return BetaAnalyticsDataClient.from_service_account_info.__func__(BetaAnalyticsDataAsyncClient, info, *args, **kwargs)  # type: ignore

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            BetaAnalyticsDataAsyncClient: The constructed client.
        """
        return BetaAnalyticsDataClient.from_service_account_file.__func__(BetaAnalyticsDataAsyncClient, filename, *args, **kwargs)  # type: ignore

    from_service_account_json = from_service_account_file

    @classmethod
    def get_mtls_endpoint_and_cert_source(
        cls, client_options: Optional[ClientOptions] = None
    ):
        """Return the API endpoint and client cert source for mutual TLS.

        The client cert source is determined in the following order:
        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not
        "true", the client cert source is None.
        (2) if `client_options.client_cert_source` is provided, use the
        provided one; if the default client cert source exists, use the
        default one; otherwise the client cert source is None.

        The API endpoint is determined in the following order:
        (1) if `client_options.api_endpoint` is provided, use the provided one.
        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is
        "always", use the default mTLS endpoint; if the environment variable
        is "never", use the default API endpoint; otherwise if client cert
        source exists, use the default mTLS endpoint, otherwise use the
        default API endpoint.

        More details can be found at https://google.aip.dev/auth/4114.

        Args:
            client_options (google.api_core.client_options.ClientOptions):
                Custom options for the client. Only the `api_endpoint` and
                `client_cert_source` properties may be used in this method.

        Returns:
            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint
                and the client cert source to use.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
        """
        return BetaAnalyticsDataClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore

    @property
    def transport(self) -> BetaAnalyticsDataTransport:
        """Returns the transport used by the client instance.

        Returns:
            BetaAnalyticsDataTransport: The transport used by the client instance.
        """
        return self._client.transport

    get_transport_class = functools.partial(
        type(BetaAnalyticsDataClient).get_transport_class, type(BetaAnalyticsDataClient)
    )

    def __init__(
        self,
        *,
        credentials: ga_credentials.Credentials = None,
        transport: Union[str, BetaAnalyticsDataTransport] = "grpc_asyncio",
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiates the beta analytics data client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.BetaAnalyticsDataTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.

        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        self._client = BetaAnalyticsDataClient(
            credentials=credentials,
            transport=transport,
            client_options=client_options,
            client_info=client_info,
        )

    async def run_report(
        self,
        request: Union[analytics_data_api.RunReportRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.RunReportResponse:
        r"""Returns a customized report of your Google Analytics
        event data. Reports contain statistics derived from data
        collected by the Google Analytics tracking code. The
        data returned from the API is as a table with columns
        for the requested dimensions and metrics. Metrics are
        individual measurements of user activity on your
        property, such as active users or event count.
        Dimensions break down metrics across some common
        criteria, such as country or event name.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_run_report():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.RunReportRequest(
                )

                # Make the request
                response = client.run_report(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.RunReportRequest, dict]):
                The request object. The request to generate a report.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.RunReportResponse:
                The response report table
                corresponding to a request.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.RunReportRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.run_report,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def run_pivot_report(
        self,
        request: Union[analytics_data_api.RunPivotReportRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.RunPivotReportResponse:
        r"""Returns a customized pivot report of your Google
        Analytics event data. Pivot reports are more advanced
        and expressive formats than regular reports. In a pivot
        report, dimensions are only visible if they are included
        in a pivot. Multiple pivots can be specified to further
        dissect your data.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_run_pivot_report():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.RunPivotReportRequest(
                )

                # Make the request
                response = client.run_pivot_report(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.RunPivotReportRequest, dict]):
                The request object. The request to generate a pivot report.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.RunPivotReportResponse:
                The response pivot report table
                corresponding to a pivot request.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.RunPivotReportRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.run_pivot_report,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def batch_run_reports(
        self,
        request: Union[analytics_data_api.BatchRunReportsRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.BatchRunReportsResponse:
        r"""Returns multiple reports in a batch. All reports must
        be for the same GA4 Property.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_batch_run_reports():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.BatchRunReportsRequest(
                )

                # Make the request
                response = client.batch_run_reports(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.BatchRunReportsRequest, dict]):
                The request object. The batch request containing multiple
                report requests.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.BatchRunReportsResponse:
                The batch response containing
                multiple reports.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.BatchRunReportsRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.batch_run_reports,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def batch_run_pivot_reports(
        self,
        request: Union[analytics_data_api.BatchRunPivotReportsRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.BatchRunPivotReportsResponse:
        r"""Returns multiple pivot reports in a batch. All
        reports must be for the same GA4 Property.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_batch_run_pivot_reports():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.BatchRunPivotReportsRequest(
                )

                # Make the request
                response = client.batch_run_pivot_reports(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.BatchRunPivotReportsRequest, dict]):
                The request object. The batch request containing multiple
                pivot report requests.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.BatchRunPivotReportsResponse:
                The batch response containing
                multiple pivot reports.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.BatchRunPivotReportsRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.batch_run_pivot_reports,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def get_metadata(
        self,
        request: Union[analytics_data_api.GetMetadataRequest, dict] = None,
        *,
        name: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.Metadata:
        r"""Returns metadata for dimensions and metrics available in
        reporting methods. Used to explore the dimensions and metrics.
        In this method, a Google Analytics GA4 Property Identifier is
        specified in the request, and the metadata response includes
        Custom dimensions and metrics as well as Universal metadata.

        For example if a custom metric with parameter name
        ``levels_unlocked`` is registered to a property, the Metadata
        response will contain ``customEvent:levels_unlocked``. Universal
        metadata are dimensions and metrics applicable to any property
        such as ``country`` and ``totalUsers``.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_get_metadata():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.GetMetadataRequest(
                    name="name_value",
                )

                # Make the request
                response = client.get_metadata(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.GetMetadataRequest, dict]):
                The request object. Request for a property's dimension
                and metric metadata.
            name (:class:`str`):
                Required. The resource name of the metadata to
                retrieve. This name field is specified in the URL path
                and not URL parameters. Property is a numeric Google
                Analytics GA4 Property identifier. To learn more, see
                `where to find your Property
                ID <https://developers.google.com/analytics/devguides/reporting/data/v1/property-id>`__.

                Example: properties/1234/metadata

                Set the Property ID to 0 for dimensions and metrics
                common to all properties. In this special mode, this
                method will not return custom dimensions and metrics.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.Metadata:
                The dimensions and metrics currently
                accepted in reporting methods.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )

        request = analytics_data_api.GetMetadataRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_metadata,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def run_realtime_report(
        self,
        request: Union[analytics_data_api.RunRealtimeReportRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.RunRealtimeReportResponse:
        r"""The Google Analytics Realtime API returns a
        customized report of realtime event data for your
        property. These reports show events and usage from the
        last 30 minutes.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_run_realtime_report():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.RunRealtimeReportRequest(
                )

                # Make the request
                response = client.run_realtime_report(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.RunRealtimeReportRequest, dict]):
                The request object. The request to generate a realtime
                report.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.RunRealtimeReportResponse:
                The response realtime report table
                corresponding to a request.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.RunRealtimeReportRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.run_realtime_report,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def check_compatibility(
        self,
        request: Union[analytics_data_api.CheckCompatibilityRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> analytics_data_api.CheckCompatibilityResponse:
        r"""This compatibility method lists dimensions and
        metrics that can be added to a report request and
        maintain compatibility. This method fails if the
        request's dimensions and metrics are incompatible.
        In Google Analytics, reports fail if they request
        incompatible dimensions and/or metrics; in that case,
        you will need to remove dimensions and/or metrics from
        the incompatible report until the report is compatible.

        The Realtime and Core reports have different
        compatibility rules. This method checks compatibility
        for Core reports.

        .. code-block:: python

            from google.analytics import data_v1beta

            def sample_check_compatibility():
                # Create a client
                client = data_v1beta.BetaAnalyticsDataClient()

                # Initialize request argument(s)
                request = data_v1beta.CheckCompatibilityRequest(
                )

                # Make the request
                response = client.check_compatibility(request=request)

                # Handle the response
                print(response)

        Args:
            request (Union[google.analytics.data_v1beta.types.CheckCompatibilityRequest, dict]):
                The request object. The request for compatibility information
                for a report's dimensions and metrics. Check
                compatibility provides a preview of the compatibility
                of a report; fields shared with the `runReport` request
                should be the same values as in your `runReport`
                request.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with
                the request as metadata.

        Returns:
            google.analytics.data_v1beta.types.CheckCompatibilityResponse:
                The compatibility response with the
                compatibility of each dimension &
                metric.

        """
        # Create or coerce a protobuf request object.
        request = analytics_data_api.CheckCompatibilityRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.check_compatibility,
            default_timeout=None,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("property", request.property),)),
        )

        # Send the request.
        response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)

        # Done; return the response.
        return response

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc, tb):
        await self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution("google-analytics-data",).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = ("BetaAnalyticsDataAsyncClient",)
from .SingleCharClass import *
from .Styling import DefaultStyle
from .TextEnums import *

# Spacing between the glyph box and the label border, as a fraction of the
# font pixel height (applied above and below the text).
DEFAULTSPACING = 1.0 / 10


class Label(BaseControl):
    """
    Text displayer.

    Renders a string as a row of SingleChar children, laying out each glyph
    box from the font atlas metrics and aligning the row inside the control.

    @rtype : Label
    """

    def __init__(self, left, top, width, text, parent, pinning=PinningEnum.TopLeft, fontSize=10,
                 fontID='default', color=None, ID=None, imgID=None, rotation=None,
                 outlineLength=OutlineLenghtEnum.NoOutline, style=None):
        """
        :param left: left position, passed to BaseControl
        :param top: top position, passed to BaseControl
        :param width: control width; the height is derived from the font size
        :param text: string to display
        :param parent: parent control (must expose _guiMan)
        :param fontSize: font size used to compute the pixel height
        :param fontID: key of the font atlas to use
        :param color: background color; when None it is taken from the style
            with alpha forced to 0 (transparent background)
        :param outlineLength: outline setting forwarded to each glyph
        :param style: visual style; defaults to DefaultStyle(color)
        """
        style = style or DefaultStyle(color)
        if color is None:
            # Transparent background by default: copy the style color, zero alpha.
            color = vec4(style.backgroundColor)
            color.w = 0
        self._outlineLength = outlineLength
        self._fontSize = fontSize
        self._text = text
        self._fontWeight = FontWeightEnum.Normal
        self._fontColor = style.fontColor
        self._outlineColor = style.fontOutlineColor
        self._fontBorder = 0
        self._fontID = fontID
        self._oldOffset = 0
        # Derive the pixel height (also sets _fontMaxHeight, _spacing,
        # _baseline) before calling the base constructor, which needs it.
        height = self._presetHeightByFont(parent)
        super(Label, self).__init__(left, top, width, height + (self._spacing * 2), parent, pinning,
                                    color, ID, imgID, rotation, style)
        self.borderSize = 0
        self._isBuilt = False
        self._dirtyProperties = True
        self._updateSizeProperties()
        self._updateChars()

    @property
    def fontSize(self):
        return self._fontSize

    @fontSize.setter
    def fontSize(self, value):
        # Changing the font size re-derives the control height from the font.
        self._fontSize = value
        height = self._presetHeightByFont(self.parent)
        self.height = height

    def _presetHeightByFont(self, parent):
        # Query the GUI manager for the pixel height of the current font/size
        # and cache the derived layout metrics.
        height = parent._guiMan.getFontSizeInPixels(self.fontSize, self.fontID)
        self._fontMaxHeight = height
        self._spacing = DEFAULTSPACING * height
        # Baseline sits at 5/6 of the font height; _setCharsRatio may lower
        # it further if a glyph would overflow the box.
        self._baseline = height - (height / 6.0)
        return height

    def _getText(self):
        return self._text

    def _setText(self, val):
        # Defer the rebuild: _update() recreates the children when needed.
        self._text = val
        self._isBuilt = False

    text = property(_getText, _setText)

    def _updateChars(self):
        """Recreate one SingleChar child per character of the text."""
        self._isBuilt = True
        self._dirtyProperties = False
        try:
            self._children.clear()
        except AttributeError:
            # First build: the children container does not exist yet.
            self._children = []
        height = self._height
        left = 0
        for c in self._text:
            newChar = SingleChar(left, 0, height, c, self, PinningEnum.NoPinning,
                                 fontID=self._fontID, color=vec4(0, 0, 0, 0), borderSize=0,
                                 style=self.style)
            newChar.outlineLength = self._outlineLength
            newChar.outlineColor = self._outlineColor
            newChar.fontWeight = self._fontWeight
            newChar.fontColor = self._fontColor
            # Provisional advance; the real per-glyph layout happens in
            # _setCharsRatio below.
            left += height
        self._setCharsRatio()

    def _setCharsRatio(self):
        """Size and position every glyph from the font atlas metrics.

        If a glyph's lower edge would overflow the font height, the baseline
        is raised and the whole layout is recomputed recursively.
        """
        self._dirty = True
        if len(self._children) == 0:
            return
        spacing = self._spacing
        advanceX = spacing
        maxHeight = float(self._fontMaxHeight)
        guiMan = self._guiMan
        assert isinstance(guiMan, GuiManager)
        fontInfo = guiMan.fontInfos[self.fontID]
        assert isinstance(fontInfo, AtlasInfo)
        baseline = self._baseline
        gotBroken = False
        for c in self._children:
            c._dirty = True
            assert isinstance(c, SingleChar)
            hasChar = c._charCode in fontInfo.charDataDict
            if not hasChar:
                raise NotImplementedError('char \'{}\' not included in font atlas.'.format(c.char))
            cdata = fontInfo.charDataDict[c._charCode]
            assert isinstance(cdata, CharData)
            # Glyph quad is square, scaled by the larger of the atlas
            # width/height ratios (cdata.width/height appear to be fractions
            # of the font height -- TODO confirm against CharData).
            if cdata.height > cdata.width:
                boxScale = maxHeight * cdata.height
            else:
                boxScale = cdata.width * maxHeight
            boxWidth = boxScale
            boxHeight = boxScale
            charHeight = cdata.height * maxHeight
            charWidth = cdata.width * maxHeight
            # Centering offsets of the actual glyph inside its square box.
            charBottom = (boxHeight - charHeight) / 2.0
            charLeft = (boxWidth - charWidth) / 2.0
            if c.char == ' ':
                posY = 0
            else:
                posY = baseline - (cdata.above * maxHeight) - charBottom
            lowerEdge = posY + boxHeight
            if lowerEdge > maxHeight:
                # Glyph overflows: raise the baseline and redo the layout.
                self._baseline -= lowerEdge - maxHeight
                self._setCharsRatio()
                gotBroken = True
                break
            c.size = vec3(boxWidth, boxHeight, 1)
            c.position = vec3(advanceX - charLeft, posY + spacing, 0)
            # advanceX += (cdata.advance[0] * maxHeight)  # This is the 'right way'
            # advanceX += charWidth  # This is a safe way
            advanceX += ((cdata.advance[0] * maxHeight) + charWidth) / 2.0  # This looks better
            c._material.shaderProperties['fontHeightInPixels'] = self.height
            self._totalLength = c.position.x + c.size.x
        if not gotBroken:
            self._alignText()

    def _alignText(self):
        # Shift all glyphs horizontally according to the configured text
        # alignment.  NOTE(review): only c.left is adjusted -- vertical
        # alignment (y) is computed but not applied; confirm intended.
        size = vec3(self._totalLength, self.height - (self.borderSize * 2) + (self._spacing * 2), 1)
        x, y, z = self.getAlignedPosition(size, self.size, self.borderSize,
                                          self._vTextAlign, self._hTextAlign)
        for c in self._children:
            c.left += x

    def _hTextAlignSet(self, value):
        super(Label, self)._hTextAlignSet(value)
        self._dirty = True
        self._alignText()

    def _vTextAlignSet(self, value):
        super(Label, self)._vTextAlignSet(value)
        self._dirty = True
        self._alignText()

    def _update(self):
        # Lazily rebuild children / re-push per-glyph properties before the
        # base-class update.
        if not self._isBuilt:
            self._updateChars()
        if self._dirtyProperties:
            self._updateCharacterProperties()
        super(Label, self)._update()

    def _updateCharacterProperties(self):
        # Propagate the label-level font settings to every glyph child.
        for c in self._children:
            c.fontID = self._fontID
            c.fontBorder = self._fontBorder
            c.outlineColor = self._outlineColor
            c.fontColor = self._fontColor
            c.fontWeight = self._fontWeight

    def _getFontBorder(self):
        return self._fontBorder

    def _setFontBorder(self, val):
        self._fontBorder = val
        self._dirtyProperties = True

    fontBorder = property(_getFontBorder, _setFontBorder)

    @property
    def outlineColor(self):
        return self._outlineColor

    @outlineColor.setter
    def outlineColor(self, value):
        self._outlineColor = value
        self._dirtyProperties = True

    @property
    def outlineLength(self):
        return self._outlineLength

    @outlineLength.setter
    def outlineLength(self, value):
        self._outlineLength = value
        self._dirtyProperties = True

    def _getFontColor(self):
        return self._fontColor

    def _setFontColor(self, val):
        self._fontColor = val
        self._dirtyProperties = True

    fontColor = property(_getFontColor, _setFontColor)

    def _getfontWeight(self):
        return self._fontWeight

    def _setfontWeight(self, val):
        self._fontWeight = val
        self._dirtyProperties = True

    fontWeight = property(_getfontWeight, _setfontWeight)

    def _setFont(self, fontID):
        # Switching fonts changes the derived height, so resize the control
        # and schedule a full rebuild of the glyph children.
        self._fontID = fontID
        w, h, z = self.size
        height = self._presetHeightByFont(self.parent)
        self.size = vec3(w, height + (self._spacing * 2), z)
        self._dirtyProperties = True
        self._isBuilt = False

    def _getFont(self):
        return self._fontID

    fontID = property(_getFont, _setFont)

    def __repr__(self):
        return self._text

    def _updateSizeProperties(self):
        super(Label, self)._updateSizeProperties()
        # Re-layout the glyphs whenever the control size changes.
        self._setCharsRatio()

    @property
    def color(self):
        return super(Label, self)._getColor()

    @color.setter
    def color(self, value):
        self._dirtyProperties = True
        super(Label, self)._setColor(value)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Defines a retrieval model that is trained on next-item prediction task using
dynamic item density smoothing.

The retrieval model trains the user and item tower to generate low-dimensional
user and item embeddings, which are used to retrieve candidate items for the
next item engagement. The sample_weights are computed based on iterative updates
using dynamic item density smoothing as proposed in the following project doc -
http://shortn/_uPej1Fh7Jq#heading=h.sd8nixbhxgso.
"""

import os
from typing import Any, Callable, Dict, Optional, Tuple

from absl import logging
import numpy as np
from scipy import stats
import tensorflow as tf

from multiple_user_representations.models import retrieval
from multiple_user_representations.models import util


class DensityWeightedRetrievalModel(retrieval.RetrievalModel):
  """Retrieval model for next-item task with iterative density item weights."""

  def __init__(self,
               user_model,
               candidate_model,
               task,
               num_items,
               use_disagreement_loss = False,
               l2_normalize = False):
    """See the base class."""
    # Sample weighting is always on for this model; the weights themselves
    # are refined iteratively by `iterative_training`.
    self._use_sample_weight = True
    # Kernel-density estimator used to score item density in embedding space.
    self._critic = stats.gaussian_kde
    super().__init__(user_model, candidate_model, task, num_items,
                     use_disagreement_loss, l2_normalize,
                     self._use_sample_weight)

  def _update_item_weights_using_density(
      self,
      item_dataset,
      item_count_weights,
      num_samples_for_density_model = 10000,
      momentum = 0.9,
      save_state_path = None):
    """Updates item weights based on the item density in the embedding space.

    Args:
      item_dataset: The tf dataset containing ids of all items.
      item_count_weights: A dictionary mapping item_id to (item_count,
        item_weight), where item_count refers to the frequency in the training
        dataset.
      num_samples_for_density_model: Num samples to use when learning density
        model.
      momentum: A scalar momentum used to update item weights iteratively.
      save_state_path: If not None, saves item_embeddings and sample_weight for
        visualization at save_state_path.

    Returns:
      updated_item_count_weight: A dictionary mapping item_id to
        (item_count, updated_item_weight).
    """

    def item_map(batched_items):
      # Embed a batch of item ids with the candidate tower; the expand/squeeze
      # pair adds and removes the sequence dimension the tower expects.
      return tf.squeeze(
          self.candidate_model(tf.expand_dims(batched_items, axis=1)))

    item_embeddings = list(
        item_dataset.batch(100).map(item_map).unbatch().as_numpy_iterator())
    # NOTE: item ids are used below to index this array, so the dataset is
    # assumed to yield items in id order -- TODO confirm.
    item_embeddings = np.array(item_embeddings)

    train_next_items = np.array(list(item_count_weights.keys()))
    train_item_counts, train_item_weights = zip(
        *list(item_count_weights.values()))
    train_item_weights = np.array(train_item_weights)
    # Sample items proportionally to their current weights to fit the KDE.
    sampled_items = np.random.choice(
        train_next_items,
        p=train_item_weights,
        size=num_samples_for_density_model)

    # Get embeddings and smooth the data with normal noise.
    train_data = item_embeddings[sampled_items]
    train_data += np.random.normal(loc=0.0, scale=0.1, size=train_data.shape)

    # Learning the density function.
    kernel = self._critic(train_data.T)

    # Update weights: inverse-density, normalized, blended with the previous
    # weights by `momentum`.  The 1e-4 floor avoids division blow-up.
    train_next_item_embeddings = item_embeddings[train_next_items]
    item_densities = kernel(train_next_item_embeddings.T) + 1e-4
    item_density_weights = (1.0 / item_densities)
    item_density_weights /= np.sum(item_density_weights)
    updated_item_weights = momentum * train_item_weights + (
        1 - momentum) * item_density_weights
    updated_item_count_weight = dict(
        zip(train_next_items, zip(train_item_counts, updated_item_weights)))

    if save_state_path is not None:
      # Persist intermediate state for offline visualization/debugging.
      embeddings_file = os.path.join(save_state_path, "embeddings.npy")
      current_weights_file = os.path.join(save_state_path,
                                          "current_weights.npy")
      new_weights_file = os.path.join(save_state_path, "new_weights.npy")
      user_query_file = os.path.join(save_state_path, "user_queries.npy")
      util.save_np(embeddings_file, item_embeddings)
      util.save_np(current_weights_file, item_count_weights)
      util.save_np(new_weights_file, updated_item_count_weight)
      util.save_np(user_query_file, self.user_model.query_head.numpy())

    return updated_item_count_weight

  def iterative_training(self,
                         fit_retrieval_model_fn,
                         train_dataset,
                         item_dataset,
                         item_count_weights,
                         results_dir = None,
                         delta = 0.005,
                         momentum = 0.9,
                         max_iterations = 20):
    """Performs iterative training of the retrieval model.

    For each iteration the sample_weights are updated in the training dataset.
    The weights are computed using the
    self._update_item_weights_using_density method.

    Args:
      fit_retrieval_model_fn: A function that trains the retrieval model on the
        given dataset. The function should take tf.data.Dataset as argument.
      train_dataset: The train dataset for the next_item prediction task.
      item_dataset: The item dataset containing the item ids.
      item_count_weights: A dictionary mapping item_ids to (item_count,
        item_sampling_probability).
      results_dir: Path to save intermediate results.
      delta: Stop iterative_training when ||w_{t+1} - w_t|| < delta.
      momentum: Momentum for weight updates.
      max_iterations: Max number of iterations for weight updates.

    Returns:
      The object returned from the fit_retrieval_model_fn in the last
      iteration.
    """
    converged = False
    iteration = 0
    history = None
    while not converged:
      path = os.path.join(
          results_dir,
          f"iteration_{iteration+1}") if results_dir is not None else None
      updated_item_count_weights = self._update_item_weights_using_density(
          item_dataset,
          item_count_weights,
          momentum=momentum,
          save_state_path=path)
      train_dataset = util.update_train_dataset_with_sample_weights(
          train_dataset, updated_item_count_weights)

      # Collect old/new weights in the same key order to measure the update
      # magnitude.
      prior_weights = []
      updated_weights = []
      for item_id in item_count_weights.keys():
        prior_weights.append(item_count_weights[item_id][1])
        updated_weights.append(updated_item_count_weights[item_id][1])

      prior_weights = np.array(prior_weights)
      updated_weights = np.array(updated_weights)
      # Convergence criterion: infinity norm of the weight change.
      delta_w = np.linalg.norm(prior_weights - updated_weights, ord=np.inf)
      item_count_weights = updated_item_count_weights
      converged = delta_w < delta
      if not converged:
        # Only retrain when the weights actually moved; on convergence the
        # model from the previous iteration is kept.
        history = fit_retrieval_model_fn(train_dataset=train_dataset)
      iteration += 1
      logging.info("Iteration %d Delta_W: %.4f", iteration, delta_w)
      if iteration >= max_iterations:
        logging.info("Max iteration ({%d}) reached! Delta_W: %.4f", iteration,
                     delta_w)
        break

    return history
""" User name to file name conversion. This was taken form the UFO 3 spec. """ from __future__ import unicode_literals from fontTools.misc.py23 import basestring, unicode illegalCharacters = "\" * + / : < > ? [ \ ] | \0".split(" ") illegalCharacters += [chr(i) for i in range(1, 32)] illegalCharacters += [chr(0x7F)] reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ") reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ") maxFileNameLength = 255 class NameTranslationError(Exception): pass def userNameToFileName(userName, existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> userNameToFileName("a") == "a" True >>> userNameToFileName("A") == "A_" True >>> userNameToFileName("AE") == "A_E_" True >>> userNameToFileName("Ae") == "A_e" True >>> userNameToFileName("ae") == "ae" True >>> userNameToFileName("aE") == "aE_" True >>> userNameToFileName("a.alt") == "a.alt" True >>> userNameToFileName("A.alt") == "A_.alt" True >>> userNameToFileName("A.Alt") == "A_.A_lt" True >>> userNameToFileName("A.aLt") == "A_.aL_t" True >>> userNameToFileName(u"A.alT") == "A_.alT_" True >>> userNameToFileName("T_H") == "T__H_" True >>> userNameToFileName("T_h") == "T__h" True >>> userNameToFileName("t_h") == "t_h" True >>> userNameToFileName("F_F_I") == "F__F__I_" True >>> userNameToFileName("f_f_i") == "f_f_i" True >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash" True >>> userNameToFileName(".notdef") == "_notdef" True >>> userNameToFileName("con") == "_con" True >>> userNameToFileName("CON") == "C_O_N_" True >>> userNameToFileName("con.alt") == "_con.alt" True >>> userNameToFileName("alt.con") == "alt._con" True """ # the incoming name must be a unicode string if not isinstance(userName, unicode): raise ValueError("The value for userName must be a unicode string.") # establish the prefix and suffix lengths prefixLength = len(prefix) suffixLength = len(suffix) # replace an 
initial period with an _ # if no prefix is to be added if not prefix and userName[0] == ".": userName = "_" + userName[1:] # filter the user name filteredUserName = [] for character in userName: # replace illegal characters with _ if character in illegalCharacters: character = "_" # add _ to all non-lower characters elif character != character.lower(): character += "_" filteredUserName.append(character) userName = "".join(filteredUserName) # clip to 255 sliceLength = maxFileNameLength - prefixLength - suffixLength userName = userName[:sliceLength] # test for illegal files names parts = [] for part in userName.split("."): if part.lower() in reservedFileNames: part = "_" + part parts.append(part) userName = ".".join(parts) # test for clash fullName = prefix + userName + suffix if fullName.lower() in existing: fullName = handleClash1(userName, existing, prefix, suffix) # finished return fullName def handleClash1(userName, existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = ["a" * 5] >>> e = list(existing) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000001.0000000000') True >>> e = list(existing) >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... '00000.AAAAA000000000000002.0000000000') True >>> e = list(existing) >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix) >>> handleClash1(userName="A" * 5, existing=e, ... prefix=prefix, suffix=suffix) == ( ... 
'00000.AAAAA000000000000001.0000000000') True """ # if the prefix length + user name length + suffix length + 15 is at # or past the maximum length, silce 15 characters off of the user name prefixLength = len(prefix) suffixLength = len(suffix) if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength: l = (prefixLength + len(userName) + suffixLength + 15) sliceLength = maxFileNameLength - l userName = userName[:sliceLength] finalName = None # try to add numbers to create a unique name counter = 1 while finalName is None: name = userName + str(counter).zfill(15) fullName = prefix + name + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= 999999999999999: break # if there is a clash, go to the next fallback if finalName is None: finalName = handleClash2(existing, prefix, suffix) # finished return finalName def handleClash2(existing=[], prefix="", suffix=""): """ existing should be a case-insensitive list of all existing file names. >>> prefix = ("0" * 5) + "." >>> suffix = "." + ("0" * 10) >>> existing = [prefix + str(i) + suffix for i in range(100)] >>> e = list(existing) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.100.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "1" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... '00000.1.0000000000') True >>> e = list(existing) >>> e.remove(prefix + "2" + suffix) >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == ( ... 
'00000.2.0000000000') True """ # calculate the longest possible string maxLength = maxFileNameLength - len(prefix) - len(suffix) maxValue = int("9" * maxLength) # try to find a number finalName = None counter = 1 while finalName is None: fullName = prefix + str(counter) + suffix if fullName.lower() not in existing: finalName = fullName break else: counter += 1 if counter >= maxValue: break # raise an error if nothing has been found if finalName is None: raise NameTranslationError("No unique name could be found.") # finished return finalName if __name__ == "__main__": import doctest doctest.testmod()
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0

# Tests for the MCMC / StreamingMCMC front-end API (sampler selection,
# multiprocessing, diagnostics, hooks and sample selection).

import os
from functools import partial

import pytest
import torch

import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.mcmc import HMC, NUTS
from pyro.infer.mcmc.api import MCMC, StreamingMCMC, _MultiSampler, _UnarySampler
from pyro.infer.mcmc.mcmc_kernel import MCMCKernel
from pyro.infer.mcmc.util import initialize_model, select_samples
from pyro.ops.streaming import StackStats, StatsOfDict
from pyro.util import optional
from tests.common import assert_close


class PriorKernel(MCMCKernel):
    """
    Disregards the value of the current trace (or observed data) and
    samples a value from the model's prior.
    """

    def __init__(self, model):
        self.model = model
        self.data = None
        self._initial_params = None
        self._prototype_trace = None
        self.transforms = None

    def setup(self, warmup_steps, data):
        # Initialize params/transforms lazily so that values injected by the
        # MCMC API before setup() are preserved.
        self.data = data
        init_params, potential_fn, transforms, model_trace = initialize_model(
            self.model, model_args=(data,)
        )
        if self._initial_params is None:
            self._initial_params = init_params
        if self.transforms is None:
            self.transforms = transforms
        self._prototype_trace = model_trace

    def diagnostics(self):
        # Fixed payload so tests can assert that per-chain diagnostics are
        # collected and merged correctly.
        return {"dummy_key": "dummy_value"}

    @property
    def initial_params(self):
        return self._initial_params

    @initial_params.setter
    def initial_params(self, params):
        self._initial_params = params

    def cleanup(self):
        self.data = None

    def sample_params(self):
        # Draw a fresh sample from the prior by tracing the model.
        trace = poutine.trace(self.model).get_trace(self.data)
        return {k: v["value"] for k, v in trace.iter_stochastic_nodes()}

    def sample(self, params):
        new_params = self.sample_params()
        # Sanity-check the structure matches the incoming params exactly.
        assert params.keys() == new_params.keys()
        for k, v in params.items():
            assert new_params[k].shape == v.shape
        return new_params


def normal_normal_model(data):
    # Simple conjugate model: y ~ N(0, 1), obs ~ N(y, 1).
    x = torch.tensor([0.0])
    y = pyro.sample("y", dist.Normal(x, torch.ones(data.shape)))
    pyro.sample("obs", dist.Normal(y, torch.tensor([1.0])), obs=data)
    return y


def run_default_mcmc(
    data,
    kernel,
    num_samples,
    warmup_steps=None,
    initial_params=None,
    num_chains=1,
    hook_fn=None,
    mp_context=None,
    transforms=None,
    num_draws=None,
    group_by_chain=False,
):
    """Run the standard MCMC class and return (samples, num_chains)."""
    mcmc = MCMC(
        kernel=kernel,
        num_samples=num_samples,
        warmup_steps=warmup_steps,
        initial_params=initial_params,
        num_chains=num_chains,
        hook_fn=hook_fn,
        mp_context=mp_context,
        transforms=transforms,
    )
    mcmc.run(data)
    return mcmc.get_samples(num_draws, group_by_chain=group_by_chain), mcmc.num_chains


def run_streaming_mcmc(
    data,
    kernel,
    num_samples,
    warmup_steps=None,
    initial_params=None,
    num_chains=1,
    hook_fn=None,
    mp_context=None,
    transforms=None,
    num_draws=None,
    group_by_chain=False,
):
    """Run StreamingMCMC and reshape its statistics to match run_default_mcmc.

    Same signature as run_default_mcmc so both can be parametrized
    interchangeably (mp_context is accepted but unused by StreamingMCMC).
    """
    mcmc = StreamingMCMC(
        kernel=kernel,
        num_samples=num_samples,
        warmup_steps=warmup_steps,
        initial_params=initial_params,
        statistics=StatsOfDict(default=StackStats),
        num_chains=num_chains,
        hook_fn=hook_fn,
        transforms=transforms,
    )
    mcmc.run(data)
    statistics = mcmc.get_statistics(group_by_chain=group_by_chain)

    if group_by_chain:
        # Statistics come keyed by (chain, name); stack per-name across chains.
        samples = {}
        agg = {}
        for (_, name), stat in statistics.items():
            if name in agg:
                agg[name].append(stat["samples"])
            else:
                agg[name] = [stat["samples"]]
        for name, l in agg.items():
            samples[name] = torch.stack(l)
    else:
        samples = {name: stat["samples"] for name, stat in statistics.items()}

    samples = select_samples(samples, num_draws, group_by_chain)
    if not group_by_chain:
        # Match the trailing event dimension produced by MCMC.get_samples.
        samples = {name: stat.unsqueeze(-1) for name, stat in samples.items()}

    return samples, mcmc.num_chains


@pytest.mark.parametrize("run_mcmc_cls", [run_default_mcmc, run_streaming_mcmc])
@pytest.mark.parametrize("num_draws", [None, 1800, 2200])
@pytest.mark.parametrize("group_by_chain", [False, True])
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_mcmc_interface(run_mcmc_cls, num_draws, group_by_chain, num_chains):
    # Shape and moment checks for the public get_samples interface.
    num_samples = 2000
    data = torch.tensor([1.0])
    initial_params, _, transforms, _ = initialize_model(
        normal_normal_model, model_args=(data,), num_chains=num_chains
    )
    kernel = PriorKernel(normal_normal_model)
    samples, mcmc_num_chains = run_mcmc_cls(
        data,
        kernel,
        num_samples=num_samples,
        warmup_steps=100,
        initial_params=initial_params,
        num_chains=num_chains,
        mp_context="spawn",
        transforms=transforms,
        num_draws=num_draws,
        group_by_chain=group_by_chain,
    )
    # test sample shape
    expected_samples = num_draws if num_draws is not None else num_samples
    if group_by_chain:
        expected_shape = (mcmc_num_chains, expected_samples, 1)
    elif num_draws is not None:
        # FIXME: what is the expected behavior of num_draw is not None and group_by_chain=False?
        expected_shape = (expected_samples, 1)
    else:
        expected_shape = (mcmc_num_chains * expected_samples, 1)
    assert samples["y"].shape == expected_shape

    # test sample stats
    if group_by_chain:
        samples = {k: v.reshape((-1,) + v.shape[2:]) for k, v in samples.items()}
    sample_mean = samples["y"].mean()
    sample_std = samples["y"].std()
    assert_close(sample_mean, torch.tensor(0.0), atol=0.1)
    assert_close(sample_std, torch.tensor(1.0), atol=0.1)


@pytest.mark.parametrize(
    "num_chains, cpu_count",
    [
        (1, 2),
        (2, 1),
        (2, 2),
        (2, 3),
    ],
)
@pytest.mark.parametrize("default_init_params", [True, False])
def test_num_chains(num_chains, cpu_count, default_init_params, monkeypatch):
    # Verify sampler selection: multiprocess sampling only when enough CPUs
    # are available (cpu_count - 1), otherwise sequential with a warning.
    monkeypatch.setattr(torch.multiprocessing, "cpu_count", lambda: cpu_count)
    data = torch.tensor([1.0])
    initial_params, _, transforms, _ = initialize_model(
        normal_normal_model, model_args=(data,), num_chains=num_chains
    )
    if default_init_params:
        initial_params = None
    kernel = PriorKernel(normal_normal_model)
    available_cpu = max(1, cpu_count - 1)
    mp_context = "spawn"
    with optional(pytest.warns(UserWarning), available_cpu < num_chains):
        mcmc = MCMC(
            kernel,
            num_samples=10,
            warmup_steps=10,
            num_chains=num_chains,
            initial_params=initial_params,
            transforms=transforms,
            mp_context=mp_context,
        )
        mcmc.run(data)
        assert mcmc.num_chains == num_chains
        if mcmc.num_chains == 1 or available_cpu < num_chains:
            assert isinstance(mcmc.sampler, _UnarySampler)
        else:
            assert isinstance(mcmc.sampler, _MultiSampler)


def _empty_model():
    # Model with no sample sites; used to test hook behavior in isolation.
    return torch.tensor(1)


def _hook(iters, kernel, samples, stage, i):
    # Record the (stage, iteration) sequence delivered to hook_fn.
    assert samples == {}
    iters.append((stage, i))


@pytest.mark.parametrize("run_mcmc_cls", [run_default_mcmc, run_streaming_mcmc])
@pytest.mark.parametrize(
    "kernel, model",
    [
        (HMC, _empty_model),
        (NUTS, _empty_model),
    ],
)
@pytest.mark.parametrize("jit", [False, True])
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_null_model_with_hook(run_mcmc_cls, kernel, model, jit, num_chains):
    num_warmup, num_samples = 10, 10
    initial_params, potential_fn, transforms, _ = initialize_model(
        model, num_chains=num_chains
    )

    iters = []
    hook = partial(_hook, iters)
    # CUDA + fork do not mix; use spawn on CUDA test machines.
    mp_context = "spawn" if "CUDA_TEST" in os.environ else None

    kern = kernel(potential_fn=potential_fn, transforms=transforms, jit_compile=jit)
    samples, _ = run_mcmc_cls(
        data=None,
        kernel=kern,
        num_samples=num_samples,
        warmup_steps=num_warmup,
        initial_params=initial_params,
        hook_fn=hook,
        num_chains=num_chains,
        mp_context=mp_context,
    )
    assert samples == {}
    if num_chains == 1:
        # Hooks from worker processes are not collected, so only check the
        # single-chain ordering.
        expected = [("Warmup", i) for i in range(num_warmup)] + [
            ("Sample", i) for i in range(num_samples)
        ]
        assert iters == expected


@pytest.mark.parametrize("run_mcmc_cls", [run_default_mcmc, run_streaming_mcmc])
@pytest.mark.parametrize("num_chains", [1, 2])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_mcmc_diagnostics(run_mcmc_cls, num_chains):
    data = torch.tensor([2.0]).repeat(3)
    initial_params, _, transforms, _ = initialize_model(
        normal_normal_model, model_args=(data,), num_chains=num_chains
    )
    kernel = PriorKernel(normal_normal_model)
    if run_mcmc_cls == run_default_mcmc:
        mcmc = MCMC(
            kernel,
            num_samples=10,
            warmup_steps=10,
            num_chains=num_chains,
            mp_context="spawn",
            initial_params=initial_params,
            transforms=transforms,
        )
    else:
        mcmc = StreamingMCMC(
            kernel,
            num_samples=10,
            warmup_steps=10,
            num_chains=num_chains,
            initial_params=initial_params,
            transforms=transforms,
        )
    mcmc.run(data)
    if not torch.backends.mkl.is_available():
        pytest.skip()
    diagnostics = mcmc.diagnostics()
    if run_mcmc_cls == run_default_mcmc:
        # TODO n_eff for streaming MCMC
        assert diagnostics["y"]["n_eff"].shape == data.shape
        assert diagnostics["y"]["r_hat"].shape == data.shape
    assert diagnostics["dummy_key"] == {
        "chain {}".format(i): "dummy_value" for i in range(num_chains)
    }


@pytest.mark.parametrize("run_mcmc_cls", [run_default_mcmc, run_streaming_mcmc])
@pytest.mark.filterwarnings("ignore:num_chains")
def test_sequential_consistent(run_mcmc_cls, monkeypatch):
    # test if there is no stuff left from the previous chain
    monkeypatch.setattr(torch.multiprocessing, "cpu_count", lambda: 1)

    # Two kernels that seed the RNG with opposite chain ids: if sequential
    # chains are isolated, swapping the seeds swaps the chains exactly.
    class FirstKernel(NUTS):
        def setup(self, warmup_steps, *args, **kwargs):
            self._chain_id = 0 if "_chain_id" not in self.__dict__ else 1
            pyro.set_rng_seed(self._chain_id)
            super().setup(warmup_steps, *args, **kwargs)

    class SecondKernel(NUTS):
        def setup(self, warmup_steps, *args, **kwargs):
            self._chain_id = 1 if "_chain_id" not in self.__dict__ else 0
            pyro.set_rng_seed(self._chain_id)
            super().setup(warmup_steps, *args, **kwargs)

    data = torch.tensor([1.0])
    kernel = FirstKernel(normal_normal_model)
    samples1, _ = run_mcmc_cls(
        data,
        kernel,
        num_samples=100,
        warmup_steps=100,
        num_chains=2,
        group_by_chain=True,
    )

    kernel = SecondKernel(normal_normal_model)
    samples2, _ = run_mcmc_cls(
        data,
        kernel,
        num_samples=100,
        warmup_steps=100,
        num_chains=2,
        group_by_chain=True,
    )

    assert_close(samples1["y"][0], samples2["y"][1])
    assert_close(samples1["y"][1], samples2["y"][0])


@pytest.mark.parametrize("run_mcmc_cls", [run_default_mcmc, run_streaming_mcmc])
def test_model_with_potential_fn(run_mcmc_cls):
    # Smoke test: kernels constructed from a bare potential_fn (no model).
    init_params = {"z": torch.tensor(0.0)}

    def potential_fn(params):
        return params["z"]

    run_mcmc_cls(
        data=None,
        kernel=HMC(potential_fn=potential_fn),
        num_samples=10,
        warmup_steps=10,
        initial_params=init_params,
    )


# NOTE(review): "xy" appears twice in this parametrization -- looks like one
# of them was meant to be a different subset (e.g. "yx"); confirm intent.
@pytest.mark.parametrize("save_params", ["xy", "x", "y", "xy"])
@pytest.mark.parametrize(
    "Kernel,options",
    [
        (HMC, {}),
        (NUTS, {"max_tree_depth": 2}),
    ],
)
def test_save_params(save_params, Kernel, options):
    save_params = list(save_params)

    def model():
        x = pyro.sample("x", dist.Normal(0, 1))
        with pyro.plate("plate", 2):
            y = pyro.sample("y", dist.Normal(x, 1))
        pyro.sample("obs", dist.Normal(y, 1), obs=torch.zeros(2))

    kernel = Kernel(model, **options)
    mcmc = MCMC(kernel, warmup_steps=2, num_samples=4, save_params=save_params)
    mcmc.run()

    # Only the requested sites should be stored and reported.
    samples = mcmc.get_samples()
    assert set(samples.keys()) == set(save_params)

    diagnostics = mcmc.diagnostics()
    diagnostics = {k: v for k, v in diagnostics.items() if k in "xy"}
    assert set(diagnostics.keys()) == set(save_params)

    mcmc.summary()  # smoke test
""" Data Structures used to represented molecules for convolutions. """ from __future__ import print_function from __future__ import division from __future__ import unicode_literals __author__ = "Han Altae-Tran and Bharath Ramsundar" __copyright__ = "Copyright 2016, Stanford University" __license__ = "MIT" import csv import random import numpy as np def cumulative_sum_minus_last(l, offset=0): """Returns cumulative sums for set of counts, removing last entry. Returns the cumulative sums for a set of counts with the first returned value starting at 0. I.e [3,2,4] -> [0, 3, 5]. Note last sum element 9 is missing. Useful for reindexing Parameters ---------- l: list List of integers. Typically small counts. """ return np.delete(np.insert(np.cumsum(l), 0, 0), -1) + offset def cumulative_sum(l, offset=0): """Returns cumulative sums for set of counts. Returns the cumulative sums for a set of counts with the first returned value starting at 0. I.e [3,2,4] -> [0, 3, 5, 9]. Keeps final sum for searching. Useful for reindexing. Parameters ---------- l: list List of integers. Typically small counts. """ return np.insert(np.cumsum(l), 0, 0) + offset class ConvMol(object): """Holds information about a molecules. Resorts order of atoms internally to be in order of increasing degree. Note that only heavy atoms (hydrogens excluded) are considered here. """ def __init__(self, atom_features, adj_list, max_deg=10, min_deg=0): """ Parameters ---------- atom_features: np.ndarray Has shape (n_atoms, n_feat) canon_ad_list: list List of length n_atoms, with neighor indices of each atom. max_deg: int, optional Maximum degree of any atom. min_deg: int, optional Minimum degree of any atom. 
""" self.atom_features = atom_features self.n_atoms, self.n_feat = atom_features.shape self.deg_list = np.array([len(nbrs) for nbrs in adj_list], dtype=np.int32) self.canon_adj_list = adj_list self.deg_adj_lists = [] self.deg_slice = [] self.max_deg = max_deg self.min_deg = min_deg self.membership = self.get_num_atoms() * [0] self._deg_sort() # Get the degree id list (which corrects for min_deg) self.deg_id_list = np.array(self.deg_list) - min_deg # Get the size of each degree block deg_size = [ self.get_num_atoms_with_deg(deg) for deg in range(self.min_deg, self.max_deg + 1) ] # Get the the start indices for items in each block self.deg_start = cumulative_sum(deg_size) # Get the node indices when they are reset when the degree changes deg_block_indices = [ i - self.deg_start[self.deg_list[i]] for i in range(self.n_atoms) ] # Convert to numpy array self.deg_block_indices = np.array(deg_block_indices) def get_atoms_with_deg(self, deg): """Retrieves atom_features with the specific degree""" start_ind = self.deg_slice[deg - self.min_deg, 0] size = self.deg_slice[deg - self.min_deg, 1] return self.atom_features[start_ind:(start_ind + size), :] def get_num_atoms_with_deg(self, deg): """Returns the number of atoms with the given degree""" return self.deg_slice[deg - self.min_deg, 1] def get_num_atoms(self): return self.n_atoms def _deg_sort(self): """Sorts atoms by degree and reorders internal data structures. Sort the order of the atom_features by degree, maintaining original order whenever two atom_features have the same degree. """ old_ind = range(self.get_num_atoms()) deg_list = self.deg_list new_ind = list(np.lexsort((old_ind, deg_list))) num_atoms = self.get_num_atoms() # Reorder old atom_features self.atom_features = self.atom_features[new_ind, :] # Reorder old deg lists self.deg_list = [self.deg_list[i] for i in new_ind] # Sort membership self.membership = [self.membership[i] for i in new_ind] # Create old to new dictionary. 
not exactly intuitive old_to_new = dict(zip(new_ind, old_ind)) # Reorder adjacency lists self.canon_adj_list = [self.canon_adj_list[i] for i in new_ind] self.canon_adj_list = [[old_to_new[k] for k in self.canon_adj_list[i]] for i in range(len(new_ind))] # Get numpy version of degree list for indexing deg_array = np.array(self.deg_list) # Initialize adj_lists, which supports min_deg = 1 only self.deg_adj_lists = (self.max_deg + 1 - self.min_deg) * [0] # Parse as deg separated for deg in range(self.min_deg, self.max_deg + 1): # Get indices corresponding to the current degree rng = np.array(range(num_atoms)) indices = rng[deg_array == deg] # Extract and save adjacency list for the current degree to_cat = [self.canon_adj_list[i] for i in indices] if len(to_cat) > 0: adj_list = np.vstack([self.canon_adj_list[i] for i in indices]) self.deg_adj_lists[deg - self.min_deg] = adj_list else: self.deg_adj_lists[deg - self.min_deg] = np.zeros( [0, deg], dtype=np.int32) # Construct the slice information deg_slice = np.zeros([self.max_deg + 1 - self.min_deg, 2], dtype=np.int32) for deg in range(self.min_deg, self.max_deg + 1): if deg == 0: deg_size = np.sum(deg_array == deg) else: deg_size = self.deg_adj_lists[deg - self.min_deg].shape[0] deg_slice[deg - self.min_deg, 1] = deg_size # Get the cumulative indices after the first index if deg > self.min_deg: deg_slice[deg - self.min_deg, 0] = ( deg_slice[deg - self.min_deg - 1, 0] + deg_slice[deg - self.min_deg - 1, 1]) # Set indices with zero sized slices to zero to avoid indexing errors deg_slice[:, 0] *= (deg_slice[:, 1] != 0) self.deg_slice = deg_slice def get_atom_features(self): """Returns canonicalized version of atom features. Features are sorted by atom degree, with original order maintained when degrees are same. """ return self.atom_features def get_adjacency_list(self): """Returns a canonicalized adjacency list. Canonicalized means that the atoms are re-ordered by degree. 
Returns ------- list Canonicalized form of adjacency list. """ return self.canon_adj_list def get_deg_adjacency_lists(self): """Returns adjacency lists grouped by atom degree. Returns ------- list Has length (max_deg+1-min_deg). The element at position deg is itself a list of the neighbor-lists for atoms with degree deg. """ return self.deg_adj_lists def get_deg_slice(self): """Returns degree-slice tensor. The deg_slice tensor allows indexing into a flattened version of the molecule's atoms. Assume atoms are sorted in order of degree. Then deg_slice[deg][0] is the starting position for atoms of degree deg in flattened list, and deg_slice[deg][1] is the number of atoms with degree deg. Note deg_slice has shape (max_deg+1-min_deg, 2). Returns ------- deg_slice: np.ndarray Shape (max_deg+1-min_deg, 2) """ return self.deg_slice # TODO(rbharath): Can this be removed? @staticmethod def get_null_mol(n_feat, max_deg=10, min_deg=0): """Constructs a null molecules Get one molecule with one atom of each degree, with all the atoms connected to themselves, and containing n_feat features. Parameters ---------- n_feat : int number of features for the nodes in the null molecule """ # Use random insted of zeros to prevent weird issues with summing to zero atom_features = np.random.uniform(0, 1, [max_deg + 1 - min_deg, n_feat]) canon_adj_list = [ deg * [deg - min_deg] for deg in range(min_deg, max_deg + 1) ] return ConvMol(atom_features, canon_adj_list) @staticmethod def agglomerate_mols(mols, max_deg=10, min_deg=0): """Concatenates list of ConvMol's into one mol object that can be used to feed into tensorflow placeholders. The indexing of the molecules are preseved during the combination, but the indexing of the atoms are greatly changed. 
Parameters ---- mols: list ConvMol objects to be combined into one molecule.""" num_mols = len(mols) atoms_per_mol = [mol.get_num_atoms() for mol in mols] # Get atoms by degree atoms_by_deg = [ mol.get_atoms_with_deg(deg) for deg in range(min_deg, max_deg + 1) for mol in mols ] # stack the atoms all_atoms = np.vstack(atoms_by_deg) # Sort all atoms by degree. # Get the size of each atom list separated by molecule id, then by degree mol_deg_sz = [[mol.get_num_atoms_with_deg(deg) for mol in mols] for deg in range(min_deg, max_deg + 1)] # Get the final size of each degree block deg_sizes = list(map(np.sum, mol_deg_sz)) # Get the index at which each degree starts, not resetting after each degree # And not stopping at any speciic molecule deg_start = cumulative_sum_minus_last(deg_sizes) # Get the tensorflow object required for slicing (deg x 2) matrix, with the # first column telling the start indices of each degree block and the # second colum telling the size of each degree block # Input for tensorflow deg_slice = np.array(list(zip(deg_start, deg_sizes))) # Determines the membership (atom i belongs to membership[i] molecule) membership = [ k for deg in range(min_deg, max_deg + 1) for k in range(num_mols) for i in range(mol_deg_sz[deg][k]) ] # Get the index at which each deg starts, resetting after each degree # (deg x num_mols) matrix describing the start indices when you count up the atoms # in the final representation, stopping at each molecule, # resetting every time the degree changes start_by_deg = np.vstack([cumulative_sum_minus_last(l) for l in mol_deg_sz]) # Gets the degree resetting block indices for the atoms in each molecule # Here, the indices reset when the molecules change, and reset when the # degree changes deg_block_indices = [mol.deg_block_indices for mol in mols] # Get the degree id lookup list. 
It allows us to search for the degree of a # molecule mol_id with corresponding atom mol_atom_id using # deg_id_lists[mol_id,mol_atom_id] deg_id_lists = [mol.deg_id_list for mol in mols] # This is used for convience in the following function (explained below) start_per_mol = deg_start[:, np.newaxis] + start_by_deg def to_final_id(mol_atom_id, mol_id): # Get the degree id (corrected for min_deg) of the considered atom deg_id = deg_id_lists[mol_id][mol_atom_id] # Return the final index of atom mol_atom_id in molecule mol_id. Using # the degree of this atom, must find the index in the molecule's original # degree block corresponding to degree id deg_id (second term), and then # calculate which index this degree block ends up in the final # representation (first term). The sum of the two is the final indexn return start_per_mol[deg_id, mol_id] + deg_block_indices[mol_id][ mol_atom_id] # Initialize the new degree separated adjacency lists deg_adj_lists = [ np.zeros([deg_sizes[deg], deg], dtype=np.int32) for deg in range(min_deg, max_deg + 1) ] # Update the old adjcency lists with the new atom indices and then combine # all together for deg in range(min_deg, max_deg + 1): row = 0 # Initialize counter deg_id = deg - min_deg # Get corresponding degree id # Iterate through all the molecules for mol_id in range(num_mols): # Get the adjacency lists for this molecule and current degree id nbr_list = mols[mol_id].deg_adj_lists[deg_id] # Correct all atom indices to the final indices, and then save the # results into the new adjacency lists for i in range(nbr_list.shape[0]): for j in range(nbr_list.shape[1]): deg_adj_lists[deg_id][row, j] = to_final_id(nbr_list[i, j], mol_id) # Increment once row is done row += 1 # Get the final aggregated molecule concat_mol = MultiConvMol(all_atoms, deg_adj_lists, deg_slice, membership, num_mols) return concat_mol class MultiConvMol(object): """Holds information about multiple molecules, for use in feeding information into tensorflow. 
Generated using the agglomerate_mols function """ def __init__(self, nodes, deg_adj_lists, deg_slice, membership, num_mols): self.nodes = nodes self.deg_adj_lists = deg_adj_lists self.deg_slice = deg_slice self.membership = membership self.num_mols = num_mols self.num_atoms = nodes.shape[0] def get_deg_adjacency_lists(self): return self.deg_adj_lists def get_atom_features(self): return self.nodes def get_num_atoms(self): return self.num_atoms def get_num_molecules(self): return self.num_mols class WeaveMol(object): """Holds information about a molecule Molecule struct used in weave models """ def __init__(self, nodes, pairs): self.nodes = nodes self.pairs = pairs self.num_atoms = self.nodes.shape[0] self.n_features = self.nodes.shape[1] def get_pair_features(self): return self.pairs def get_atom_features(self): return self.nodes def get_num_atoms(self): return self.num_atoms def get_num_features(self): return self.n_features
# -*- coding: utf-8 -*-
"""Tests for ApplicationDriver's data-partitioning behaviour.

Each test initialises an ApplicationDriver with a combination of action
(train/inference), validation settings, and the presence or absence of a
pre-existing dataset-split file, then checks which partitions are available.
"""
from __future__ import absolute_import, print_function

import os
import tensorflow as tf

from niftynet.engine.application_driver import ApplicationDriver
from niftynet.utilities.util_common import ParserNamespace
from tests.niftynet_testcase import NiftyNetTestCase

# Path of the dataset-split CSV file created/removed by these tests.
TARGET_FILE = os.path.join('testing_data', 'test_splitting.csv')


def _generate_base_params():
    """Return the compulsory config sections (SYSTEM/NETWORK/TRAINING/
    INFERENCE/CUSTOM) with values irrelevant to this unit test."""
    # initialise compulsory params that are irrelevant
    # to this unit test
    user_param = dict()
    user_param['SYSTEM'] = ParserNamespace(
        model_dir='./testing_data',
        num_threads=2,
        num_gpus=1,
        cuda_devices='',
        event_handler=None,
        iteration_generator=None)

    user_param['NETWORK'] = ParserNamespace(
        batch_size=20,
        name='tests.toy_application.TinyNet')

    user_param['TRAINING'] = ParserNamespace(
        starting_iter=0,
        max_iter=2,
        save_every_n=2,
        tensorboard_every_n=0,
        max_checkpoints=100)

    user_param['INFERENCE'] = ParserNamespace(
        inference_iter=-1)

    user_param['CUSTOM'] = ParserNamespace(
        name='tests.toy_application.ToyApplication',
        vector_size=100,
        mean=10.0,
        stddev=2.0)
    return user_param


def _generate_data_param():
    """Return two image-source sections pointing at the test data folder."""
    user_param = dict()
    user_param['modality'] = ParserNamespace(
        csv_file=os.path.join('testing_data', 'mod1test.csv'),
        path_to_search='testing_data',
        filename_contains='nii')
    user_param['modality2'] = ParserNamespace(
        csv_file=os.path.join('testing_data', 'mod2test.csv'),
        path_to_search='testing_data',
        filename_contains='nii')
    return user_param


def generate_input_params(**arg_dicts):
    """Return base params with the given per-section overrides applied.

    Keyword names are section names (e.g. SYSTEM, TRAINING); values are dicts
    merged into the corresponding ParserNamespace. Falsy values are ignored.
    """
    user_param = _generate_base_params()
    for key in list(arg_dicts):
        if not arg_dicts[key]:
            continue
        user_param[key].update(**arg_dicts[key])
    return user_param


def clear_target():
    """Remove the dataset-split file if it exists."""
    if not os.path.isfile(TARGET_FILE):
        return
    os.remove(TARGET_FILE)


def write_target():
    """Create a fresh dataset-split file by running a training initialise."""
    clear_target()
    user_param = generate_input_params(
        SYSTEM={'action': 'train',
                'dataset_split_file': TARGET_FILE
                },
        TRAINING={'validation_every_n': 2,
                  'validation_max_iter': 1,
                  'exclude_fraction_for_validation': 0.1,
                  'exclude_fraction_for_inference': 0.1,
                  }
    )
    data_param = _generate_data_param()
    app_driver = ApplicationDriver()
    app_driver.initialise_application(user_param, data_param)
    # initialise_application is expected to write the split file as a side
    # effect when training with non-zero exclusion fractions.
    assert os.path.isfile(TARGET_FILE)
    return


class DriverPartitionerTestExistingFile(NiftyNetTestCase):
    """Partitioner behaviour when the split file already exists on disk."""

    def test_training(self):
        write_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': 2,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.1,
                      'exclude_fraction_for_inference': 0.1,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertTrue(partitioner.has_training)
        self.assertTrue(partitioner.has_inference)
        self.assertTrue(partitioner.has_validation)

    def test_training_no_validation(self):
        write_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': -1,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.0,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        # NOTE(review): all partitions are expected even though validation is
        # disabled — presumably because the existing split file wins; confirm.
        self.assertTrue(partitioner.has_training)
        self.assertTrue(partitioner.has_inference)
        self.assertTrue(partitioner.has_validation)

    def test_inference_no_validation(self):
        write_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'inference',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': -1,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.0,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertTrue(partitioner.has_training)
        self.assertTrue(partitioner.has_inference)
        self.assertTrue(partitioner.has_validation)

    def test_inference_validation(self):
        write_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'inference',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': 10,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.0,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertTrue(partitioner.has_training)
        self.assertTrue(partitioner.has_inference)
        self.assertTrue(partitioner.has_validation)


class DriverPartitionerTestNoFile(NiftyNetTestCase):
    """Partitioner behaviour when no split file exists beforehand."""

    def test_training(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': 2,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.1,
                      'exclude_fraction_for_inference': 0.1,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertTrue(partitioner.has_training)
        self.assertTrue(partitioner.has_inference)
        self.assertTrue(partitioner.has_validation)

    def test_training_no_validation(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': -1,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.0,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertFalse(partitioner.has_training)
        self.assertFalse(partitioner.has_inference)
        self.assertFalse(partitioner.has_validation)
        self.assertTrue(partitioner.all_files is not None)

        # Repeat with a fresh (absent) split file; expectations unchanged.
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': -1,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.0,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        partitioner = app_driver.data_partitioner
        self.assertFalse(partitioner.has_training)
        self.assertFalse(partitioner.has_inference)
        self.assertFalse(partitioner.has_validation)
        self.assertTrue(partitioner.all_files is not None)

    def test_inference(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'inference',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': 1,
                      'validation_max_iter': 1,
                      'exclude_fraction_for_validation': 0.1,
                      'exclude_fraction_for_inference': 0.0,
                      }
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        self.assertTrue(app_driver.data_partitioner is not None)
        # Inference must not create a new split file.
        self.assertFalse(os.path.isfile(TARGET_FILE))
        partitioner = app_driver.data_partitioner
        # Accesses a private attribute deliberately: without a split file the
        # partitioner should have produced no partition ids.
        self.assertTrue(partitioner._partition_ids is None)

    def test_inference_no_validation(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'inference',
                    'dataset_split_file': TARGET_FILE
                    },
        )
        data_param = _generate_data_param()
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, data_param)
        self.assertTrue(app_driver.data_partitioner is not None)
        self.assertFalse(os.path.isfile(TARGET_FILE))
        partitioner = app_driver.data_partitioner
        self.assertTrue(partitioner._partition_ids is None)


class DriverPartitionerTestNoData(NiftyNetTestCase):
    """Partitioner behaviour when no data sections are supplied at all."""

    def test_no_data_param_infer(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'inference',
                    'dataset_split_file': TARGET_FILE
                    }
        )
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, {})
        self.assertTrue(app_driver.data_partitioner is not None)
        self.assertFalse(os.path.isfile(TARGET_FILE))
        partitioner = app_driver.data_partitioner
        self.assertFalse(partitioner.all_files)

    def test_no_data_param_train(self):
        clear_target()
        user_param = generate_input_params(
            SYSTEM={'action': 'train',
                    'dataset_split_file': TARGET_FILE
                    },
            TRAINING={'validation_every_n': -1,
                      'exclude_fraction_for_validation': 0.1,
                      'exclude_fraction_for_inference': 0.1,
                      }
        )
        app_driver = ApplicationDriver()
        app_driver.initialise_application(user_param, {})
        self.assertTrue(app_driver.data_partitioner is not None)
        self.assertFalse(os.path.isfile(TARGET_FILE))
        partitioner = app_driver.data_partitioner
        self.assertFalse(partitioner.all_files)


if __name__ == "__main__":
    tf.test.main()
""" Houdini to lucille RIB exporter. Written by Syoyo Fujita(syoyo@lucillerender.org) """ import os, sys import re import math ribnodelist = {} def vnormalize(v): d2 = v[0] * v[0] + v[1] * v[1] + v[2] * v[2] d = math.sqrt(d2) if (d > 1.0e-12): invd = 1.0 / d v[0] *= invd v[1] *= invd v[2] *= invd def vcross(a, b): v = [0.0, 0.0, 0.0] v[0] = a[1] * b[2] - b[1] * a[2] v[1] = a[2] * b[0] - b[2] * a[0] v[2] = a[0] * b[1] - b[0] * a[1] return v def ortho(n): w = [0.0, 0.0, 0.0] if (n[0] < 0.6 and n[0] > -0.6): w[0] = 1.0 elif (n[1] < 0.6 and n[1] > -0.6): w[1] = 1.0 elif (n[2] < 0.6 and n[2] > -0.6): w[2] = 1.0 else: w[0] = 1.0 u = vcross(w, n) vnormalize(u) v = vcross(n, u) vnormalize(v) return (u, v, n) def matmul(m, x, y, z): w = [0.0, 0.0, 0.0] w[0] = m[0][0] * x + m[1][0] * y + m[2][0] * z w[1] = m[0][1] * x + m[1][1] * y + m[2][1] * z w[2] = m[0][2] * x + m[1][2] * y + m[2][2] * z return w # # Create a tube geometry from control points. # def emit_fur_as_tube_polygon(prim): ndiv = 8 n = prim.numVertices() if prim.type() != hou.primType.Polygon: return None if n < 2: return None s = "" s += "# seg = %d\n" % n s += "PointsPolygons " # Create ndiv * (n-1) faces(quads) # build face(quad polygon) s += " [ " for j in range(ndiv*(n-1)): s += "4 " s += " ] " tubes = [] for i in range(n-1): v1 = prim.vertex(i+1).point().position() v0 = prim.vertex(i).point().position() width = 2.0 * prim.vertex(i).attribValue("width") axis = [ v1[0] - v0[0] , v1[1] - v0[1] , v1[2] - v0[2] ] vnormalize(axis) basis = ortho(axis) topPts = [] bottomPts = [] for j in range(ndiv): x = width * math.cos( 2.0 * math.pi * (j / float(ndiv)) ) y = width * math.sin( 2.0 * math.pi * (j / float(ndiv)) ) z = 0.0 p = matmul(basis, x, y, z) bp = [ p[0] + v0[0] , p[1] + v0[1] , p[2] + v0[2] ] bottomPts.append(bp) tp = [ p[0] + v1[0] , p[1] + v1[1] , p[2] + v1[2] ] topPts.append(tp) tubes.append((bottomPts, topPts)) # emit indices s += " [ " for i in range(n-1): offt = ndiv * i for j in range(ndiv-1): s += 
"%d %d %d %d " % (offt + j, offt + j + ndiv, offt + j + 1 + ndiv, offt + j + 1) s += "%d %d %d %d " % (offt + (ndiv-1), offt + (ndiv-1) + ndiv, offt + ndiv, offt) s += " ] " s += " \"P\" [ " # emit verts for tube in tubes: bp = tube[0] for p in bp: s += " %f %f %f " % (p[0], p[1], p[2]) tp = tube[1] for p in tp: s += " %f %f %f " % (p[0], p[1], p[2]) s += " ] " # print s return s def emit_header(): """ Emits RIB header string """ s = "" s += "# RenderMan RIB-Structure 1.1\n" s += "# Exported by htol, Houdini to lucille RIB exporter by Syoyo Fujita\n" s += "Display \"untitled.hdr\" \"framebuffer\" \"rgb\"\n" return s def emit_footer(): """ Emits RIB footer string """ s = "" s += "# RIB End\n" return s def visit_camera(cam): print "[htol] ==> Exporining camera : ", cam.path() # print cam.path() """ Camera matrix should be inverted, and should apply rh -> lh conversion. """ xform = cam.parmTransform().inverted() #xform = cam.parmTransform() m = xform.asTuple() xm = [] for i in range(16): xm.append(m[i]) fov = 45.0 # FIXME: calculate fov from camera object. s = "" s += "PixelSamples 2 2\n" s += "Shutter 0.0 1.0\n" s += "Projection \"perspective\" \"fov\" [" + str(fov) + "]\n" s += "Orientation \"rh\"\n" # Houdini employs reft hand coord. s += "ConcatTransform [" for i in range(16): s += "%f " % xm[i] s += "]\n" # print s return s def visit_fur(obj): # # Filter out non-geometry node. # if not hasattr(obj, "displayNode"): print "[htol] Skipped object ", obj return if obj.displayNode() is None: print "[htol] Skipped object ", obj return # obj node has parmTransform method? xform = None if hasattr(obj, "parmTransform"): xform = obj.parmTransform() else: print "[htol] Skipped object ", obj return geo = obj.displayNode().geometry() assert geo is not None s = "" # emit xform m = xform.asTuple() s += "# Fur. 
prims = %d\n" % (len(geo.prims())) s += "AttributeBegin\n" s += "Transform [ " for i in range(16): s += "%f " % m[i] s += " ]\n" for (fid, prim) in enumerate(geo.prims()): if prim.type() == hou.primType.Mesh: continue # skip if not isinstance(prim, hou.Polygon): print "[htol] Warn: [%s] is not a polygonal object, skipping export" % obj.path() return # Fur is represented as an control points + width ss = emit_fur_as_tube_polygon(prim) if ss is not None: s += ss s += "\nAttributeEnd\n" return s def visit_object(obj): print "[htol] ==> Exporing ", obj.path() # # Filter out non-geometry node. # if not hasattr(obj, "displayNode"): print "[htol] Skipped object ", obj return None if obj.displayNode() is None: print "[htol] Skipped object ", obj return None if not hasattr(obj.displayNode(), "geometry"): print "[htol] Skipped object ", obj return None # obj node has parmTransform method? xform = None if hasattr(obj, "parmTransform"): xform = obj.parmTransform() else: print "[htol] Skipped object ", obj return None geo = obj.displayNode().geometry() hasP = True # This should be always true hasN = False # Geometry has per vertex normal? if not hasattr(geo, "primAttribs"): print "[htol] Skipped object ", obj return None # # Fur obj? 
# for primAttrib in geo.primAttribs(): if primAttrib.name() == "furdensity": # Delegate the process to visit_fur() return visit_fur(obj) for attrib in geo.pointAttribs(): # print "[DBG] ", (attrib.name(), attrib.dataType(), attrib.size()) if attrib.name() == "N" and attrib.size() == 3: hasN = True s = "" # emit xform m = xform.asTuple() s += "AttributeBegin\n" s += "Transform [ " for i in range(16): s += "%f " % m[i] s += " ]\n" s += "PointsPolygons [" # emit face for (fid, prim) in enumerate(geo.prims()): if prim.type() == hou.primType.Mesh: continue # skip if not isinstance(prim, hou.Polygon): print "[htol] Warn: [%s] is not a polygonal object, skipping export" % obj.path() return s += "%d " % prim.numVertices() s += " ] [ " # Extract polygonal primitives. polyPrims = [prim for prim in geo.prims() if prim.type() == hou.primType.Polygon] # emit face idx vsum = 0 for (fid, prim) in enumerate(polyPrims): for vid in xrange(prim.numVertices()): s += "%d " % (vsum + vid) vsum += prim.numVertices() s += "] \"P\" [ " # emit positions for (fid, prim) in enumerate(polyPrims): for vid in xrange(prim.numVertices()): # get vertex v = prim.vertex(vid) p = v.point().position() s += "%f %f %f " % (p[0], p[1], p[2]) # x,y and z s += "]" if hasN: # emit normals s += " \"N\" [" # emit positions for (fid, prim) in enumerate(polyPrims): for vid in xrange(prim.numVertices()): # get vertex v = prim.vertex(vid) n = v.point().attribValue("N") s += "%f %f %f " % (n[0], n[1], n[2]) # x,y and z s += "]" s += "\nAttributeEnd\n" # print s print " Exported %d vertices." % vsum return s # for point in geo.points(): # print "[DBG] ", point.position() # print def walk_tree(node, indent=0): """ Recursively walk through houdini nodes and emit geometry data to RIB. """ global ribnodelist cam_re = re.compile("cam\d") obj_re = re.compile("/obj/") s = "" for child in node.children(): if obj_re.search(child.path()): # camera node? 
if (cam_re.match(child.name())): s = visit_camera(child) if s is not None: ribnodelist["camera"] = s else: s = visit_object(child) if s is not None: ribnodelist[child.name()] = s # print " " * indent + child.name() + " (" + str(type(child)) + ")" walk_tree(child, indent + 3) def export(node, ribname): """ Export geometry in the houdini world into RIB file. - node : Root node to export the data. e.g. hou.node("/") - ribname : Name of the RIB file. """ print "[htol] Exporing the scene..." f = open(ribname, "w") print >>f, emit_header() walk_tree(node) if not ribnodelist.has_key("camera"): print "[htol] No camera was found!" else: print >>f, ribnodelist["camera"] del ribnodelist["camera"] print >>f, "WorldBegin" for (k, v) in ribnodelist.items(): print >>f, v print >>f, "WorldEnd" print >>f, emit_footer() f.close() print "[htol] Expored the scene" # For testing the exporter... if __name__ == '__main__': import hou ribfile = os.path.join(os.environ["HOME"], "untitled.rib") export(hou.node("/"), ribfile)
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Nose-style generator tests for nipype's FSL diffusion (dti) interfaces.

Each ``test_*`` function yields ``(assert_func, *args)`` tuples, the
generator-test convention used by nose (see the ``nosetests`` hint below).
Most tests only check the command line an interface would build; tests
decorated with ``@skipif(skip_dti_tests)`` are currently always skipped.
"""
import os
import tempfile
import shutil
from tempfile import mkdtemp
from shutil import rmtree

import numpy as np
import nibabel as nb

from nipype.testing import (assert_equal, assert_not_equal,
                            assert_raises, skipif, example_data)
import nipype.interfaces.fsl.dti as fsl
from nipype.interfaces.fsl import Info, no_fsl
from nipype.interfaces.base import Undefined

# nosetests --with-doctest path_to/test_fsl.py


def skip_dti_tests():
    """XXX These tests are skipped until we clean up some of this code
    """
    # Always True: used as the condition for @skipif on the legacy tests.
    return True


def create_files_in_directory():
    """Create two small 4D NIfTI files in a fresh temp dir and chdir into it.

    Returns:
        (filelist, outdir, cwd): the created file names, the temp directory,
        and the previous working directory (so callers can restore it via
        ``clean_directory``).
    """
    outdir = os.path.realpath(mkdtemp())
    cwd = os.getcwd()
    os.chdir(outdir)
    filelist = ['a.nii', 'b.nii']
    for f in filelist:
        hdr = nb.Nifti1Header()
        shape = (3, 3, 3, 4)
        hdr.set_data_shape(shape)
        # Random voxel data with an identity affine is enough for
        # command-line construction tests; content is never inspected.
        img = np.random.random(shape)
        nb.save(nb.Nifti1Image(img, np.eye(4), hdr),
                os.path.join(outdir, f))
    return filelist, outdir, cwd


def clean_directory(outdir, old_wd):
    """Remove the temp directory created above and restore the old cwd."""
    if os.path.exists(outdir):
        rmtree(outdir)
    os.chdir(old_wd)


# test dtifit
@skipif(no_fsl)
def test_dtifit2():
    """Check DTIFit's command name, mandatory-arg error, and cmdline."""
    filelist, outdir, cwd = create_files_in_directory()
    dti = fsl.DTIFit()

    # make sure command gets called
    yield assert_equal, dti.cmd, 'dtifit'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, dti.run

    # .inputs based parameters setting
    dti.inputs.dwi = filelist[0]
    dti.inputs.base_name = 'foo.dti.nii'
    dti.inputs.mask = filelist[1]
    dti.inputs.bvecs = filelist[0]
    dti.inputs.bvals = filelist[1]
    dti.inputs.min_z = 10
    dti.inputs.max_z = 50

    yield assert_equal, dti.cmdline, \
        'dtifit -k %s -o foo.dti.nii -m %s -r %s -b %s -Z 50 -z 10' % (
            filelist[0], filelist[1], filelist[0], filelist[1])

    clean_directory(outdir, cwd)


# Globals to store paths for tbss tests
tbss_dir = None
test_dir = None


def setup_tbss():
    # Setup function is called before each test.  Setup is called only
    # once for each generator function.
    # NOTE(review): ``tbss_files`` has no module-level default above; it is
    # created here solely via the ``global`` statement — confirm intended.
    global tbss_dir, tbss_files, test_dir
    test_dir = os.getcwd()
    tbss_dir = tempfile.mkdtemp()
    os.chdir(tbss_dir)
    tbss_files = ['a.nii', 'b.nii']
    # Placeholder files only; their contents are never read by the tests.
    for f in tbss_files:
        fp = open(f, 'wt')
        fp.write('dummy')
        fp.close()


def teardown_tbss():
    # Teardown is called after each test to perform cleanup
    os.chdir(test_dir)
    shutil.rmtree(tbss_dir)


@skipif(skip_dti_tests)
def test_randomise2():
    """Exercise Randomise: cmd name, mandatory args, and each opt_map flag."""
    rand = fsl.Randomise()

    # make sure command gets called
    yield assert_equal, rand.cmd, 'randomise'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, rand.run

    # .inputs based parameters setting
    rand.inputs.input_4D = 'infile.nii'
    rand.inputs.output_rootname = 'outfile'
    rand.inputs.design_matrix = 'design.mat'
    rand.inputs.t_contrast = 'infile.con'

    # Sort tokens so argument ordering differences don't fail the comparison.
    actualCmdline = sorted(rand.cmdline.split())
    cmd = 'randomise -i infile.nii -o outfile -d design.mat -t infile.con'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline

    # .run based parameter setting
    rand2 = fsl.Randomise(input_4D='infile2',
                          output_rootname='outfile2',
                          f_contrast='infile.f',
                          one_sample_gmean=True,
                          int_seed=4)

    actualCmdline = sorted(rand2.cmdline.split())
    cmd = 'randomise -i infile2 -o outfile2 -1 -f infile.f --seed=4'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline

    rand3 = fsl.Randomise()
    results = rand3.run(input_4D='infile3',
                        output_rootname='outfile3')
    yield assert_equal, results.runtime.cmdline, \
        'randomise -i infile3 -o outfile3'

    # test arguments for opt_map
    # Maps interface input name -> (expected flag text, value to set).
    opt_map = {'demean_data': ('-D', True),
               'one_sample_gmean': ('-1', True),
               'mask_image': ('-m inp_mask', 'inp_mask'),
               'design_matrix': ('-d design.mat', 'design.mat'),
               't_contrast': ('-t input.con', 'input.con'),
               'f_contrast': ('-f input.fts', 'input.fts'),
               'xchange_block_labels': ('-e design.grp', 'design.grp'),
               'print_unique_perm': ('-q', True),
               'print_info_parallelMode': ('-Q', True),
               'num_permutations': ('-n 10', 10),
               'vox_pvalus': ('-x', True),
               'fstats_only': ('--fonly', True),
               'thresh_free_cluster': ('-T', True),
               'thresh_free_cluster_2Dopt': ('--T2', True),
               'cluster_thresholding': ('-c 0.20', 0.20),
               'cluster_mass_thresholding': ('-C 0.40', 0.40),
               'fcluster_thresholding': ('-F 0.10', 0.10),
               'fcluster_mass_thresholding': ('-S 0.30', 0.30),
               'variance_smoothing': ('-v 0.20', 0.20),
               'diagnostics_off': ('--quiet', True),
               'output_raw': ('-R', True),
               'output_perm_vect': ('-P', True),
               'int_seed': ('--seed=20', 20),
               'TFCE_height_param': ('--tfce_H=0.11', 0.11),
               'TFCE_extent_param': ('--tfce_E=0.50', 0.50),
               'TFCE_connectivity': ('--tfce_C=0.30', 0.30),
               'list_num_voxel_EVs_pos': ('--vxl=1,2,3,4', '1,2,3,4'),
               'list_img_voxel_EVs': ('--vxf=6,7,8,9,3', '6,7,8,9,3')}

    for name, settings in list(opt_map.items()):
        rand4 = fsl.Randomise(input_4D='infile', output_rootname='root',
                              **{name: settings[1]})
        yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \
            + settings[0]


@skipif(skip_dti_tests)
def test_Randomise_parallel():
    """Same checks as test_randomise2, for the randomise_parallel wrapper."""
    rand = fsl.Randomise_parallel()

    # make sure command gets called
    yield assert_equal, rand.cmd, 'randomise_parallel'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, rand.run

    # .inputs based parameters setting
    rand.inputs.input_4D = 'infile.nii'
    rand.inputs.output_rootname = 'outfile'
    rand.inputs.design_matrix = 'design.mat'
    rand.inputs.t_contrast = 'infile.con'

    actualCmdline = sorted(rand.cmdline.split())
    cmd = ('randomise_parallel -i infile.nii -o outfile -d design.mat -t '
           'infile.con')
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline

    # .run based parameter setting
    rand2 = fsl.Randomise_parallel(input_4D='infile2',
                                   output_rootname='outfile2',
                                   f_contrast='infile.f',
                                   one_sample_gmean=True,
                                   int_seed=4)

    actualCmdline = sorted(rand2.cmdline.split())
    cmd = 'randomise_parallel -i infile2 -o outfile2 -1 -f infile.f --seed=4'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline

    rand3 = fsl.Randomise_parallel()
    results = rand3.run(input_4D='infile3',
                        output_rootname='outfile3')
    yield assert_equal, results.runtime.cmdline, \
        'randomise_parallel -i infile3 -o outfile3'

    # test arguments for opt_map
    # NOTE(review): unlike test_randomise2, the two list-valued entries here
    # pass repr() of a Python list rather than a comma string — confirm this
    # matches the interface's expected formatting.
    opt_map = {'demean_data': ('-D', True),
               'one_sample_gmean': ('-1', True),
               'mask_image': ('-m inp_mask', 'inp_mask'),
               'design_matrix': ('-d design.mat', 'design.mat'),
               't_contrast': ('-t input.con', 'input.con'),
               'f_contrast': ('-f input.fts', 'input.fts'),
               'xchange_block_labels': ('-e design.grp', 'design.grp'),
               'print_unique_perm': ('-q', True),
               'print_info_parallelMode': ('-Q', True),
               'num_permutations': ('-n 10', 10),
               'vox_pvalus': ('-x', True),
               'fstats_only': ('--fonly', True),
               'thresh_free_cluster': ('-T', True),
               'thresh_free_cluster_2Dopt': ('--T2', True),
               'cluster_thresholding': ('-c 0.20', 0.20),
               'cluster_mass_thresholding': ('-C 0.40', 0.40),
               'fcluster_thresholding': ('-F 0.10', 0.10),
               'fcluster_mass_thresholding': ('-S 0.30', 0.30),
               'variance_smoothing': ('-v 0.20', 0.20),
               'diagnostics_off': ('--quiet', True),
               'output_raw': ('-R', True),
               'output_perm_vect': ('-P', True),
               'int_seed': ('--seed=20', 20),
               'TFCE_height_param': ('--tfce_H=0.11', 0.11),
               'TFCE_extent_param': ('--tfce_E=0.50', 0.50),
               'TFCE_connectivity': ('--tfce_C=0.30', 0.30),
               'list_num_voxel_EVs_pos': ('--vxl=' + repr([1, 2, 3, 4]),
                                          repr([1, 2, 3, 4])),
               'list_img_voxel_EVs': ('--vxf=' + repr([6, 7, 8, 9, 3]),
                                      repr([6, 7, 8, 9, 3]))}

    for name, settings in list(opt_map.items()):
        rand4 = fsl.Randomise_parallel(input_4D='infile',
                                       output_rootname='root',
                                       **{name: settings[1]})
        yield assert_equal, rand4.cmdline, rand4.cmd + ' -i infile -o root ' \
            + settings[0]


# test proj_thresh
@skipif(skip_dti_tests)
def test_Proj_thresh():
    """Exercise ProjThresh: positional volumes followed by the threshold."""
    proj = fsl.ProjThresh()

    # make sure command gets called
    yield assert_equal, proj.cmd, 'proj_thresh'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, proj.run

    # .inputs based parameters setting
    proj.inputs.volumes = ['vol1', 'vol2', 'vol3']
    proj.inputs.threshold = 3
    yield assert_equal, proj.cmdline, 'proj_thresh vol1 vol2 vol3 3'

    proj2 = fsl.ProjThresh(threshold=10, volumes=['vola', 'volb'])
    yield assert_equal, proj2.cmdline, 'proj_thresh vola volb 10'

    # .run based parameters setting
    proj3 = fsl.ProjThresh()
    results = proj3.run(volumes=['inp1', 'inp3', 'inp2'], threshold=2)
    yield assert_equal, results.runtime.cmdline, 'proj_thresh inp1 inp3 inp2 2'
    # A nonzero return code is expected: the input volumes don't exist.
    yield assert_not_equal, results.runtime.returncode, 0
    yield assert_equal, isinstance(results.interface.inputs.volumes, list), \
        True
    yield assert_equal, results.interface.inputs.threshold, 2

    # test arguments for opt_map
    # Proj_thresh doesn't have an opt_map{}


# test vec_reg
@skipif(skip_dti_tests)
def test_Vec_reg():
    """Exercise VecReg: -i/-o/-r/-t core args plus each opt_map flag."""
    vrg = fsl.VecReg()

    # make sure command gets called
    yield assert_equal, vrg.cmd, 'vecreg'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, vrg.run

    # .inputs based parameters setting
    vrg.inputs.infile = 'infile'
    vrg.inputs.outfile = 'outfile'
    vrg.inputs.refVolName = 'MNI152'
    vrg.inputs.affineTmat = 'tmat.mat'
    yield assert_equal, vrg.cmdline, \
        'vecreg -i infile -o outfile -r MNI152 -t tmat.mat'

    # .run based parameter setting
    vrg2 = fsl.VecReg(infile='infile2',
                      outfile='outfile2',
                      refVolName='MNI152',
                      affineTmat='tmat2.mat',
                      brainMask='nodif_brain_mask')

    actualCmdline = sorted(vrg2.cmdline.split())
    cmd = 'vecreg -i infile2 -o outfile2 -r MNI152 -t tmat2.mat -m nodif_brain_mask'
    desiredCmdline = sorted(cmd.split())
    yield assert_equal, actualCmdline, desiredCmdline

    vrg3 = fsl.VecReg()
    results = vrg3.run(infile='infile3',
                       outfile='outfile3',
                       refVolName='MNI152',
                       affineTmat='tmat3.mat',)
    yield assert_equal, results.runtime.cmdline, \
        'vecreg -i infile3 -o outfile3 -r MNI152 -t tmat3.mat'
    # Nonzero return code expected: the input files don't exist.
    yield assert_not_equal, results.runtime.returncode, 0
    yield assert_equal, results.interface.inputs.infile, 'infile3'
    yield assert_equal, results.interface.inputs.outfile, 'outfile3'
    yield assert_equal, results.interface.inputs.refVolName, 'MNI152'
    yield assert_equal, results.interface.inputs.affineTmat, 'tmat3.mat'

    # test arguments for opt_map
    opt_map = {'verbose': ('-v', True),
               'helpDoc': ('-h', True),
               'tensor': ('--tensor', True),
               'affineTmat': ('-t Tmat', 'Tmat'),
               'warpFile': ('-w wrpFile', 'wrpFile'),
               'interpolation': ('--interp=sinc', 'sinc'),
               'brainMask': ('-m mask', 'mask')}

    for name, settings in list(opt_map.items()):
        vrg4 = fsl.VecReg(infile='infile', outfile='outfile',
                          refVolName='MNI152', **{name: settings[1]})
        yield assert_equal, vrg4.cmdline, vrg4.cmd + \
            ' -i infile -o outfile -r MNI152 ' + settings[0]


# test find_the_biggest
@skipif(skip_dti_tests)
def test_Find_the_biggest():
    """Exercise FindTheBiggest: positional input glob and output file."""
    fbg = fsl.FindTheBiggest()

    # make sure command gets called
    yield assert_equal, fbg.cmd, 'find_the_biggest'

    # test raising error with mandatory args absent
    yield assert_raises, ValueError, fbg.run

    # .inputs based parameters setting
    fbg.inputs.infiles = 'seed*'
    fbg.inputs.outfile = 'fbgfile'
    yield assert_equal, fbg.cmdline, 'find_the_biggest seed* fbgfile'

    fbg2 = fsl.FindTheBiggest(infiles='seed2*', outfile='fbgfile2')
    yield assert_equal, fbg2.cmdline, 'find_the_biggest seed2* fbgfile2'

    # .run based parameters setting
    fbg3 = fsl.FindTheBiggest()
    results = fbg3.run(infiles='seed3', outfile='out3')
    yield assert_equal, results.runtime.cmdline, 'find_the_biggest seed3 out3'

    # test arguments for opt_map
    # Find_the_biggest doesn't have an opt_map{}


@skipif(no_fsl)
def test_tbss_skeleton():
    """Exercise TractSkeleton in skeleton mode and projection mode."""
    skeletor = fsl.TractSkeleton()

    files, newdir, olddir = create_files_in_directory()

    # Test the underlying command
    yield assert_equal, skeletor.cmd, "tbss_skeleton"

    # It shouldn't run yet
    yield assert_raises, ValueError, skeletor.run

    # Test the most basic way to use it
    skeletor.inputs.in_file = files[0]

    # First by implicit argument
    skeletor.inputs.skeleton_file = True
    yield assert_equal, skeletor.cmdline, \
        "tbss_skeleton -i a.nii -o %s" % os.path.join(newdir,
                                                      "a_skeleton.nii")

    # Now with a specific name
    skeletor.inputs.skeleton_file = "old_boney.nii"
    yield assert_equal, skeletor.cmdline, \
        "tbss_skeleton -i a.nii -o old_boney.nii"

    # Now test the more complicated usage
    bones = fsl.TractSkeleton(in_file="a.nii", project_data=True)

    # This should error
    yield assert_raises, ValueError, bones.run

    # But we can set what we need
    bones.inputs.threshold = 0.2
    bones.inputs.distance_map = "b.nii"
    bones.inputs.data_file = "b.nii"  # Even though that's silly

    # Now we get a command line
    # Default search mask is FSL's bundled LowerCingulum image.
    yield assert_equal, bones.cmdline, \
        "tbss_skeleton -i a.nii -p 0.200 b.nii %s b.nii %s" % (
            Info.standard_image("LowerCingulum_1mm.nii.gz"),
            os.path.join(newdir, "b_skeletonised.nii"))

    # Can we specify a mask?
    bones.inputs.use_cingulum_mask = Undefined
    bones.inputs.search_mask_file = "a.nii"
    yield assert_equal, bones.cmdline, \
        "tbss_skeleton -i a.nii -p 0.200 b.nii a.nii b.nii %s" % \
        os.path.join(newdir, "b_skeletonised.nii")

    # Looks good; clean up
    clean_directory(newdir, olddir)


@skipif(no_fsl)
def test_distancemap():
    """Exercise DistanceMap: default output name and local-maxima options."""
    mapper = fsl.DistanceMap()

    files, newdir, olddir = create_files_in_directory()

    # Test the underlying command
    yield assert_equal, mapper.cmd, "distancemap"

    # It shouldn't run yet
    yield assert_raises, ValueError, mapper.run

    # But if we do this...
    mapper.inputs.in_file = "a.nii"

    # It should
    yield assert_equal, mapper.cmdline, \
        "distancemap --out=%s --in=a.nii" % os.path.join(newdir,
                                                         "a_dstmap.nii")

    # And we should be able to write out a maxima map
    mapper.inputs.local_max_file = True
    yield assert_equal, mapper.cmdline, \
        "distancemap --out=%s --in=a.nii --localmax=%s" % (
            os.path.join(newdir, "a_dstmap.nii"),
            os.path.join(newdir, "a_lclmax.nii"))

    # And call it whatever we want
    mapper.inputs.local_max_file = "max.nii"
    yield assert_equal, mapper.cmdline, \
        "distancemap --out=%s --in=a.nii --localmax=max.nii" % \
        os.path.join(newdir, "a_dstmap.nii")

    # Not much else to do here
    clean_directory(newdir, olddir)
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.container_v1.types import cluster_service from google.protobuf import empty_pb2 # type: ignore from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .grpc import ClusterManagerGrpcTransport class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): """gRPC AsyncIO backend transport for ClusterManager. Google Kubernetes Engine Cluster Manager v1 This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. 
""" _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "container.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. 
""" return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "container.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. 
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. 
if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: """Create the channel designed to connect to this service. This property caches on the instance; repeated calls return the same channel. """ # Return the channel from cache. return self._grpc_channel @property def list_clusters( self, ) -> Callable[ [cluster_service.ListClustersRequest], Awaitable[cluster_service.ListClustersResponse], ]: r"""Return a callable for the list clusters method over gRPC. Lists all clusters owned by a project in either the specified zone or all zones. Returns: Callable[[~.ListClustersRequest], Awaitable[~.ListClustersResponse]]: A function that, when called, will call the underlying RPC on the server. 
""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: self._stubs["list_clusters"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/ListClusters", request_serializer=cluster_service.ListClustersRequest.serialize, response_deserializer=cluster_service.ListClustersResponse.deserialize, ) return self._stubs["list_clusters"] @property def get_cluster( self, ) -> Callable[ [cluster_service.GetClusterRequest], Awaitable[cluster_service.Cluster] ]: r"""Return a callable for the get cluster method over gRPC. Gets the details of a specific cluster. Returns: Callable[[~.GetClusterRequest], Awaitable[~.Cluster]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: self._stubs["get_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/GetCluster", request_serializer=cluster_service.GetClusterRequest.serialize, response_deserializer=cluster_service.Cluster.deserialize, ) return self._stubs["get_cluster"] @property def create_cluster( self, ) -> Callable[ [cluster_service.CreateClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the create cluster method over gRPC. Creates a cluster, consisting of the specified number and type of Google Compute Engine instances. By default, the cluster is created in the project's `default network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__. One firewall is added for the cluster. 
After cluster creation, the Kubelet creates routes for each node to allow the containers on that node to communicate with all other instances in the cluster. Finally, an entry is added to the project's global metadata indicating which CIDR range the cluster is using. Returns: Callable[[~.CreateClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: self._stubs["create_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/CreateCluster", request_serializer=cluster_service.CreateClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["create_cluster"] @property def update_cluster( self, ) -> Callable[ [cluster_service.UpdateClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update cluster method over gRPC. Updates the settings of a specific cluster. Returns: Callable[[~.UpdateClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_cluster" not in self._stubs: self._stubs["update_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/UpdateCluster", request_serializer=cluster_service.UpdateClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_cluster"] @property def update_node_pool( self, ) -> Callable[ [cluster_service.UpdateNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update node pool method over gRPC. Updates the version and/or image type for the specified node pool. Returns: Callable[[~.UpdateNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_node_pool" not in self._stubs: self._stubs["update_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/UpdateNodePool", request_serializer=cluster_service.UpdateNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_node_pool"] @property def set_node_pool_autoscaling( self, ) -> Callable[ [cluster_service.SetNodePoolAutoscalingRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set node pool autoscaling method over gRPC. Sets the autoscaling settings for the specified node pool. Returns: Callable[[~.SetNodePoolAutoscalingRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_node_pool_autoscaling" not in self._stubs: self._stubs["set_node_pool_autoscaling"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetNodePoolAutoscaling", request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_autoscaling"] @property def set_logging_service( self, ) -> Callable[ [cluster_service.SetLoggingServiceRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set logging service method over gRPC. Sets the logging service for a specific cluster. Returns: Callable[[~.SetLoggingServiceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_logging_service" not in self._stubs: self._stubs["set_logging_service"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetLoggingService", request_serializer=cluster_service.SetLoggingServiceRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_logging_service"] @property def set_monitoring_service( self, ) -> Callable[ [cluster_service.SetMonitoringServiceRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set monitoring service method over gRPC. Sets the monitoring service for a specific cluster. Returns: Callable[[~.SetMonitoringServiceRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_monitoring_service" not in self._stubs: self._stubs["set_monitoring_service"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetMonitoringService", request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_monitoring_service"] @property def set_addons_config( self, ) -> Callable[ [cluster_service.SetAddonsConfigRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set addons config method over gRPC. Sets the addons for a specific cluster. Returns: Callable[[~.SetAddonsConfigRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_addons_config" not in self._stubs: self._stubs["set_addons_config"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetAddonsConfig", request_serializer=cluster_service.SetAddonsConfigRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_addons_config"] @property def set_locations( self, ) -> Callable[ [cluster_service.SetLocationsRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set locations method over gRPC. Sets the locations for a specific cluster. Deprecated. Use `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__ instead. Returns: Callable[[~.SetLocationsRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_locations" not in self._stubs: self._stubs["set_locations"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetLocations", request_serializer=cluster_service.SetLocationsRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_locations"] @property def update_master( self, ) -> Callable[ [cluster_service.UpdateMasterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the update master method over gRPC. Updates the master for a specific cluster. Returns: Callable[[~.UpdateMasterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_master" not in self._stubs: self._stubs["update_master"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/UpdateMaster", request_serializer=cluster_service.UpdateMasterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["update_master"] @property def set_master_auth( self, ) -> Callable[ [cluster_service.SetMasterAuthRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set master auth method over gRPC. Sets master auth materials. Currently supports changing the admin password or a specific cluster, either via password generation or explicitly setting the password. Returns: Callable[[~.SetMasterAuthRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_master_auth" not in self._stubs: self._stubs["set_master_auth"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetMasterAuth", request_serializer=cluster_service.SetMasterAuthRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_master_auth"] @property def delete_cluster( self, ) -> Callable[ [cluster_service.DeleteClusterRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the delete cluster method over gRPC. Deletes the cluster, including the Kubernetes endpoint and all worker nodes. Firewalls and routes that were configured during cluster creation are also deleted. Other Google Compute Engine resources that might be in use by the cluster, such as load balancer resources, are not deleted if they weren't present when the cluster was initially created. Returns: Callable[[~.DeleteClusterRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/DeleteCluster", request_serializer=cluster_service.DeleteClusterRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["delete_cluster"] @property def list_operations( self, ) -> Callable[ [cluster_service.ListOperationsRequest], Awaitable[cluster_service.ListOperationsResponse], ]: r"""Return a callable for the list operations method over gRPC. Lists all operations in a project in a specific zone or all zones. Returns: Callable[[~.ListOperationsRequest], Awaitable[~.ListOperationsResponse]]: A function that, when called, will call the underlying RPC on the server. 
""" # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_operations" not in self._stubs: self._stubs["list_operations"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/ListOperations", request_serializer=cluster_service.ListOperationsRequest.serialize, response_deserializer=cluster_service.ListOperationsResponse.deserialize, ) return self._stubs["list_operations"] @property def get_operation( self, ) -> Callable[ [cluster_service.GetOperationRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the get operation method over gRPC. Gets the specified operation. Returns: Callable[[~.GetOperationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_operation" not in self._stubs: self._stubs["get_operation"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/GetOperation", request_serializer=cluster_service.GetOperationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["get_operation"] @property def cancel_operation( self, ) -> Callable[[cluster_service.CancelOperationRequest], Awaitable[empty_pb2.Empty]]: r"""Return a callable for the cancel operation method over gRPC. Cancels the specified operation. Returns: Callable[[~.CancelOperationRequest], Awaitable[~.Empty]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "cancel_operation" not in self._stubs: self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/CancelOperation", request_serializer=cluster_service.CancelOperationRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, ) return self._stubs["cancel_operation"] @property def get_server_config( self, ) -> Callable[ [cluster_service.GetServerConfigRequest], Awaitable[cluster_service.ServerConfig], ]: r"""Return a callable for the get server config method over gRPC. Returns configuration info about the Google Kubernetes Engine service. Returns: Callable[[~.GetServerConfigRequest], Awaitable[~.ServerConfig]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_server_config" not in self._stubs: self._stubs["get_server_config"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/GetServerConfig", request_serializer=cluster_service.GetServerConfigRequest.serialize, response_deserializer=cluster_service.ServerConfig.deserialize, ) return self._stubs["get_server_config"] @property def get_json_web_keys( self, ) -> Callable[ [cluster_service.GetJSONWebKeysRequest], Awaitable[cluster_service.GetJSONWebKeysResponse], ]: r"""Return a callable for the get json web keys method over gRPC. Gets the public component of the cluster signing keys in JSON Web Key format. This API is not yet intended for general use, and is not available for all clusters. Returns: Callable[[~.GetJSONWebKeysRequest], Awaitable[~.GetJSONWebKeysResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_json_web_keys" not in self._stubs: self._stubs["get_json_web_keys"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/GetJSONWebKeys", request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, ) return self._stubs["get_json_web_keys"] @property def list_node_pools( self, ) -> Callable[ [cluster_service.ListNodePoolsRequest], Awaitable[cluster_service.ListNodePoolsResponse], ]: r"""Return a callable for the list node pools method over gRPC. Lists the node pools for a cluster. Returns: Callable[[~.ListNodePoolsRequest], Awaitable[~.ListNodePoolsResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_node_pools" not in self._stubs: self._stubs["list_node_pools"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/ListNodePools", request_serializer=cluster_service.ListNodePoolsRequest.serialize, response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, ) return self._stubs["list_node_pools"] @property def get_node_pool( self, ) -> Callable[ [cluster_service.GetNodePoolRequest], Awaitable[cluster_service.NodePool] ]: r"""Return a callable for the get node pool method over gRPC. Retrieves the requested node pool. Returns: Callable[[~.GetNodePoolRequest], Awaitable[~.NodePool]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_node_pool" not in self._stubs: self._stubs["get_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/GetNodePool", request_serializer=cluster_service.GetNodePoolRequest.serialize, response_deserializer=cluster_service.NodePool.deserialize, ) return self._stubs["get_node_pool"] @property def create_node_pool( self, ) -> Callable[ [cluster_service.CreateNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the create node pool method over gRPC. Creates a node pool for a cluster. Returns: Callable[[~.CreateNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_node_pool" not in self._stubs: self._stubs["create_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/CreateNodePool", request_serializer=cluster_service.CreateNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["create_node_pool"] @property def delete_node_pool( self, ) -> Callable[ [cluster_service.DeleteNodePoolRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the delete node pool method over gRPC. Deletes a node pool from a cluster. Returns: Callable[[~.DeleteNodePoolRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_node_pool" not in self._stubs: self._stubs["delete_node_pool"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/DeleteNodePool", request_serializer=cluster_service.DeleteNodePoolRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["delete_node_pool"] @property def rollback_node_pool_upgrade( self, ) -> Callable[ [cluster_service.RollbackNodePoolUpgradeRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the rollback node pool upgrade method over gRPC. Rolls back a previously Aborted or Failed NodePool upgrade. This makes no changes if the last upgrade successfully completed. Returns: Callable[[~.RollbackNodePoolUpgradeRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "rollback_node_pool_upgrade" not in self._stubs: self._stubs["rollback_node_pool_upgrade"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/RollbackNodePoolUpgrade", request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["rollback_node_pool_upgrade"] @property def set_node_pool_management( self, ) -> Callable[ [cluster_service.SetNodePoolManagementRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set node pool management method over gRPC. Sets the NodeManagement options for a node pool. Returns: Callable[[~.SetNodePoolManagementRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_node_pool_management" not in self._stubs: self._stubs["set_node_pool_management"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetNodePoolManagement", request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_management"] @property def set_labels( self, ) -> Callable[ [cluster_service.SetLabelsRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set labels method over gRPC. Sets labels on a cluster. Returns: Callable[[~.SetLabelsRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_labels" not in self._stubs: self._stubs["set_labels"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetLabels", request_serializer=cluster_service.SetLabelsRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_labels"] @property def set_legacy_abac( self, ) -> Callable[ [cluster_service.SetLegacyAbacRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set legacy abac method over gRPC. Enables or disables the ABAC authorization mechanism on a cluster. Returns: Callable[[~.SetLegacyAbacRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_legacy_abac" not in self._stubs: self._stubs["set_legacy_abac"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetLegacyAbac", request_serializer=cluster_service.SetLegacyAbacRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_legacy_abac"] @property def start_ip_rotation( self, ) -> Callable[ [cluster_service.StartIPRotationRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the start ip rotation method over gRPC. Starts master IP rotation. Returns: Callable[[~.StartIPRotationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "start_ip_rotation" not in self._stubs: self._stubs["start_ip_rotation"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/StartIPRotation", request_serializer=cluster_service.StartIPRotationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["start_ip_rotation"] @property def complete_ip_rotation( self, ) -> Callable[ [cluster_service.CompleteIPRotationRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the complete ip rotation method over gRPC. Completes master IP rotation. Returns: Callable[[~.CompleteIPRotationRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "complete_ip_rotation" not in self._stubs: self._stubs["complete_ip_rotation"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/CompleteIPRotation", request_serializer=cluster_service.CompleteIPRotationRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["complete_ip_rotation"] @property def set_node_pool_size( self, ) -> Callable[ [cluster_service.SetNodePoolSizeRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set node pool size method over gRPC. Sets the size for a specific node pool. The new size will be used for all replicas, including future replicas created by modifying [NodePool.locations][google.container.v1.NodePool.locations]. Returns: Callable[[~.SetNodePoolSizeRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_node_pool_size" not in self._stubs: self._stubs["set_node_pool_size"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetNodePoolSize", request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_node_pool_size"] @property def set_network_policy( self, ) -> Callable[ [cluster_service.SetNetworkPolicyRequest], Awaitable[cluster_service.Operation] ]: r"""Return a callable for the set network policy method over gRPC. Enables or disables Network Policy for a cluster. Returns: Callable[[~.SetNetworkPolicyRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_network_policy" not in self._stubs: self._stubs["set_network_policy"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetNetworkPolicy", request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_network_policy"] @property def set_maintenance_policy( self, ) -> Callable[ [cluster_service.SetMaintenancePolicyRequest], Awaitable[cluster_service.Operation], ]: r"""Return a callable for the set maintenance policy method over gRPC. Sets the maintenance policy for a cluster. Returns: Callable[[~.SetMaintenancePolicyRequest], Awaitable[~.Operation]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_maintenance_policy" not in self._stubs: self._stubs["set_maintenance_policy"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/SetMaintenancePolicy", request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, response_deserializer=cluster_service.Operation.deserialize, ) return self._stubs["set_maintenance_policy"] @property def list_usable_subnetworks( self, ) -> Callable[ [cluster_service.ListUsableSubnetworksRequest], Awaitable[cluster_service.ListUsableSubnetworksResponse], ]: r"""Return a callable for the list usable subnetworks method over gRPC. Lists subnetworks that are usable for creating clusters in a project. Returns: Callable[[~.ListUsableSubnetworksRequest], Awaitable[~.ListUsableSubnetworksResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. 
# gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_usable_subnetworks" not in self._stubs: self._stubs["list_usable_subnetworks"] = self.grpc_channel.unary_unary( "/google.container.v1.ClusterManager/ListUsableSubnetworks", request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, ) return self._stubs["list_usable_subnetworks"] def close(self): return self.grpc_channel.close() __all__ = ("ClusterManagerGrpcAsyncIOTransport",)
# Copyright (c) 2016 Fabian Kochem


from libtree import core, utils


class Node:
    """
    Representation of a tree node and entrypoint for local tree
    operations.

    It's a thin wrapper around the underlying core functions. It does
    not contain any data besides the database ID and must therefore
    query the database every time the value of an attribute like
    ``parent`` has been requested. This decision has been made to avoid
    race conditions when working in concurrent or distributed
    environments, but comes at the cost of slower runtime execution
    speeds. If this becomes a problem for you, grab the corresponding
    :class:`libtree.core.node_data.NodeData` object via
    :attr:`libtree.node.Node.node_data`.

    This object is tightly coupled to a
    :class:`libtree.transaction.Transaction` object. It behaves like a
    partial which passes a database cursor and node ID into every
    :mod:`libtree.core` function. It also has a few convenience features
    like attribute access via Python properties and shorter method
    names.

    :param transaction: Transaction object
    :type transaction: Transaction
    :param uuid4 id: Database node ID

    .. automethod:: __len__

    .. automethod:: __eq__
    """
    # '_Node__id' is the name-mangled form of the private ``__id``
    # attribute assigned in __init__; it must be listed explicitly for
    # __slots__ to accept the assignment.
    __slots__ = [
        '_cursor',
        '_Node__id',
        '_transaction'
    ]

    def __init__(self, transaction, id):
        self.__id = id
        self._transaction = transaction
        self._cursor = transaction.cursor

    def __repr__(self):
        if 'title' in self.properties:
            ret = "<Node id={!r}, title='{!s}'>"
            return ret.format(self.id, self.properties['title'])
        else:
            ret = '<Node id={!r}>'
            return ret.format(self.id)

    def __eq__(self, other):
        """ Determine if this node is equal to ``other``. """
        # Strict class check (not isinstance) so subclasses never
        # compare equal to plain Nodes; equality is based on the full
        # database row, not just the ID.
        if other.__class__ == Node:
            nd_self = self.node_data
            nd_other = core.get_node(self._cursor, other.id)
            return nd_self.to_dict() == nd_other.to_dict()
        return False

    def __hash__(self):
        return hash('<Node {}>'.format(self.id))

    def __len__(self):
        """ Return amount of child nodes. """
        return int(core.get_children_count(self._cursor, self.id))

    @property
    def id(self):
        """ Database ID """
        return self.__id

    @property
    def node_data(self):
        """
        Get a :class:`libtree.core.node_data.NodeData` object for
        current node ID from database.
        """
        return core.get_node(self._cursor, self.id)

    @property
    def parent(self):
        """ Get parent node. """
        parent = self.node_data.parent
        if parent is not None:
            # Reuse the already-fetched parent ID instead of reading
            # self.node_data a second time; the original issued two
            # database queries and could race with concurrent updates.
            return Node(self._transaction, parent)
        return None

    @property
    def position(self):
        """ Get position in between sibling nodes. """
        return self.node_data.position

    @property
    def properties(self):
        """ Get property dictionary. """
        return self.node_data.properties

    @property
    def inherited_properties(self):
        """ Get inherited property dictionary. """
        return core.get_inherited_properties(self._cursor, self.id)

    @property
    def recursive_properties(self):
        """ Get inherited and recursively merged property dictionary. """
        return core.get_recursive_properties(self._cursor, self.id)

    @property
    def children(self):
        """ Get list of immediate child nodes. """
        ret = []
        for _id in core.get_child_ids(self._cursor, self.id):
            node = Node(self._transaction, _id)
            ret.append(node)
        return ret

    @property
    def has_children(self):
        """ Return whether immediate children exist. """
        return core.get_children_count(self._cursor, self.id) > 0

    @property
    def ancestors(self):
        """ Get bottom-up ordered list of ancestor nodes. """
        ret = []
        for node in core.get_ancestors(self._cursor, self.id, sort=True):
            node = Node(self._transaction, node.id)
            ret.append(node)
        # vectorize_nodes() returns top-down order; reverse for the
        # documented bottom-up ordering.
        return utils.vectorize_nodes(ret)[::-1]

    @property
    def descendants(self):
        """ Get set of descendant nodes. """
        ret = set()
        for _id in core.get_descendant_ids(self._cursor, self.id):
            node = Node(self._transaction, _id)
            ret.add(node)
        return ret

    def delete(self):
        """ Delete node and its subtree. """
        return core.delete_node(self._cursor, self.id)

    def insert_child(self, properties=None, position=-1, id=None):
        """
        Create a child node and return it.

        :param dict properties: Inheritable key/value pairs
                                (see :ref:`core-properties`)
        :param int position: Position in between siblings. If 0, the
                             node will be inserted at the beginning of
                             the parents children. If -1, the node will
                             be inserted at the end of the parents
                             children.
        :param uuid4 id: Use this ID instead of automatically generating
                         one.
        """
        node_data = core.insert_node(self._cursor, self.id, properties,
                                     position=position, auto_position=True,
                                     id=id)
        return Node(self._transaction, node_data.id)

    def move(self, target, position=-1):
        """
        Move node and its subtree from its current to another parent
        node. Raises ``ValueError`` if ``target`` is inside this nodes'
        subtree.

        :param target: New parent node
        :type target: Node
        :param int position: Position in between siblings. If 0, the
                             node will be inserted at the beginning of
                             the parents children. If -1, the node will
                             be inserted at the end of the parents
                             children.
        """
        core.change_parent(self._cursor, self.id, target.id,
                           position=position, auto_position=True)

    def swap_position(self, other):
        """
        Swap position with ``other`` position.

        :param other: Node to swap the position with
        :type other: Node
        """
        core.swap_node_positions(self._cursor, self.id, other.id)

    def set_properties(self, properties):
        """
        Set properties.

        :param dict properties: Property dictionary
        """
        core.set_properties(self._cursor, self.id, properties)

    def update_properties(self, properties):
        """
        Update properties.

        :param dict properties: Property dictionary
        """
        core.update_properties(self._cursor, self.id, properties)

    def set_position(self, new_position):
        """
        Set position.

        :param int new_position: Position in between siblings. If 0,
                                 the node will be inserted at the
                                 beginning of the parents children. If
                                 -1, the node will be inserted at the
                                 end of the parents children.
        """
        core.set_position(self._cursor, self.id, new_position,
                          auto_position=True)

    def get_child_at_position(self, position):
        """
        Get child node at certain position.

        :param int position: Position to get the child node from
        """
        return core.get_node_at_position(self._cursor, self.id, position)
""" Author: Robert Post UOfAlberta 2015 This Class stores samples of experience - Only the most recent x samples are stored. Samples are stores in a circular array. Memory is used to construct random batches for the DQN. """ import time import numpy as np import theano floatX = theano.config.floatX class DQNAgentMemory(object): def __init__(self, stateShape, phiLength=4, memorySize=10000, discountRate = 1.0, numTasks = 1): """ Arguments: stateShape - tuple containing the dimensions of the experiences being stored phiLength - number of images in a state. memorySize - The number of experiences that can be stored An experience is a single frame, a state is several frames (phiLength) in a single numpy tensor """ self.currentMemoryIndex = 0 self.numberOfExperiences = 0 self.memorySize = memorySize self.stateShape = stateShape self.phiLength = phiLength self.numTasks = numTasks self.discountRate = discountRate self.taskSampleCount = np.zeros(self.numTasks, dtype='int32') self.totalTaskSampleCount = np.zeros(self.numTasks, dtype='int32') self.stateMemory = np.zeros((self.memorySize,) + self.stateShape , dtype = 'uint8') self.rewardMemory = np.zeros(self.memorySize, dtype = floatX) self.actionMemory = np.zeros(self.memorySize, dtype='int32') self.terminalMemory = np.zeros(self.memorySize, dtype='int32') self.taskMemory = -1 * np.ones(self.memorySize, dtype='int32') def addFrame(self, frame, memoryIndex = None): assert( memoryIndex == None or ( (memoryIndex < memorySize) and (memoryIndex >= 0) ) ) if memoryIndex == None: memoryIndex = self.currentMemoryIndex assert(self.stateShape[0] == frame.shape[0]) assert(self.stateShape[1] == frame.shape[1]) self.stateMemory[memoryIndex, ...] 
= frame def addExperience(self, reward, action, terminal = 0, taskIndex = 0, memoryIndex = None): assert( memoryIndex == None or ( (memoryIndex < memorySize) and (memoryIndex >= 0) ) ) if memoryIndex == None: memoryIndex = self.currentMemoryIndex self.actionMemory[memoryIndex] = action self.rewardMemory[memoryIndex] = reward self.terminalMemory[memoryIndex] = terminal if self.taskMemory[memoryIndex] != -1: #Overwritting another memory self.taskSampleCount[self.taskMemory[memoryIndex]] -= 1 self.taskSampleCount[taskIndex] += 1 self.totalTaskSampleCount[taskIndex] += 1 self.taskMemory[memoryIndex] = taskIndex self.currentMemoryIndex = (self.currentMemoryIndex + 1) % self.memorySize self.numberOfExperiences += 1 def getPhiIndices(self, index = None): assert index < self.memorySize assert index < self.numberOfExperiences if index == None: index = self.currentMemoryIndex startingIndex = (index - self.phiLength + 1) % self.memorySize phiIndices = [(startingIndex + i) % self.memorySize for i in xrange(self.phiLength)] return phiIndices def getPhi(self, index = None): phiIndices = self.getPhiIndices(index) phi = np.array([self.stateMemory[i] for i in phiIndices]) return phi def getRandomExperienceBatch(self, batchSize, kReturnLength = 1, taskIndex = None): assert batchSize < self.numberOfExperiences - self.phiLength + 1 assert kReturnLength > 0 if taskIndex is not None: #We cant make a batch of this task as we dont have enough samples! 
assert self.taskSampleCount[taskIndex] > batchSize experienceStateShape = (batchSize, self.phiLength) + self.stateShape batchStates = np.empty(experienceStateShape, dtype='uint8') batchNextStates = np.empty(experienceStateShape, dtype='uint8') batchRewards = np.empty((batchSize, 1), dtype=floatX) batchActions = np.empty((batchSize, 1), dtype='int32') batchNextActions= np.empty((batchSize, 1), dtype='int32') batchTerminals = np.empty((batchSize, 1), dtype='int32') batchTasks = np.empty(batchSize, dtype='int32') count = 0 maxIndex = min(self.numberOfExperiences, self.memorySize) while count < batchSize: index = np.random.randint(0, maxIndex - 1) phiIndices = self.getPhiIndices(index) #Picked a sample too close to start of episode - sample state crosses episode boundary if True in [self.terminalMemory[i] for i in phiIndices]: continue #Sample is not of the current task if taskIndex != None and self.taskMemory[index] != taskIndex: continue #There is a region of experience we dont want to sample from due to filling in new experience in the replay #This area is the region between the current memory index minux the desired return length #and the current memory index plus the phi length #as memories slightly over the current index will have their phi states invalidated by going between new and old memories #And memories kReturnLength behind the current index cant be sampled as they dont have k steps to form a full k step return upperBound = self.currentMemoryIndex + self.phiLength lowerBound = self.currentMemoryIndex - kReturnLength if upperBound % self.memorySize < upperBound: #looped over end of circular buffer by finding starting acceptable index thats above the upper bound if index >= lowerBound or index <= upperBound % self.memorySize: continue else: if lowerBound % self.memorySize > lowerBound: #Looped from start to end of circular buffer by subtracting kReturnLength when finding the lower bound if index >= lowerBound % self.memorySize or index <= upperBound: 
continue elif index <= upperBound and index >= lowerBound: continue currentReturn = 0.0 currentDiscount = 1.0 currentIndex = index for i in xrange(0, kReturnLength): currentIndex = (index + i) % self.memorySize currentReturn += currentDiscount * self.rewardMemory[currentIndex] currentDiscount *= self.discountRate endIndex = (currentIndex + 1) % self.memorySize if self.terminalMemory[endIndex] == True: break batchStates[count] = self.getPhi(index) batchNextStates[count] = self.getPhi(endIndex) batchRewards[count] = currentReturn batchActions[count] = self.actionMemory[index] batchNextActions[count]= self.actionMemory[endIndex] batchTerminals[count] = 1.0 - self.terminalMemory[endIndex] batchTasks[count] = self.taskMemory[index] count += 1 return batchStates, batchActions, batchRewards, batchNextStates, batchNextActions, batchTerminals, batchTasks def getLowestSampledTask(self): return np.argmin(self.taskSampleCount) def __len__(self): """ Return the total number of avaible data items. """ return max(0, min(self.numberOfExperiences - self.phiLength, self.memorySize - self.phiLength)) def main(): m = AgentMemory((3,2), 4, 10) for i in xrange(4): frame = np.random.randn(3,2) m.addFrame(frame) m.addExperience(1, 0, False) print "StateMemory:" print m.stateMemory print "\nPhi:\n" print m.getPhi() for i in xrange(15): frame = np.random.randn(3,2) m.addFrame(frame) m.addExperience(1, 0, False) frame = np.random.randn(3,2) m.addFrame(frame) m.addExperience(0, 1, True) print "\nCurrent Memory Index:" + str(m.currentMemoryIndex) + "\n" frame = np.random.randn(3,2) m.addFrame(frame) print m.stateMemory print "\nCurrent Phi crossing end of memory:\n" + str(m.getPhi()) return m if __name__ == "__main__": main()
# Licensed under a 3-clause BSD style license - see LICENSE.rst

# Unit tests for astropy.nddata.NDData: init behavior (copy vs reference,
# masked arrays, Quantities, NDData-from-NDData) and attribute setters.

import pickle
import textwrap

from collections import OrderedDict

import pytest
import numpy as np
from numpy.testing import assert_array_equal

from astropy.nddata.nddata import NDData
from astropy.nddata.nduncertainty import StdDevUncertainty
from astropy import units as u
from astropy.utils import NumpyRNGContext
from astropy.wcs import WCS
from astropy.wcs.wcsapi import HighLevelWCSWrapper, SlicedLowLevelWCS, \
    BaseHighLevelWCS

from .test_nduncertainty import FakeUncertainty
from astropy.nddata import _testing as nd_testing


class FakeNumpyArray:
    """
    Class that has a few of the attributes of a numpy array.

    These attributes are checked for by NDData.
    """

    def __init__(self):
        super().__init__()

    def shape(self):
        pass

    def __getitem__(self):
        pass

    def __array__(self):
        pass

    @property
    def dtype(self):
        return 'fake'


class MinimalUncertainty:
    """
    Define the minimum attributes acceptable as an uncertainty object.
    """

    def __init__(self, value):
        self._uncertainty = value

    @property
    def uncertainty_type(self):
        return "totally and completely fake"


class BadNDDataSubclass(NDData):
    # Deliberately skips NDData.__init__ and assigns private attributes
    # directly, so instances can carry arbitrary (even invalid) values.

    def __init__(self, data, uncertainty=None, mask=None, wcs=None,
                 meta=None, unit=None):
        self._data = data
        self._uncertainty = uncertainty
        self._mask = mask
        self._wcs = wcs
        self._unit = unit
        self._meta = meta


# Setter tests
def test_uncertainty_setter():
    nd = NDData([1, 2, 3])
    good_uncertainty = MinimalUncertainty(5)
    nd.uncertainty = good_uncertainty
    assert nd.uncertainty is good_uncertainty
    # Check the fake uncertainty (minimal does not work since it has no
    # parent_nddata attribute from NDUncertainty)
    nd.uncertainty = FakeUncertainty(5)
    assert nd.uncertainty.parent_nddata is nd
    # Check that it works if the uncertainty was set during init
    nd = NDData(nd)
    assert isinstance(nd.uncertainty, FakeUncertainty)
    nd.uncertainty = 10
    assert not isinstance(nd.uncertainty, FakeUncertainty)
    assert nd.uncertainty.array == 10


def test_mask_setter():
    # Since it just changes the _mask attribute everything should work
    nd = NDData([1, 2, 3])
    nd.mask = True
    assert nd.mask
    nd.mask = False
    assert not nd.mask
    # Check that it replaces a mask from init
    nd = NDData(nd, mask=True)
    assert nd.mask
    nd.mask = False
    assert not nd.mask


# Init tests
def test_nddata_empty():
    with pytest.raises(TypeError):
        NDData()  # empty initializer should fail


def test_nddata_init_data_nonarray():
    inp = [1, 2, 3]
    nd = NDData(inp)
    assert (np.array(inp) == nd.data).all()


def test_nddata_init_data_ndarray():
    # random floats
    with NumpyRNGContext(123):
        nd = NDData(np.random.random((10, 10)))
    assert nd.data.shape == (10, 10)
    assert nd.data.size == 100
    assert nd.data.dtype == np.dtype(float)

    # specific integers
    nd = NDData(np.array([[1, 2, 3], [4, 5, 6]]))
    assert nd.data.size == 6
    assert nd.data.dtype == np.dtype(int)

    # Tests to ensure that creating a new NDData object copies by *reference*.
    a = np.ones((10, 10))
    nd_ref = NDData(a)
    a[0, 0] = 0
    assert nd_ref.data[0, 0] == 0

    # Except we choose copy=True
    a = np.ones((10, 10))
    nd_ref = NDData(a, copy=True)
    a[0, 0] = 0
    assert nd_ref.data[0, 0] != 0


def test_nddata_init_data_maskedarray():
    with NumpyRNGContext(456):
        NDData(np.random.random((10, 10)),
               mask=np.random.random((10, 10)) > 0.5)

    # Another test (just copied here)
    with NumpyRNGContext(12345):
        a = np.random.randn(100)
        marr = np.ma.masked_where(a > 0, a)
    nd = NDData(marr)

    # check that masks and data match
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)

    # check that they are both by reference
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 123456789
    assert_array_equal(nd.mask, marr.mask)
    assert_array_equal(nd.data, marr.data)

    # or not if we choose copy=True
    nd = NDData(marr, copy=True)
    marr.mask[10] = ~marr.mask[10]
    marr.data[11] = 0
    assert nd.mask[10] != marr.mask[10]
    assert nd.data[11] != marr.data[11]


@pytest.mark.parametrize('data', [np.array([1, 2, 3]), 5])
def test_nddata_init_data_quantity(data):
    # Test an array and a scalar because a scalar Quantity does not always
    # behaves the same way as an array.
    quantity = data * u.adu
    ndd = NDData(quantity)
    assert ndd.unit == quantity.unit
    assert_array_equal(ndd.data, np.array(quantity.value))
    if ndd.data.size > 1:
        # check that if it is an array it is not copied
        quantity.value[1] = 100
        assert ndd.data[1] == quantity.value[1]

        # or is copied if we choose copy=True
        ndd = NDData(quantity, copy=True)
        quantity.value[1] = 5
        assert ndd.data[1] != quantity.value[1]


def test_nddata_init_data_masked_quantity():
    a = np.array([2, 3])
    q = a * u.m
    m = False
    mq = np.ma.array(q, mask=m)
    nd = NDData(mq)
    assert_array_equal(nd.data, a)
    # This test failed before the change in nddata init because the masked
    # arrays data (which in fact was a quantity was directly saved)
    assert nd.unit == u.m
    assert not isinstance(nd.data, u.Quantity)
    np.testing.assert_array_equal(nd.mask, np.array(m))


def test_nddata_init_data_nddata():
    nd1 = NDData(np.array([1]))
    nd2 = NDData(nd1)
    assert nd2.wcs == nd1.wcs
    assert nd2.uncertainty == nd1.uncertainty
    assert nd2.mask == nd1.mask
    assert nd2.unit == nd1.unit
    assert nd2.meta == nd1.meta

    # Check that it is copied by reference
    nd1 = NDData(np.ones((5, 5)))
    nd2 = NDData(nd1)
    assert nd1.data is nd2.data

    # Check that it is really copied if copy=True
    nd2 = NDData(nd1, copy=True)
    nd1.data[2, 3] = 10
    assert nd1.data[2, 3] != nd2.data[2, 3]

    # Now let's see what happens if we have all explicitly set
    nd1 = NDData(np.array([1]), mask=False,
                 uncertainty=StdDevUncertainty(10), unit=u.s,
                 meta={'dest': 'mordor'}, wcs=WCS(naxis=1))
    nd2 = NDData(nd1)
    assert nd2.data is nd1.data
    assert nd2.wcs is nd1.wcs
    assert nd2.uncertainty.array == nd1.uncertainty.array
    assert nd2.mask == nd1.mask
    assert nd2.unit == nd1.unit
    assert nd2.meta == nd1.meta

    # now what happens if we overwrite them all too
    nd3 = NDData(nd1, mask=True, uncertainty=StdDevUncertainty(200),
                 unit=u.km, meta={'observer': 'ME'}, wcs=WCS(naxis=1))
    assert nd3.data is nd1.data
    assert nd3.wcs is not nd1.wcs
    assert nd3.uncertainty.array != nd1.uncertainty.array
    assert nd3.mask != nd1.mask
    assert nd3.unit != nd1.unit
    assert nd3.meta != nd1.meta


def test_nddata_init_data_nddata_subclass():
    uncert = StdDevUncertainty(3)
    # There might be some incompatible subclasses of NDData around.
    bnd = BadNDDataSubclass(False, True, 3, 2, 'gollum', 100)
    # Before changing the NDData init this would not have raised an error but
    # would have lead to a compromised nddata instance
    with pytest.raises(TypeError):
        NDData(bnd)

    # but if it has no actual incompatible attributes it passes
    bnd_good = BadNDDataSubclass(np.array([1, 2]), uncert, 3,
                                 HighLevelWCSWrapper(WCS(naxis=1)),
                                 {'enemy': 'black knight'}, u.km)
    nd = NDData(bnd_good)
    assert nd.unit == bnd_good.unit
    assert nd.meta == bnd_good.meta
    assert nd.uncertainty == bnd_good.uncertainty
    assert nd.mask == bnd_good.mask
    assert nd.wcs is bnd_good.wcs
    assert nd.data is bnd_good.data


def test_nddata_init_data_fail():
    # First one is sliceable but has no shape, so should fail.
    with pytest.raises(TypeError):
        NDData({'a': 'dict'})

    # This has a shape but is not sliceable
    class Shape:
        def __init__(self):
            self.shape = 5

        def __repr__(self):
            return '7'

    with pytest.raises(TypeError):
        NDData(Shape())


def test_nddata_init_data_fakes():
    ndd1 = NDData(FakeNumpyArray())
    # First make sure that NDData isn't converting its data to a numpy array.
    assert isinstance(ndd1.data, FakeNumpyArray)
    # Make a new NDData initialized from an NDData
    ndd2 = NDData(ndd1)
    # Check that the data wasn't converted to numpy
    assert isinstance(ndd2.data, FakeNumpyArray)


# Specific parameters
def test_param_uncertainty():
    u = StdDevUncertainty(array=np.ones((5, 5)))
    d = NDData(np.ones((5, 5)), uncertainty=u)
    # Test that the parent_nddata is set.
assert d.uncertainty.parent_nddata is d # Test conflicting uncertainties (other NDData) u2 = StdDevUncertainty(array=np.ones((5, 5))*2) d2 = NDData(d, uncertainty=u2) assert d2.uncertainty is u2 assert d2.uncertainty.parent_nddata is d2 def test_param_wcs(): # Since everything is allowed we only need to test something nd = NDData([1], wcs=WCS(naxis=1)) assert nd.wcs is not None # Test conflicting wcs (other NDData) nd2 = NDData(nd, wcs=WCS(naxis=1)) assert nd2.wcs is not None and nd2.wcs is not nd.wcs def test_param_meta(): # everything dict-like is allowed with pytest.raises(TypeError): NDData([1], meta=3) nd = NDData([1, 2, 3], meta={}) assert len(nd.meta) == 0 nd = NDData([1, 2, 3]) assert isinstance(nd.meta, OrderedDict) assert len(nd.meta) == 0 # Test conflicting meta (other NDData) nd2 = NDData(nd, meta={'image': 'sun'}) assert len(nd2.meta) == 1 nd3 = NDData(nd2, meta={'image': 'moon'}) assert len(nd3.meta) == 1 assert nd3.meta['image'] == 'moon' def test_param_mask(): # Since everything is allowed we only need to test something nd = NDData([1], mask=False) assert not nd.mask # Test conflicting mask (other NDData) nd2 = NDData(nd, mask=True) assert nd2.mask # (masked array) nd3 = NDData(np.ma.array([1], mask=False), mask=True) assert nd3.mask # (masked quantity) mq = np.ma.array(np.array([2, 3])*u.m, mask=False) nd4 = NDData(mq, mask=True) assert nd4.mask def test_param_unit(): with pytest.raises(ValueError): NDData(np.ones((5, 5)), unit="NotAValidUnit") NDData([1, 2, 3], unit='meter') # Test conflicting units (quantity as data) q = np.array([1, 2, 3]) * u.m nd = NDData(q, unit='cm') assert nd.unit != q.unit assert nd.unit == u.cm # (masked quantity) mq = np.ma.array(np.array([2, 3])*u.m, mask=False) nd2 = NDData(mq, unit=u.s) assert nd2.unit == u.s # (another NDData as data) nd3 = NDData(nd, unit='km') assert nd3.unit == u.km def test_pickle_nddata_with_uncertainty(): ndd = NDData(np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m) 
ndd_dumped = pickle.dumps(ndd) ndd_restored = pickle.loads(ndd_dumped) assert type(ndd_restored.uncertainty) is StdDevUncertainty assert ndd_restored.uncertainty.parent_nddata is ndd_restored assert ndd_restored.uncertainty.unit == u.m def test_pickle_uncertainty_only(): ndd = NDData(np.ones(3), uncertainty=StdDevUncertainty(np.ones(5), unit=u.m), unit=u.m) uncertainty_dumped = pickle.dumps(ndd.uncertainty) uncertainty_restored = pickle.loads(uncertainty_dumped) np.testing.assert_array_equal(ndd.uncertainty.array, uncertainty_restored.array) assert ndd.uncertainty.unit == uncertainty_restored.unit # Even though it has a parent there is no one that references the parent # after unpickling so the weakref "dies" immediately after unpickling # finishes. assert uncertainty_restored.parent_nddata is None def test_pickle_nddata_without_uncertainty(): ndd = NDData(np.ones(3), unit=u.m) dumped = pickle.dumps(ndd) ndd_restored = pickle.loads(dumped) np.testing.assert_array_equal(ndd.data, ndd_restored.data) # Check that the meta descriptor is working as expected. The MetaBaseTest class # takes care of defining all the tests, and we simply have to define the class # and any minimal set of args to pass. from astropy.utils.tests.test_metadata import MetaBaseTest class TestMetaNDData(MetaBaseTest): test_class = NDData args = np.array([[1.]]) # Representation tests def test_nddata_str(): arr1d = NDData(np.array([1, 2, 3])) assert str(arr1d) == '[1 2 3]' arr2d = NDData(np.array([[1, 2], [3, 4]])) assert str(arr2d) == textwrap.dedent(""" [[1 2] [3 4]]"""[1:]) arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])) assert str(arr3d) == textwrap.dedent(""" [[[1 2] [3 4]] [[5 6] [7 8]]]"""[1:]) # let's add units! arr = NDData(np.array([1, 2, 3]), unit="km") assert str(arr) == '[1 2 3] km' # what if it had these units? 
arr = NDData(np.array([1, 2, 3]), unit="erg cm^-2 s^-1 A^-1") assert str(arr) == '[1 2 3] erg / (A cm2 s)' def test_nddata_repr(): # The big test is eval(repr()) should be equal to the original! arr1d = NDData(np.array([1, 2, 3])) s = repr(arr1d) assert s == 'NDData([1, 2, 3])' got = eval(s) assert np.all(got.data == arr1d.data) assert got.unit == arr1d.unit arr2d = NDData(np.array([[1, 2], [3, 4]])) s = repr(arr2d) assert s == textwrap.dedent(""" NDData([[1, 2], [3, 4]])"""[1:]) got = eval(s) assert np.all(got.data == arr2d.data) assert got.unit == arr2d.unit arr3d = NDData(np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])) s = repr(arr3d) assert s == textwrap.dedent(""" NDData([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])"""[1:]) got = eval(s) assert np.all(got.data == arr3d.data) assert got.unit == arr3d.unit # let's add units! arr = NDData(np.array([1, 2, 3]), unit="km") s = repr(arr) assert s == "NDData([1, 2, 3], unit='km')" got = eval(s) assert np.all(got.data == arr.data) assert got.unit == arr.unit # Not supported features def test_slicing_not_supported(): ndd = NDData(np.ones((5, 5))) with pytest.raises(TypeError): ndd[0] def test_arithmetic_not_supported(): ndd = NDData(np.ones((5, 5))) with pytest.raises(TypeError): ndd + ndd def test_nddata_wcs_setter_error_cases(): ndd = NDData(np.ones((5, 5))) # Setting with a non-WCS should raise an error with pytest.raises(TypeError): ndd.wcs = "I am not a WCS" naxis = 2 # This should succeed since the WCS is currently None ndd.wcs = nd_testing._create_wcs_simple(naxis=naxis, ctype=['deg'] * naxis, crpix=[0] * naxis, crval=[10] * naxis, cdelt=[1] * naxis) with pytest.raises(ValueError): # This should fail since the WCS is not None ndd.wcs = nd_testing._create_wcs_simple(naxis=naxis, ctype=['deg'] * naxis, crpix=[0] * naxis, crval=[10] * naxis, cdelt=[1] * naxis) def test_nddata_wcs_setter_with_low_level_wcs(): ndd = NDData(np.ones((5, 5))) wcs = WCS() # If the wcs property is set with a low level WCS it should get # wrapped 
to high level. low_level = SlicedLowLevelWCS(wcs, 5) assert not isinstance(low_level, BaseHighLevelWCS) ndd.wcs = low_level assert isinstance(ndd.wcs, BaseHighLevelWCS) def test_nddata_init_with_low_level_wcs(): wcs = WCS() low_level = SlicedLowLevelWCS(wcs, 5) ndd = NDData(np.ones((5, 5)), wcs=low_level) assert isinstance(ndd.wcs, BaseHighLevelWCS) class NDDataCustomWCS(NDData): @property def wcs(self): return WCS() def test_overriden_wcs(): # Check that a sub-class that overrides `.wcs` without providing a setter # works NDDataCustomWCS(np.ones((5, 5)))
import re

from django.db import connection

from hashlib import sha256

from graphite.tags.base import BaseTagDB, TaggedSeries


class LocalDatabaseTagDB(BaseTagDB):
    """TagDB implementation backed by the local Django database.

    Series, tags and values live in the ``tags_series`` / ``tags_tag`` /
    ``tags_tagvalue`` tables, joined through ``tags_seriestag``.
    """

    def find_series_query(self, tags):
        """Build the SQL used to find series matching the given tagspecs.

        Returns a ``(sql, params, filters)`` tuple.  Tagspecs that can match
        the empty string cannot be expressed as SQL joins (a missing tag has
        no row to join against), so they are returned in ``filters`` and must
        be applied to the result rows in Python by the caller.

        Raises ValueError for an unknown operator or if every tagspec matches
        the empty string.
        """
        # sql will select series that match all tag expressions that don't match empty tags
        sql = 'SELECT s.path'
        sql += ' FROM tags_series AS s'
        params = []

        where = []
        whereparams = []

        all_match_empty = True

        # expressions that do match empty tags will be used to filter the result
        filters = []

        i = 0
        for tagspec in tags:
            (tag, operator, spec) = self.parse_tagspec(tagspec)

            # each non-empty-matching tagspec gets its own numbered join alias
            i += 1
            s = str(i)

            if operator == '=':
                matches_empty = spec == ''

                if not matches_empty:
                    where.append('v' + s + '.value=%s')
                    whereparams.append(spec)

            elif operator == '=~':
                # make sure regex is anchored
                if not spec.startswith('^'):
                    spec = '^(' + spec + ')'

                matches_empty = bool(re.match(spec, ''))

                if not matches_empty:
                    where.append('v' + s + '.value ' + self._regexp_operator(connection) + ' %s')
                    whereparams.append(spec)

            elif operator == '!=':
                matches_empty = spec != ''

                if not matches_empty:
                    where.append('v' + s + '.value<>%s')
                    whereparams.append(spec)

            elif operator == '!=~':
                # make sure regex is anchored
                if not spec.startswith('^'):
                    spec = '^(' + spec + ')'

                matches_empty = not re.match(spec, '')

                if not matches_empty:
                    where.append('v' + s + '.value ' + self._regexp_not_operator(connection) + ' %s')
                    whereparams.append(spec)

            else:
                raise ValueError("Invalid operator %s" % operator)

            if matches_empty:
                filters.append((tag, operator, spec))
            else:
                sql += ' JOIN tags_tag AS t' + s + ' ON t' + s + '.tag=%s'
                params.append(tag)
                sql += ' JOIN tags_seriestag AS st' + s + ' ON st' + s + '.series_id=s.id AND st' + s + '.tag_id=t' + s + '.id'
                sql += ' JOIN tags_tagvalue AS v' + s + ' ON v' + s + '.id=st' + s + '.value_id'

            all_match_empty = all_match_empty and matches_empty

        if all_match_empty:
            raise ValueError("At least one tagspec must not match the empty string")

        if where:
            sql += ' WHERE ' + ' AND '.join(where)
            params.extend(whereparams)

        sql += ' ORDER BY s.path'

        return sql, params, filters

    def _find_series(self, tags, requestContext=None):
        """Return the list of series paths matching all tagspecs in *tags*."""
        sql, params, filters = self.find_series_query(tags)

        def matches_filters(path):
            # apply the empty-string-matching tagspecs that couldn't be
            # pushed down into SQL joins
            if not filters:
                return True

            parsed = self.parse(path)

            for (tag, operator, spec) in filters:
                value = parsed.tags.get(tag, '')
                if (
                    (operator == '=' and value != spec) or
                    (operator == '=~' and re.match(spec, value) is None) or
                    (operator == '!=' and value == spec) or
                    (operator == '!=~' and re.match(spec, value) is not None)
                ):
                    return False

            return True

        with connection.cursor() as cursor:
            cursor.execute(sql, params)
            return [row[0] for row in cursor if matches_filters(row[0])]

    def get_series(self, path, requestContext=None):
        """Look up a tagged series by its normalized path.

        Returns a TaggedSeries (carrying its database id), or None if the
        path is not tagged.
        """
        with connection.cursor() as cursor:
            sql = 'SELECT s.id, t.tag, v.value'
            sql += ' FROM tags_series AS s'
            sql += ' JOIN tags_seriestag AS st ON st.series_id=s.id'
            sql += ' JOIN tags_tag AS t ON t.id=st.tag_id'
            sql += ' JOIN tags_tagvalue AS v ON v.id=st.value_id'
            sql += ' WHERE s.path=%s'
            params = [path]

            cursor.execute(sql, params)

            series_id = None
            tags = {}
            # NOTE: a dict comprehension must NOT be used here.  On Python 3
            # comprehension loop variables are scoped to the comprehension,
            # so ``{tag: value for (series_id, tag, value) in cursor}`` would
            # never assign the outer series_id, leaving it None and breaking
            # tag_series() which relies on curr.id being a real row id.
            for (row_series_id, tag, value) in cursor:
                series_id = row_series_id
                tags[tag] = value

        if not tags:
            return None

        return TaggedSeries(tags['name'], tags, series_id=series_id)

    def list_tags(self, tagFilter=None, limit=None, requestContext=None):
        """List known tags, optionally filtered by an (anchored) regex."""
        with connection.cursor() as cursor:
            sql = 'SELECT t.id, t.tag'
            sql += ' FROM tags_tag AS t'
            params = []

            if tagFilter:
                # make sure regex is anchored
                if not tagFilter.startswith('^'):
                    tagFilter = '^(' + tagFilter + ')'
                sql += ' WHERE t.tag ' + self._regexp_operator(connection) + ' %s'
                params.append(tagFilter)

            sql += ' ORDER BY t.tag'

            if limit:
                sql += ' LIMIT %s'
                params.append(int(limit))

            cursor.execute(sql, params)
            return [{'id': tag_id, 'tag': tag} for (tag_id, tag) in cursor]

    def get_tag(self, tag, valueFilter=None, limit=None, requestContext=None):
        """Return a tag's id and its values, or None if the tag is unknown."""
        with connection.cursor() as cursor:
            sql = 'SELECT t.id, t.tag'
            sql += ' FROM tags_tag AS t'
            sql += ' WHERE t.tag=%s'
            params = [tag]

            cursor.execute(sql, params)
            row = cursor.fetchone()
            if not row:
                return None

            (tag_id, tag) = row

        return {
            'id': tag_id,
            'tag': tag,
            'values': self.list_values(
                tag, valueFilter=valueFilter, limit=limit, requestContext=requestContext
            ),
        }

    def list_values(self, tag, valueFilter=None, limit=None, requestContext=None):
        """List the values of *tag* (with usage counts), optionally filtered."""
        with connection.cursor() as cursor:
            sql = 'SELECT v.id, v.value, COUNT(st.id)'
            sql += ' FROM tags_tagvalue AS v'
            sql += ' JOIN tags_seriestag AS st ON st.value_id=v.id'
            sql += ' JOIN tags_tag AS t ON t.id=st.tag_id'
            sql += ' WHERE t.tag=%s'
            params = [tag]

            if valueFilter:
                # make sure regex is anchored
                if not valueFilter.startswith('^'):
                    valueFilter = '^(' + valueFilter + ')'
                sql += ' AND v.value ' + self._regexp_operator(connection) + ' %s'
                params.append(valueFilter)

            sql += ' GROUP BY v.id, v.value'
            sql += ' ORDER BY v.value'

            if limit:
                sql += ' LIMIT %s'
                params.append(int(limit))

            cursor.execute(sql, params)
            return [{'id': value_id, 'value': value, 'count': count}
                    for (value_id, value, count) in cursor]

    @staticmethod
    def _insert_ignore(table, cols, data):
        """Bulk-insert rows into *table*, silently skipping duplicates.

        Uses the vendor-specific "insert or ignore" form; *data* is a list of
        rows, each a sequence matching *cols*.
        """
        sql = table + ' (' + ','.join(cols) + ') VALUES ' + \
            ', '.join(['(' + ', '.join(['%s'] * len(cols)) + ')'] * len(data))
        params = []
        for row in data:
            params.extend(row)

        if connection.vendor == 'mysql':
            sql = 'INSERT IGNORE INTO ' + sql
        elif connection.vendor == 'sqlite':
            sql = 'INSERT OR IGNORE INTO ' + sql
        elif connection.vendor == 'postgresql':
            sql = 'INSERT INTO ' + sql + ' ON CONFLICT DO NOTHING'  # nosec
        else:
            raise Exception('Unsupported database vendor ' + connection.vendor)

        with connection.cursor() as cursor:
            cursor.execute(sql, params)

    @staticmethod
    def _regexp_operator(connection):
        """Return the vendor-specific SQL operator for regex match."""
        if connection.vendor == 'mysql':
            return 'REGEXP'
        if connection.vendor == 'sqlite':
            # django provides an implementation of REGEXP for sqlite
            return 'REGEXP'
        if connection.vendor == 'postgresql':
            return '~*'
        raise Exception('Database vendor ' + connection.vendor + ' does not support regular expressions')

    @staticmethod
    def _regexp_not_operator(connection):
        """Return the vendor-specific SQL operator for negated regex match."""
        if connection.vendor == 'mysql':
            return 'NOT REGEXP'
        if connection.vendor == 'sqlite':
            # django provides an implementation of REGEXP for sqlite
            return 'NOT REGEXP'
        if connection.vendor == 'postgresql':
            return '!~*'
        raise Exception('Database vendor ' + connection.vendor + ' does not support regular expressions')

    def tag_series(self, series, requestContext=None):
        """Store the tags of *series*, creating tag/value/series rows as needed.

        Returns the normalized path.  No-op if the path is already stored with
        identical tags.
        """
        # extract tags and normalize path
        parsed = self.parse(series)

        path = parsed.path

        # check if path is already tagged
        curr = self.get_series(path)
        if curr and parsed.tags == curr.tags:
            return path

        with connection.cursor() as cursor:
            # tags
            self._insert_ignore('tags_tag', ['tag'], [[tag] for tag in parsed.tags.keys()])

            sql = 'SELECT id, tag FROM tags_tag WHERE tag IN (' + \
                ', '.join(['%s'] * len(parsed.tags)) + ')'  # nosec
            params = list(parsed.tags.keys())
            cursor.execute(sql, params)
            tag_ids = {tag: tag_id for (tag_id, tag) in cursor}

            # tag values
            self._insert_ignore('tags_tagvalue', ['value'], [[value] for value in parsed.tags.values()])

            sql = 'SELECT id, value FROM tags_tagvalue WHERE value IN (' + \
                ', '.join(['%s'] * len(parsed.tags)) + ')'  # nosec
            params = list(parsed.tags.values())
            cursor.execute(sql, params)
            value_ids = {value: value_id for (value_id, value) in cursor}

            # series
            if curr:
                series_id = curr.id
            else:
                # hash column is used to support a unique index in mysql since path can be longer than 191 characters
                path_hash = sha256(path.encode('utf8')).hexdigest()
                self._insert_ignore('tags_series', ['hash', 'path'], [[path_hash, path]])

                sql = 'SELECT id FROM tags_series WHERE path=%s'
                params = [path]
                cursor.execute(sql, params)
                series_id = cursor.fetchone()[0]

            # series tags
            self._insert_ignore(
                'tags_seriestag',
                ['series_id', 'tag_id', 'value_id'],
                [[series_id, tag_ids[tag], value_ids[value]] for tag, value in parsed.tags.items()]
            )

        return path

    def del_series(self, series, requestContext=None):
        """Delete *series* and its tag associations.  Returns True."""
        # extract tags and normalize path
        parsed = self.parse(series)

        path = parsed.path

        with connection.cursor() as cursor:
            sql = 'SELECT id'
            sql += ' FROM tags_series'
            sql += ' WHERE path=%s'
            params = [path]

            cursor.execute(sql, params)
            row = cursor.fetchone()
            if not row:
                # nothing to delete; deletion is idempotent
                return True

            (series_id, ) = row

            sql = 'DELETE FROM tags_seriestag WHERE series_id=%s'
            params = [series_id]
            cursor.execute(sql, params)

            sql = 'DELETE FROM tags_series WHERE id=%s'
            params = [series_id]
            cursor.execute(sql, params)

        return True
# -*- coding: utf-8 -*- """ Use nose `$ pip install nose` `$ pip install mock` `$ nosetests test_commando.py -d -v` """ from contextlib import nested from commando import * from mock import patch try: import cStringIO StringIO = cStringIO except ImportError: import StringIO as xStringIO StringIO = xStringIO def trap_exit_fail(f): def test_wrapper(*args): try: f(*args) except SystemExit: import traceback print traceback.format_exc() assert False test_wrapper.__name__ = f.__name__ return test_wrapper def trap_exit_pass(f): def test_wrapper(*args): try: print f.__name__ f(*args) except SystemExit: pass test_wrapper.__name__ = f.__name__ return test_wrapper class BasicCommandLine(Application): @command(description='test', prog='Basic') @param('--force', action='store_true', dest='force1') @param('--force2', action='store', dest='force2') @param('--version', action='version', version='%(prog)s 1.0') def main(self, params): assert params.force1 == eval(params.force2) self._main() def _main(self): pass @trap_exit_fail def test_command_basic(): with patch.object(BasicCommandLine, '_main') as _main: c = BasicCommandLine() args = c.parse(['--force', '--force2', 'True']) c.run(args) assert _main.call_count == 1 args = c.parse(['--force2', 'False']) c.run(args) assert _main.call_count == 2 def test_command_version_param(): with patch.object(BasicCommandLine, '_main') as _main: c = BasicCommandLine() exception = False try: c.parse(['--version']) assert False except SystemExit: exception = True assert exception assert not _main.called def test_positional_params(): class PositionalCommandLine(Application): @command(description='test', prog='Basic') @param('force1', action='store') @param('force3', action='store') @param('force2', action='store') def main(self, params): self._main(params) def _main(self, params): assert params.force1 == '1' assert params.force3 == '2' assert params.force2 == '3' p = PositionalCommandLine() args = p.parse(['1', '2', '3']) p.run(args) def 
test_command_version(): class VersionCommandLine(Application): @command(description='test', prog='Basic') @param('--force', action='store_true', dest='force1') @param('--force2', action='store', dest='force2') @version('--version', version='%(prog)s 1.0') def main(self, params): assert params.force1 == eval(params.force2) self._main() def _main(self): pass with patch.object(VersionCommandLine, '_main') as _main: c = VersionCommandLine() exception = False try: c.parse(['--version']) assert False except SystemExit: exception = True assert exception assert not _main.called class SuperDecoratedCommandLine(Application): @command(description='test', prog='Basic') @true('--force', dest='force1') @store('--force2', dest='force2') @const('--jam', const='jam') @version('--version', version='%(prog)s 1.0') def main(self, params): assert params.force1 == eval(params.force2) self._main() def _main(self): pass def test_command_super(): with patch.object(SuperDecoratedCommandLine, '_main') as _main: c = SuperDecoratedCommandLine() args = c.parse(['--force', '--force2', 'True']) c.run(args) assert _main.call_count == 1 assert not args.jam args = c.parse(['--force2', 'False', '--jam']) c.run(args) assert _main.call_count == 2 assert args.jam == 'jam' class ComplexCommandLine(Application): @command(description='test', prog='Complex') @param('--force', action='store_true', dest='force1') @param('--force2', action='store', dest='force2') @param('--version', action='version', version='%(prog)s 1.0') def main(self, params): assert params.force1 == eval(params.force2) self._main() @subcommand('sub', description='test') @param('--launch', action='store_true', dest='launch1') @param('--launch2', action='store', dest='launch2') def sub(self, params): assert params.launch1 == eval(params.launch2) self._sub() def _main(): pass def _sub(): pass @trap_exit_pass def test_command_subcommands_usage(): with nested(patch.object(ComplexCommandLine, '_main'), patch.object(ComplexCommandLine, '_sub')) 
as (_main, _sub): c = ComplexCommandLine() c.parse(['--usage']) @trap_exit_fail def test_command_subcommands(): with nested(patch.object(ComplexCommandLine, '_main'), patch.object(ComplexCommandLine, '_sub')) as (_main, _sub): c = ComplexCommandLine() args = c.parse(['sub', '--launch', '--launch2', 'True']) c.run(args) assert not _main.called assert _sub.call_count == 1 class EmptyCommandLine(Application): @command(description='test', prog='Empty') def main(self, params): assert params == [] self._main() @subcommand('sub', description='test sub') def sub(self, params): assert params assert len(params.__dict__) == 1 self._sub() def _main(): pass def _sub(): pass @trap_exit_pass def test_empty_main_command(): with nested(patch.object(EmptyCommandLine, '_main'), patch.object(EmptyCommandLine, '_sub')) as (_main, _sub): e = EmptyCommandLine(raise_exceptions=True) args = e.parse(None) e.run(args) assert not _sub.called assert _main.call_count == 1 def test_empty_sub_command(): with nested(patch.object(EmptyCommandLine, '_main'), patch.object(EmptyCommandLine, '_sub')) as (_main, _sub): e = EmptyCommandLine(raise_exceptions=True) e.run(e.parse(['sub'])) assert not _main.called assert _sub.call_count == 1 class NestedCommandLine(Application): @command(description='test', prog='Nested') @param('--force', action='store_true', dest='force1') @param('--force2', action='store', dest='force2') @param('--version', action='version', version='%(prog)s 1.0') def main(self, params): assert params.force1 == eval(params.force2) self._main() @subcommand('sub', description='sub') @param('--launch', action='store_true', dest='launch1') @param('--launch2', action='store', dest='launch2') def sub(self, params): assert params.launch1 == eval(params.launch2) self._sub() @subcommand('foobar', description="foo bar!") def foobar(self, params): assert False @subcommand('bla', parent=foobar) @param('--launch2', action='store_true', dest='launch2') def foobar_bla(self, params): assert 
params.launch2 self._foobar_bla() @subcommand('blip', parent=foobar) def foobar_blip(self, params): assert False @subcommand('blop', parent=foobar_blip) def foobar_blip_blop(self, params): self._foobar_blip_blop() def _main(): pass def _sub(): pass def _foobar_bla(): pass def _foobar_blip_blop(): pass @trap_exit_fail def test_nested_command_subcommands(): with nested(patch.object(NestedCommandLine, '_main'), patch.object(NestedCommandLine, '_sub'), patch.object(NestedCommandLine, '_foobar_bla'), patch.object(NestedCommandLine, '_foobar_blip_blop')) \ as (_main, _sub, _foobar_bla, _foobar_blip_blop): c = NestedCommandLine(raise_exceptions=True) args = c.parse(['sub', '--launch', '--launch2', 'True']) c.run(args) assert not _main.called assert _sub.call_count == 1 args = c.parse(['foobar', 'bla', '--launch2']) c.run(args) assert not _main.called assert _sub.call_count == 1 assert _foobar_bla.call_count == 1 args = c.parse(['foobar', 'blip', 'blop']) c.run(args) assert not _main.called assert _sub.call_count == 1 assert _foobar_bla.call_count == 1 assert _foobar_blip_blop.call_count == 1
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union import warnings from azure.core.async_paging import AsyncItemPaged, AsyncList from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod from azure.mgmt.core.exceptions import ARMErrorFormat from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling from ... import models as _models T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class PrivateEndpointConnectionsOperations: """PrivateEndpointConnectionsOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure_databricks_management_client.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name: str, workspace_name: str, **kwargs: Any ) -> AsyncIterable["_models.PrivateEndpointConnectionsList"]: """List private endpoint connections. List private endpoint connections of the workspace. :param resource_group_name: The name of the resource group. The name is case insensitive. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either PrivateEndpointConnectionsList or the result of cls(response) :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure_databricks_management_client.models.PrivateEndpointConnectionsList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionsList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2021-04-01-preview" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'), 'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, 
                **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service hands back an opaque, fully
                # formed next_link URL — no query parameters are re-applied.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('PrivateEndpointConnectionsList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; only HTTP 200 is a success for this operation.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections'}  # type: ignore

    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        """Get private endpoint connection.

        Get a private endpoint connection properties for a workspace.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~azure_databricks_management_client.models.PrivateEndpointConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    async def _create_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> "_models.PrivateEndpointConnection":
        # Initial PUT for the create/update long-running operation;
        # called by begin_create, which wraps the result in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 = completed synchronously, 202 = accepted (LRO in progress);
        # both carry a PrivateEndpointConnection body.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    async def begin_create(
        self,
        resource_group_name: str,
        workspace_name: str,
        private_endpoint_connection_name: str,
        private_endpoint_connection: "_models.PrivateEndpointConnection",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
        """Update private endpoint connection status.

        Update the status of a private endpoint connection with the specified name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :param private_endpoint_connection: The private endpoint connection with updated properties.
        :type private_endpoint_connection: ~azure_databricks_management_client.models.PrivateEndpointConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
         result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure_databricks_management_client.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: issue the initial PUT. cls=lambda keeps the raw
            # pipeline response so the poller can drive the LRO itself.
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                private_endpoint_connection=private_endpoint_connection,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model type.
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    async def _delete_initial(
        self,
        resource_group_name: str,
        workspace_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> None:
        # Initial DELETE for the delete long-running operation;
        # called by begin_delete, which wraps the result in a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-04-01-preview"
        accept = "application/json"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/202/204 are all acceptable for a delete (done, accepted, gone).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore

    async def begin_delete(
        self,
        resource_group_name: str,
        workspace_name: str,
        private_endpoint_connection_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Remove private endpoint connection.

        Remove private endpoint connection with the specified name.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                workspace_name=workspace_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body to deserialize; forward only through cls.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=64, min_length=3),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
        }

        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Databricks/workspaces/{workspaceName}/privateEndpointConnections/{privateEndpointConnectionName}'}  # type: ignore
"""Generate ast module from specification This script generates the ast module from a simple specification, which makes it easy to accomodate changes in the grammar. This approach would be quite reasonable if the grammar changed often. Instead, it is rather complex to generate the appropriate code. And the Node interface has changed more often than the grammar. """ import fileinput import re import sys from StringIO import StringIO SPEC = "ast.txt" COMMA = ", " def load_boilerplate(file): f = open(file) buf = f.read() f.close() i = buf.find('### ''PROLOGUE') j = buf.find('### ''EPILOGUE') pro = buf[i+12:j].strip() epi = buf[j+12:].strip() return pro, epi def strip_default(arg): """Return the argname from an 'arg = default' string""" i = arg.find('=') if i == -1: return arg t = arg[:i].strip() return t P_NODE = 1 P_OTHER = 2 P_NESTED = 3 P_NONE = 4 class NodeInfo: """Each instance describes a specific AST node""" def __init__(self, name, args): self.name = name self.args = args.strip() self.argnames = self.get_argnames() self.argprops = self.get_argprops() self.nargs = len(self.argnames) self.init = [] def get_argnames(self): if '(' in self.args: i = self.args.find('(') j = self.args.rfind(')') args = self.args[i+1:j] else: args = self.args return [strip_default(arg.strip()) for arg in args.split(',') if arg] def get_argprops(self): """Each argument can have a property like '*' or '!' XXX This method modifies the argnames in place! 
""" d = {} hardest_arg = P_NODE for i in range(len(self.argnames)): arg = self.argnames[i] if arg.endswith('*'): arg = self.argnames[i] = arg[:-1] d[arg] = P_OTHER hardest_arg = max(hardest_arg, P_OTHER) elif arg.endswith('!'): arg = self.argnames[i] = arg[:-1] d[arg] = P_NESTED hardest_arg = max(hardest_arg, P_NESTED) elif arg.endswith('&'): arg = self.argnames[i] = arg[:-1] d[arg] = P_NONE hardest_arg = max(hardest_arg, P_NONE) else: d[arg] = P_NODE self.hardest_arg = hardest_arg if hardest_arg > P_NODE: self.args = self.args.replace('*', '') self.args = self.args.replace('!', '') self.args = self.args.replace('&', '') return d def gen_source(self): buf = StringIO() print >> buf, "class %s(Node):" % self.name self._gen_init(buf) print >> buf self._gen_getChildren(buf) print >> buf self._gen_getChildNodes(buf) print >> buf self._gen_repr(buf) buf.seek(0, 0) return buf.read() def _gen_init(self, buf): if self.args: print >> buf, " def __init__(self, %s, lineno=None):" % self.args else: print >> buf, " def __init__(self, lineno=None):" if self.argnames: for name in self.argnames: print >> buf, " self.%s = %s" % (name, name) print >> buf, " self.lineno = lineno" # Copy the lines in self.init, indented four spaces. The rstrip() # business is to get rid of the four spaces if line happens to be # empty, so that reindent.py is happy with the output. 
for line in self.init: print >> buf, (" " + line).rstrip() def _gen_getChildren(self, buf): print >> buf, " def getChildren(self):" if len(self.argnames) == 0: print >> buf, " return ()" else: if self.hardest_arg < P_NESTED: clist = COMMA.join(["self.%s" % c for c in self.argnames]) if self.nargs == 1: print >> buf, " return %s," % clist else: print >> buf, " return %s" % clist else: if len(self.argnames) == 1: print >> buf, " return tuple(flatten(self.%s))" % self.argnames[0] else: print >> buf, " children = []" template = " children.%s(%sself.%s%s)" for name in self.argnames: if self.argprops[name] == P_NESTED: print >> buf, template % ("extend", "flatten(", name, ")") else: print >> buf, template % ("append", "", name, "") print >> buf, " return tuple(children)" def _gen_getChildNodes(self, buf): print >> buf, " def getChildNodes(self):" if len(self.argnames) == 0: print >> buf, " return ()" else: if self.hardest_arg < P_NESTED: clist = ["self.%s" % c for c in self.argnames if self.argprops[c] == P_NODE] if len(clist) == 0: print >> buf, " return ()" elif len(clist) == 1: print >> buf, " return %s," % clist[0] else: print >> buf, " return %s" % COMMA.join(clist) else: print >> buf, " nodelist = []" template = " nodelist.%s(%sself.%s%s)" for name in self.argnames: if self.argprops[name] == P_NONE: tmp = (" if self.%s is not None:\n" " nodelist.append(self.%s)") print >> buf, tmp % (name, name) elif self.argprops[name] == P_NESTED: print >> buf, template % ("extend", "flatten_nodes(", name, ")") elif self.argprops[name] == P_NODE: print >> buf, template % ("append", "", name, "") print >> buf, " return tuple(nodelist)" def _gen_repr(self, buf): print >> buf, " def __repr__(self):" if self.argnames: fmt = COMMA.join(["%s"] * self.nargs) if '(' in self.args: fmt = '(%s)' % fmt vals = ["repr(self.%s)" % name for name in self.argnames] vals = COMMA.join(vals) if self.nargs == 1: vals = vals + "," print >> buf, ' return "%s(%s)" %% (%s)' % \ (self.name, fmt, vals) 
else: print >> buf, ' return "%s()"' % self.name rx_init = re.compile('init\((.*)\):') def parse_spec(file): classes = {} cur = None for line in fileinput.input(file): if line.strip().startswith('#'): continue mo = rx_init.search(line) if mo is None: if cur is None: # a normal entry try: name, args = line.split(':') except ValueError: continue classes[name] = NodeInfo(name, args) cur = None else: # some code for the __init__ method cur.init.append(line) else: # some extra code for a Node's __init__ method name = mo.group(1) cur = classes[name] return sorted(classes.values(), key=lambda n: n.name) def main(): prologue, epilogue = load_boilerplate(sys.argv[-1]) print prologue print classes = parse_spec(SPEC) for info in classes: print info.gen_source() print epilogue if __name__ == "__main__": main() sys.exit(0) ### PROLOGUE """Python abstract syntax node definitions This file is automatically generated by Tools/compiler/astgen.py """ from consts import CO_VARARGS, CO_VARKEYWORDS def flatten(seq): l = [] for elt in seq: t = type(elt) if t is tuple or t is list: for elt2 in flatten(elt): l.append(elt2) else: l.append(elt) return l def flatten_nodes(seq): return [n for n in flatten(seq) if isinstance(n, Node)] nodes = {} class Node(object): """Abstract base class for ast nodes.""" def getChildren(self): pass # implemented by subclasses def __iter__(self): for n in self.getChildren(): yield n def asList(self): # for backwards compatibility return self.getChildren() def getChildNodes(self): pass # implemented by subclasses def _get_lineno(self): return self._lineno def _set_lineno(self, lineno): if lineno is not None and not isinstance(lineno, int): self._context = lineno self._lineno = lineno[1][0] else: self._lineno = lineno self._context = None lineno = property(_get_lineno, _set_lineno) class EmptyNode(Node): pass class Expression(Node): # Expression is an artificial node class to support "eval" nodes["expression"] = "Expression" def __init__(self, node): 
Node.__init__(self) self.node = node def getChildren(self): return self.node, def getChildNodes(self): return self.node, def __repr__(self): return "Expression(%s)" % (repr(self.node)) ### EPILOGUE for name, obj in globals().items(): if isinstance(obj, type) and issubclass(obj, Node): nodes[name.lower()] = obj
# # Python Macro Language for Dragon NaturallySpeaking # (c) Copyright 1999 by Joel Gould # Portions (c) Copyright 1999 by Dragon Systems, Inc. # # gramparser.py # This module contains the Python code to convert the textual represenation # of a command and control grammar in the standard SAPI CFG binary format. # # April 1, 2000 # - we now throw an exception if there is a bad parse instead of just # printing the error # - fixed a few minor bugs detecting errors # ######################################################################## # # Grammar format # # Rule Definition: # <RuleName> imported ; # <RuleName> = Expression ; # <RuleName> exported = Expression ; # # A rule needs the keywork "exported" in order to be activated or visible # to other grammars for importing. # # Expression: # <RuleName> // no spaces # {ListName} // no spaces # Word # "Word" # ( Expression ) # [ Expression ] // optional # Expression + // repeat # Expression Expression // sequence # Expression | Expression // alternative # # When building grammars there are three built in rules which can be imported: # # <dgnletters> Contains all the letters of the alphabet for spelling. # Letters are spelled like "a\\l", "b\\l", etc. # # <dgnwords> The set of all words active during dictation. # # <dgndictation> A special rule which corresponds to dictation. It is # roughly equivelent to ( <dgnwords> | "\(noise)" )+ However, the # noise words is filtered out from any results reported to clients. # ######################################################################## from struct import pack # # This is the lexical scanner. # # We take a list of strings as input (such as would be returned by readlines). # # After every call to getAnotherToken or testAndEatToken the variables token # and value will contain the details about the next token in the input stream. 
#

import string

# String "exceptions" (a Python 2 idiom): raised with an explanatory message.
SyntaxError = "SyntaxError"
LexicalError = "LexicalError"
SymbolError = "SymbolError"
GrammarError = "GrammarError"

# Element codes used in parsed rule definitions.
SeqCode = 1     # sequence
AltCode = 2     # alternative
RepCode = 3     # repeat
OptCode = 4     # optional

class GramScanner:
    """Lexical scanner over a list of grammar-text lines.

    After every call to getAnotherToken or testAndEatToken, self.token and
    self.value describe the next token in the input stream.  '\\0' is used
    as the end-of-input sentinel.
    """

    def __init__(self, text=['']):
        self.token = None
        self.value = None
        self.line = 0;
        self.char = 0;
        # Append the sentinel line so the scanner can detect end of input.
        self.text = text + ['\0'];

    def newText(self, text):
        # Re-initialize the scanner over a new list of lines.
        GramScanner.__init__(self, text);

    def getError(self):
        """Return a two-line pointer showing where in the input we stopped."""
        if self.token == '\0' or self.text[self.line][0] == '\0':
            return '=> (end of input)\n'
        else:
            return '=> ' + self.text[self.line] + '\n=> ' + (' ' * self.char) + '^\n'

    def testAndEatToken(self, token):
        """Require the current token to be `token`; consume and return its value."""
        if self.token != token:
            raise SyntaaxError if False else SyntaxError, "expecting '%s'" % token
        else:
            value = self.value
            self.getAnotherToken()
            return value

    def skipWhiteSpace(self):
        # Advance past whitespace and '#' comment lines, normalizing tabs
        # and newlines on each new line we move onto.
        ch = self.char
        while 1:
            ln = self.text[self.line]
            lnLen = len(ln)
            while ch < lnLen and ln[ch] in string.whitespace:
                ch = ch + 1
            if ch < lnLen and ln[ch] != '#':
                break
            self.line = self.line + 1
            self.text[self.line] = string.replace(self.text[self.line], '\t', ' ')
            self.text[self.line] = string.replace(self.text[self.line], '\n', ' ')
            ch = 0
        self.char = ch

    def getAnotherToken(self):
        """Scan the next token into self.token / self.value."""
        if self.token == '\0':
            return None
        self.value = None
        self.skipWhiteSpace()
        ch = self.char
        ln = self.text[self.line]
        lnLen = len(ln)
        self.start = ch
        if ln[ch] in ['(', ')', '[', ']', '|', '+', '=', ';', '\0']:
            # Single-character punctuation token (or end-of-input sentinel).
            self.token = ln[ch]
            ch = ch + 1
        elif ln[ch] == '"':
            # Double-quoted word.
            self.token = 'word'
            ch = ch + 1
            while ch < lnLen and ln[ch] != '"':
                ch = ch + 1
            if ch >= lnLen:
                raise LexicalError, "expecting closing quote in word name"
            self.value = ln[self.start+1:ch]
            ch = ch + 1
        elif ln[ch] == "'":
            # Single-quoted word.
            self.token = 'word'
            ch = ch + 1
            while ch < lnLen and ln[ch] != "'":
                ch = ch + 1
            if ch >= lnLen:
                raise LexicalError, "expecting closing quote in word name"
            self.value = ln[self.start+1:ch]
            ch = ch + 1
        elif ln[ch] == '<':
            # <RuleName>
            self.token = 'rule'
            ch = ch + 1
            while ch < lnLen and ln[ch] != '>':
                ch = ch + 1
            if ch >= lnLen:
                raise LexicalError, "expecting closing angle bracket in rule name"
            self.value = ln[self.start+1:ch]
            ch = ch + 1
        elif ln[ch] == '{':
            # {ListName}
            self.token = 'list'
            ch = ch + 1
            while ch < lnLen and ln[ch] != '}':
                ch = ch + 1
            if ch >= lnLen:
                raise LexicalError, "expecting closing brace in list name"
            self.value = ln[self.start+1:ch]
            ch = ch + 1
        elif ln[ch] in string.letters + string.digits:
            # Bare alphanumeric word.
            self.token = 'word'
            while ch < lnLen and ln[ch] in string.letters + string.digits:
                ch = ch + 1
            self.value = ln[self.start:ch]
        else:
            raise LexicalError, "unknown character found"
        self.char = ch

#
# This is a rule parser.  It builds up data structures which contain details
# about the rules in the parsed text.
#
# The definition of a rule is an array which contains tuples.  The array
# contains the rule elements in sequence.  The tuples are pairs of element
# type and element value.
#

class GramParser:
    """Recursive-descent parser producing name->number tables and rule
    definitions (lists of (type, value) tuples) from grammar text."""

    def __init__(self, text=['']):
        self.scanObj = GramScanner(text)
        # Name -> ID tables; IDs are assigned sequentially from 1.
        self.knownRules = {}
        self.knownWords = {}
        self.knownLists = {}
        self.nextRule = 1
        self.nextWord = 1
        self.nextList = 1
        self.exportRules = {}
        self.importRules = {}
        self.ruleDefines = {}

    def doParse(self, *text):
        """Parse all rules, rewrapping scanner errors with position info."""
        if text:
            self.scanObj.newText(text[0])
        try:
            self.scanObj.getAnotherToken()
            while self.scanObj.token != '\0':
                self.parseRule()
        except SyntaxError, message:
            raise SyntaxError, "Syntax error at column: %d\n%s\n" % (self.scanObj.start, message) + self.scanObj.getError()
        except LexicalError, message:
            raise LexicalError, "Lexical error at column: %d\n%s\n" % (self.scanObj.start, message) + self.scanObj.getError()
        except SymbolError, message:
            raise SymbolError, "Symbol error at column: %d\n%s\n" % (self.scanObj.start, message) + self.scanObj.getError()

    def parseRule(self):
        # <RuleName> imported ;   or   <RuleName> [exported] = Expression ;
        if self.scanObj.token != 'rule':
            raise SyntaxError, "expecting rule name to start rule definition"
        ruleName = self.scanObj.value
        if self.ruleDefines.has_key(ruleName):
            raise SymbolError, "rule '%s' has already been defined" % ruleName
        if self.importRules.has_key(ruleName):
            raise SymbolError, "rule '%s' has already been defined as imported" % ruleName
        if self.knownRules.has_key(ruleName):
            ruleNumber = self.knownRules[ruleName]
        else:
            ruleNumber = self.nextRule
            self.nextRule = self.nextRule + 1
            self.knownRules[ruleName] = ruleNumber
        self.scanObj.getAnotherToken()
        if self.scanObj.token == 'word' and self.scanObj.value == 'imported':
            self.importRules[ruleName] = ruleNumber
            self.scanObj.getAnotherToken()
        else:
            if self.scanObj.token == 'word' and self.scanObj.value == 'exported':
                self.exportRules[ruleName] = ruleNumber
                self.scanObj.getAnotherToken()
            self.scanObj.testAndEatToken('=')
            self.ruleDefines[ruleName] = self.parseExpr()
        self.scanObj.testAndEatToken(';')

    def parseExpr(self):
        # Alternatives: Expr2 ( '|' Expr2 )*
        definition = []
        moreThanOne = 0
        while 1:
            definition = definition + self.parseExpr2()
            if self.scanObj.token != '|':
                break
            self.scanObj.getAnotherToken()
            moreThanOne = 1
        if moreThanOne:
            return [ ('start', AltCode) ] + definition + [ ('end', AltCode) ]
        else:
            return definition

    def parseExpr2(self):
        # Sequence: Expr3 Expr3 ...
        definition = []
        moreThanOne = 0
        while 1:
            definition = definition + self.parseExpr3()
            if self.scanObj.token not in ( 'word', 'rule', 'list', '(', '[' ):
                break
            moreThanOne = 1
        if moreThanOne:
            return [ ('start', SeqCode) ] + definition + [ ('end', SeqCode) ]
        else:
            return definition

    def parseExpr3(self):
        # Repetition: Expr4 [ '+' ]
        definition = self.parseExpr4()
        if self.scanObj.token == '+':
            self.scanObj.getAnotherToken()
            return [ ('start', RepCode) ] + definition + [ ('end', RepCode) ]
        else:
            return definition

    def parseExpr4(self):
        # Atoms: word, {list}, <rule>, ( Expr ), [ Expr ]
        if self.scanObj.token == 'word':
            wordName = self.scanObj.value
            if not wordName:
                raise SyntaxError, "empty word name"
            if self.knownWords.has_key(wordName):
                wordNumber = self.knownWords[wordName]
            else:
                wordNumber = self.nextWord
                self.nextWord = self.nextWord + 1
                self.knownWords[wordName] = wordNumber
            self.scanObj.getAnotherToken()
            return [ ( 'word', wordNumber ) ]
        elif self.scanObj.token == 'list':
            listName = self.scanObj.value
            if not listName:
                raise SyntaxError, "empty word name"
            if self.knownLists.has_key(listName):
                listNumber = self.knownLists[listName]
            else:
                listNumber = self.nextList
                self.nextList = self.nextList + 1
                self.knownLists[listName] = listNumber
            self.scanObj.getAnotherToken()
            return [ ( 'list', listNumber ) ]
        elif self.scanObj.token == 'rule':
            ruleName = self.scanObj.value
            if not ruleName:
                raise SyntaxError, "empty word name"
            if self.knownRules.has_key(ruleName):
                ruleNumber = self.knownRules[ruleName]
            else:
                ruleNumber = self.nextRule
                self.nextRule = self.nextRule + 1
                self.knownRules[ruleName] = ruleNumber
            self.scanObj.getAnotherToken()
            return [ ( 'rule', ruleNumber ) ]
        elif self.scanObj.token == '(':
            self.scanObj.getAnotherToken()
            definition = self.parseExpr()
            self.scanObj.testAndEatToken(')')
            return definition
        elif self.scanObj.token == '[':
            self.scanObj.getAnotherToken()
            definition = self.parseExpr()
            self.scanObj.testAndEatToken(']')
            return [ ('start', OptCode) ] + definition + [ ('end', OptCode) ]
        else:
            raise SyntaxError, "expecting expression (word, rule, etc.)"

    def checkForErrors(self):
        """Post-parse validation: something exported, every rule resolved."""
        if not len(self.exportRules):
            raise GrammarError, "no rules were exported"
        for ruleName in self.knownRules.keys():
            if not self.importRules.has_key(ruleName) and not self.ruleDefines.has_key(ruleName):
                raise GrammarError, "rule '%s' was not defined or imported" % ruleName

    def dumpContents(self):
        """Debug helper: print every symbol table and rule definition."""
        print "Dumping GramParser object..."
        print "  knownRules:"
        for name in self.knownRules.keys():
            print "    ", name, self.knownRules[name]
        print "  knownLists:"
        for name in self.knownLists.keys():
            print "    ", name, self.knownLists[name]
        print "  knownWords:"
        for name in self.knownWords.keys():
            print "    ", name, self.knownWords[name]
        print "  exportRules:"
        for name in self.exportRules.keys():
            print "    ", name, self.exportRules[name]
        print "  importRules:"
        for name in self.importRules.keys():
            print "    ", name, self.importRules[name]
        print "  ruleDefines:"
        for name in self.ruleDefines.keys():
            print "    ", name
            for element in self.ruleDefines[name]:
                print "      ", element[0], element[1]

#
# This function takes a GramParser class which contains the parse of a grammar
# and returns a "string" object which contains the binary representation of
# that grammar.
#
# The binary form is standard SAPI which consists of a header followed by five
# "chunks".  The first four chunks are all in the same format and are lists
# of the names and number of the exported rules, imported rules, lists and
# words respectively.
#
# The fifth chunk contains the details of the elements which make up each
# defined rule.
#

def packGrammar(parseObj):
    """Convert a parsed grammar (a GramParser instance) into binary SAPI form.

    Returns a byte string: an 8-byte header (dwType and dwFlags, both zero)
    followed by up to five chunks — exported rules (4), imported rules (5),
    lists (6), words (2) and rule definitions (3).  Empty tables emit no chunk.
    """
    output = ""

    # header:
    #   DWORD dwType  = 0
    #   DWORD dwFlags = 0
    output = output + pack("LL", 0, 0)

    # various chunks (each only when non-empty)
    if len(parseObj.exportRules):
        output = output + packGrammarChunk(4, parseObj.exportRules)
    if len(parseObj.importRules):
        output = output + packGrammarChunk(5, parseObj.importRules)
    if len(parseObj.knownLists):
        output = output + packGrammarChunk(6, parseObj.knownLists)
    if len(parseObj.knownWords):
        output = output + packGrammarChunk(2, parseObj.knownWords)
    if len(parseObj.ruleDefines):
        output = output + packGrammarRules(3, parseObj.knownRules, parseObj.ruleDefines)
    return output


def packGrammarChunk(type, dict):
    """Pack one name->ID dictionary as a single SAPI chunk.

    :param type: DWORD chunk ID (2=words, 4=exported, 5=imported, 6=lists)
    :param dict: mapping of name -> numeric ID
    """
    output = ""
    totalLen = 0

    for word in dict.keys():
        # chunk data entry:
        #   DWORD dwSize = number of bytes in entry
        #   DWORD dwNum  = ID number for this rule/word
        #   DWORD szName = name of rule/word, zero-term'd and padded to dword
        # (len + 4) & ~3 rounds up past the terminating NUL to a DWORD boundary.
        paddedLen = (len(word) + 4) & 0xFFFC
        output = output + pack("LL%ds" % paddedLen, paddedLen + 8, dict[word], word)
        totalLen = totalLen + paddedLen + 8

    # chunk header:
    #   DWORD dwChunkID   = type
    #   DWORD dwChunkSize = number of bytes in chunk not including this header
    return pack("LL", type, totalLen) + output


def packGrammarRules(type, names, dict):
    """Pack the rule-definition chunk.

    :param type: DWORD chunk ID (always 3 for rule definitions)
    :param names: mapping of rule name -> rule ID (GramParser.knownRules)
    :param dict: mapping of rule name -> list of (elementType, value) tuples
    """
    output = ""
    totalLen = 0
    elemType = { 'start': 1, 'end': 2, 'word': 3, 'rule': 4, 'list': 6 }

    for word in dict.keys():
        ruleDef = ""
        ruleLen = 0
        for element in dict[word]:
            # repeated element:
            #   WORD  wType   = element type
            #   WORD  wProb   = 0
            #   DWORD dwValue = element value
            ruleDef = ruleDef + pack("HHL", elemType[element[0]], 0, element[1])
            ruleLen = ruleLen + 8
        # rule definition:
        #   DWORD dwSize = number of bytes in rule definition
        #   DWORD dwNum  = ID number of rule
        output = output + pack("LL", ruleLen + 8, names[word]) + ruleDef
        totalLen = totalLen + ruleLen + 8

    # chunk header:
    #   DWORD dwChunkID   = type
    #   DWORD dwChunkSize = number of bytes in chunk not including this header
    return pack("LL", type, totalLen) + output


#
# This is a routine which was included for testing but can also be used to
# compile grammar files.  It takes an input file name containing a grammar
# and an output file name to write the binary into.
#

def parseGrammarAndSave(inName, outName):
    """Compile the text grammar in inName and write the binary to outName.

    Bug fix: a stray debug statement used to append the literal string
    "hello" after the binary grammar data, corrupting every output file;
    that write has been removed.  Files are now also closed on error paths.
    """
    inFile = open(inName, 'r')
    try:
        parseObj = GramParser()
        parseObj.doParse(inFile.readlines())
    finally:
        inFile.close()
    binary = packGrammar(parseObj)
    outFile = open(outName, 'wb')
    try:
        outFile.write(binary)
    finally:
        outFile.close()


#
# This utility routine will split apart strings at linefeeds in a list of
# strings.  For example:
#
#   [ "This is line one\nThis is line two", "This is line three" ]
#
# becomes:
#
#   [ "This is line one\n", "This is line two", "This is line three" ]
#

def splitApartLines(lines):
    """Split embedded linefeeds in `lines` (a list of strings), in place.

    Each '\\n' stays attached to the end of the fragment preceding it.
    Modifies the list in place and returns None.
    """
    x = 0
    while x < len(lines):
        # str.find replaces the Python-2-only string.find (same semantics).
        crlf = lines[x].find('\n')
        if crlf >= 0:
            lines[x:x+1] = [ lines[x][:crlf+1], lines[x][crlf+1:] ]
        x = x + 1
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ This module implements the Field class, which simply a WeightedGraph (see the graph.py) module, plus an arrray that yields (possibly multi-dimnesional) features associated with graph vertices. This allows some kinds of computations (all thoses relating to mathematical morphology, diffusion etc.) Certain functions are provided to Instantiate Fields easily, given a WeightedGraph and feature data. Author:Bertrand Thirion, 2006--2011 """ from __future__ import print_function from __future__ import absolute_import from warnings import warn import numpy as np from .graph import WeightedGraph, Graph NEGINF = -np.inf def field_from_coo_matrix_and_data(x, data): """ Instantiates a weighted graph from a (sparse) coo_matrix Parameters ---------- x: (V, V) scipy.sparse.coo_matrix instance, the input matrix data: array of shape (V, dim), the field data Returns ------- ifield: resulting Field instance """ if x.shape[0] != x.shape[1]: raise ValueError("the input coo_matrix is not square") if data.shape[0] != x.shape[0]: raise ValueError("data and x do not have consistent shapes") i, j = x.nonzero() edges = np.vstack((i, j)).T weights = x.data ifield = Field(x.shape[0], edges, weights, data) return ifield def field_from_graph_and_data(g, data): """ Instantiate a Fieldfrom a WeightedGraph plus some feature data Parameters ---------- x: (V, V) scipy.sparse.coo_matrix instance, the input matrix data: array of shape (V, dim), the field data Returns ------- ifield: resulting field instance """ if data.shape[0] != g.V: raise ValueError("data and g do not have consistent shapes") ifield = Field(g.V, g.edges, g.weights, data) return ifield class Field(WeightedGraph): """ This is the basic field structure, which contains the weighted graph structure plus an array of data (the 'field') field is an array of size(n, p) where n is the number of vertices of the graph and p is 
the field dimension """ def __init__(self, V, edges=None, weights=None, field=None): """ Parameters ---------- V (int > 0) the number of vertices of the graph edges=None: the edge array of the graph weights=None: the asociated weights array field=None: the field data itself """ V = int(V) if V < 1: raise ValueError('cannot create graph with no vertex') self.V = int(V) self.E = 0 self.edges = [] self.weights = [] if (edges is not None) or (weights is not None): if len(edges) == 0: E = 0 elif edges.shape[0] == np.size(weights): E = edges.shape[0] else: raise ValueError('Incompatible size of the edges \ and weights matrices') self.V = V self.E = E self.edges = edges self.weights = weights self.field = [] if field is None: pass else: if np.size(field) == self.V: field = np.reshape(field, (self.V, 1)) if field.shape[0] != self.V: raise ValueError('field does not have a correct size') else: self.field = field def get_field(self): return self.field def set_field(self, field): if np.size(field) == self.V: field = np.reshape(field, (self.V, 1)) if field.shape[0] != self.V: raise ValueError('field does not have a correct size') else: self.field = field def closing(self, nbiter=1): """Morphological closing of the field data. self.field is changed inplace Parameters ---------- nbiter=1 : the number of iterations required """ nbiter = int(nbiter) self.dilation(nbiter) self.erosion(nbiter) def opening(self, nbiter=1): """Morphological opening of the field data. 
self.field is changed inplace Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) self.erosion(nbiter) self.dilation(nbiter) def dilation(self, nbiter=1, fast=True): """Morphological dilation of the field data, changed in place Parameters ---------- nbiter: int, optional, the number of iterations required Note ---- When data dtype is not float64, a slow version of the code is used """ nbiter = int(nbiter) if self.field.dtype != np.float64: warn('data type is not float64; a slower version is used') fast = False if fast: from ._graph import dilation if self.E > 0: if (self.field.size == self.V): self.field = self.field.reshape((self.V, 1)) idx, neighb, _ = self.compact_neighb() for i in range(nbiter): dilation(self.field, idx, neighb) else: from scipy.sparse import dia_matrix adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows for i in range(nbiter): self.field = np.array([self.field[row].max(0) for row in rows]) def highest_neighbor(self, refdim=0): """Computes the neighbor with highest field value along refdim Parameters ---------- refdim: int, optional, the dimension of the field under consideration Returns ------- hneighb: array of shape(self.V), index of the neighbor with highest value """ from scipy.sparse import dia_matrix refdim = int(refdim) # add self-edges to avoid singularities, when taking the maximum adj = self.to_coo_matrix() + dia_matrix( (np.ones(self.V), 0), (self.V, self.V)) rows = adj.tolil().rows hneighb = np.array([row[self.field[row].argmax()] for row in rows]) return hneighb def erosion(self, nbiter=1): """Morphological openeing of the field Parameters ---------- nbiter: int, optional, the number of iterations required """ nbiter = int(nbiter) lil = self.to_coo_matrix().tolil().rows.tolist() for i in range(nbiter): nf = np.zeros_like(self.field) for k, neighbors in enumerate(lil): nf[k] = self.field[neighbors].min(0) self.field = nf def 
get_local_maxima(self, refdim=0, th=NEGINF): """ Look for the local maxima of one dimension (refdim) of self.field Parameters ---------- refdim (int) the field dimension over which the maxima are looked after th = float, optional threshold so that only values above th are considered Returns ------- idx: array of shape (nmax) indices of the vertices that are local maxima depth: array of shape (nmax) topological depth of the local maxima : depth[idx[i]] = q means that idx[i] is a q-order maximum """ depth_all = self.local_maxima(refdim, th) idx = np.ravel(np.where(depth_all)) depth = depth_all[idx] return idx, depth def local_maxima(self, refdim=0, th=NEGINF): """Returns all the local maxima of a field Parameters ---------- refdim (int) field dimension over which the maxima are looked after th: float, optional threshold so that only values above th are considered Returns ------- depth: array of shape (nmax) a labelling of the vertices such that depth[v] = 0 if v is not a local maximum depth[v] = 1 if v is a first order maximum ... 
depth[v] = q if v is a q-order maximum """ refdim = int(refdim) if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError(refdim > self.shape[1]) depth = np.zeros(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field.T[refdim] >= th) initial_field = sf.field.T[refdim] sf.field = initial_field.astype(np.float64) # compute the depth in the subgraph ldepth = sf.V * np.ones(sf.V, np.int) for k in range(sf.V): dilated_field_old = sf.field.ravel().copy() sf.dilation(1) non_max = sf.field.ravel() > dilated_field_old ldepth[non_max] = np.minimum(k, ldepth[non_max]) if (non_max == False).all(): ldepth[sf.field.ravel() == initial_field] = np.maximum(k, 1) break # write all the depth values depth[self.field[:, refdim] >= th] = ldepth return depth def diffusion(self, nbiter=1): """diffusion of the field data in the weighted graph structure self.field is changed inplace Parameters ---------- nbiter: int, optional the number of iterations required Notes ----- The process is run for all the dimensions of the field """ nbiter = int(nbiter) adj = self.to_coo_matrix() for i in range(nbiter): self.field = adj * self.field def custom_watershed(self, refdim=0, th=NEGINF): """ customized watershed analysis of the field. 
Note that bassins are found around each maximum (and not minimum as conventionally) Parameters ---------- refdim: int, optional th: float optional, threshold of the field Returns ------- idx: array of shape (nbassins) indices of the vertices that are local maxima label : array of shape (self.V) labelling of the vertices according to their bassin """ import numpy.ma as ma if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) # compute the basins hneighb = sf.highest_neighbor() edges = np.vstack((hneighb, np.arange(sf.V))).T edges = np.vstack((edges, np.vstack((np.arange(sf.V), hneighb)).T)) aux = Graph(sf.V, edges.shape[0], edges) llabel = aux.cc() n_bassins = len(np.unique(llabel)) # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(n_bassins)]) return idx, label def threshold_bifurcations(self, refdim=0, th=NEGINF): """Analysis of the level sets of the field: Bifurcations are defined as changes in the topology in the level sets when the level (threshold) is varied This can been thought of as a kind of Morse analysis Parameters ---------- th: float, optional, threshold so that only values above th are considered Returns ------- idx: array of shape (nlsets) indices of the vertices that are local maxima height: array of shape (nlsets) the depth of the local maxima depth[idx[i]] = q means that idx[i] is a q-order maximum Note that this is also the diameter of the basins associated with local maxima parents: array of shape (nlsets) the label of the maximum which dominates each local maximum i.e. 
it describes the hierarchy of the local maxima label: array of shape (self.V) a labelling of thevertices according to their bassin """ import numpy.ma as ma if (np.size(self.field) == 0): raise ValueError('No field has been defined so far') if self.field.shape[1] - 1 < refdim: raise ValueError('refdim>field.shape[1]') label = - np.ones(self.V, np.int) # create a subfield(thresholding) sf = self.subfield(self.field[:, refdim] >= th) initial_field = sf.field[:, refdim].copy() sf.field = initial_field.copy() # explore the subfield order = np.argsort(- initial_field) rows = sf.to_coo_matrix().tolil().rows llabel = - np.ones(sf.V, np.int) parent, root = np.arange(2 * self.V), np.arange(2 * self.V) # q will denote the region index q = 0 for i in order: if (llabel[rows[i]] > - 1).any(): nlabel = np.unique(llabel[rows[i]]) if nlabel[0] == -1: nlabel = nlabel[1:] nlabel = np.unique(root[nlabel]) if len(nlabel) == 1: # we are at a regular point llabel[i] = nlabel[0] else: # we are at a saddle point llabel[i] = q parent[nlabel] = q root[nlabel] = q for j in nlabel: root[root == j] = q q += 1 else: # this is a new component llabel[i] = q q += 1 parent = parent[:q] # write all the depth values label[self.field[:, refdim] >= th] = llabel idx = np.array([ma.array( self.field[:, refdim], mask=(label != c)).argmax() for c in range(q)]) return idx, parent, label def constrained_voronoi(self, seed): """Voronoi parcellation of the field starting from the input seed Parameters ---------- seed: int array of shape(p), the input seeds Returns ------- label: The resulting labelling of the data Fixme ----- deal with graphs with several ccs """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') seed = seed.astype(np.int) weights = np.sqrt(np.sum((self.field[self.edges.T[0]] - self.field[self.edges.T[1]]) ** 2, 1)) g = WeightedGraph(self.V, self.edges, weights) label = g.voronoi_labelling(seed) return label def geodesic_kmeans(self, seeds=None, label=None, 
maxiter=100, eps=1.e-4, verbose=0): """ Geodesic k-means algorithm i.e. obtention of clusters that are topologically connected and minimally variable concerning the information of self.field Parameters ---------- seeds: array of shape(p), optional, initial indices of the seeds within the field if seeds==None the labels are used as initialization labels: array of shape(self.V) initial labels, optional, it is expected that labels take their values in a certain range (0..lmax) if Labels==None, this is not used if seeds==None and labels==None, an ewxception is raised maxiter: int, optional, maximal number of iterations eps: float, optional, increase of inertia at which convergence is declared Returns ------- seeds: array of shape (p), the final seeds label : array of shape (self.V), the resulting field label J: float, inertia value """ if np.size(self.field) == 0: raise ValueError('No field has been defined so far') if (seeds is None) and (label is None): raise ValueError('No initialization has been provided') k = np.size(seeds) inertia_old = NEGINF if seeds is None: k = label.max() + 1 if np.size(np.unique(label)) != k: raise ValueError('missing values, cannot proceed') seeds = np.zeros(k).astype(np.int) for j in range(k): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] else: k = np.size(seeds) for i in range(maxiter): # voronoi labelling label = self.constrained_voronoi(seeds) # update the seeds inertia = 0 pinteria = 0 for j in range(k): lj = np.nonzero(label == j)[0] pinteria += np.sum( (self.field[seeds[j]] - self.field[lj]) ** 2) cent = np.mean(self.field[lj], 0) tj = np.argmin(np.sum((cent - self.field[lj]) ** 2, 1)) seeds[j] = lj[tj] inertia += np.sum((cent - self.field[lj]) ** 2) if verbose: print(i, inertia) if np.absolute(inertia_old - inertia) < eps: break inertia_old = inertia return seeds, label, inertia def ward(self, nbcluster): """Ward's clustering of self 
Parameters ---------- nbcluster: int, the number of desired clusters Returns ------- label: array of shape (self.V) the resulting field label J (float): the resulting inertia """ from nipy.algorithms.clustering.hierarchical_clustering\ import ward_segment label, J = ward_segment(self, self.field, qmax=nbcluster) # compute the resulting inertia inertia = 0 for j in range(nbcluster): lj = np.nonzero(label == j)[0] cent = np.mean(self.field[lj], 0) inertia += np.sum((cent - self.field[lj]) ** 2) return label, inertia def copy(self): """ copy function """ return Field(self.V, self.edges.copy(), self.weights.copy(), self.field.copy()) def subfield(self, valid): """Returns a subfield of self, with only vertices such that valid > 0 Parameters ---------- valid: array of shape (self.V), nonzero for vertices to be retained Returns ------- F: Field instance, the desired subfield of self Notes ----- The vertices are renumbered as [1..p] where p = sum(valid>0) when sum(valid) == 0 then None is returned """ G = self.subgraph(valid) if G is None: return None field = self.field[valid] if len(G.edges) == 0: edges = np.array([[], []]).T else: edges = G.edges return Field(G.V, edges, G.weights, field)
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

'''
A simple thread safe timer queue implementation which has O(logn) time
complexity.
'''

try:
    import queue as Queue
except ImportError:
    import Queue
import logging
import threading
import traceback
from time import time

from .packages import sortedcontainers as sc

__all__ = ['Timer',
           'TimerQueueStruct',
           'TimerQueue']


class Timer(object):
    '''Timer wraps the callback and timestamp related attributes.

    :param callback: Arbitrary callable object.
    :type callback: ``callable object``
    :param when: The first expiration time, seconds since epoch.
    :type when: ``integer``
    :param interval: Timer interval, if equals 0, one time timer, otherwise
        the timer will be periodically executed
    :type interval: ``integer``
    :param ident: (optional) Timer identity.
    :type ident: ``integer``
    '''

    # class-wide counter used to hand out unique identities; guarded by
    # _lock so concurrent constructions never share an ident
    _ident = 0
    _lock = threading.Lock()

    def __init__(self, callback, when, interval, ident=None):
        self._callback = callback
        self.when = when
        self.interval = interval

        if ident is not None:
            self.ident = ident
        else:
            with Timer._lock:
                self.ident = Timer._ident + 1
                Timer._ident = Timer._ident + 1

    def update_expiration(self):
        '''Advance the expiration time by one interval (periodic timers).'''
        self.when += self.interval

    def __hash__(self):
        return hash(self.ident)

    def __eq__(self, other):
        return isinstance(other, Timer) and (self.ident == other.ident)

    # ordering is by (when, ident) so timers sort by expiration time with
    # the identity as a deterministic tie-breaker
    def __lt__(self, other):
        return (self.when, self.ident) < (other.when, other.ident)

    def __le__(self, other):
        return (self.when, self.ident) <= (other.when, other.ident)

    def __gt__(self, other):
        return (self.when, self.ident) > (other.when, other.ident)

    def __ge__(self, other):
        return (self.when, self.ident) >= (other.when, other.ident)

    def __call__(self):
        self._callback()


TEARDOWN_SENTINEL = None


class TimerQueueStruct(object):
    '''
    The underlying data structure for TimerQueue.  Not thread safe by
    itself; TimerQueue serializes access with its own lock.
    '''

    def __init__(self):
        self._timers = sc.SortedSet()
        self._cancelling_timers = {}

    def add_timer(self, callback, when, interval, ident):
        ''' Add timer to the data structure.

        :param callback: Arbitrary callable object.
        :type callback: ``callable object``
        :param when: The first expiration time, seconds since epoch.
        :type when: ``integer``
        :param interval: Timer interval, if equals 0, one time timer,
            otherwise the timer will be periodically executed
        :type interval: ``integer``
        :param ident: (optional) Timer identity.
        :type ident: ``integer``

        :returns: A timer object which should not be manipulated directly
            by clients. Used to delete/update the timer.
        :rtype: ``solnlib.timer_queue.Timer``
        '''

        timer = Timer(callback, when, interval, ident)
        self._timers.add(timer)
        return timer

    def remove_timer(self, timer):
        ''' Remove timer from data structure.

        :param timer: Timer object which is returned by
            ``TimerQueueStruct.add_timer``.
        :type timer: ``Timer``
        '''

        try:
            self._timers.remove(timer)
        except (ValueError, KeyError):
            # BUGFIX: SortedSet.remove raises KeyError (SortedList raises
            # ValueError), so both must be caught.  An absent timer means
            # it has already expired and been popped by
            # get_expired_timers() -- record it in the cancelling list so
            # reset_timers() will not reschedule it.  (Previously this
            # recording happened in the success branch, so a timer
            # cancelled while executing kept firing.)
            logging.info('Timer=%s is not in queue, move it to cancelling '
                         'list', timer.ident)
            self._cancelling_timers[timer.ident] = timer

    def get_expired_timers(self):
        ''' Get a list of expired timers.

        :returns: (next_expired_time, expired_timers) where expired_timers
            is a list of ``Timer``, empty if there are no expired timers.
        :rtype: ``tuple``
        '''

        next_expired_time = 0
        now = time()
        expired_timers = []
        for timer in self._timers:
            if timer.when <= now:
                expired_timers.append(timer)

        if expired_timers:
            # expired timers are the smallest elements, remove them in bulk
            del self._timers[:len(expired_timers)]

        if self._timers:
            next_expired_time = self._timers[0].when
        return (next_expired_time, expired_timers)

    def reset_timers(self, expired_timers):
        ''' Re-add the expired periodical timers to data structure for next
        round scheduling.

        :returns: True if there are timers added, False otherwise.
        :rtype: ``bool``
        '''

        has_new_timer = False
        cancelling_timers = self._cancelling_timers
        for timer in expired_timers:
            if timer.ident in cancelling_timers:
                # BUGFIX: was logging.INFO (an int constant), which raised
                # TypeError whenever a cancelled timer expired
                logging.info('Timer=%s has been cancelled', timer.ident)
                continue
            elif timer.interval:
                # Repeated timer
                timer.update_expiration()
                self._timers.add(timer)
                has_new_timer = True
        cancelling_timers.clear()
        return has_new_timer

    def check_and_execute(self):
        ''' Get expired timers and execute callbacks for the timers.

        :returns: duration of next expired timer.
        :rtype: ``float``
        '''

        (next_expired_time, expired_timers) = self.get_expired_timers()
        for timer in expired_timers:
            try:
                timer()
            except Exception:
                logging.error(traceback.format_exc())

        self.reset_timers(expired_timers)
        return _calc_sleep_time(next_expired_time)


class TimerQueue(object):
    '''A simple timer queue implementation.

    It runs a separate thread to handle timers.  Note: to effectively use
    this timer queue, the timer callback should be short, otherwise it
    will cause other timers's delay execution.  A typical use scenario in
    production is that the timers are just simple functions which inject
    themselves to a task queue and then they are picked up by a
    threading/process pool to execute, as shows below:

        Timers --enqueue---> TimerQueue --------expiration-----------
                                                                    |
                                                                   \\|/
        Threading/Process Pool <--- TaskQueue <--enqueue-- Timers' callback
        (nonblocking)

    Usage::

        >>> from solnlib import time_queue
        >>> tq = time_queue.TimerQueue()
        >>> tq.start()
        >>> t = tq.add_timer(my_func, time.time(), 10)
        >>> # do other stuff
        >>> tq.stop()
    '''

    def __init__(self):
        self._timers = TimerQueueStruct()
        self._lock = threading.Lock()
        self._wakeup_queue = Queue.Queue()
        self._thr = threading.Thread(target=self._check_and_execute)
        self._thr.daemon = True
        self._started = False

    def start(self):
        '''Start the timer queue.
        '''

        if self._started:
            return
        self._started = True

        self._thr.start()
        logging.info('TimerQueue started.')

    def stop(self):
        '''Stop the timer queue.
        '''

        if not self._started:
            return
        # BUGFIX: was "self._started = True", which left the queue marked
        # as running, so stop() was effectively a no-op flag-wise
        self._started = False

        self._wakeup(TEARDOWN_SENTINEL)
        self._thr.join()

    def add_timer(self, callback, when, interval, ident=None):
        ''' Add timer to the queue.

        :param callback: Arbitrary callable object.
        :type callback: ``callable object``
        :param when: The first expiration time, seconds since epoch.
        :type when: ``integer``
        :param interval: Timer interval, if equals 0, one time timer,
            otherwise the timer will be periodically executed
        :type interval: ``integer``
        :param ident: (optional) Timer identity.
        :type ident: ``integer``

        :returns: A timer object which should not be manipulated directly
            by clients. Used to delete/update the timer.
        '''

        with self._lock:
            timer = self._timers.add_timer(callback, when, interval, ident)
        # wake the worker so it recomputes its sleep for the new timer
        self._wakeup()
        return timer

    def remove_timer(self, timer):
        ''' Remove timer from the queue.

        :param timer: Timer object which is returned by
            ``TimerQueue.add_timer``.
        :type timer: ``Timer``
        '''

        with self._lock:
            self._timers.remove_timer(timer)

    def _check_and_execute(self):
        wakeup_queue = self._wakeup_queue
        while 1:
            (next_expired_time, expired_timers) = self._get_expired_timers()
            for timer in expired_timers:
                try:
                    # Note, please make timer callback effective/short
                    timer()
                except Exception:
                    logging.error(traceback.format_exc())

            self._reset_timers(expired_timers)

            sleep_time = _calc_sleep_time(next_expired_time)
            try:
                wakeup = wakeup_queue.get(timeout=sleep_time)
                if wakeup is TEARDOWN_SENTINEL:
                    break
            except Queue.Empty:
                pass
        logging.info('TimerQueue stopped.')

    def _get_expired_timers(self):
        with self._lock:
            return self._timers.get_expired_timers()

    def _reset_timers(self, expired_timers):
        with self._lock:
            has_new_timer = self._timers.reset_timers(expired_timers)

        if has_new_timer:
            self._wakeup()

    def _wakeup(self, something='not_None'):
        self._wakeup_queue.put(something)


def _calc_sleep_time(next_expired_time):
    # returns: seconds to sleep until the next timer is due; 0.1 when a
    # timer is already overdue; 1 when there is no pending timer at all
    if next_expired_time:
        now = time()
        if now < next_expired_time:
            sleep_time = next_expired_time - now
        else:
            sleep_time = 0.1
    else:
        sleep_time = 1
    return sleep_time
#!/usr/bin/env python # # KEISER 2018-06-08 # # Script to parse myNCBI My Bibliography medline export to jekyll page MD # # mybib.nbib source: # https://www.ncbi.nlm.nih.gov/myncbi/collections/mybibliography/ # download as "text file (MEDLINE format)" (.mybib) # # preprints.csv source: # <manual> from optparse import OptionParser import os import unicodecsv import csv import datetime import itertools from Bio import Medline DEF_OUTFILE = 'publications.md' DOI_URLBASE = 'https://doi.org' PMID_URLBASE = 'https://www.ncbi.nlm.nih.gov/pubmed' # preprint csv columns PCOL_NCBI_ID = 0 PCOL_JOUR = 1 PCOL_JOURNID = 2 PCOL_AUTH = 3 PCOL_TITLE = 4 PCOL_DATE = 5 PCOL_URL = 6 PCOL_DOI = 7 # OUTPUT file templates PG_HDR_TEMPLATE = """--- # this is autogenerated: do not edit title: Publications layout: splash permalink: /publications/ excerpt: "Selected lab publications, with links to preprints and supporting content." tags: [publications, papers, preprints, manuscripts] header: image: /assets/images/bar-network.png intro: - title: Publications %s --- {%% include feature_row id="intro" type="center" %%} """ F_ROW_HDR_TEMPLATE = """feature_row%d: %s """ F_ROW_HDR_ITEM = """ - image_path: /assets/images/papers/%s alt: >- %s title: >- %s excerpt: >- %s url: "%s" doi: "%s" btn_label: >- doi &nbsp; <i class="fas fa-external-link-alt"></i> btn_class: "btn--primary" %s""" F_ROW_HDR_PREPRINT = """ url2: "%s" btn2_label: >- %s &nbsp; <i class="fas fa-external-link-alt"></i> btn2_class: "btn--info" """ # js script is for https://www.altmetric.com/products/altmetric-badges/ F_ROW_INCL_TEMPLATE = """ <script type="text/javascript" src="https://d1bxh8uas1mnw7.cloudfront.net/assets/embed.js"></script> {%% include feature_row_paper.html id="feature_row%d" %%} """ PAPER_TEMPLATE = '<span itemprop="isPartOf" itemscope itemtype="http://schema.org/Periodical"><strong>%s</strong></span>. ' +\ '<span itemprop="datePublished">%s</span>. ' +\ '<span itemprop="author">%s</span>.' 
# column layout of the structured-data CSV written alongside the page
CSV_HEADER = ['id', 'title', 'journal', 'date', 'authors', 'link',
              'doi_suffix', 'preprint_url', 'preprint_journal',
              'jekyll_date', 'type']


def make_htmlsafe(txt):
    "make text html safe (primitive)"
    # only double quotes are escaped; output is interpolated into
    # double-quoted YAML/HTML attributes in the templates above
    dangerlist = {
        '"': "&quot;"
    }
    for d, r in dangerlist.iteritems():
        txt = txt.replace(d, r)
    return txt


# https://stackoverflow.com/questions/1624883/alternative-way-to-split-a-list-into-groups-of-n
def grouper(n, iterable, fillvalue=None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    # the last group is padded with fillvalue (None here), so consumers
    # must filter out the padding entries
    args = [iter(iterable)] * n
    return itertools.izip_longest(*args, fillvalue=fillvalue)
# end grouper


def aid_scrub(aid):
    "put a AID (e.g., DOI) into filename compatible format"
    # DOIs contain '/', which cannot appear in an image filename
    return aid.replace('/', '.')
# end aid_scrub


def get_id_url(record):
    "pull DOI if possible, otherwise PMID: return (id, url, doi)"
    if 'AID' in record:
        # keep only the AID entry that carries the DOI; exactly one expected
        aid = filter(lambda x: x.lower().find('doi') != -1, record['AID'])
        assert len(aid) == 1
        # AID entries look like "10.xxxx/yyy [doi]"; keep the bare DOI
        aid = aid[0].split()[0]
        print '\tdoi', aid
        return aid_scrub(aid), '%s/%s' % (DOI_URLBASE, aid), aid
    else:
        # no DOI available: fall back to the PubMed ID and an empty doi
        pmid = record['PMID']
        print '\tpmid', pmid
        return pmid, '%s/%s' % (PMID_URLBASE, pmid), ""
# end get_id_url


def convert_date(datestr):
    "convert YYYY Mon Day format into Jekyll post date format"
    # medline dates come with decreasing precision; try the most specific
    # format first and progressively fall back to year-only
    try:
        return datetime.datetime.strptime(
            datestr, "%Y %b %d").strftime("%Y-%m-%d")
    except ValueError:
        try:
            return datetime.datetime.strptime(
                datestr, "%Y %b").strftime("%Y-%m-%d")
        except ValueError:
            return datetime.datetime.strptime(
                datestr, "%Y").strftime("%Y-%m-%d")
# end convert_date


def main(fmedline, fpreprint, outfile, datafile):
    """Merge the medline export and the preprint CSV into the jekyll
    publications page (outfile) and, optionally, a structured CSV
    (datafile)."""
    # read the manually-maintained preprint CSV; rows carrying an NCBI id
    # are indexed by scrubbed id so they can be matched to medline records
    with open(fpreprint) as fi:
        reader = csv.reader(fi)
        print 'preprint: skipped header: ', reader.next()
        p_records = []
        aid2preprint = {}
        for row in reader:
            aid = aid_scrub(row[PCOL_NCBI_ID])
            if aid != '':
                aid2preprint[aid] = row
            else:
                p_records.append(row)
    print 'read %d preprint records, %d with ncbi ids' % (
        len(p_records) + len(aid2preprint), len(aid2preprint))

    with open(fmedline) as fi:
        m_records = list(Medline.parse(fi))

    publications = []
    # read preprints/manual, and add first
    for record in p_records:
        # synthetic id: journal name (spaces -> underscores) + journal id
        pid = '.'.join([record[PCOL_JOUR].replace(' ', '_'),
                        record[PCOL_JOURNID]])
        publications.append([
            pid,
            make_htmlsafe(record[PCOL_TITLE]),
            record[PCOL_JOUR],
            record[PCOL_DATE],
            record[PCOL_AUTH],
            '',
            record[PCOL_DOI],
            (record[PCOL_URL], record[PCOL_JOUR]),
            convert_date(record[PCOL_DATE]),
            'preprint'])
    # read papers from NCBI
    for record in m_records:
        aid, url, doi = get_id_url(record)
        pprint = None
        if aid in aid2preprint:
            # this paper has a matching preprint: link it as (url, journal)
            pp = aid2preprint[aid]
            pprint = (pp[PCOL_URL], pp[PCOL_JOUR])
        publications.append([
            aid,
            make_htmlsafe(record['TI'].strip('.')),
            record['TA'],
            record['DP'],
            ", ".join(record['AU']),
            url,
            doi,
            pprint,
            convert_date(record['DP']),
            'ncbi'])
    # sort all by date
    publications.sort(key=lambda x: x[8], reverse=True)
    print 'read %d medline records' % (len(m_records))
    print 'merged into %d publication entries (expected %d)' % (
        len(publications), len(p_records) + len(m_records))

    # publication entry layout:
    # ['id', 'title', 'journal', 'date', 'authors', 'link', 'doi',
    #  preprint, 'jekyll_date', 'type']
    # render the page as rows of three papers each
    frows = []
    for i, p3 in enumerate(grouper(3, publications)):
        items = []
        # drop the None padding added by grouper for the final group
        for p in filter(lambda x: x is not None, p3):
            pp = p[7]
            if pp is not None:
                pprint = F_ROW_HDR_PREPRINT % pp
            else:
                pprint = ''
            items.append(F_ROW_HDR_ITEM % (
                '%s.jpg' % p[0],
                p[1],
                '<span itemprop="name">%s</span>' % p[1],
                PAPER_TEMPLATE % (p[2], p[3], p[4]),  # this is "excerpt"
                p[5],
                p[6],
                pprint,
            ))
        frows.append((F_ROW_HDR_TEMPLATE % (i, ''.join(items))))
    with open(outfile, 'wb') as fo:
        fo.write(PG_HDR_TEMPLATE % ''.join(frows))
        for i in xrange(len(frows)):
            fo.write(F_ROW_INCL_TEMPLATE % i)

    if datafile is not None:
        print 'outputting structured papers to %s as well' % datafile
        with open(datafile, 'wb') as fo:
            writer = unicodecsv.writer(fo, encoding='utf-8')
            writer.writerow(CSV_HEADER)
            for row in publications:
                row1 = row[:7]
                row2 = row[8:]
                pp = row[7]
                if pp is not None:
                    # unpack pprint url from pprint journal
                    # NOTE(review): PCOL_DOI == 7 happens to be the index of
                    # the pprint tuple in a publication entry, so this is
                    # list(pp) by coincidence of indices -- confirm before
                    # reordering either layout
                    pp = list(row[PCOL_DOI])
                else:
                    # populate pprint info with blanks
                    pp = ['', '']
                writer.writerow(row1 + pp + row2)
# end main


if __name__ == '__main__':
    usage = 'usage: %prog [options] myncbi.bib preprints.csv'
    parser = OptionParser(usage)
    parser.add_option('-o', '--outfile', dest='outfile', metavar='FILE',
                      help='Output to FILE (default %default)',
                      action='store', default=DEF_OUTFILE)
    parser.add_option('-d', '--datafile', dest='datafile', metavar='FILE',
                      help='Output copy of structured data to FILE (csv) (default %default)',
                      action='store', default=None)
    options, args = parser.parse_args()
    try:
        arg1, arg2, = args
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt here
        parser.error("Incorrect number of arguments")
    main(arg1, arg2, options.outfile, options.datafile)
# end __main__
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Integration tests for the review API handlers: fetching a single review,
# listing the last reviews (optionally filtered by domain), and counting
# reviews completed in the last hour.

import sys
import calendar
from datetime import datetime, timedelta

from mock import patch
from preggy import expect
from tornado.testing import gen_test
from tornado.httpclient import HTTPError
from ujson import loads

from tests.unit.base import ApiTestCase
from tests.fixtures import PageFactory, ReviewFactory, KeyFactory, DomainFactory


class TestReviewHandler(ApiTestCase):
    """Tests for the /page/<uuid>/review/<uuid> endpoint."""

    @gen_test
    def test_invalid_page_uuid_returns_404(self):
        page = PageFactory.create()

        try:
            yield self.authenticated_fetch(
                '/page/%s/review/invalid' % page.uuid
            )
        except HTTPError:
            # Tornado raises HTTPError for non-2xx responses; grab it via
            # sys.exc_info() and assert on the 404 details.
            err = sys.exc_info()[1]
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Not found')
        else:
            assert False, 'Should not have got this far'

    @gen_test
    def test_invalid_review_uuid_returns_redirect(self):
        page = PageFactory.create()
        review = ReviewFactory.create(page=page)

        # Requesting the zero UUID still answers 200 with the page's actual
        # review payload (the handler falls back to the existing review).
        response = yield self.authenticated_fetch(
            '/page/%s/review/%s' % (page.uuid, self.ZERO_UUID)
        )

        expect(response.code).to_equal(200)
        expect(str(review.uuid)).to_equal(loads(response.body).get('uuid'))

    @gen_test
    def test_can_get_review(self):
        dt = datetime(2010, 11, 12, 13, 14, 15)
        dt_timestamp = calendar.timegm(dt.utctimetuple())

        review = ReviewFactory.create(created_date=dt)

        key1 = KeyFactory.create(name='fact')
        review.add_fact(key1, 'value')
        key2 = KeyFactory.create(name='violation')
        review.add_violation(key2, 'value', 100, review.domain)
        self.db.flush()

        response = yield self.authenticated_fetch(
            '/page/%s/review/%s' % (review.page.uuid, review.uuid)
        )

        expect(response.code).to_equal(200)

        # Full serialized shape of a review, including facts and violations.
        expected = {
            'domain': review.domain.name,
            'page': review.page.to_dict(),
            'uuid': str(review.uuid),
            'isComplete': False,
            'facts': [
                {u'key': u'fact', u'value': u'value', u'title': u'unknown',
                 u'unit': u'value', u'category': u'unknown'}
            ],
            'violations': [
                {u'points': 100, u'description': u'value', u'key': u'violation',
                 u'title': u'undefined', u'category': 'undefined'}
            ],
            'createdAt': dt_timestamp,
            'completedAt': None,
            'violationPoints': 100,
            'violationCount': 1,
        }
        expect(loads(response.body)).to_be_like(expected)


class TestLastReviewsHandler(ApiTestCase):
    """Tests for the /last-reviews listing endpoint."""

    @gen_test
    def test_can_get_last_reviews(self):
        page = PageFactory.create()

        date_now = datetime(2013, 11, 12, 13, 25, 27)

        review = ReviewFactory.create(
            page=page,
            is_active=True,
            is_complete=False,
            completed_date=date_now,
            created_date=date_now)

        key1 = KeyFactory.create(name='fact')
        review.add_fact(key1, 'value')
        key2 = KeyFactory.create(name='violation')
        review.add_violation(key2, 'value', 100, page.domain)

        # Flip to complete only after the facts/violations are attached.
        review.is_complete = True
        self.db.flush()

        response = yield self.authenticated_fetch('/last-reviews')
        expect(response.code).to_equal(200)

        dt = calendar.timegm(date_now.utctimetuple())

        expected = [{
            'domain': review.domain.name,
            'page': page.to_dict(),
            'uuid': str(review.uuid),
            'isComplete': True,
            'facts': [
                {u'key': u'fact', u'unit': u'value', u'value': u'value',
                 u'title': u'unknown', u'category': u'unknown'}
            ],
            'violations': [
                {u'points': 100, u'description': u'value', u'key': u'violation',
                 u'title': u'undefined', u'category': 'undefined'}
            ],
            'createdAt': dt,
            'completedAt': dt,
            'violationCount': 1,
        }]
        expect(loads(response.body)).to_be_like(expected)

    @gen_test
    def test_can_get_last_reviews_with_domain_filter(self):
        dt1 = datetime(2010, 10, 10, 10, 10, 10)
        dt2 = datetime(2010, 10, 11, 10, 10, 10)
        dt3 = datetime(2010, 10, 12, 10, 10, 10)

        domain1 = DomainFactory.create()
        domain2 = DomainFactory.create()

        page1 = PageFactory.create(domain=domain1)
        page2 = PageFactory.create(domain=domain2)

        # Three completed reviews per domain (six total).
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page1,
            completed_date=dt1)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page1,
            completed_date=dt2)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page1,
            completed_date=dt3)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page2,
            completed_date=dt1)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page2,
            completed_date=dt2)
        ReviewFactory.create(
            is_active=True,
            is_complete=True,
            page=page2,
            completed_date=dt3)

        # Filtered by domain: only domain1's three reviews come back.
        response = yield self.authenticated_fetch(
            '/last-reviews?domain_filter=%s' % domain1.name
        )
        expect(response.code).to_equal(200)
        expect(len(loads(response.body))).to_be_like(3)
        expect(all([x['domain'] == domain1.name for x in loads(response.body)])).to_be_true()

        # Unfiltered: all six reviews come back.
        response = yield self.authenticated_fetch('/last-reviews')
        expect(response.code).to_equal(200)
        expect(len(loads(response.body))).to_be_like(6)


class TestLastReviewsInLastHourHandler(ApiTestCase):
    """Tests for the /reviews-in-last-hour counting endpoint."""

    @gen_test
    @patch('datetime.datetime')
    def test_can_get_last_reviews_count_in_last_hour(self, datetime_mock):
        # Freeze "now" so the one-hour window is deterministic.
        dt = datetime(2014, 2, 14, 15, 0, 30)
        datetime_mock.utcnow.return_value = dt

        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=1)
        )
        first_date = dt - timedelta(minutes=59)
        ReviewFactory.create(
            is_active=True,
            completed_date=first_date
        )
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=5)
        )
        # 61 minutes old: outside the one-hour window, must not be counted.
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=61)
        )
        self.db.flush()

        # NOTE(review): this test uses the callback/self.stop/self.wait style
        # while the rest of the module uses `yield` — presumably required
        # because of the datetime patch; confirm before unifying styles.
        self.authenticated_fetch('/reviews-in-last-hour', callback=self.stop)
        response = self.wait()
        expect(response.code).to_equal(200)

        result = loads(response.body)
        expect(result['count']).to_equal(3)
        # 'ellapsed' (sic) is the age in seconds of the oldest counted review.
        expect(round(result['ellapsed'], 0)).to_be_like(59 * 60)

    @gen_test
    def test_can_get_last_reviews_count_in_last_hour_filter_by_domain(self):
        dt = datetime.utcnow()

        domain1 = DomainFactory.create()
        domain2 = DomainFactory.create()

        page1 = PageFactory.create(domain=domain1)
        page2 = PageFactory.create(domain=domain2)

        # Two recent reviews per domain.
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=1),
            page=page1
        )
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=59),
            page=page1
        )
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=1),
            page=page2
        )
        ReviewFactory.create(
            is_active=True,
            completed_date=dt - timedelta(minutes=59),
            page=page2
        )
        self.db.flush()

        response = yield self.authenticated_fetch(
            '/reviews-in-last-hour?domain_filter=%s' % domain1.name
        )
        expect(response.code).to_equal(200)

        result = loads(response.body)
        expect(result['count']).to_equal(2)
"""This component provides basic support for Foscam IP cameras."""
from __future__ import annotations

import asyncio

from libpyfoscam import FoscamCamera
import voluptuous as vol

from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_STREAM, Camera
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
    CONF_HOST,
    CONF_NAME,
    CONF_PASSWORD,
    CONF_PORT,
    CONF_USERNAME,
)
from homeassistant.helpers import config_validation as cv, entity_platform

from .const import (
    CONF_RTSP_PORT,
    CONF_STREAM,
    DOMAIN,
    LOGGER,
    SERVICE_PTZ,
    SERVICE_PTZ_PRESET,
)

# Legacy YAML platform schema; kept only to import old configs (see
# async_setup_platform below).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required("ip"): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Optional(CONF_NAME, default="Foscam Camera"): cv.string,
        vol.Optional(CONF_PORT, default=88): cv.port,
        vol.Optional(CONF_RTSP_PORT): cv.port,
    }
)

# PTZ movement directions accepted by the SERVICE_PTZ service.
DIR_UP = "up"
DIR_DOWN = "down"
DIR_LEFT = "left"
DIR_RIGHT = "right"
DIR_TOPLEFT = "top_left"
DIR_TOPRIGHT = "top_right"
DIR_BOTTOMLEFT = "bottom_left"
DIR_BOTTOMRIGHT = "bottom_right"

# Maps each direction to the corresponding libpyfoscam method name; invoked
# via getattr in async_perform_ptz.
MOVEMENT_ATTRS = {
    DIR_UP: "ptz_move_up",
    DIR_DOWN: "ptz_move_down",
    DIR_LEFT: "ptz_move_left",
    DIR_RIGHT: "ptz_move_right",
    DIR_TOPLEFT: "ptz_move_top_left",
    DIR_TOPRIGHT: "ptz_move_top_right",
    DIR_BOTTOMLEFT: "ptz_move_bottom_left",
    DIR_BOTTOMRIGHT: "ptz_move_bottom_right",
}

# Default seconds to keep moving before ptz_stop_run is issued.
DEFAULT_TRAVELTIME = 0.125

ATTR_MOVEMENT = "movement"
ATTR_TRAVELTIME = "travel_time"
ATTR_PRESET_NAME = "preset_name"

PTZ_GOTO_PRESET_COMMAND = "ptz_goto_preset"


async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up a Foscam IP Camera.

    Deprecated YAML path: translates the platform config into a config-entry
    import flow and does not add any entity itself.
    """
    LOGGER.warning(
        "Loading foscam via platform config is deprecated, it will be automatically imported; Please remove it afterwards"
    )

    config_new = {
        CONF_NAME: config[CONF_NAME],
        CONF_HOST: config["ip"],
        CONF_PORT: config[CONF_PORT],
        CONF_USERNAME: config[CONF_USERNAME],
        CONF_PASSWORD: config[CONF_PASSWORD],
        CONF_STREAM: "Main",
        # 554 is the fallback RTSP port when the YAML config omitted it.
        CONF_RTSP_PORT: config.get(CONF_RTSP_PORT, 554),
    }

    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=config_new
        )
    )


async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add a Foscam IP camera from a config entry."""
    platform = entity_platform.async_get_current_platform()

    # Entity services: PTZ movement and PTZ preset recall.
    platform.async_register_entity_service(
        SERVICE_PTZ,
        {
            vol.Required(ATTR_MOVEMENT): vol.In(
                [
                    DIR_UP,
                    DIR_DOWN,
                    DIR_LEFT,
                    DIR_RIGHT,
                    DIR_TOPLEFT,
                    DIR_TOPRIGHT,
                    DIR_BOTTOMLEFT,
                    DIR_BOTTOMRIGHT,
                ]
            ),
            vol.Optional(ATTR_TRAVELTIME, default=DEFAULT_TRAVELTIME): cv.small_float,
        },
        "async_perform_ptz",
    )

    platform.async_register_entity_service(
        SERVICE_PTZ_PRESET,
        {
            vol.Required(ATTR_PRESET_NAME): cv.string,
        },
        "async_perform_ptz_preset",
    )

    camera = FoscamCamera(
        config_entry.data[CONF_HOST],
        config_entry.data[CONF_PORT],
        config_entry.data[CONF_USERNAME],
        config_entry.data[CONF_PASSWORD],
        verbose=False,
    )

    async_add_entities([HassFoscamCamera(camera, config_entry)])


class HassFoscamCamera(Camera):
    """An implementation of a Foscam IP camera."""

    def __init__(self, camera, config_entry):
        """Initialize a Foscam camera.

        camera: an already-constructed libpyfoscam FoscamCamera session.
        config_entry: the config entry carrying credentials and options.
        """
        super().__init__()

        self._foscam_session = camera
        self._name = config_entry.title
        self._username = config_entry.data[CONF_USERNAME]
        self._password = config_entry.data[CONF_PASSWORD]
        self._stream = config_entry.data[CONF_STREAM]
        self._unique_id = config_entry.entry_id
        self._rtsp_port = config_entry.data[CONF_RTSP_PORT]
        self._motion_status = False

    async def async_added_to_hass(self):
        """Handle entity addition to hass."""
        # Get motion detection status
        ret, response = await self.hass.async_add_executor_job(
            self._foscam_session.get_motion_detect_config
        )

        if ret == -3:
            # -3 is the library's return code when the configured user lacks
            # admin rights (see matching checks in the motion-detection
            # setters below).
            LOGGER.info(
                "Can't get motion detection status, camera %s configured with non-admin user",
                self._name,
            )

        elif ret != 0:
            LOGGER.error(
                "Error getting motion detection status of %s: %s", self._name, ret
            )

        else:
            # NOTE(review): assumes the config call answers 1 when motion
            # detection is enabled — confirm against libpyfoscam.
            self._motion_status = response == 1

    @property
    def unique_id(self):
        """Return the entity unique ID."""
        return self._unique_id

    def camera_image(
        self, width: int | None = None, height: int | None = None
    ) -> bytes | None:
        """Return a still image response from the camera."""
        # Send the request to snap a picture and return raw jpg data
        # Handle exception if host is not reachable or url failed
        result, response = self._foscam_session.snap_picture_2()
        if result != 0:
            return None

        return response

    @property
    def supported_features(self):
        """Return supported features."""
        # Streaming is only offered when an RTSP port is configured.
        if self._rtsp_port:
            return SUPPORT_STREAM
        return None

    async def stream_source(self):
        """Return the stream source."""
        if self._rtsp_port:
            return f"rtsp://{self._username}:{self._password}@{self._foscam_session.host}:{self._rtsp_port}/video{self._stream}"
        return None

    @property
    def motion_detection_enabled(self):
        """Camera Motion Detection Status."""
        return self._motion_status

    def enable_motion_detection(self):
        """Enable motion detection in camera."""
        try:
            ret = self._foscam_session.enable_motion_detection()

            if ret != 0:
                if ret == -3:
                    LOGGER.info(
                        "Can't set motion detection status, camera %s configured with non-admin user",
                        self._name,
                    )
                return

            self._motion_status = True
        except TypeError:
            # libpyfoscam raises TypeError on devices without this feature.
            LOGGER.debug(
                "Failed enabling motion detection on '%s'. Is it supported by the device?",
                self._name,
            )

    def disable_motion_detection(self):
        """Disable motion detection."""
        try:
            ret = self._foscam_session.disable_motion_detection()

            if ret != 0:
                if ret == -3:
                    LOGGER.info(
                        "Can't set motion detection status, camera %s configured with non-admin user",
                        self._name,
                    )
                return

            self._motion_status = False
        except TypeError:
            # libpyfoscam raises TypeError on devices without this feature.
            LOGGER.debug(
                "Failed disabling motion detection on '%s'. Is it supported by the device?",
                self._name,
            )

    async def async_perform_ptz(self, movement, travel_time):
        """Perform a PTZ action on the camera.

        Starts the movement, waits travel_time seconds, then stops.
        """
        LOGGER.debug("PTZ action '%s' on %s", movement, self._name)

        movement_function = getattr(self._foscam_session, MOVEMENT_ATTRS[movement])

        ret, _ = await self.hass.async_add_executor_job(movement_function)

        if ret != 0:
            LOGGER.error("Error moving %s '%s': %s", movement, self._name, ret)
            return

        await asyncio.sleep(travel_time)

        ret, _ = await self.hass.async_add_executor_job(
            self._foscam_session.ptz_stop_run
        )

        if ret != 0:
            LOGGER.error("Error stopping movement on '%s': %s", self._name, ret)
            return

    async def async_perform_ptz_preset(self, preset_name):
        """Perform a PTZ preset action on the camera."""
        LOGGER.debug("PTZ preset '%s' on %s", preset_name, self._name)

        preset_function = getattr(self._foscam_session, PTZ_GOTO_PRESET_COMMAND)

        ret, _ = await self.hass.async_add_executor_job(preset_function, preset_name)

        if ret != 0:
            LOGGER.error(
                "Error moving to preset %s on '%s': %s", preset_name, self._name, ret
            )
            return

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name
# Metadata models describing the database schemas, tables, columns and SQL
# functions exposed by the application, with per-object access control.

from django.db import models
from django.contrib.auth.models import Group
from django.utils.translation import ugettext_lazy as _

from jsonfield import JSONField

from django.conf import settings

# NOTE(review): ACCESS_LEVEL_CHOICES is imported here but the fields below use
# settings.ACCESS_LEVEL_CHOICES — confirm which one is intended.
from daiquiri.core.constants import ACCESS_LEVEL_CHOICES
from daiquiri.core.managers import AccessLevelManager


class Schema(models.Model):
    """A database schema exposed through the application."""

    # Manager filtering objects by the requesting user's access level.
    objects = AccessLevelManager()

    order = models.IntegerField(
        default=0,
        null=True, blank=True,
        verbose_name=_('Order'),
        help_text=_('Position in lists.')
    )
    name = models.CharField(
        max_length=256,
        verbose_name=_('Name'),
        help_text=_('Name of the schema on the database server.')
    )
    title = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Title'),
        help_text=_('Human readable title of the schema.')
    )
    description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Description'),
        help_text=_('A brief description of the schema to be displayed in the user interface.')
    )
    long_description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Long description'),
        help_text=_('A more extensive description of the schema to be displayed on the public schema page.')
    )
    attribution = models.TextField(
        null=True, blank=True,
        verbose_name=_('Attribution'),
        help_text=_('The desired attribution for the schema.')
    )
    license = models.CharField(
        max_length=8, choices=settings.LICENSE_CHOICES,
        null=True, blank=True,
        verbose_name=_('License')
    )
    doi = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Digital object identifier')
    )
    related_identifiers = JSONField(
        null=True, blank=True,
        verbose_name=_('Related Identifiers'),
    )
    utype = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('IVOA Utype'),
    )
    creators = JSONField(
        null=True, blank=True,
        verbose_name=_('Creators'),
    )
    contributors = JSONField(
        null=True, blank=True,
        verbose_name=_('Contributors'),
    )
    published = models.DateField(
        null=True, blank=True,
        verbose_name=_('Published'),
    )
    updated = models.DateField(
        null=True, blank=True,
        verbose_name=_('Updated'),
    )
    # Access level for the schema's data; metadata_access_level governs
    # visibility of the schema's description itself.
    access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Access level')
    )
    metadata_access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Metadata access level')
    )
    groups = models.ManyToManyField(
        Group, blank=True,
        verbose_name=_('Groups'),
        help_text=_('The groups which have access to the schema.')
    )

    class Meta:
        ordering = ('order', )

        verbose_name = _('Schema')
        verbose_name_plural = _('Schemas')

    def __str__(self):
        return self.name

    @property
    def query_strings(self):
        # Strings a query processor can use to refer to this schema.
        return [self.name]

    @property
    def license_label(self):
        # NOTE(review): raises KeyError when license is None/unset — confirm
        # callers only use this for schemas with a license.
        return dict(settings.LICENSE_CHOICES)[self.license]

    @property
    def license_url(self):
        return settings.LICENSE_URLS[self.license]


class Table(models.Model):
    """A table or view within a Schema."""

    TYPE_TABLE = 'table'
    TYPE_VIEW = 'view'
    TYPE_CHOICES = (
        (TYPE_TABLE, _('table')),
        (TYPE_VIEW, _('view'))
    )

    objects = AccessLevelManager()

    schema = models.ForeignKey(
        Schema, related_name='tables', on_delete=models.CASCADE,
        verbose_name=_('Database'),
        help_text=_('Database the table belongs to.')
    )
    order = models.IntegerField(
        null=True, blank=True,
        verbose_name=_('Order'),
        help_text=_('Position in lists.')
    )
    name = models.CharField(
        max_length=256,
        verbose_name=_('Name'),
        help_text=_('Identifier of the table on the database server.')
    )
    title = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Title'),
        help_text=_('Human readable title of the table.')
    )
    description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Description'),
        help_text=_('A brief description of the table to be displayed in the user interface.')
    )
    long_description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Long description'),
        help_text=_('A more extensive description of the table to be displayed on the public database page.')
    )
    attribution = models.TextField(
        null=True, blank=True,
        verbose_name=_('Attribution'),
        help_text=_('The desired attribution for the table.')
    )
    license = models.CharField(
        max_length=8, choices=settings.LICENSE_CHOICES,
        null=True, blank=True,
        verbose_name=_('License')
    )
    doi = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Digital object identifier')
    )
    related_identifiers = JSONField(
        null=True, blank=True,
        verbose_name=_('Related Identifiers'),
    )
    type = models.CharField(
        max_length=8, choices=TYPE_CHOICES,
        verbose_name=_('Type of table')
    )
    nrows = models.BigIntegerField(
        null=True, blank=True,
        verbose_name=_('Number of rows in the table')
    )
    size = models.BigIntegerField(
        null=True, blank=True,
        verbose_name=_('Size of the table in bytes')
    )
    utype = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('IVOA Utype')
    )
    creators = JSONField(
        null=True, blank=True,
        verbose_name=_('Creators'),
    )
    contributors = JSONField(
        null=True, blank=True,
        verbose_name=_('Contributors'),
    )
    published = models.DateField(
        null=True, blank=True,
        verbose_name=_('Published'),
    )
    updated = models.DateField(
        null=True, blank=True,
        verbose_name=_('Updated'),
    )
    access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Access level')
    )
    metadata_access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Metadata access level')
    )
    groups = models.ManyToManyField(
        Group, blank=True,
        verbose_name=_('Groups'),
        help_text=_('The groups which have access to the table.')
    )

    class Meta:
        ordering = ('schema__order', 'order', )

        verbose_name = _('Table')
        verbose_name_plural = _('Tables')

    def __str__(self):
        # Fully qualified "schema.table" name.
        return self.schema.name + '.' + self.name

    @property
    def query_strings(self):
        return [self.schema.name, self.name]

    @property
    def license_label(self):
        # NOTE(review): raises KeyError when license is None/unset.
        return dict(settings.LICENSE_CHOICES)[self.license]

    @property
    def license_url(self):
        return settings.LICENSE_URLS[self.license]


class Column(models.Model):
    """A column within a Table."""

    objects = AccessLevelManager()

    table = models.ForeignKey(
        Table, related_name='columns', on_delete=models.CASCADE,
        help_text=_('Table the column belongs to.')
    )
    order = models.IntegerField(
        null=True, blank=True,
        verbose_name=_('Order'),
        help_text=_('Position in lists.')
    )
    name = models.CharField(
        max_length=256,
        verbose_name=_('Name'),
        help_text=_('Identifier of the column on the database server.')
    )
    description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Description'),
        help_text=_('A brief description of the column to be displayed in the user interface.')
    )
    unit = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Unit')
    )
    ucd = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('IVOA UCDs')
    )
    utype = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('IVOA Utype')
    )
    datatype = models.CharField(
        max_length=256, null=True, blank=True,
        verbose_name=_('Datatype'),
        help_text=_('The datatype of the column on the database server.')
    )
    arraysize = models.IntegerField(
        null=True, blank=True,
        verbose_name=_('Arraysize'),
        help_text=_('The length of variable length datatypes, e.g. varchar(256).')
    )
    principal = models.BooleanField(
        default=False,
        verbose_name=_('Principal'),
        help_text=_('Designates whether the column is considered a core part of the content.')
    )
    indexed = models.BooleanField(
        default=False,
        verbose_name=_('Indexed'),
        help_text=_('Designates whether the column is indexed.')
    )
    std = models.BooleanField(
        default=False,
        verbose_name=_('Standard'),
        help_text=_('Designates whether the column is defined by some standard.')
    )
    index_for = models.CharField(
        max_length=256, blank=True, default='',
        verbose_name=_('Index for'),
        help_text=_('The columns which this column is an index for (e.g. for pgSphere).')
    )
    access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Access level')
    )
    metadata_access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Metadata access level')
    )
    groups = models.ManyToManyField(
        Group, blank=True,
        verbose_name=_('Groups'),
        help_text=_('The groups which have access to the column.')
    )

    class Meta:
        ordering = ('table__schema__order', 'table__order', 'order', )

        verbose_name = _('Column')
        verbose_name_plural = _('Columns')

    def __str__(self):
        # Fully qualified "schema.table.column" name.
        return self.table.schema.name + '.' + self.table.name + '.' + self.name

    @property
    def query_strings(self):
        return [self.name]

    @property
    def indexed_columns(self):
        # Parses the comma-separated index_for field into
        # (schema, table, column) tuples, with this column's name appended.
        # NOTE(review): the appended self.name is a bare string while the rest
        # are tuples — confirm consumers expect this mixed shape.
        if self.index_for:
            return [(self.table.schema.name, self.table.name, name.strip())
                    for name in self.index_for.split(',')] + [self.name]
        else:
            return None


class Function(models.Model):
    """A SQL function users may call in their queries."""

    objects = AccessLevelManager()

    order = models.IntegerField(
        null=True, blank=True,
        verbose_name=_('Order'),
        help_text=_('Position in lists.')
    )
    name = models.CharField(
        max_length=256,
        verbose_name=_('Name'),
        help_text=_('Identifier of the function on the server.')
    )
    description = models.TextField(
        null=True, blank=True,
        verbose_name=_('Description'),
        help_text=_('A brief description of the function to be displayed in the user interface.')
    )
    query_string = models.CharField(
        max_length=256,
        verbose_name=_('Query string'),
        help_text=_('Prototype of this function in a SQL query.')
    )
    access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Access level')
    )
    metadata_access_level = models.CharField(
        max_length=8, choices=settings.ACCESS_LEVEL_CHOICES,
        verbose_name=_('Metadata access level')
    )
    groups = models.ManyToManyField(
        Group, blank=True,
        verbose_name=_('Groups'),
        help_text=_('The groups which have access to this function.')
    )

    class Meta:
        ordering = ('order', )

        verbose_name = _('Function')
        verbose_name_plural = _('Functions')

    def __str__(self):
        return self.name
#!/usr/bin/env python
"""Set-based Sudoku solver.

Each grid cell holds the set of still-possible values; solving alternates
constraint propagation (``from_grid``/``to_grid``) with depth-first guessing
on the cell with the fewest candidates.
"""
from math import sqrt


class Sudoku(object):
    """A dim x dim Sudoku board solved by candidate-set propagation."""

    def __init__(self, rows=None, dim=9, boxes=9):
        """Build a board and, if initial rows are given, solve it in place.

        rows: list of dim rows of ints, 0 marking an empty cell (None or []
              creates an uninitialized board). Backward-compatible with the
              old mutable default ``rows=[]``.
        dim: board dimension (must be a perfect square).
        boxes: unused, kept for interface compatibility.
        """
        super(Sudoku, self).__init__()
        self.dim = dim
        self.box_lim = int(sqrt(dim))          # edge length of one box
        self.numbers_range = set(range(1, dim + 1))
        # One independent candidate set per cell. (The previous code aliased
        # a single shared set object into every cell.)
        self.grid = [set(self.numbers_range) for _ in range(self.dim * self.dim)]
        if rows:
            self.initialize_by_rows(rows)
            self.to_grid()
            self.solve()

    def make_boxes(self):
        """Return the dim boxes as flat lists of cell values (row-major)."""
        boxes = []
        rows = self.get_rows()
        step = self.box_lim  # was hard-coded 3; now follows the board size
        for ri in range(0, self.dim, step):
            for ci in range(0, self.dim, step):
                boxes.append([rows[i][j]
                              for i in range(ri, ri + step)
                              for j in range(ci, ci + step)])
        return boxes

    def make_columns(self):
        """Return the transpose of the rows."""
        rows = self.get_rows()
        return [[rows[i][j] for i in range(self.dim)] for j in range(self.dim)]

    def from_grid(self, grid=None):
        """Rebuild rows/columns/boxes from the candidate grid.

        Cells with exactly one candidate become that value; all others 0.
        Passing a grid replaces self.grid first.
        """
        if grid is None:
            grid = self.grid
        else:
            self.grid = grid

        def _resolved(cell):
            # A cell is decided when exactly one candidate remains.
            return next(iter(cell)) if len(cell) == 1 else 0

        self.rows = [[_resolved(grid[ri * self.dim + ji])
                      for ji in range(self.dim)]
                     for ri in range(self.dim)]
        self.initialize_by_rows(self.rows)
        self.to_grid()

    def row_of_grid(self, index):
        """Return the flat-grid slice holding row `index`."""
        return self.grid[index * self.dim: (index + 1) * self.dim]

    def column_of_grid(self, index):
        """Return the grid cells of column `index`."""
        return [self.grid[i] for i in range(index, len(self.grid), self.dim)]

    def box_of_grid(self, index):
        """Return the grid cells of box `index`."""
        return [self.grid[i] for i in range(len(self.grid))
                if self.box_by_grid(i) == index]

    def row_by_grid(self, index):
        """Row number of flat-grid cell `index`."""
        return index // self.dim

    def column_by_grid(self, index):
        """Column number of flat-grid cell `index`."""
        return index % self.dim

    def box_by_grid(self, index):
        """Box number of flat-grid cell `index`."""
        ri = self.row_by_grid(index)
        ci = self.column_by_grid(index)
        return (ri // self.box_lim) * self.box_lim + ci // self.box_lim

    def possibilities(self, index, miss_rows=None, miss_columns=None,
                      miss_boxes=None):
        """Return the candidate set for cell `index`.

        Intersects the cell's candidates with the values missing from its
        row, column and box; additionally, if a candidate can only appear in
        this cell within its row/column/box, it is forced ("hidden single").
        """
        if miss_rows is None:
            miss_rows = self.get_rows_missing_values()
        if miss_columns is None:
            miss_columns = self.get_columns_missing_values()
        if miss_boxes is None:
            miss_boxes = self.get_boxes_missing_values()

        ri = self.row_by_grid(index)
        ci = self.column_by_grid(index)
        bi = self.box_by_grid(index)

        tmp_poss = self.grid[index] & miss_rows[ri] & miss_columns[ci] & miss_boxes[bi]
        if len(tmp_poss) == 1:
            return tmp_poss

        for p in tmp_poss:
            for items in [self.row_of_grid(ri), self.column_of_grid(ci),
                          self.box_of_grid(bi)]:
                pos_cnt = 0
                for item in items:
                    if p in item:
                        pos_cnt += 1
                if pos_cnt == 1:
                    # p fits nowhere else in this unit -> it must go here.
                    return set([p])
        return tmp_poss

    def to_grid(self):
        """Rebuild the candidate grid from self.rows."""
        miss_rows = self.get_rows_missing_values()
        miss_columns = self.get_columns_missing_values()
        miss_boxes = self.get_boxes_missing_values()
        i = 0
        for row in self.rows:
            for value in row:
                if value == 0:
                    self.grid[i] = self.possibilities(i, miss_rows,
                                                      miss_columns, miss_boxes)
                else:
                    self.grid[i] = set([value])
                i += 1

    def get_rows(self):
        return self.rows

    def get_boxes(self):
        return self.boxes

    def get_columns(self):
        return self.columns

    def get_grid(self):
        return self.grid

    def initialize_by_rows(self, rows):
        """Set rows and derive columns and boxes from them."""
        self.rows = rows
        self.columns = self.make_columns()
        self.boxes = self.make_boxes()

    def solve(self, param=None):
        """Solve the board in place.

        First iterates constraint propagation until it stops making progress,
        then guesses on the cell with the fewest candidates, recursing via a
        fresh Sudoku per guess.
        """
        change = True
        while change:
            old = self.wrong_values_count()
            self.from_grid()
            change = (old != self.wrong_values_count())

        if self.wrong_values_count() == 0:
            return

        # Propagation stalled: find the undecided cell with the fewest
        # candidates to branch on.
        min_index = 0
        min_cnt = len(self.numbers_range) + 1
        min_field = None
        for i, field in enumerate(self.grid):
            if 1 < len(field) < min_cnt:
                min_cnt = len(field)
                min_index = i
                min_field = field

        if min_field is None:
            # No undecided cell but still inconsistent: dead end.
            return

        for var in min_field:
            # Deep-enough copy (rows of ints). The previous shallow copy
            # shared the row lists with self.rows, so failed guesses leaked
            # into this board's state.
            rows_copy = [row[:] for row in self.get_rows()]
            rows_copy[self.row_by_grid(min_index)][self.column_by_grid(min_index)] = var
            try:
                newsudoku = Sudoku(rows_copy)
            except Exception:
                # Contradictory guess crashed the sub-solver; try the next.
                continue
            if newsudoku.wrong_values_count() == 0:
                self.from_grid(newsudoku.get_grid())
                break

    def is_valid(self):
        """True when every row, column and box holds all dim values."""
        return 0 == self.wrong_values_count()

    def missing_values(self, arr):
        """Values of 1..dim not present in `arr`."""
        return self.numbers_range - set(arr)

    def get_rows_missing_values(self):
        return [self.missing_values(row) for row in self.rows]

    def get_columns_missing_values(self):
        return [self.missing_values(col) for col in self.columns]

    def get_boxes_missing_values(self):
        return [self.missing_values(box) for box in self.boxes]

    def get_grid_missing_values(self):
        """Return the candidate sets of all still-undecided cells."""
        possible_values = []
        miss_rows = self.get_rows_missing_values()
        miss_columns = self.get_columns_missing_values()
        miss_boxes = self.get_boxes_missing_values()
        for i in range(len(self.grid)):
            poss = self.possibilities(i, miss_rows, miss_columns, miss_boxes)
            if len(poss) > 1:
                possible_values.append(poss)
        return possible_values

    def wrong_values_count(self):
        """Count missing/duplicated values over all rows, columns and boxes.

        Zero means the board is completely and consistently filled.
        """
        wrong_cnt = 0
        element_functions = [self.get_rows, self.get_columns, self.get_boxes]
        for getlist in element_functions:
            for item in getlist():
                wrong_cnt += self.dim - len(self.numbers_range & set(item))
        return wrong_cnt

    def __repr__(self):
        """ASCII rendering with box separators; undecided cells print as 0."""
        s = ""
        box_rows = self.dim * self.box_lim   # grid cells per band of boxes
        for ri in range(len(self.grid)):
            if ri > 0:
                if ri % box_rows == 0:
                    s += "\n" + "-" * int((self.dim + self.box_lim - 1) * 2 - 1)
                if ri % self.dim == 0:
                    s += "\n"
                elif ri % self.box_lim == 0:
                    s += " | "
                else:
                    s += " "
            els = self.grid[ri]
            s += str(0 if len(els) > 1 or 0 == len(els) else next(iter(els)))
        return s


def to_numbers(lines):
    """Convert an iterable of digit strings into rows of ints."""
    return [[int(lines[i][j]) for j in range(len(lines[i].strip()))]
            for i in range(len(lines))]


def replace_zeros(row, values):
    """Replace each 0 in `row` with a value popped from `values`.

    Note: pops from the END of `values` and mutates it.
    """
    return [values.pop() if i == 0 else i for i in row]


def replace_zeros_in_grid(grid, values):
    """Replace each undecided cell (candidate set >1) with a popped value.

    Note: pops from the END of `values` and mutates it.
    """
    replaced = [set([values.pop()]) if len(field) > 1 else field
                for field in grid]
    return replaced


def missing_row_values_to_specimen(missing_in_row):
    """Build [lo, hi] index ranges for choosing among a row's missing values."""
    return [[0, maxvalue] for maxvalue in range(len(missing_in_row) - 1, 0, -1)]


def missing_grid_values_to_specimen(missing_in_grid):
    """Build [lo, hi] index ranges for choosing among each cell's candidates."""
    return [[0, len(field) - 1] for field in missing_in_grid]
# Copyright (c) 2012-2014 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and/or associated documentation files (the # "Materials"), to deal in the Materials without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Materials, and to # permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. ## \file gredit.py ## \author Frank Brill <brill@ti.com> """ Implements a GUI for a graph object editor. The top-level object is the GrEditor, which has all the GUI elements for menus, buttons, and most importantly a GraphCanvas, which is the drawing area for the graph. The graph consists of nodes with ports, and links between the ports. The user can interactively create a graph object and save and load it from an XML file. The graph is validated incrementally as it is constructed, with a final check before saving. 
Classes defined are: GrEditor: The top-level GUI GraphCanvas: The graph drawing area of the GUI CanvasObject: An object that can be placed on a GraphCanvas CanvasNode, CanvasPort, and CanvasLink: Types (subclasses) of CanvasObject and the corresponding 'regular' graph objects """ import math from tkinter import * import tkinter.font from tkinter import messagebox from vxGraph import * from vxCodeWriter import * # OPEN ISSUES # Need to save, restore, and use the data node "name"s in generating code # Need to clean up how the object dictionaries are maintained and the way icons are mapped # to objects. Could be a bit simpler. General cleanup needed. Eliminate globals. # # The automatic layout/rearrangement and tree dragging interaction could be improved. # Would like to be able to select an entire group of nodes and drag them all at once. # Editing should be more "modal": normally in "move" mode, only add items in "add" mode # When using the drag method of drawing a link, the target node isn't highlighted (WHY NOT?) # Should add scroll bars to canvas when the graph gets too big to fit on it. # # Should eliminate duplicate code in GraphCanvas.loadFromTree() vs. Graph.fromTree. # Error checking is sparse (e.g. don't check for quit without saving, etc.) # # Should put image, scalar, buffer on a different palette from the kernel nodes? # Will need better way to select kernels when the kernel list gets large. Will need # scrollbar at least, probably a hierarchy of kernels to navigate. 
# Table defining various color constants
colorTable = [('I', "#4444CF"),  # images and unselected image links are blue
              ('B', "#AFAF00"),  # buffers and unselected buffer links are yellow
              ('S', "#00BF00")   # scalars and unselected scalar are green
              ]
kernelNodeColor = "#CF2222"        # kernel nodes are red
defaultOutlineColor = "gray20"     # all node outlines are dark gray
linkHilightColor = "gray80"        # links turn light gray when they're selected
portOutlineColor = "black"         # ports are outlined in black
canvasBackgroundColor = "gray50"   # the background color of the graph canvas
listboxBackgroundColor = "gray80"  # the background color of the kernel list


class CanvasObject:
    """
    A CanvasObject is something that can be placed on a GraphCanvas.
    It is the superclass for CanvasNode, CanvasPort, and CanvasLink.
    """

    def __init__(self, canvas, x, y, w, h):
        """Store the object's size (w, h), location (x, y), and the
        canvas it lives on."""
        self.canvas = canvas
        self.x = x
        self.y = y
        self.w = w
        self.h = h

    def attachIcon(self, icon, objecttype):
        """Store the subclass type string (node, port, or link) and the
        actual Tk canvas icon representing the object, bind some GUI
        interactions to it, and update the canvas' object dictionary.
        Should only be called by the subclass constructors."""
        # NOTE(review): objecttype is accepted but never stored here, and
        # self.id is expected to be provided by the subclass (e.g. Node) —
        # confirm both against the subclasses.
        self.icon = icon
        self.canvas.addtag_withtag(self.id, self.icon)
        # Hover highlights the object; right-click (Button-3) deletes it.
        self.canvas.tag_bind(self.id, "<Any-Enter>", self.highlight)
        self.canvas.tag_bind(self.id, "<Any-Leave>", self.unhighlight)
        self.canvas.tag_bind(self.id, "<Button-3>", self.delete)

    def highlight(self, event):
        """Highlight the object by thickening its outline, and set the
        current focus to this object."""
        self.canvas.focus_set()
        self.canvas.currentObject = self
        self.canvas.itemconfig(self.icon, {'width': '3'})

    def unhighlight(self, event):
        """Unhighlight the object, and clear the current focus."""
        self.canvas.currentObject = None
        self.canvas.itemconfig(self.icon, {'width': '1'})

    def move(self, dx, dy):
        """Move the object by an amount relative to the current location."""
        self.x = self.x + dx
        self.y = self.y + dy
        self.canvas.move(self.icon, dx, dy)

    def moveTo(self, x, y):
        """Move the object to an absolute location on the canvas."""
        # Only updates the stored coordinates; subclasses reposition the
        # actual canvas items in their overrides.
        self.x = x
        self.y = y

    def delete(self, event):
        """Destroy the icon and remove self from the graph."""
        self.canvas.delete(self.icon)
        self.canvas.graph.deleteObject(self)


class CanvasNode(Node, CanvasObject):
    """
    A CanvasNode is a CanvasObject that represents a particular kernel,
    image, buffer, or scalar.  Each of these types of nodes has an icon
    of a characteristic shape and color.  CanvasNodes have lists of
    input and output CanvasPorts, which can be connected by CanvasLinks.
    """

    def __init__(self, canvas, x, y, kernel, nodeid=None):
        # NOTE(review): Node.__init__ dereferences kernel.name, yet
        # finishInit guards against kernel == None — confirm whether a
        # None kernel can actually reach this constructor.
        Node.__init__(self, canvas.graph, kernel.name, nodeid)
        CanvasObject.__init__(self, canvas, x, y, 25, 12)
        self.kernel = kernel

    def finishInit(self, icon):
        """Attach the given icon and label and place the icon at the
        given (x, y), location."""
        self.attachIcon(icon, "node")
        if self.kernel == None:
            self.nodetype = self.id  # if no kernel assigned, just use ID
        else:
            self.nodetype = self.kernel.name
        self.label = self.canvas.create_text((self.x, self.y), tags=self.id,
                                             text=self.nodetype, fill='white')
        # Render the label in bold, reusing the canvas' default font family.
        labelfont = self.canvas.itemcget(self.label, 'font')
        myfont = tkinter.font.Font(font=labelfont)
        myfont['weight'] = 'bold'
        self.canvas.itemconfig(self.label, font=myfont)
        self.moveTo(self.x, self.y)

    @classmethod
    def fromTreeElement(cls, canvas, n):
        """Create a node based on the information in the tree element."""
        if n.get('type') in Kernel.dataKernels:
            return CanvasDataNode.fromTreeElement(canvas, n)
        else:
            return CanvasFunctionNode(canvas,
                                      float(n.get('x')), float(n.get('y')),
                                      canvas.app.kernel[n.get('type')],
                                      n.get('id'))

    @classmethod
    def fromEventAndKernel(cls, event, kernel):
        """Create a node at the event indicated in the event with the
        given kernel."""
        if kernel == None:
            return None
        if kernel.name in Kernel.dataKernels:
            return CanvasDataNode(event.widget, event.x, event.y, kernel)
        else:
            return CanvasFunctionNode(event.widget, event.x, event.y, kernel)

    def createPortsForKernel(self):
        # One port per declared kernel input/output; a kernel-less node gets
        # a single image-typed ('I') input and output.
        if self.kernel != None:
            for i in range(self.kernel.numInputs):
                CanvasPort(self, "in", self.kernel.inputs[i])
            for i in range(self.kernel.numOutputs):
                CanvasPort(self, "out", self.kernel.outputs[i])
        else:
            CanvasPort(self, "in", 'I')
            CanvasPort(self, "out", 'I')
        self.moveTo(self.x, self.y)

    def move(self, dx, dy):
        """Move the entire node's icon, including shape, label, and
        ports by a relative amount.  The moveTo() method must have been
        called at least once prior to this to correctly place the port
        and label sub-icons."""
        CanvasObject.move(self, dx, dy)
        self.canvas.move(self.label, dx, dy)
        for port in self.inports:
            port.move(dx, dy)
        for port in self.outports:
            port.move(dx, dy)
        return (dx, dy)

    def moveTo(self, x, y):
        """Place the node at an absolute (x, y) location on the canvas.
        Correctly lay out all the sub-parts of the icon, including
        shape, label, and ports."""
        CanvasObject.moveTo(self, x, y)
        self.canvas.coords(self.icon, x-self.w, y-self.h, x+self.w, y+self.h)
        self.canvas.coords(self.label, x, y)
        # Distribute ports evenly along the node's elliptical outline:
        # dx is spaced across the width, dy = h*sqrt(1 - dx^2/w^2) puts the
        # port on the ellipse (inputs on top, outputs on the bottom).
        for i in range(len(self.inports)):
            dx = 2*self.w*(i+1)/(len(self.inports)+1) - self.w
            dy = self.h*math.sqrt(1.0 - (dx*dx)/(self.w*self.w))
            self.inports[i].moveTo(x+dx, y-dy)
        for i in range(len(self.outports)):
            dx = 2*self.w*(i+1)/(len(self.outports)+1) - self.w
            dy = self.h*math.sqrt(1.0 - (dx*dx)/(self.w*self.w))
            self.outports[i].moveTo(x+dx, y+dy)

    def moveTreeTo(self, x, y):
        """Move the node to an absolute location and drag the subtree."""
        dx = x - self.x
        dy = y - self.y
        self.moveTree(dx, dy);

    def moveTree(self, dx, dy):
        """Wrapper around recursive function to the node and its entire
        subtree by a relative amount."""
        movedNodes = set()  # Keep track of the nodes we've moved
        self.moveTreeRecursive(dx, dy, movedNodes)

    def moveTreeRecursive(self, dx, dy, movedNodes):
        """Recursive function to move the node and its entire subtree
        by a relative amount."""
        self.move(dx, dy)
        movedNodes.add(self.id)       # Mark node as moved so recursion
        for port in self.outports:    # doesn't move it more than once
            for link in port.links:
                if not link.dst.node.id in movedNodes:
                    link.dst.node.moveTreeRecursive(dx, dy, movedNodes)

    def delete(self, event):
        """Destroy the node, its label, and all of its ports."""
        self.canvas.delete(self.label)
        for port in self.inports:
            port.delete(event)
        for port in self.outports:
            port.delete(event)
        CanvasObject.delete(self, event)


class CanvasDataNode(CanvasNode,
DataNode): """ A canvas node that represents a data object instead of a function. They look and behave somewhat differently from function nodes, so we have two subclasses. """ def __init__(self, canvas, x, y, kernel, nodeid=None): CanvasNode.__init__(self, canvas, x, y, kernel, nodeid) DataNode.__init__(self, canvas.graph, kernel.name, nodeid) fill = self.canvas.dataColor[self.kernel.name[0]] self.w = self.w - 5 self.h = self.h - 5 icon = self.canvas.create_rectangle(-self.w, -self.h, self.w, self.h, fill=fill, width=1, outline=defaultOutlineColor) self.finishInit(icon) @classmethod def fromTreeElement(cls, canvas, n): """Create a data node based on the information in the tree element.""" node = CanvasDataNode(canvas, float(n.get('x')), float(n.get('y')), canvas.app.kernel[n.get('type')], n.get('id')) node.updateAttributesFromElement(n) return node class CanvasFunctionNode(CanvasNode, FunctionNode): """ A canvas node that represents a function object. They look and behave somewhat differently from data nodes, so we have two subclasses. """ def __init__(self, canvas, x, y, kernel, nodeid=None): CanvasNode.__init__(self, canvas, x, y, kernel, nodeid) icon = self.canvas.create_oval(-self.w, -self.h, self.w, self.h, fill=self.canvas.nodeColor, width=1, outline=defaultOutlineColor) self.finishInit(icon) class CanvasPort(Port, CanvasObject): """ A CanvasPort is a CanvasObject that represents an input or output parameter of a CanvasNode. A port has a direction ("in" or "out") and a data type (image, buffer, or scalar). These attributes need to be checked when linking ports together. """ def __init__(self, node, direction, datatype): """Store the CanvasPort's node, direction, type, and links (initially the empty set). 
Creates the icon and binds some GUI interaction to it.""" Port.__init__(self, direction, datatype, node) CanvasObject.__init__(self, node.canvas, 0, 0, 2, 2) color = node.canvas.dataColor[datatype] polygon = self.canvas.create_polygon(-self.w, -self.h, self.w, -self.h, self.w, self.h, -self.w, self.h, fill=color, outline=portOutlineColor) self.attachIcon(polygon, "port") self.canvas.tag_bind(self.id, "<Button-3>", self.node.delete) self.canvas.tag_bind(self.id, '<B1-Enter>', self.highlight) # doesn't help (WHY NOT?) node.moveTo(node.x, node.y) def move(self, dx, dy): """Move the CanvasPort by an amount relative to the current location and drag the links along for the ride.""" CanvasObject.move(self, dx, dy) for link in self.links: link.moveTo(0, 0) def moveTo(self, x, y): """Move the CanvasPort relative to to an absolute location on the canvas and drag the links along for the ride.""" CanvasObject.moveTo(self, x, y) self.canvas.coords(self.icon, (x-self.w, y-self.h, x+self.w, y-self.h, x+self.w, y+self.h, x-self.w, y+self.h)) for link in self.links: link.moveTo(0, 0) def delete(self, event): """Delete the CanvasPort *and* all the links that go to or from it.""" linklist = list(self.links) for link in linklist: link.delete(event) CanvasObject.delete(self, event) class CanvasLink(Link, CanvasObject): """ A CanvasLink is a CanvasObject that represents a connection from an output port of one node to the input port of another one. It is typed (image, buffer, or scalar) and this type needs to match on both ends of the link. 
""" def __init__(self, port1, port2): """Store the CanvasLink's endpoints and data type, and create the line object that graphically represents the link.""" (src, dst) = port1.orderLink(port2) Link.__init__(self, src, dst) CanvasObject.__init__(self, self.src.canvas, 0, 0, 0, 0) fill = self.canvas.dataColor[self.src.datatype] line = self.canvas.create_line(self.src.x, self.src.y, self.dst.x, self.dst.y, width=3, fill=fill) self.canvas.tag_lower(line) self.attachIcon(line, "link") self.moveTo(self.x, self.y) def highlight(self, event): """Highlight the link by changing its color and grab the focus.""" self.canvas.focus_set() self.canvas.currentObject = self self.canvas.itemconfig(self.icon, fill=linkHilightColor) def unhighlight(self, event): """Un-highlight the link to make it specific to data type and release the focus.""" self.canvas.currentObject = None fill = self.canvas.dataColor[self.src.datatype] self.canvas.itemconfig(self.icon, fill=fill) def moveTo(self, x, y): """Move line to match the endpoints. The (x, y) arguments are ignored.""" CanvasObject.moveTo(self, x, y) self.canvas.coords(self.icon, self.src.x, self.src.y, self.dst.x, self.dst.y) def delete(self, event): """Remove link from endpoints and delete the line.""" self.src.links.remove(self) self.dst.links.remove(self) CanvasObject.delete(self, event) class GraphCanvas(Canvas): """ A GraphCanvas is Tk canvas object with special facilities to enable the user to create, save, and load graph objects consisting of nodes and links. """ def __init__(self, root, app): """Initialize the canvas and data structures needed to maintain the graph: the set of nodes, ports, and links (maintained in Python dictionaries). Set the editing mode to "normal". 
""" Canvas.__init__(self, root, width=400, height=300) self.root = root self.app = app self.graph = Graph() self.currentObject = None self.currNode = None self.setMode("normal") global colorTable, kernelNodeColor self.dataColor = dict() for c in colorTable: self.dataColor[c[0]] = c[1] self["bg"] = canvasBackgroundColor self.nodeColor = kernelNodeColor def clear(self): """Delete all the nodes, which in turn deletes the ports and links. Confirm that the user wants to do this, and if not, just return False. Otherwise, go ahead and clear the canvas and return True. """ if self.graph.numNodes() == 0: return True if not messagebox.askokcancel("Graph Clear", "Are you sure you want to clear the graph canvas?"): return False self.graph.clear() self.currentObject = None self.currNode = None self.setMode("normal") self.app.message("Graph cleared.") return True def setMode(self, mode): """Set the current editing mode. Currently support a "normal" mode for drawing and moving nodes, and a "drawlink" mode for creating link between ports on nodes.""" if mode == "normal": self.bind('<Button-1>', self.makeOrDragNodeOrStartLink) self.bind('<Double-Button-1>', self.editCurrentObject) self.bind('<B1-Motion>', self.dragCurrNode) self.bind('<ButtonRelease-1>', self.autoArrange) self.unbind('<Any-Motion>') self.bind('<Delete>', self.deleteCurrentObject) self.bind('<Left>', self.nudgeCurrentObject) self.bind('<Right>', self.nudgeCurrentObject) self.bind('<Up>', self.nudgeCurrentObject) self.bind('<Down>', self.nudgeCurrentObject) elif mode == "drawlink": self.unbind("<Delete>") self.unbind("<Left>") self.unbind("<Right>") self.unbind("<Up>") self.unbind("<Down>") self.bind('<Button-1>', self.completeLink) self.unbind('<B1-Motion>') self.bind('<ButtonRelease-1>', self.completeLink) self.bind('<Any-Motion>', self.moveTempLink) def getTopItemAtLocation(self, x, y): """Return the item at the given location on the canvas.""" itemsHere = self.find_overlapping(x, y, x, y) if (len(itemsHere) > 0): 
return itemsHere[-1] else: return None def getObjectFromIcon(self, item): """Given a canvas icon, return the object structure it represents.""" tags = self.gettags(item) for tag in tags: # look up all the tags until we get a hit obj = self.graph.getObject(tag) if obj != None: return obj return None def makeOrDragNodeOrStartLink(self, event): """Initiate the user-directed action when the user has a mouse interaction with the canvas in "normal" mode. If the user clicks on an empty spot on the canvas, create a node at the click location. If they click on a node, get ready to drag/move it. If they click on a port, get ready to draw a link.""" thisItem = self.getTopItemAtLocation(event.x, event.y) if thisItem == None: # Nothing at selected location, so create a new node kernel = self.app.getCurrentKernel() self.currNode = CanvasNode.fromEventAndKernel(event, kernel) if self.currNode != None: self.currNode.createPortsForKernel() else: obj = self.getObjectFromIcon(thisItem) if (obj.objecttype == "node"): # Start dragging selected node self.currNode = obj elif (obj.objecttype == "port"): # Start creating link from selected port self.currNode = obj.node self.newLinkSrc = obj self.tempLink = self.create_line(obj.x, obj.y, event.x, event.y, tag="templine") self.tag_lower(self.tempLink) self.setMode("drawlink") def moveTempLink(self, event): """Update the "rubber-banding" line (tempLink) that represents the new link being created.""" self.coords(self.tempLink, self.newLinkSrc.x, self.newLinkSrc.y, event.x, event.y) def deleteCurrentObject(self, event): """Delete the object at the mouse event location. If it's a port, delete the corresponding node too.""" if self.currentObject != None: if self.currentObject.objecttype == "port": self.currentObject.node.delete(event) else: self.currentObject.delete(event) def editCurrentObject(self, event): """Pop up an attribute editor for the current object.""" # Some ugly stuff in here--needs cleanup. This pop-up thing # works, but is clunky. 
Should be able to just edit the label # text directly. Check out the Quad "display" method in # quadcanvas.py to see how this is done. if not isinstance(self.currentObject, DataNode): return # only edit data nodes editObj = self.currentObject # Need to save this because self.app.editDataNode(editObj, event) # editing may reset currentObject if editObj.name != '': self.itemconfig(editObj.label, text=editObj.name) else: self.itemconfig(editObj.label, text=editObj.kernel.name) def nudgeCurrentObject(self, event): """Move the current object one pixel in direction indicated by an arrow keypress event.""" if self.currentObject != None: if self.currentObject.objecttype == "node": if event.keysym == "Up": self.currentObject.move(0, -1) elif event.keysym == "Down": self.currentObject.move(0, 1) elif event.keysym == "Left": self.currentObject.move(-1, 0) elif event.keysym == "Right": self.currentObject.move(1, 0) def completeLink(self, event): """Check to make sure that the user has indicated a valid link between compatible ports, and if so, create the link and drop back into "normal" mode.""" dstIcon = self.getTopItemAtLocation(event.x, event.y) if dstIcon != None: dst = self.getObjectFromIcon(dstIcon) if dst == self.newLinkSrc: return self.delete(self.tempLink) if (dst != None) and (dst.objecttype == "port"): if dst.linkable(self.newLinkSrc): newLink = CanvasLink(self.newLinkSrc, dst) self.autoArrange(event) else: self.app.message(self.graph.getLastError()) self.setMode("normal") def dragCurrNode(self, event): """Move the current node to the location indicated by the mouse event.""" if self.currNode != None: self.currNode.moveTreeTo(event.x, event.y) def saveAll(self): """Save an XML representation of the graph to the app's current saveFile. The graph is validated (checked to make sure all of its inputs and outputs are met and all nodes have kernels attached) before saving. 
""" if not self.graph.allNodesHaveKernels(): self.app.message(self.graph.getLastError()) return if not self.graph.allInputsAndOutputsMet(): self.app.message(self.graph.getLastError()) return filename = self.app.getSaveFile() if os.path.exists(filename): if not messagebox.askokcancel("Graph XML Save", "File %s exists. Overwrite?" % filename): return self.graph.saveToXML(filename) self.app.message("Saved", filename) # Also write the corresponding C file filename = os.path.splitext(filename)[0]+'.c' writer = GraphCodeWriter("context", self.graph) writer.writeCfile(filename) def loadFile(self): """Load an XML representation of the graph from the app's current saveFile. Parse the file to create a Python ElementTree. Make sure the user is OK with clearing the graph, and if so, initialize graph from the tree and notify the user that we're done. """ try: tree = etree.parse(self.app.getSaveFile()) except: messagebox.showerror("Graph XML Load", "Cannot load file "+self.app.getSaveFile()) return if self.clear(): self.loadFromTree(tree) self.app.message("Loaded", self.app.getSaveFile()) def loadFromTree(self, tree): """Create a graph structure from an elementTree structure. This function mirrors the Graph.fromTree function exactly. Still trying to figure out how to make that one create Canvas objects so we can do away with this function. 
""" for n in tree.iter("node"): # Create each node node = CanvasNode.fromTreeElement(self, n) for p in n.iter("port"): # Create each port port = CanvasPort(node, p.get('direction'), p.get('type')) for l in tree.iter("link"): # Create each link CanvasLink(self.graph.getObject(l.get('src')), self.graph.getObject(l.get('dst'))) def autoArrange(self, event): """Arrange the nodes to make the graph lay out prettier.""" self.enforceTopToBottom(event) self.spreadNodes(event) return def enforceTopToBottom(self, event): """Make sure children are below their parents in the graph.""" speed = 0.5 while True: changed = False for link in self.graph.links.values(): if link.src.y > link.dst.y - 10: link.src.node.move(0, -speed) link.dst.node.moveTree(0, speed) self.root.update_idletasks() changed = True if not changed: break return def sign(self, x): """Utility to determine the sign of a number.""" if x < 0: return -1; if x > 0: return 1; return 0; def spreadNodes(self, event): """Iteratively move the node icons to auto-arrange them such that nodes aren't on top of each other.""" speed = 0.5 while True: changed = False for thisNode in self.graph.nodes.values(): (fx, fy) = (0, 0) for otherNode in self.graph.nodes.values(): if thisNode != otherNode: (dx, dy) = (thisNode.x-otherNode.x, thisNode.y-otherNode.y) if ((abs(dx) < thisNode.w+otherNode.w) and (abs(dy) < thisNode.h+otherNode.h)): (fx, fy) = (fx + self.sign(dx), fy + self.sign(dy)) if (dx, dy) == (0, 0): (fx, fy) = (1, 1) thisNode.move(fx*speed, fy*speed) self.root.update_idletasks() changed = True if not changed: break class GrEditor: def __init__(self): """Initialize the table of kernels and create all the GUI widgets, including the GraphCanvas, button controls, message area, load/store filename text widget, and kernel selection list, and then launch the app. 
""" self.kernel = Node.kernelTable root = Tk() root.title("Graph Editor") self.defaultSaveFile = "mygraph.xml" self.root = root controls = Frame(root) self.messageString = StringVar() msg = Message(controls, anchor=W, width=330, textvariable=self.messageString) canv = GraphCanvas(root, self) self.listbox = Listbox(root, width=9, bg=listboxBackgroundColor) for k in self.kernel.keys(): self.listbox.insert(END, k) self.listbox.select_set(0) load = Button(controls, text="Load", command=canv.loadFile) save = Button(controls, text="Save", command=canv.saveAll) clear = Button(controls, text="Clear", command=canv.clear) quit = Button(controls, text="Quit", command=root.quit) msg.pack({"side":"left", "fill":X, "expand":1}) load.pack({"side":"left"}) save.pack({"side":"left"}) clear.pack({"side":"left"}) quit.pack({"side":"left"}) self.fileEntry = Entry(root) self.fileEntry.insert(0, self.defaultSaveFile) controls.pack(side="bottom", fill=X) self.listbox.pack(side="left", fill=Y) self.fileEntry.pack(side="top", fill=X) canv.pack(fill=BOTH, expand=1) canv.focus_set() self.messageString.set("Hello") root.mainloop() exit() def addField(self, window, form, name, value): "Add a field to a form in the given window" paramArea = Frame(window) Label(paramArea, text=name).pack(side='left') form[name] = Entry(paramArea) form[name].pack(padx=5, side='right', fill=X) form[name].insert(0, value) paramArea.pack() def editDataNode(self, node, event): "Create a popup to edit a data node's attributes." 
self.nodeForm = dict() self.nodeBeingEdited = node self.editPopup = Toplevel(self.root) self.addField(self.editPopup, self.nodeForm, 'name', node.name) for param, value in node.params.items(): self.addField(self.editPopup, self.nodeForm, param, value) button = Button(self.editPopup, text="OK", command=self.ok) button.pack(pady=5) self.editPopup.grab_set() self.editPopup.geometry("+%s+%s" % (event.x+self.root.winfo_rootx(), event.y+self.root.winfo_rooty())) self.root.wait_window(self.editPopup) return def ok(self): "Stuff the form values into the node being edited" self.nodeBeingEdited.name = self.nodeForm['name'].get() for param in self.nodeBeingEdited.params: self.nodeBeingEdited.params[param] = self.nodeForm[param].get() # should use default if blank self.editPopup.destroy() def getSaveFile(self): "Return the name of the user-supplied filename." return self.fileEntry.get() def message(self, *args): """Display the given message in the app's message area. Format the arguments similar to the print() function.""" self.messageString.set(''.join([str(arg)+" " for arg in args])[:-1]) def getCurrentKernel(self): """Return the kernel the user has indicated in the selection box.""" selection = self.listbox.curselection() if len(selection) == 0: return None elif len(selection) == 1: return self.kernel[self.listbox.get(selection[0])] else: print("ERROR: multiple selections") return None app = GrEditor()
'''
Created on Apr 5, 2016

@author: Jakub Bernat
'''
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
import random
import core.observerlibrary as mo
import core.tools as ctls
import logging


class Simulation:
    # Module-wide logger shared by all Simulation instances.
    logger = logging.getLogger('Simulation')

    def __init__(self, name, x0_1 = -0.25, x0_2 = -0.25, x0_3 = -0.25):
        # NOTE(review): x0_1/x0_2/x0_3 are accepted but not used in this
        # constructor — presumably intended as initial state; confirm.
        self.name = name;
        self.Tp = 0.0001;   # sample period [s]
        self.Np = 450;      # number of simulation samples
        self.N = 3   # state size
        self.M = 1   # output number
        self.P = 1   # input number
        # motor parameters
        self.R = 3.2           # Ohm
        self.L = 0.0086        # mH
        self.Kt = 0.0319       # Nm/A
        self.fd = 0.00012      # Nms/rad
        self.J = 30*10.0**-6   # kgm2
        self.mi = -0.06        # Nm/As
        # DC motor: 2-state plant (speed, current) with load torque and
        # voltage as the two virtual inputs.
        self.A = np.matrix(np.zeros((self.N-1,self.N-1)))
        self.A[0,0] = - self.fd/self.J
        self.A[0,1] = self.Kt/self.J
        self.A[1,0] = - self.Kt/self.L
        self.A[1,1] = - self.R/self.L
        self.B = np.matrix(np.zeros((self.N-1,self.N-1)))
        self.B[0,0] = - 1.0/self.J
        self.B[1,1] = 1.0/self.L
        self.C = np.matrix(np.zeros((1,self.N-1)))
        self.C[0,0] = 0.0
        self.C[0,1] = 1.0
        # observer definition: state extended with the load torque as a
        # third (estimated) state.
        self.Ao = np.matrix(np.zeros((self.N,self.N)))
        self.Ao[0,0] = - self.fd/self.J
        self.Ao[0,1] = self.Kt/self.J
        self.Ao[0,2] = - 1.0/self.J
        self.Ao[1,0] = - self.Kt/self.L
        self.Ao[1,1] = - self.R/self.L
        self.Bo = np.matrix(np.zeros((self.N,1)))
        self.Bo[0,0] = 0.0
        self.Bo[1,0] = 1.0/self.L
        self.Bo[2,0] = 0.0
        self.Co = np.matrix(np.zeros((1,self.N)))
        self.Co[0,0] = 0.0
        self.Co[0,1] = 1.0
        self.Co[0,2] = 0.0
        # observer gain
        self.Lo = np.matrix(np.zeros((self.N,1)))
        self.Lo[2,0] = -self.mi
        # check closed loop: log the eigenvalues of Ao - Lo*Co.
        self.logger.debug('Ao-LoCo:')
        self.logger.debug(self.Ao-self.Lo*self.Co)
        w, _ = la.eig(self.Ao-self.Lo*self.Co)
        self.logger.debug('Poles of matrix Ao-LoCo:')
        self.logger.debug(w)


def runObserverMulti(s, observerType='RLS', mapping='integral-finite'):
    """Simulate the DC motor plant in s and run the multi-model
    observer over the same input; returns a dict of trajectories and
    observation errors."""
    # time definition
    time = np.linspace(0.0, (s.Np-1)*s.Tp, s.Np)
    # input signal: +/-10 V square wave
    u = np.asmatrix(10.0*np.sign(np.sin(2*np.pi*(1.0/8.0)*time)))
    # DC motor virtual input
    v = np.asmatrix(np.zeros((2,1)))
    # output signal
    y = np.asmatrix(np.zeros((1,s.Np)))
    # plant state
    x = np.asmatrix(np.zeros((2,s.Np)))
    # load torque
    Tload = 0.01*np.asmatrix(np.ones((1,s.Np)))
    # multi-observer
    mmObserver = mo.MMObserverLTI(s.N, s.M, s.Tp,
                                  s.Ao, s.Bo, s.Co, s.Lo,
                                  mapping, observerType,
                                  50.0, 1.0, [100.0, 1.0, 0.02])
    # estimated state
    xe = np.asmatrix(np.zeros((3,s.Np)))
    # simulation (explicit Euler integration of the plant, with
    # +/-0.025 uniform measurement noise added to the output)
    for n in range(s.Np-1):
        # virtual input - load torque
        v[0,0] = Tload[0,n]
        # virtual input - voltage
        v[1,0] = u[0,n]
        # calculate system state
        x[:,n+1] = x[:,n] + s.Tp*(s.A*x[:,n] + s.B*v)
        y[:,n] = s.C*x[:,n] + 0.05*(random.random()-0.5)
        # call observer
        xe[:,n] = mmObserver.observe(u[:,n], y[:,n])
    # last iteration
    n = s.Np-1
    y[:,n] = s.C*x[:,n] + 0.05*(random.random()-0.5)
    xe[:,n] = mmObserver.observe(u[:,n], y[:,n])

    result = dict()
    result['time'] = time
    # observation errors (third state is compared against the known
    # load torque profile)
    result['e1'] = xe[0,:]-x[0,:]
    result['e2'] = xe[1,:]-x[1,:]
    result['e3'] = xe[2,:]-Tload
    result['x1'] = x[0,:]
    result['x2'] = x[1,:]
    result['x3'] = Tload
    result['xe1'] = xe[0,:]
    result['xe2'] = xe[1,:]
    result['xe3'] = xe[2,:]
    result['u'] = u
    return result


ctls.configureLogger()

s = Simulation('Simple example')
# run simulation without second layer (single model)
ra = runObserverMulti(s, 'none')
# run simulation with second layer (multi model)
rMMa = runObserverMulti(s, 'RLS')

time = ra['time']
x1 = ra['x1']
x2 = ra['x2']
x3 = ra['x3']
xe1S = ra['xe1']
xe2S = ra['xe2']
xe3S = ra['xe3']
xe1M = rMMa['xe1']
xe2M = rMMa['xe2']
xe3M = rMMa['xe3']
e1 = ra['e1']
e2 = ra['e2']
e3 = ra['e3']
e1MM = rMMa['e1']
e2MM = rMMa['e2']
e3MM = rMMa['e3']

con = 10000.0      # down-sampling/conversion factor passed to to_plot_array
suffix = '.png'

# Speed: true vs single-observer (SO) vs multi-observer (MO) estimates.
ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(x1, con),
        label='motor speed $\\omega(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe1S, con),
        label='estimated motor speed (SO) $\\hat{\\omega}(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe1M, con),
        label='estimated motor speed (MO) $\\hat{\\omega}(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('motor speed $[rad/s]$')
plt.legend(loc = 4)
plt.savefig('sensorless_speed' + suffix, bbox_inches=0);
plt.clf();

# Current: true vs SO vs MO estimates.
ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(x2, con),
        label='motor current $i(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe2S, con),
        label='estimated motor current (SO) $\\hat{i}(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe2M, con),
        label='estimated motor current (MO) $\\hat{i}(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('motor current $[A]$')
plt.legend(loc = 4)
plt.savefig('sensorless_current' + suffix, bbox_inches=0);
plt.clf();

# Load torque: true vs SO vs MO estimates.
ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(x3, con),
        label='load torque $T_L(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe3S, con),
        label='estimated load torque (SO) $\\hat{T}_L(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(xe3M, con),
        label='estimated load torque (MO) $\\hat{T}_L(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('load torque $[Nm]$')
plt.legend(loc = 4)
plt.savefig('sensorless_load_torque' + suffix, bbox_inches=0);
plt.clf();

# Observation error plots: single- vs multi-layer observer.
ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(e1, con),
        label='single layer observer $e_1(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(e1MM, con),
        label='multi layer observer $e_1(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('observation error $e_1$')
plt.savefig('sensorless_e1' + suffix, bbox_inches=0);
plt.clf();

ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(e2, con),
        label='single layer observer $e_2(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(e2MM, con),
        label='multi layer observer $e_2(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('observation error $e_2$')
plt.savefig('sensorless_e2' + suffix, bbox_inches=0);
plt.clf();

ax = plt.subplot(111)
ax.plot(time, ctls.to_plot_array(e3, con),
        label='single layer observer $e_3(t)$', linewidth=3.0)
ax.plot(time, ctls.to_plot_array(e3MM, con),
        label='multi layer observer $e_3(t)$', linewidth=3.0)
plt.xlabel('time $[s]$')
plt.ylabel('observation error $e_3$')
plt.savefig('sensorless_e3' + suffix, bbox_inches=0);
plt.clf();
try: import wpilib except ImportError: from pyfrc import wpilib # import components here from autonomous import AutonomousModeManager from components import drive, intake, catapult from common import delay # keep in sync with the driver station MODE_DISABLED = 0 MODE_AUTONOMOUS = 1 MODE_TELEOPERATED = 2 class MyRobot(wpilib.SimpleRobot): ''' This is where it all starts ''' def __init__ (self): ''' Constructor. ''' super().__init__() print("Team 1418 robot code for 2014") ################################################################# # THIS CODE IS SHARED BETWEEN THE MAIN ROBOT AND THE ELECTRICAL # # TEST CODE. WHEN CHANGING IT, CHANGE BOTH PLACES! # ################################################################# wpilib.SmartDashboard.init() # Joysticks self.joystick1 = wpilib.Joystick(1) self.joystick2 = wpilib.Joystick(2) # Motors self.lf_motor = wpilib.Jaguar(1) self.lf_motor.label = 'lf_motor' self.lr_motor = wpilib.Jaguar(2) self.lr_motor.label = 'lr_motor' self.rr_motor = wpilib.Jaguar(3) self.rr_motor.label = 'rr_motor' self.rf_motor = wpilib.Jaguar(4) self.rf_motor.label = 'rf_motor' self.winch_motor = wpilib.CANJaguar(5) self.winch_motor.label = 'winch' self.intake_motor = wpilib.Jaguar(6) self.intake_motor.label = 'intake' # Catapult gearbox control self.gearbox_solenoid=wpilib.DoubleSolenoid(2, 1) self.gearbox_solenoid.label = 'gearbox' # Arm up/down control self.vent_bottom_solenoid = wpilib.Solenoid(3) self.vent_bottom_solenoid.label = 'vent bottom' self.fill_bottom_solenoid = wpilib.Solenoid(4) self.fill_bottom_solenoid.label = 'fill bottom' self.fill_top_solenoid = wpilib.Solenoid(5) self.fill_top_solenoid.label = 'fill top' self.vent_top_solenoid = wpilib.Solenoid(6) self.vent_top_solenoid.label = 'vent top' self.pass_solenoid = wpilib.Solenoid(7) self.pass_solenoid.label = 'pass' self.robot_drive = wpilib.RobotDrive(self.lr_motor, self.rr_motor, self.lf_motor, self.rf_motor) self.robot_drive.SetSafetyEnabled(False) 
self.robot_drive.SetInvertedMotor(wpilib.RobotDrive.kFrontLeftMotor, True) self.robot_drive.SetInvertedMotor(wpilib.RobotDrive.kRearLeftMotor, True) # Sensors self.gyro = wpilib.Gyro(1) self.ultrasonic_sensor = wpilib.AnalogChannel(3) self.ultrasonic_sensor.label = 'Ultrasonic' self.arm_angle_sensor = wpilib.AnalogChannel(4) self.arm_angle_sensor.label = 'Arm angle' self.ball_sensor = wpilib.AnalogChannel(6) self.ball_sensor.label = 'Ball sensor' self.accelerometer = wpilib.ADXL345_I2C(1, wpilib.ADXL345_I2C.kRange_2G) self.compressor = wpilib.Compressor(1,1) ################################################################# # END SHARED CODE # ################################################################# # # Initialize robot components here # self.drive = drive.Drive(self.robot_drive, self.ultrasonic_sensor,self.gyro) self.initSmartDashboard() self.pushTimer=wpilib.Timer() self.catapultTimer=wpilib.Timer() self.catapult=catapult.Catapult(self.winch_motor,self.gearbox_solenoid,self.pass_solenoid,self.arm_angle_sensor,self.ball_sensor,self.catapultTimer) self.intakeTimer=wpilib.Timer() self.intake=intake.Intake(self.vent_top_solenoid,self.fill_top_solenoid,self.fill_bottom_solenoid,self.vent_bottom_solenoid,self.intake_motor,self.intakeTimer) self.pulldowntoggle=False self.components = { 'drive': self.drive, 'catapult': self.catapult, 'intake': self.intake } self.control_loop_wait_time = 0.025 self.autonomous = AutonomousModeManager(self.components) def Autonomous(self): '''Called when the robot is in autonomous mode''' wpilib.SmartDashboard.PutNumber('RobotMode', MODE_AUTONOMOUS) self.autonomous.run(self, self.control_loop_wait_time) def Disabled(self): '''Called when the robot is in disabled mode''' wpilib.SmartDashboard.PutNumber('RobotMode', MODE_DISABLED) while self.IsDisabled(): self.communicateWithSmartDashboard(True) wpilib.Wait(0.01) def OperatorControl(self): '''Called when the robot is in Teleoperated mode''' wpilib.SmartDashboard.PutNumber('RobotMode', 
MODE_TELEOPERATED) dog = self.GetWatchdog() dog.SetExpiration(0.25) dog.SetEnabled(True) self.compressor.Start() preciseDelay = delay.PreciseDelay(self.control_loop_wait_time) while self.IsOperatorControl()and self.IsEnabled(): self.robotMode=1 dog.Feed() # # Driving # if self.joystick2.GetZ()==1: self.drive.move((-1)*self.joystick1.GetX(), self.joystick1.GetY(), self.joystick2.GetX()) else: self.drive.move(self.joystick1.GetX(), (-1)*self.joystick1.GetY(), self.joystick2.GetX()) # Intake # if self.joystick1.GetRawButton(2): self.intake.armDown() if self.joystick1.GetRawButton(3): self.intake.armUp() if self.joystick1.GetRawButton(5): self.intake.ballIn() if self.joystick1.GetRawButton(4): self.intake.ballOut() if self.joystick1.GetRawButton(6): self.drive.angle_rotation(-10) if self.joystick1.GetRawButton(7): self.drive.angle_rotation(10) # # Catapult # if wpilib.SmartDashboard.GetBoolean("AutoWinch"): self.catapult.autoWinch() if self.joystick2.GetRawButton(1): self.catapult.launchNoSensor() if self.joystick1.GetRawButton(1): self.catapult.pulldownNoSensor() # # Other # self.communicateWithSmartDashboard(False) self.update() preciseDelay.wait() # Disable the watchdog at the end dog.SetEnabled(False) # only run the compressor in teleoperated mode self.compressor.Stop() def update(self): '''This function calls all of the doit functions for each component''' for component in self.components.values(): component.doit() def initSmartDashboard(self): self.sdTimer = wpilib.Timer() self.sdTimer.Start() wpilib.SmartDashboard.PutBoolean("AutoWinch", False) wpilib.SmartDashboard.PutBoolean("EnableTuning", False) wpilib.SmartDashboard.PutNumber("FirePower", 100) wpilib.SmartDashboard.PutNumber("ArmSet", 0) wpilib.SmartDashboard.PutBoolean("Fire", False) wpilib.SmartDashboard.PutBoolean("GyroEnabled", True) wpilib.SmartDashboard.PutNumber("GyroAngle",self.gyro.GetAngle()) wpilib.SmartDashboard.PutNumber("Compressor", self.compressor.GetPressureSwitchValue()) 
wpilib.SmartDashboard.PutNumber("AngleConstant", self.drive.angle_constant) print (self.compressor.GetPressureSwitchValue()) def communicateWithSmartDashboard(self, in_disabled): '''Sends and recieves values to/from the SmartDashboard''' # only send values every once in awhile if self.sdTimer.HasPeriodPassed(0.1): # Send the distance to the driver station wpilib.SmartDashboard.PutNumber("Distance",self.ultrasonic_sensor.GetVoltage()) wpilib.SmartDashboard.PutNumber("GyroAngle",self.gyro.GetAngle()) # Battery can actually be done dashboard side, fix that self (Shayne) # Put the arm state wpilib.SmartDashboard.PutNumber("ArmState",self.intake.GetMode()) # Get if a ball is loaded wpilib.SmartDashboard.PutBoolean("BallLoaded", self.catapult.check_ready()) wpilib.SmartDashboard.PutNumber("ShootAngle",self.catapult.getCatapultLocation()) wpilib.SmartDashboard.PutNumber("Compressor", self.compressor.GetPressureSwitchValue()) # don't remove this, this allows us to disable the gyro self.drive.set_gyro_enabled(wpilib.SmartDashboard.GetBoolean('GyroEnabled')) # don't set any of the other variables in disabled mode! 
        # Don't set any of the other variables in disabled mode! Bail out
        # early so dashboard inputs can't actuate mechanisms while disabled.
        if in_disabled:
            return

        # Get the number to set the winch power
        #self.WinchPowerVar = wpilib.SmartDashboard.PutNumber("FirePower",1)
        # TODO: Cleanup catapult.py and finish this
        self.drive.set_angle_constant(
            wpilib.SmartDashboard.GetNumber('AngleConstant'))

        # If its 0 then update the arm state
        arm_state = wpilib.SmartDashboard.GetNumber("ArmSet")
        if arm_state != 0:
            self.intake.SetMode(arm_state)
            wpilib.SmartDashboard.PutNumber("ArmSet", 0)
            # 0 it to avoid locking the driver out of arm controls

        # One-shot fire button from the dashboard: launch, then reset the
        # flag so it only fires once per click.
        if wpilib.SmartDashboard.GetBoolean("Fire"):
            self.catapult.launchNoSensor()
            wpilib.SmartDashboard.PutBoolean("Fire", False)

        self.catapult.setWinchLocation(
            wpilib.SmartDashboard.GetNumber('FirePower'))


def run():
    '''
        When the robot starts, this is the very first function that
        gets called

        :returns: a new instance of the `MyRobot` class
    '''
    robot = MyRobot()
    robot.StartCompetition()
    return robot


if __name__ == '__main__':
    # pyfrc's require_version appeared in 2014.7.3; older installs would
    # fail in confusing ways, so check for the attribute first.
    if not hasattr(wpilib, 'require_version'):
        print("ERROR: You must have pyfrc 2014.7.3 or above installed!")  # pragma: no cover
    else:
        wpilib.require_version('2014.7.3')

        # Hook the simulation physics model into pyfrc before starting.
        import physics
        wpilib.internal.physics_controller.setup(physics)

        wpilib.run()
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import heatclient import mock from neutron.openstack.common import uuidutils from neutron.plugins.common import constants from oslo_serialization import jsonutils import webob from gbpservice.neutron.services.servicechain.plugins.msc import config from gbpservice.neutron.services.servicechain.plugins.msc.drivers import ( simplechain_driver as simplechain_driver) from gbpservice.neutron.tests.unit.services.servicechain import ( test_servicechain_plugin as test_servicechain_plugin) STACK_DELETE_RETRIES = 5 STACK_DELETE_RETRY_WAIT = 3 class MockStackObject(object): def __init__(self, status): self.stack_status = status class MockHeatClientFunctions(object): def delete(self, stack_id): raise heatclient.exc.HTTPNotFound() def create(self, **fields): return {'stack': {'id': uuidutils.generate_uuid()}} def get(self, stack_id): return MockStackObject('DELETE_COMPLETE') class MockHeatClient(object): def __init__(self, api_version, endpoint, **kwargs): self.stacks = MockHeatClientFunctions() class SimpleChainDriverTestCase( test_servicechain_plugin.ServiceChainPluginTestCase): def setUp(self): config.cfg.CONF.set_override('servicechain_drivers', ['simplechain_driver'], group='servicechain') config.cfg.CONF.set_override('stack_delete_retries', STACK_DELETE_RETRIES, group='simplechain') config.cfg.CONF.set_override('stack_delete_retry_wait', STACK_DELETE_RETRY_WAIT, group='simplechain') super(SimpleChainDriverTestCase, self).setUp() key_client = 
mock.patch( 'gbpservice.neutron.services.servicechain.plugins.msc.drivers.' 'simplechain_driver.HeatClient._get_auth_token').start() key_client.return_value = 'mysplendidtoken' class TestServiceChainInstance(SimpleChainDriverTestCase): def test_invalid_service_type_rejected(self): res = self.create_service_profile( service_type="test", expected_res_status=webob.exc.HTTPBadRequest.code) self.assertEqual('InvalidServiceTypeForReferenceDriver', res['NeutronError']['type']) def test_chain_node_create_success(self): res = self._create_profiled_servicechain_node( service_type=constants.FIREWALL, config='{}', expected_res_status=webob.exc.HTTPCreated.code) self.assertEqual('{}', res['servicechain_node']['config']) def test_in_use_node_config_update_rejected(self): node = self.create_servicechain_node( service_type=constants.FIREWALL, config='{}', expected_res_status=webob.exc.HTTPCreated.code)[ 'servicechain_node'] self.assertEqual('{}', node['config']) spec = self.create_servicechain_spec( nodes=[node['id']], expected_res_status=webob.exc.HTTPCreated.code)[ 'servicechain_spec'] with mock.patch.object(simplechain_driver.HeatClient, 'create') as stack_create: stack_create.return_value = {'stack': { 'id': uuidutils.generate_uuid()}} self.create_servicechain_instance(servicechain_specs=[spec['id']]) res = self.update_servicechain_node( node['id'], config='{"key": "value"}', expected_res_status=webob.exc.HTTPBadRequest.code) self.assertEqual('NodeUpdateNotSupported', res['NeutronError']['type']) def test_chain_spec_update(self): template1 = '{"key1":"value1"}' scn = self._create_profiled_servicechain_node(config=template1) scn1_name = scn['servicechain_node']['name'] scn_id = scn['servicechain_node']['id'] name = "scs1" template2 = '{"key2":"value2"}' scn2 = self._create_profiled_servicechain_node(config=template2) scn2_id = scn2['servicechain_node']['id'] scn2_name = scn2['servicechain_node']['name'] scs = self.create_servicechain_spec(name=name, nodes=[scn_id]) sc_spec_id = 
scs['servicechain_spec']['id'] stack1 = {'stack': {'id': uuidutils.generate_uuid()}} stack2 = {'stack': {'id': uuidutils.generate_uuid()}} stack3 = {'stack': {'id': uuidutils.generate_uuid()}} expected_create_calls = [] expected_delete_calls = [] with mock.patch.object(simplechain_driver.HeatClient, 'create') as stack_create: with mock.patch.object(simplechain_driver.HeatClient, 'delete') as stack_delete: stack_create.return_value = stack1 instance1_name = "sc_instance_1" sc_instance1 = self.create_servicechain_instance( name=instance1_name, servicechain_specs=[sc_spec_id]) sci1_id = sc_instance1['servicechain_instance']['id'] self.assertEqual([sc_spec_id], sc_instance1['servicechain_instance'][ 'servicechain_specs']) stack_name = ("stack_" + instance1_name + scn1_name + sci1_id[:8]) expected_create_calls.append( mock.call(stack_name, jsonutils.loads(template1), {})) stack_create.return_value = stack2 instance2_name = "sc_instance_2" sc_instance2 = self.create_servicechain_instance( name=instance2_name, servicechain_specs=[sc_spec_id]) sci2_id = sc_instance2['servicechain_instance']['id'] self.assertEqual( [sc_spec_id], sc_instance2['servicechain_instance'][ 'servicechain_specs']) stack_name = ("stack_" + instance2_name + scn1_name + sci2_id[:8]) expected_create_calls.append( mock.call(stack_name, jsonutils.loads(template1), {})) #Now perform an update of the spec new_spec = {'servicechain_spec': {'nodes': [scn2_id]}} stack_create.return_value = stack3 req = self.new_update_request( 'servicechain_specs', new_spec, sc_spec_id) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPOk.code, res.status_int) # The two existing stacks will be deleted and two new stacks # will be created expected_delete_calls.append(mock.call(stack1['stack']['id'])) expected_delete_calls.append(mock.call(stack2['stack']['id'])) stack_name = ("stack_" + instance1_name + scn2_name + sci1_id[:8]) expected_create_calls.append( mock.call(stack_name, jsonutils.loads(template2), 
{})) stack_name = ("stack_" + instance2_name + scn2_name + sci2_id[:8]) expected_create_calls.append( mock.call(stack_name, jsonutils.loads(template2), {})) self.assertEqual(expected_delete_calls, stack_delete.call_args_list) self.assertEqual(expected_create_calls, stack_create.call_args_list) def test_chain_instance_create(self): name = "scs1" scn = self._create_profiled_servicechain_node() scn_id = scn['servicechain_node']['id'] scs = self.create_servicechain_spec(name=name, nodes=[scn_id]) sc_spec_id = scs['servicechain_spec']['id'] with mock.patch.object(simplechain_driver.HeatClient, 'create') as stack_create: stack_create.return_value = {'stack': { 'id': uuidutils.generate_uuid()}} sc_instance = self.create_servicechain_instance( name="sc_instance_1", servicechain_specs=[sc_spec_id]) expected_stack_name = ( "stack_" + "sc_instance_1" + scn['servicechain_node']['name'] + sc_instance['servicechain_instance']['id'][:8]) self.assertEqual( [sc_spec_id], sc_instance['servicechain_instance']['servicechain_specs']) stack_create.assert_called_once_with( expected_stack_name, mock.ANY, mock.ANY) def test_chain_instance_delete(self): name = "scs1" scn = self._create_profiled_servicechain_node() scn_id = scn['servicechain_node']['id'] scs = self.create_servicechain_spec(name=name, nodes=[scn_id]) sc_spec_id = scs['servicechain_spec']['id'] with mock.patch.object(simplechain_driver.HeatClient, 'create') as stack_create: stack_create.return_value = {'stack': { 'id': uuidutils.generate_uuid()}} sc_instance = self.create_servicechain_instance( name="sc_instance_1", servicechain_specs=[sc_spec_id]) self.assertEqual([sc_spec_id], sc_instance['servicechain_instance']['servicechain_specs']) with mock.patch.object(simplechain_driver.HeatClient, 'delete'): req = self.new_delete_request( 'servicechain_instances', sc_instance['servicechain_instance']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) def 
test_wait_stack_delete_for_instance_delete(self): name = "scs1" scn = self._create_profiled_servicechain_node() scn_id = scn['servicechain_node']['id'] scs = self.create_servicechain_spec(name=name, nodes=[scn_id]) sc_spec_id = scs['servicechain_spec']['id'] with mock.patch.object(simplechain_driver.HeatClient, 'create') as stack_create: stack_create.return_value = {'stack': { 'id': uuidutils.generate_uuid()}} sc_instance = self.create_servicechain_instance( name="sc_instance_1", servicechain_specs=[sc_spec_id]) self.assertEqual([sc_spec_id], sc_instance['servicechain_instance']['servicechain_specs']) # Verify that as part of delete service chain instance we call # get method for heat stack 5 times before giving up if the state # does not become DELETE_COMPLETE with mock.patch.object(simplechain_driver.HeatClient, 'delete') as stack_delete: with mock.patch.object(simplechain_driver.HeatClient, 'get') as stack_get: stack_get.return_value = MockStackObject('PENDING_DELETE') req = self.new_delete_request( 'servicechain_instances', sc_instance['servicechain_instance']['id']) res = req.get_response(self.ext_api) self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int) stack_delete.assert_called_once_with(mock.ANY) self.assertEqual(STACK_DELETE_RETRIES, stack_get.call_count) # Create and delete another service chain instance and verify that # we call get method for heat stack only once if the stack state # is DELETE_COMPLETE sc_instance = self.create_servicechain_instance( name="sc_instance_1", servicechain_specs=[sc_spec_id]) self.assertEqual( [sc_spec_id], sc_instance['servicechain_instance']['servicechain_specs']) with mock.patch.object(simplechain_driver.HeatClient, 'delete') as stack_delete: with mock.patch.object(simplechain_driver.HeatClient, 'get') as stack_get: stack_get.return_value = MockStackObject( 'DELETE_COMPLETE') req = self.new_delete_request( 'servicechain_instances', sc_instance['servicechain_instance']['id']) res = req.get_response(self.ext_api) 
                self.assertEqual(webob.exc.HTTPNoContent.code,
                                 res.status_int)
                stack_delete.assert_called_once_with(mock.ANY)
                # Stack already DELETE_COMPLETE on the first poll, so the
                # driver must not retry the status check.
                self.assertEqual(1, stack_get.call_count)

    def test_stack_not_found_ignored(self):
        # Deleting a servicechain instance whose Heat stack is already gone
        # must still succeed: the mocked client raises HTTPNotFound from
        # delete(), which the driver is expected to swallow.
        name = "scs1"
        scn = self._create_profiled_servicechain_node()
        scn_id = scn['servicechain_node']['id']
        scs = self.create_servicechain_spec(name=name, nodes=[scn_id])
        sc_spec_id = scs['servicechain_spec']['id']

        # NOTE(review): this patch is start()ed but never stop()ped; it
        # leaks into later tests unless a global patch-stopall cleanup runs.
        mock.patch(heatclient.__name__ + ".client.Client",
                   new=MockHeatClient).start()
        sc_instance = self.create_servicechain_instance(
            name="sc_instance_1", servicechain_specs=[sc_spec_id])
        req = self.new_delete_request(
            'servicechain_instances',
            sc_instance['servicechain_instance']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
# # Copyright 2016 Quantopian, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import OrderedDict import logbook import pandas as pd from pandas_datareader.data import DataReader import pytz from six import iteritems from six.moves.urllib_error import HTTPError from .benchmarks import get_benchmark_returns from . import treasuries, treasuries_can from ..utils.paths import ( cache_root, data_root, ) from zipline.utils.calendars import get_calendar logger = logbook.Logger('Loader') # Mapping from index symbol to appropriate bond data INDEX_MAPPING = { 'SPY': (treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'), '^GSPTSE': (treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'), '^FTSE': # use US treasuries until UK bonds implemented (treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'), } ONE_HOUR = pd.Timedelta(hours=1) def last_modified_time(path): """ Get the last modified time of path as a Timestamp. """ return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC') def get_data_filepath(name, environ=None): """ Returns a handle to data file. Creates containing directory, if needed. 
""" dr = data_root(environ) if not os.path.exists(dr): os.makedirs(dr) return os.path.join(dr, name) def get_cache_filepath(name): cr = cache_root() if not os.path.exists(cr): os.makedirs(cr) return os.path.join(cr, name) def get_benchmark_filename(symbol): return "%s_benchmark.csv" % symbol def has_data_for_dates(series_or_df, first_date, last_date): """ Does `series_or_df` have data on or before first_date and on or after last_date? """ dts = series_or_df.index if not isinstance(dts, pd.DatetimeIndex): raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts)) first, last = dts[[0, -1]] return (first <= first_date) and (last >= last_date) def load_market_data(trading_day=None, trading_days=None, bm_symbol='SPY', environ=None): """ Load benchmark returns and treasury yield curves for the given calendar and benchmark symbol. Benchmarks are downloaded as a Series from Google Finance. Treasury curves are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov' by default. For Canadian exchanges, a loader for Canadian bonds from the Bank of Canada is also available. Results downloaded from the internet are cached in ~/.zipline/data. Subsequent loads will attempt to read from the cached files before falling back to redownload. Parameters ---------- trading_day : pandas.CustomBusinessDay, optional A trading_day used to determine the latest day for which we expect to have data. Defaults to an NYSE trading day. trading_days : pd.DatetimeIndex, optional A calendar of trading days. Also used for determining what cached dates we should expect to have cached. Defaults to the NYSE calendar. bm_symbol : str, optional Symbol for the benchmark index to load. Defaults to 'SPY', the Google ticker for the S&P 500. Returns ------- (benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame) Notes ----- Both return values are DatetimeIndexed with values dated to midnight in UTC of each stored date. 
The columns of `treasury_curves` are: '1month', '3month', '6month', '1year','2year','3year','5year','7year','10year','20year','30year' """ if trading_day is None: trading_day = get_calendar('NYSE').trading_day if trading_days is None: trading_days = get_calendar('NYSE').all_sessions first_date = trading_days[0] now = pd.Timestamp.utcnow() # We expect to have benchmark and treasury data that's current up until # **two** full trading days prior to the most recently completed trading # day. # Example: # On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21. # However, data for Oct 21 doesn't become available until the early morning # hours of Oct 22. This means that there are times on the 22nd at which we # cannot reasonably expect to have data for the 21st available. To be # conservative, we instead expect that at any time on the 22nd, we can # download data for Tuesday the 20th, which is two full trading days prior # to the date on which we're running a test. # We'll attempt to download new data if the latest entry in our cache is # before this date. last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2] br = ensure_benchmark_data( bm_symbol, first_date, last_date, now, # We need the trading_day to figure out the close prior to the first # date so that we can compute returns for the first date. trading_day, environ, ) tc = ensure_treasury_data( bm_symbol, first_date, last_date, now, environ, ) benchmark_returns = br[br.index.slice_indexer(first_date, last_date)] treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)] return benchmark_returns, treasury_curves def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day, environ=None): """ Ensure we have benchmark data for `symbol` from `first_date` to `last_date` Parameters ---------- symbol : str The symbol for the benchmark to load. first_date : pd.Timestamp First required date for the cache. last_date : pd.Timestamp Last required date for the cache. 
now : pd.Timestamp The current time. This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. trading_day : pd.CustomBusinessDay A trading day delta. Used to find the day before first_date so we can get the close of the day prior to first_date. We attempt to download data unless we already have data stored at the data cache for `symbol` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path. """ filename = get_benchmark_filename(symbol) data = _load_cached_data(filename, first_date, last_date, now, 'benchmark', environ) if data is not None: return data # If no cached data was found or it was missing any dates then download the # necessary data. logger.info( ('Downloading benchmark data for {symbol!r} ' 'from {first_date} to {last_date}'), symbol=symbol, first_date=first_date - trading_day, last_date=last_date ) try: data = get_benchmark_returns( symbol, first_date - trading_day, last_date, ) data.to_csv(get_data_filepath(filename, environ)) except (OSError, IOError, HTTPError): logger.exception('Failed to cache the new benchmark returns') raise if not has_data_for_dates(data, first_date, last_date): logger.warn("Still don't have expected data after redownload!") return data def ensure_treasury_data(symbol, first_date, last_date, now, environ=None): """ Ensure we have treasury data from treasury module associated with `symbol`. Parameters ---------- symbol : str Benchmark symbol for which we're loading associated treasury curves. first_date : pd.Timestamp First date required to be in the cache. last_date : pd.Timestamp Last date required to be in the cache. now : pd.Timestamp The current time. 
This is used to prevent repeated attempts to re-download data that isn't available due to scheduling quirks or other failures. We attempt to download data unless we already have data stored in the cache for `module_name` whose first entry is before or on `first_date` and whose last entry is on or after `last_date`. If we perform a download and the cache criteria are not satisfied, we wait at least one hour before attempting a redownload. This is determined by comparing the current time to the result of os.path.getmtime on the cache path. """ loader_module, filename, source = INDEX_MAPPING.get( symbol, INDEX_MAPPING['SPY'], ) first_date = max(first_date, loader_module.earliest_possible_date()) data = _load_cached_data(filename, first_date, last_date, now, 'treasury', environ) if data is not None: return data # If no cached data was found or it was missing any dates then download the # necessary data. logger.info('Downloading treasury data for {symbol!r}.', symbol=symbol) try: data = loader_module.get_treasury_data(first_date, last_date) data.to_csv(get_data_filepath(filename, environ)) except (OSError, IOError, HTTPError): logger.exception('failed to cache treasury data') if not has_data_for_dates(data, first_date, last_date): logger.warn("Still don't have expected data after redownload!") return data def _load_cached_data(filename, first_date, last_date, now, resource_name, environ=None): if resource_name == 'benchmark': from_csv = pd.Series.from_csv else: from_csv = pd.DataFrame.from_csv # Path for the cache. path = get_data_filepath(filename, environ) # If the path does not exist, it means the first download has not happened # yet, so don't try to read from 'path'. if os.path.exists(path): try: data = from_csv(path) data.index = data.index.to_datetime().tz_localize('UTC') if has_data_for_dates(data, first_date, last_date): return data # Don't re-download if we've successfully downloaded and written a # file in the last hour. 
            last_download_time = last_modified_time(path)
            if (now - last_download_time) <= ONE_HOUR:
                logger.warn(
                    "Refusing to download new {resource} data because a "
                    "download succeeded at {time}.",
                    resource=resource_name,
                    time=last_download_time,
                )
                return data

        except (OSError, IOError, ValueError) as e:
            # These can all be raised by various versions of pandas on various
            # classes of malformed input.  Treat them all as cache misses.
            logger.info(
                "Loading data for {path} failed with error [{error}].",
                path=path,
                error=e,
            )

    logger.info(
        "Cache at {path} does not have data from {start} to {end}.\n",
        start=first_date,
        end=last_date,
        path=path,
    )
    return None


def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
    """Load closing prices from yahoo finance.

    :Optional:
        indexes : dict (Default: {'SPX': '^SPY'})
            Financial indexes to load.
        stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT', 'XOM', 'AA',
                 'JNJ', 'PEP', 'KO'])
            Stock closing prices to load.
        start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
            Retrieve prices from start date on.
        end : datetime
            Retrieve prices until end date.  NOTE(review): no default is
            applied here; a None end date is passed through to the
            downstream reader -- confirm intended behavior.

    :Note:
        This is based on code presented in a talk by Wes McKinney:
        http://wesmckinney.com/files/20111017/notebook_output.pdf
    """
    assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""

    # Only `start` gets an explicit default; `end` is left as passed in.
    if start is None:
        start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)

    if start is not None and end is not None:
        assert start < end, "start date is later than end date."
data = OrderedDict() if stocks is not None: for stock in stocks: logger.info('Loading stock: {}'.format(stock)) stock_pathsafe = stock.replace(os.path.sep, '--') cache_filename = "{stock}-{start}-{end}.csv".format( stock=stock_pathsafe, start=start, end=end).replace(':', '-') cache_filepath = get_cache_filepath(cache_filename) if os.path.exists(cache_filepath): stkd = pd.DataFrame.from_csv(cache_filepath) else: stkd = DataReader(stock, 'yahoo', start, end).sort_index() stkd.to_csv(cache_filepath) data[stock] = stkd if indexes is not None: for name, ticker in iteritems(indexes): logger.info('Loading index: {} ({})'.format(name, ticker)) stkd = DataReader(ticker, 'yahoo', start, end).sort_index() data[name] = stkd return data def load_from_yahoo(indexes=None, stocks=None, start=None, end=None, adjusted=True): """ Loads price data from Yahoo into a dataframe for each of the indicated assets. By default, 'price' is taken from Yahoo's 'Adjusted Close', which removes the impact of splits and dividends. If the argument 'adjusted' is False, then the non-adjusted 'close' field is used instead. :param indexes: Financial indexes to load. :type indexes: dict :param stocks: Stock closing prices to load. :type stocks: list :param start: Retrieve prices from start date on. :type start: datetime :param end: Retrieve prices until end date. :type end: datetime :param adjusted: Adjust the price for splits and dividends. 
:type adjusted: bool """ data = _load_raw_yahoo_data(indexes, stocks, start, end) if adjusted: close_key = 'Adj Close' else: close_key = 'Close' df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)}) df.index = df.index.tz_localize(pytz.utc) return df def load_prices_from_csv(filepath, identifier_col, tz='UTC'): data = pd.read_csv(filepath, index_col=identifier_col) data.index = pd.DatetimeIndex(data.index, tz=tz) data.sort_index(inplace=True) return data def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'): data = None for file in os.listdir(folderpath): if '.csv' not in file: continue raw = load_prices_from_csv(os.path.join(folderpath, file), identifier_col, tz) if data is None: data = raw else: data = pd.concat([data, raw], axis=1) return data
# NOTE(review): unresolved VCS merge-conflict markers below.  The matching
# "=======" / ">>>>>>>" lines fall outside this chunk, so the conflict cannot
# be resolved here -- it must be fixed before this module can be imported.
<<<<<<< HEAD
<<<<<<< HEAD
import unittest

import gc
import sys
import weakref

from unittest.test.support import LoggingResult, TestEquality


### Support code for Test_TestSuite
################################################################

class Test(object):
    # Minimal TestCase with several test methods; its instances serve as
    # fixture material for building suites in the tests below.
    class Foo(unittest.TestCase):
        def test_1(self): pass
        def test_2(self): pass
        def test_3(self): pass
        def runTest(self): pass

def _mk_TestSuite(*names):
    # Build a TestSuite containing one Test.Foo per given method name.
    return unittest.TestSuite(Test.Foo(n) for n in names)

################################################################


class Test_TestSuite(unittest.TestCase, TestEquality):

    ### Set up attributes needed by inherited tests
    ################################################################

    # Used by TestEquality.test_eq
    eq_pairs = [(unittest.TestSuite(), unittest.TestSuite())
               ,(unittest.TestSuite(), unittest.TestSuite([]))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))]

    # Used by TestEquality.test_ne
    ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1'))
               ,(unittest.TestSuite([]), _mk_TestSuite('test_1'))
               ,(_mk_TestSuite('test_1', 'test_2'),
                 _mk_TestSuite('test_1', 'test_3'))
               ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))]

    ################################################################
    ### /Set up attributes needed by inherited tests

    ### Tests for TestSuite.__init__
    ################################################################

    # "class TestSuite([tests])"
    #
    # The tests iterable should be optional
    def test_init__tests_optional(self):
        suite = unittest.TestSuite()

        self.assertEqual(suite.countTestCases(), 0)
        # countTestCases() still works after tests are run
        suite.run(unittest.TestResult())
        self.assertEqual(suite.countTestCases(), 0)

    # "class TestSuite([tests])"
    # ...
# "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should deal with empty tests iterables by allowing the # creation of an empty suite def test_init__empty_tests(self): suite = unittest.TestSuite([]) self.assertEqual(suite.countTestCases(), 0) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 0) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should allow any iterable to provide tests def test_init__tests_from_any_iterable(self): def tests(): yield unittest.FunctionTestCase(lambda: None) yield unittest.FunctionTestCase(lambda: None) suite_1 = unittest.TestSuite(tests()) self.assertEqual(suite_1.countTestCases(), 2) suite_2 = unittest.TestSuite(suite_1) self.assertEqual(suite_2.countTestCases(), 2) suite_3 = unittest.TestSuite(set(suite_1)) self.assertEqual(suite_3.countTestCases(), 2) # countTestCases() still works after tests are run suite_1.run(unittest.TestResult()) self.assertEqual(suite_1.countTestCases(), 2) suite_2.run(unittest.TestResult()) self.assertEqual(suite_2.countTestCases(), 2) suite_3.run(unittest.TestResult()) self.assertEqual(suite_3.countTestCases(), 2) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # Does TestSuite() also allow other TestSuite() instances to be present # in the tests iterable? 
def test_init__TestSuite_instances_in_tests(self): def tests(): ftc = unittest.FunctionTestCase(lambda: None) yield unittest.TestSuite([ftc]) yield unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite(tests()) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) ################################################################ ### /Tests for TestSuite.__init__ # Container types should support the iter protocol def test_iter(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(list(suite), [test1, test2]) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite returns 0? def test_countTestCases_zero_simple(self): suite = unittest.TestSuite() self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite (even if it contains other empty # TestSuite instances) returns 0? def test_countTestCases_zero_nested(self): class Test1(unittest.TestCase): def test(self): pass suite = unittest.TestSuite([unittest.TestSuite()]) self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. 
# ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" def test_countTestCases_simple(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Make sure this holds for nested TestSuite instances, too def test_countTestCases_nested(self): class Test1(unittest.TestCase): def test1(self): pass def test2(self): pass test2 = unittest.FunctionTestCase(lambda: None) test3 = unittest.FunctionTestCase(lambda: None) child = unittest.TestSuite((Test1('test2'), test2)) parent = unittest.TestSuite((test3, child, Test1('test1'))) self.assertEqual(parent.countTestCases(), 4) # countTestCases() still works after tests are run parent.run(unittest.TestResult()) self.assertEqual(parent.countTestCases(), 4) self.assertEqual(child.countTestCases(), 2) # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." # # And if there are no tests? What then? def test_run__empty_suite(self): events = [] result = LoggingResult(events) suite = unittest.TestSuite() suite.run(result) self.assertEqual(events, []) # "Note that unlike TestCase.run(), TestSuite.run() requires the # "result object to be passed in." def test_run__requires_result(self): suite = unittest.TestSuite() try: suite.run() except TypeError: pass else: self.fail("Failed to raise TypeError") # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." 
def test_run(self): events = [] result = LoggingResult(events) class LoggingCase(unittest.TestCase): def run(self, result): events.append('run %s' % self._testMethodName) def test1(self): pass def test2(self): pass tests = [LoggingCase('test1'), LoggingCase('test2')] unittest.TestSuite(tests).run(result) self.assertEqual(events, ['run test1', 'run test2']) # "Add a TestCase ... to the suite" def test_addTest__TestCase(self): class Foo(unittest.TestCase): def test(self): pass test = Foo('test') suite = unittest.TestSuite() suite.addTest(test) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [test]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add a ... TestSuite to the suite" def test_addTest__TestSuite(self): class Foo(unittest.TestCase): def test(self): pass suite_2 = unittest.TestSuite([Foo('test')]) suite = unittest.TestSuite() suite.addTest(suite_2) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [suite_2]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." 
# # "This is equivalent to iterating over tests, calling addTest() for # each element" def test_addTests(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass test_1 = Foo('test_1') test_2 = Foo('test_2') inner_suite = unittest.TestSuite([test_2]) def gen(): yield test_1 yield test_2 yield inner_suite suite_1 = unittest.TestSuite() suite_1.addTests(gen()) self.assertEqual(list(suite_1), list(gen())) # "This is equivalent to iterating over tests, calling addTest() for # each element" suite_2 = unittest.TestSuite() for t in gen(): suite_2.addTest(t) self.assertEqual(suite_1, suite_2) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." # # What happens if it doesn't get an iterable? def test_addTest__noniterable(self): suite = unittest.TestSuite() try: suite.addTests(5) except TypeError: pass else: self.fail("Failed to raise TypeError") def test_addTest__noncallable(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, 5) def test_addTest__casesuiteclass(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, Test_TestSuite) self.assertRaises(TypeError, suite.addTest, unittest.TestSuite) def test_addTests__string(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTests, "foo") def test_function_in_suite(self): def f(_): pass suite = unittest.TestSuite() suite.addTest(f) # when the bug is fixed this line will not crash suite.run(unittest.TestResult()) def test_remove_test_at_index(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = [1, 2, 3] suite._removeTestAtIndex(1) self.assertEqual([1, None, 3], suite._tests) def test_remove_test_at_index_not_indexable(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = None # if _removeAtIndex raises for 
noniterables this next line will break suite._removeTestAtIndex(2) def assert_garbage_collect_test_after_run(self, TestSuiteClass): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") class Foo(unittest.TestCase): def test_nothing(self): pass test = Foo('test_nothing') wref = weakref.ref(test) suite = TestSuiteClass([wref()]) suite.run(unittest.TestResult()) del test # for the benefit of non-reference counting implementations gc.collect() self.assertEqual(suite._tests, [None]) self.assertIsNone(wref()) def test_garbage_collect_test_after_run_BaseTestSuite(self): self.assert_garbage_collect_test_after_run(unittest.BaseTestSuite) def test_garbage_collect_test_after_run_TestSuite(self): self.assert_garbage_collect_test_after_run(unittest.TestSuite) def test_basetestsuite(self): class Test(unittest.TestCase): wasSetUp = False wasTornDown = False @classmethod def setUpClass(cls): cls.wasSetUp = True @classmethod def tearDownClass(cls): cls.wasTornDown = True def testPass(self): pass def testFail(self): fail class Module(object): wasSetUp = False wasTornDown = False @staticmethod def setUpModule(): Module.wasSetUp = True @staticmethod def tearDownModule(): Module.wasTornDown = True Test.__module__ = 'Module' sys.modules['Module'] = Module self.addCleanup(sys.modules.pop, 'Module') suite = unittest.BaseTestSuite() suite.addTests([Test('testPass'), Test('testFail')]) self.assertEqual(suite.countTestCases(), 2) result = unittest.TestResult() suite.run(result) self.assertFalse(Module.wasSetUp) self.assertFalse(Module.wasTornDown) self.assertFalse(Test.wasSetUp) self.assertFalse(Test.wasTornDown) self.assertEqual(len(result.errors), 1) self.assertEqual(len(result.failures), 0) self.assertEqual(result.testsRun, 2) self.assertEqual(suite.countTestCases(), 2) def test_overriding_call(self): class MySuite(unittest.TestSuite): called = False def __call__(self, *args, **kw): self.called = True unittest.TestSuite.__call__(self, *args, **kw) 
suite = MySuite() result = unittest.TestResult() wrapper = unittest.TestSuite() wrapper.addTest(suite) wrapper(result) self.assertTrue(suite.called) # reusing results should be permitted even if abominable self.assertFalse(result._testRunEntered) if __name__ == '__main__': unittest.main() ======= import unittest import gc import sys import weakref from unittest.test.support import LoggingResult, TestEquality ### Support code for Test_TestSuite ################################################################ class Test(object): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def test_3(self): pass def runTest(self): pass def _mk_TestSuite(*names): return unittest.TestSuite(Test.Foo(n) for n in names) ################################################################ class Test_TestSuite(unittest.TestCase, TestEquality): ### Set up attributes needed by inherited tests ################################################################ # Used by TestEquality.test_eq eq_pairs = [(unittest.TestSuite(), unittest.TestSuite()) ,(unittest.TestSuite(), unittest.TestSuite([])) ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))] # Used by TestEquality.test_ne ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1')) ,(unittest.TestSuite([]), _mk_TestSuite('test_1')) ,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3')) ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))] ################################################################ ### /Set up attributes needed by inherited tests ### Tests for TestSuite.__init__ ################################################################ # "class TestSuite([tests])" # # The tests iterable should be optional def test_init__tests_optional(self): suite = unittest.TestSuite() self.assertEqual(suite.countTestCases(), 0) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 0) # "class TestSuite([tests])" # ... 
# "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should deal with empty tests iterables by allowing the # creation of an empty suite def test_init__empty_tests(self): suite = unittest.TestSuite([]) self.assertEqual(suite.countTestCases(), 0) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 0) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should allow any iterable to provide tests def test_init__tests_from_any_iterable(self): def tests(): yield unittest.FunctionTestCase(lambda: None) yield unittest.FunctionTestCase(lambda: None) suite_1 = unittest.TestSuite(tests()) self.assertEqual(suite_1.countTestCases(), 2) suite_2 = unittest.TestSuite(suite_1) self.assertEqual(suite_2.countTestCases(), 2) suite_3 = unittest.TestSuite(set(suite_1)) self.assertEqual(suite_3.countTestCases(), 2) # countTestCases() still works after tests are run suite_1.run(unittest.TestResult()) self.assertEqual(suite_1.countTestCases(), 2) suite_2.run(unittest.TestResult()) self.assertEqual(suite_2.countTestCases(), 2) suite_3.run(unittest.TestResult()) self.assertEqual(suite_3.countTestCases(), 2) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # Does TestSuite() also allow other TestSuite() instances to be present # in the tests iterable? 
def test_init__TestSuite_instances_in_tests(self): def tests(): ftc = unittest.FunctionTestCase(lambda: None) yield unittest.TestSuite([ftc]) yield unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite(tests()) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) ################################################################ ### /Tests for TestSuite.__init__ # Container types should support the iter protocol def test_iter(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(list(suite), [test1, test2]) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite returns 0? def test_countTestCases_zero_simple(self): suite = unittest.TestSuite() self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite (even if it contains other empty # TestSuite instances) returns 0? def test_countTestCases_zero_nested(self): class Test1(unittest.TestCase): def test(self): pass suite = unittest.TestSuite([unittest.TestSuite()]) self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. 
# ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" def test_countTestCases_simple(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Make sure this holds for nested TestSuite instances, too def test_countTestCases_nested(self): class Test1(unittest.TestCase): def test1(self): pass def test2(self): pass test2 = unittest.FunctionTestCase(lambda: None) test3 = unittest.FunctionTestCase(lambda: None) child = unittest.TestSuite((Test1('test2'), test2)) parent = unittest.TestSuite((test3, child, Test1('test1'))) self.assertEqual(parent.countTestCases(), 4) # countTestCases() still works after tests are run parent.run(unittest.TestResult()) self.assertEqual(parent.countTestCases(), 4) self.assertEqual(child.countTestCases(), 2) # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." # # And if there are no tests? What then? def test_run__empty_suite(self): events = [] result = LoggingResult(events) suite = unittest.TestSuite() suite.run(result) self.assertEqual(events, []) # "Note that unlike TestCase.run(), TestSuite.run() requires the # "result object to be passed in." def test_run__requires_result(self): suite = unittest.TestSuite() try: suite.run() except TypeError: pass else: self.fail("Failed to raise TypeError") # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." 
def test_run(self): events = [] result = LoggingResult(events) class LoggingCase(unittest.TestCase): def run(self, result): events.append('run %s' % self._testMethodName) def test1(self): pass def test2(self): pass tests = [LoggingCase('test1'), LoggingCase('test2')] unittest.TestSuite(tests).run(result) self.assertEqual(events, ['run test1', 'run test2']) # "Add a TestCase ... to the suite" def test_addTest__TestCase(self): class Foo(unittest.TestCase): def test(self): pass test = Foo('test') suite = unittest.TestSuite() suite.addTest(test) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [test]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add a ... TestSuite to the suite" def test_addTest__TestSuite(self): class Foo(unittest.TestCase): def test(self): pass suite_2 = unittest.TestSuite([Foo('test')]) suite = unittest.TestSuite() suite.addTest(suite_2) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [suite_2]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." 
# # "This is equivalent to iterating over tests, calling addTest() for # each element" def test_addTests(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass test_1 = Foo('test_1') test_2 = Foo('test_2') inner_suite = unittest.TestSuite([test_2]) def gen(): yield test_1 yield test_2 yield inner_suite suite_1 = unittest.TestSuite() suite_1.addTests(gen()) self.assertEqual(list(suite_1), list(gen())) # "This is equivalent to iterating over tests, calling addTest() for # each element" suite_2 = unittest.TestSuite() for t in gen(): suite_2.addTest(t) self.assertEqual(suite_1, suite_2) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." # # What happens if it doesn't get an iterable? def test_addTest__noniterable(self): suite = unittest.TestSuite() try: suite.addTests(5) except TypeError: pass else: self.fail("Failed to raise TypeError") def test_addTest__noncallable(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, 5) def test_addTest__casesuiteclass(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, Test_TestSuite) self.assertRaises(TypeError, suite.addTest, unittest.TestSuite) def test_addTests__string(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTests, "foo") def test_function_in_suite(self): def f(_): pass suite = unittest.TestSuite() suite.addTest(f) # when the bug is fixed this line will not crash suite.run(unittest.TestResult()) def test_remove_test_at_index(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = [1, 2, 3] suite._removeTestAtIndex(1) self.assertEqual([1, None, 3], suite._tests) def test_remove_test_at_index_not_indexable(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = None # if _removeAtIndex raises for 
noniterables this next line will break suite._removeTestAtIndex(2) def assert_garbage_collect_test_after_run(self, TestSuiteClass): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") class Foo(unittest.TestCase): def test_nothing(self): pass test = Foo('test_nothing') wref = weakref.ref(test) suite = TestSuiteClass([wref()]) suite.run(unittest.TestResult()) del test # for the benefit of non-reference counting implementations gc.collect() self.assertEqual(suite._tests, [None]) self.assertIsNone(wref()) def test_garbage_collect_test_after_run_BaseTestSuite(self): self.assert_garbage_collect_test_after_run(unittest.BaseTestSuite) def test_garbage_collect_test_after_run_TestSuite(self): self.assert_garbage_collect_test_after_run(unittest.TestSuite) def test_basetestsuite(self): class Test(unittest.TestCase): wasSetUp = False wasTornDown = False @classmethod def setUpClass(cls): cls.wasSetUp = True @classmethod def tearDownClass(cls): cls.wasTornDown = True def testPass(self): pass def testFail(self): fail class Module(object): wasSetUp = False wasTornDown = False @staticmethod def setUpModule(): Module.wasSetUp = True @staticmethod def tearDownModule(): Module.wasTornDown = True Test.__module__ = 'Module' sys.modules['Module'] = Module self.addCleanup(sys.modules.pop, 'Module') suite = unittest.BaseTestSuite() suite.addTests([Test('testPass'), Test('testFail')]) self.assertEqual(suite.countTestCases(), 2) result = unittest.TestResult() suite.run(result) self.assertFalse(Module.wasSetUp) self.assertFalse(Module.wasTornDown) self.assertFalse(Test.wasSetUp) self.assertFalse(Test.wasTornDown) self.assertEqual(len(result.errors), 1) self.assertEqual(len(result.failures), 0) self.assertEqual(result.testsRun, 2) self.assertEqual(suite.countTestCases(), 2) def test_overriding_call(self): class MySuite(unittest.TestSuite): called = False def __call__(self, *args, **kw): self.called = True unittest.TestSuite.__call__(self, *args, **kw) 
suite = MySuite() result = unittest.TestResult() wrapper = unittest.TestSuite() wrapper.addTest(suite) wrapper(result) self.assertTrue(suite.called) # reusing results should be permitted even if abominable self.assertFalse(result._testRunEntered) if __name__ == '__main__': unittest.main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453 ======= import unittest import gc import sys import weakref from unittest.test.support import LoggingResult, TestEquality ### Support code for Test_TestSuite ################################################################ class Test(object): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass def test_3(self): pass def runTest(self): pass def _mk_TestSuite(*names): return unittest.TestSuite(Test.Foo(n) for n in names) ################################################################ class Test_TestSuite(unittest.TestCase, TestEquality): ### Set up attributes needed by inherited tests ################################################################ # Used by TestEquality.test_eq eq_pairs = [(unittest.TestSuite(), unittest.TestSuite()) ,(unittest.TestSuite(), unittest.TestSuite([])) ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_1'))] # Used by TestEquality.test_ne ne_pairs = [(unittest.TestSuite(), _mk_TestSuite('test_1')) ,(unittest.TestSuite([]), _mk_TestSuite('test_1')) ,(_mk_TestSuite('test_1', 'test_2'), _mk_TestSuite('test_1', 'test_3')) ,(_mk_TestSuite('test_1'), _mk_TestSuite('test_2'))] ################################################################ ### /Set up attributes needed by inherited tests ### Tests for TestSuite.__init__ ################################################################ # "class TestSuite([tests])" # # The tests iterable should be optional def test_init__tests_optional(self): suite = unittest.TestSuite() self.assertEqual(suite.countTestCases(), 0) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 0) # 
"class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should deal with empty tests iterables by allowing the # creation of an empty suite def test_init__empty_tests(self): suite = unittest.TestSuite([]) self.assertEqual(suite.countTestCases(), 0) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 0) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # TestSuite should allow any iterable to provide tests def test_init__tests_from_any_iterable(self): def tests(): yield unittest.FunctionTestCase(lambda: None) yield unittest.FunctionTestCase(lambda: None) suite_1 = unittest.TestSuite(tests()) self.assertEqual(suite_1.countTestCases(), 2) suite_2 = unittest.TestSuite(suite_1) self.assertEqual(suite_2.countTestCases(), 2) suite_3 = unittest.TestSuite(set(suite_1)) self.assertEqual(suite_3.countTestCases(), 2) # countTestCases() still works after tests are run suite_1.run(unittest.TestResult()) self.assertEqual(suite_1.countTestCases(), 2) suite_2.run(unittest.TestResult()) self.assertEqual(suite_2.countTestCases(), 2) suite_3.run(unittest.TestResult()) self.assertEqual(suite_3.countTestCases(), 2) # "class TestSuite([tests])" # ... # "If tests is given, it must be an iterable of individual test cases # or other test suites that will be used to build the suite initially" # # Does TestSuite() also allow other TestSuite() instances to be present # in the tests iterable? 
def test_init__TestSuite_instances_in_tests(self): def tests(): ftc = unittest.FunctionTestCase(lambda: None) yield unittest.TestSuite([ftc]) yield unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite(tests()) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) ################################################################ ### /Tests for TestSuite.__init__ # Container types should support the iter protocol def test_iter(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(list(suite), [test1, test2]) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite returns 0? def test_countTestCases_zero_simple(self): suite = unittest.TestSuite() self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Presumably an empty TestSuite (even if it contains other empty # TestSuite instances) returns 0? def test_countTestCases_zero_nested(self): class Test1(unittest.TestCase): def test(self): pass suite = unittest.TestSuite([unittest.TestSuite()]) self.assertEqual(suite.countTestCases(), 0) # "Return the number of tests represented by the this test object. 
# ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" def test_countTestCases_simple(self): test1 = unittest.FunctionTestCase(lambda: None) test2 = unittest.FunctionTestCase(lambda: None) suite = unittest.TestSuite((test1, test2)) self.assertEqual(suite.countTestCases(), 2) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 2) # "Return the number of tests represented by the this test object. # ...this method is also implemented by the TestSuite class, which can # return larger [greater than 1] values" # # Make sure this holds for nested TestSuite instances, too def test_countTestCases_nested(self): class Test1(unittest.TestCase): def test1(self): pass def test2(self): pass test2 = unittest.FunctionTestCase(lambda: None) test3 = unittest.FunctionTestCase(lambda: None) child = unittest.TestSuite((Test1('test2'), test2)) parent = unittest.TestSuite((test3, child, Test1('test1'))) self.assertEqual(parent.countTestCases(), 4) # countTestCases() still works after tests are run parent.run(unittest.TestResult()) self.assertEqual(parent.countTestCases(), 4) self.assertEqual(child.countTestCases(), 2) # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." # # And if there are no tests? What then? def test_run__empty_suite(self): events = [] result = LoggingResult(events) suite = unittest.TestSuite() suite.run(result) self.assertEqual(events, []) # "Note that unlike TestCase.run(), TestSuite.run() requires the # "result object to be passed in." def test_run__requires_result(self): suite = unittest.TestSuite() try: suite.run() except TypeError: pass else: self.fail("Failed to raise TypeError") # "Run the tests associated with this suite, collecting the result into # the test result object passed as result." 
def test_run(self): events = [] result = LoggingResult(events) class LoggingCase(unittest.TestCase): def run(self, result): events.append('run %s' % self._testMethodName) def test1(self): pass def test2(self): pass tests = [LoggingCase('test1'), LoggingCase('test2')] unittest.TestSuite(tests).run(result) self.assertEqual(events, ['run test1', 'run test2']) # "Add a TestCase ... to the suite" def test_addTest__TestCase(self): class Foo(unittest.TestCase): def test(self): pass test = Foo('test') suite = unittest.TestSuite() suite.addTest(test) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [test]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add a ... TestSuite to the suite" def test_addTest__TestSuite(self): class Foo(unittest.TestCase): def test(self): pass suite_2 = unittest.TestSuite([Foo('test')]) suite = unittest.TestSuite() suite.addTest(suite_2) self.assertEqual(suite.countTestCases(), 1) self.assertEqual(list(suite), [suite_2]) # countTestCases() still works after tests are run suite.run(unittest.TestResult()) self.assertEqual(suite.countTestCases(), 1) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." 
# # "This is equivalent to iterating over tests, calling addTest() for # each element" def test_addTests(self): class Foo(unittest.TestCase): def test_1(self): pass def test_2(self): pass test_1 = Foo('test_1') test_2 = Foo('test_2') inner_suite = unittest.TestSuite([test_2]) def gen(): yield test_1 yield test_2 yield inner_suite suite_1 = unittest.TestSuite() suite_1.addTests(gen()) self.assertEqual(list(suite_1), list(gen())) # "This is equivalent to iterating over tests, calling addTest() for # each element" suite_2 = unittest.TestSuite() for t in gen(): suite_2.addTest(t) self.assertEqual(suite_1, suite_2) # "Add all the tests from an iterable of TestCase and TestSuite # instances to this test suite." # # What happens if it doesn't get an iterable? def test_addTest__noniterable(self): suite = unittest.TestSuite() try: suite.addTests(5) except TypeError: pass else: self.fail("Failed to raise TypeError") def test_addTest__noncallable(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, 5) def test_addTest__casesuiteclass(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTest, Test_TestSuite) self.assertRaises(TypeError, suite.addTest, unittest.TestSuite) def test_addTests__string(self): suite = unittest.TestSuite() self.assertRaises(TypeError, suite.addTests, "foo") def test_function_in_suite(self): def f(_): pass suite = unittest.TestSuite() suite.addTest(f) # when the bug is fixed this line will not crash suite.run(unittest.TestResult()) def test_remove_test_at_index(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = [1, 2, 3] suite._removeTestAtIndex(1) self.assertEqual([1, None, 3], suite._tests) def test_remove_test_at_index_not_indexable(self): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") suite = unittest.TestSuite() suite._tests = None # if _removeAtIndex raises for 
noniterables this next line will break suite._removeTestAtIndex(2) def assert_garbage_collect_test_after_run(self, TestSuiteClass): if not unittest.BaseTestSuite._cleanup: raise unittest.SkipTest("Suite cleanup is disabled") class Foo(unittest.TestCase): def test_nothing(self): pass test = Foo('test_nothing') wref = weakref.ref(test) suite = TestSuiteClass([wref()]) suite.run(unittest.TestResult()) del test # for the benefit of non-reference counting implementations gc.collect() self.assertEqual(suite._tests, [None]) self.assertIsNone(wref()) def test_garbage_collect_test_after_run_BaseTestSuite(self): self.assert_garbage_collect_test_after_run(unittest.BaseTestSuite) def test_garbage_collect_test_after_run_TestSuite(self): self.assert_garbage_collect_test_after_run(unittest.TestSuite) def test_basetestsuite(self): class Test(unittest.TestCase): wasSetUp = False wasTornDown = False @classmethod def setUpClass(cls): cls.wasSetUp = True @classmethod def tearDownClass(cls): cls.wasTornDown = True def testPass(self): pass def testFail(self): fail class Module(object): wasSetUp = False wasTornDown = False @staticmethod def setUpModule(): Module.wasSetUp = True @staticmethod def tearDownModule(): Module.wasTornDown = True Test.__module__ = 'Module' sys.modules['Module'] = Module self.addCleanup(sys.modules.pop, 'Module') suite = unittest.BaseTestSuite() suite.addTests([Test('testPass'), Test('testFail')]) self.assertEqual(suite.countTestCases(), 2) result = unittest.TestResult() suite.run(result) self.assertFalse(Module.wasSetUp) self.assertFalse(Module.wasTornDown) self.assertFalse(Test.wasSetUp) self.assertFalse(Test.wasTornDown) self.assertEqual(len(result.errors), 1) self.assertEqual(len(result.failures), 0) self.assertEqual(result.testsRun, 2) self.assertEqual(suite.countTestCases(), 2) def test_overriding_call(self): class MySuite(unittest.TestSuite): called = False def __call__(self, *args, **kw): self.called = True unittest.TestSuite.__call__(self, *args, **kw) 
suite = MySuite() result = unittest.TestResult() wrapper = unittest.TestSuite() wrapper.addTest(suite) wrapper(result) self.assertTrue(suite.called) # reusing results should be permitted even if abominable self.assertFalse(result._testRunEntered) if __name__ == '__main__': unittest.main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Utilities for Resources that use the OpenStack Nova API."""

# NOTE: every public function in this module issues a DeprecationWarning via
# warnings.warn() and points callers at the equivalent method on
# self.client_plugin("nova"). This module is kept only as a compatibility
# shim for out-of-tree callers.

import email
from email.mime import multipart
from email.mime import text
import os
import pkgutil
import string
import warnings

from novaclient import exceptions as nova_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from six.moves.urllib import parse as urlparse

from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import scheduler

LOG = logging.getLogger(__name__)

# Server states in which an action on the server should be deferred until the
# server leaves the state (list is consulted by callers of this module).
deferred_server_statuses = ['BUILD',
                            'HARD_REBOOT',
                            'PASSWORD',
                            'REBOOT',
                            'RESCUE',
                            'RESIZE',
                            'REVERT_RESIZE',
                            'SHUTOFF',
                            'SUSPENDED',
                            'VERIFY_RESIZE']


def refresh_server(server):
    '''
    Refresh server's attributes and log warnings for non-critical
    API errors.
    '''
    warnings.warn('nova_utils.refresh_server is deprecated. '
                  'Use self.client_plugin("nova").refresh_server')
    try:
        server.get()
    except nova_exceptions.OverLimit as exc:
        # Rate-limited by Nova: log and carry on with stale attributes.
        LOG.warn(_LW("Server %(name)s (%(id)s) received an OverLimit "
                     "response during server.get(): %(exception)s"),
                 {'name': server.name,
                  'id': server.id,
                  'exception': exc})
    except nova_exceptions.ClientException as exc:
        # Only swallow transient server-side errors (500/503); anything else
        # is re-raised for the caller to handle.
        http_status = (getattr(exc, 'http_status', None) or
                       getattr(exc, 'code', None))
        if http_status in (500, 503):
            LOG.warn(_LW('Server "%(name)s" (%(id)s) received the following '
                         'exception during server.get(): %(exception)s'),
                     {'name': server.name,
                      'id': server.id,
                      'exception': exc})
        else:
            raise


def get_ip(server, net_type, ip_version):
    """Return the server's IP of the given type and version.

    Returns None (implicitly) when no matching address exists.
    """
    warnings.warn('nova_utils.get_ip is deprecated. '
                  'Use self.client_plugin("nova").get_ip')
    if net_type in server.addresses:
        for ip in server.addresses[net_type]:
            if ip['version'] == ip_version:
                return ip['addr']


def get_flavor_id(nova_client, flavor):
    warnings.warn('nova_utils.get_flavor_id is deprecated. '
                  'Use self.client_plugin("nova").get_flavor_id')
    '''
    Get the id for the specified flavor name.
    If the specified value is flavor id, just return it.

    :param nova_client: the nova client to use
    :param flavor: the name of the flavor to find
    :returns: the id of :flavor:
    :raises: exception.FlavorMissing
    '''
    flavor_id = None
    flavor_list = nova_client.flavors.list()
    # Match on either the flavor name or the flavor id, whichever hits first.
    for o in flavor_list:
        if o.name == flavor:
            flavor_id = o.id
            break
        if o.id == flavor:
            flavor_id = o.id
            break
    if flavor_id is None:
        raise exception.FlavorMissing(flavor_id=flavor)
    return flavor_id


def get_keypair(nova_client, key_name):
    warnings.warn('nova_utils.get_keypair is deprecated. '
                  'Use self.client_plugin("nova").get_keypair')
    '''
    Get the public key specified by :key_name:

    :param nova_client: the nova client to use
    :param key_name: the name of the key to look for
    :returns: the keypair (name, public_key) for :key_name:
    :raises: exception.UserKeyPairMissing
    '''
    try:
        return nova_client.keypairs.get(key_name)
    except nova_exceptions.NotFound:
        raise exception.UserKeyPairMissing(key_name=key_name)


def build_userdata(resource, userdata=None, instance_user=None,
                   user_data_format='HEAT_CFNTOOLS'):
    warnings.warn('nova_utils.build_userdata is deprecated. '
                  'Use self.client_plugin("nova").build_userdata')
    '''
    Build multipart data blob for CloudInit which includes user-supplied
    Metadata, user data, and the required Heat in-instance configuration.

    :param resource: the resource implementation
    :type resource: heat.engine.Resource
    :param userdata: user data string
    :type userdata: str or None
    :param instance_user: the user to create on the server
    :type instance_user: string
    :param user_data_format: Format of user data to return
    :type user_data_format: string
    :returns: multipart mime as a string
    '''
    # RAW format bypasses all cloud-init packaging and is returned verbatim.
    if user_data_format == 'RAW':
        return userdata

    is_cfntools = user_data_format == 'HEAT_CFNTOOLS'
    is_software_config = user_data_format == 'SOFTWARE_CONFIG'

    def make_subpart(content, filename, subtype=None):
        # Default the MIME subtype to the filename stem (e.g. 'cloud-config').
        if subtype is None:
            subtype = os.path.splitext(filename)[0]
        msg = text.MIMEText(content, _subtype=subtype)
        msg.add_header('Content-Disposition', 'attachment',
                       filename=filename)
        return msg

    def read_cloudinit_file(fn):
        # Templates are shipped inside the 'heat' package under cloudinit/.
        return pkgutil.get_data('heat', 'cloudinit/%s' % fn)

    if instance_user:
        config_custom_user = 'user: %s' % instance_user
        # FIXME(shadower): compatibility workaround for cloud-init 0.6.3. We
        # can drop this once we stop supporting 0.6.3 (which ships with Ubuntu
        # 12.04 LTS).
        #
        # See bug https://bugs.launchpad.net/heat/+bug/1257410
        boothook_custom_user = r"""useradd -m %s
echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
""" % (instance_user, instance_user)
    else:
        config_custom_user = ''
        boothook_custom_user = ''

    cloudinit_config = string.Template(
        read_cloudinit_file('config')).safe_substitute(
            add_custom_user=config_custom_user)
    cloudinit_boothook = string.Template(
        read_cloudinit_file('boothook.sh')).safe_substitute(
            add_custom_user=boothook_custom_user)

    # Each attachment is (content, filename[, subtype]); expanded via
    # make_subpart(*args) below.
    attachments = [(cloudinit_config, 'cloud-config'),
                   (cloudinit_boothook, 'boothook.sh', 'cloud-boothook'),
                   (read_cloudinit_file('part_handler.py'),
                    'part-handler.py')]

    if is_cfntools:
        attachments.append((userdata, 'cfn-userdata', 'x-cfninitdata'))
    elif is_software_config:
        # attempt to parse userdata as a multipart message, and if it
        # is, add each part as an attachment
        userdata_parts = None
        try:
            userdata_parts = email.message_from_string(userdata)
        except Exception:
            # Best-effort parse: any failure falls back to the shellscript
            # attachment below.
            pass
        if userdata_parts and userdata_parts.is_multipart():
            for part in userdata_parts.get_payload():
                attachments.append((part.get_payload(),
                                    part.get_filename(),
                                    part.get_content_subtype()))
        else:
            attachments.append((userdata, 'userdata', 'x-shellscript'))

    if is_cfntools:
        attachments.append((read_cloudinit_file('loguserdata.py'),
                            'loguserdata.py', 'x-shellscript'))

    metadata = resource.metadata_get()
    if metadata:
        attachments.append((jsonutils.dumps(metadata),
                            'cfn-init-data', 'x-cfninitdata'))

    attachments.append((cfg.CONF.heat_watch_server_url,
                        'cfn-watch-server', 'x-cfninitdata'))

    if is_cfntools:
        attachments.append((cfg.CONF.heat_metadata_server_url,
                            'cfn-metadata-server', 'x-cfninitdata'))

        # Create a boto config which the cfntools on the host use to know
        # where the cfn and cw API's are to be accessed
        cfn_url = urlparse.urlparse(cfg.CONF.heat_metadata_server_url)
        cw_url = urlparse.urlparse(cfg.CONF.heat_watch_server_url)
        is_secure = cfg.CONF.instance_connection_is_secure
        vcerts = cfg.CONF.instance_connection_https_validate_certificates
        boto_cfg = "\n".join(["[Boto]",
                              "debug = 0",
                              "is_secure = %s" % is_secure,
                              "https_validate_certificates = %s" % vcerts,
                              "cfn_region_name = heat",
                              "cfn_region_endpoint = %s" % cfn_url.hostname,
                              "cloudwatch_region_name = heat",
                              "cloudwatch_region_endpoint = %s" %
                              cw_url.hostname])
        attachments.append((boto_cfg,
                            'cfn-boto-cfg', 'x-cfninitdata'))

    subparts = [make_subpart(*args) for args in attachments]
    mime_blob = multipart.MIMEMultipart(_subparts=subparts)

    return mime_blob.as_string()


def delete_server(server):
    '''
    A co-routine that deletes the server and waits for it to
    disappear from Nova.

    Yields between polls; the caller drives the generator (scheduler task).
    '''
    warnings.warn('nova_utils.delete_server is deprecated. '
                  'Use self.client_plugin("nova").delete_server')
    if not server:
        return
    try:
        server.delete()
    except nova_exceptions.NotFound:
        return
    while True:
        yield
        try:
            refresh_server(server)
        except nova_exceptions.NotFound:
            break
        else:
            # Some clouds append extra (STATUS) strings to the status
            short_server_status = server.status.split('(')[0]
            if short_server_status == "DELETED":
                break
            if short_server_status == "ERROR":
                fault = getattr(server, 'fault', {})
                message = fault.get('message', 'Unknown')
                code = fault.get('code')
                errmsg = (_("Server %(name)s delete failed: (%(code)s) "
                            "%(message)s"))
                raise exception.Error(errmsg % {"name": server.name,
                                                "code": code,
                                                "message": message})


@scheduler.wrappertask
def resize(server, flavor, flavor_id):
    """Resize the server and then call check_resize task to verify."""
    warnings.warn('nova_utils.resize is deprecated. '
                  'Use self.client_plugin("nova").resize')
    server.resize(flavor_id)
    yield check_resize(server, flavor, flavor_id)


def rename(server, name):
    """Update the name for a server."""
    warnings.warn('nova_utils.rename is deprecated. '
                  'Use self.client_plugin("nova").rename')
    server.update(name)


def check_resize(server, flavor, flavor_id):
    """
    Verify that a resizing server is properly resized.
    If that's the case, confirm the resize, if not raise an error.
    """
    warnings.warn('nova_utils.check_resize is deprecated. '
                  'Use self.client_plugin("nova").check_resize')
    refresh_server(server)
    while server.status == 'RESIZE':
        yield
        refresh_server(server)
    if server.status == 'VERIFY_RESIZE':
        server.confirm_resize()
    else:
        raise exception.Error(
            _("Resizing to '%(flavor)s' failed, status '%(status)s'") %
            dict(flavor=flavor, status=server.status))


@scheduler.wrappertask
def rebuild(server, image_id, preserve_ephemeral=False):
    """Rebuild the server and call check_rebuild to verify."""
    warnings.warn('nova_utils.rebuild is deprecated. '
                  'Use self.client_plugin("nova").rebuild')
    server.rebuild(image_id, preserve_ephemeral=preserve_ephemeral)
    yield check_rebuild(server, image_id)


def check_rebuild(server, image_id):
    """
    Verify that a rebuilding server is rebuilt.
    Raise error if it ends up in an ERROR state.
    """
    warnings.warn('nova_utils.check_rebuild is deprecated. '
                  'Use self.client_plugin("nova").check_rebuild')
    refresh_server(server)
    while server.status == 'REBUILD':
        yield
        refresh_server(server)
    if server.status == 'ERROR':
        raise exception.Error(
            _("Rebuilding server failed, status '%s'") % server.status)


def meta_serialize(metadata):
    """
    Serialize non-string metadata values before sending them to
    Nova.
    """
    warnings.warn('nova_utils.meta_serialize is deprecated. '
                  'Use self.client_plugin("nova").meta_serialize')
    # Strings pass through untouched; everything else becomes its JSON dump.
    return dict((key, (value if isinstance(value,
                                           six.string_types)
                       else jsonutils.dumps(value))
                 ) for (key, value) in metadata.items())


def meta_update(client, server, metadata):
    """Delete/Add the metadata in nova as needed."""
    warnings.warn('nova_utils.meta_update is deprecated. '
                  'Use self.client_plugin("nova").meta_update')
    metadata = meta_serialize(metadata)
    current_md = server.metadata
    # Remove keys present in Nova but absent from the desired metadata.
    to_del = [key for key in six.iterkeys(current_md)
              if key not in metadata]
    if len(to_del) > 0:
        client.servers.delete_meta(server, to_del)

    client.servers.set_meta(server, metadata)


def server_to_ipaddress(client, server):
    '''
    Return the server's IP address, fetching it from Nova.

    Returns None (implicitly) when the server is not found or has no
    attached network addresses.
    '''
    warnings.warn('nova_utils.server_to_ipaddress is deprecated. '
                  'Use self.client_plugin("nova").server_to_ipaddress')
    try:
        server = client.servers.get(server)
    except nova_exceptions.NotFound as ex:
        LOG.warn(_LW('Instance (%(server)s) not found: %(ex)s'),
                 {'server': server, 'ex': ex})
    else:
        # Return the first address of the first network that has one.
        for n in server.networks:
            if len(server.networks[n]) > 0:
                return server.networks[n][0]


def absolute_limits(nova_client):
    """Return the absolute limits as a dictionary."""
    warnings.warn('nova_utils.absolute_limits is deprecated. '
                  'Use self.client_plugin("nova").absolute_limits')
    limits = nova_client.limits.get()
    return dict([(limit.name, limit.value)
                 for limit in list(limits.absolute)])
# coding=utf-8
"""Module related to test for all the models."""
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes

from user_map.tests.model_factories import UserFactory


class UserMapViewTests(TestCase):
    """Class for testing user map view."""

    def setUp(self):
        """Run for each test.

        Creates one confirmed user and a fresh test client.
        """
        self.email = 'test@gmail.com'
        self.password = 'test'
        self.user = UserFactory.create(
            email=self.email,
            password=self.password,
            is_confirmed=True)
        self.client = Client()

    def test_index(self):
        """Test for index view."""
        response = self.client.get(reverse('user_map:index'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'user_map/data_privacy.html')
        # Anonymous visitors see the auth links.
        self.assertContains(response, 'Sign Up')
        self.assertContains(response, 'Log In')

    def test_index_login(self):
        """Test for index view after logging in."""
        self.assertTrue(
            self.client.login(email=self.email, password=self.password))
        response = self.client.get(reverse('user_map:index'))
        # Authenticated users see a greeting instead of the auth links.
        self.assertNotContains(response, 'Sign Up')
        self.assertNotContains(response, 'Log In')
        self.assertContains(response, 'Hi, %s' % self.user.name)

    def test_get_users(self):
        """Test for get_users view."""
        # This user belongs to openstreetmap project as it has osm roles
        response = self.client.get(
            reverse('user_map:get_users'), {'project': 'openstreetmap'})
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertContains(response, 'FeatureCollection')
        # The fixture user should not appear in this project's feature list
        # because they hold both role types.
        self.assertNotContains(response, self.user.name)

        # An empty project filter returns all users, including the fixture.
        response = self.client.get(
            reverse('user_map:get_users'), {'project': ''})
        self.assertEqual(response['Content-Type'], 'application/json')
        self.assertContains(response, 'FeatureCollection')
        self.assertContains(response, self.user.name)

    def test_get_users_with_post(self):
        """Test get_users view.

        POST is not an allowed method for this endpoint: expect 404.
        """
        response = self.client.post(
            reverse('user_map:get_users'), {'project': 'inasafe'})
        self.assertEqual(response.status_code, 404)

    def test_show_register_page(self):
        """Test register view using get."""
        response = self.client.get(reverse('user_map:register'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'user_map/account/registration.html')

    def test_register_success(self):
        """Test register view using post."""
        response = self.client.post(
            reverse('user_map:register'), {
                'name': 'John Doe',
                'email': 'john.doe@gmail.com',
                'image': '',
                'password': 'password',
                'password2': 'password',
                'website': '',
                'inasafe_roles': '1',
                'osm_roles': '2',
                'osm_username': '',
                # GeoJSON point for the user's location field.
                'location': (
                    '{"type":"Point","coordinates":[22.5,-16.63619187839765]}')
            })
        # Successful registration redirects back to the register page.
        self.assertRedirects(
            response, reverse('user_map:register'), 302, 200)

    def test_confirm_registration_invalid(self):
        """Test confirm_registration using invalid link."""
        response = self.client.get(
            reverse('user_map:confirm_registration', args=('l1nk', 'inV4lid')))
        self.assertTemplateUsed(response, 'user_map/information.html')
        self.assertContains(response, 'Your link is not valid')

    def test_confirm_registration_valid(self):
        """Test confirm_registration using valid link."""
        # Create unconfirmed user first
        user = UserFactory.create(is_confirmed=False)
        # The confirmation URL encodes the user's pk plus their key.
        uid = urlsafe_base64_encode(force_bytes(user.pk))
        response = self.client.get(
            reverse('user_map:confirm_registration', args=(uid, user.key)))
        self.assertTemplateUsed(response, 'user_map/information.html')
        self.assertContains(
            response,
            'Congratulations! Your account has been successfully confirmed.')

    def test_show_login_page(self):
        """Test if showing login page is OK."""
        response = self.client.get(reverse('user_map:login'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'user_map/account/login.html')

    def test_login_invalid(self):
        """Test login with invalid user."""
        response = self.client.post(
            reverse('user_map:login'), {
                'email': 'invalid@user.com',
                'password': 'invaliduserpass'
            }
        )
        # Failed login re-renders the login form with an error message.
        self.assertTemplateUsed(response, 'user_map/account/login.html')
        self.assertContains(
            response, 'Please enter a correct email and password')

    def test_login_valid(self):
        """Test login with valid user."""
        # Create a user first with is_confirmed = True
        UserFactory.create(
            email='test@mail.com', password='test', is_confirmed=True)
        response = self.client.post(
            reverse('user_map:login'), {
                'email': 'test@mail.com',
                'password': 'test'
            }
        )
        self.assertRedirects(
            response, reverse('user_map:index'), 302, 200)

    def test_logout(self):
        """Test logout view."""
        response = self.client.post(reverse('user_map:logout'))
        self.assertRedirects(
            response, reverse('user_map:index'), 302, 200)

    def test_show_update_page(self):
        """Test showing update user page view."""
        # Login first
        self.assertTrue(
            self.client.login(email=self.email, password=self.password))
        response = self.client.get(reverse('user_map:update_user'))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'user_map/account/edit_user.html')

    def test_update_basic_information(self):
        """Test update basic information."""
        # Login first
        self.assertTrue(
            self.client.login(email=self.email, password=self.password))
        form_content = dict(
            {
                'name': 'UpdatedName',
                'email': self.email,
                'website': 'http://updated-site.com',
                'inasafe_roles': '1',
                'location': ('{"type":"Point","coordinates":[22.5, '
                             '-16.63619187839765]}'),
                'email_updates': 'on',
                'change_basic_info': 'Submit'
            }
        )
        response = self.client.post(
            reverse('user_map:update_user'), form_content)
        self.assertRedirects(
            response,
            reverse('user_map:update_user') + '#basic-information',
            302,
            200)
        # NOTE(review): this assumes the factory returns the EXISTING user
        # for this email (e.g. django_get_or_create on email) rather than
        # building a new one — confirm against model_factories.UserFactory.
        user = UserFactory(email=self.email)
        self.assertEqual(user.name, form_content['name'])

    def test_change_password(self):
        """Test change password."""
        # Login first
        self.assertTrue(
            self.client.login(email=self.email, password=self.password))
        new_password = 'UpdatedPassword'
        form_content = dict(
            {
                'old_password': self.password,
                'new_password1': new_password,
                'new_password2': new_password,
                'change_password': 'Submit'
            }
        )
        response = self.client.post(
            reverse('user_map:update_user'), form_content)
        # As it will go to login page after successfully changing password,
        # the target status code will be 302
        self.assertRedirects(
            response,
            reverse('user_map:update_user') + '#security',
            302,
            302)

        # Logout
        self.client.logout()

        # Login with old password will fail
        self.assertFalse(
            self.client.login(email=self.email, password=self.password))

        # Login with new password
        self.assertTrue(
            self.client.login(email=self.email, password=new_password))

    def test_delete_user(self):
        """Test delete_user view."""
        # Login first
        self.assertTrue(
            self.client.login(email=self.email, password=self.password))
        response = self.client.post(reverse('user_map:delete_user'))
        self.assertTemplateUsed(response, 'user_map/information.html')
        self.assertContains(
            response, 'You have deleted your account')

    def test_download(self):
        """Test download view."""
        response = self.client.post(reverse('user_map:download'))
        self.assertEqual(response['Content-Type'], 'text/csv')
        self.assertEqual(
            response['Content-Disposition'],
            'attachment; filename="users.csv"')
        self.assertContains(response, self.user.name)
from collections.abc import Iterable
from contextlib import contextmanager
from functools import partial
from inspect import signature

from bonobo.config import Option
from bonobo.errors import UnrecoverableTypeError
from bonobo.util import deprecated_alias, ensure_tuple

# Anonymous sentinel markers describing how a node wants its input delivered:
# _raw   -> the whole input object is passed as one positional argument,
# _args  -> the input is unpacked into positional arguments,
# _none  -> the input is not passed at all.
_raw = object()
_args = object()
_none = object()

INPUT_FORMATS = {_raw, _args, _none}


class ContextProcessor(Option):
    """
    A ContextProcessor is a kind of transformation decorator that can setup and teardown a transformation and runtime
    related dependencies, at the execution level.

    It works like a yielding context manager, and is the recommended way to setup and teardown objects you'll need in
    the context of one execution. It's the way to overcome the stateless nature of transformations.

    The yielded values will be passed as positional arguments to the next context processors (order do matter), and
    finally to the __call__ method of the transformation.

    Warning: this may change for a similar but simpler implementation, don't rely too much on it (yet).

    Example:

        >>> from bonobo.config import Configurable
        >>> from bonobo.util import ValueHolder

        >>> class Counter(Configurable):
        ...     @ContextProcessor
        ...     def counter(self, context):
        ...         yield ValueHolder(0)
        ...
        ...     def __call__(self, counter, *args, **kwargs):
        ...         counter += 1
        ...         yield counter.get()

    """

    @property
    def __name__(self):
        # Expose the wrapped function's name so the processor is introspectable
        # like the function it decorates.
        return self.func.__name__

    def __init__(self, func):
        self.func = func
        super(ContextProcessor, self).__init__(required=False, default=self.__name__)
        self.name = self.__name__

    def __repr__(self):
        return repr(self.func).replace("<function", "<{}".format(type(self).__name__))

    def __call__(self, *args, **kwargs):
        # Delegate straight to the wrapped generator function.
        return self.func(*args, **kwargs)


class bound(partial):
    """A functools.partial that mimics inspect.BoundArguments (args/kwargs)."""

    @property
    def kwargs(self):
        return self.keywords


class ContextCurrifier:
    """
    This is a helper to resolve processors.
    """

    def __init__(self, wrapped, *args, **kwargs):
        self.wrapped = wrapped
        self.args = args
        self.kwargs = kwargs
        # Input format defaults to argument-unpacking unless the node was
        # decorated with use_raw_input / use_no_input.
        self.format = getattr(wrapped, "__input_format__", _args)
        self._stack, self._stack_values = None, None

    def __iter__(self):
        yield from self.wrapped

    def _bind(self, _input):
        """Bind args/kwargs + input to the wrapped callable's signature.

        Falls back to a signature-less `bound` partial when the callable's
        signature cannot be introspected (e.g. some builtins).
        """
        try:
            bind = signature(self.wrapped).bind
        except ValueError:
            bind = partial(bound, self.wrapped)

        if self.format is _args:
            return bind(*self.args, *_input, **self.kwargs)

        if self.format is _raw:
            return bind(*self.args, _input, **self.kwargs)

        if self.format is _none:
            return bind(*self.args, **self.kwargs)

        raise NotImplementedError("Invalid format {!r}.".format(self.format))

    def __call__(self, _input):
        if not callable(self.wrapped):
            # Non-callable but iterable nodes are simply iterated.
            if isinstance(self.wrapped, Iterable):
                return self.__iter__()
            raise UnrecoverableTypeError("Uncallable node {}".format(self.wrapped))

        try:
            bound = self._bind(_input)
        except TypeError as exc:
            # Surface a detailed, unrecoverable error when the input row does
            # not match the node's signature.
            raise UnrecoverableTypeError(
                (
                    "Input of {wrapped!r} does not bind to the node signature.\n"
                    "Args: {args}\n"
                    "Input: {input}\n"
                    "Kwargs: {kwargs}\n"
                    "Signature: {sig}"
                ).format(
                    wrapped=self.wrapped, args=self.args, input=_input, kwargs=self.kwargs, sig=signature(self.wrapped)
                )
            ) from exc

        return self.wrapped(*bound.args, **bound.kwargs)

    def setup(self, *context):
        """Run every context processor up to its first yield.

        Yielded values are appended to self.args so they get passed to the
        wrapped callable on every subsequent __call__.
        """
        if self._stack is not None:
            raise RuntimeError("Cannot setup context currification twice.")

        self._stack, self._stack_values = list(), list()
        for processor in resolve_processors(self.wrapped):
            _processed = processor(self.wrapped, *context, *self.args, **self.kwargs)
            _append_to_context = next(_processed)
            self._stack_values.append(_append_to_context)
            if _append_to_context is not None:
                self.args += ensure_tuple(_append_to_context)
            self._stack.append(_processed)

    def teardown(self):
        """Resume each processor past its yield (LIFO) to run its cleanup."""
        while self._stack:
            processor = self._stack.pop()
            try:
                # todo yield from ? how to ?
                processor.send(self._stack_values.pop())
            except StopIteration:
                # This is normal, and wanted.
                pass
            else:
                # No error ? We should have had StopIteration ...
                raise RuntimeError("Context processors should not yield more than once.")
        self._stack, self._stack_values = None, None

    @contextmanager
    def as_contextmanager(self, *context):
        """
        Convenience method to use it as a contextmanager, mostly for test purposes.

        Example:

            >>> with ContextCurrifier(node).as_contextmanager(context) as stack:
            ...     stack()

        :param context:
        :return:
        """
        self.setup(*context)
        yield self
        self.teardown()


def resolve_processors(mixed):
    """Yield the context processors attached to *mixed*, if any."""
    try:
        yield from mixed.__processors__
    except AttributeError:
        yield from ()


get_context_processors = deprecated_alias("get_context_processors", resolve_processors)


def use_context(f):
    """Decorator: inject the execution context as the node's first argument."""

    def context(self, context, *args, **kwargs):
        yield context

    return use_context_processor(context)(f)


def use_context_processor(context_processor):
    """Decorator factory: append *context_processor* to a node's __processors__."""

    def using_context_processor(cls_or_func):
        nonlocal context_processor
        try:
            cls_or_func.__processors__
        except AttributeError:
            cls_or_func.__processors__ = []
        cls_or_func.__processors__.append(ContextProcessor(context_processor))
        return cls_or_func

    return using_context_processor


def _use_input_format(input_format):
    """Build a decorator that tags a node with the given input format marker.

    :raises ValueError: if *input_format* is not one of INPUT_FORMATS.
    """
    if input_format not in INPUT_FORMATS:
        # BUGFIX: INPUT_FORMATS contains anonymous object() sentinels, which
        # sorted() cannot order and str.join() cannot concatenate in Python 3;
        # the original `", ".join(sorted(INPUT_FORMATS))` raised a TypeError
        # that masked the intended ValueError. Sort and join their reprs
        # instead so the error message is actually produced.
        choices = ", ".join(sorted(repr(fmt) for fmt in INPUT_FORMATS))
        raise ValueError("Invalid input format {!r}. Choices: {}".format(input_format, choices))

    def _set_input_format(f):
        setattr(f, "__input_format__", input_format)
        return f

    return _set_input_format


use_no_input = _use_input_format(_none)
use_raw_input = _use_input_format(_raw)