code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def tags(self, ticket_id):
    """Return the most popular recent tags for a specific ticket.

    Tags are listed in decreasing order of popularity.

    :param ticket_id: id of the ticket whose tags should be listed
    """
    endpoint = self.endpoint.tags
    return self._query_zendesk(endpoint, 'tag', id=ticket_id)
Lists the most popular recent tags in decreasing popularity from a specific ticket.
def p_property_list(self, p):
    # NOTE: the docstring below is a PLY grammar production and is read by the
    # parser generator at runtime -- it must not be edited as documentation.
    """property_list : property_assignment | property_list COMMA property_assignment """
    # First production: a single assignment starts a fresh list.
    # Recursive production: p[1] is the accumulated list, p[3] the new item.
    if len(p) == 2:
        p[0] = [p[1]]
    else:
        p[1].append(p[3])
        p[0] = p[1]
property_list : property_assignment | property_list COMMA property_assignment
def exc_message(exc_info):
    """Return the exception's message from a ``sys.exc_info()``-style tuple.

    Handles legacy string exceptions and Python 2 unicode quirks.
    """
    exc = exc_info[1]
    if exc is None:
        # str exception (legacy Python 2 string exceptions have no instance,
        # so the "type" slot holds the message itself)
        result = exc_info[0]
    else:
        try:
            result = str(exc)
        except UnicodeEncodeError:
            try:
                result = unicode(exc)  # flake8: noqa
            except UnicodeError:
                # Fallback to args as neither str nor
                # unicode(Exception(u'\xe6')) work in Python < 2.6
                result = exc.args[0]
    return result
Return the exception's message.
def update(self):
    """Push all cached attributes back into the wrapped Softimage object.

    This method should be called when you want to ensure all cached
    attributes are in sync with the actual object attributes at runtime.
    This happens because attributes could store mutable objects and be
    modified outside the scope of this class. The most common idiom that
    isn't automagically caught is mutating a list or dictionary: e.g.
    'user.friends.append(new_friend)' only *gets* the attribute, so
    SIWrapper isn't aware the returned object was modified and the cached
    data is not updated.
    """
    # Re-fetch the holder by name to work around COM dispatch issues.
    self.holder = siget(self.holder.FullName)  # fix dispatch issues
    # NOTE: iteritems() means this code targets Python 2.
    for key, value in self.__dict__.iteritems():
        # Namespaced key is how attributes are stored on the holder object.
        key = self.namespace + key
        if self._validate_key(key):
            # Create the parameter on first use, then write the encoded value.
            if not self.holder.Parameters(key):
                self.holder.AddParameter3(key, C.siString)
            self.holder.Parameters(key).Value = encode(value)
This method should be called when you want to ensure all cached attributes are in sync with the actual object attributes at runtime. This happens because attributes could store mutable objects and be modified outside the scope of this class. The most common idiom that isn't automagically caught is mutating a list or dictionary. Lets say 'user' object have an attribute named 'friends' containing a list, calling 'user.friends.append(new_friend)' only get the attribute, SIWrapper isn't aware that the object returned was modified and the cached data is not updated.
def load(theTask, canExecute=True, strict=True, defaults=False):
    """Load a TEAL .cfg file for non-GUI access (loadOnly=True).

    Thin convenience wrapper around ``teal()`` that returns the parameters
    as a dict and reports errors to the terminal.
    """
    opts = dict(
        parent=None,
        loadOnly=True,
        returnAs="dict",
        canExecute=canExecute,
        strict=strict,
        errorsToTerm=True,
        defaults=defaults,
    )
    return teal(theTask, **opts)
Shortcut to load TEAL .cfg files for non-GUI access where loadOnly=True.
def setup_data_split(X, y, tokenizer, proc_data_dir, **kwargs):
    """Setup data while splitting into a training, validation, and test set.

    Args:
        X: text data,
        y: data labels,
        tokenizer: A Tokenizer instance
        proc_data_dir: Directory for the split and processed data
        **kwargs: forwarded to ``process_save`` for each split
    """
    X_train, X_val, X_test, y_train, y_val, y_test = split_data(X, y)
    # only build vocabulary on training data (avoids leaking val/test tokens)
    tokenizer.build_vocab(X_train)
    # train=True only for the training split so any training-only processing
    # (e.g. fitting) is not applied to val/test.
    process_save(X_train, y_train, tokenizer, path.join(
        proc_data_dir, 'train.bin'), train=True, **kwargs)
    process_save(X_val, y_val, tokenizer, path.join(
        proc_data_dir, 'val.bin'), **kwargs)
    process_save(X_test, y_test, tokenizer, path.join(
        proc_data_dir, 'test.bin'), **kwargs)
Setup data while splitting into a training, validation, and test set. Args: X: text data, y: data labels, tokenizer: A Tokenizer instance proc_data_dir: Directory for the split and processed data
def negated(self):
    """
    Return a new QueryCompound whose operator is the negation of this
    one's (And <-> Or), built from the same member queries.

    NOTE: despite the historical name, this does NOT mutate ``self`` --
    it returns a fresh instance.

    :return     <QueryCompound>
    """
    op = QueryCompound.Op.And if self.__op == QueryCompound.Op.Or else QueryCompound.Op.Or
    return QueryCompound(*self.__queries, op=op)
Negates this instance and returns it. :return self
def __getBarFCName(pressure):
    """Parse the pressure and return FC (String).

    Maps a barometric pressure reading to a forecast label, or None when
    no reading is available.
    """
    if pressure is None:
        return None
    press = __to_float1(pressure)
    # Upper bounds checked in ascending order; first match wins.
    bands = (
        (974, "Thunderstorms"),
        (990, "Stormy"),
        (1002, "Rain"),
        (1010, "Cloudy"),
        (1022, "Unstable"),
        (1035, "Stable"),
    )
    for upper, label in bands:
        if press < upper:
            return label
    return "Very dry"
Parse the pressure and return FC (String).
def flatten(*caches):
    """Flatten a nested list of cache entries

    Parameters
    ----------
    *caches : `list`
        One or more lists of file paths (`str` or
        :class:`~lal.utils.CacheEntry`).

    Returns
    -------
    flat : `list`
        A flat `list` containing the unique set of entries across
        each input, in first-seen order.
    """
    # OrderedDict keys give de-duplication while preserving insertion order.
    seen = OrderedDict()
    for cache in caches:
        for entry in cache:
            seen[entry] = None
    return list(seen)
Flatten a nested list of cache entries Parameters ---------- *caches : `list` One or more lists of file paths (`str` or :class:`~lal.utils.CacheEntry`). Returns ------- flat : `list` A flat `list` containing the unique set of entries across each input.
def _check_args(self, source):
    '''Validate the argument section.

    Args may be either a dict or a list (to allow multiple positional args).

    :param source: name of the yaml source being validated (used as the
        root of the error-reporting path)
    '''
    path = [source]
    args = self.parsed_yaml.get('args', {})
    # Reject anything that is neither a mapping nor a sequence up front.
    self._assert_struct_type(args, 'args', (dict, list), path)
    path.append('args')
    if isinstance(args, dict):
        for argn, argattrs in args.items():
            self._check_one_arg(path, argn, argattrs)
    else:  # must be list - already asserted struct type
        for argdict in args:
            # Each list item must be a one-entry mapping {name: attrs}.
            self._assert_command_dict(argdict, '[list-item]', path)
            argn, argattrs = list(argdict.items())[0]  # safe - length asserted on previous line
            self._check_one_arg(path, argn, argattrs)
Validate the argument section. Args may be either a dict or a list (to allow multiple positional args).
def max_await_time_ms(self, max_await_time_ms):
    """Specifies a time limit for a getMore operation on a
    :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other
    types of cursor max_await_time_ms is ignored.

    Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or
    ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this
    :class:`Cursor` has already been used.

    .. note:: `max_await_time_ms` requires server version **>= 3.2**

    :Parameters:
      - `max_await_time_ms`: the time limit after which the operation is
        aborted

    :returns: this cursor, to allow method chaining

    .. versionadded:: 3.2
    """
    if (not isinstance(max_await_time_ms, integer_types)
            and max_await_time_ms is not None):
        raise TypeError("max_await_time_ms must be an integer or None")
    # Raises InvalidOperation if the cursor has already been used.
    self.__check_okay_to_chain()
    # Ignore max_await_time_ms if not tailable or await_data is False.
    if self.__query_flags & CursorType.TAILABLE_AWAIT:
        self.__max_await_time_ms = max_await_time_ms
    return self
Specifies a time limit for a getMore operation on a :attr:`~pymongo.cursor.CursorType.TAILABLE_AWAIT` cursor. For all other types of cursor max_await_time_ms is ignored. Raises :exc:`TypeError` if `max_await_time_ms` is not an integer or ``None``. Raises :exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has already been used. .. note:: `max_await_time_ms` requires server version **>= 3.2** :Parameters: - `max_await_time_ms`: the time limit after which the operation is aborted .. versionadded:: 3.2
def subsample(partitions, dataset, seed):
    '''Function to generate randomly sampled datasets.

    This is in the context of cells in the native dataset which are the
    rows of the matrix.

    NOTE(review): the original docstring says "with replacement", but the
    sampling here takes the first k rows of a shuffled copy, i.e. without
    replacement -- confirm which is intended.

    :param partitions: int designating the number of evenly spaced sample
        sizes to randomly select from the native dataset
    :param dataset: DataFrame of the native dataset compatible with the
        shuffle function
    :param seed: pseudorandom seed, compatible with the replicate wrapper
        since it adds the index to the seed
    :return subOut: dictionary of the randomly sampled datasets, keys are
        the number of cells (e.g. "500cells")
    '''
    # Evenly spaced sample sizes: n/partitions, 2n/partitions, ... < n.
    parts = np.arange(dataset.shape[0] / partitions,
                      dataset.shape[0],
                      dataset.shape[0] / partitions).astype(int)
    subOut = {}
    for i in range(parts.shape[0]):
        # Shuffle the whole dataset deterministically, then keep the first
        # parts[i] rows as the subsample.
        subOut["{0}cells".format(parts[i])] = np.asarray(
            shuffle(dataset, random_state=seed))[0:parts[i], :]
    return subOut
Function to generate randomly sampled datasets with replacement. This is in the context of cells in the native dataset which are the rows of the matrix :param partitions: int designating the number of evenly spaced sample sizes to randomly select from the native dataset :param dataset: DataFrame of the native dataset compatible with the suffle function :param seed: pseudorandom seed, compatible with the replicate wrapper since it adds the index to the seed :return subOut: dictionary of the randomly sampled datasets, keys are the number of cells
def add_digital_object(
    self,
    parent_archival_object,
    identifier,
    title=None,
    uri=None,
    location_of_originals=None,
    object_type="text",
    xlink_show="embed",
    xlink_actuate="onLoad",
    restricted=False,
    use_statement="",
    use_conditions=None,
    access_conditions=None,
    size=None,
    format_name=None,
    format_version=None,
    inherit_dates=False,
    inherit_notes=False,
):
    """
    Creates a new digital object.

    :param string parent_archival_object: The archival object to which
        the newly-created digital object will be parented.
    :param string identifier: A unique identifier for the digital object,
        in any format.
    :param string title: The title of the digital object.
    :param string uri: The URI to an instantiation of the digital object.
    :param string location_of_originals: If provided, will create an
        `originalsloc` (location of originals) note in the digital object
        using this text.
    :param string object_type: The type of the digital object.
        Defaults to "text".
    :param string xlink_show: Controls how the file will be displayed.
        For supported values, see:
        http://www.w3.org/TR/xlink/#link-behaviors
    :param string xlink_actuate:
    :param string use_statement:
    :param string use_conditions: A paragraph of human-readable text to
        specify conditions of use for the digital object. If provided,
        creates a "conditions governing use" note in the digital object.
    :param string access_conditions: A paragraph of human-readable text to
        specify conditions of use for the digital object. If provided,
        creates a "conditions governing access" note in the digital object.
    :param int size: Size in bytes of the digital object
    :param str format_name: Name of the digital object's format
    :param str format_version: Name of the digital object's format version
    :param bool inherit_dates: Inherit dates
    :param bool inherit_notes: Inherit parent notes
    :return: dict describing the created digital object, with its new
        ArchivesSpace URI stored under the "id" key.
    """
    parent_record = self.get_record(parent_archival_object)
    repository = parent_record["repository"]["ref"]
    language = parent_record.get("language", "")
    # Default the title to the parent's display string, falling back to the
    # URI's basename (or "Untitled" when there is no URI either).
    if not title:
        filename = os.path.basename(uri) if uri is not None else "Untitled"
        title = parent_record.get("display_string", filename)
    new_object = {
        "title": title,
        "digital_object_id": identifier,
        "digital_object_type": object_type,
        "language": language,
        "notes": [],
        "restrictions": restricted,
        "subjects": parent_record["subjects"],
        "linked_agents": parent_record["linked_agents"],
    }
    if inherit_dates:
        new_object["dates"] = parent_record["dates"]
    if location_of_originals is not None:
        new_object["notes"].append(
            {
                "jsonmodel_type": "note_digital_object",
                "type": "originalsloc",
                "content": [location_of_originals],
                "publish": False,
            }
        )
    if uri is not None:
        new_object["file_versions"] = [
            {
                "file_uri": uri,
                "use_statement": use_statement,
                "xlink_show_attribute": xlink_show,
                "xlink_actuate_attribute": xlink_actuate,
            }
        ]
    # Parent note types that digital objects support directly; anything
    # else is copied over as a generic "note".
    note_digital_object_type = [
        "summary",
        "bioghist",
        "accessrestrict",
        "userestrict",
        "custodhist",
        "dimensions",
        "edition",
        "extent",
        "altformavail",
        "originalsloc",
        "note",
        "acqinfo",
        "inscription",
        "langmaterial",
        "legalstatus",
        "physdesc",
        "prefercite",
        "processinfo",
        "relatedmaterial",
    ]
    if inherit_notes:
        for pnote in parent_record["notes"]:
            if pnote["type"] in note_digital_object_type:
                dnote = pnote["type"]
            else:
                dnote = "note"
            if "subnotes" in pnote:
                # Flatten multipart notes: keep each subnote's content,
                # skipping subnotes with no content field.
                content = []
                for subnote in pnote["subnotes"]:
                    if "content" in subnote:
                        content.append(subnote["content"])
                    else:
                        LOGGER.info(
                            "No content field in %s, skipping adding to child digital object.",
                            subnote,
                        )
            else:
                content = pnote.get("content", "")
            new_object["notes"].append(
                {
                    "jsonmodel_type": "note_digital_object",
                    "type": dnote,
                    "label": pnote.get("label", ""),
                    "content": content,
                    "publish": pnote["publish"],
                }
            )
    if use_conditions:
        new_object["notes"].append(
            {
                "jsonmodel_type": "note_digital_object",
                "type": "userestrict",
                "content": [use_conditions],
                "publish": True,
            }
        )
    if access_conditions:
        new_object["notes"].append(
            {
                "jsonmodel_type": "note_digital_object",
                "type": "accessrestrict",
                "content": [access_conditions],
                "publish": True,
            }
        )
    if restricted:
        # NOTE(review): assumes a file version exists (i.e. uri was given)
        # when restricted is True -- otherwise this raises KeyError.
        new_object["file_versions"][0]["publish"] = False
        new_object["publish"] = False
    if size:
        new_object["file_versions"][0]["file_size_bytes"] = size
    if format_name:
        new_object["file_versions"][0]["file_format_name"] = format_name
    if format_version:
        new_object["file_versions"][0]["file_format_version"] = format_version
    new_object_uri = self._post(
        repository + "/digital_objects", data=json.dumps(new_object)
    ).json()["uri"]
    # Now we need to update the parent object with a link to this instance
    parent_record["instances"].append(
        {
            "instance_type": "digital_object",
            "digital_object": {"ref": new_object_uri},
        }
    )
    self._post(parent_archival_object, data=json.dumps(parent_record))
    new_object["id"] = new_object_uri
    return new_object
Creates a new digital object. :param string parent_archival_object: The archival object to which the newly-created digital object will be parented. :param string identifier: A unique identifier for the digital object, in any format. :param string title: The title of the digital object. :param string uri: The URI to an instantiation of the digital object. :param string location_of_originals: If provided, will create an `originalsloc` (location of originals) note in the digital object using this text. :param string object_type: The type of the digital object. Defaults to "text". :param string xlink_show: Controls how the file will be displayed. For supported values, see: http://www.w3.org/TR/xlink/#link-behaviors :param string xlink_actuate: :param string use_statement: :param string use_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing use" note in the digital object. :param string access_conditions: A paragraph of human-readable text to specify conditions of use for the digital object. If provided, creates a "conditions governing access" note in the digital object. :param int size: Size in bytes of the digital object :param str format_name: Name of the digital object's format :param str format_version: Name of the digital object's format version :param bool inherit_dates: Inherit dates :param bool inherit_notes: Inherit parent notes
def find_taskruns(project_id, **kwargs):
    """Return a list of matched task runs for a given project ID.

    :param project_id: PYBOSSA Project ID
    :type project_id: integer
    :param kwargs: PYBOSSA Task Run members used as query filters
    :rtype: list
    :returns: A list of TaskRun objects that match the query members, or
        the raw server response when it is not a list (e.g. an error
        payload).
    """
    kwargs['project_id'] = project_id
    res = _pybossa_req('get', 'taskrun', params=kwargs)
    # isinstance is the correct check here; the original compared the type
    # name as a string, which rejects list subclasses.
    if isinstance(res, list):
        return [TaskRun(taskrun) for taskrun in res]
    return res
Return a list of matched task runs for a given project ID. :param project_id: PYBOSSA Project ID :type project_id: integer :param kwargs: PYBOSSA Task Run members :rtype: list :returns: A List of task runs that match the query members
def filter_by_col(self, column_names):
    """filters sheet/table by columns (input is column header)

    The routine returns the serial numbers of rows whose values are
    positive in *all* of the selected columns and in the "exists" column.

    NOTE(review): the original docstring said "values>1", but the code
    tests ``> 0`` -- confirm which threshold is intended.

    Args:
        column_names (list): the column headers.

    Returns:
        numpy array of int serial numbers.
    """
    # Accept a single column header as well as a list/tuple of them.
    if not isinstance(column_names, (list, tuple)):
        column_names = [column_names, ]
    sheet = self.table
    identity = self.db_sheet_cols.id
    exists = self.db_sheet_cols.exists
    # Start from True so the AND-chain below works for the first column.
    criterion = True
    for column_name in column_names:
        _criterion = sheet.loc[:, column_name] > 0
        _exists = sheet.loc[:, exists] > 0
        criterion = criterion & _criterion & _exists
    return sheet.loc[criterion, identity].values.astype(int)
filters sheet/table by columns (input is column header) The routine returns the serial numbers with values>1 in the selected columns. Args: column_names (list): the column headers. Returns: pandas.DataFrame
def handle(self, request_headers=None, signature_header=None):
    """Handle a webhook request.

    Decodes and verifies the request, then dispatches it to the handler
    registered for its event type (if any). Handlers taking one argument
    receive the decoded request; handlers taking two also receive the
    event type.

    :param request_headers: dict of HTTP request headers (default: empty).
    :param signature_header: raw signature header value, if already known.
    :raises ValueError: if no webhook secret is configured, or the decoded
        request has no ``type`` field.
    """
    # Use None as the default instead of a shared mutable dict.
    if request_headers is None:
        request_headers = {}
    if self.client.webhook_secret is None:
        raise ValueError('Error: no webhook secret.')
    encoded_header = self._get_signature_header(signature_header,
                                                request_headers)
    decoded_request = self._decode_request(encoded_header)
    if 'type' not in decoded_request:
        raise ValueError("Error invalid request: no type field found.")
    handler = self._getHandlerForEvent(decoded_request['type'])
    if handler is None:
        # No handler registered for this event type: silently ignore.
        return
    if self._get_fct_number_of_arg(handler) == 1:
        handler(decoded_request)
        return
    handler(decoded_request, decoded_request['type'])
Handle request.
def hgmd(self): """Hi Index """ # calculate time thread took to finish # logging.info('Starting HI score') tstart = datetime.now() if os.path.isfile(settings.hgmd_file): hgmd_obj = hgmd.HGMD(self.vcf_file) hgmd_obj.run() tend = datetime.now() execution_time = tend - tstart
Hi Index
def loads(
    s,
    record_store=None,
    schema=None,
    loader=from_json_compatible,
    record_class=None  # deprecated in favor of schema
):
    """
    Create a Record instance from a json serialized dictionary

    :param s:
        String with a json-serialized dictionary

    :param record_store:
        Record store to use for schema lookups (when $schema field is
        present)

    :param loader:
        Function called to fetch attributes from json. Typically shouldn't
        be used by end users

    :param schema:
        PySchema Record class for the record to load. This will override
        any $schema fields specified in `s`

    :param record_class:
        DEPRECATED option, old name for the `schema` parameter

    :raises ParseError: if `s` does not start with a json object
    """
    if record_class is not None:
        warnings.warn(
            "The record_class parameter is deprecated in favour of schema",
            DeprecationWarning,
            stacklevel=2
        )
        schema = record_class
    # NOTE: `unicode` means this code targets Python 2; bytes input is
    # decoded as UTF-8 before parsing.
    if not isinstance(s, unicode):
        s = s.decode('utf8')
    if s.startswith(u"{"):
        json_dct = json.loads(s)
        return load_json_dct(json_dct, record_store, schema, loader)
    else:
        raise ParseError("Not a json record")
Create a Record instance from a json serialized dictionary :param s: String with a json-serialized dictionary :param record_store: Record store to use for schema lookups (when $schema field is present) :param loader: Function called to fetch attributes from json. Typically shouldn't be used by end users :param schema: PySchema Record class for the record to load. This will override any $schema fields specified in `s` :param record_class: DEPRECATED option, old name for the `schema` parameter
def copy_file_upload(self, targetdir):
    '''
    Copies the currently valid file upload into the given directory.
    If possible, the content is un-archived in the target directory.

    :param targetdir: directory the upload is extracted / copied into.
    '''
    assert self.file_upload
    path = self.file_upload.absolute_path()
    # Destination used when the upload cannot be unpacked.
    plain_copy_target = os.path.join(targetdir, self.file_upload.basename())
    # NOTE: the original created an unused temporary directory with
    # tempfile.mkdtemp() on every call and never removed it (a leak);
    # that has been dropped. Archives are now closed via context managers.
    try:
        if zipfile.is_zipfile(path):
            with zipfile.ZipFile(path, 'r') as f:
                f.extractall(targetdir)
        elif tarfile.is_tarfile(path):
            with tarfile.open(path) as tar:
                tar.extractall(targetdir)
        else:
            # unpacking not possible, just copy it
            shutil.copyfile(path, plain_copy_target)
    except IOError:
        logger.error("I/O exception while accessing %s." % path)
    except (UnicodeEncodeError, NotImplementedError):
        # unpacking not possible, just copy it
        shutil.copyfile(path, plain_copy_target)
Copies the currently valid file upload into the given directory. If possible, the content is un-archived in the target directory.
def get_tax_class_by_id(cls, tax_class_id, **kwargs):
    """Find TaxClass

    Return single instance of TaxClass by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_tax_class_by_id(tax_class_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str tax_class_id: ID of taxClass to return (required)
    :return: TaxClass
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always return only the deserialized body, not the full HTTP response.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Async mode: the underlying call returns a thread-like object.
        return cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
    else:
        (data) = cls._get_tax_class_by_id_with_http_info(tax_class_id, **kwargs)
        return data
Find TaxClass Return single instance of TaxClass by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_tax_class_by_id(tax_class_id, async=True) >>> result = thread.get() :param async bool :param str tax_class_id: ID of taxClass to return (required) :return: TaxClass If the method is called asynchronously, returns the request thread.
def get_callee_account(
    global_state: GlobalState, callee_address: str, dynamic_loader: DynLoader
):
    """Gets the callee's account from the global state.

    If the account is not yet loaded, attempts to fetch its code through
    the dynamic loader and registers a new Account in the global state.

    :param global_state: state to look in
    :param callee_address: address of the callee
    :param dynamic_loader: dynamic loader to use
    :return: Account belonging to callee
    :raises ValueError: if no dynamic loader is available or no code could
        be loaded for the address
    """
    accounts = global_state.accounts
    try:
        return accounts[callee_address]
    except KeyError:
        # We have a valid call address, but contract is not in the modules list
        log.debug("Module with address " + callee_address + " not loaded.")

        if dynamic_loader is None:
            raise ValueError()

        log.debug("Attempting to load dependency")

        try:
            code = dynamic_loader.dynld(callee_address)
        except ValueError as error:
            log.debug("Unable to execute dynamic loader because: {}".format(str(error)))
            raise error
        if code is None:
            log.debug("No code returned, not a contract account?")
            raise ValueError()
        log.debug("Dependency loaded: " + callee_address)

        callee_account = Account(
            callee_address, code, callee_address, dynamic_loader=dynamic_loader
        )
        # Cache the freshly loaded account for subsequent lookups.
        accounts[callee_address] = callee_account

        return callee_account
Gets the callees account from the global_state. :param global_state: state to look in :param callee_address: address of the callee :param dynamic_loader: dynamic loader to use :return: Account belonging to callee
def parse_at_root(
    self,
    root,  # type: ET.Element
    state  # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse the given element as the root of the document."""
    # Delegate to the wrapped processor, then run the after-parse hooks on
    # the parsed value before handing it back.
    parsed = self._processor.parse_at_root(root, state)
    return _hooks_apply_after_parse(self._hooks, state, parsed)
Parse the given element as the root of the document.
def _add_section_to_report(self, data): """ Adds found data to the report via several HTML generators """ # Count samples that have errors and/or warnings pass_count = error_count = only_warning_count = 0 for sample_data in data.values(): if sample_data['file_validation_status'] == 'fail': error_count += 1 elif sample_data['file_validation_status'] == 'warn': only_warning_count += 1 else: pass_count += 1 # Add overview note plot_html = [] note_html = _generate_overview_note( pass_count=pass_count, only_warning_count=only_warning_count, error_count=error_count, total_count=len(data) ) plot_html.append(note_html) # Add the detailed table, but only if we have something to show if error_count or only_warning_count: table_html = _generate_detailed_table(data) plot_html.append(table_html) # Finally, add the html to the report as a section self.add_section( name='SAM/BAM File Validation', anchor='picard_validatesamfile', description=('This tool reports on the validity of a SAM or BAM ' 'file relative to the SAM-format specification.'), helptext=''' A detailed table is only shown if errors or warnings are found. Details about the errors and warnings are only shown if a `SUMMARY` report was parsed. For more information on the warnings, errors and possible fixes please read [this broadinstitute article](https://software.broadinstitute.org/gatk/documentation/article.php?id=7571).''', plot="\n".join(plot_html), )
Adds found data to the report via several HTML generators
def contains(self, data):
    """
    Check if an item has been added to the bloomfilter.

    :param bytes data: a bytestring representing the item to check.
    :returns: a boolean indicating whether or not the item is present in
        the bloomfilter. False-positives are possible, but a negative
        return value is definitive.
    """
    # Queue one 1-bit read per seed position, then require every bit set.
    operation = BitFieldOperation(self.database, self.key)
    for position in self._get_seeds(data):
        operation.get('u1', position)
    results = operation.execute()
    return all(results)
Check if an item has been added to the bloomfilter. :param bytes data: a bytestring representing the item to check. :returns: a boolean indicating whether or not the item is present in the bloomfilter. False-positives are possible, but a negative return value is definitive.
def cached(cls, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets the given constraints and returns it.

    :API: public

    First looks for a cached version that was previously located, otherwise
    calls locate().

    :param minimum_version: minimum jvm version to look for (eg, 1.7).
      The stricter of this and `--jvm-distributions-minimum-version` is used.
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
      The stricter of this and `--jvm-distributions-maximum-version` is used.
    :param bool jdk: whether the found java distribution is required to have
      a jdk.
    :return: the Distribution.
    :rtype: :class:`Distribution`
    :raises: :class:`Distribution.Error` if no suitable java distribution
      could be found.
    """
    try:
        return cls.global_instance()._locator().locate(
            minimum_version=minimum_version, maximum_version=maximum_version, jdk=jdk)
    except _Locator.Error as e:
        # Re-raise locator failures under the public Distribution.Error type.
        raise cls.Error('Problem locating a java distribution: {}'.format(e))
Finds a java distribution that meets the given constraints and returns it. :API: public First looks for a cached version that was previously located, otherwise calls locate(). :param minimum_version: minimum jvm version to look for (eg, 1.7). The stricter of this and `--jvm-distributions-minimum-version` is used. :param maximum_version: maximum jvm version to look for (eg, 1.7.9999). The stricter of this and `--jvm-distributions-maximum-version` is used. :param bool jdk: whether the found java distribution is required to have a jdk. :return: the Distribution. :rtype: :class:`Distribution` :raises: :class:`Distribution.Error` if no suitable java distribution could be found.
def hist(data):
    """Plots histogram of *data* in a blocking guiqwt CurveDialog window."""
    win = CurveDialog(edit=False, toolbar=True, wintitle="Histogram test")
    plot = win.get_plot()
    plot.add_item(make.histogram(data))
    win.show()
    # exec_() blocks until the dialog window is closed.
    win.exec_()
Plots histogram
def walk_perimeter(self, startx, starty): """ Starting at a point on the perimeter of a region, 'walk' the perimeter to return to the starting point. Record the path taken. Parameters ---------- startx, starty : int The starting location. Assumed to be on the perimeter of a region. Returns ------- perimeter : list A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region. """ # checks startx = max(startx, 0) startx = min(startx, self.xsize) starty = max(starty, 0) starty = min(starty, self.ysize) points = [] x, y = startx, starty while True: self.step(x, y) if 0 <= x <= self.xsize and 0 <= y <= self.ysize: points.append((x, y)) if self.next == self.UP: y -= 1 elif self.next == self.LEFT: x -= 1 elif self.next == self.DOWN: y += 1 elif self.next == self.RIGHT: x += 1 # stop if we meet some kind of error elif self.next == self.NOWHERE: break # stop when we return to the starting location if x == startx and y == starty: break return points
Starting at a point on the perimeter of a region, 'walk' the perimeter to return to the starting point. Record the path taken. Parameters ---------- startx, starty : int The starting location. Assumed to be on the perimeter of a region. Returns ------- perimeter : list A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region.
def has(self, id, domain):
    """
    Check if a message has a translation.

    Checks this catalogue first, then walks to the fallback catalogue
    if one is configured.

    @rtype: bool
    @return: true if the message has a translation, false otherwise
    """
    assert isinstance(id, (str, unicode))
    assert isinstance(domain, (str, unicode))

    if self.defines(id, domain):
        return True

    fallback = self.fallback_catalogue
    return fallback is not None and fallback.has(id, domain)
Checks if a message has a translation. @rtype: bool @return: true if the message has a translation, false otherwise
def swarm_denovo_cluster(seq_path, d=1, threads=1, HALT_EXEC=False): """ Function : launch the Swarm de novo OTU picker Parameters: seq_path, filepath to reads d, resolution threads, number of threads to use Return : clusters, list of lists """ # Check sequence file exists if not exists(seq_path): raise ValueError("%s does not exist" % seq_path) # Instantiate the object swarm = Swarm(HALT_EXEC=HALT_EXEC) # Set the resolution if d > 0: swarm.Parameters['-d'].on(d) else: raise ValueError("Resolution -d must be a positive integer.") # Set the number of threads if threads > 0: swarm.Parameters['-t'].on(threads) else: raise ValueError("Number of threads must be a positive integer.") # create temporary file for Swarm OTU-map f, tmp_swarm_otumap = mkstemp(prefix='temp_otumap_', suffix='.swarm') close(f) swarm.Parameters['-o'].on(tmp_swarm_otumap) # Remove this file later, the final OTU-map # is output by swarm_breaker.py and returned # as a list of lists (clusters) swarm.files_to_remove.append(tmp_swarm_otumap) # Launch Swarm # set the data string to include the read filepath # (to be passed as final arguments in the swarm command) clusters = swarm(seq_path) remove_files(swarm.files_to_remove, error_on_missing=False) # Return clusters return clusters
Function : launch the Swarm de novo OTU picker Parameters: seq_path, filepath to reads d, resolution threads, number of threads to use Return : clusters, list of lists
def is_native_ion_gate(gate: ops.Gate) -> bool:
    """Check if a gate is a native ion gate.

    Args:
        gate: Input gate.

    Returns:
        True if the gate is native to the ion, false otherwise.
    """
    native_gate_types = (
        ops.XXPowGate,
        ops.MeasurementGate,
        ops.XPowGate,
        ops.YPowGate,
        ops.ZPowGate,
    )
    return isinstance(gate, native_gate_types)
Check if a gate is a native ion gate. Args: gate: Input gate. Returns: True if the gate is native to the ion, false otherwise.
def mkstemp(self, suffix, prefix, directory=None):
    """
    Create a uniquely named temp file (in the artifacts base dir unless
    *directory* is given), close its handle and return the file name.
    """
    target_dir = directory or self.artifacts_dir
    handle, filename = tempfile.mkstemp(suffix, prefix, target_dir)
    os.close(handle)
    os.chmod(filename, 0o644)  # FIXME: chmod to parent dir's mode?
    return filename
Generate temp file name in artifacts base dir and close temp file handle
def act(self, world_state, agent_host, current_r ):
    """Take one epsilon-greedy Q-learning step for the current world state.

    Reads the most recent observation, updates the Q-table for the previous
    (state, action) pair using ``current_r``, picks the next action (random
    with probability ``self.epsilon``, otherwise greedy with random
    tie-breaking), sends it to ``agent_host`` and records it as the
    previous (state, action).

    Returns the reward ``current_r`` that was passed in (or 0 when the
    observation is incomplete).
    """
    obs_text = world_state.observations[-1].text
    obs = json.loads(obs_text) # most recent observation
    self.logger.debug(obs)
    if not u'XPos' in obs or not u'ZPos' in obs:
        self.logger.error("Incomplete observation received: %s" % obs_text)
        return 0
    # The state key is the agent's integer grid cell as "x:z".
    current_s = "%d:%d" % (int(obs[u'XPos']), int(obs[u'ZPos']))
    self.logger.debug("State: %s (x = %.2f, z = %.2f)" % (current_s, float(obs[u'XPos']), float(obs[u'ZPos'])))
    # Lazily initialise Q-values for states seen for the first time.
    if current_s not in self.q_table:
        self.q_table[current_s] = ([0] * len(self.actions))

    # update Q values
    if self.prev_s is not None and self.prev_a is not None:
        self.updateQTable( current_r, current_s )

    self.drawQ( curr_x = int(obs[u'XPos']), curr_y = int(obs[u'ZPos']) )

    # select the next action
    rnd = random.random()
    if rnd < self.epsilon:
        # Explore: uniformly random action.
        a = random.randint(0, len(self.actions) - 1)
        self.logger.info("Random action: %s" % self.actions[a])
    else:
        # Exploit: pick uniformly among all actions tied for the max Q-value.
        m = max(self.q_table[current_s])
        self.logger.debug("Current values: %s" % ",".join(str(x) for x in self.q_table[current_s]))
        l = list()
        for x in range(0, len(self.actions)):
            if self.q_table[current_s][x] == m:
                l.append(x)
        y = random.randint(0, len(l)-1)
        a = l[y]
        self.logger.info("Taking q action: %s" % self.actions[a])

    # try to send the selected action, only update prev_s if this succeeds
    try:
        agent_host.sendCommand(self.actions[a])
        self.prev_s = current_s
        self.prev_a = a

    except RuntimeError as e:
        self.logger.error("Failed to send command: %s" % e)

    return current_r
take 1 action in response to the current world state
def status_subversion(path, ignore_set, options):
    """Run ``svn status`` for *path*.

    Returns a 2-element tuple:
    * Text lines describing the status of the repository.
    * Empty sequence of subrepos, since svn does not support them.
    """
    subrepos = ()
    if path in ignore_set:
        return None, subrepos
    keepers = []
    for line in run(['svn', 'st', '-v'], cwd=path):
        # Skip blank lines, progress chatter, externals and unversioned files.
        if not line.strip():
            continue
        if line.startswith(b'Performing') or line[0] in b'X?':
            continue
        status = line[:8]
        ignored_states = options.ignore_svn_states
        if ignored_states and status.strip() in ignored_states:
            continue
        # The filename is the last whitespace-separated field after the
        # fixed-width status columns.
        filename = line[8:].split(None, 3)[-1]
        ignore_set.add(os.path.join(path, filename))
        if status.strip():
            keepers.append(b' ' + status + filename)
    return keepers, subrepos
Run svn status. Returns a 2-element tuple: * Text lines describing the status of the repository. * Empty sequence of subrepos, since svn does not support them.
def StartCli(args, adb_commands, extra=None, **device_kwargs):
    """Starts a common CLI interface for this usb path and protocol."""
    try:
        dev = adb_commands()
        dev.ConnectDevice(
            port_path=args.port_path,
            serial=args.serial,
            default_timeout_ms=args.timeout_ms,
            **device_kwargs)
    except usb_exceptions.DeviceNotFoundError as e:
        print('No device found: {}'.format(e), file=sys.stderr)
        return 1
    except usb_exceptions.CommonUsbError as e:
        print('Could not connect to device: {}'.format(e), file=sys.stderr)
        return 1
    try:
        # Dispatch to the requested method; any failure is reported as 1.
        return _RunMethod(dev, args, extra or {})
    except Exception as e:  # pylint: disable=broad-except
        sys.stdout.write(str(e))
        return 1
    finally:
        # Always release the device handle.
        dev.Close()
Starts a common CLI interface for this usb path and protocol.
def geometries(self):
    """Return an iterator of (shapely) geometries for this feature."""
    # Ensure the shapefile and its sidecar files are in the local cache.
    basename = '{}_{}'.format(self.name, self.scale)
    for ext in ('.dbf', '.shx'):
        get_test_data(basename + ext)
    shp_path = get_test_data(basename + '.shp', as_file_obj=False)
    return iter(tuple(shpreader.Reader(shp_path).geometries()))
Return an iterator of (shapely) geometries for this feature.
def paths_from_version(version):
    """Get the EnergyPlus executable path and install directory.

    Parameters
    ----------
    version : str
        EnergyPlus version in the format "X-X-X", e.g. "8-7-0".

    Returns
    -------
    eplus_exe : str
        Full path to the EnergyPlus executable.
    eplus_home : str
        Full path to the EnergyPlus install directory.
    """
    # Query the platform once instead of per branch.
    system = platform.system()
    if system == 'Windows':
        eplus_home = "C:/EnergyPlusV{version}".format(version=version)
        exe_name = 'energyplus.exe'
    elif system == 'Linux':
        eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version)
        exe_name = 'energyplus'
    else:
        # Fall back to the macOS layout for any other platform.
        eplus_home = "/Applications/EnergyPlus-{version}".format(version=version)
        exe_name = 'energyplus'
    eplus_exe = os.path.join(eplus_home, exe_name)
    return eplus_exe, eplus_home
Get the EnergyPlus install directory and executable path.

    Parameters
    ----------
    version : str
        EnergyPlus version in the format "X-X-X", e.g. "8-7-0".

    Returns
    -------
    eplus_exe : str
        Full path to the EnergyPlus executable.
    eplus_home : str
        Full path to the EnergyPlus install directory.
def is_published(self):
    """Check field 773 to see if the record has already been published.

    The record counts as published when any 773 field instance carries a
    ``c`` subfield.  (The docstring previously claimed field 980 was also
    checked, but this method only inspects 773.)

    :return: True if published, else False
    """
    return any(
        'c' in field_get_subfields(f773)
        for f773 in record_get_field_instances(self.record, '773')
    )
Check fields 980 and 773 to see if the record has already been published. :return: True is published, else False
def dirty(name, target, user=None, username=None, password=None, ignore_unversioned=False):
    '''
    Determine if the working directory has been changed.
    '''
    # Placeholder state: always reports "not implemented" for now.
    ret = dict(name=name, result=True, comment='', changes={})
    return _fail(ret, 'This function is not implemented yet.')
Determine if the working directory has been changed.
def upload_sticker_file(self, user_id, png_sticker):
    """
    Use this method to upload a .png file with a sticker for later use in
    createNewStickerSet and addStickerToSet methods (can be used multiple
    times). Returns the uploaded File on success.

    https://core.telegram.org/bots/api#uploadstickerfile


    Parameters:

    :param user_id: User identifier of sticker file owner
    :type  user_id: int

    :param png_sticker: Png image with the sticker, must be up to 512
                        kilobytes in size, dimensions must not exceed 512px,
                        and either width or height must be exactly 512px.
                        More info on Sending Files »
    :type  png_sticker: pytgbot.api_types.sendable.files.InputFile


    Returns:

    :return: Returns the uploaded File on success
    :rtype:  pytgbot.api_types.receivable.media.File
    """
    from pytgbot.api_types.sendable.files import InputFile

    # Validate argument types before hitting the network.
    assert_type_or_raise(user_id, int, parameter_name="user_id")
    assert_type_or_raise(png_sticker, InputFile, parameter_name="png_sticker")

    result = self.do("uploadStickerFile", user_id=user_id, png_sticker=png_sticker)
    if not self.return_python_objects:
        return result

    logger.debug("Trying to parse {data}".format(data=repr(result)))
    from pytgbot.api_types.receivable.media import File
    try:
        return File.from_array(result)
    except TgApiParseException:
        logger.debug("Failed parsing as api_type File", exc_info=True)
    # no valid parsing so far
    raise TgApiParseException("Could not parse result.")  # See debug log for details!
Use this method to upload a .png file with a sticker for later use in createNewStickerSet and addStickerToSet methods (can be used multiple times). Returns the uploaded File on success. https://core.telegram.org/bots/api#uploadstickerfile Parameters: :param user_id: User identifier of sticker file owner :type user_id: int :param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size, dimensions must not exceed 512px, and either width or height must be exactly 512px. More info on Sending Files » :type png_sticker: pytgbot.api_types.sendable.files.InputFile Returns: :return: Returns the uploaded File on success :rtype: pytgbot.api_types.receivable.media.File
def save_signal(self, filename=None):
    """
    Saves TransitSignal.

    Calls :func:`TransitSignal.save`; default filename is
    ``trsig.pkl`` in ``self.folder``.
    """
    target = filename if filename is not None else os.path.join(self.folder, 'trsig.pkl')
    self.trsig.save(target)
Saves TransitSignal. Calls :func:`TransitSignal.save`; default filename is ``trsig.pkl`` in ``self.folder``.
def process_target(self):
    """Return target with transformations, if any.

    String targets get single and double quotes replaced by an escaped
    single quote and are wrapped in double quotes; non-string targets are
    returned unchanged.
    """
    if isinstance(self.target, str):
        # BUGFIX: the original used "\'" which is just "'", so single
        # quotes were never actually escaped; use a literal backslash.
        self.target = self.target.replace("'", "\\'").replace('"', "\\'")
        return "\"{target}\"".format(target=self.target)
    return self.target
Return target with transformations, if any
def usb_control_out(library, session, request_type_bitmap_field, request_id, request_value, index, data=""):
    """Performs a USB control pipe transfer to the device.

    Corresponds to viUsbControlOut function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param request_type_bitmap_field: bmRequestType parameter of the setup stage of a USB control transfer.
    :param request_id: bRequest parameter of the setup stage of a USB control transfer.
    :param request_value: wValue parameter of the setup stage of a USB control transfer.
    :param index: wIndex parameter of the setup stage of a USB control transfer.
                  This is usually the index of the interface or endpoint.
    :param data: The data buffer that sends the data in the optional data stage of the control transfer.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # The wire protocol wants the explicit payload length before the payload.
    return library.viUsbControlOut(
        session,
        request_type_bitmap_field,
        request_id,
        request_value,
        index,
        len(data),
        data,
    )
Performs a USB control pipe transfer to the device. Corresponds to viUsbControlOut function of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param request_type_bitmap_field: bmRequestType parameter of the setup stage of a USB control transfer. :param request_id: bRequest parameter of the setup stage of a USB control transfer. :param request_value: wValue parameter of the setup stage of a USB control transfer. :param index: wIndex parameter of the setup stage of a USB control transfer. This is usually the index of the interface or endpoint. :param data: The data buffer that sends the data in the optional data stage of the control transfer. :return: return value of the library call. :rtype: :class:`pyvisa.constants.StatusCode`
def overlay_gateway_monitor_vlan_range(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels") name_key = ET.SubElement(overlay_gateway, "name") name_key.text = kwargs.pop('name') monitor = ET.SubElement(overlay_gateway, "monitor") session_key = ET.SubElement(monitor, "session") session_key.text = kwargs.pop('session') vlan_range = ET.SubElement(monitor, "vlan-range") vlan_range.text = kwargs.pop('vlan_range') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def normalise_angle(th):
    """Normalise an angle to be in the range [-pi, pi]."""
    two_pi = 2.0 * np.pi
    # Subtract the number of whole turns that shift (th + pi) below 2*pi.
    return th - two_pi * np.floor((th + np.pi) / two_pi)
Normalise an angle to be in the range [-pi, pi].
def findReference(self, name, cls=QtGui.QWidget):
    """
    Looks up a reference from the widget based on its object name.

    :param      name | <str>
                cls  | <subclass of QtGui.QObject>

    :return     <QtGui.QObject> || None
    """
    # The owning scene keeps the reference registry; delegate to it.
    scene = self.scene()
    return scene.findReference(name, cls)
Looks up a reference from the widget based on its object name. :param name | <str> cls | <subclass of QtGui.QObject> :return <QtGui.QObject> || None
def find_records(self, check, keys=None):
    """Find records matching a query dict, optionally extracting subset of keys.

    Returns list of matching records.

    Parameters
    ----------

    check: dict
        mongodb-style query argument
    keys: list of strs [optional]
        if specified, the subset of keys to extract.  msg_id will *always* be
        included.
    """
    if keys:
        # Reject unknown keys before touching the database.
        bad_keys = [key for key in keys if key not in self._keys]
        if bad_keys:
            raise KeyError("Bad record key(s): %s" % bad_keys)
        # msg_id is always returned, and always first.
        if 'msg_id' in keys:
            keys.remove('msg_id')
        keys.insert(0, 'msg_id')
        req = ', '.join(keys)
    else:
        req = '*'
    expr, args = self._render_expression(check)
    query = """SELECT %s FROM %s WHERE %s""" % (req, self.table, expr)
    cursor = self._db.execute(query, args)
    return [self._list_to_dict(line, keys) for line in cursor.fetchall()]
Find records matching a query dict, optionally extracting subset of keys. Returns list of matching records. Parameters ---------- check: dict mongodb-style query argument keys: list of strs [optional] if specified, the subset of keys to extract. msg_id will *always* be included.
def get_num_ruptures(self):
    """
    :returns: the number of ruptures per source group ID
    """
    counts = {}
    for grp in self.src_groups:
        counts[grp.id] = sum(src.num_ruptures for src in grp)
    return counts
:returns: the number of ruptures per source group ID
def _format_syslog_config(cmd_ret): ''' Helper function to format the stdout from the get_syslog_config function. cmd_ret The return dictionary that comes from a cmd.run_all call. ''' ret_dict = {'success': cmd_ret['retcode'] == 0} if cmd_ret['retcode'] != 0: ret_dict['message'] = cmd_ret['stdout'] else: for line in cmd_ret['stdout'].splitlines(): line = line.strip() cfgvars = line.split(': ') key = cfgvars[0].strip() value = cfgvars[1].strip() ret_dict[key] = value return ret_dict
Helper function to format the stdout from the get_syslog_config function. cmd_ret The return dictionary that comes from a cmd.run_all call.
def read(self, pin, is_differential=False):
    """I2C Interface for ADS1x15-based ADCs reads.

    params:
        :param pin: individual or differential pin.
        :param bool is_differential: single-ended or differential read.
    """
    # Single-ended channels are offset by 0x04 in the mux selection.
    channel = pin if is_differential else pin + 0x04
    return self._read(channel)
I2C Interface for ADS1x15-based ADCs reads. params: :param pin: individual or differential pin. :param bool is_differential: single-ended or differential read.
def _adj(self, k): """ Description: Adjacent breaking Paramters: k: not used """ G = np.zeros((self.m, self.m)) for i in range(self.m): for j in range(self.m): if i == j+1 or j == i+1: G[i][j] = 1 return G
Description: Adjacent breaking Paramters: k: not used
def mother(self):
    """Parent of this individual"""
    # An empty list is the "not yet resolved" sentinel; look up lazily
    # and cache the result.
    unresolved = (self._mother == [])
    if unresolved:
        self._mother = self.sub_tag("FAMC/WIFE")
    return self._mother
Parent of this individual
def get_value(row, field_name):
    '''
    Returns the value found in the field_name attribute of the row dictionary.
    '''
    dict_row = convert_to_dict(row)
    if detect_list(field_name):
        # A list of fields means a nested lookup: walk one level per field.
        current = row
        for field in field_name:
            current = convert_to_dict(current).get(field, None)
        return current
    return dict_row.get(field_name, None)
Returns the value found in the field_name attribute of the row dictionary.
def get_contract(firma, pravni_forma, sidlo, ic, dic, zastoupen):
    """
    Compose contract and create PDF.

    Args:
        firma (str): firma
        pravni_forma (str): pravni_forma
        sidlo (str): sidlo
        ic (str): ic
        dic (str): dic
        zastoupen (str): zastoupen

    Returns:
        obj: StringIO file instance containing PDF file.
    """
    contract_fn = _resource_context(
        "Licencni_smlouva_o_dodavani_elektronickych_publikaci"
        "_a_jejich_uziti.rst"
    )

    # Read the reStructuredText contract template.
    with open(contract_fn) as f:
        template_src = f.read()

    # Make sure `firma` carries its RST heading underline.
    firma = firma.strip()
    underline = "-" * (len(firma) + 1)
    firma = "%s\n%s" % (firma, underline)

    # Fill the template placeholders.
    contract = Template(template_src).substitute(
        firma=firma,
        pravni_forma=pravni_forma.strip(),
        sidlo=sidlo.strip(),
        ic=ic.strip(),
        dic=dic.strip(),
        zastoupen=zastoupen.strip(),
        resources_path=RES_PATH
    )

    with open(_resource_context("style.json")) as style_f:
        style = style_f.read()

    return gen_pdf(contract, style)
Compose contract and create PDF. Args: firma (str): firma pravni_forma (str): pravni_forma sidlo (str): sidlo ic (str): ic dic (str): dic zastoupen (str): zastoupen Returns: obj: StringIO file instance containing PDF file.
def chk(self, annotations, fout_err):
    """Check annotations, recording illegal lines; return True when clean."""
    for idx, ntd in enumerate(annotations):
        self._chk_fld(ntd, "Qualifier")        # optional; 0 or greater
        self._chk_fld(ntd, "DB_Reference", 1)  # required; 1 or greater
        self._chk_fld(ntd, "With_From")        # optional; 0 or greater
        self._chk_fld(ntd, "DB_Name", 0, 1)    # optional; 0 or 1
        self._chk_fld(ntd, "DB_Synonym")       # optional; 0 or greater
        self._chk_fld(ntd, "Taxon", 1, 2)
        self._chk_qty_eq_1(list(ntd))
        # Taxon must contain exactly one or two entries.
        if not ntd.Taxon or len(ntd.Taxon) not in {1, 2}:
            self.illegal_lines['BAD TAXON'].append(
                (idx, '**{I}) TAXON: {NT}'.format(I=idx, NT=ntd)))
    if self.illegal_lines:
        self.prt_error_summary(fout_err)
    return not self.illegal_lines
Check annotations.
def service_present(name, service_type, description=None,
                    profile=None, **connection_args):
    '''
    Ensure service present in Keystone catalog

    name
        The name of the service

    service_type
        The type of Openstack Service

    description (optional)
        Description of the service
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Service "{0}" already exists'.format(name)}

    # Nothing to do when the service already exists.
    existing = __salt__['keystone.service_get'](name=name,
                                                profile=profile,
                                                **connection_args)
    if 'Error' not in existing:
        return ret

    if __opts__.get('test'):
        # Test mode: report what would happen without making changes.
        ret['result'] = None
        ret['comment'] = 'Service "{0}" will be added'.format(name)
        return ret

    # Create service
    __salt__['keystone.service_create'](name, service_type,
                                        description,
                                        profile=profile,
                                        **connection_args)
    ret['comment'] = 'Service "{0}" has been added'.format(name)
    ret['changes']['Service'] = 'Created'
    return ret
Ensure service present in Keystone catalog name The name of the service service_type The type of Openstack Service description (optional) Description of the service
def calc_system(self, x, Y, Y_agg=None, L=None, population=None): """ Calculates the missing part of the extension plus accounts This method allows to specify an aggregated Y_agg for the account calculation (see Y_agg below). However, the full Y needs to be specified for the calculation of FY or SY. Calculates: - for each sector and country: S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector, D_exp_sector - for each region: D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg, - for each region (if population vector is given): D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap Notes ----- Only attributes which are not None are recalculated (for D_* this is checked for each group (reg, cap, and w/o appendix)). Parameters ---------- x : pandas.DataFrame or numpy.array Industry output column vector Y : pandas.DataFrame or numpy.arry Full final demand array Y_agg : pandas.DataFrame or np.array, optional The final demand aggregated (one category per country). Can be used to restrict the calculation of CBA of a specific category (e.g. households). Default: y is aggregated over all categories L : pandas.DataFrame or numpy.array, optional Leontief input output table L. If this is not given, the method recalculates M based on D_cba (must be present in the extension). 
population : pandas.DataFrame or np.array, optional Row vector with population per region """ if Y_agg is None: try: Y_agg = Y.sum(level='region', axis=1).reindex(self.get_regions(), axis=1) except (AssertionError, KeyError): Y_agg = Y.sum(level=0, axis=1,).reindex(self.get_regions(), axis=1) y_vec = Y.sum(axis=0) if self.F is None: self.F = calc_F(self.S, x) logging.debug( '{} - F calculated'.format(self.name)) if self.S is None: self.S = calc_S(self.F, x) logging.debug('{} - S calculated'.format(self.name)) if (self.FY is None) and (self.SY is not None): self.FY = calc_FY(self.SY, y_vec) logging.debug('{} - FY calculated'.format(self.name)) if (self.SY is None) and (self.FY is not None): self.SY = calc_SY(self.FY, y_vec) logging.debug('{} - SY calculated'.format(self.name)) if self.M is None: if L is not None: self.M = calc_M(self.S, L) logging.debug('{} - M calculated based on L'.format( self.name)) else: try: self.M = recalc_M(self.S, self.D_cba, Y=Y_agg, nr_sectors=self.get_sectors().size) logging.debug( '{} - M calculated based on ' 'D_cba and Y'.format(self.name)) except Exception as ex: logging.debug( 'Recalculation of M not possible - cause: {}'. format(ex)) FY_agg = 0 if self.FY is not None: # FY_agg = ioutil.agg_columns( # ext['FY'], self.get_Y_categories().size) try: FY_agg = (self.FY.sum(level='region', axis=1). reindex(self.get_regions(), axis=1)) except (AssertionError, KeyError): FY_agg = (self.FY.sum(level=0, axis=1). 
reindex(self.get_regions(), axis=1)) if ((self.D_cba is None) or (self.D_pba is None) or (self.D_imp is None) or (self.D_exp is None)): if L is None: logging.debug( 'Not possilbe to calculate D accounts - L not present') return else: self.D_cba, self.D_pba, self.D_imp, self.D_exp = ( calc_accounts(self.S, L, Y_agg, self.get_sectors().size)) logging.debug( '{} - Accounts D calculated'.format(self.name)) # aggregate to country if ((self.D_cba_reg is None) or (self.D_pba_reg is None) or (self.D_imp_reg is None) or (self.D_exp_reg is None)): try: self.D_cba_reg = ( self.D_cba.sum(level='region', axis=1). reindex(self.get_regions(), axis=1) + FY_agg) except (AssertionError, KeyError): self.D_cba_reg = ( self.D_cba.sum(level=0, axis=1). reindex(self.get_regions(), axis=1) + FY_agg) try: self.D_pba_reg = ( self.D_pba.sum(level='region', axis=1). reindex(self.get_regions(), axis=1) + FY_agg) except (AssertionError, KeyError): self.D_pba_reg = ( self.D_pba.sum(level=0, axis=1). reindex(self.get_regions(), axis=1) + FY_agg) try: self.D_imp_reg = ( self.D_imp.sum(level='region', axis=1). reindex(self.get_regions(), axis=1)) except (AssertionError, KeyError): self.D_imp_reg = ( self.D_imp.sum(level=0, axis=1). reindex(self.get_regions(), axis=1)) try: self.D_exp_reg = ( self.D_exp.sum(level='region', axis=1). reindex(self.get_regions(), axis=1)) except (AssertionError, KeyError): self.D_exp_reg = ( self.D_exp.sum(level=0, axis=1). 
reindex(self.get_regions(), axis=1)) logging.debug( '{} - Accounts D for regions calculated'.format(self.name)) # calc accounts per capita if population data is available if population is not None: if type(population) is pd.DataFrame: # check for right order: if (population.columns.tolist() != self.D_cba_reg.columns.tolist()): logging.warning( 'Population regions are inconsistent with IO regions') population = population.values if ((self.D_cba_cap is None) or (self.D_pba_cap is None) or (self.D_imp_cap is None) or (self.D_exp_cap is None)): self.D_cba_cap = self.D_cba_reg.dot( np.diagflat(1./population)) self.D_pba_cap = self.D_pba_reg.dot( np.diagflat(1./population)) self.D_imp_cap = self.D_imp_reg.dot( np.diagflat(1./population)) self.D_exp_cap = self.D_exp_reg.dot( np.diagflat(1./population)) self.D_cba_cap.columns = self.D_cba_reg.columns self.D_pba_cap.columns = self.D_pba_reg.columns self.D_imp_cap.columns = self.D_imp_reg.columns self.D_exp_cap.columns = self.D_exp_reg.columns logging.debug( '{} - Accounts D per capita calculated'.format(self.name)) return self
Calculates the missing part of the extension plus accounts This method allows to specify an aggregated Y_agg for the account calculation (see Y_agg below). However, the full Y needs to be specified for the calculation of FY or SY. Calculates: - for each sector and country: S, SY (if FY available), M, D_cba, D_pba_sector, D_imp_sector, D_exp_sector - for each region: D_cba_reg, D_pba_reg, D_imp_reg, D_exp_reg, - for each region (if population vector is given): D_cba_cap, D_pba_cap, D_imp_cap, D_exp_cap Notes ----- Only attributes which are not None are recalculated (for D_* this is checked for each group (reg, cap, and w/o appendix)). Parameters ---------- x : pandas.DataFrame or numpy.array Industry output column vector Y : pandas.DataFrame or numpy.arry Full final demand array Y_agg : pandas.DataFrame or np.array, optional The final demand aggregated (one category per country). Can be used to restrict the calculation of CBA of a specific category (e.g. households). Default: y is aggregated over all categories L : pandas.DataFrame or numpy.array, optional Leontief input output table L. If this is not given, the method recalculates M based on D_cba (must be present in the extension). population : pandas.DataFrame or np.array, optional Row vector with population per region
def docinfo2dict(doctree):
    """
    Return the docinfo field list from a doctree as a dictionary

    Note: there can be multiple instances of a single field in the docinfo.
    Since a dictionary is returned, the last instance's value will win.

    Example:

    pub = rst2pub(rst_string)
    print docinfo2dict(pub.document)
    """
    docinfo_nodes = doctree.traverse(docutils.nodes.docinfo)
    metadata = {}
    if not docinfo_nodes:
        return metadata
    for node in docinfo_nodes[0]:
        # copied this logic from Sphinx, not exactly sure why they use it, but
        # I figured it can't hurt
        if isinstance(node, docutils.nodes.authors):
            metadata['authors'] = [author.astext() for author in node]
        elif isinstance(node, docutils.nodes.TextElement):  # e.g. author
            metadata[node.__class__.__name__] = node.astext()
        else:
            name, body = node
            metadata[name.astext()] = body.astext()
    return metadata
Return the docinfo field list from a doctree as a dictionary Note: there can be multiple instances of a single field in the docinfo. Since a dictionary is returned, the last instance's value will win. Example: pub = rst2pub(rst_string) print docinfo2dict(pub.document)
def assuan_serialize(data):
    """Serialize data according to ASSUAN protocol (for GPG daemon communication)."""
    # '%' is escaped first so the later escapes are not double-encoded.
    for special in (b'%', b'\n', b'\r'):
        replacement = '%{:02X}'.format(ord(special)).encode('ascii')
        data = data.replace(special, replacement)
    return data
Serialize data according to ASSUAN protocol (for GPG daemon communication).
def create_parser(prog):
    """Create an argument parser, adding in the list of providers."""
    parser = argparse.ArgumentParser(
        prog=prog, formatter_class=DsubHelpFormatter)
    # Only the --provider flag is common to all entry points.
    parser.add_argument(
        '--provider',
        default='google-v2',
        choices=['local', 'google', 'google-v2', 'test-fails'],
        metavar='PROVIDER',
        help="""Job service provider. Valid values are "google-v2"
          (Google's Pipeline API v2) and "local" (local Docker execution).
          "test-*" providers are for testing purposes only.""")
    return parser
Create an argument parser, adding in the list of providers.
def set_source_filter(self, source):
    """ Only search for tweets entered via given source

    :param source: String. Name of the source to search for. An example \
    would be ``source=twitterfeed`` for tweets submitted via TwitterFeed
    :raises: TwitterSearchException
    """
    # Source names shorter than two characters are rejected.
    string_type = str if py3k else basestring
    if not isinstance(source, string_type) or len(source) < 2:
        raise TwitterSearchException(1009)
    self.source_filter = source
Only search for tweets entered via given source :param source: String. Name of the source to search for. An example \ would be ``source=twitterfeed`` for tweets submitted via TwitterFeed :raises: TwitterSearchException
def clusterStatus(self):
    """
    Returns a dict of cluster nodes and their status information

    The result has three sections:

    * ``workers``: per-server heartbeat timestamp, status and UUID; a
      "ready" server whose heartbeat is older than 5 seconds is reported
      as ``offline``.
    * ``crons``: per-queue cron methods with their last-run timestamps,
      plus which server currently holds the cron master role.
    * ``queues``: per-queue pending counts and message statistics.

    NOTE(review): this method yields deferreds, so it is presumably
    wrapped with ``defer.inlineCallbacks`` at the class level -- confirm
    before calling it directly.
    """
    servers = yield self.getClusterServers()

    d = {
        'workers': {},
        'crons': {},
        'queues': {}
    }

    now = time.time()

    reverse_map = {}

    # Workers: read heartbeat/status/uuid for every known server.
    for sname in servers:
        last = yield self._get_key('/server/%s/heartbeat' % sname)
        status = yield self._get_key('/server/%s/status' % sname)
        uuid = yield self._get_key('/server/%s/uuid' % sname)

        reverse_map[uuid] = sname

        if not last:
            last = 0

        last = float(last)

        # Treat a "ready" server with a stale heartbeat as offline.
        if (status == 'ready') and (now - last > 5):
            status = 'offline'

        if not sname in d['workers']:
            d['workers'][sname] = []

        d['workers'][sname].append({
            'lastseen': last,
            'status': status,
            'id': uuid
        })

    # Crons
    crons = yield self.keys('/crons')

    for queue in crons:
        if queue not in d['crons']:
            d['crons'][queue] = {'methods': {}}

        methods = yield self.keys('/crons/%s' % queue)

        for method in methods:
            last = yield self._get_key('/crons/%s/%s' % (queue, method))
            if last:
                d['crons'][queue]['methods'][method] = float(last)

        # The croner key holds the UUID of the server running this cron.
        uid = yield self._get_key('/croner/%s' % queue)
        if uid:
            d['crons'][queue]['master'] = '%s:%s' % (uid, reverse_map[uid])

    # Queues
    queue_keys = yield self.keys('/qstats')

    for qname in queue_keys:
        if qname not in d['queues']:
            qlen = yield self.queueSize(qname)
            stats = yield self.getQueueMessageStats(qname)

            d['queues'][qname] = {
                'waiting': qlen,
                'messages': stats
            }

    defer.returnValue(d)
Returns a dict of cluster nodes and their status information
def to_pwm(self, precision=4, extra_str=""):
    """Return pwm as string.

    Parameters
    ----------
    precision : int, optional, default 4
        Floating-point precision.

    extra_str : str, optional
        Extra text to include with motif id line.

    Returns
    -------
    motif_str : str
        Motif formatted in PWM format.
    """
    motif_id = self.id if not extra_str else "%s_%s" % (self.id, extra_str)

    # Derive the pwm from the consensus sequence when it is not set yet.
    if not self.pwm:
        self.pwm = [self.iupac_pwm[char] for char in self.consensus.upper()]

    return ">%s\n%s" % (motif_id, self._pwm_to_str(precision))
Return pwm as string.

    Parameters
    ----------
    precision : int, optional, default 4
        Floating-point precision.

    extra_str : str, optional
        Extra text to include with motif id line.

    Returns
    -------
    motif_str : str
        Motif formatted in PWM format.
def simple_moving_average(data, period):
    """
    Simple Moving Average.

    Formula:
    SUM(data / N)
    """
    catch_errors.check_for_period_error(data, period)
    # "Mean of empty slice" RuntimeWarnings do not affect the output, so
    # they are suppressed while the window means are computed.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        sma = [np.mean(data[idx - (period - 1):idx + 1])
               for idx in range(len(data))]
    return fill_for_noncomputable_vals(data, sma)
Simple Moving Average. Formula: SUM(data / N)
def diff_charsToLines(self, diffs, lineArray):
    """Rehydrate the text in a diff from a string of line hashes to real
    lines of text.

    Args:
      diffs: Array of diff tuples.
      lineArray: Array of unique strings.
    """
    for i in range(len(diffs)):
        op, encoded = diffs[i][0], diffs[i][1]
        # Each character's code point indexes the original line text.
        rebuilt = "".join(lineArray[ord(ch)] for ch in encoded)
        diffs[i] = (op, rebuilt)
Rehydrate the text in a diff from a string of line hashes to real lines of text. Args: diffs: Array of diff tuples. lineArray: Array of unique strings.
def connect_delete_namespaced_pod_proxy_with_path(self, name, namespace, path, **kwargs):  # noqa: E501
    """connect_delete_namespaced_pod_proxy_with_path  # noqa: E501

    connect DELETE requests to proxy of Pod  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.connect_delete_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to pod.
    :return: str
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths return the _with_http_info result
    # unchanged (a thread when async_req=True, the data otherwise).
    return self.connect_delete_namespaced_pod_proxy_with_path_with_http_info(
        name, namespace, path, **kwargs)  # noqa: E501
connect_delete_namespaced_pod_proxy_with_path # noqa: E501 connect DELETE requests to proxy of Pod # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.connect_delete_namespaced_pod_proxy_with_path(name, namespace, path, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the PodProxyOptions (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param str path: path to the resource (required) :param str path2: Path is the URL path to use for the current proxy request to pod. :return: str If the method is called asynchronously, returns the request thread.
def create_user(self, username, first_name=None, last_name=None):
    """Create a new user object on the backend database.

    Returns the User object. Must be linked to a new trainer soon after.
    """
    payload = {'username': username}
    # Only include the optional name fields when provided (and non-empty).
    if first_name:
        payload['first_name'] = first_name
    if last_name:
        payload['last_name'] = last_name
    response = requests.post(api_url + 'users/',
                             data=json.dumps(payload),
                             headers=self.headers)
    print(request_status(response))
    response.raise_for_status()
    return User(response.json())
Creates a new user object on database Returns the User Object. Must be linked to a new trainer soon after
def register_properties_handler(self, handler_function):
    """Register `handler_function` for 'PropertiesChanged' signals.

    Matches signals on dbus interface IPROPERTIES emitted from object
    path ``self.OBJ_PATH`` under this bus name.

    :param function handler_function: The function to be called.
    """
    # Wrap first, then filter on this object's interface.
    wrapped = signal_wrapper(handler_function)
    filtered = filter_properties_signals(wrapped, self.IFACE)
    self.bus.add_signal_receiver(filtered,
                                 signal_name='PropertiesChanged',
                                 dbus_interface=IPROPERTIES,
                                 bus_name=self.name,
                                 path=self.OBJ_PATH)
register `handler_function` to receive `signal_name`. Uses dbus interface IPROPERTIES and objects path self.OBJ_PATH to match 'PropertiesChanged' signal. :param function handler_function: The function to be called.
def wait(self, readfds, writefds, timeout):
    """
    Wait for file descriptors or timeout.

    Adds the current process to the corresponding waiting lists (one per
    fd) and yields the cpu to another running process.

    :param readfds: fds the current process waits to read from.
    :param writefds: fds the current process waits to write to.
    :param timeout: optional wake-up delay (in this scheduler's clock
        units); None means wait indefinitely.
    """
    logger.debug("WAIT:")
    logger.debug(f"\tProcess {self._current} is going to wait for [ {readfds!r} {writefds!r} {timeout!r} ]")
    logger.debug(f"\tProcess: {self.procs!r}")
    logger.debug(f"\tRunning: {self.running!r}")
    logger.debug(f"\tRWait: {self.rwait!r}")
    logger.debug(f"\tTWait: {self.twait!r}")
    logger.debug(f"\tTimers: {self.timers!r}")
    # Park the current process on every fd it waits on. Reads go to
    # rwait; writes go to twait (presumably "transmit wait" — TODO confirm).
    for fd in readfds:
        self.rwait[fd].add(self._current)
    for fd in writefds:
        self.twait[fd].add(self._current)
    # Arm an absolute deadline so check_timers() can wake this process.
    if timeout is not None:
        self.timers[self._current] = self.clocks + timeout
    procid = self._current
    # self.sched()
    # Round-robin: hand the cpu to the next process in the run queue,
    # then remove the (now waiting) process from it.
    next_index = (self.running.index(procid) + 1) % len(self.running)
    self._current = self.running[next_index]
    logger.debug(f"\tTransfer control from process {procid} to {self._current}")
    logger.debug(f"\tREMOVING {procid!r} from {self.running!r}. Current: {self._current!r}")
    self.running.remove(procid)
    # If the waiter was the only runnable process, no one can run until a
    # timer fires; clear the current process and let timers drive wake-up.
    if self._current not in self.running:
        logger.debug("\tCurrent not running. Checking for timers...")
        self._current = None
        self.check_timers()
Wait for file descriptors or timeout. Adds the current process in the correspondent waiting list and yield the cpu to another running process.
def js2str(js, sort_keys=True, indent=4):
    """Encode a JSON-serializable object as a nicely formatted, human
    readable string (utf-8 encoding).

    Usage::

        >>> from weatherlab.lib.dataIO.js import js2str
        >>> s = js2str({"a": 1, "b": 2})
        >>> print(s)
        {
            "a": 1,
            "b": 2
        }

    (Translated note: converts a JSON-serializable Python object into a
    formatted string.)
    """
    # Compact separators avoid the trailing space json.dumps adds after
    # commas by default.
    separators = (",", ": ")
    return json.dumps(js, sort_keys=sort_keys, indent=indent,
                      separators=separators)
Encode js to nicely formatted human readable string. (utf-8 encoding) Usage:: >>> from weatherlab.lib.dataIO.js import js2str >>> s = js2str({"a": 1, "b": 2}) >>> print(s) { "a": 1, "b": 2 } **中文文档** 将可Json化的Python对象转化成格式化的字符串。
def setAlternatingRowColors(self, state):
    """
    Sets the alternating row colors state for this widget and forwards
    the flag to the underlying tree widget.

    :param      state | <bool>
    """
    self._alternatingRowColors = state
    tree = self.treeWidget()
    tree.setAlternatingRowColors(state)
Sets the alternating row colors state for this widget. :param state | <bool>
def draw(data, size=(600, 400), node_size=2.0, edge_size=0.25,
         default_node_color=0x5bc0de, default_edge_color=0xaaaaaa, z=100,
         shader='basic', optimize=True, directed=True, display_html=True,
         show_save=False):
    """Draws an interactive 3D visualization of the inputted graph.

    Args:
        data: Either an adjacency list of tuples (ie. [(1,2),...]) or object
        size: (Optional) Dimensions of visualization, in pixels
        node_size: (Optional) Defaults to 2.0
        edge_size: (Optional) Defaults to 0.25
        default_node_color: (Optional) If loading data without specified
            'color' properties, this will be used. Default is 0x5bc0de
        default_edge_color: (Optional) If loading data without specified
            'color' properties, this will be used. Default is 0xaaaaaa
        z: (Optional) Starting z position of the camera. Default is 100.
        shader: (Optional) Specifies shading algorithm to use. Can be 'toon',
            'basic', 'phong', or 'lambert'. Default is 'basic'.
        optimize: (Optional) Runs a force-directed layout algorithm on the
            graph. Default True.
        directed: (Optional) Includes arrows on edges to indicate direction.
            Default True.
        display_html: If True (default), embed the html in a IPython display.
            If False, return the html as a string.
        show_save: If True, displays a save icon for rendering graph as an
            image.

    Inputting an adjacency list into `data` results in a 'default' graph
    type. For more customization, use the more expressive object format.
    """
    # Catch errors on string-based input before getting js involved
    shader_options = ['toon', 'basic', 'phong', 'lambert']
    if shader not in shader_options:
        raise Exception('Invalid shader! Please use one of: ' + ', '.join(shader_options))

    # Normalize integer colors to hex strings for the js side.
    if isinstance(default_edge_color, int):
        default_edge_color = hex(default_edge_color)
    if isinstance(default_node_color, int):
        default_node_color = hex(default_node_color)

    # Guess the input format and handle accordingly
    if isinstance(data, list):
        # Adjacency list: run one layout iteration and serialize.
        graph = json_formatter.dumps(generate(data, iterations=1))
    elif isinstance(data, dict):
        # Convert color hex to string for json handling
        for node_key in data['nodes']:
            node = data['nodes'][node_key]
            if 'color' in node and isinstance(node['color'], int):
                node['color'] = hex(node['color'])
        for edge in data['edges']:
            if 'color' in edge and isinstance(edge['color'], int):
                edge['color'] = hex(edge['color'])
        graph = json_formatter.dumps(data)
    else:
        # Support both files and strings
        # NOTE(review): bare except deliberately treats any open/read
        # failure as "data is already a json string" — best-effort fallback.
        try:
            with open(data) as in_file:
                graph = in_file.read()
        except:
            graph = data

    # Unique div id so multiple graphs can coexist in one notebook.
    div_id = uuid.uuid4()
    html = '''<div id="graph-%(id)s"></div>
           <script type="text/javascript">
           require.config({baseUrl: '/',
                           paths: {jgraph: ['%(local)s', '%(remote)s']}});
           require(['jgraph'], function () {
               var $d = $('#graph-%(id)s');
               $d.width(%(w)d); $d.height(%(h)d);
               $d.jgraph = jQuery.extend({}, jgraph);
               $d.jgraph.create($d, {nodeSize: %(node_size)f,
                                     edgeSize: %(edge_size)f,
                                     defaultNodeColor: '%(node_color)s',
                                     defaultEdgeColor: '%(edge_color)s',
                                     shader: '%(shader)s',
                                     z: %(z)d,
                                     runOptimization: %(optimize)s,
                                     directed: %(directed)s,
                                     showSave: %(show_save)s});
               $d.jgraph.draw(%(graph)s);

               $d.resizable({
                   aspectRatio: %(w)d / %(h)d,
                   resize: function (evt, ui) {
                       $d.jgraph.renderer.setSize(ui.size.width,
                                                  ui.size.height);
                   }
               });
           });
           </script>''' % dict(id=div_id, local=local_path[:-3],
                               remote=remote_path[:-3], w=size[0], h=size[1],
                               node_size=node_size, edge_size=edge_size,
                               node_color=default_node_color,
                               edge_color=default_edge_color,
                               shader=shader, z=z, graph=graph,
                               optimize='true' if optimize else 'false',
                               directed='true' if directed else 'false',
                               show_save='true' if show_save else 'false')

    # Execute js and display the results in a div (see script for more)
    if display_html:
        display(HTML(html))
    else:
        return html
Draws an interactive 3D visualization of the inputted graph. Args: data: Either an adjacency list of tuples (ie. [(1,2),...]) or object size: (Optional) Dimensions of visualization, in pixels node_size: (Optional) Defaults to 2.0 edge_size: (Optional) Defaults to 0.25 default_node_color: (Optional) If loading data without specified 'color' properties, this will be used. Default is 0x5bc0de default_edge_color: (Optional) If loading data without specified 'color' properties, this will be used. Default is 0xaaaaaa z: (Optional) Starting z position of the camera. Default is 100. shader: (Optional) Specifies shading algorithm to use. Can be 'toon', 'basic', 'phong', or 'lambert'. Default is 'basic'. optimize: (Optional) Runs a force-directed layout algorithm on the graph. Default True. directed: (Optional) Includes arrows on edges to indicate direction. Default True. display_html: If True (default), embed the html in a IPython display. If False, return the html as a string. show_save: If True, displays a save icon for rendering graph as an image. Inputting an adjacency list into `data` results in a 'default' graph type. For more customization, use the more expressive object format.
def linkify(self, timeperiods):
    """
    Replace timeperiod names in ``exclude`` with the uuids of the
    matching timeperiods; unknown names are reported as errors.

    :param timeperiods: Timeperiods object
    :type timeperiods:
    :return: None
    """
    resolved = []
    if hasattr(self, 'exclude') and self.exclude != []:
        logger.debug("[timeentry::%s] have excluded %s", self.get_name(), self.exclude)
        for tp_name in self.exclude:
            timeperiod = timeperiods.find_by_name(tp_name.strip())
            if timeperiod is None:
                self.add_error("[timeentry::%s] unknown %s timeperiod" % (self.get_name(), tp_name))
            else:
                resolved.append(timeperiod.uuid)
    # Names are replaced by uuids (unknown names are dropped).
    self.exclude = resolved
Will make timeperiod in exclude with id of the timeperiods :param timeperiods: Timeperiods object :type timeperiods: :return: None
def parse_uri_path(self, path):
    """
    Given a uri path, return the Redis specific configuration
    options in that path string according to the iana definition
    http://www.iana.org/assignments/uri-schemes/prov/redis

    :param path: string containing the path. Example: "/0"
    :return: mapping containing the options. Example: {"db": "0"}
    """
    # The db index is the first segment after the leading slash.
    segments = path[1:].split("/")
    db = segments[0]
    return {"db": db} if db else {}
Given a uri path, return the Redis specific configuration options in that path string according to iana definition http://www.iana.org/assignments/uri-schemes/prov/redis :param path: string containing the path. Example: "/0" :return: mapping containing the options. Example: {"db": "0"}
def forward(self, obj):
    """ Forward an object to clients.

    :param obj: The object to be forwarded
    :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus
    :raises Exception: if any of the clients failed
    """
    assert isinstance(obj, (IncomingMessage, MessageStatus)), 'Tried to forward an object of an unsupported type: {}'.format(obj)
    clients = self.choose_clients(obj)
    # Without the optional Parallel helper, fall back to sequential delivery.
    if not Parallel:
        for client in clients:
            self._forward_object_to_client(client, obj)
        return
    # Fan out to all clients concurrently; re-raise the first failure.
    pll = Parallel(self._forward_object_to_client)
    for client in clients:
        pll(client, obj)
    results, errors = pll.join()
    if errors:
        raise errors[0]
Forward an object to clients. :param obj: The object to be forwarded :type obj: smsframework.data.IncomingMessage|smsframework.data.MessageStatus :raises Exception: if any of the clients failed
def upload(self, src_dir, replica, staging_bucket, timeout_seconds=1200):
    """
    Upload a directory of files from the local filesystem and create a
    bundle containing the uploaded files.

    :param str src_dir: file path to a directory of files to upload to the replica.
    :param str replica: the replica to upload to. The supported replicas are:
        `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp]
    :param str staging_bucket: a client controlled AWS S3 storage bucket to upload from.
    :param int timeout_seconds: the time to wait for a file to upload to replica.

    This method requires the use of a client-controlled object storage
    bucket to stage the data for upload.
    """
    bundle_uuid = str(uuid.uuid4())
    version = datetime.utcnow().strftime("%Y-%m-%dT%H%M%S.%fZ")

    files_to_upload, files_uploaded = [], []
    # Open every file in the source directory for staging.
    for filename in iter_paths(src_dir):
        full_file_name = filename.path
        files_to_upload.append(open(full_file_name, "rb"))

    logger.info("Uploading %i files from %s to %s", len(files_to_upload), src_dir, staging_bucket)
    file_uuids, uploaded_keys, abs_file_paths = upload_to_cloud(files_to_upload, staging_bucket=staging_bucket,
                                                                replica=replica, from_cloud=False)
    # Close the handles opened above once staging is done.
    for file_handle in files_to_upload:
        file_handle.close()
    filenames = [object_name_builder(p, src_dir) for p in abs_file_paths]
    filename_key_list = list(zip(filenames, file_uuids, uploaded_keys))

    # Register each staged file with the service.
    for filename, file_uuid, key in filename_key_list:
        filename = filename.replace('\\', '/')  # for windows paths
        if filename.startswith('/'):
            filename = filename.lstrip('/')
        logger.info("File %s: registering...", filename)

        # Generating file data
        # NOTE(review): creator_uid is bound inside this loop but read by
        # put_bundle below — an empty src_dir would raise NameError; confirm
        # callers guarantee at least one file.
        creator_uid = self.config.get("creator_uid", 0)
        source_url = "s3://{}/{}".format(staging_bucket, key)
        logger.info("File %s: registering from %s -> uuid %s", filename, source_url, file_uuid)
        response = self.put_file._request(dict(
            uuid=file_uuid,
            bundle_uuid=bundle_uuid,
            version=version,
            creator_uid=creator_uid,
            source_url=source_url
        ))
        files_uploaded.append(dict(name=filename, version=version, uuid=file_uuid, creator_uid=creator_uid))

        if response.status_code in (requests.codes.ok, requests.codes.created):
            logger.info("File %s: Sync copy -> %s", filename, version)
        else:
            # Server accepted an async copy: poll head_file with exponential
            # backoff (capped at 60s) until the file appears or we time out.
            assert response.status_code == requests.codes.accepted
            logger.info("File %s: Async copy -> %s", filename, version)

            timeout = time.time() + timeout_seconds
            wait = 1.0
            while time.time() < timeout:
                try:
                    # NOTE(review): replica is hard-coded to "aws" here even
                    # when uploading to gcp — confirm this is intended.
                    self.head_file(uuid=file_uuid, replica="aws", version=version)
                    break
                except SwaggerAPIException as e:
                    if e.code != requests.codes.not_found:
                        msg = "File {}: Unexpected server response during registration"
                        req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
                        raise RuntimeError(msg.format(filename), req_id)
                    time.sleep(wait)
                    wait = min(60.0, wait * self.UPLOAD_BACKOFF_FACTOR)
            else:
                # timed out. :(
                req_id = 'X-AWS-REQUEST-ID: {}'.format(response.headers.get("X-AWS-REQUEST-ID"))
                raise RuntimeError("File {}: registration FAILED".format(filename), req_id)
            logger.debug("Successfully uploaded file")

    # Only .json files are marked as indexed.
    file_args = [{'indexed': file_["name"].endswith(".json"),
                  'name': file_['name'],
                  'version': file_['version'],
                  'uuid': file_['uuid']} for file_ in files_uploaded]

    logger.info("%s", "Bundle {}: Registering...".format(bundle_uuid))
    response = self.put_bundle(uuid=bundle_uuid,
                               version=version,
                               replica=replica,
                               creator_uid=creator_uid,
                               files=file_args)
    logger.info("%s", "Bundle {}: Registered successfully".format(bundle_uuid))

    return {
        "bundle_uuid": bundle_uuid,
        "creator_uid": creator_uid,
        "replica": replica,
        "version": response["version"],
        "files": files_uploaded
    }
Upload a directory of files from the local filesystem and create a bundle containing the uploaded files. :param str src_dir: file path to a directory of files to upload to the replica. :param str replica: the replica to upload to. The supported replicas are: `aws` for Amazon Web Services, and `gcp` for Google Cloud Platform. [aws, gcp] :param str staging_bucket: a client controlled AWS S3 storage bucket to upload from. :param int timeout_seconds: the time to wait for a file to upload to replica. Upload a directory of files from the local filesystem and create a bundle containing the uploaded files. This method requires the use of a client-controlled object storage bucket to stage the data for upload.
def day_fraction(time):
    """Convert a 24-hour time to a fraction of a day.

    For example, midnight corresponds to 0.0, and noon to 0.5.

    :param time: Time in the form of 'HH:MM' (24-hour time)
    :type time: string
    :return: A day fraction
    :rtype: float

    :Examples:

    .. code-block:: python

        day_fraction("18:30")
    """
    hour_text, minute_text = time.split(":")
    # 24 hours per day, 1440 minutes per day.
    return int(hour_text) / 24 + int(minute_text) / 1440
Convert a 24-hour time to a fraction of a day. For example, midnight corresponds to 0.0, and noon to 0.5. :param time: Time in the form of 'HH:MM' (24-hour time) :type time: string :return: A day fraction :rtype: float :Examples: .. code-block:: python day_fraction("18:30")
def delete_project(project_id):
    """Delete a project; only its owner may do so (403 otherwise)."""
    project = get_data_or_404('project', project_id)
    if project['owner_id'] == get_current_user_id():
        delete_instance('project', project_id)
        return jsonify({})
    return jsonify(message='forbidden'), 403
Delete Project.
def set_offset(self, offset):
    """Set the current read offset (in bytes) for the instance.

    :param offset: absolute byte offset; must index into ``self.buffer``.
    :raises IndexError: if ``offset`` is outside the buffer bounds.
    """
    # Validate explicitly instead of with ``assert`` so the bounds check
    # is not stripped when Python runs with optimizations (-O).
    if not 0 <= offset < len(self.buffer):
        raise IndexError('offset {} out of range for buffer of length {}'.format(
            offset, len(self.buffer)))
    self.pos = offset
    self._fill_buffer()
Set the current read offset (in bytes) for the instance.
def get_import_resource_kwargs(self, request, *args, **kwargs):
    """Prepare and return the kwargs used when initializing a Resource.

    Delegates to ``get_resource_kwargs``; override this hook to customise
    import-specific Resource construction.
    """
    resource_kwargs = self.get_resource_kwargs(request, *args, **kwargs)
    return resource_kwargs
Prepares/returns kwargs used when initializing Resource
def sequence(self, per_exon=False):
    """
    Return the sequence for this feature.
    if per-exon is True, return an array of exon sequences
    This sequence is never reverse complemented
    """
    db = self.db
    if per_exon:
        # TODO: use same strategy as cds_sequence to reduce # of requests.
        # Coordinates are converted to 1-based starts for the fetch.
        return [_sequence(db, self.chrom, start + 1, end)
                for start, end in self.exons]
    return _sequence(db, self.chrom, self.txStart + 1, self.txEnd)
Return the sequence for this feature. if per-exon is True, return an array of exon sequences This sequence is never reverse complemented
def remove_sources(self, sources):
    """ Remove sources from the decomposition.

    Doing so invalidates any currently fitted VAR models and
    connectivity estimates, which are reset.

    Parameters
    ----------
    sources : {slice, int, array of ints}
        Indices of components to remove.

    Returns
    -------
    self : Workspace
        The Workspace object.

    Raises
    ------
    RuntimeError
        If the :class:`Workspace` instance does not contain a source
        decomposition.
    """
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")

    # Drop the selected components: rows of the mixing matrix,
    # columns of the unmixing matrix (and of the activations).
    self.mixing_ = np.delete(self.mixing_, sources, 0)
    self.unmixing_ = np.delete(self.unmixing_, sources, 1)
    if self.activations_ is not None:
        self.activations_ = np.delete(self.activations_, sources, 1)

    # Everything derived from the old decomposition is now stale.
    for stale_attr in ('var_model_', 'var_cov_', 'connectivity_'):
        setattr(self, stale_attr, None)
    self.mixmaps_ = []
    self.unmixmaps_ = []
    return self
Remove sources from the decomposition. This function removes sources from the decomposition. Doing so invalidates currently fitted VAR models and connectivity estimates. Parameters ---------- sources : {slice, int, array of ints} Indices of components to remove. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain a source decomposition.
def _null_sia(subsystem, phi=0.0):
    """Return a |SystemIrreducibilityAnalysis| with zero |big_phi| and empty
    cause-effect structures.

    This is the analysis result for a reducible subsystem.
    """
    # The cut subsystem is the subsystem itself; both CES are empty.
    empty_ces = _null_ces(subsystem)
    return SystemIrreducibilityAnalysis(subsystem=subsystem,
                                        cut_subsystem=subsystem,
                                        phi=phi,
                                        ces=empty_ces,
                                        partitioned_ces=_null_ces(subsystem))
Return a |SystemIrreducibilityAnalysis| with zero |big_phi| and empty cause-effect structures. This is the analysis result for a reducible subsystem.
def find_endurance_tier_iops_per_gb(volume):
    """Find the tier for the given endurance volume (IOPS per GB)

    :param volume: The volume for which the tier level is desired
    :return: Returns a float value indicating the IOPS per GB for the volume
    """
    # Map each known tier level to its IOPS-per-GB rating.
    tier_iops = {
        "LOW_INTENSITY_TIER": 0.25,
        "READHEAVY_TIER": 2,
        "WRITEHEAVY_TIER": 4,
        "10_IOPS_PER_GB": 10,
    }
    tier = volume['storageTierLevel']
    if tier not in tier_iops:
        raise ValueError("Could not find tier IOPS per GB for this volume")
    return tier_iops[tier]
Find the tier for the given endurance volume (IOPS per GB) :param volume: The volume for which the tier level is desired :return: Returns a float value indicating the IOPS per GB for the volume
def sync(self):
    """Sync the timeout index entry with the shelf."""
    # With writeback caching active: remove the index entry from the
    # underlying store, flush the cache, then write the current index
    # directly (writeback is temporarily disabled so __setitem__ persists
    # immediately instead of going back into the cache).
    if self.writeback and self.cache:
        super(_TimeoutMixin, self).__delitem__(self._INDEX)
        super(_TimeoutMixin, self).sync()
        self.writeback = False
        super(_TimeoutMixin, self).__setitem__(self._INDEX, self._index)
        self.writeback = True
    # Flush the backing dict if it supports syncing.
    if hasattr(self.dict, 'sync'):
        self.dict.sync()
Sync the timeout index entry with the shelf.
def wrap_results(self, **kwargs):
    """
    Wrap a returned http response into a well formatted dict.

    :param kwargs: this dict param should contain the following keys:
        fd: file directory to
        url: the test url of the result
        files_count: the number of files under har/ directory
        external: (optional) mapping to store the per-url results in
    :return (dict): the results of all
    """
    if 'fd' not in kwargs \
            or 'url' not in kwargs \
            or 'files_count' not in kwargs:
        logging.error("Missing arguments in wrap_results function")
        return {}
    external = kwargs['external'] if 'external' in kwargs else None
    fd = kwargs['fd']
    url = kwargs['url']
    length = kwargs['files_count']
    results = {}
    files = []
    wait_time = 15
    host = self.divide_url(url)[0]
    time.sleep(0.5)
    # wait until the har file is generated (poll up to ~15s; a new file
    # must appear beyond the ones already parsed)
    while len(os.listdir(fd)) <= length + self.parsed:
        time.sleep(1)
        wait_time -= 1
        if wait_time == 0:
            logging.warning("%s waiting har file result timed out" % url)
            results['error'] = "wrap har file timeout"
            if external is not None:
                external[url] = results
            return results
    time.sleep(1)
    # find all har files under har/ directory that match this url's host
    for fn in os.listdir(fd):
        if fn.endswith(".har") and host in fn:
            path = os.path.join(fd, fn)
            files.append((fn, os.stat(path).st_mtime))
    # sort all har files (by mtime, ascending) and parse the latest one
    files.sort(key=lambda x: x[1])
    if len(files) > 0:
        with open(fd + '/' + files[-1][0]) as f:
            raw_data = json.load(f)['log']['entries']
        # Build one entry per request/response pair found in the har log.
        results = [{} for i in range(0, len(raw_data))]
        for i in range(0, len(results)):
            results[i]['request'] = {}
            results[i]['request']['method'] = raw_data[i]['request']['method']
            headers = {}
            for header in raw_data[i]['request']['headers']:
                headers[header['name']] = header['value']
            results[i]['request']['headers'] = headers
            results[i]['response'] = {}
            results[i]['response']['status'] = raw_data[i]['response']['status']
            results[i]['response']['reason'] = raw_data[i]['response']['statusText']
            headers = {}
            for header in raw_data[i]['response']['headers']:
                headers[header['name']] = header['value']
            results[i]['response']['headers'] = headers
            results[i]['response']['redirect'] = raw_data[i]['response']['redirectURL']
            results[i]['response']['body'] = raw_data[i]['response']['content']
        self.parsed += 1  # increment the number of parsed har files
    else:
        logging.warning("Cannot find har file for %s" % url)
    # save test result of this url to the external result object or
    # return the result
    if external is not None:
        external[url] = results
    else:
        return results
Wrap returned http response into a well formatted dict :param kwargs: this dict param should contain the following keys: fd: file directory to url: the test url of the result files_count: the number of files under har/ directory :return (dict): the results of all
def dump_process_memory(self, pid, working_dir="c:\\windows\\carbonblack\\", path_to_procdump=None):
    """Use sysinternals procdump to dump process memory on a specific process.

    If only the pid is specified, the default behavior is to use the version
    of ProcDump supplied with cbinterface's pip3 installer.

    :requires: SysInternals ProcDump v9.0 included with cbinterface==1.1.0
    :arguments pid: Process id to dump memory for
    :arguments working_dir: Specify a directory on the windows sensor to work
        out of. Default: C:\\Windows\\CarbonBlack\\
    :arguments path_to_procdump: Specify the path to a version of procdump
        you want to use. Default is included copy
    """
    self.go_live()
    print("~ dumping memory where pid={} for {}".format(pid, self.sensor.computer_name))

    # need to make sure procdump.exe is on the sensor
    procdump_host_path = None
    dir_output = self.lr_session.list_directory(working_dir)
    for dir_item in dir_output:
        if dir_item['filename'] == 'procdump.exe':
            logging.info("procdump.exe already on host.")
            procdump_host_path = working_dir + "procdump.exe"
            break
    else:  # for/else: runs only when the loop found no procdump.exe
        logging.info("Dropping procdump.exe on host.")

    if not procdump_host_path:
        # NOTE(review): with the default path_to_procdump=None,
        # os.path.exists(None) raises TypeError on Python 3 — confirm
        # callers always pass a path or that the bundled copy resolves first.
        if not os.path.exists(path_to_procdump):
            # Fall back to the copy bundled with this package.
            HOME_DIR = os.path.abspath(os.path.join(os.path.realpath(__file__), '..', '..'))
            path_to_procdump = os.path.join(HOME_DIR, 'lr_tools', 'procdump.exe')
        if not os.path.exists(path_to_procdump):
            logging.warn("{} not found".format(path_to_procdump))
            return False
        print("~ dropping procdump.exe on host.")
        filedata = None
        with open(path_to_procdump, 'rb') as f:
            filedata = f.read()
        try:
            self.lr_session.create_directory(working_dir)
        except LiveResponseError:
            logging.debug("working directory already exists")
        self.lr_session.put_file(filedata, working_dir + "procdump.exe")
        procdump_host_path = working_dir + "procdump.exe"

    print("~ Executing procdump..")
    # -accepteula avoids the interactive EULA prompt; -ma = full dump.
    command_str = procdump_host_path + " -accepteula -ma " + str(pid)
    result = self.lr_session.create_process(command_str)
    time.sleep(1)
    print("+ procdump output:\n-------------------------")
    result = result.decode('utf-8')
    print(result + "\n-------------------------")

    # cut off the carriage return and line feed from filename
    dumpfile_name = result[result.rfind('\\')+1:result.rfind('.dmp')+4]
    # Wait for procdump.exe to exit before collecting the dump.
    while True:
        if 'procdump.exe' not in str(self.lr_session.list_processes()):
            break
        else:
            time.sleep(1)
    # download dumpfile to localdir
    self.getFile_with_timeout(working_dir + dumpfile_name)
Use sysinternals procdump to dump process memory on a specific process. If only the pid is specified, the default behavior is to use the version of ProcDump supplied with cbinterface's pip3 installer. :requires: SysInternals ProcDump v9.0 included with cbinterface==1.1.0 :arguments pid: Process id to dump memory for :arguments working_dir: Specify a directory on the windows sensor to work out of. Default: C:\\Windows\\CarbonBlack\\ :arguments path_to_procdump: Specify the path to a version of procdump you want to use. Default is included copy
def open(self):
    """Open the connection under a freshly generated uuid."""
    connection_id = str(uuid.uuid4())
    self._id = connection_id
    self._client.open_connection(connection_id, info=self._connection_args)
Opens the connection.
def write_file_list(filename, file_list=None, glob=None):
    """Write a list of files to a file.

    :param filename: the name of the file to write the list to
    :param file_list: a list of filenames to write to a file
    :param glob: if glob is specified, it will ignore file_list and instead
        create a list of files based on the pattern provided by glob
        (ex. *.cub)
    """
    # The original signature used a mutable default ([]), which is shared
    # across calls; use None as the sentinel instead.
    if glob:
        file_list = iglob(glob)
    elif file_list is None:
        file_list = []
    with open(filename, 'w') as f:
        for line in file_list:
            f.write(line + '\n')
Write a list of files to a file. :param filename: the name of the file to write the list to :param file_list: a list of filenames to write to a file :param glob: if glob is specified, it will ignore file_list and instead create a list of files based on the pattern provide by glob (ex. *.cub)
def split_text(text: str, length: int = MAX_MESSAGE_LENGTH) -> typing.List[str]:
    """
    Split long text into chunks of at most *length* characters.

    :param text:
    :param length:
    :return: list of parts
    :rtype: :obj:`typing.List[str]`
    """
    starts = range(0, len(text), length)
    return [text[start:start + length] for start in starts]
Split long text :param text: :param length: :return: list of parts :rtype: :obj:`typing.List[str]`
def setLevel(self, level):
    r"""Overrides the parent method to adapt the formatting string to the
    level.

    Parameters
    ----------
    level : int
        The new log level to set. See the logging levels in the logging
        module for details.

    Examples
    --------
    >>> import logging
    >>> Logger.setLevel(logging.DEBUG)
    """
    # The verbose format (module/function/line suffix) is only used at
    # DEBUG level and below.
    if logging.DEBUG >= level:
        fmt = "%(asctime)s [%(levelname)-8s] %(message)s (in %(module)s.%(funcName)s:%(lineno)s)"
    else:
        fmt = "%(asctime)s [%(levelname)-8s] %(message)s"
    self._handler.setFormatter(logging.Formatter(fmt, "%d.%m.%Y %H:%M:%S"))
    NativeLogger.setLevel(self, level)
r"""Overrides the parent method to adapt the formatting string to the level. Parameters ---------- level : int The new log level to set. See the logging levels in the logging module for details. Examples -------- >>> import logging >>> Logger.setLevel(logging.DEBUG)
def shift(self, delta):
    """Shift this `Series` forward on the X-axis by ``delta``

    This modifies the series in-place.

    Parameters
    ----------
    delta : `float`, `~astropy.units.Quantity`, `str`
        The amount by which to shift (in x-axis units if `float`), give
        a negative value to shift backwards in time

    Examples
    --------
    >>> from gwpy.types import Series
    >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m')
    >>> print(a.x0)
    0.0 m
    >>> a.shift(5)
    >>> print(a.x0)
    5.0 m
    >>> a.shift('-1 km')
    -995.0 m
    """
    # Coerce delta into this series' x-axis units before offsetting x0.
    offset = Quantity(delta, self.xunit)
    self.x0 = self.x0 + offset
Shift this `Series` forward on the X-axis by ``delta`` This modifies the series in-place. Parameters ---------- delta : `float`, `~astropy.units.Quantity`, `str` The amount by which to shift (in x-axis units if `float`), give a negative value to shift backwards in time Examples -------- >>> from gwpy.types import Series >>> a = Series([1, 2, 3, 4, 5], x0=0, dx=1, xunit='m') >>> print(a.x0) 0.0 m >>> a.shift(5) >>> print(a.x0) 5.0 m >>> a.shift('-1 km') -995.0 m
def get_person(people_id):
    """Return a single person fetched from the people endpoint."""
    response = _get(people_id, settings.PEOPLE)
    return People(response.content)
Return a single person
def _create_alpha(self, data, fill_value=None):
    """Create an alpha band DataArray object.

    If `fill_value` is provided and input data is an integer type
    then it is used to determine invalid "null" pixels instead of
    xarray's `isnull` and `notnull` methods.

    The returned array is 1 where data is valid, 0 where invalid.
    """
    # Work on the color bands only (everything except alpha).
    color_bands = [band for band in data.coords['bands'].values if band != 'A']
    valid = data.sel(bands=color_bands)
    if fill_value is not None and np.issubdtype(data.dtype, np.integer):
        valid = valid != fill_value
    else:
        valid = valid.notnull()
    # A pixel is opaque when any band holds valid data.
    valid = valid.any(dim='bands')
    valid = valid.expand_dims('bands')
    valid['bands'] = ['A']
    # match data dtype
    return valid
Create an alpha band DataArray object. If `fill_value` is provided and input data is an integer type then it is used to determine invalid "null" pixels instead of xarray's `isnull` and `notnull` methods. The returned array is 1 where data is valid, 0 where invalid.
def _initialize(self,
                provide_data: List[mx.io.DataDesc],
                provide_label: List[mx.io.DataDesc],
                default_bucket_key: Tuple[int, int]) -> None:
    """
    Initializes model components, creates scoring symbol and module, and binds it.

    :param provide_data: List of data descriptors.
    :param provide_label: List of label descriptors.
    :param default_bucket_key: The default maximum (source, target) lengths.
    """
    # Symbolic inputs: source carries word ids plus optional factors on axis 2;
    # only the first split (the words) is used to compute sequence lengths.
    source = mx.sym.Variable(C.SOURCE_NAME)
    source_words = source.split(num_outputs=self.config.config_embed_source.num_factors,
                                axis=2, squeeze_axis=True)[0]
    source_length = utils.compute_lengths(source_words)
    target = mx.sym.Variable(C.TARGET_NAME)
    target_length = utils.compute_lengths(target)
    # labels shape: (batch_size, target_length) (usually the maximum target sequence length)
    labels = mx.sym.Variable(C.TARGET_LABEL_NAME)

    data_names = [C.SOURCE_NAME, C.TARGET_NAME]
    label_names = [C.TARGET_LABEL_NAME]

    # check provide_{data,label} names
    provide_data_names = [d[0] for d in provide_data]
    utils.check_condition(provide_data_names == data_names,
                          "incompatible provide_data: %s, names should be %s" % (provide_data_names, data_names))
    provide_label_names = [d[0] for d in provide_label]
    utils.check_condition(provide_label_names == label_names,
                          "incompatible provide_label: %s, names should be %s" % (provide_label_names, label_names))

    def sym_gen(seq_lens):
        """
        Returns a (grouped) symbol containing the summed score for each sentence, as well as the entire target
        distributions for each word.
        Also returns data and label names for the BucketingModule.
        """
        source_seq_len, target_seq_len = seq_lens

        # source embedding
        (source_embed,
         source_embed_length,
         source_embed_seq_len) = self.embedding_source.encode(source, source_length, source_seq_len)

        # target embedding
        (target_embed,
         target_embed_length,
         target_embed_seq_len) = self.embedding_target.encode(target, target_length, target_seq_len)

        # encoder
        # source_encoded: (batch_size, source_encoded_length, encoder_depth)
        (source_encoded,
         source_encoded_length,
         source_encoded_seq_len) = self.encoder.encode(source_embed,
                                                       source_embed_length,
                                                       source_embed_seq_len)

        # decoder
        # target_decoded: (batch-size, target_len, decoder_depth)
        target_decoded = self.decoder.decode_sequence(source_encoded, source_encoded_length, source_encoded_seq_len,
                                                      target_embed, target_embed_length, target_embed_seq_len)

        # output layer
        # logits: (batch_size * target_seq_len, target_vocab_size)
        logits = self.output_layer(mx.sym.reshape(data=target_decoded, shape=(-3, 0)))
        # logits after reshape: (batch_size, target_seq_len, target_vocab_size)
        logits = mx.sym.reshape(data=logits, shape=(-4, -1, target_embed_seq_len, 0))

        # Optional temperature flattens/sharpens the output distribution before the softmax.
        if self.softmax_temperature is not None:
            logits = logits / self.softmax_temperature

        # Compute the softmax along the final dimension.
        # target_dists: (batch_size, target_seq_len, target_vocab_size)
        target_dists = mx.sym.softmax(data=logits, axis=2, name=C.SOFTMAX_NAME)

        # Select the label probability, then take their logs.
        # probs and scores: (batch_size, target_seq_len)
        probs = mx.sym.pick(target_dists, labels)
        scores = mx.sym.log(probs)
        if self.score_type == C.SCORING_TYPE_NEGLOGPROB:
            scores = -1 * scores

        # Sum, then apply length penalty. The call to `mx.sym.where` masks out invalid values from scores.
        # zeros and sums: (batch_size,)
        # NOTE(review): labels != 0 presumably treats 0 as the PAD id — confirm against vocab constants.
        zeros = mx.sym.zeros_like(scores)
        sums = mx.sym.sum(mx.sym.where(labels != 0, scores, zeros), axis=1) / (self.length_penalty(target_length - 1))

        # Deal with the potential presence of brevity penalty
        # length_ratio: (batch_size,)
        if self.constant_length_ratio > 0.0:
            # override all ratios with the constant value
            length_ratio = self.constant_length_ratio * mx.sym.ones_like(sums)
        else:
            # predict length ratio if supported
            length_ratio = self.length_ratio(source_encoded, source_encoded_length).reshape((-1,)) \
                if self.length_ratio is not None else mx.sym.zeros_like(sums)
        sums = sums - self.brevity_penalty(target_length - 1, length_ratio * source_encoded_length)

        # Return the sums and the target distributions
        # sums: (batch_size,) target_dists: (batch_size, target_seq_len, target_vocab_size)
        return mx.sym.Group([sums, target_dists]), data_names, label_names

    # Build the symbol once for the maximum bucket, then bind a fixed (non-training) module.
    symbol, _, __ = sym_gen(default_bucket_key)
    self.module = mx.mod.Module(symbol=symbol,
                                data_names=data_names,
                                label_names=label_names,
                                logger=logger,
                                context=self.context)
    self.module.bind(data_shapes=provide_data,
                     label_shapes=provide_label,
                     for_training=False,
                     force_rebind=False,
                     grad_req='null')
Initializes model components, creates scoring symbol and module, and binds it. :param provide_data: List of data descriptors. :param provide_label: List of label descriptors. :param default_bucket_key: The default maximum (source, target) lengths.
def _echo_exports(mapping, values):
    # Print one shell `export` line per mapping entry that has a value set.
    for key, env in mapping.items():
        value = values.get(key)
        if value:
            click.echo("export %s='%s'" % (env, value))


def show_account():
    """
    Exports current account configuration in shell-friendly form.
    Takes into account explicit top-level flags like --organization.
    """
    click.echo("# tonomi api")
    _echo_exports(REVERSE_MAPPING, QUBELL)
    # Only print the cloud-account section when at least one provider key is set.
    # (generator expression replaces the former any(map(lambda ...)) construct)
    if any(PROVIDER.get(key) for key in REVERSE_PROVIDER_MAPPING):
        click.echo("# cloud account")
        _echo_exports(REVERSE_PROVIDER_MAPPING, PROVIDER)
Exports current account configuration in shell-friendly form. Takes into account explicit top-level flags like --organization.
def silence_warnings(*warnings):
    """
    Context manager that silences the given bokeh validation warnings
    for the duration of the block, re-enabling them on exit.
    """
    suppressed = list(warnings)
    for w in suppressed:
        silence(w)
    try:
        yield
    finally:
        # always restore, even if the body raised
        for w in suppressed:
            silence(w, False)
Context manager for silencing bokeh validation warnings.
def vsreenqueue(item_id, item_s, args, **kwargs):
    '''Enqueue a string, or string-like object to other queues, with
       arbitrary arguments, sreenqueue is to reenqueue what sprintf
       is to printf, sreenqueue is to vsreenqueue what sprintf is to
       vsprintf.

       :param item_id: id of the work item being re-enqueued
       :param item_s: the item payload (str or unicode)
       :param args: passed through to vreenqueue
       :param kwargs: may include ``charset`` (consumed here); the rest is
                      forwarded to vreenqueue
    '''
    # pop() atomically reads and removes the optional charset override,
    # replacing the deprecated has_key()/del pair
    charset = kwargs.pop('charset', _c.FSQ_CHARSET)
    kwargs['item_id'] = item_id
    # we coerce here because StringIO.StringIO will coerce on file-write,
    # and cStringIO.StringIO has a bug which injects NULs for unicode
    if isinstance(item_s, unicode):
        try:
            item_s = item_s.encode(charset)
        except UnicodeEncodeError:
            raise FSQCoerceError(errno.EINVAL, u'cannot encode item with'\
                                 u' charset {0}'.format(charset))
    return vreenqueue(StringIO(item_s), item_id, args, **kwargs)
Enqueue a string, or string-like object to other queues, with arbitrary arguments, sreenqueue is to reenqueue what sprintf is to printf, sreenqueue is to vsreenqueue what sprintf is to vsprintf.
def run_process(self, process):
    """Runs a single action.

    Optionally stashes unstaged changes first, runs ``process`` over the
    tracked files, records files the process modified (re-staging them if
    configured), then restores the stash. Returns ``(result, message)``
    where ``message`` is a color-tagged status line.
    """
    message = u'#{bright}'
    message += u'{} '.format(str(process)[:68]).ljust(69, '.')

    stashed = False
    if self.unstaged_changes and not self.include_unstaged_changes:
        out, err, code = self.git.stash(keep_index=True, quiet=True)
        stashed = code == 0

    # try/finally guarantees the stash is restored; the former bare
    # `except: raise` clause was a no-op and has been dropped.
    try:
        result = process(files=self.files, cwd=self.cwd, fix=self.fix)

        # Check for modified files
        out, err, code = self.git.status(porcelain=True, untracked_files='no')
        for line in out.splitlines():
            file_status = Status(line)

            # Make sure the file is one of the files that was processed
            if file_status.path in self.files and file_status.is_modified:
                mtime = os.path.getmtime(file_status.path) if os.path.exists(file_status.path) else 0
                if mtime > self.file_mtimes.get(file_status.path, 0):
                    self.file_mtimes[file_status.path] = mtime
                    result.add_modified_file(file_status.path)
                    if self.stage_modified_files:
                        self.git.add(file_status.path)
    finally:
        if stashed:
            self.git.reset(hard=True, quiet=True)
            self.git.stash.pop(index=True, quiet=True)

    if result.is_success:
        message += u' #{green}[SUCCESS]'
    elif result.is_failure:
        message += u' #{red}[FAILURE]'
    elif result.is_skip:
        message += u' #{cyan}[SKIPPED]'
    elif result.is_error:
        message += u' #{red}[ERROR!!]'

    return result, message
Runs a single action.
def import_from_dict(session, data, sync=None):
    """Imports databases and druid clusters from dictionary

    :param session: database session used by the per-object importers
    :param data: mapping expected to contain DATABASES_KEY and/or
        DRUID_CLUSTERS_KEY lists; anything else is logged and ignored
    :param sync: optional list forwarded to the importers
        (default ``[]``; ``None`` avoids the shared-mutable-default bug)
    """
    if sync is None:
        sync = []
    if isinstance(data, dict):
        logging.info('Importing %d %s',
                     len(data.get(DATABASES_KEY, [])),
                     DATABASES_KEY)
        for database in data.get(DATABASES_KEY, []):
            Database.import_from_dict(session, database, sync=sync)

        logging.info('Importing %d %s',
                     len(data.get(DRUID_CLUSTERS_KEY, [])),
                     DRUID_CLUSTERS_KEY)
        for datasource in data.get(DRUID_CLUSTERS_KEY, []):
            DruidCluster.import_from_dict(session, datasource, sync=sync)
        session.commit()
    else:
        logging.info('Supplied object is not a dictionary.')
Imports databases and druid clusters from dictionary
def evaluate():
    """Evaluation loop: score each training checkpoint as it becomes available."""
    print(eval_model)
    eval_model.initialize(mx.init.Xavier(), ctx=context[0])
    eval_model.hybridize(static_alloc=True, static_shape=True)

    current = args.from_epoch if args.from_epoch else 0
    while current < args.epochs:
        ckpt = '%s.%s' % (args.save, format(current, '02d'))
        if not os.path.exists(ckpt):
            print('Wait for a new checkpoint...')
            # check again after 600 seconds
            time.sleep(600)
            continue
        eval_model.load_parameters(ckpt)
        print('Loaded parameters from checkpoint %s' % ckpt)

        tic = time.time()
        loss = test(test_data, test_batch_size, ctx=context[0])
        toc = time.time()

        print('[Epoch %d] test loss %.2f, test ppl %.2f' % (current, loss, math.exp(loss)))
        print('Epoch %d took %.2f seconds.' % (current, toc - tic))
        sys.stdout.flush()
        current += 1
Evaluate loop for the trained model