code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def set_section_order(self, section_name_list):
    """Set the order of the sections, which are by default unordered.

    Any unlisted sections that exist will be placed at the end of the
    document in no particular order.
    """
    # Copy so later mutation of the caller's list cannot change our order.
    self.section_headings = section_name_list[:]
    # Append any known section that the caller did not mention.
    for section_name in self.sections:
        if section_name not in section_name_list:
            self.section_headings.append(section_name)
Set the order of the sections, which are by default unordered. Any unlisted sections that exist will be placed at the end of the document in no particular order.
def _parse_binary(v, header_d): """ Parses binary string. Note: <str> for py2 and <binary> for py3. """ # This is often a no-op, but it ocassionally converts numbers into strings v = nullify(v) if v is None: return None if six.PY2: try: return six.binary_type(v).strip() except UnicodeEncodeError: return six.text_type(v).strip() else: # py3 try: return six.binary_type(v, 'utf-8').strip() except UnicodeEncodeError: return six.text_type(v).strip()
Parses binary string. Note: <str> for py2 and <binary> for py3.
def copy(self):
    """Return a shallow copy of the Environment.

    The data and sensitive mappings are copied one level deep; the values
    they contain are shared with the original.
    """
    data_copy = self._data.copy()
    sensitive_copy = self._sensitive.copy()
    return self.__class__(data_copy, sensitive_copy, self._cwd)
Retrieve a copy of the Environment. Note that this is a shallow copy.
def delete(self, key, **opts):
    """Remove a key from the cache; silently ignore a missing key."""
    expanded_key, store = self._expand_opts(key, opts)
    try:
        del store[expanded_key]
    except KeyError:
        # Deleting an absent key is a no-op by design.
        pass
Remove a key from the cache.
def ordered(self):
    """Return an equivalent unit cell whose active cell vectors come first."""
    active, inactive = self.active_inactive
    permutation = active + inactive
    reordered_matrix = self.matrix[:,permutation]
    reordered_active = self.active[permutation]
    return UnitCell(reordered_matrix, reordered_active)
An equivalent unit cell with the active cell vectors coming first
def rebuildtable(cls):
    """Regenerate the entire closuretree."""
    closure = cls._closure_model
    closure.objects.all().delete()
    # Seed with a depth-0 self-link for every node, then let each node
    # rebuild its ancestry links.
    self_links = [
        closure(parent_id=row['pk'], child_id=row['pk'], depth=0)
        for row in cls.objects.values("pk")
    ]
    closure.objects.bulk_create(self_links)
    for node in cls.objects.all():
        node._closure_createlink()
Regenerate the entire closuretree.
def get_new_links(self, url, resp):
    """Extract links found on a page and filter them.

    Filters by acceptable protocol, by the input URL's domain (unless
    nonstrict mode is on), and by the crawl regex keywords if given.
    """
    raw_hrefs = resp.xpath('//a/@href')
    candidates = [utils.clean_url(href, url) for href in raw_hrefs]
    # Drop anything whose protocol we do not follow (mailto:, javascript:, ...)
    candidates = [link for link in candidates if utils.check_protocol(link)]
    if not self.args['nonstrict']:
        # Stay within the domain of the page we came from.
        domain = utils.get_domain(url)
        candidates = [link for link in candidates
                      if utils.get_domain(link) == domain]
    if self.args['crawl']:
        # Keep only links matching the user-supplied crawl keywords.
        candidates = utils.re_filter(candidates, self.args['crawl'])
    return candidates
Get new links from a URL and filter them.
def import_csv(filepath: str, currency: str):
    """Import prices from a CSV file into the price database."""
    logger.debug(f"currency = {currency}")
    # Currency symbols are stored uppercase; normalize before importing.
    normalized_currency = currency.upper()
    application = PriceDbApplication()
    application.logger = logger
    application.import_prices(filepath, normalized_currency)
Import prices from CSV file
def unregister_widget(self, widget_cls):
    """Unregisters the given widget.

    No-op when the widget is not registered.
    """
    # NOTE(review): membership is tested against the class __name__ but the
    # deletion key comes from an instance's get_name(); if those ever differ
    # this can KeyError or delete nothing. Preserved as-is — confirm both
    # always agree before changing.
    registered = widget_cls.__name__ in self.widgets
    if registered:
        key = widget_cls().get_name()
        del self.widgets[key]
Unregisters the given widget.
def vert_dpi(self):
    """Integer dots per inch for the height of this image.

    Defaults to 72 when not present in the file, as is often the case.
    """
    physical = self._chunks.pHYs
    if physical is None:
        # No pHYs chunk in the PNG; 72 dpi is the conventional fallback.
        return 72
    return self._dpi(physical.units_specifier, physical.vert_px_per_unit)
Integer dots per inch for the height of this image. Defaults to 72 when not present in the file, as is often the case.
def terminate(self, nowait=False):
    """Finalize and stop service

    Args:
        nowait: set to True to terminate immediately and skip processing
            messages still in the queue
    """
    logger.debug("Acquiring lock for service termination")
    # Serialize termination so concurrent callers cannot both tear down.
    with self.lock:
        logger.debug("Terminating service")
        if not self.listener:
            # A previous terminate() already cleared the listener.
            logger.warning("Service already stopped.")
            return
        self.listener.stop(nowait)
        try:
            if not nowait:
                # Flush whatever is still queued before shutting down.
                self._post_log_batch()
        except Exception:
            # Hand the failure to the user-provided handler if any;
            # otherwise propagate rather than silently swallow it.
            if self.error_handler:
                self.error_handler(sys.exc_info())
            else:
                raise
        finally:
            # Mark the service as stopped even if the final flush failed.
            self.queue = None
            self.listener = None
Finalize and stop service Args: nowait: set to True to terminate immediately and skip processing messages still in the queue
def _require(*names):
    """Helper for @require decorator.

    Ensures each named module is importable on the engine, importing it
    into the global namespace if necessary. Raises UnmetDependency for
    any name that cannot be imported; returns True when all are present.
    """
    from IPython.parallel.error import UnmetDependency
    user_ns = globals()
    for name in names:
        if name in user_ns:
            # Already imported (or otherwise bound) in the engine namespace.
            continue
        try:
            # NOTE: Python 2 exec-statement syntax; imports the module into
            # the engine's globals so the decorated task can use it.
            exec 'import %s'%name in user_ns
        except ImportError:
            raise UnmetDependency(name)
    return True
Helper for @require decorator.
def Froude_densimetric(V, L, rho1, rho2, heavy=True, g=g):
    r'''Calculates the densimetric Froude number :math:`Fr_{den}` for velocity
    `V` geometric length `L`, heavier fluid density `rho1`, and lighter fluid
    density `rho2`. If desired, gravity can be specified as well. Depending on
    the application, this dimensionless number may be defined with the heavy
    phase or the light phase density in the numerator of the square root.
    For some applications, both need to be calculated. The default is to
    calculate with the heavy liquid density on top; set `heavy` to False
    to reverse this.

    .. math::
        Fr = \frac{V}{\sqrt{gL}} \sqrt{\frac{\rho_\text{(1 or 2)}}
        {\rho_1 - \rho_2}}

    Parameters
    ----------
    V : float
        Velocity of the specified phase, [m/s]
    L : float
        Characteristic length, no typical definition [m]
    rho1 : float
        Density of the heavier phase, [kg/m^3]
    rho2 : float
        Density of the lighter phase, [kg/m^3]
    heavy : bool, optional
        Whether or not the density used in the numerator is the heavy phase or
        the light phase, [-]
    g : float, optional
        Acceleration due to gravity, [m/s^2]

    Returns
    -------
    Fr_den : float
        Densimetric Froude number, [-]

    Notes
    -----
    Many alternate definitions including density ratios have been used.

    .. math::
        Fr = \frac{\text{Inertial Force}}{\text{Gravity Force}}

    Where the gravity force is reduced by the relative densities of one fluid
    in another.

    Note that the quantity inside the square root becomes negative if
    `rho2` > `rho1`, so the result is not a real number in that case.

    Examples
    --------
    >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81)
    0.4134543386272418
    >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81, heavy=False)
    0.016013017679205096

    References
    ----------
    .. [1] Hall, A, G Stobie, and R Steven. "Further Evaluation of the
       Performance of Horizontally Installed Orifice Plate and Cone
       Differential Pressure Meters with Wet Gas Flows." In International
       SouthEast Asia Hydrocarbon Flow Measurement Workshop, KualaLumpur,
       Malaysia, 2008.
    '''
    # Numerator density: heavy phase by default, light phase when heavy=False.
    rho3 = rho1 if heavy else rho2
    return V/((g*L)**0.5)*(rho3/(rho1 - rho2))**0.5
r'''Calculates the densimetric Froude number :math:`Fr_{den}` for velocity `V` geometric length `L`, heavier fluid density `rho1`, and lighter fluid density `rho2`. If desired, gravity can be specified as well. Depending on the application, this dimensionless number may be defined with the heavy phase or the light phase density in the numerator of the square root. For some applications, both need to be calculated. The default is to calculate with the heavy liquid ensity on top; set `heavy` to False to reverse this. .. math:: Fr = \frac{V}{\sqrt{gL}} \sqrt{\frac{\rho_\text{(1 or 2)}} {\rho_1 - \rho_2}} Parameters ---------- V : float Velocity of the specified phase, [m/s] L : float Characteristic length, no typical definition [m] rho1 : float Density of the heavier phase, [kg/m^3] rho2 : float Density of the lighter phase, [kg/m^3] heavy : bool, optional Whether or not the density used in the numerator is the heavy phase or the light phase, [-] g : float, optional Acceleration due to gravity, [m/s^2] Returns ------- Fr_den : float Densimetric Froude number, [-] Notes ----- Many alternate definitions including density ratios have been used. .. math:: Fr = \frac{\text{Inertial Force}}{\text{Gravity Force}} Where the gravity force is reduced by the relative densities of one fluid in another. Note that an Exception will be raised if rho1 > rho2, as the square root becomes negative. Examples -------- >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81) 0.4134543386272418 >>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81, heavy=False) 0.016013017679205096 References ---------- .. [1] Hall, A, G Stobie, and R Steven. "Further Evaluation of the Performance of Horizontally Installed Orifice Plate and Cone Differential Pressure Meters with Wet Gas Flows." In International SouthEast Asia Hydrocarbon Flow Measurement Workshop, KualaLumpur, Malaysia, 2008.
def points_possible(self, include_hidden=False):
    """Return the total points possible for this project.

    Hidden testables are excluded unless ``include_hidden`` is True.
    """
    total = 0
    for testable in self.testables:
        if testable.is_hidden and not include_hidden:
            continue
        total += sum(test_case.points for test_case in testable.test_cases)
    return total
Return the total points possible for this project.
def fix_contig_names(asseembly_path):
    """Removes whitespace from the assembly contig names

    Parameters
    ----------
    asseembly_path : path to assembly file

    Returns
    -------
    str: Path to new assembly file with fixed contig names
    """
    fixed_assembly = "fixed_assembly.fa"
    with open(asseembly_path) as src, open(fixed_assembly, "w") as dst:
        for record_line in src:
            if record_line.startswith(">"):
                # Header line: replace spaces so downstream tools do not
                # truncate the contig name at the first whitespace.
                dst.write(record_line.replace(" ", "_"))
            else:
                dst.write(record_line)
    return fixed_assembly
Removes whitespace from the assembly contig names Parameters ---------- asseembly_path : path to assembly file Returns ------- str: Path to new assembly file with fixed contig names
def run(self, ):
    """Start the configeditor

    Builds the release window parented to the Maya main window, wires the
    scene release actions into it, and preselects the last opened work
    file (if MayaGenesis recorded one) before showing the window.

    :returns: None
    :rtype: None
    :raises: None
    """
    ra = SceneReleaseActions()
    mayawin = maya_main_window()
    # Parent to the Maya main window so the release window stays on top.
    self.rw = ReleaseWin(FILETYPES["mayamainscene"], parent=mayawin)
    self.rw.set_release_actions(ra)
    pm = MayaPluginManager.get()
    genesis = pm.get_plugin("MayaGenesis")
    c = genesis.get_config()
    try:
        f = models.TaskFile.objects.get(pk=c['lastfile'])
    except models.TaskFile.DoesNotExist:
        # No (valid) last file recorded; open without a preselection.
        pass
    else:
        if f.releasetype == 'work':
            # Preselect the last opened work file in the browser.
            self.rw.browser.set_selection(f)
    self.rw.show()
Start the configeditor :returns: None :rtype: None :raises: None
def aggregate_hazard_summary(impact, aggregate_hazard):
    """Compute the summary from the source layer to the aggregate_hazard layer.

    Source layer :
    |exp_id|exp_class|haz_id|haz_class|aggr_id|aggr_name|affected|extra*|

    Target layer :
    | aggr_id | aggr_name | haz_id | haz_class | extra* |

    Output layer :
    |aggr_id| aggr_name|haz_id|haz_class|affected|extra*|count per exposure*|

    :param impact: The layer to aggregate vector layer.
    :type impact: QgsVectorLayer

    :param aggregate_hazard: The aggregate_hazard vector layer where to write
        statistics.
    :type aggregate_hazard: QgsVectorLayer

    :return: The new aggregate_hazard layer with summary.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    source_fields = impact.keywords['inasafe_fields']
    target_fields = aggregate_hazard.keywords['inasafe_fields']
    target_compulsory_fields = [
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field
    ]
    check_inputs(target_compulsory_fields, target_fields)
    source_compulsory_fields = [
        exposure_id_field,
        exposure_class_field,
        aggregation_id_field,
        aggregation_name_field,
        hazard_id_field,
        hazard_class_field
    ]
    check_inputs(source_compulsory_fields, source_fields)
    # Resolve the actual attribute names behind the InaSAFE field keys.
    aggregation_id = target_fields[aggregation_id_field['key']]
    hazard_id = target_fields[hazard_id_field['key']]
    hazard_class = target_fields[hazard_class_field['key']]
    exposure_class = source_fields[exposure_class_field['key']]
    exposure_class_index = impact.fields().lookupField(exposure_class)
    unique_exposure = list(impact.uniqueValues(exposure_class_index))
    fields = ['aggregation_id', 'hazard_id']
    absolute_values = create_absolute_values_structure(impact, fields)
    # We need to know what kind of exposure we are going to count.
    # the size, or the number of features or population.
    field_index = report_on_field(impact)
    aggregate_hazard.startEditing()
    # New summary columns are appended after the existing ones.
    shift = aggregate_hazard.fields().count()
    dynamic_structure = [
        [exposure_count_field, unique_exposure],
    ]
    add_fields(
        aggregate_hazard,
        absolute_values,
        [affected_field, total_field],
        dynamic_structure,
    )
    flat_table = FlatTable('aggregation_id', 'hazard_id', 'exposure_class')
    request = QgsFeatureRequest()
    # Attributes only; geometry is not needed for the summary.
    request.setFlags(QgsFeatureRequest.NoGeometry)
    LOGGER.debug('Computing the aggregate hazard summary.')
    # First pass: accumulate per (aggregation, hazard, exposure) counts.
    for feature in impact.getFeatures(request):
        # Field_index can be equal to 0.
        if field_index is not None:
            value = feature[field_index]
        else:
            # No size/population field: count one per feature.
            value = 1
        aggregation_value = feature[aggregation_id]
        hazard_value = feature[hazard_id]
        if (hazard_value is None
                or hazard_value == ''
                or (hasattr(hazard_value, 'isNull')
                    and hazard_value.isNull())):
            hazard_value = not_exposed_class['key']
        exposure_value = feature[exposure_class]
        if (exposure_value is None
                or exposure_value == ''
                or (hasattr(exposure_value, 'isNull')
                    and exposure_value.isNull())):
            exposure_value = 'NULL'
        flat_table.add_value(
            value,
            aggregation_id=aggregation_value,
            hazard_id=hazard_value,
            exposure_class=exposure_value
        )
        # We summarize every absolute values.
        for field, field_definition in list(absolute_values.items()):
            value = feature[field]
            if (value == ''
                    or value is None
                    or (hasattr(value, 'isNull')
                        and value.isNull())):
                value = 0
            field_definition[0].add_value(
                value,
                aggregation_id=aggregation_value,
                hazard_id=hazard_value
            )
    hazard_keywords = aggregate_hazard.keywords['hazard_keywords']
    hazard = hazard_keywords['hazard']
    classification = hazard_keywords['classification']
    exposure_keywords = impact.keywords['exposure_keywords']
    exposure = exposure_keywords['exposure']
    # Second pass: write the accumulated statistics into the target layer.
    for area in aggregate_hazard.getFeatures(request):
        aggregation_value = area[aggregation_id]
        feature_hazard_id = area[hazard_id]
        if (feature_hazard_id == ''
                or feature_hazard_id is None
                or (hasattr(feature_hazard_id, 'isNull')
                    and feature_hazard_id.isNull())):
            feature_hazard_id = not_exposed_class['key']
        feature_hazard_value = area[hazard_class]
        total = 0
        # One column per unique exposure class, in enumeration order.
        for i, val in enumerate(unique_exposure):
            sum = flat_table.get_value(
                aggregation_id=aggregation_value,
                hazard_id=feature_hazard_id,
                exposure_class=val
            )
            total += sum
            aggregate_hazard.changeAttributeValue(area.id(), shift + i, sum)
        affected = post_processor_affected_function(
            exposure=exposure,
            hazard=hazard,
            classification=classification,
            hazard_class=feature_hazard_value)
        affected = tr(str(affected))
        aggregate_hazard.changeAttributeValue(
            area.id(), shift + len(unique_exposure), affected)
        aggregate_hazard.changeAttributeValue(
            area.id(), shift + len(unique_exposure) + 1, total)
        # Absolute-value columns follow the affected and total columns.
        for i, field in enumerate(absolute_values.values()):
            value = field[0].get_value(
                aggregation_id=aggregation_value,
                hazard_id=feature_hazard_id
            )
            aggregate_hazard.changeAttributeValue(
                area.id(), shift + len(unique_exposure) + 2 + i, value)
    aggregate_hazard.commitChanges()
    aggregate_hazard.keywords['title'] = (
        layer_purpose_aggregate_hazard_impacted['name'])
    # setLayerName was renamed to setName in QGIS 2.18.
    if qgis_version() >= 21800:
        aggregate_hazard.setName(aggregate_hazard.keywords['title'])
    else:
        aggregate_hazard.setLayerName(aggregate_hazard.keywords['title'])
    aggregate_hazard.keywords['layer_purpose'] = (
        layer_purpose_aggregate_hazard_impacted['key'])
    aggregate_hazard.keywords['exposure_keywords'] = impact.keywords.copy()
    check_layer(aggregate_hazard)
    return aggregate_hazard
Compute the summary from the source layer to the aggregate_hazard layer. Source layer : |exp_id|exp_class|haz_id|haz_class|aggr_id|aggr_name|affected|extra*| Target layer : | aggr_id | aggr_name | haz_id | haz_class | extra* | Output layer : |aggr_id| aggr_name|haz_id|haz_class|affected|extra*|count ber exposure*| :param impact: The layer to aggregate vector layer. :type impact: QgsVectorLayer :param aggregate_hazard: The aggregate_hazard vector layer where to write statistics. :type aggregate_hazard: QgsVectorLayer :return: The new aggregate_hazard layer with summary. :rtype: QgsVectorLayer .. versionadded:: 4.0
def get_or_create(cls, issue, header, text=None):
    """Get or create the dashboard comment in this issue.

    Scans the issue's comments for one whose first line equals ``header``;
    creates a new comment when none matches. If ``text`` is given, the
    comment body is updated with it either way.
    """
    for comment in get_comments(issue):
        try:
            if comment.body.splitlines()[0] == header:
                obj = cls(comment, header)
                break
        except IndexError:
            # The comment body is empty
            pass
    # for/else: the loop finished without break, i.e. no dashboard
    # comment exists yet, so create one.
    else:
        comment = create_comment(issue, header)
        obj = cls(comment, header)
    if text:
        obj.edit(text)
    return obj
Get or create the dashboard comment in this issue.
def summaries(self, sc, limit=None):
    """Summary of the files contained in the current dataset

    Every item in the summary is a dict containing a key name and the
    corresponding size of the key item in bytes, e.g.::

        {'key': 'full/path/to/my/key', 'size': 200}

    :param limit: Max number of objects to retrieve
    :return: An iterable of summaries
    """
    # Work on a copy so the dataset's own clauses are not mutated.
    clauses = copy(self.clauses)
    schema = self.schema
    if self.prefix:
        schema = ['prefix'] + schema
        # Add a clause for the prefix that always returns True, in case
        # the output is not filtered at all (so that we do a scan/filter
        # on the prefix directory)
        clauses['prefix'] = lambda x: True
    with futures.ThreadPoolExecutor(self.max_concurrency) as executor:
        scanned = self._scan(schema, [self.prefix], clauses, executor)
    # Fan the scanned prefixes out over Spark to list the actual keys.
    keys = sc.parallelize(scanned).flatMap(self.store.list_keys)
    return keys.take(limit) if limit else keys.collect()
Summary of the files contained in the current dataset Every item in the summary is a dict containing a key name and the corresponding size of the key item in bytes, e.g.:: {'key': 'full/path/to/my/key', 'size': 200} :param limit: Max number of objects to retrieve :return: An iterable of summaries
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
        oappend=False):
    """
    Accepts filename and returns filehandle.

    Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
    """
    # A list of files: either hand all gz/bz2 names to one shell pipe,
    # or chain plain files with fileinput.
    if isinstance(filename, list):
        assert "r" in mode
        if filename[0].endswith((".gz", ".bz2")):
            filename = " ".join(filename)  # allow opening multiple gz/bz2 files
        else:
            import fileinput
            return fileinput.input(filename)
    # Remote object: download locally first, then open the local copy.
    if filename.startswith("s3://"):
        from jcvi.utils.aws import pull_from_s3
        filename = pull_from_s3(filename)
    if filename in ("-", "stdin"):
        assert "r" in mode
        fp = sys.stdin
    elif filename == "stdout":
        assert "w" in mode
        fp = sys.stdout
    elif filename == "stderr":
        assert "w" in mode
        fp = sys.stderr
    elif filename == "tmp" and mode == "w":
        # Caller is responsible for cleaning up the temp file (delete=False).
        from tempfile import NamedTemporaryFile
        fp = NamedTemporaryFile(delete=False)
    elif filename.endswith(".gz"):
        if 'r' in mode:
            # Stream through an external gunzip process for reading.
            cmd = "gunzip -c {0}".format(filename)
            fp = popen(cmd, debug=False)
        elif 'w' in mode:
            import gzip
            fp = gzip.open(filename, mode)
    elif filename.endswith(".bz2"):
        if 'r' in mode:
            cmd = "bzcat {0}".format(filename)
            fp = popen(cmd, debug=False)
        elif 'w' in mode:
            import bz2
            fp = bz2.BZ2File(filename, mode)
    else:
        if checkexists:
            assert mode == "w"
            # skipcheck: only test existence; otherwise ask interactively
            # (check_exists) whether to overwrite/append.
            overwrite = (not op.exists(filename)) if skipcheck \
                else check_exists(filename, oappend)
            if overwrite:
                if oappend:
                    fp = open(filename, "a")
                else:
                    fp = open(filename, "w")
            else:
                logging.debug("File `{0}` already exists. Skipped."\
                        .format(filename))
                return None
        else:
            fp = open(filename, mode)
    return fp
Accepts filename and returns filehandle. Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
def clean_data(freqs, data, chunk, avg_bin):
    """ Extract time-varying (wandering) lines from strain data.

    Parameters
    ----------
    freqs: list
        List containing the frequencies of the wandering lines.
    data: pycbc.types.TimeSeries
        Strain data to extract the wandering lines from.
    chunk: float
        Duration of the chunks the data will be divided into to account
        for the time variation of the wandering lines. Should be smaller
        than data.duration, and allow for at least a few chunks.
    avg_bin: float
        Duration of the bins each chunk will be divided into for averaging
        the inner product when measuring the parameters of the line. Should
        be smaller than chunk.

    Returns
    -------
    data: pycbc.types.TimeSeries
        The strain data with the wandering lines removed.
    """
    if avg_bin >= chunk:
        raise ValueError('The bin size for averaging the inner product '
                         'must be less than the chunk size.')
    if chunk >= data.duration:
        raise ValueError('The chunk size must be less than the '
                         'data duration.')
    # Half-chunk steps so consecutive chunks overlap by 50%.
    steps = numpy.arange(0, int(data.duration/chunk)-0.5, 0.5)
    seglen = chunk * data.sample_rate
    tref = float(data.start_time)
    for freq in freqs:
        for step in steps:
            start, end = int(step*seglen), int((step+1)*seglen)
            chunk_line = matching_line(freq, data[start:end],
                                       tref, bin_size=avg_bin)
            # Apply hann window on sides of chunk_line to smooth boundaries
            # and avoid discontinuities
            hann_window = numpy.hanning(len(chunk_line))
            apply_hann = TimeSeries(numpy.ones(len(chunk_line)),
                                    delta_t=chunk_line.delta_t,
                                    epoch=chunk_line.start_time)
            # BUGFIX: use integer division -- `len(...)/2` is a float on
            # Python 3 and raises TypeError when used as a slice index.
            half = len(hann_window) // 2
            if step == 0:
                # First chunk: taper only the trailing edge.
                apply_hann.data[half:] *= hann_window[half:]
            elif step == steps[-1]:
                # Last chunk: taper only the leading edge.
                apply_hann.data[:half] *= hann_window[:half]
            else:
                apply_hann.data *= hann_window
            chunk_line.data *= apply_hann.data
            data.data[start:end] -= chunk_line.data.real
    return data
Extract time-varying (wandering) lines from strain data. Parameters ---------- freqs: list List containing the frequencies of the wandering lines. data: pycbc.types.TimeSeries Strain data to extract the wandering lines from. chunk: float Duration of the chunks the data will be divided into to account for the time variation of the wandering lines. Should be smaller than data.duration, and allow for at least a few chunks. avg_bin: float Duration of the bins each chunk will be divided into for averaging the inner product when measuring the parameters of the line. Should be smaller than chunk. Returns ------- data: pycbc.types.TimeSeries The strain data with the wandering lines removed.
def export_file(self, record, field, event=None, return_format='json'):
    """
    Export the contents of a file stored for a particular record

    Notes
    -----
    Unlike other export methods, this works on a single record.

    Parameters
    ----------
    record : str
        record ID
    field : str
        field name containing the file to be exported.
    event: str
        for longitudinal projects, specify the unique event here
    return_format: ('json'), 'csv', 'xml'
        format of error message

    Returns
    -------
    content : bytes
        content of the file
    content_map : dict
        content-type dictionary
    """
    self._check_file_field(field)
    # Build the payload; the file API expects 'returnFormat', not 'format'.
    payload = self.__basepl(content='file', format=return_format)
    del payload['format']
    payload['returnFormat'] = return_format
    payload['action'] = 'export'
    payload['field'] = field
    payload['record'] = record
    if event:
        payload['event'] = event
    content, headers = self._call_api(payload, 'exp_file')
    # REDCap adds some useful things in content-type
    content_map = {}
    if 'content-type' in headers:
        for part in headers['content-type'].split(';'):
            part = part.strip()
            if '=' in part:
                pieces = part.split('=')
                content_map[pieces[0]] = pieces[1].replace('"', '')
    return content, content_map
Export the contents of a file stored for a particular record Notes ----- Unlike other export methods, this works on a single record. Parameters ---------- record : str record ID field : str field name containing the file to be exported. event: str for longitudinal projects, specify the unique event here return_format: ('json'), 'csv', 'xml' format of error message Returns ------- content : bytes content of the file content_map : dict content-type dictionary
def set_length_and_maybe_checksums(self, record, payload_offset=None):
    '''Set the content length and possibly the checksums.'''
    if not self._params.digests:
        record.set_content_length()
    else:
        record.compute_checksum(payload_offset)
Set the content length and possibly the checksums.
def batch_remove_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Remove Absolute (retrain)
    xlabel = "Fraction of features removed"
    ylabel = "1 - ROC AUC"
    transform = "one_minus"
    sort_order = 13
    """
    # NOTE: the docstring above doubles as plot metadata; keep it intact.
    metric = sklearn.metrics.roc_auc_score
    return __run_batch_abs_metric(
        measures.batch_remove_retrain, X, y, model_generator,
        method_name, metric, num_fcounts
    )
Batch Remove Absolute (retrain) xlabel = "Fraction of features removed" ylabel = "1 - ROC AUC" transform = "one_minus" sort_order = 13
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False, get_abstracts=False, prepend_title=False, mesh_annotations=False): """Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'. 
""" # Iterate over the articles and build the results dict results = {} pm_articles = tree.findall('./PubmedArticle') for art_ix, pm_article in enumerate(pm_articles): medline_citation = pm_article.find('./MedlineCitation') article_info = _get_article_info(medline_citation, pm_article.find('PubmedData')) journal_info = _get_journal_info(medline_citation, get_issns_from_nlm) context_info = _get_annotations(medline_citation) # Build the result result = {} result.update(article_info) result.update(journal_info) result.update(context_info) # Get the abstracts if requested if get_abstracts: abstract = _abstract_from_article_element( medline_citation.find('Article'), prepend_title=prepend_title ) result['abstract'] = abstract # Add to dict results[article_info['pmid']] = result return results
Get metadata for an XML tree containing PubmedArticle elements. Documentation on the XML structure can be found at: - https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html - https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html Parameters ---------- tree : xml.etree.ElementTree ElementTree containing one or more PubmedArticle elements. get_issns_from_nlm : boolean Look up the full list of ISSN number for the journal associated with the article, which helps to match articles to CrossRef search results. Defaults to False, since it slows down performance. get_abstracts : boolean Indicates whether to include the Pubmed abstract in the results. prepend_title : boolean If get_abstracts is True, specifies whether the article title should be prepended to the abstract text. mesh_annotations : boolean If True, extract mesh annotations from the pubmed entries and include in the returned data. If false, don't. Returns ------- dict of dicts Dictionary indexed by PMID. Each value is a dict containing the following fields: 'doi', 'title', 'authors', 'journal_title', 'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
def pickledump(theobject, fname):
    """Pickle ``theobject`` into the file named ``fname``.

    Same as ``pickle.dump(theobject, fhandle)`` but takes a filename
    instead of an open file handle.
    """
    # Use a context manager so the handle is flushed and closed even on
    # error -- the previous version leaked the open handle, which could
    # leave the file unflushed until interpreter exit.
    with open(fname, 'wb') as fhandle:
        pickle.dump(theobject, fhandle)
same as pickle.dump(theobject, fhandle).takes filename as parameter
def check_lazy_load_gemeente(f):
    '''
    Decorator function to lazy load a :class:`Gemeente`.
    '''
    def wrapper(*args):
        gemeente = args[0]
        lazy_attrs = (
            gemeente._centroid,
            gemeente._bounding_box,
            gemeente._taal_id,
            gemeente._metadata,
        )
        # Fetch the full record once if any lazily-loaded field is missing.
        if any(attr is None for attr in lazy_attrs):
            log.debug('Lazy loading Gemeente %d', gemeente.id)
            gemeente.check_gateway()
            loaded = gemeente.gateway.get_gemeente_by_id(gemeente.id)
            gemeente._taal_id = loaded._taal_id
            gemeente._centroid = loaded._centroid
            gemeente._bounding_box = loaded._bounding_box
            gemeente._metadata = loaded._metadata
        return f(*args)
    return wrapper
Decorator function to lazy load a :class:`Gemeente`.
def point(self, t):
    """Return the coordinates of the quadratic Bezier curve evaluated at t."""
    # Standard Bernstein form: (1-t)^2 P0 + 2(1-t)t P1 + t^2 P2.
    one_minus_t = 1 - t
    return (one_minus_t**2*self.start
            + 2*one_minus_t*t*self.control
            + t**2*self.end)
returns the coordinates of the Bezier curve evaluated at t.
def put_group_policy(self, group_name, policy_name, policy_json):
    """
    Adds or updates the specified policy document for the specified group.

    :type group_name: string
    :param group_name: The name of the group the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to get.

    :type policy_json: string
    :param policy_json: The policy document.
    """
    params = dict(
        GroupName=group_name,
        PolicyName=policy_name,
        PolicyDocument=policy_json,
    )
    return self.get_response('PutGroupPolicy', params, verb='POST')
Adds or updates the specified policy document for the specified group. :type group_name: string :param group_name: The name of the group the policy is associated with. :type policy_name: string :param policy_name: The policy document to get. :type policy_json: string :param policy_json: The policy document.
def draw_panel(self, data, panel_params, coord, ax, **params):
    """
    Plot all groups

    For efficiency, geoms that do not need to partition
    different groups before plotting should override this
    method and avoid the groupby.

    Parameters
    ----------
    data : dataframe
        Data to be plotted by this geom. This is the
        dataframe created in the plot_build pipeline.
    panel_params : dict
        The scale information as may be required by the
        axes. At this point, the information is about
        ranges, ticks and labels. Keys of interest to
        the geom are::

            'x_range'  # tuple
            'y_range'  # tuple

    coord : coord
        Coordinate (e.g. coord_cartesian) system of the
        geom.
    ax : axes
        Axes on which to plot.
    params : dict
        Combined parameters for the geom and stat. Also
        includes the 'zorder'.
    """
    # Draw each group independently; reset the index so group-local
    # positional indexing inside draw_group works as expected.
    for _, gdata in data.groupby('group'):
        gdata.reset_index(inplace=True, drop=True)
        self.draw_group(gdata, panel_params, coord, ax, **params)
Plot all groups For effeciency, geoms that do not need to partition different groups before plotting should override this method and avoid the groupby. Parameters ---------- data : dataframe Data to be plotted by this geom. This is the dataframe created in the plot_build pipeline. panel_params : dict The scale information as may be required by the axes. At this point, that information is about ranges, ticks and labels. Keys of interest to the geom are:: 'x_range' # tuple 'y_range' # tuple coord : coord Coordinate (e.g. coord_cartesian) system of the geom. ax : axes Axes on which to plot. params : dict Combined parameters for the geom and stat. Also includes the 'zorder'.
def send_Linux_notify(title, content, img_path):
    '''Send a Linux desktop notification.'''
    # Note: on GNOME the notification auto-hides only after some user
    # interaction (mouse, keyboard, etc.).
    command = [
        'notify-send',
        '-a', 'Douban.fm',         # application name shown by the daemon
        '-t', '5000',              # auto-hide timeout (milliseconds)
        '--hint=int:transient:1',  # *suggest* the desktop manager not keep it
    ]
    if img_path is not None:
        # Optional notification icon.
        command.extend(['-i', img_path])
    subprocess.call(command + [title, content])
发送Linux桌面通知
def read_json(self):
    """Read a single line and decode it as JSON.

    Can raise an EOFError() when the input source was closed.
    """
    raw = self.stdin.readline()
    # readline() returns an empty string only at end-of-stream.
    if not raw:
        raise EOFError()
    return json.loads(raw)
Read a single line and decode it as JSON. Can raise an EOFError() when the input source was closed.
def _parse_unit(measure_or_unit_abbreviation): """ Helper function that extracts constant factors from unit specifications. This allows to specify units similar to this: 10^6 m^3. Return a couple (unit, factor) """ try: float(measure_or_unit_abbreviation[0]) # The measure contains the values and the unit_abbreviation factor, unit_abbreviation = measure_or_unit_abbreviation.split(' ', 1) return unit_abbreviation, float(factor) except ValueError: # The measure just contains the unit_abbreviation return measure_or_unit_abbreviation, 1.0
Helper function that extracts constant factors from unit specifications. This allows to specify units similar to this: 10^6 m^3. Return a couple (unit, factor)
def _get_argspec(f): """ Get argspec of a function. Supports both Python 2 and Python 3. """ if sys.version_info[0] < 3: argspec = inspect.getargspec(f) else: # `getargspec` is deprecated since python3.0 (incompatible with function annotations). # See SPARK-23569. argspec = inspect.getfullargspec(f) return argspec
Get argspec of a function. Supports both Python 2 and Python 3.
def save_ewif_file(self, path: str, password: str) -> None: """ Save an Encrypted Wallet Import Format file (WIF v2) :param path: Path to file :param password: """ # version version = 1 # add version to seed salt = libnacl.crypto_hash_sha256( libnacl.crypto_hash_sha256( Base58Encoder.decode(self.pubkey)))[0:4] # SCRYPT password_bytes = password.encode("utf-8") scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64) derivedhalf1 = scrypt_seed[0:32] derivedhalf2 = scrypt_seed[32:64] # XOR seed1_xor_derivedhalf1_1 = bytes(xor_bytes(self.seed[0:16], derivedhalf1[0:16])) seed2_xor_derivedhalf1_2 = bytes(xor_bytes(self.seed[16:32], derivedhalf1[16:32])) # AES aes = pyaes.AESModeOfOperationECB(derivedhalf2) encryptedhalf1 = aes.encrypt(seed1_xor_derivedhalf1_1) encryptedhalf2 = aes.encrypt(seed2_xor_derivedhalf1_2) # add format to final seed (1=WIF,2=EWIF) seed_bytes = b'\x02' + salt + encryptedhalf1 + encryptedhalf2 # calculate checksum sha256_v1 = libnacl.crypto_hash_sha256(seed_bytes) sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1) checksum = sha256_v2[0:2] # B58 encode final key string ewif_key = Base58Encoder.encode(seed_bytes + checksum) # save file with open(path, 'w') as fh: fh.write( """Type: EWIF Version: {version} Data: {data}""".format(version=version, data=ewif_key) )
Save an Encrypted Wallet Import Format file (WIF v2) :param path: Path to file :param password:
def _track(self, class_name):
    """Keep track of which test cases have executed."""
    # Only act the first time a test-case class is seen.
    if self._test_cases.get(class_name) is None:
        if self.streaming and self.header:
            # In streaming mode, emit the class header as soon as the
            # class is first encountered.
            self._write_test_case_header(class_name, self.stream)
        self._test_cases[class_name] = []
        # NOTE(review): nesting reconstructed from flattened source —
        # confirm whether the combined bookkeeping belongs inside the
        # first-seen branch or at the top level of the method.
        if self.combined:
            self.combined_test_cases_seen.append(class_name)
Keep track of which test cases have executed.
def ipv6_ipv6route_route_dest(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    ipv6_el = ET.SubElement(
        config, "ipv6", xmlns="urn:brocade.com:mgmt:brocade-common-def")
    ipv6route_el = ET.SubElement(
        ipv6_el, "ipv6route", xmlns="urn:brocade.com:mgmt:brocade-ip-forward")
    route_el = ET.SubElement(ipv6route_el, "route")
    dest_el = ET.SubElement(route_el, "dest")
    dest_el.text = kwargs.pop('dest')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def op_paths(self, path_base=None):
    # type: (Union[str, UrlPath]) -> Generator[Tuple[UrlPath, Operation]]
    """
    Yield all operations stored in child containers, prefixed with
    this resource's path.
    """
    if path_base:
        base = path_base + self.path_prefix
    else:
        base = self.path_prefix or UrlPath()
    for container in self.containers:
        for op_path in container.op_paths(base):
            yield op_path
Return all operations stored in containers.
def generate_filename(self, mark, **kwargs):
    """Build a descriptive filename for the watermarked image,
    encoding the watermark settings and source-file metadata."""
    kwargs = dict(kwargs)
    kwargs['opacity'] = int(kwargs['opacity'] * 100)
    kwargs['st_mtime'] = kwargs['fstat'].st_mtime
    kwargs['st_size'] = kwargs['fstat'].st_size

    pieces = [
        '%(original_basename)s',
        'wm',
        'w%(watermark)i',
        'o%(opacity)i',
        'gs%(greyscale)i',
        'r%(rotation)i',
        'fm%(st_mtime)i',
        'fz%(st_size)i',
        'p%(position)s',
    ]
    scale = kwargs.get('scale', None)
    if scale and scale != mark.size:
        # Scale expressed as a percentage of the mark's width.
        pieces.append('_s%i' % (float(kwargs['scale'][0]) / mark.size[0] * 100))
    if kwargs.get('tile', None):
        pieces.append('_tiled')

    # Assemble the template, then interpolate the collected values.
    template = '%s%s' % ('_'.join(pieces), kwargs['ext'])
    return template % kwargs
Comes up with a good filename for the watermarked image
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None):
    '''Parse the telegram using the EEP identified by FUNC and TYPE.'''
    # Switch to the requested EEP profile first, if one was given.
    if rorg_func is not None and rorg_type is not None:
        self.select_eep(rorg_func, rorg_type, direction, command)
    # Decode the bit fields and merge the decoded values in.
    provides, values = self.eep.get_values(
        self._profile, self._bit_data, self._bit_status)
    self.parsed.update(values)
    return list(provides)
Parse EEP based on FUNC and TYPE
def threaded(system, func, *args, **kwargs):
    """Wrap ``func`` for running in a thread (thread_init used
    decorator-style): exceptions are reported instead of raised,
    and the call returns False on failure."""
    @wraps(func)
    def safe_call(*call_args, **call_kwargs):
        try:
            return func(*call_args, **call_kwargs)
        except Exception as exc:
            if system.raven_client:
                system.raven_client.captureException()
            logger.exception('Exception occurred in thread: %s', exc)
            return False
    # Bind the supplied arguments so the result is a zero-arg callable.
    return lambda: safe_call(*args, **kwargs)
uses thread_init as a decorator-style
def retry_shipper_tasks(self, project_name, logstore_name, shipper_name, task_list):
    """Retry failed shipper tasks; only failed tasks can be retried.
    An unsuccessful operation will cause a LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type shipper_name: string
    :param shipper_name: the shipper name

    :type task_list: string array
    :param task_list: the failed task_id list, e.g.
        ['failed_task_id_1', 'failed_task_id_2', ...];
        at most 10 tasks can be retried per call

    :return: RetryShipperTasksResponse

    :raise: LogException
    """
    body = six.b(json.dumps(task_list))
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(body)),
    }
    resource = "/logstores/" + logstore_name + "/shipper/" + shipper_name + "/tasks"
    (resp, header) = self._send("PUT", project_name, body, resource, {}, headers)
    return RetryShipperTasksResponse(header, resp)
retry failed tasks; only failed tasks can be retried
Unsuccessful operation will cause a LogException.

:type project_name: string
:param project_name: the Project name

:type logstore_name: string
:param logstore_name: the logstore name

:type shipper_name: string
:param shipper_name: the shipper name

:type task_list: string array
:param task_list: the failed task_id list, e.g. ['failed_task_id_1', 'failed_task_id_2', ...]; at most 10 tasks can be retried per call

:return: RetryShipperTasksResponse

:raise: LogException
def parse_date_created(dct):
    """Extract (year, month, day) ints from a profile's 'date-created'
    entry; all None when the entry is empty."""
    date = dct['date-created']
    if not date:
        return (None, None, None)
    return tuple(int(date[key]) for key in ('@year', '@month', '@day'))
Helper function to parse date-created from profile.
def pivoted_cholesky(matrix, max_rank, diag_rtol=1e-3, name=None):
  """Computes the (partial) pivoted cholesky decomposition of `matrix`.

  The pivoted Cholesky is a low rank approximation of the Cholesky
  decomposition of `matrix`, i.e. as described in
  [(Harbrecht et al., 2012)][1]. The currently-worst-approximated diagonal
  element is selected as the pivot at each iteration. This yields from a
  `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn, N, K]` shaped rank-`K`
  approximation `lr` such that `lr @ lr.T ~= matrix`. Note that, unlike the
  Cholesky decomposition, `lr` is not triangular even in a rectangular-matrix
  sense. However, under a permutation it could be made triangular (it has one
  more zero in each column as you move to the right).

  Such a matrix can be useful as a preconditioner for conjugate gradient
  optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can
  be cheaply done via the Woodbury matrix identity, as implemented by
  `tf.linalg.LinearOperatorLowRankUpdate`.

  Args:
    matrix: Floating point `Tensor` batch of symmetric, positive definite
      matrices.
    max_rank: Scalar `int` `Tensor`, the rank at which to truncate the
      approximation.
    diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the
      errors of all diagonal elements of `lr @ lr.T` are each lower than
      `element * diag_rtol`, iteration is permitted to terminate early.
    name: Optional name for the op.

  Returns:
    lr: Low rank pivoted Cholesky approximation of `matrix`.

  #### References

  [1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by
       the pivoted Cholesky decomposition. _Applied numerical mathematics_,
       62(4):428-440, 2012.

  [2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points.
       _arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114
  """
  with tf.compat.v2.name_scope(name or 'pivoted_cholesky'):
    dtype = dtype_util.common_dtype([matrix, diag_rtol],
                                    preferred_dtype=tf.float32)
    matrix = tf.convert_to_tensor(value=matrix, name='matrix', dtype=dtype)
    if tensorshape_util.rank(matrix.shape) is None:
      raise NotImplementedError('Rank of `matrix` must be known statically')

    max_rank = tf.convert_to_tensor(
        value=max_rank, name='max_rank', dtype=tf.int64)
    # Cannot exceed the full rank N of the matrix.
    max_rank = tf.minimum(max_rank,
                          prefer_static.shape(matrix, out_type=tf.int64)[-1])
    diag_rtol = tf.convert_to_tensor(
        value=diag_rtol, dtype=dtype, name='diag_rtol')
    matrix_diag = tf.linalg.diag_part(matrix)
    # matrix is P.D., therefore all matrix_diag > 0, so we don't need abs.
    orig_error = tf.reduce_max(input_tensor=matrix_diag, axis=-1)

    def cond(m, pchol, perm, matrix_diag):
      """Condition for `tf.while_loop` continuation."""
      del pchol
      del perm
      # Residual diagonal (L1) relative to the starting error; stop early
      # once every batch member is within diag_rtol.
      error = tf.linalg.norm(tensor=matrix_diag, ord=1, axis=-1)
      max_err = tf.reduce_max(input_tensor=error / orig_error)
      return (m < max_rank) & (tf.equal(m, 0) | (max_err > diag_rtol))

    batch_dims = tensorshape_util.rank(matrix.shape) - 2
    def batch_gather(params, indices, axis=-1):
      return tf.gather(params, indices, axis=axis, batch_dims=batch_dims)

    def body(m, pchol, perm, matrix_diag):
      """Body of a single `tf.while_loop` iteration."""
      # Here is roughly a numpy, non-batched version of what's going to happen.
      # (See also Algorithm 1 of Harbrecht et al.)
      # 1: maxi = np.argmax(matrix_diag[perm[m:]]) + m
      # 2: maxval = matrix_diag[perm][maxi]
      # 3: perm[m], perm[maxi] = perm[maxi], perm[m]
      # 4: row = matrix[perm[m]][perm[m + 1:]]
      # 5: row -= np.sum(pchol[:m][perm[m + 1:]] * pchol[:m][perm[m]]], axis=-2)
      # 6: pivot = np.sqrt(maxval); row /= pivot
      # 7: row = np.concatenate([[[pivot]], row], -1)
      # 8: matrix_diag[perm[m:]] -= row**2
      # 9: pchol[m, perm[m:]] = row
      #
      # Find the maximal position of the (remaining) permuted diagonal.
      # Steps 1, 2 above.
      permuted_diag = batch_gather(matrix_diag, perm[..., m:])
      maxi = tf.argmax(
          input=permuted_diag, axis=-1, output_type=tf.int64)[..., tf.newaxis]
      maxval = batch_gather(permuted_diag, maxi)
      maxi = maxi + m
      maxval = maxval[..., 0]
      # Update perm: Swap perm[...,m] with perm[...,maxi]. Step 3 above.
      perm = _swap_m_with_i(perm, m, maxi)
      # Step 4.
      row = batch_gather(matrix, perm[..., m:m + 1], axis=-2)
      row = batch_gather(row, perm[..., m + 1:])
      # Step 5.
      prev_rows = pchol[..., :m, :]
      prev_rows_perm_m_onward = batch_gather(prev_rows, perm[..., m + 1:])
      prev_rows_pivot_col = batch_gather(prev_rows, perm[..., m:m + 1])
      row -= tf.reduce_sum(
          input_tensor=prev_rows_perm_m_onward * prev_rows_pivot_col,
          axis=-2)[..., tf.newaxis, :]
      # Step 6.
      pivot = tf.sqrt(maxval)[..., tf.newaxis, tf.newaxis]
      # Step 7.
      row = tf.concat([pivot, row / pivot], axis=-1)
      # TODO(b/130899118): Pad grad fails with int64 paddings.
      # Step 8.
      paddings = tf.concat([
          tf.zeros([prefer_static.rank(pchol) - 1, 2], dtype=tf.int32),
          [[tf.cast(m, tf.int32), 0]]], axis=0)
      diag_update = tf.pad(tensor=row**2, paddings=paddings)[..., 0, :]
      reverse_perm = _invert_permutation(perm)
      matrix_diag -= batch_gather(diag_update, reverse_perm)
      # Step 9.
      row = tf.pad(tensor=row, paddings=paddings)
      # TODO(bjp): Defer the reverse permutation all-at-once at the end?
      row = batch_gather(row, reverse_perm)
      pchol_shape = pchol.shape
      pchol = tf.concat([pchol[..., :m, :], row, pchol[..., m + 1:, :]],
                        axis=-2)
      # Concat loses the static shape; restore it for the loop invariant.
      tensorshape_util.set_shape(pchol, pchol_shape)
      return m + 1, pchol, perm, matrix_diag

    m = np.int64(0)
    pchol = tf.zeros_like(matrix[..., :max_rank, :])
    matrix_shape = prefer_static.shape(matrix, out_type=tf.int64)
    perm = tf.broadcast_to(
        prefer_static.range(matrix_shape[-1]), matrix_shape[:-1])
    _, pchol, _, _ = tf.while_loop(
        cond=cond, body=body, loop_vars=(m, pchol, perm, matrix_diag))
    pchol = tf.linalg.matrix_transpose(pchol)
    tensorshape_util.set_shape(
        pchol, tensorshape_util.concatenate(matrix_diag.shape, [None]))
    return pchol
Computes the (partial) pivoted cholesky decomposition of `matrix`. The pivoted Cholesky is a low rank approximation of the Cholesky decomposition of `matrix`, i.e. as described in [(Harbrecht et al., 2012)][1]. The currently-worst-approximated diagonal element is selected as the pivot at each iteration. This yields from a `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn, N, K]` shaped rank-`K` approximation `lr` such that `lr @ lr.T ~= matrix`. Note that, unlike the Cholesky decomposition, `lr` is not triangular even in a rectangular-matrix sense. However, under a permutation it could be made triangular (it has one more zero in each column as you move to the right). Such a matrix can be useful as a preconditioner for conjugate gradient optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can be cheaply done via the Woodbury matrix identity, as implemented by `tf.linalg.LinearOperatorLowRankUpdate`. Args: matrix: Floating point `Tensor` batch of symmetric, positive definite matrices. max_rank: Scalar `int` `Tensor`, the rank at which to truncate the approximation. diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the errors of all diagonal elements of `lr @ lr.T` are each lower than `element * diag_rtol`, iteration is permitted to terminate early. name: Optional name for the op. Returns: lr: Low rank pivoted Cholesky approximation of `matrix`. #### References [1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by the pivoted Cholesky decomposition. _Applied numerical mathematics_, 62(4):428-440, 2012. [2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points. _arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114
def get_or_create_user(self, provider, access, info):
    "Create a shell auth.User."
    digest = hashlib.sha1(smart_bytes(access)).digest()
    # Base64 (urlsafe) keeps the name under 30 characters once the
    # '=' padding characters are removed.
    username = force_text(base64.urlsafe_b64encode(digest)).replace('=', '')
    User = get_user_model()
    user_kwargs = {
        User.USERNAME_FIELD: username,
        'email': '',
        'password': None,
    }
    return User.objects.create_user(**user_kwargs)
Create a shell auth.User.
def ones(shape, ctx=None, dtype=None, **kwargs):
    """Returns a new array filled with all ones, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int or list of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context. Defaults to the current default context
        (``mxnet.context.current_context()``).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A new array of the specified shape filled with all ones.

    Examples
    --------
    >>> mx.nd.ones(1).asnumpy()
    array([ 1.], dtype=float32)
    >>> mx.nd.ones((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.ones((1,2), dtype='float16').asnumpy()
    array([[ 1.,  1.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    ctx = current_context() if ctx is None else ctx
    if dtype is None:
        dtype = mx_real_t
    # pylint: disable= no-member, protected-access
    return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
Returns a new array filled with all ones, with the given shape and type. Parameters ---------- shape : int or tuple of int or list of int The shape of the empty array. ctx : Context, optional An optional device context. Defaults to the current default context (``mxnet.context.current_context()``). dtype : str or numpy.dtype, optional An optional value type (default is `float32`). out : NDArray, optional The output NDArray (default is `None`). Returns ------- NDArray A new array of the specified shape filled with all ones. Examples -------- >>> mx.nd.ones(1).asnumpy() array([ 1.], dtype=float32) >>> mx.nd.ones((1,2), mx.gpu(0)) <NDArray 1x2 @gpu(0)> >>> mx.nd.ones((1,2), dtype='float16').asnumpy() array([[ 1., 1.]], dtype=float16)
def users_me_merge(self, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/users#merge-self-with-another-user"
    endpoint = "/api/v2/users/me/merge.json"
    return self.call(endpoint, method="PUT", data=data, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/users#merge-self-with-another-user
def multi_trace_plot(traces, corr=True, stack='linstack', size=(7, 12),
                     **kwargs):
    """
    Plot multiple traces (usually from the same station) on the same plot.

    Differs somewhat to obspy's stream.plot in that only relative time within
    traces is worried about, it will not merge traces together.

    :type traces: list
    :param traces: List of obspy.core.Trace
    :type corr: bool
    :param corr: To calculate the correlation or not, if True, will add this
        to the axes
    :type stack: str
    :param stack: To plot the stack as the first trace or not, select type of
        stack: 'linstack' or 'PWS', or None.
    :type size: tuple
    :param size: Size of figure.
    """
    import matplotlib.pyplot as plt
    from eqcorrscan.core.match_filter import normxcorr2
    n_axes = len(traces)
    # Reserve an extra (top) axis for the stack trace.
    if stack in ['linstack', 'PWS']:
        n_axes += 1
    fig, axes = plt.subplots(n_axes, 1, sharex=True, figsize=size)
    if len(traces) > 1:
        axes = axes.ravel()
    # Sort the traces chronologically by start time.
    traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
    traces.sort(key=lambda tup: tup[1])
    traces = [trace[0] for trace in traces]
    # Plot the traces
    for i, tr in enumerate(traces):
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate  # convert to seconds
        # Axis 0 is reserved for the stack when stacking is enabled.
        if not stack:
            ind = i
        else:
            ind = i + 1
        axes[ind].plot(x, y, 'k', linewidth=1.1)
        axes[ind].yaxis.set_ticks([])
    # Wrap each trace in a Stream for the stacking helpers.
    traces = [Stream(trace) for trace in traces]
    if stack == 'PWS':
        stacked = PWS_stack(traces)
    elif stack == 'linstack':
        stacked = linstack(traces)
    if stack in ['linstack', 'PWS']:
        tr = stacked[0]
        y = tr.data
        x = np.arange(len(y))
        x = x / tr.stats.sampling_rate
        axes[0].plot(x, y, 'r', linewidth=2.0)
        axes[0].set_ylabel('Stack', rotation=0)
        axes[0].yaxis.set_ticks([])
    # Annotate each trace with its correlation to the stack (optional),
    # its peak amplitude and its start time.
    for i, slave in enumerate(traces):
        if corr:
            # NOTE(review): `tr` here is the last trace plotted, or the
            # stack when stacking is enabled — confirm intended reference.
            cc = normxcorr2(tr.data, slave[0].data)
        if not stack:
            ind = i
        else:
            ind = i + 1
        if corr:
            axes[ind].set_ylabel('cc=' + str(round(np.max(cc), 2)),
                                 rotation=0)
            axes[ind].text(0.9, 0.15, str(round(np.max(slave[0].data))),
                           bbox=dict(facecolor='white', alpha=0.95),
                           transform=axes[ind].transAxes)
        axes[ind].text(0.7, 0.85,
                       slave[0].stats.starttime.datetime.
                       strftime('%Y/%m/%d %H:%M:%S'),
                       bbox=dict(facecolor='white', alpha=0.95),
                       transform=axes[ind].transAxes)
    axes[-1].set_xlabel('Time (s)')
    fig = _finalise_figure(fig=fig, **kwargs)  # pragma: no cover
    return fig
Plot multiple traces (usually from the same station) on the same plot. Differs somewhat to obspy's stream.plot in that only relative time within traces is worried about, it will not merge traces together. :type traces: list :param traces: List of obspy.core.Trace :type corr: bool :param corr: To calculate the correlation or not, if True, will add this to the axes :type stack: str :param stack: To plot the stack as the first trace or not, select type of stack: 'linstack' or 'PWS', or None. :type size: tuple :param size: Size of figure.
def _check_uninferable_call(self, node):
    """
    Check that the given uninferable Call node does not call
    an actual function.

    Emits 'not-callable' when the called attribute resolves to a
    property whose inferred return values are not callable.
    """
    if not isinstance(node.func, astroid.Attribute):
        return

    # Look for properties. First, obtain
    # the lhs of the Attribute node and search the attribute
    # there. If that attribute is a property or a subclass of properties,
    # then most likely it's not callable.

    # TODO: since astroid doesn't understand descriptors very well
    # we will not handle them here, right now.

    expr = node.func.expr
    klass = safe_infer(expr)
    if (
        klass is None
        or klass is astroid.Uninferable
        or not isinstance(klass, astroid.Instance)
    ):
        return

    try:
        attrs = klass._proxied.getattr(node.func.attrname)
    except exceptions.NotFoundError:
        return

    for attr in attrs:
        if attr is astroid.Uninferable:
            continue
        if not isinstance(attr, astroid.FunctionDef):
            continue

        # Decorated, see if it is decorated with a property.
        # Also, check the returns and see if they are callable.
        if decorated_with_property(attr):
            try:
                all_returns_are_callable = all(
                    return_node.callable() or return_node is astroid.Uninferable
                    for return_node in attr.infer_call_result(node)
                )
            except astroid.InferenceError:
                continue

            if not all_returns_are_callable:
                self.add_message(
                    "not-callable", node=node, args=node.func.as_string()
                )
                break
Check that the given uninferable Call node does not call an actual function.
def for_print(self):
    """Return the object info (blue ANSI color) followed by the
    object's string form on the next line."""
    header = "\033[34m" + self.get_object_info() + "\033[0m"
    return header + "\n" + self.as_string()
for_print
def values(self):
    "Returns a list of ConfigMap values."
    result = []
    # Concatenate the typed maps in a fixed order: int, string, float, bool.
    for mapping in (self._pb.IntMap, self._pb.StringMap,
                    self._pb.FloatMap, self._pb.BoolMap):
        result.extend(mapping.values())
    return result
Returns a list of ConfigMap values.
def volume_disk_temp_avg(self, volume):
    """Average temperature of all disks making up the volume.

    Returns None when the volume is unknown, has no disk list, or
    no disk reports a positive total temperature.
    """
    volume = self._get_volume(volume)
    if volume is None:
        return None
    vol_disks = volume["disks"]
    if vol_disks is None:
        return None
    temps = [self.disk_temp(disk) for disk in vol_disks]
    temps = [t for t in temps if t is not None]
    if temps and sum(temps) > 0:
        return round(sum(temps) / len(temps), 0)
Average temperature of all disks making up the volume
def get_soft_bounds(self):
    """
    Return (lower, upper) soft bounds, falling back to the hard
    bound for any soft bound that is None. The hard bound itself
    could still be None.
    """
    hard_lo, hard_hi = self.bounds if self.bounds is not None else (None, None)
    if self._softbounds is None:
        soft_lo, soft_hi = (None, None)
    else:
        soft_lo, soft_hi = self._softbounds
    lower = hard_lo if soft_lo is None else soft_lo
    upper = hard_hi if soft_hi is None else soft_hi
    return (lower, upper)
For each soft bound (upper and lower), if there is a defined bound (not equal to None) then it is returned, otherwise it defaults to the hard bound. The hard bound could still be None.
def stop_experiment(args):
    '''Stop the experiment which is running.

    For every experiment id parsed from ``args``: kill the REST
    server and any tensorboard processes, then mark the experiment
    STOPPED with an end time.
    '''
    experiment_id_list = parse_ids(args)
    if experiment_id_list:
        experiment_config = Experiments()
        experiment_dict = experiment_config.get_all_experiments()
        for experiment_id in experiment_id_list:
            # Fix: message previously read 'Stoping experiment ...'.
            print_normal('Stopping experiment %s' % experiment_id)
            nni_config = Config(experiment_dict[experiment_id]['fileName'])
            rest_port = nni_config.get_config('restServerPort')
            rest_pid = nni_config.get_config('restServerPid')
            if rest_pid:
                kill_command(rest_pid)
                tensorboard_pid_list = nni_config.get_config('tensorboardPidList')
                if tensorboard_pid_list:
                    for tensorboard_pid in tensorboard_pid_list:
                        try:
                            kill_command(tensorboard_pid)
                        except Exception as exception:
                            # Best-effort: report and keep killing the rest.
                            print_error(exception)
                    nni_config.set_config('tensorboardPidList', [])
            print_normal('Stop experiment success!')
            experiment_config.update_experiment(experiment_id, 'status', 'STOPPED')
            time_now = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            experiment_config.update_experiment(experiment_id, 'endTime', str(time_now))
Stop the experiment which is running
def load_fd(self, key, noexpire=False):
    '''Look up an item in the cache and return an open file
    descriptor for the object. It is the caller's responsibility
    to close the file descriptor.'''
    cachekey = self.xform_key(key)
    path = self.path(cachekey)
    try:
        stat = path.stat()
        expired = (not noexpire
                   and stat.st_mtime < time.time() - self.lifetime)
        if expired:
            LOG.debug('%s has expired', key)
            path.unlink()
            raise KeyError(key)
        LOG.debug('%s found in cache', key)
        return path.open('rb')
    except OSError:
        # stat() failing means the entry does not exist.
        LOG.debug('%s not found in cache', key)
        raise KeyError(key)
Look up an item in the cache and return an open file descriptor for the object. It is the caller's responsibility to close the file descriptor.
def batchDF(symbols, fields=None, range_='1m', last=10, token='', version=''):
    '''Batch several data requests into one invocation

    https://iexcloud.io/docs/api/#batch-requests

    Args:
        symbols (list): List of tickers to request
        fields (list): List of fields to request
        range_ (string): Date range for chart
        last (int): number of records to fetch
        token (string): Access token
        version (string): API version

    Returns:
        DataFrame: results in json
    '''
    x = batch(symbols, fields, range_, last, token, version)
    ret = {}

    if isinstance(symbols, str):
        # Single-symbol response: top-level keys are field names.
        for field in x.keys():
            ret[field] = _MAPPING[field](x[field])
    else:
        # Multi-symbol response: keyed by symbol then field; concat each
        # field's frames across symbols, tagging rows with the symbol.
        for symbol in x.keys():
            for field in x[symbol].keys():
                if field not in ret:
                    ret[field] = pd.DataFrame()

                dat = x[symbol][field]
                dat = _MAPPING[field](dat)
                dat['symbol'] = symbol

                ret[field] = pd.concat([ret[field], dat], sort=True)
    return ret
Batch several data requests into one invocation https://iexcloud.io/docs/api/#batch-requests Args: symbols (list); List of tickers to request fields (list); List of fields to request range_ (string); Date range for chart last (int); token (string); Access token version (string); API version Returns: DataFrame: results in json
def get_extra_info(self, name, default=None):
    """
    Return optional transport information.

    - `'related_address'`: the related address
    - `'sockname'`: the relayed address
    """
    if name == 'related_address':
        return self.__inner_protocol.transport.get_extra_info('sockname')
    if name == 'sockname':
        return self.__relayed_address
    return default
Return optional transport information. - `'related_address'`: the related address - `'sockname'`: the relayed address
def get_broker_list(cluster_config):
    """Return the cluster's brokers as [(id, host)], sorted by broker id.

    :param cluster_config: the configuration of the cluster
    :type cluster_config: map
    """
    with ZK(cluster_config) as zk:
        broker_items = sorted(zk.get_brokers().items(), key=itemgetter(0))
        return [(broker_id, data['host']) for broker_id, data in broker_items]
Returns a list of brokers in the form [(id: host)] :param cluster_config: the configuration of the cluster :type cluster_config: map
def find_module(self, module_name, path=None):
    """
    Search self.paths for the required module.

    :param module_name: the full name of the module to find
    :param path: set to None for top-level modules, otherwise
        package.__path__ for submodules/subpackages (unused)
    :return: a ModuleLoader for the first match, or None
    """
    rel_path = os.path.join(*module_name.split(MODULE_PATH_SEP))
    for search_root in self.paths:
        target_path = os.path.join(search_root, rel_path)
        # A directory is loadable as a package via its __init__.py;
        # otherwise resolve the name to a plain .py file.
        if os.path.isdir(target_path):
            target_file = os.path.join(target_path, '__init__.py')
            is_pkg = True
        else:
            target_file = '{}.py'.format(target_path)
            is_pkg = False
        if os.path.exists(target_file):
            return ModuleLoader(target_path, module_name, target_file, is_pkg)
    return None
Searches the paths for the required module. :param module_name: the full name of the module to find :param path: set to None when the module in being searched for is a top-level module - otherwise this is set to package.__path__ for submodules and subpackages (unused)
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
    """Return the frequencies needed to compress a waveform with the
    given chirp mass, based on the estimate in rough_time_estimate.

    Parameters
    ----------
    m1: float
        mass of first component object in solar masses
    m2: float
        mass of second component object in solar masses
    fmin : float
        The starting frequency of the compressed waveform.
    fmax : float
        The ending frequency of the compressed waveform.
    min_seglen : float
        The inverse of this gives the maximum frequency step that is used.
    df_multiple : {None, float}
        Make the compressed sampling frequencies a multiple of the given
        value. If None provided, the returned sample points can have any
        floating point value.

    Returns
    -------
    array
        The frequencies at which to evaluate the compressed waveform.
    """
    points = []
    freq = fmin
    while freq < fmax:
        if df_multiple is not None:
            # Snap down to the nearest multiple of df_multiple.
            freq = int(freq / df_multiple) * df_multiple
        points.append(freq)
        # Advance by the inverse of the estimated remaining duration.
        freq += 1.0 / rough_time_estimate(m1, m2, freq, fudge_min=min_seglen)
    # Always include the end frequency.
    if points[-1] < fmax:
        points.append(fmax)
    return numpy.array(points)
Return the frequencies needed to compress a waveform with the given chirp mass. This is based on the estimate in rough_time_estimate. Parameters ---------- m1: float mass of first component object in solar masses m2: float mass of second component object in solar masses fmin : float The starting frequency of the compressed waveform. fmax : float The ending frequency of the compressed waveform. min_seglen : float The inverse of this gives the maximum frequency step that is used. df_multiple : {None, float} Make the compressed sampling frequencies a multiple of the given value. If None provided, the returned sample points can have any floating point value. Returns ------- array The frequencies at which to evaluate the compressed waveform.
def send(self, channel, payload):
    """Send a message with the given payload on the given channel.

    Messages are broadcast to all players in the group.
    """
    with track('send_channel=' + channel):
        with track('create event'):
            # Persist the message before broadcasting it.
            Event.objects.create(
                group=self,
                channel=channel,
                value=payload)
        # Broadcast to the channel group keyed by this group's pk.
        ChannelGroup(str(self.pk)).send(
            {'text': json.dumps({
                'channel': channel,
                'payload': payload
            })})
Send a message with the given payload on the given channel. Messages are broadcast to all players in the group.
def proto_0201(theABF):
    """protocol: membrane test.

    Plots every sweep of the ABF file and saves the framed figure
    as a "membrane test" image.
    """
    abf = ABF(theABF)
    abf.log.info("analyzing as a membrane test")
    plot = ABFplot(abf)
    # Use a square half-size figure for this protocol.
    plot.figure_height, plot.figure_width = SQUARESIZE / 2, SQUARESIZE / 2
    plot.figure_sweeps()

    # save it
    plt.tight_layout()
    frameAndSave(abf, "membrane test")
    plt.close('all')
protocol: membrane test.
def _states(self): """Sets grid states""" # The currently visible table self.current_table = 0 # The cell that has been selected before the latest selection self._last_selected_cell = 0, 0, 0 # If we are viewing cells based on their frozen status or normally # (When true, a cross-hatch is displayed for frozen cells) self._view_frozen = False # Timer for updating frozen cells self.timer_running = False
Sets grid states
def calculate_lux(r, g, b):
    """Convert raw R/G/B channel readings to illuminance in lux
    (truncated to an int)."""
    # Empirical per-channel coefficients for this sensor.
    illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
    return int(illuminance)
Converts the raw R/G/B values to luminosity in lux.
def print_banner(filename: str, template: str = DEFAULT_BANNER_TEMPLATE) -> None:
    """
    Print a banner text file to output.

    :param filename: Which file to print.
    :param template: Format string which specifies banner arrangement.
    :return: Does not return anything
    """
    if not os.path.isfile(filename):
        # Missing banner is not fatal; just log and skip.
        logger.warning("Can't find logo banner at %s", filename)
        return
    with open(filename, "r") as banner_file:
        banner_text = banner_file.read()
    print(template.format(banner_text))
Print text file to output. :param filename: Which file to print. :param template: Format string which specified banner arrangement. :return: Does not return anything
def split_url(url):
    """
    Split the given URL ``base#anchor`` into ``(base, anchor)``,
    or ``(base, None)`` if no anchor is present.

    In case there are two or more ``#`` characters,
    return only the first two tokens: ``a#b#c => (a, b)``.

    :param string url: the url
    :rtype: tuple of (str, str or None)
    """
    if url is None:
        return (None, None)
    base, sep, anchor = url.partition("#")
    # partition() returns an empty separator when no '#' is present;
    # map that case to a None anchor to honor the documented contract.
    if not sep:
        return (base, None)
    # Discard everything from the second '#' onward, if any.
    return (base, anchor.split("#", 1)[0])
Split the given URL ``base#anchor`` into ``(base, anchor)``, or ``(base, None)`` if no anchor is present. In case there are two or more ``#`` characters, return only the first two tokens: ``a#b#c => (a, b)``. :param string url: the url :rtype: tuple of (str, str or None)
def casefold_with_i_dots(text):
    """
    Convert capital I's and capital dotted İ's to lowercase in the way
    that's appropriate for Turkish and related languages, then case-fold
    the rest of the letters.

    :param text: input string.
    :return: Turkish-aware case-folded string.
    """
    # NFC first so that 'I' + combining dot composes to U+0130 ('İ')
    # before the Turkish-specific replacements run.
    composed = unicodedata.normalize('NFC', text)
    turkish_lowered = composed.replace('İ', 'i').replace('I', 'ı')
    return turkish_lowered.casefold()
Convert capital I's and capital dotted İ's to lowercase in the way that's appropriate for Turkish and related languages, then case-fold the rest of the letters.
def _parse_options(opts, delim): """Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion.""" options = {} for opt in opts.split(delim): key, val = opt.split("=") if key.lower() == 'readpreferencetags': options.setdefault('readpreferencetags', []).append(val) else: # str(option) to ensure that a unicode URI results in plain 'str' # option names. 'normalized' is then suitable to be passed as # kwargs in all Python versions. if str(key) in options: warnings.warn("Duplicate URI option %s" % (str(key),)) options[str(key)] = unquote_plus(val) # Special case for deprecated options if "wtimeout" in options: if "wtimeoutMS" in options: options.pop("wtimeout") warnings.warn("Option wtimeout is deprecated, use 'wtimeoutMS'" " instead") return options
Helper method for split_options which creates the options dict. Also handles the creation of a list for the URI tag_sets/ readpreferencetags portion.
def qtePrepareToRun(self): """ This method is called by Qtmacs to prepare the macro for execution. It is probably a bad idea to overload this method as it only administrates the macro execution and calls the ``qteRun`` method (which *should* be overloaded by the macro programmer in order for the macro to do something). |Args| * **None** |Returns| * **None** |Raises| * **None** """ # Report the execution attempt. msgObj = QtmacsMessage((self.qteMacroName(), self.qteWidget), None) msgObj.setSignalName('qtesigMacroStart') self.qteMain.qtesigMacroStart.emit(msgObj) # Try to run the macro and radio the success via the # ``qtesigMacroFinished`` signal. try: self.qteRun() self.qteMain.qtesigMacroFinished.emit(msgObj) except Exception as err: if self.qteApplet is None: appID = appSig = None else: appID = self.qteApplet.qteAppletID() appSig = self.qteApplet.qteAppletSignature() msg = ('Macro <b>{}</b> (called from the <b>{}</b> applet' ' with ID <b>{}</b>) did not execute properly.') msg = msg.format(self.qteMacroName(), appSig, appID) if isinstance(err, QtmacsArgumentError): msg += '<br/>' + str(err) # Irrespective of the error, log it, enable macro # processing (in case it got disabled), and trigger the # error signal. self.qteMain.qteEnableMacroProcessing() self.qteMain.qtesigMacroError.emit(msgObj) self.qteLogger.exception(msg, exc_info=True, stack_info=True)
This method is called by Qtmacs to prepare the macro for execution. It is probably a bad idea to overload this method as it only administrates the macro execution and calls the ``qteRun`` method (which *should* be overloaded by the macro programmer in order for the macro to do something). |Args| * **None** |Returns| * **None** |Raises| * **None**
def chmod_r(root: str, permission: int) -> None:
    """
    Recursively apply ``chmod`` to *root* and everything below it.

    Args:
        root: directory to walk down
        permission: mode bits to apply, e.g. ``stat.S_IWUSR``
    """
    # Apply to the root itself first; os.walk() only yields its children.
    os.chmod(root, permission)
    for dirpath, dirnames, filenames in os.walk(root):
        for entry in dirnames + filenames:
            os.chmod(os.path.join(dirpath, entry), permission)
Recursive ``chmod``. Args: root: directory to walk down permission: e.g. ``stat.S_IWUSR``
def version(core_name=None):
    '''
    Gets the solr version for the core specified.  You should specify a core
    here as all the cores will run under the same servlet container and so
    will all have the same version.

    core_name : str (None)
        The name of the solr core if using cores. Leave this blank if you
        are not using cores or if you want to check all cores.

    Return : dict<str,obj>::

        {'success':boolean, 'data':dict, 'errors':list, 'warnings':list}

    CLI Example:

    .. code-block:: bash

        salt '*' solr.version
    '''
    ret = _get_return_dict()
    # do we want to check for all the cores?
    if _get_none_or_value(core_name) is None and _check_for_cores():
        success = True
        # Query every configured core and merge the per-core results.
        for name in __opts__['solr.cores']:
            resp = _get_admin_info('system', core_name=name)
            if resp['success']:
                lucene = resp['data']['lucene']
                data = {name: {'version': lucene['solr-spec-version']}}
            else:
                # One failing core marks the whole call unsuccessful.
                success = False
                data = {name: {'version': None}}
            ret = _update_return_dict(ret, success, data,
                                      resp['errors'], resp['warnings'])
        return ret
    else:
        # Single-core (or no-core) query.
        resp = _get_admin_info('system', core_name=core_name)
        if resp['success']:
            version_num = resp['data']['lucene']['solr-spec-version']
            return _get_return_dict(True, {'version': version_num},
                                    resp['errors'], resp['warnings'])
        else:
            # Propagate the failed admin-info response unchanged.
            return resp
Gets the solr version for the core specified. You should specify a core here as all the cores will run under the same servlet container and so will all have the same version. core_name : str (None) The name of the solr core if using cores. Leave this blank if you are not using cores or if you want to check all cores. Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.version
def setAllColor(self, color):
    """
    Command: 0x06 sets all colors in the array

    Data: [Command][r][g][b]

    :param color: iterable of channel bytes (r, g, b).
    :return: result of transmitting the assembled packet via ``send``.
    """
    # Assemble command byte followed by the color payload in one buffer.
    packet = bytearray()
    packet.append(LightProtocolCommand.SetAllColor)
    packet.extend(color)

    return self.send(packet)
Command: 0x06 sets all colors in the array Data: [Command][r][g][b]
def parse_input(s):
    """Parse the given input and intelligently transform it into an
    absolute, non-naive, timezone-aware datetime object for the UTC
    timezone. The input can be specified as a millisecond-precision UTC
    timestamp (or delta against Epoch), with or without a terminating
    'L'. Alternatively, the input can be specified as a human-readable
    delta string with unit-separated segments, like '24d6h4m500' (24
    days, 6 hours, 4 minutes and 500ms), as long as the segments are in
    descending unit span order.

    :param s: int or str input to parse.
    :raises ValueError: when *s* is neither int nor str, or malformed.
    """
    if isinstance(s, six.integer_types):
        s = str(s)
    elif not isinstance(s, six.string_types):
        raise ValueError(s)

    # Keep the untouched input for error reporting.
    original = s
    # Tolerate a trailing 'L' (long-integer suffix from legacy callers).
    if s[-1:] == 'L':
        s = s[:-1]

    # Leading '-'/'='/'+' selects negative / zero / positive offset mode;
    # absent sign means "absolute timestamp" unless a unit letter appears.
    sign = {'-': -1, '=': 0, '+': 1}.get(s[0], None)
    if sign is not None:
        s = s[1:]

    ts = 0
    # _SORTED_UNITS is assumed ordered from largest to smallest unit span,
    # matching the "descending unit order" contract -- TODO confirm.
    for unit in _SORTED_UNITS:
        pos = s.find(unit[0])
        if pos == 0:
            # A unit letter with no preceding digits is malformed.
            raise ValueError(original)
        elif pos > 0:
            # If we find a unit letter, we're dealing with an offset. Default
            # to positive offset if a sign wasn't specified.
            if sign is None:
                sign = 1
            ts += int(s[:pos]) * __timedelta_millis(unit[1])
            s = s[min(len(s), pos + 1):]

    # Any remaining digits are bare milliseconds.
    if s:
        ts += int(s)

    # No sign and no unit letters => absolute epoch-millis timestamp;
    # otherwise apply the signed offset to "now" (UTC).
    return date_from_utc_ts(ts) if not sign else \
        utc() + sign * delta(milliseconds=ts)
Parse the given input and intelligently transform it into an absolute, non-naive, timezone-aware datetime object for the UTC timezone. The input can be specified as a millisecond-precision UTC timestamp (or delta against Epoch), with or without a terminating 'L'. Alternatively, the input can be specified as a human-readable delta string with unit-separated segments, like '24d6h4m500' (24 days, 6 hours, 4 minutes and 500ms), as long as the segments are in descending unit span order.
def add_command_option(command, name, doc, is_bool=False):
    """
    Add a custom option to a setup command.

    Issues a warning if the option already exists on that command.

    Parameters
    ----------
    command : str
        The name of the command as given on the command line

    name : str
        The name of the build option

    doc : str
        A short description of the option, for the `--help` message

    is_bool : bool, optional
        When `True`, the option is a boolean option and doesn't
        require an associated value.

    Raises
    ------
    RuntimeError
        If *name* collides with an existing class attribute on the
        command class.
    """

    dist = get_dummy_distribution()
    cmdcls = dist.get_command_class(command)

    # Already added by a previous call in this interpreter: no-op.
    if (hasattr(cmdcls, '_astropy_helpers_options') and
            name in cmdcls._astropy_helpers_options):
        return

    attr = name.replace('-', '_')

    if hasattr(cmdcls, attr):
        raise RuntimeError(
            '{0!r} already has a {1!r} class attribute, barring {2!r} from '
            'being usable as a custom option name.'.format(cmdcls, attr, name))

    # Replace any built-in option of the same name, warning the user.
    for idx, cmd in enumerate(cmdcls.user_options):
        if cmd[0] == name:
            log.warn('Overriding existing {0!r} option '
                     '{1!r}'.format(command, name))
            del cmdcls.user_options[idx]
            if name in cmdcls.boolean_options:
                cmdcls.boolean_options.remove(name)
            break

    cmdcls.user_options.append((name, None, doc))

    if is_bool:
        cmdcls.boolean_options.append(name)

    # Distutils' command parsing requires that a command object have an
    # attribute with the same name as the option (with '-' replaced with '_')
    # in order for that option to be recognized as valid
    setattr(cmdcls, attr, None)

    # This caches the options added through add_command_option so that if it is
    # run multiple times in the same interpreter repeated adds are ignored
    # (this way we can still raise a RuntimeError if a custom option overrides
    # a built-in option)
    if not hasattr(cmdcls, '_astropy_helpers_options'):
        cmdcls._astropy_helpers_options = set([name])
    else:
        cmdcls._astropy_helpers_options.add(name)
Add a custom option to a setup command. Issues a warning if the option already exists on that command. Parameters ---------- command : str The name of the command as given on the command line name : str The name of the build option doc : str A short description of the option, for the `--help` message is_bool : bool, optional When `True`, the option is a boolean option and doesn't require an associated value.
def effect_ratio(self, mechanism, purview):
    """The effect ratio of the ``purview`` given ``mechanism``.

    Thin wrapper delegating to ``self._ratio`` with
    ``Direction.EFFECT`` fixed as the direction argument.
    """
    return self._ratio(Direction.EFFECT, mechanism, purview)
The effect ratio of the ``purview`` given ``mechanism``.
def strip_harakat(text):
    """Strip Harakat from arabic word except Shadda.
    The striped marks are :
        - FATHA, DAMMA, KASRA
        - SUKUN
        - FATHATAN, DAMMATAN, KASRATAN,

    @param text: arabic text.
    @type text: unicode.
    @return: return a striped text.
    @rtype: unicode.
    """
    if not text:
        return text
    # Only pay the per-character replace cost when marks are present.
    if is_vocalized(text):
        for char in HARAKAT:
            text = text.replace(char, '')
    # Always return the (possibly unchanged) text -- an unvocalized
    # input must come back as-is, never as None.
    return text
Strip Harakat from arabic word except Shadda. The striped marks are : - FATHA, DAMMA, KASRA - SUKUN - FATHATAN, DAMMATAN, KASRATAN, @param text: arabic text. @type text: unicode. @return: return a striped text. @rtype: unicode.
def _map_player_request_to_func(self, player_request_type): """Provides appropriate parameters to the on_playback functions.""" # calbacks for on_playback requests are optional view_func = self._intent_view_funcs.get(player_request_type, lambda: None) argspec = inspect.getargspec(view_func) arg_names = argspec.args arg_values = self._map_params_to_view_args(player_request_type, arg_names) return partial(view_func, *arg_values)
Provides appropriate parameters to the on_playback functions.
def set_handler(self, handler):
    """
    Connect with a coroutine, which is scheduled when connection is made.

    This function will create a task, and when connection is closed,
    the task will be canceled.

    :param handler:
    :return: None
    """
    # Only one handler may ever be attached to a connection.
    if self._handler:
        raise Exception('Handler was already set')

    if not handler:
        return

    self._handler = async_task(handler, loop=self._loop)
Connect with a coroutine, which is scheduled when connection is made. This function will create a task, and when connection is closed, the task will be canceled. :param handler: :return: None
def _get_log_lines(self, n=300): """Returns a list with the last ``n`` lines of the nextflow log file Parameters ---------- n : int Number of last lines from the log file Returns ------- list List of strings with the nextflow log """ with open(self.log_file) as fh: last_lines = fh.readlines()[-n:] return last_lines
Returns a list with the last ``n`` lines of the nextflow log file Parameters ---------- n : int Number of last lines from the log file Returns ------- list List of strings with the nextflow log
def json(self):
    """ returns a dict that represents a NetJSON NetworkGraph object """
    nodes = []
    links = []
    for link in self.link_set.all():
        # Layer-2 topologies are keyed by MAC address; otherwise use the
        # first IP address of each interface.
        if self.is_layer2:
            source = link.interface_a.mac
            destination = link.interface_b.mac
        else:
            # NOTE(review): assumes every interface has at least one IP;
            # ip_set.first() would return None otherwise -- confirm.
            source = str(link.interface_a.ip_set.first().address)
            destination = str(link.interface_b.ip_set.first().address)
        # Nodes are appended per-link, so duplicates may appear when a
        # node participates in several links.
        nodes.append({
            'id': source
        })
        nodes.append({
            'id': destination
        })
        links.append(OrderedDict((
            ('source', source),
            ('target', destination),
            ('cost', link.metric_value)
        )))
    return OrderedDict((
        ('type', 'NetworkGraph'),
        ('protocol', self.parser.protocol),
        ('version', self.parser.version),
        ('metric', self.parser.metric),
        ('nodes', nodes),
        ('links', links)
    ))
returns a dict that represents a NetJSON NetworkGraph object
def post_processor_affected_function(
        exposure=None, hazard=None, classification=None, hazard_class=None):
    """Private function used in the affected postprocessor.

    It returns a boolean if it's affected or not, or not exposed.

    :param exposure: The exposure to use.
    :type exposure: str

    :param hazard: The hazard to use.
    :type hazard: str

    :param classification: The hazard classification to use.
    :type classification: str

    :param hazard_class: The hazard class of the feature.
    :type hazard_class: str

    :return: If this hazard class is affected or not. It can be
        `not exposed`. The not exposed value returned is the key defined
        in `hazard_classification.py` at the top of the file.
    :rtype: bool,'not exposed'
    """
    if exposure == exposure_population['key']:
        # Population uses the dedicated affected-lookup helper.
        affected = is_affected(
            hazard, classification, hazard_class)
    else:
        # Find the class list of the requested classification.
        # NB: the loop variable must not be named `hazard` -- that would
        # shadow the function parameter of the same name.
        classes = None
        for hazard_classification in hazard_classes_all:
            if hazard_classification['key'] == classification:
                classes = hazard_classification['classes']
                break

        for the_class in classes:
            if the_class['key'] == hazard_class:
                affected = the_class['affected']
                break
        else:
            # No matching class: the feature is considered not exposed.
            affected = not_exposed_class['key']

    return affected
Private function used in the affected postprocessor. It returns a boolean if it's affected or not, or not exposed. :param exposure: The exposure to use. :type exposure: str :param hazard: The hazard to use. :type hazard: str :param classification: The hazard classification to use. :type classification: str :param hazard_class: The hazard class of the feature. :type hazard_class: str :return: If this hazard class is affected or not. It can be `not exposed`. The not exposed value returned is the key defined in `hazard_classification.py` at the top of the file. :rtype: bool,'not exposed'
def format_title(self):
    """Sanitize this book's title into a GitHub-compatible repo name.

    Falls back to the alternative title when the primary title is not
    representable in ASCII.  The result is stored in
    ``self.meta.metadata['_repo']`` and returned.
    """
    def asciify(_title):
        # NFD-decompose so diacritics become separate combining marks.
        # NOTE(review): `unicode` makes this Python-2-only code.
        _title = unicodedata.normalize('NFD', unicode(_title))
        ascii = True
        out = []
        # Whitelist of characters kept verbatim.
        ok = u"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- ',"
        for ch in _title:
            if ch in ok:
                out.append(ch)
            elif unicodedata.category(ch)[0] == ("L"): #a letter
                # Non-ASCII letters are encoded as their hex codepoint.
                out.append(hex(ord(ch)))
                ascii = False
            elif ch in u'\r\n\t':
                out.append(u'-')
        # Collapse runs of separators into a single '-'.
        return (ascii, sub("[ ',-]+", '-', "".join(out)) )

    """ Takes a string and sanitizes it for Github's url name format """
    (ascii, _title) = asciify(self.meta.title)
    if not ascii and self.meta.alternative_title:
        (ascii, _title2) = asciify(self.meta.alternative_title)
        if ascii:
            _title = _title2
    # Budget: GitHub repo names are capped; reserve room for the book id
    # and the separator.
    title_length = 99 - len(str(self.book_id)) - 1
    if len(_title) > title_length:
        # if the title was shortened, replace the trailing _ with an ellipsis
        repo_title = "{0}__{1}".format(_title[:title_length], self.book_id)
    else:
        repo_title = "{0}_{1}".format(_title[:title_length], self.book_id)
    logger.debug("%s %s" % (len(repo_title), repo_title))
    self.meta.metadata['_repo'] = repo_title
    return repo_title
Takes a string and sanitizes it for Github's url name format
def signalbus(self):
    """The associated `SignalBus` object.

    Created lazily on first access and cached on the instance
    (EAFP: attribute lookup failure triggers construction).
    """
    try:
        signalbus = self.__signalbus
    except AttributeError:
        # First access: build and memoize the bus for subsequent calls.
        signalbus = self.__signalbus = SignalBus(self, init_app=False)
    return signalbus
The associated `SignalBus` object.
def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache=None):
    """Retrieves dependencies for the given install requirement from the pip resolver.

    :param dep: A single InstallRequirement
    :type dep: :class:`~pip._internal.req.req_install.InstallRequirement`
    :param sources: Pipfile-formatted sources, defaults to None
    :type sources: list[dict], optional
    :param pip_options: pip options forwarded to the finder, optional
    :param wheel_cache: wheel cache to use; falls back to WHEEL_CACHE
    :return: A set of dependency lines for generating new InstallRequirements.
    :rtype: set(str) or None
    """
    finder = get_finder(sources=sources, pip_options=pip_options)
    if not wheel_cache:
        wheel_cache = WHEEL_CACHE
    dep.is_direct = True
    reqset = pip_shims.shims.RequirementSet()
    reqset.add_requirement(dep)
    requirements = None
    setup_requires = {}
    with temp_environ(), start_resolver(finder=finder, wheel_cache=wheel_cache) as resolver:
        os.environ['PIP_EXISTS_ACTION'] = 'i'
        dist = None
        if dep.editable and not dep.prepared and not dep.req:
            # Run the project's setup.py to discover its metadata.
            with cd(dep.setup_py_dir):
                from setuptools.dist import distutils
                try:
                    dist = distutils.core.run_setup(dep.setup_py)
                except (ImportError, TypeError, AttributeError):
                    dist = None
                else:
                    setup_requires[dist.get_name()] = dist.setup_requires
            if not dist:
                try:
                    dist = dep.get_dist()
                except (TypeError, ValueError, AttributeError):
                    pass
                else:
                    setup_requires[dist.get_name()] = dist.setup_requires
        resolver.require_hashes = False
        try:
            results = resolver._resolve_one(reqset, dep)
        except Exception:
            # FIXME: Needs to bubble the exception somehow to the user.
            results = []
        finally:
            try:
                wheel_cache.cleanup()
            except AttributeError:
                pass
        resolver_requires_python = getattr(resolver, "requires_python", None)
        requires_python = getattr(reqset, "requires_python", resolver_requires_python)
        if requires_python:
            add_marker = fix_requires_python_marker(requires_python)
            reqset.remove(dep)
            if dep.req.marker:
                # BUGFIX: list.extend() returns None, so the previous
                # ``extend(['and',].extend(...))`` passed None to the outer
                # extend() and raised TypeError.  Build the combined list
                # first, then extend once.
                dep.req.marker._markers.extend(
                    ['and'] + list(add_marker._markers))
            else:
                dep.req.marker = add_marker
            reqset.add(dep)
        requirements = set()
        for r in results:
            if requires_python:
                if r.req.marker:
                    # Same fix as above for each resolved result.
                    r.req.marker._markers.extend(
                        ['and'] + list(add_marker._markers))
                else:
                    r.req.marker = add_marker
            requirements.add(format_requirement(r))
        for section in setup_requires:
            python_version = section
            not_python = not is_python(section)
            # This is for cleaning up :extras: formatted markers
            # by adding them to the results of the resolver
            # since any such extra would have been returned as a result anyway
            for value in setup_requires[section]:
                # This is a marker.
                if is_python(section):
                    python_version = value[1:-1]
                else:
                    not_python = True
                if ':' not in value and not_python:
                    try:
                        requirement_str = "{0}{1}".format(value, python_version).replace(":", ";")
                        requirements.add(format_requirement(make_install_requirement(requirement_str).ireq))
                    # Anything could go wrong here -- can't be too careful.
                    except Exception:
                        pass
    # Cache only pinned, non-editable requirements for reuse.
    if not dep.editable and is_pinned_requirement(dep) and requirements is not None:
        DEPENDENCY_CACHE[dep] = list(requirements)
    return requirements
Retrieves dependencies for the given install requirement from the pip resolver. :param dep: A single InstallRequirement :type dep: :class:`~pip._internal.req.req_install.InstallRequirement` :param sources: Pipfile-formatted sources, defaults to None :type sources: list[dict], optional :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None
def copy(self):
    """Create a copy of this pen.

    Returns a new ``Pen`` whose attribute dict is a shallow copy of
    this pen's state.
    """
    duplicate = Pen()
    duplicate.__dict__ = dict(self.__dict__)
    return duplicate
Create a copy of this pen.
def _get_html(self, url):
    """ Get html from url """
    self.log.info(u"/GET {}".format(url))

    response = requests.get(url)

    # requests-cache adds a `from_cache` flag to responses it serves.
    if hasattr(response, 'from_cache') and response.from_cache:
        self.log.info("(from cache)")

    if response.status_code != 200:
        throw_request_err(response)

    return response.content
Get html from url
def clean_password(self):
    """Check that the password is valid."""
    password = self.cleaned_data.get('password')
    if password in self.valid_passwords:
        return password
    raise forms.ValidationError('Incorrect password.')
Check that the password is valid.
def portfolio_from_orders(orders, funds=1e5, price_type='close'):
    """Create a DataFrame of portfolio holdings (#'s' of shares for the symbols and dates)

    Appends the "$CASH" symbol to the porfolio and initializes it to `funds` indicated.
    Appends the symbol "total_value" to store the total value of cash + stocks at each timestamp.
    The symbol holdings are found by multipling each element of the orders matrix by
    the price matrix for those symbols and then computing a cumulative sum of those purchases.

    portfolio["$CASH"] = funds - (orders * prices).sum(axis=1).cumsum()
    portfolio["total_value"] = portfolio["$CASH"] + (orders.cumsum() * prices).sum(axis=1)

    :param orders: DataFrame of share counts ordered, indexed by date,
        one column per symbol.
    :param funds: starting cash balance.
    :param price_type: which price column to fetch (e.g. 'close').
    """
    portfolio = orders.copy()
    # Fetch prices spanning exactly the order history's date range.
    prices = price_dataframe(orders.columns, start=orders.index[0], end=orders.index[-1], price_type=price_type, cleaner=clean_dataframe)
    # Cash decreases by the cumulative cost of all purchases to date.
    portfolio["$CASH"] = funds - (orders * prices).sum(axis=1).cumsum()
    # Total value = remaining cash + mark-to-market value of holdings.
    portfolio["total_value"] = portfolio["$CASH"] + (orders.cumsum() * prices).sum(axis=1)
    return portfolio
Create a DataFrame of portfolio holdings (#'s' of shares for the symbols and dates) Appends the "$CASH" symbol to the porfolio and initializes it to `funds` indicated. Appends the symbol "total_value" to store the total value of cash + stocks at each timestamp. The symbol holdings are found by multipling each element of the orders matrix by the price matrix for those symbols and then computing a cumulative sum of those purchases. portfolio["$CASH"] = funds - (orders * prices).sum(axis=1).cumsum() portfolio["total_value"] = portfolio["$CASH"] + (orders.cumsum() * prices).sum(axis=1)
def get_reversed_aliases(self):
    """
    Return the reversed aliases dict.

    Instead of being in the form {'alias': mapping}, the dict is in the
    form {mapping: 'alias'}.
    """
    return {mapping: alias for alias, mapping in six.iteritems(self.aliases)}
Return the reversed aliases dict. Instead of being in the form {'alias': mapping}, the dict is in the form {mapping: 'alias'}.
def load_formatter_fn(formatter):
    '''
    >>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
    <function basescript at 0x...>
    '''
    fn = util.load_object(formatter)
    # Guarantee every formatter exposes an `ispartial` attribute so
    # downstream code can query it uniformly.
    if not hasattr(fn, 'ispartial'):
        fn.ispartial = util.ispartial
    return fn
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS <function basescript at 0x...>
def contains_duplicates(values: Iterable[Any]) -> bool:
    """
    Does the iterable contain any duplicate values?

    Short-circuits on the first repeated value instead of counting the
    whole iterable, so it returns early on large inputs.  Values must be
    hashable (same requirement as the previous Counter-based version).
    """
    seen = set()
    for value in values:
        if value in seen:
            return True
        seen.add(value)
    return False
Does the iterable contain any duplicate values?
def close(self):
    """ Closes the event streaming.

    Digs the raw OS socket out of the (possibly wrapped) urllib3
    response object and shuts it down so a blocked read unblocks.
    """

    if not self._response.raw.closed:
        # find the underlying socket object
        # based on api.client._get_raw_response_socket

        sock_fp = self._response.raw._fp.fp

        if hasattr(sock_fp, 'raw'):
            sock_raw = sock_fp.raw

            if hasattr(sock_raw, 'sock'):
                sock = sock_raw.sock

            elif hasattr(sock_raw, '_sock'):
                sock = sock_raw._sock
            # NOTE(review): if `raw` has neither `sock` nor `_sock`,
            # `sock` is unbound below -- confirm this cannot occur.

        elif hasattr(sock_fp, 'channel'):
            # We're working with a paramiko (SSH) channel, which doesn't
            # support cancelable streams with the current implementation
            raise DockerException(
                'Cancellable streams not supported for the SSH protocol'
            )
        else:
            sock = sock_fp._sock

        # pyOpenSSL wraps the real socket; unwrap before shutdown.
        if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
                sock, urllib3.contrib.pyopenssl.WrappedSocket):
            sock = sock.socket

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
Closes the event streaming.
def login(self, username: str, password: str, course: int) -> "requests.Response | None":
    """ Log in to the course site.

    :param username: account name for the login form.
    :param password: account password.
    :param course: course identifier submitted as ``rdoCourse``.
    :return: the login response, or ``None`` if the request timed out.
    """
    try:
        # Form fields required by the login endpoint.
        payload = {
            'name': username,
            'passwd': password,
            'rdoCourse': course
        }

        # Return the response of the login attempt.
        # NOTE(review): verify=False disables TLS certificate checks.
        return self.__session.post(
            self.__url + '/Login',
            data=payload,
            timeout=0.5,
            verify=False)
    except requests.exceptions.Timeout:
        return None
登入課程
def run(self, row, **kwargs):
    """Methods takes a row and depending if a dict or list,
    runs RML rules.

    Args:
    -----
        row(Dict, List): Row from CSV Reader

    Returns:
    --------
        The output graph produced by applying the RML rules to the row.
    """
    self.source = row
    # Fresh output graph for this row; parent run() populates it.
    kwargs['output'] = self.__graph__()
    super(CSVRowProcessor, self).run(**kwargs)
    return kwargs['output']
Methods takes a row and depending if a dict or list, runs RML rules. Args: ----- row(Dict, List): Row from CSV Reader
def _set_intrinsics(self):
    """Read the intrinsics matrix from the stream.

    Copies the color stream's focal lengths (fx, fy) and principal
    point (ppx, ppy) into the camera intrinsics matrix.
    """
    strm = self._profile.get_stream(rs.stream.color)
    obj = strm.as_video_stream_profile().get_intrinsics()
    self._intrinsics[0, 0] = obj.fx
    self._intrinsics[1, 1] = obj.fy
    self._intrinsics[0, 2] = obj.ppx
    self._intrinsics[1, 2] = obj.ppy
Read the intrinsics matrix from the stream.
def _parse_access_vlan(self, config): """Scans the specified config and parse the access-vlan value Args: config (str): The interface configuration block to scan Returns: dict: A Python dict object with the value of switchport access value. The dict returned is intended to be merged into the resource dict """ value = re.search(r'switchport access vlan (\d+)', config) return dict(access_vlan=value.group(1))
Scans the specified config and parse the access-vlan value Args: config (str): The interface configuration block to scan Returns: dict: A Python dict object with the value of switchport access value. The dict returned is intended to be merged into the resource dict
def AjustarLiquidacionUnificadoPapel(self): "Ajustar Liquidación realizada en un formulario F1116 B / C (papel)" # limpiar arrays no enviados: if not self.ajuste['ajusteBase']['certificados']: del self.ajuste['ajusteBase']['certificados'] for k1 in ('ajusteCredito', 'ajusteDebito'): for k2 in ('retenciones', 'deducciones'): if not self.ajuste[k1][k2]: del self.ajuste[k1][k2] ret = self.client.liquidacionAjustarUnificadoPapel( auth={ 'token': self.Token, 'sign': self.Sign, 'cuit': self.Cuit, }, **self.ajuste ) ret = ret['ajustePapelReturn'] self.__analizar_errores(ret) if 'ajustePapel' in ret: aut = ret['ajustePapel'] self.AnalizarAjuste(aut) return True
Ajustar Liquidación realizada en un formulario F1116 B / C (papel)
def delete_project(id=None, name=None):
    """ Delete a Project by ID or name.

    Returns the formatted JSON response when the delete produced
    content, otherwise ``None``.
    """
    content = delete_project_raw(id, name)

    return utils.format_json(content) if content else None
Delete a Project by ID or name.
def switch(self, first, second):
    """Switch two entries in the queue. Return False if an entry doesn't
    exist.

    Only entries whose status is 'queued' or 'stashed' may be swapped;
    on success the queue is persisted via ``self.write()``.
    """
    allowed_states = ['queued', 'stashed']

    # Both keys must exist before any status lookup.
    if first not in self.queue or second not in self.queue:
        return False
    if self.queue[first]['status'] not in allowed_states:
        return False
    if self.queue[second]['status'] not in allowed_states:
        return False

    # Tuple-swap shallow copies of the two entries.
    self.queue[first], self.queue[second] = (
        self.queue[second].copy(),
        self.queue[first].copy(),
    )
    self.write()
    return True
Switch two entries in the queue. Return False if an entry doesn't exist.
def get_offload(self, name, **kwargs): """Return a dictionary describing the connected offload target. :param offload: Name of offload target to get information about. :type offload: str :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET offload/::offload** :type \*\*kwargs: optional :returns: A dictionary describing the offload connection. :rtype: ResponseDict """ # Unbox if a list to accommodate a bug in REST 1.14 result = self._request("GET", "offload/{0}".format(name), kwargs) if isinstance(result, list): headers = result.headers result = ResponseDict(result[0]) result.headers = headers return result
Return a dictionary describing the connected offload target. :param offload: Name of offload target to get information about. :type offload: str :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **GET offload/::offload** :type \*\*kwargs: optional :returns: A dictionary describing the offload connection. :rtype: ResponseDict