code: string, lengths 75 to 104k
docstring: string, lengths 1 to 46.9k
def start_segment_address(cs, ip):
    """Return Start Segment Address Record.
    @param cs 16-bit value for CS register.
    @param ip 16-bit value for IP register.
    @return String representation of Intel Hex SSA record.
    """
    b = [4, 0, 0, 0x03, (cs>>8)&0x0FF, cs&0x0FF, (ip>>8)&0x0FF, ip&0x0FF]
    return Record._from_bytes(b)
Return Start Segment Address Record. @param cs 16-bit value for CS register. @param ip 16-bit value for IP register. @return String representation of Intel Hex SSA record.
async def fetch_token(self):
    """Fetch new session token from api."""
    url = '{}/login'.format(API_URL)
    payload = 'email={}&password={}'.format(self._email, self._password)

    reg = await self.api_post(url, None, payload)
    if reg is None:
        _LOGGER.error('Unable to authenticate and fetch eight token.')
    else:
        self._userid = reg['session']['userId']
        self._token = reg['session']['token']
        self._expdate = reg['session']['expirationDate']

    _LOGGER.debug('UserID: %s, Token: %s', self._userid, self.token)
Fetch new session token from api.
def update_config(config_new, config_default):
    '''
    Updates the loaded method configuration with default values.
    '''
    if any([isinstance(v, dict) for v in list(config_new.values())]):
        for k,v in list(config_new.items()):
            if isinstance(v,dict) and k in config_default:
                update_config(config_new[k],config_default[k])
            else:
                config_default[k] = v
    else:
        config_default.update(config_new)
    return config_default
Updates the loaded method configuration with default values.
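A minimal usage sketch for update_config above; the dictionaries are hypothetical and the function is assumed to be importable from the module this row was taken from:

# Hypothetical config dictionaries for illustration only.
defaults = {"solver": {"tol": 1e-6, "max_iter": 100}, "verbose": False}
overrides = {"solver": {"tol": 1e-8}, "verbose": True}

merged = update_config(overrides, defaults)
# Nested keys are merged rather than replaced wholesale, and `defaults` is
# mutated in place:
# merged == {"solver": {"tol": 1e-08, "max_iter": 100}, "verbose": True}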
def _set_shared_instances(self):
    """Sets attributes from the shared instances."""
    self.inqueue = self.em.get_inqueue()
    self.outqueue = self.em.get_outqueue()
    self.namespace = self.em.get_namespace()
Sets attributes from the shared instances.
def _vmf_normalize(kappa, dim):
    """Compute normalization constant using built-in numpy/scipy Bessel
    approximations.

    Works well on small kappa and mu.
    """
    num = np.power(kappa, dim / 2. - 1.)

    if dim / 2. - 1. < 1e-15:
        denom = np.power(2. * np.pi, dim / 2.) * i0(kappa)
    else:
        denom = np.power(2. * np.pi, dim / 2.) * iv(dim / 2. - 1., kappa)

    if np.isinf(num):
        raise ValueError("VMF scaling numerator was inf.")

    if np.isinf(denom):
        raise ValueError("VMF scaling denominator was inf.")

    if np.abs(denom) < 1e-15:
        raise ValueError("VMF scaling denominator was 0.")

    return num / denom
Compute normalization constant using built-in numpy/scipy Bessel approximations. Works well on small kappa and mu.
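A small sanity-check sketch for _vmf_normalize above, assuming the same imports its body relies on (numpy as np, scipy.special's i0 and iv) and that the function itself is in scope:

import numpy as np
from scipy.special import i0, iv  # Bessel functions used inside _vmf_normalize

# For a modest concentration the constant should be finite and positive.
c = _vmf_normalize(kappa=1.0, dim=3)
assert np.isfinite(c) and c > 0.0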
def parse(station: str, txt: str) -> TafData: """ Returns TafData and Units dataclasses with parsed data and their associated units """ core.valid_station(station) while len(txt) > 3 and txt[:4] in ('TAF ', 'AMD ', 'COR '): txt = txt[4:] _, station, time = core.get_station_and_time(txt[:20].split(' ')) retwx = { 'end_time': None, 'raw': txt, 'remarks': None, 'start_time': None, 'station': station, 'time': core.make_timestamp(time) } txt = txt.replace(station, '') txt = txt.replace(time, '').strip() if core.uses_na_format(station): use_na = True units = Units(**NA_UNITS) # type: ignore else: use_na = False units = Units(**IN_UNITS) # type: ignore # Find and remove remarks txt, retwx['remarks'] = core.get_taf_remarks(txt) # Split and parse each line lines = core.split_taf(txt) parsed_lines = parse_lines(lines, units, use_na) # Perform additional info extract and corrections if parsed_lines: parsed_lines[-1]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[-1]['other']) if not (retwx['max_temp'] or retwx['min_temp']): parsed_lines[0]['other'], retwx['max_temp'], retwx['min_temp'] \ = core.get_temp_min_and_max(parsed_lines[0]['other']) # Set start and end times based on the first line start, end = parsed_lines[0]['start_time'], parsed_lines[0]['end_time'] parsed_lines[0]['end_time'] = None retwx['start_time'], retwx['end_time'] = start, end parsed_lines = core.find_missing_taf_times(parsed_lines, start, end) parsed_lines = core.get_taf_flight_rules(parsed_lines) # Extract Oceania-specific data if retwx['station'][0] == 'A': # type: ignore parsed_lines[-1]['other'], retwx['alts'], retwx['temps'] \ = core.get_oceania_temp_and_alt(parsed_lines[-1]['other']) # Convert to dataclass retwx['forecast'] = [TafLineData(**line) for line in parsed_lines] # type: ignore return TafData(**retwx), units
Returns TafData and Units dataclasses with parsed data and their associated units
def _get_layout(self): """ Get the outputs layout from xrandr and try to detect the currently active layout as best as we can on start. """ connected = list() active_layout = list() disconnected = list() layout = OrderedDict( {"connected": OrderedDict(), "disconnected": OrderedDict()} ) current = self.py3.command_output("xrandr") for line in current.splitlines(): try: s = line.split(" ") infos = line[line.find("(") :] if s[1] == "connected": output, state, mode = s[0], s[1], None for index, x in enumerate(s[2:], 2): if "x" in x and "+" in x: mode = x active_layout.append(output) infos = line[line.find(s[index + 1]) :] break elif "(" in x: break connected.append(output) elif s[1] == "disconnected": output, state, mode = s[0], s[1], None disconnected.append(output) else: continue except Exception as err: self.py3.log('xrandr error="{}"'.format(err)) else: layout[state][output] = {"infos": infos, "mode": mode, "state": state} # initialize the active layout if self.active_layout is None: self.active_comb = tuple(active_layout) self.active_layout = self._get_string_and_set_width( tuple(active_layout), self.active_mode ) return layout
Get the outputs layout from xrandr and try to detect the currently active layout as best as we can on start.
def _show(self, message, indent=0, enable_verbose=True):  # pragma: no cover
    """Message printer.
    """
    if enable_verbose:
        print(" " * indent + message)
Message printer.
def mime(self):
    """
    Returns the finalised mime object, after applying the internal headers.
    Usually this is not to be overriden.
    """
    mime = self.mime_object()
    self.headers.prepare(mime)
    return mime
Returns the finalised mime object, after applying the internal headers. Usually this is not to be overriden.
def render(self, target, data):
    """Render the table."""
    rows = self.get_rows(target, data)
    rows = self._filter_rows(rows)
    renderer = getattr(self, "_render_%s" % target.name, None)
    if renderer is None:
        raise ValueError(
            "Cannot render %r for %s." % (self.value, target))
    else:
        return renderer(rows)
Render the table.
def points_on_circle(radius, points):
    """
    returns a set of uniform points around a circle

    :param radius: radius of the circle
    :param points: number of points on the circle
    :return:
    """
    angle = np.linspace(0, 2*np.pi, points)
    x_coord = np.cos(angle)*radius
    y_coord = np.sin(angle)*radius
    return x_coord, y_coord
returns a set of uniform points around a circle :param radius: radius of the circle :param points: number of points on the circle :return:
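A short usage sketch for points_on_circle above (assumes numpy is imported as np, as in the function body, and that the function is in scope):

import numpy as np

# Five sample points on a circle of radius 2; every point lies on the circle.
x, y = points_on_circle(radius=2.0, points=5)
assert np.allclose(x**2 + y**2, 4.0)
# Note: np.linspace includes the endpoint, so the first and last points coincide.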
async def count(self, query, clear_limit=False):
    """Perform *COUNT* aggregated query asynchronously.

    :return: number of objects in ``select()`` query
    """
    query = self._swap_database(query)
    return (await count(query, clear_limit=clear_limit))
Perform *COUNT* aggregated query asynchronously. :return: number of objects in ``select()`` query
def ostree_compose(self, release):
    """Compose the OSTree in the mock container"""
    start = datetime.utcnow()
    treefile = os.path.join(release['git_dir'], 'treefile.json')
    cmd = release['ostree_compose'] % treefile
    with file(treefile, 'w') as tree:
        json.dump(release['treefile'], tree)

    # Only use new_chroot for the invocation, as --clean and --new-chroot are buggy together right now
    out, err, rcode = self.mock_chroot(release, cmd, new_chroot=True)

    ref = None
    commitid = None
    for line in out.split('\n'):
        if ' => ' in line:
            # This line is the: ref => commitid line
            line = line.replace('\n', '')
            ref, _, commitid = line.partition(' => ')

    self.log.info('rpm-ostree compose complete (%s), ref %s, commitid %s',
                  datetime.utcnow() - start, ref, commitid)
    return ref, commitid
Compose the OSTree in the mock container
def _prepend_name(self, prefix, dict_):
    '''changes the keys of the dictionary prepending them with "name."'''
    return dict(['.'.join([prefix, name]), msg]
                for name, msg in dict_.iteritems())
changes the keys of the dictionary prepending them with "name."
def _words_by_score(words, score, least_to_most, n=None):
    """
    Order a vector of `words` by a `score`, either `least_to_most` or reverse.
    Optionally return only the top `n` results.
    """
    if words.shape != score.shape:
        raise ValueError('`words` and `score` must have the same shape')

    if n is not None and (n <= 0 or n > len(words)):
        raise ValueError('`n` must be in range [0, len(words)]')

    indices = np.argsort(score)
    if not least_to_most:
        indices = indices[::-1]

    ordered_words = words[indices]

    if n is not None:
        return ordered_words[:n]
    else:
        return ordered_words
Order a vector of `words` by a `score`, either `least_to_most` or reverse. Optionally return only the top `n` results.
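A brief sketch of how _words_by_score above might be called (hypothetical arrays; assumes numpy as np and the function in scope):

import numpy as np

words = np.array(["cat", "dog", "bird"])
score = np.array([0.2, 0.9, 0.5])

# Highest-scoring words first, keeping only the top two.
top2 = _words_by_score(words, score, least_to_most=False, n=2)
# top2 -> ['dog', 'bird']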
async def create_student_container(self, job_id, parent_container_id, sockets_path, student_path, systemfiles_path, course_common_student_path, socket_id, environment_name, memory_limit, time_limit, hard_time_limit, share_network, write_stream): """ Creates a new student container. :param write_stream: stream on which to write the return value of the container (with a correctly formatted msgpack message) """ try: self._logger.debug("Starting new student container... %s %s %s %s", environment_name, memory_limit, time_limit, hard_time_limit) if environment_name not in self._containers: self._logger.warning("Student container asked for an unknown environment %s (not in aliases)", environment_name) await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id}) return environment = self._containers[environment_name]["id"] try: socket_path = path_join(sockets_path, str(socket_id) + ".sock") container_id = await self._docker.create_container_student(parent_container_id, environment, share_network, memory_limit, student_path, socket_path, systemfiles_path, course_common_student_path) except Exception as e: self._logger.exception("Cannot create student container!") await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id}) if isinstance(e, asyncio.CancelledError): raise return self._student_containers_for_job[job_id].add(container_id) self._student_containers_running[container_id] = job_id, parent_container_id, socket_id, write_stream # send to the container that the sibling has started await self._write_to_container_stdin(write_stream, {"type": "run_student_started", "socket_id": socket_id}) try: await self._docker.start_container(container_id) except Exception as e: self._logger.exception("Cannot start student container!") await self._write_to_container_stdin(write_stream, {"type": "run_student_retval", "retval": 254, "socket_id": socket_id}) if isinstance(e, asyncio.CancelledError): raise return # Verify the time limit await self._timeout_watcher.register_container(container_id, time_limit, hard_time_limit) except asyncio.CancelledError: raise except: self._logger.exception("Exception in create_student_container")
Creates a new student container. :param write_stream: stream on which to write the return value of the container (with a correctly formatted msgpack message)
def _finalize(self):
    """Reset the status and tell the database to finalize the traces."""
    if self.status in ['running', 'halt']:
        if self.verbose > 0:
            print_('\nSampling finished normally.')
        self.status = 'ready'

    self.save_state()
    self.db._finalize()
Reset the status and tell the database to finalize the traces.
def overlap_cplx(vec1, vec2, psd=None, low_frequency_cutoff=None, high_frequency_cutoff=None, normalized=True): """Return the complex overlap between the two TimeSeries or FrequencySeries. Parameters ---------- vec1 : TimeSeries or FrequencySeries The input vector containing a waveform. vec2 : TimeSeries or FrequencySeries The input vector containing a waveform. psd : Frequency Series A power spectral density to weight the overlap. low_frequency_cutoff : {None, float}, optional The frequency to begin the overlap. high_frequency_cutoff : {None, float}, optional The frequency to stop the overlap. normalized : {True, boolean}, optional Set if the overlap is normalized. If true, it will range from 0 to 1. Returns ------- overlap: complex """ htilde = make_frequency_series(vec1) stilde = make_frequency_series(vec2) kmin, kmax = get_cutoff_indices(low_frequency_cutoff, high_frequency_cutoff, stilde.delta_f, (len(stilde)-1) * 2) if psd: inner = (htilde[kmin:kmax]).weighted_inner(stilde[kmin:kmax], psd[kmin:kmax]) else: inner = (htilde[kmin:kmax]).inner(stilde[kmin:kmax]) if normalized: sig1 = sigma(vec1, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff) sig2 = sigma(vec2, psd=psd, low_frequency_cutoff=low_frequency_cutoff, high_frequency_cutoff=high_frequency_cutoff) norm = 1 / sig1 / sig2 else: norm = 1 return 4 * htilde.delta_f * inner * norm
Return the complex overlap between the two TimeSeries or FrequencySeries. Parameters ---------- vec1 : TimeSeries or FrequencySeries The input vector containing a waveform. vec2 : TimeSeries or FrequencySeries The input vector containing a waveform. psd : Frequency Series A power spectral density to weight the overlap. low_frequency_cutoff : {None, float}, optional The frequency to begin the overlap. high_frequency_cutoff : {None, float}, optional The frequency to stop the overlap. normalized : {True, boolean}, optional Set if the overlap is normalized. If true, it will range from 0 to 1. Returns ------- overlap: complex
def get_notmuch_setting(self, section, key, fallback=None):
    """
    look up config values from notmuch's config

    :param section: key is in
    :type section: str
    :param key: key to look up
    :type key: str
    :param fallback: fallback returned if key is not present
    :type fallback: str
    :returns: config value with type as specified in the spec-file
    """
    value = None
    if section in self._notmuchconfig:
        if key in self._notmuchconfig[section]:
            value = self._notmuchconfig[section][key]
    if value is None:
        value = fallback
    return value
look up config values from notmuch's config :param section: key is in :type section: str :param key: key to look up :type key: str :param fallback: fallback returned if key is not present :type fallback: str :returns: config value with type as specified in the spec-file
def statistical_axes(fit, **kw):
    """
    Hyperbolic error using a statistical process (either sampling or
    noise errors)

    Integrates covariance with error level and degrees of freedom for
    plotting confidence intervals.

    Degrees of freedom is set to 2, which is the relevant number of
    independent dimensions to planar fitting of *a priori* centered data.
    """
    method = kw.pop('method', 'noise')
    confidence_level = kw.pop('confidence_level', 0.95)
    dof = kw.pop('dof',2)
    nominal = fit.eigenvalues

    if method == 'sampling':
        cov = sampling_covariance(fit,**kw)
    elif method == 'noise':
        cov = noise_covariance(fit,**kw)

    if kw.pop('chisq', False):
        # Model the incorrect behaviour of using the
        # Chi2 distribution instead of the Fisher
        # distribution (which is a measure of the
        # ratio between the two).
        z = chi2.ppf(confidence_level,dof)
    else:
        z = fisher_statistic(fit.n,confidence_level,dof=dof)

    # Apply two fisher F parameters (one along each axis)
    # Since we apply to each axis without division,
    # it is as if we are applying N.sqrt(2*F) to the entire
    # distribution, aligning us with (Francq, 2014)
    err = z*N.sqrt(cov)

    return apply_error_scaling(nominal, err, n=fit.n, **kw)
Hyperbolic error using a statistical process (either sampling or noise errors) Integrates covariance with error level and degrees of freedom for plotting confidence intervals. Degrees of freedom is set to 2, which is the relevant number of independent dimensions to planar fitting of *a priori* centered data.
def assumes(*args):
    '''Stores a function's assumptions as an attribute.'''
    args = tuple(args)

    def decorator(func):
        func.assumptions = args
        return func
    return decorator
Stores a function's assumptions as an attribute.
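A tiny sketch of the assumes decorator above, using made-up assumption strings (assumes the decorator is in scope):

@assumes("hydrostatic balance", "ideal gas")
def pressure_profile(z):
    ...

# The decorator only attaches the tuple; the function body is untouched.
print(pressure_profile.assumptions)  # ('hydrostatic balance', 'ideal gas')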
def getProductUIDs(self):
    """ return the uids of the products referenced by order items
    """
    uids = []
    for orderitem in self.objectValues('XupplyOrderItem'):
        product = orderitem.getProduct()
        if product is not None:
            uids.append(orderitem.getProduct().UID())
    return uids
return the uids of the products referenced by order items
def p_theory(self, p):
    """theory : LBRACKET superclauses_sum RBRACKET
              | LBRACKET superclauses_sum RBRACKET literal_list"""
    if len(p) == 4:
        p[0] = Theory(p[2])
    else:
        p[0] = Theory(p[2], literals=p[4])
theory : LBRACKET superclauses_sum RBRACKET | LBRACKET superclauses_sum RBRACKET literal_list
def object_data(self): '''Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls ''' self.foxml_buffer = io.BytesIO() if self.progress_bar: self.progress_bar.start() previous_section = None while True: try: section = self.get_next_section() except StopIteration: break if section == BINARY_CONTENT_START: self.within_file = True # get datastream info from the end of the section just before this one # (needed to provide size to upload request) dsinfo = self.get_datastream_info(previous_section) if dsinfo: logger.info('Found encoded datastream %(id)s (%(mimetype)s, size %(size)s, %(type)s %(digest)s)', dsinfo) else: # error if datastream info is not found, because either # size or version date is required to handle content raise Exception('Failed to find datastream information for %s from \n%s' \ % (self.obj.pid, previous_section)) if self.xml_only and not \ dsinfo['mimetype'] in ['text/xml', 'application/rdf+xml', 'application/xml']: # possibly other mimetypes also? try: dsid = dsinfo['id'].split('.')[0] except ValueError: # if dsid doesn't include a .# (for versioning), # use the id as is. dsid = dsinfo['id'] if self.url_credentials: # if url credentials are set, parse the base fedora api # url so they can be inserted at the right place parsed_url = urlparse(self.obj.api.base_url) # reassemble base url, adding in credentials base_url = ''.join([parsed_url.scheme, '://', self.url_credentials, parsed_url.netloc, parsed_url.path]) else: base_url = self.obj.api.base_url # versioned datastream dissemination url content_location = '%sobjects/%s/datastreams/%s/content?asOfDateTime=%s' % \ (base_url, self.obj.pid, dsid, dsinfo['created']) else: upload_args = {} if self.progress_bar: def upload_callback(monitor): self.progress_bar.upload = monitor.bytes_read upload_args = {'callback': upload_callback} # use upload id as content location content_location = self.dest_repo.api.upload(self.encoded_datastream(), size=int(dsinfo['size']), **upload_args) self.foxml_buffer.write(force_bytes('<foxml:contentLocation REF="%s" TYPE="URL"/>' \ % content_location)) elif section == BINARY_CONTENT_END: # should not occur here; this section will be processed by # encoded_datastream method self.within_file = False elif self.within_file: # should not occur here; this section will be pulled by # encoded_datastream method # binary content within a file - ignore here # (handled by encoded_datastream method) continue else: # not start or end of binary content, and not # within a file, so yield as is (e.g., datastream tags # between small files) self.foxml_buffer.write(section) previous_section = section return self.foxml_buffer
Process the archival export and return a buffer with foxml content for ingest into the destination repository. :returns: :class:`io.BytesIO` for ingest, with references to uploaded datastream content or content location urls
def _init_metadata(self):
    """stub"""
    self._provenance_metadata = {
        'element_id': Id(self.my_osid_object_form._authority,
                         self.my_osid_object_form._namespace,
                         'provenanceId'),
        'element_label': 'provenanceId',
        'instructions': 'The item that "gave birth" to this item.',
        'required': False,
        'read_only': False,
        'linked': False,
        'array': False,
        'default_object_values': [''],
        'syntax': 'STRING',
        'minimum_string_length': None,
        'maximum_string_length': None,
        'string_set': []
    }
stub
def get_aggregations(self, query, group_by, stats_field, percents=(50, 95, 99, 99.9), size=100): """ Returns aggregations (rows count + percentile stats) for a given query This is basically the same as the following pseudo-SQL query: SELECT PERCENTILE(stats_field, 75) FROM query GROUP BY group_by LIMIT size https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-bucket-terms-aggregation.html https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-metrics-percentile-aggregation.html Please note that group_by should be provided by a "keyword" field: Fielddata is disabled on text fields by default. Set fielddata=true on [@context.caller] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory.\ Alternatively use a keyword field instead. :type query str :type group_by str :type stats_field str :type percents tuple[int] :type size int :rtype: dict """ body = { "query": { "bool": { "must": [{ "query_string": { "query": query, }, }] }, }, "aggregations": { "group_by_agg": { "terms": { "field": group_by, "size": size, # how many term buckets should be returned out of the overall terms list }, "aggregations": { "field_stats": { "percentiles": { "field": stats_field, "percents": percents } } } } } } # add @timestamp range body['query']['bool']['must'].append(self._get_timestamp_filer()) self._logger.info("Getting aggregations for %s field when grouped by %s", group_by, stats_field) res = self._es.search( body=body, index=self._index, size=0, # we don need any rows from the index, stats is all we need here ) # print(json.dumps(res, indent=True)) aggs = {} """ bucket = { "field_stats": { "values": { "95.0": 20.99858477419025, "99.0": 67.0506954238478, "50.0": 1.0, "99.9": 146.3865495436944 } }, "key": "Wikia\\Service\\Gateway\\ConsulUrlProvider:getUrl", "doc_count": 8912859 } """ for bucket in res['aggregations']['group_by_agg']['buckets']: entry = { "count": bucket['doc_count'] } entry.update(bucket['field_stats']['values']) aggs[bucket['key']] = entry return aggs
Returns aggregations (rows count + percentile stats) for a given query This is basically the same as the following pseudo-SQL query: SELECT PERCENTILE(stats_field, 75) FROM query GROUP BY group_by LIMIT size https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-bucket-terms-aggregation.html https://www.elastic.co/guide/en/elasticsearch/reference/5.5/search-aggregations-metrics-percentile-aggregation.html Please note that group_by should be provided by a "keyword" field: Fielddata is disabled on text fields by default. Set fielddata=true on [@context.caller] in order to load fielddata in memory by uninverting the inverted index. Note that this can however use significant memory.\ Alternatively use a keyword field instead. :type query str :type group_by str :type stats_field str :type percents tuple[int] :type size int :rtype: dict
def tex_parse(string):
    """
    Renders some basic TeX math to HTML.
    """
    string = string.replace('{', '').replace('}', '')

    def tex_replace(match):
        return \
            sub(r'\^(\w)', r'<sup>\1</sup>',
            sub(r'\^\{(.*?)\}', r'<sup>\1</sup>',
            sub(r'\_(\w)', r'<sub>\1</sub>',
            sub(r'\_\{(.*?)\}', r'<sub>\1</sub>',
            sub(r'\\(' + GREEK_LETTERS + ')', r'&\1;', match.group(1))))))
    return mark_safe(sub(r'\$([^\$]*)\$', tex_replace, escape(string)))
Renders some basic TeX math to HTML.
def to_dict(self) -> Dict[str, Any]:
    """Message format according to monitoring service spec"""
    return {
        'type': self.__class__.__name__,
        'channel_identifier': self.channel_identifier,
        'token_network_address': to_normalized_address(self.token_network_address),
        'balance_hash': encode_hex(self.balance_hash),
        'nonce': self.nonce,
        'additional_hash': encode_hex(self.additional_hash),
        'signature': encode_hex(self.signature),
        'chain_id': self.chain_id,
    }
Message format according to monitoring service spec
def json_dumps(obj): """A safe JSON dump function that provides correct diverging numbers for a ECMAscript consumer. """ try: return json.dumps(obj, indent=2, sort_keys=True, allow_nan=False) except ValueError: pass # we don't want to call do_map on the original object since it can # contain objects that need to be converted for JSON. after reading # in the created JSON we get a limited set of possible types we # can encounter json_str = json.dumps(obj, indent=2, sort_keys=True, allow_nan=True) json_obj = json.loads(json_str) def do_map(obj): if obj is None: return None if isinstance(obj, basestring): return obj if isinstance(obj, dict): res = {} for (key, value) in obj.items(): res[key] = do_map(value) return res if isinstance(obj, collections.Iterable): res = [] for el in obj: res.append(do_map(el)) return res # diverging numbers need to be passed as strings otherwise it # will throw a parsing error on the ECMAscript consumer side if math.isnan(obj): return "NaN" if math.isinf(obj): return "Infinity" if obj > 0 else "-Infinity" return obj return json.dumps( do_map(json_obj), indent=2, sort_keys=True, allow_nan=False)
A safe JSON dump function that provides correct diverging numbers for a ECMAscript consumer.
def _generate_current_command(self):
    '''
    Returns a constructed GCode string that contains this driver's axis-current
    settings, plus a small delay to wait for those settings to take effect.
    '''
    values = ['{}{}'.format(axis, value)
              for axis, value in sorted(self.current.items())]
    current_cmd = '{} {}'.format(
        GCODES['SET_CURRENT'],
        ' '.join(values)
    )
    command = '{currents} {code}P{seconds}'.format(
        currents=current_cmd,
        code=GCODES['DWELL'],
        seconds=CURRENT_CHANGE_DELAY
    )
    log.debug("_generate_current_command: {}".format(command))
    return command
Returns a constructed GCode string that contains this driver's axis-current settings, plus a small delay to wait for those settings to take effect.
def __updateJobResultsPeriodic(self): """ Periodic check to see if this is the best model. This should only have an effect if this is the *first* model to report its progress """ if self._isBestModelStored and not self._isBestModel: return while True: jobResultsStr = self._jobsDAO.jobGetFields(self._jobID, ['results'])[0] if jobResultsStr is None: jobResults = {} else: self._isBestModelStored = True if not self._isBestModel: return jobResults = json.loads(jobResultsStr) bestModel = jobResults.get('bestModel', None) bestMetric = jobResults.get('bestValue', None) isSaved = jobResults.get('saved', False) # If there is a best model, and it is not the same as the current model # we should wait till we have processed all of our records to see if # we are the the best if (bestModel is not None) and (self._modelID != bestModel): self._isBestModel = False return # Make sure prediction output stream is ready before we present our model # as "bestModel"; sometimes this takes a long time, so update the model's # timestamp to help avoid getting orphaned self.__flushPredictionCache() self._jobsDAO.modelUpdateTimestamp(self._modelID) metrics = self._getMetrics() jobResults['bestModel'] = self._modelID jobResults['bestValue'] = metrics[self._optimizedMetricLabel] jobResults['metrics'] = metrics jobResults['saved'] = False newResults = json.dumps(jobResults) isUpdated = self._jobsDAO.jobSetFieldIfEqual(self._jobID, fieldName='results', curValue=jobResultsStr, newValue=newResults) if isUpdated or (not isUpdated and newResults==jobResultsStr): self._isBestModel = True break
Periodic check to see if this is the best model. This should only have an effect if this is the *first* model to report its progress
def randomize_molecule_low(molecule, manipulations):
    """Return a randomized copy of the molecule, without the nonbond check."""
    manipulations = copy.copy(manipulations)
    shuffle(manipulations)
    coordinates = molecule.coordinates.copy()
    for manipulation in manipulations:
        manipulation.apply(coordinates)
    return molecule.copy_with(coordinates=coordinates)
Return a randomized copy of the molecule, without the nonbond check.
def from_dataset(cls, dataset, constraints = (), **kwargs):
    """Construct a optimized inverse model from an existing dataset.
    A LWLR forward model is constructed by default.
    """
    fm = LWLRForwardModel(dataset.dim_x, dataset.dim_y, **kwargs)
    fm.dataset = dataset
    im = cls.from_forward(fm, constraints = constraints, **kwargs)
    return im
Construct a optimized inverse model from an existing dataset. A LWLR forward model is constructed by default.
def main(argv=None): """Validate text parsed with FSM or validate an FSM via command line.""" if argv is None: argv = sys.argv try: opts, args = getopt.getopt(argv[1:], 'h', ['help']) except getopt.error as msg: raise Usage(msg) for opt, _ in opts: if opt in ('-h', '--help'): print(__doc__) print(help_msg) return 0 if not args or len(args) > 4: raise Usage('Invalid arguments.') # If we have an argument, parse content of file and display as a template. # Template displayed will match input template, minus any comment lines. with open(args[0], 'r') as template: fsm = TextFSM(template) print('FSM Template:\n%s\n' % fsm) if len(args) > 1: # Second argument is file with example cli input. # Prints parsed tabular result. with open(args[1], 'r') as f: cli_input = f.read() table = fsm.ParseText(cli_input) print('FSM Table:') result = str(fsm.header) + '\n' for line in table: result += str(line) + '\n' print(result, end='') if len(args) > 2: # Compare tabular result with data in third file argument. # Exit value indicates if processed data matched expected result. with open(args[2], 'r') as f: ref_table = f.read() if ref_table != result: print('Data mis-match!') return 1 else: print('Data match!')
Validate text parsed with FSM or validate an FSM via command line.
def allreduce_ring(xs, devices, reduction_fn_string="SUM"): """Compute the reduction of all Tensors and put the result everywhere. Performance-optimized for a ring of devices. Args: xs: a list of n tf.Tensors devices: a list of strings reduction_fn_string: "SUM" or "MAX" Returns: a list of n Tensors Raises: ValueError: if devices is not a list of n strings """ n = len(xs) if len(devices) != n: raise ValueError("devices must be a list of length len(xs)") if n == 1: return xs shape = xs[0].shape.as_list() # tf.logging.info("allreduce_ring shape = %s" % shape) size = None if None in shape else mtf.list_product(shape) if size is None or size < 1024 or size % n != 0: return allreduce_ring_single_shard(xs, devices, reduction_fn_string) def _circular_shift(l, n): n %= len(l) return l[-n:] + l[:-n] def _flatten_and_split(x): # tf.reshape treats [-1] as a special value denoting 1D flattening. return tf.split(tf.reshape(x, [-1]), n) def _concat_and_reshape(xs): return tf.reshape(tf.concat(xs, 0), shape) # [device, shard] x_split = mtf.parallel(devices, _flatten_and_split, xs) x_split_t = mtf.transpose_list_of_lists(x_split) y_split_t = [] for shard in xrange(n): shard_xs = _circular_shift(x_split_t[shard], shard) shard_devices = _circular_shift(devices, shard) shard_ys = allreduce_ring_single_shard( shard_xs, shard_devices, reduction_fn_string) y_split_t.append(_circular_shift(shard_ys, -shard)) y_split = mtf.transpose_list_of_lists(y_split_t) ys = mtf.parallel(devices, _concat_and_reshape, y_split) return ys
Compute the reduction of all Tensors and put the result everywhere. Performance-optimized for a ring of devices. Args: xs: a list of n tf.Tensors devices: a list of strings reduction_fn_string: "SUM" or "MAX" Returns: a list of n Tensors Raises: ValueError: if devices is not a list of n strings
def results(self, dataset_name, index_by, timeframe):
    """ Retrieve results from a Cached Dataset. Read key must be set.
    """
    url = "{0}/{1}/results".format(self._cached_datasets_url, dataset_name)
    index_by = index_by if isinstance(index_by, str) else json.dumps(index_by)
    timeframe = timeframe if isinstance(timeframe, str) else json.dumps(timeframe)
    query_params = {
        "index_by": index_by,
        "timeframe": timeframe
    }
    return self._get_json(
        HTTPMethods.GET, url, self._get_read_key(), params=query_params
    )
Retrieve results from a Cached Dataset. Read key must be set.
def is_valid_address (s):
    """
    returns True if address is a valid Bluetooth address

    valid address are always strings of the form XX:XX:XX:XX:XX:XX
    where X is a hexadecimal character.  For example,
    01:23:45:67:89:AB is a valid address, but
    IN:VA:LI:DA:DD:RE is not
    """
    try:
        pairs = s.split (":")
        if len (pairs) != 6:
            return False
        if not all(0 <= int(b, 16) <= 255 for b in pairs):
            return False
    except:
        return False
    return True
returns True if address is a valid Bluetooth address valid address are always strings of the form XX:XX:XX:XX:XX:XX where X is a hexadecimal character. For example, 01:23:45:67:89:AB is a valid address, but IN:VA:LI:DA:DD:RE is not
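The examples from the docstring above, written out as calls (assumes is_valid_address is in scope):

assert is_valid_address("01:23:45:67:89:AB") is True
assert is_valid_address("IN:VA:LI:DA:DD:RE") is False   # non-hex pairs
assert is_valid_address("01:23:45:67:89") is False      # only five pairs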
def input(self, _in, out, **kw):
    """Input filtering."""
    args = [self.binary or 'cleancss'] + self.rebase_opt
    if self.extra_args:
        args.extend(self.extra_args)
    self.subprocess(args, out, _in)
Input filtering.
def defocusThroughDepth(u, uf, f, fn, k=2.355):
    '''
    return the defocus (mm std) through DOF

    u -> scene point (depth value)
    uf -> in-focus position (the distance at which the scene point
          should be placed in order to be focused)
    f -> focal length
    k -> camera dependent constant (transferring blur circle to PSF),
         2.335 would be FHWD of 2dgaussian
    fn --> f-number (relative aperture)

    equation (3) taken from
    http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736
    Pertuz et.al. "Analysis of focus measure operators for shape-from-focus"

    all parameter should be in same physical unit [mm]

    !! assumes spatial invariant blur
    '''
    # A = f/fn
    return (k/fn) * (f**2*abs(u-uf)) / (u*(uf-f))
return the defocus (mm std) through DOF u -> scene point (depth value) uf -> in-focus position (the distance at which the scene point should be placed in order to be focused) f -> focal length k -> camera dependent constant (transferring blur circle to PSF), 2.335 would be FHWD of 2dgaussian fn --> f-number (relative aperture) equation (3) taken from http://linkinghub.elsevier.com/retrieve/pii/S0031320312004736 Pertuz et.al. "Analysis of focus measure operators for shape-from-focus" all parameter should be in same physical unit [mm] !! assumes spatial invariant blur
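A numeric sketch for defocusThroughDepth above, with made-up camera values (all in mm, as the docstring requires; assumes the function is in scope):

# 50 mm lens at f/2.8, focused at 1 m, scene point at 1.2 m.
sigma = defocusThroughDepth(u=1200.0, uf=1000.0, f=50.0, fn=2.8)
# (2.355/2.8) * (50**2 * 200) / (1200 * 950) ~= 0.37 mm of blur (std)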
def get_parent(self):
    """Return tile from previous zoom level."""
    return None if self.zoom == 0 else self.tile_pyramid.tile(
        self.zoom - 1, self.row // 2, self.col // 2
    )
Return tile from previous zoom level.
def register(self, name, func):
    """
    Register a new callback.\
    When the name/id is not found\
    a new hook is created under its name,\
    meaning the hook is usually created by\
    the first registered callback

    :param str name: Hook name
    :param callable func: A func reference (callback)
    """
    try:
        templatehook = self._registry[name]
    except KeyError:
        templatehook = self._register(name)

    templatehook.register(func)
Register a new callback.\ When the name/id is not found\ a new hook is created under its name,\ meaning the hook is usually created by\ the first registered callback :param str name: Hook name :param callable func: A func reference (callback)
def process_from_web():
    """Return a TrrustProcessor based on the online interaction table.

    Returns
    -------
    TrrustProcessor
        A TrrustProcessor object that has a list of INDRA Statements in its
        statements attribute.
    """
    logger.info('Downloading table from %s' % trrust_human_url)
    res = requests.get(trrust_human_url)
    res.raise_for_status()
    df = pandas.read_table(io.StringIO(res.text))
    tp = TrrustProcessor(df)
    tp.extract_statements()
    return tp
Return a TrrustProcessor based on the online interaction table. Returns ------- TrrustProcessor A TrrustProcessor object that has a list of INDRA Statements in its statements attribute.
def remove_item(self, val):
    """
    Removes given item from the list.

    Args:
        val: Item

    Returns:
        Cache backend response.
    """
    return cache.lrem(self.key, json.dumps(val))
Removes given item from the list. Args: val: Item Returns: Cache backend response.
def unregister_message_callback(self, type_, from_): """ Unregister a callback previously registered with :meth:`register_message_callback`. :param type_: Message type to listen for. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for. :type from_: :class:`~aioxmpp.JID` or :data:`None` :raises KeyError: if no function is currently registered for the given ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to both arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering the super-wildcard with both arguments set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.unregister_callback`, except that the latter is not deprecated. """ if type_ is not None: type_ = self._coerce_enum(type_, structs.MessageType) warnings.warn( "unregister_message_callback is deprecated; use " "aioxmpp.dispatcher.SimpleMessageDispatcher instead", DeprecationWarning, stacklevel=2 ) self._xxx_message_dispatcher.unregister_callback( type_, from_, )
Unregister a callback previously registered with :meth:`register_message_callback`. :param type_: Message type to listen for. :type type_: :class:`~.MessageType` or :data:`None` :param from_: Sender JID to listen for. :type from_: :class:`~aioxmpp.JID` or :data:`None` :raises KeyError: if no function is currently registered for the given ``(type_, from_)`` pair. :raises ValueError: if `type_` is not a valid :class:`~.MessageType` (and cannot be cast to a :class:`~.MessageType`) The match is made on the exact pair; it is not possible to unregister arbitrary listeners by passing :data:`None` to both arguments (i.e. the wildcarding only applies for receiving stanzas, not for unregistering callbacks; unregistering the super-wildcard with both arguments set to :data:`None` is of course possible). .. versionchanged:: 0.7 The `type_` argument is now supposed to be a :class:`~.MessageType` member. .. deprecated:: 0.7 Passing a :class:`str` as `type_` argument is deprecated and will raise a :class:`TypeError` as of the 1.0 release. See the Changelog for :ref:`api-changelog-0.7` for further details on how to upgrade your code efficiently. .. deprecated:: 0.9 This method has been deprecated in favour of and is now implemented in terms of the :class:`aioxmpp.dispatcher.SimpleMessageDispatcher` service. It is equivalent to call :meth:`~.SimpleStanzaDispatcher.unregister_callback`, except that the latter is not deprecated.
def datetimes(self):
    """Return datetimes for this collection as a tuple."""
    if self._datetimes is None:
        self._datetimes = tuple(self.header.analysis_period.datetimes)
    return self._datetimes
Return datetimes for this collection as a tuple.
def get_available_modes(self):
    """Return a list of available mode objects for an Arlo user."""
    resource = "modes"
    resource_event = self.publish_and_get_event(resource)

    if resource_event:
        properties = resource_event.get("properties")
        return properties.get("modes")

    return None
Return a list of available mode objects for an Arlo user.
def clone(cls, model, use_json=True, use_lp=False):
    """
    Make a copy of a model. The model being copied can be of the same type or
    belong to a different solver interface. This is the preferred way of
    copying models.

    Example
    ----------
    >>> new_model = Model.clone(old_model)
    """
    model.update()
    interface = sys.modules[cls.__module__]

    if use_lp:
        warnings.warn("Cloning with LP formats can change variable and constraint ID's.")
        new_model = cls.from_lp(model.to_lp())
        new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
        return new_model

    if use_json:
        new_model = cls.from_json(model.to_json())
        new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
        return new_model

    new_model = cls()
    for variable in model.variables:
        new_variable = interface.Variable.clone(variable)
        new_model._add_variable(new_variable)
    for constraint in model.constraints:
        new_constraint = interface.Constraint.clone(constraint, model=new_model)
        new_model._add_constraint(new_constraint)
    if model.objective is not None:
        new_model.objective = interface.Objective.clone(model.objective, model=new_model)
    new_model.configuration = interface.Configuration.clone(model.configuration, problem=new_model)
    return new_model
Make a copy of a model. The model being copied can be of the same type or belong to a different solver interface. This is the preferred way of copying models. Example ---------- >>> new_model = Model.clone(old_model)
def get_uri_template(urlname, args=None, prefix=""): ''' Utility function to return an URI Template from a named URL in django Copied from django-digitalpaper. Restrictions: - Only supports named urls! i.e. url(... name="toto") - Only support one namespace level - Only returns the first URL possibility. - Supports multiple pattern possibilities (i.e., patterns with non-capturing parenthesis in them) by trying to find a pattern whose optional parameters match those you specified (a parameter is considered optional if it doesn't appear in every pattern possibility) ''' def _convert(template, args=None): """URI template converter""" if not args: args = [] paths = template % dict([p, "{%s}" % p] for p in args) return u'%s/%s' % (prefix, paths) resolver = get_resolver(None) parts = urlname.split(':') if len(parts) > 1 and parts[0] in resolver.namespace_dict: namespace = parts[0] urlname = parts[1] nprefix, resolver = resolver.namespace_dict[namespace] prefix = prefix + '/' + nprefix.rstrip('/') possibilities = resolver.reverse_dict.getlist(urlname) for tmp in possibilities: possibility, pattern = tmp[:2] if not args: # If not args are specified, we only consider the first pattern # django gives us result, params = possibility[0] return _convert(result, params) else: # If there are optionnal arguments passed, use them to try to find # the correct pattern. # First, we need to build a list with all the arguments seen_params = [] for result, params in possibility: seen_params.append(params) # Then build a set to find the common ones, and use it to build the # list of all the expected params common_params = reduce(lambda x, y: set(x) & set(y), seen_params) expected_params = sorted(common_params.union(args)) # Then loop again over the pattern possibilities and return # the first one that strictly match expected params for result, params in possibility: if sorted(params) == expected_params: return _convert(result, params) return None
Utility function to return an URI Template from a named URL in django Copied from django-digitalpaper. Restrictions: - Only supports named urls! i.e. url(... name="toto") - Only support one namespace level - Only returns the first URL possibility. - Supports multiple pattern possibilities (i.e., patterns with non-capturing parenthesis in them) by trying to find a pattern whose optional parameters match those you specified (a parameter is considered optional if it doesn't appear in every pattern possibility)
def _vector_size(v):
    """
    Returns the size of the vector.

    >>> _vector_size([1., 2., 3.])
    3
    >>> _vector_size((1., 2., 3.))
    3
    >>> _vector_size(array.array('d', [1., 2., 3.]))
    3
    >>> _vector_size(np.zeros(3))
    3
    >>> _vector_size(np.zeros((3, 1)))
    3
    >>> _vector_size(np.zeros((1, 3)))
    Traceback (most recent call last):
        ...
    ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
    """
    if isinstance(v, Vector):
        return len(v)
    elif type(v) in (array.array, list, tuple, xrange):
        return len(v)
    elif type(v) == np.ndarray:
        if v.ndim == 1 or (v.ndim == 2 and v.shape[1] == 1):
            return len(v)
        else:
            raise ValueError("Cannot treat an ndarray of shape %s as a vector" % str(v.shape))
    elif _have_scipy and scipy.sparse.issparse(v):
        assert v.shape[1] == 1, "Expected column vector"
        return v.shape[0]
    else:
        raise TypeError("Cannot treat type %s as a vector" % type(v))
Returns the size of the vector. >>> _vector_size([1., 2., 3.]) 3 >>> _vector_size((1., 2., 3.)) 3 >>> _vector_size(array.array('d', [1., 2., 3.])) 3 >>> _vector_size(np.zeros(3)) 3 >>> _vector_size(np.zeros((3, 1))) 3 >>> _vector_size(np.zeros((1, 3))) Traceback (most recent call last): ... ValueError: Cannot treat an ndarray of shape (1, 3) as a vector
def NewDefaultAgency(self, **kwargs):
    """Create a new Agency object and make it the default agency for this Schedule"""
    agency = self._gtfs_factory.Agency(**kwargs)
    if not agency.agency_id:
        agency.agency_id = util.FindUniqueId(self._agencies)
    self._default_agency = agency
    self.SetDefaultAgency(agency, validate=False)  # Blank agency won't validate
    return agency
Create a new Agency object and make it the default agency for this Schedule
def convex_conj(self):
    """The conjugate functional of IndicatorLpUnitBall.

    The convex conjugate functional of an ``Lp`` norm, ``p < infty`` is the
    indicator function on the unit ball defined by the corresponding dual
    norm ``q``, given by ``1/p + 1/q = 1`` and where ``q = infty`` if
    ``p = 1`` [Roc1970]. By the Fenchel-Moreau theorem, the convex conjugate
    functional of indicator function on the unit ball in ``Lq`` is the
    corresponding Lp-norm [BC2011].

    References
    ----------
    [Roc1970] Rockafellar, R. T. *Convex analysis*. Princeton University
    Press, 1970.

    [BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and monotone
    operator theory in Hilbert spaces*. Springer, 2011.
    """
    if self.exponent == np.inf:
        return L1Norm(self.domain)
    elif self.exponent == 2:
        return L2Norm(self.domain)
    else:
        return LpNorm(self.domain, exponent=conj_exponent(self.exponent))
The conjugate functional of IndicatorLpUnitBall. The convex conjugate functional of an ``Lp`` norm, ``p < infty`` is the indicator function on the unit ball defined by the corresponding dual norm ``q``, given by ``1/p + 1/q = 1`` and where ``q = infty`` if ``p = 1`` [Roc1970]. By the Fenchel-Moreau theorem, the convex conjugate functional of indicator function on the unit ball in ``Lq`` is the corresponding Lp-norm [BC2011]. References ---------- [Roc1970] Rockafellar, R. T. *Convex analysis*. Princeton University Press, 1970. [BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and monotone operator theory in Hilbert spaces*. Springer, 2011.
def validate_json_schema(self):
    """Validate the JSON schema. Return list of errors."""
    errors = []

    for work in self:
        for task in work:
            if not task.get_results().validate_json_schema():
                errors.append(task)
        if not work.get_results().validate_json_schema():
            errors.append(work)

    if not self.get_results().validate_json_schema():
        errors.append(self)

    return errors
Validate the JSON schema. Return list of errors.
def to_xdr_object(self):
    """Creates an XDR Memo object for a transaction with MEMO_HASH."""
    return Xdr.types.Memo(type=Xdr.const.MEMO_HASH, hash=self.memo_hash)
Creates an XDR Memo object for a transaction with MEMO_HASH.
def scan_uow_candidates(self): """ method performs two actions: - enlist stale or invalid units of work into reprocessing queue - cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago """ try: since = settings.settings['synergy_start_timeperiod'] uow_list = self.uow_dao.get_reprocessing_candidates(since) except LookupError as e: self.logger.info('flow: no UOW candidates found for reprocessing: {0}'.format(e)) return for uow in uow_list: try: if uow.process_name not in self.managed_handlers: self.logger.debug('process {0} is not known to the Synergy Scheduler. Skipping its UOW.' .format(uow.process_name)) continue thread_handler = self.managed_handlers[uow.process_name] assert isinstance(thread_handler, ManagedThreadHandler) if not thread_handler.process_entry.is_on: self.logger.debug('process {0} is inactive. Skipping its UOW.'.format(uow.process_name)) continue entry = PriorityEntry(uow) if entry in self.reprocess_uows[uow.process_name]: # given UOW is already registered in the reprocessing queue continue # ASSUMPTION: UOW is re-created by a state machine during reprocessing # thus - any UOW older 2 days could be marked as STATE_CANCELED if datetime.utcnow() - uow.created_at > timedelta(hours=settings.settings['gc_life_support_hours']): self._cancel_uow(uow) continue # if the UOW has been idle for more than 1 hour - resubmit it if datetime.utcnow() - uow.submitted_at > timedelta(hours=settings.settings['gc_resubmit_after_hours'])\ or uow.is_invalid: # enlist the UOW into the reprocessing queue self.reprocess_uows[uow.process_name].put(entry) except Exception as e: self.logger.error('flow exception: {0}'.format(e), exc_info=True)
method performs two actions: - enlist stale or invalid units of work into reprocessing queue - cancel UOWs that are older than 2 days and have been submitted more than 1 hour ago
def read_cf1_config(self):
    """Read a flash page from the specified target"""
    target = self._cload.targets[0xFF]
    config_page = target.flash_pages - 1

    return self._cload.read_flash(addr=0xFF, page=config_page)
Read a flash page from the specified target
def _validate_zooms(zooms):
    """
    Return a list of zoom levels.

    Following inputs are converted:
    - int --> [int]
    - dict{min, max} --> range(min, max + 1)
    - [int] --> [int]
    - [int, int] --> range(smaller int, bigger int + 1)
    """
    if isinstance(zooms, dict):
        if any([a not in zooms for a in ["min", "max"]]):
            raise MapcheteConfigError("min and max zoom required")
        zmin = _validate_zoom(zooms["min"])
        zmax = _validate_zoom(zooms["max"])
        if zmin > zmax:
            raise MapcheteConfigError(
                "max zoom must not be smaller than min zoom")
        return list(range(zmin, zmax + 1))
    elif isinstance(zooms, list):
        if len(zooms) == 1:
            return zooms
        elif len(zooms) == 2:
            zmin, zmax = sorted([_validate_zoom(z) for z in zooms])
            return list(range(zmin, zmax + 1))
        else:
            return zooms
    else:
        return [_validate_zoom(zooms)]
Return a list of zoom levels. Following inputs are converted: - int --> [int] - dict{min, max} --> range(min, max + 1) - [int] --> [int] - [int, int] --> range(smaller int, bigger int + 1)
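A sketch of the conversions described in the docstring of _validate_zooms above; it assumes the function, its helper _validate_zoom (taken here to return the validated integer unchanged), and MapcheteConfigError are importable from the same module:

assert _validate_zooms(5) == [5]
assert _validate_zooms({"min": 3, "max": 5}) == [3, 4, 5]
assert _validate_zooms([7, 4]) == [4, 5, 6, 7]   # two ints are treated as a range
assert _validate_zooms([8]) == [8]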
def get_url_parameters(self):
    """Create a dictionary of parameters used in URLs for this model."""
    url_fields = {}
    for field in self.url_fields:
        url_fields[field] = getattr(self, field)
    return url_fields
Create a dictionary of parameters used in URLs for this model.
def remove_all_cts_records_by(self, crypto_idfp):
    """
    Remove all CTS records from the specified player
    :param crypto_idfp:
    :return:
    """
    regex = re.compile('(.+)/cts100record/crypto_idfp(\d+)')
    to_remove = []
    for k, v in self.filter(regex, is_regex=True):
        if v == crypto_idfp:
            match = regex.match(k)
            to_remove.append((match.group(1), int(match.group(2))))

    for i in to_remove:
        self.remove_cts_record(*i)
Remove all CTS records from the specified player :param crypto_idfp: :return:
def ctrl_transfer(self, bmRequestType, bRequest, wValue=0, wIndex=0, data_or_wLength = None, timeout = None): r"""Do a control transfer on the endpoint 0. This method is used to issue a control transfer over the endpoint 0 (endpoint 0 is required to always be a control endpoint). The parameters bmRequestType, bRequest, wValue and wIndex are the same of the USB Standard Control Request format. Control requests may or may not have a data payload to write/read. In cases which it has, the direction bit of the bmRequestType field is used to infer the desired request direction. For host to device requests (OUT), data_or_wLength parameter is the data payload to send, and it must be a sequence type convertible to an array object. In this case, the return value is the number of bytes written in the data payload. For device to host requests (IN), data_or_wLength is either the wLength parameter of the control request specifying the number of bytes to read in data payload, and the return value is an array object with data read, or an array object which the data will be read to, and the return value is the number of bytes read. """ try: buff = util.create_buffer(data_or_wLength) except TypeError: buff = _interop.as_array(data_or_wLength) self._ctx.managed_open() # Thanks to Johannes Stezenbach to point me out that we need to # claim the recipient interface recipient = bmRequestType & 3 rqtype = bmRequestType & (3 << 5) if recipient == util.CTRL_RECIPIENT_INTERFACE \ and rqtype != util.CTRL_TYPE_VENDOR: interface_number = wIndex & 0xff self._ctx.managed_claim_interface(self, interface_number) ret = self._ctx.backend.ctrl_transfer( self._ctx.handle, bmRequestType, bRequest, wValue, wIndex, buff, self.__get_timeout(timeout)) if isinstance(data_or_wLength, array.array) \ or util.ctrl_direction(bmRequestType) == util.CTRL_OUT: return ret elif ret != len(buff) * buff.itemsize: return buff[:ret] else: return buff
r"""Do a control transfer on the endpoint 0. This method is used to issue a control transfer over the endpoint 0 (endpoint 0 is required to always be a control endpoint). The parameters bmRequestType, bRequest, wValue and wIndex are the same of the USB Standard Control Request format. Control requests may or may not have a data payload to write/read. In cases which it has, the direction bit of the bmRequestType field is used to infer the desired request direction. For host to device requests (OUT), data_or_wLength parameter is the data payload to send, and it must be a sequence type convertible to an array object. In this case, the return value is the number of bytes written in the data payload. For device to host requests (IN), data_or_wLength is either the wLength parameter of the control request specifying the number of bytes to read in data payload, and the return value is an array object with data read, or an array object which the data will be read to, and the return value is the number of bytes read.
def find_lemma(self, verb): """ Returns the base form of the given inflected verb, using a rule-based approach. """ v = verb.lower() # Common prefixes: be-finden and emp-finden probably inflect like finden. if not (v.startswith("ge") and v.endswith("t")): # Probably gerund. for prefix in prefixes: if v.startswith(prefix) and v[len(prefix):] in self.inflections: return prefix + self.inflections[v[len(prefix):]] # Common sufixes: setze nieder => niedersetzen. b, suffix = " " in v and v.split()[:2] or (v, "") # Infinitive -ln: trommeln. if b.endswith(("ln", "rn")): return b # Lemmatize regular inflections. for x in ("test", "est", "end", "ten", "tet", "en", "et", "te", "st", "e", "t"): if b.endswith(x): b = b[:-len(x)]; break # Subjunctive: hielte => halten, schnitte => schneiden. for x, y in ( ("ieb", "eib"), ( "ied", "eid"), ( "ief", "auf" ), ( "ieg", "eig" ), ("iel", "alt"), ("ien", "ein"), ("iess", "ass"), (u"ieß", u"aß" ), ( "iff", "eif" ), ("iss", "eiss"), (u"iß", u"eiß"), ( "it", "eid"), ( "oss", "iess"), (u"öss", "iess")): if b.endswith(x): b = b[:-len(x)] + y; break b = b.replace("eeiss", "eiss") b = b.replace("eeid", "eit") # Subjunctive: wechselte => wechseln if not b.endswith(("e", "l")) and not (b.endswith("er") and len(b) >= 3 and not b[-3] in VOWELS): b = b + "e" # abknallst != abknalln => abknallen if b.endswith(("hl", "ll", "ul", "eil")): b = b + "e" # Strip ge- from (likely) gerund: if b.startswith("ge") and v.endswith("t"): b = b[2:] # Corrections (these add about 1.5% accuracy): if b.endswith(("lnde", "rnde")): b = b[:-3] if b.endswith(("ae", "al", u"öe", u"üe")): b = b.rstrip("e") + "te" if b.endswith(u"äl"): b = b + "e" return suffix + b + "n"
Returns the base form of the given inflected verb, using a rule-based approach.
def do_selection_reduction_to_one_parent(selection):
    """ Find and reduce selection to one parent state.

    :param selection:
    :return: state model which is parent of selection or None if root state
    """
    all_models_selected = selection.get_all()

    # check if all elements selected are on one hierarchy level -> TODO or in future are parts of sibling?!
    # if not take the state with the most siblings as the copy root
    parent_m_count_dict = {}
    for model in all_models_selected:
        parent_m_count_dict[model.parent] = parent_m_count_dict[model.parent] + 1 if model.parent in parent_m_count_dict else 1
    parent_m = None
    current_count_parent = 0
    for possible_parent_m, count in parent_m_count_dict.items():
        parent_m = possible_parent_m if current_count_parent < count else parent_m

    # if root no parent exist and only on model can be selected
    if len(selection.states) == 1 and selection.get_selected_state().state.is_root_state:
        parent_m = None
        # kick all selection except root_state
        if len(all_models_selected) > 1:
            selection.set(selection.get_selected_state())

    if parent_m is not None:
        # check and reduce selection
        for model in all_models_selected:
            if model.parent is not parent_m:
                selection.remove(model)

    return parent_m
Find and reduce selection to one parent state. :param selection: :return: state model which is parent of selection or None if root state
def get_stdev(self, col, row): """ Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float """ return javabridge.call(self.jobject, "getStdDev", "(II)D", col, row)
Returns the standard deviation at this location (if valid location). :param col: the 0-based column index :type col: int :param row: the 0-based row index :type row: int :return: the standard deviation :rtype: float
def process(self, plugin, context, instance=None, action=None): """Transmit a `process` request to host Arguments: plugin (PluginProxy): Plug-in to process context (ContextProxy): Filtered context instance (InstanceProxy, optional): Instance to process action (str, optional): Action to process """ plugin = plugin.to_json() instance = instance.to_json() if instance is not None else None return self._dispatch("process", args=[plugin, instance, action])
Transmit a `process` request to host Arguments: plugin (PluginProxy): Plug-in to process context (ContextProxy): Filtered context instance (InstanceProxy, optional): Instance to process action (str, optional): Action to process
def generate_value_label(self, byteorder, encoding): """ Parameters ---------- byteorder : str Byte order of the output encoding : str File encoding Returns ------- value_label : bytes Bytes containing the formatted value label """ self._encoding = encoding bio = BytesIO() null_string = '\x00' null_byte = b'\x00' # len bio.write(struct.pack(byteorder + 'i', self.len)) # labname labname = self._encode(_pad_bytes(self.labname[:32], 33)) bio.write(labname) # padding - 3 bytes for i in range(3): bio.write(struct.pack('c', null_byte)) # value_label_table # n - int32 bio.write(struct.pack(byteorder + 'i', self.n)) # textlen - int32 bio.write(struct.pack(byteorder + 'i', self.text_len)) # off - int32 array (n elements) for offset in self.off: bio.write(struct.pack(byteorder + 'i', offset)) # val - int32 array (n elements) for value in self.val: bio.write(struct.pack(byteorder + 'i', value)) # txt - Text labels, null terminated for text in self.txt: bio.write(self._encode(text + null_string)) bio.seek(0) return bio.read()
Parameters ---------- byteorder : str Byte order of the output encoding : str File encoding Returns ------- value_label : bytes Bytes containing the formatted value label
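For reference, a minimal standalone sketch of the same record layout: it packs a toy two-entry value-label table with struct in the field order written above (len, labname, 3 padding bytes, n, textlen, off[], val[], txt). The label name, the toy mapping, and the total-length formula are assumptions for illustration only.

import struct

labels = {1: "male", 2: "female"}                 # toy value -> label-text mapping (assumed)
labname = b"sex"[:32].ljust(33, b"\x00")          # label name, max 32 chars, null padded
txt = [t.encode("latin-1") + b"\x00" for t in labels.values()]
off, pos = [], 0
for t in txt:                                     # byte offset of each label text
    off.append(pos)
    pos += len(t)
n, text_len = len(labels), pos
total_len = 4 + 4 + 4 * n + 4 * n + text_len      # assumed equivalent of self.len

record = struct.pack("<i", total_len) + labname + b"\x00" * 3
record += struct.pack("<i", n) + struct.pack("<i", text_len)
record += b"".join(struct.pack("<i", o) for o in off)     # off - int32 array
record += b"".join(struct.pack("<i", v) for v in labels)  # val - int32 array
record += b"".join(txt)                                   # null-terminated texts
print(len(record), record[:12])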
def convert_p(element, text):
    """
    Indents the text to match the element's nesting depth.
    """
    depth = -1
    while element:
        if (not element.name == '[document]' and
                not element.parent.get('id') == '__RESTRUCTIFY_WRAPPER__'):
            depth += 1
        element = element.parent
    if text:
        text = ' ' * depth + text
    return text
Indents the text to match the element's nesting depth.
def _is_locked(self): ''' Checks to see if we are already pulling items from the queue ''' if os.path.isfile(self._lck): try: import psutil except ImportError: return True #Lock file exists and no psutil #If psutil is imported with open(self._lck) as f: pid = f.read() return True if psutil.pid_exists(int(pid)) else False else: return False
Checks to see if we are already pulling items from the queue
def lower_key(fn): """ :param fn: a key function :return: a function that wraps around the supplied key function to ensure the returned key is in lowercase. """ def lower(key): try: return key.lower() except AttributeError: return key return process_key(lower, fn)
:param fn: a key function :return: a function that wraps around the supplied key function to ensure the returned key is in lowercase.
def get_assessments_by_query(self, assessment_query):
        """Gets a list of ``Assessments`` matching the given assessment query.

        arg:    assessment_query (osid.assessment.AssessmentQuery): the
                assessment query
        return: (osid.assessment.AssessmentList) - the returned
                ``AssessmentList``
        raise:  NullArgument - ``assessment_query`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  PermissionDenied - authorization failure occurred
        raise:  Unsupported - ``assessment_query`` is not of this service
        *compliance: mandatory -- This method must be implemented.*

        """
        if 'assessmentOfferedId' in assessment_query._query_terms:
            collection = JSONClientValidated('assessment',
                                             collection='AssessmentOffered',
                                             runtime=self._runtime)
            match = '$in' in assessment_query._query_terms['assessmentOfferedId'].keys()
            if match:
                match_identifiers = [ObjectId(Id(i).identifier)
                                     for i in assessment_query._query_terms['assessmentOfferedId']['$in']]
                query = {'$in': match_identifiers}
            else:
                match_identifiers = [ObjectId(Id(i).identifier)
                                     for i in assessment_query._query_terms['assessmentOfferedId']['$nin']]
                query = {'$nin': match_identifiers}
            result = collection.find({
                "_id": query
            })
            assessment_ids = [ObjectId(Id(r['assessmentId']).identifier) for r in result]
            collection = JSONClientValidated('assessment',
                                             collection='Assessment',
                                             runtime=self._runtime)
            result = collection.find({
                "_id": {"$in": assessment_ids}
            })
            return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
        else:
            # and_list = list()
            # or_list = list()
            # for term in assessment_query._query_terms:
            #     and_list.append({term: assessment_query._query_terms[term]})
            # for term in assessment_query._keyword_terms:
            #     or_list.append({term: assessment_query._keyword_terms[term]})
            # if or_list:
            #     and_list.append({'$or': or_list})
            # view_filter = self._view_filter()
            # if view_filter:
            #     and_list.append(view_filter)
            # if and_list:
            #     query_terms = {'$and': and_list}
            #
            #     collection = JSONClientValidated('assessment',
            #                                      collection='Assessment',
            #                                      runtime=self._runtime)
            #     result = collection.find(query_terms).sort('_id', DESCENDING)
            # else:
            #     result = []
            # return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
            and_list = list()
            or_list = list()
            for term in assessment_query._query_terms:
                if '$in' in assessment_query._query_terms[term] and '$nin' in assessment_query._query_terms[term]:
                    and_list.append(
                        {'$or': [{term: {'$in': assessment_query._query_terms[term]['$in']}},
                                 {term: {'$nin': assessment_query._query_terms[term]['$nin']}}]})
                else:
                    and_list.append({term: assessment_query._query_terms[term]})
            for term in assessment_query._keyword_terms:
                or_list.append({term: assessment_query._keyword_terms[term]})
            if or_list:
                and_list.append({'$or': or_list})
            view_filter = self._view_filter()
            if view_filter:
                and_list.append(view_filter)
            if and_list:
                query_terms = {'$and': and_list}
                collection = JSONClientValidated('assessment',
                                                 collection='Assessment',
                                                 runtime=self._runtime)
                result = collection.find(query_terms).sort('_id', DESCENDING)
            else:
                result = []
            return objects.AssessmentList(result, runtime=self._runtime, proxy=self._proxy)
Gets a list of ``Assessments`` matching the given assessment query. arg: assessment_query (osid.assessment.AssessmentQuery): the assessment query return: (osid.assessment.AssessmentList) - the returned ``AssessmentList`` raise: NullArgument - ``assessment_query`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_query`` is not of this service *compliance: mandatory -- This method must be implemented.*
def get_data_by_slug(model, slug, kind='', **kwargs):
    """Get instance data by slug and kind.

    Raise 404 Not Found if there is no data.

    This function requires the model to have a `slug` column.

    :param model: a string, model name in rio.models
    :param slug: a string used to query by `slug`. This requires a slug
                 field in the model definition.
    :param kind: a string specifying which kind of dict transformer should
                 be called.
    :return: a dict or None.
    """
    instance = get_instance_by_slug(model, slug, **kwargs)
    if not instance:
        return
    return ins2dict(instance, kind)
Get instance data by slug and kind. Raise 404 Not Found if there is no data. This function requires the model to have a `slug` column. :param model: a string, model name in rio.models :param slug: a string used to query by `slug`. This requires a slug field in the model definition. :param kind: a string specifying which kind of dict transformer should be called. :return: a dict or None.
def get_lines(handle, line): """ Get zero-indexed line from an open file-like. """ for i, l in enumerate(handle): if i == line: return l
Get zero-indexed line from an open file-like.
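A quick usage check with an in-memory file (assuming get_lines above is in scope):

import io

handle = io.StringIO("alpha\nbeta\ngamma\n")
print(get_lines(handle, 1))   # -> 'beta\n' (line indices are zero-based)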
def flip_coded(self): """Flips the coding of the alleles.""" self.genotypes = 2 - self.genotypes self.reference, self.coded = self.coded, self.reference
Flips the coding of the alleles.
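The flip itself is plain arithmetic on the additive coding; a standalone illustration, independent of the genotype container above:

import numpy as np

genotypes = np.array([0, 1, 2, 1, 2])   # copies of the currently coded allele
flipped = 2 - genotypes                 # now counts the other allele instead
print(flipped)                          # -> [2 1 0 1 0]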
def instantiate_components(self, context): """ Instantiate the defined components .. note:: This method requires the iPOPO core service to be registered. This means that the ``pelix.ipopo.core`` must have been declared in the list of bundles (or installed and started programmatically). :param context: A :class:`~pelix.framework.BundleContext` object :raise BundleException: Error looking for the iPOPO service or starting a component """ with use_ipopo(context) as ipopo: for name, (factory, properties) in self.__state.components.items(): ipopo.instantiate(factory, name, properties)
Instantiate the defined components .. note:: This method requires the iPOPO core service to be registered. This means that the ``pelix.ipopo.core`` must have been declared in the list of bundles (or installed and started programmatically). :param context: A :class:`~pelix.framework.BundleContext` object :raise BundleException: Error looking for the iPOPO service or starting a component
def refresh_db(**kwargs): ''' Update ports with ``port selfupdate`` CLI Example: .. code-block:: bash salt mac pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) cmd = ['port', 'selfupdate'] return salt.utils.mac_utils.execute_return_success(cmd)
Update ports with ``port selfupdate`` CLI Example: .. code-block:: bash salt mac pkg.refresh_db
def _tryMatch(self, textToMatchObject):
        """Try to match the rules of the included context against the text.

        Returns (count, matchedRule) or (None, None) if it doesn't match
        """
        for rule in self.context.rules:
            ruleTryMatchResult = rule.tryMatch(textToMatchObject)
            if ruleTryMatchResult is not None:
                _logger.debug('\tmatched rule %s at %d in included context %s/%s',
                              rule.shortId(),
                              textToMatchObject.currentColumnIndex,
                              self.context.parser.syntax.name,
                              self.context.name)
                return ruleTryMatchResult

        return None
Try to match the rules of the included context against the text. Returns (count, matchedRule) or (None, None) if it doesn't match
def nonterminal(n): """ Create a PEG function to match a nonterminal. """ def match_nonterminal(s, grm=None, pos=0): if grm is None: grm = {} expr = grm[n] return expr(s, grm, pos) return match_nonterminal
Create a PEG function to match a nonterminal.
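A hedged sketch of how the returned matcher defers to the grammar dict at call time. The literal() combinator below is an illustrative stand-in with the same (s, grm, pos) signature, not part of the library shown above:

def literal(lit):
    """Illustrative terminal matcher; returns the matched text or None."""
    def match_literal(s, grm=None, pos=0):
        return lit if s.startswith(lit, pos) else None
    return match_literal

grammar = {"greeting": literal("hello")}
rule = nonterminal("greeting")        # "greeting" is looked up only when matching
print(rule("hello world", grammar))   # -> 'hello'
print(rule("goodbye", grammar))       # -> None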
def get_image_path(definition): """Helper to get path of image from a definition in resource directory. :param definition: A definition (hazard, exposure). :type definition: dict :returns: The definition's image path. :rtype: str """ path = resources_path( 'img', 'wizard', 'keyword-subcategory-%s.svg' % definition['key']) if os.path.exists(path): return path else: return not_set_image_path
Helper to get path of image from a definition in resource directory. :param definition: A definition (hazard, exposure). :type definition: dict :returns: The definition's image path. :rtype: str
def get_anki_phrases(lang='english', limit=None): """ Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."] """ lang = lang.strip().lower()[:3] lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang if lang[:2] == 'en': return get_anki_phrases_english(limit=limit) return sorted(get_data(lang).iloc[:, -1].str.strip().values)
Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
def is_bool_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = _get_dtype(arr_or_dtype) except TypeError: return False if isinstance(arr_or_dtype, CategoricalDtype): arr_or_dtype = arr_or_dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndexClass): # TODO(jreback) # we don't have a boolean Index class # so its object, we need to infer to # guess this return (arr_or_dtype.is_object and arr_or_dtype.inferred_type == 'boolean') elif is_extension_array_dtype(arr_or_dtype): dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return dtype._is_boolean return issubclass(dtype.type, np.bool_)
Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True
def send_method_request(self, method: str, method_params: dict) -> dict: """ Sends user-defined method and method params """ url = '/'.join((self.METHOD_URL, method)) method_params['v'] = self.API_VERSION if self._access_token: method_params['access_token'] = self._access_token response = self.post(url, method_params, timeout=10) response.raise_for_status() return json.loads(response.text)
Sends user-defined method and method params
def export_chat_invite_link( self, chat_id: Union[int, str] ) -> str: """Use this method to generate a new invite link for a chat; any previously generated link is revoked. You must be an administrator in the chat for this to work and have the appropriate admin rights. Args: chat_id (``int`` | ``str``): Unique identifier for the target chat or username of the target channel/supergroup (in the format @username). Returns: On success, the exported invite link as string is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. """ peer = self.resolve_peer(chat_id) if isinstance(peer, types.InputPeerChat): return self.send( functions.messages.ExportChatInvite( peer=peer.chat_id ) ).link elif isinstance(peer, types.InputPeerChannel): return self.send( functions.channels.ExportInvite( channel=peer ) ).link
Use this method to generate a new invite link for a chat; any previously generated link is revoked. You must be an administrator in the chat for this to work and have the appropriate admin rights. Args: chat_id (``int`` | ``str``): Unique identifier for the target chat or username of the target channel/supergroup (in the format @username). Returns: On success, the exported invite link as string is returned. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
def _decodeAddressField(byteIter, smscField=False, log=False):
    """ Decodes the address field at the current position of the bytearray iterator

    :param byteIter: Iterator over bytearray
    :type byteIter: iter(bytearray)
    :return: Tuple containing the address value and the number of bytes read
             (the value is None if the field is empty (zero-length))
    :rtype: tuple
    """
    addressLen = next(byteIter)
    if addressLen > 0:
        toa = next(byteIter)
        ton = (toa & 0x70)  # bits 6,5,4 of type-of-address == type-of-number
        if ton == 0x50:
            # Alphanumeric number
            addressLen = int(math.ceil(addressLen / 2.0))
            septets = unpackSeptets(byteIter, addressLen)
            addressValue = decodeGsm7(septets)
            return (addressValue, (addressLen + 2))
        else:
            # ton == 0x00: Unknown (might be international, local, etc) - leave as is
            # ton == 0x20: National number
            if smscField:
                addressValue = decodeSemiOctets(byteIter, addressLen - 1)
            else:
                if addressLen % 2:
                    addressLen = int(addressLen / 2) + 1
                else:
                    addressLen = int(addressLen / 2)
                addressValue = decodeSemiOctets(byteIter, addressLen)
                addressLen += 1  # for the return value, add the toa byte
            if ton == 0x10:  # International number
                addressValue = '+' + addressValue
            return (addressValue, (addressLen + 1))
    else:
        return (None, 1)
Decodes the address field at the current position of the bytearray iterator :param byteIter: Iterator over bytearray :type byteIter: iter(bytearray) :return: Tuple containing the address value and the number of bytes read (the value is None if the field is empty (zero-length)) :rtype: tuple
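For reference, a standalone illustration (not the module's own helpers) of the nibble-swapped semi-octet layout that the non-alphanumeric branch consumes; the sample number is made up:

# 11-digit international number: length, type-of-address, then semi-octets
address_field = bytearray([0x0B, 0x91, 0x72, 0x28, 0x21, 0x43, 0x65, 0xF7])
length, toa = address_field[0], address_field[1]
digits = []
for octet in address_field[2:]:
    digits.append(str(octet & 0x0F))   # low nibble holds the earlier digit
    hi = (octet >> 4) & 0x0F
    if hi != 0x0F:                     # 0xF pads an odd digit count
        digits.append(str(hi))
number = ("+" if toa & 0x70 == 0x10 else "") + "".join(digits)
print(length, number)                  # -> 11 +27821234567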
def tobytes(self): """Return the bitstring as bytes, padding with zero bits if needed. Up to seven zero bits will be added at the end to byte align. """ d = offsetcopy(self._datastore, 0).rawbytes # Need to ensure that unused bits at end are set to zero unusedbits = 8 - self.len % 8 if unusedbits != 8: d[-1] &= (0xff << unusedbits) return bytes(d)
Return the bitstring as bytes, padding with zero bits if needed. Up to seven zero bits will be added at the end to byte align.
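The padding arithmetic in a nutshell, worked for a 12-bit value (plain Python, independent of the class above):

nbits = 12
unusedbits = 8 - nbits % 8            # 4 zero bits are appended to byte-align
mask = (0xFF << unusedbits) & 0xFF    # 0b11110000: clears the unused low bits
print(unusedbits, bin(mask))          # -> 4 0b11110000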
def check_extension(conn, extension: str) -> bool: """Check to see if an extension is installed.""" query = 'SELECT installed_version FROM pg_available_extensions WHERE name=%s;' with conn.cursor() as cursor: cursor.execute(query, (extension,)) result = cursor.fetchone() if result is None: raise psycopg2.ProgrammingError( 'Extension is not available for installation.', extension ) else: extension_version = result[0] return bool(extension_version)
Check to see if an extension is installed.
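A possible usage sketch; the DSN and extension name are placeholders, and check_extension above is assumed to be in scope:

import psycopg2

conn = psycopg2.connect("dbname=mydb user=postgres")    # placeholder DSN
try:
    print(check_extension(conn, "hstore"))              # True only if already installed
except psycopg2.ProgrammingError as exc:
    print("extension not available:", exc)
finally:
    conn.close()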
def get_git_branch(git_path='git'): """Returns the name of the current git branch """ branch_match = call((git_path, 'rev-parse', '--symbolic-full-name', 'HEAD')) if branch_match == "HEAD": return None else: return os.path.basename(branch_match)
Returns the name of the current git branch
def encode_notifications(tokens, notifications): """ Returns the encoded bytes of tokens and notifications tokens a list of tokens or a string of only one token notifications a list of notifications or a dictionary of only one """ fmt = "!BH32sH%ds" structify = lambda t, p: struct.pack(fmt % len(p), 0, 32, t, len(p), p) binaryify = lambda t: t.decode('hex') if type(notifications) is dict and type(tokens) in (str, unicode): tokens, notifications = ([tokens], [notifications]) if type(notifications) is list and type(tokens) is list: return ''.join(map(lambda y: structify(*y), ((binaryify(t), json.dumps(p, separators=(',',':'), ensure_ascii=False).encode('utf-8')) for t, p in zip(tokens, notifications))))
Returns the encoded bytes of tokens and notifications tokens a list of tokens or a string of only one token notifications a list of notifications or a dictionary of only one
def _from_dict(cls, _dict): """Initialize a UnalignedElement object from a json dictionary.""" args = {} if 'document_label' in _dict: args['document_label'] = _dict.get('document_label') if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'text' in _dict: args['text'] = _dict.get('text') if 'types' in _dict: args['types'] = [ TypeLabelComparison._from_dict(x) for x in (_dict.get('types')) ] if 'categories' in _dict: args['categories'] = [ CategoryComparison._from_dict(x) for x in (_dict.get('categories')) ] if 'attributes' in _dict: args['attributes'] = [ Attribute._from_dict(x) for x in (_dict.get('attributes')) ] return cls(**args)
Initialize a UnalignedElement object from a json dictionary.
def read_input_data(filename): """Helper function to get training data""" logging.info('Opening file %s for reading input', filename) input_file = open(filename, 'r') data = [] labels = [] for line in input_file: tokens = line.split(',', 1) labels.append(tokens[0].strip()) data.append(tokens[1].strip()) return labels, data
Helper function to get training data
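The expected input is one "label,text" record per line, split on the first comma only; a quick check against a temporary file (path and contents are illustrative):

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write("spam,Win a free prize now\n")
    f.write("ham,See you at lunch, around noon\n")
    path = f.name

labels, data = read_input_data(path)
print(labels)    # -> ['spam', 'ham']
print(data[1])   # -> 'See you at lunch, around noon' (later commas are kept)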
def visit_snippet(self, node): """ HTML document generator visit handler """ lang = self.highlightlang linenos = node.rawsource.count('\n') >= self.highlightlinenothreshold - 1 fname = node['filename'] highlight_args = node.get('highlight_args', {}) if 'language' in node: # code-block directives lang = node['language'] highlight_args['force'] = True if 'linenos' in node: linenos = node['linenos'] def warner(msg): self.builder.warn(msg, (self.builder.current_docname, node.line)) highlighted = self.highlighter.highlight_block(node.rawsource, lang, warn=warner, linenos=linenos, **highlight_args) starttag = self.starttag(node, 'div', suffix='', CLASS='highlight-%s snippet' % lang) self.body.append(starttag) self.body.append('<div class="snippet-filename">%s</div>\n''' % (fname,)) self.body.append(highlighted) self.body.append('</div>\n') raise nodes.SkipNode
HTML document generator visit handler
def sub(x, y, context=None): """ Return ``x`` - ``y``. """ return _apply_function_in_current_context( BigFloat, mpfr.mpfr_sub, ( BigFloat._implicit_convert(x), BigFloat._implicit_convert(y), ), context, )
Return ``x`` - ``y``.
def check_geophysical_vars_fill_value(self, ds): ''' Check that geophysical variables contain fill values. :param netCDF4.Dataset ds: An open netCDF dataset ''' results = [] for geo_var in get_geophysical_variables(ds): results.append( self._has_var_attr(ds, geo_var, '_FillValue', '_FillValue', BaseCheck.MEDIUM), ) return results
Check that geophysical variables contain fill values. :param netCDF4.Dataset ds: An open netCDF dataset
def make_request(self, session, url, **kwargs): """Make a HTTP POST request. :param url: The URL to post to. :param data: The data to post. :returns: The response to the request. :rtype: requests.Response """ log.debug('Making request: POST %s %s' % (url, kwargs)) return session.post(url, **kwargs)
Make a HTTP POST request. :param url: The URL to post to. :param data: The data to post. :returns: The response to the request. :rtype: requests.Response
def _perp_eigendecompose(matrix: np.ndarray, rtol: float = 1e-5, atol: float = 1e-8, ) -> Tuple[np.array, List[np.ndarray]]: """An eigendecomposition that ensures eigenvectors are perpendicular. numpy.linalg.eig doesn't guarantee that eigenvectors from the same eigenspace will be perpendicular. This method uses Gram-Schmidt to recover a perpendicular set. It further checks that all eigenvectors are perpendicular and raises an ArithmeticError otherwise. Args: matrix: The matrix to decompose. rtol: Relative threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. atol: Absolute threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. Returns: The eigenvalues and column eigenvectors. The i'th eigenvalue is associated with the i'th column eigenvector. Raises: ArithmeticError: Failed to find perpendicular eigenvectors. """ vals, cols = np.linalg.eig(matrix) vecs = [cols[:, i] for i in range(len(cols))] # Convert list of row arrays to list of column arrays. for i in range(len(vecs)): vecs[i] = np.reshape(vecs[i], (len(vecs[i]), vecs[i].ndim)) # Group by similar eigenvalue. n = len(vecs) groups = _group_similar( list(range(n)), lambda k1, k2: np.allclose(vals[k1], vals[k2], rtol=rtol)) # Remove overlap between eigenvectors with the same eigenvalue. for g in groups: q, _ = np.linalg.qr(np.hstack([vecs[i] for i in g])) for i in range(len(g)): vecs[g[i]] = q[:, i] return vals, vecs
An eigendecomposition that ensures eigenvectors are perpendicular. numpy.linalg.eig doesn't guarantee that eigenvectors from the same eigenspace will be perpendicular. This method uses Gram-Schmidt to recover a perpendicular set. It further checks that all eigenvectors are perpendicular and raises an ArithmeticError otherwise. Args: matrix: The matrix to decompose. rtol: Relative threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. atol: Absolute threshold for determining whether eigenvalues are from the same eigenspace and whether eigenvectors are perpendicular. Returns: The eigenvalues and column eigenvectors. The i'th eigenvalue is associated with the i'th column eigenvector. Raises: ArithmeticError: Failed to find perpendicular eigenvectors.
def _diff_cache_cluster(current, desired): ''' If you need to enhance what modify_cache_cluster() considers when deciding what is to be (or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that needs to be done to make the mappings meaningful should be done in the munging section below as well. This function will ONLY touch settings that are explicitly called out in 'desired' - any settings which might have previously been changed from their 'default' values will not be changed back simply by leaving them out of 'desired'. This is both intentional, and much, much easier to code :) ''' ### The data formats are annoyingly (and as far as I can can tell, unnecessarily) ### different - we have to munge to a common format to compare... if current.get('SecurityGroups') is not None: current['SecurityGroupIds'] = [s['SecurityGroupId'] for s in current['SecurityGroups']] if current.get('CacheSecurityGroups') is not None: current['CacheSecurityGroupNames'] = [c['CacheSecurityGroupName'] for c in current['CacheSecurityGroups']] if current.get('NotificationConfiguration') is not None: current['NotificationTopicArn'] = current['NotificationConfiguration']['TopicArn'] current['NotificationTopicStatus'] = current['NotificationConfiguration']['TopicStatus'] if current.get('CacheParameterGroup') is not None: current['CacheParameterGroupName'] = current['CacheParameterGroup']['CacheParameterGroupName'] modifiable = { 'AutoMinorVersionUpgrade': 'AutoMinorVersionUpgrade', 'AZMode': 'AZMode', 'CacheNodeType': 'CacheNodeType', 'CacheNodeIdsToRemove': None, 'CacheParameterGroupName': 'CacheParameterGroupName', 'CacheSecurityGroupNames': 'CacheSecurityGroupNames', 'EngineVersion': 'EngineVersion', 'NewAvailabilityZones': None, 'NotificationTopicArn': 'NotificationTopicArn', 'NotificationTopicStatus': 'NotificationTopicStatus', 'NumCacheNodes': 'NumCacheNodes', 'PreferredMaintenanceWindow': 'PreferredMaintenanceWindow', 'SecurityGroupIds': 'SecurityGroupIds', 'SnapshotRetentionLimit': 'SnapshotRetentionLimit', 'SnapshotWindow': 'SnapshotWindow' } need_update = {} for m, o in modifiable.items(): if m in desired: if not o: # Always pass these through - let AWS do the math... need_update[m] = desired[m] else: if m in current: # Equivalence testing works fine for current simple type comparisons # This might need enhancement if more complex structures enter the picture if current[m] != desired[m]: need_update[m] = desired[m] return need_update
If you need to enhance what modify_cache_cluster() considers when deciding what is to be (or can be) updated, add it to 'modifiable' below. It's a dict mapping the param as used in modify_cache_cluster() to that in describe_cache_clusters(). Any data fiddlery that needs to be done to make the mappings meaningful should be done in the munging section below as well. This function will ONLY touch settings that are explicitly called out in 'desired' - any settings which might have previously been changed from their 'default' values will not be changed back simply by leaving them out of 'desired'. This is both intentional, and much, much easier to code :)
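A small worked example of the munging and diffing; the cluster attributes are made up, and only keys present in desired are considered:

current = {
    "NumCacheNodes": 2,
    "EngineVersion": "5.0.6",
    "SecurityGroups": [{"SecurityGroupId": "sg-123"}],
}
desired = {
    "NumCacheNodes": 3,
    "EngineVersion": "5.0.6",
    "SecurityGroupIds": ["sg-123", "sg-456"],
}
print(_diff_cache_cluster(current, desired))
# -> {'NumCacheNodes': 3, 'SecurityGroupIds': ['sg-123', 'sg-456']}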
def _make_order(field_path, direction): """Helper for :meth:`order_by`.""" return query_pb2.StructuredQuery.Order( field=query_pb2.StructuredQuery.FieldReference(field_path=field_path), direction=_enum_from_direction(direction), )
Helper for :meth:`order_by`.
def _create_state_data(self, context, resp_args, relay_state): """ Adds the frontend idp entity id to state See super class satosa.frontends.saml2.SAMLFrontend#save_state :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str] """ state = super()._create_state_data(context, resp_args, relay_state) state["target_entity_id"] = context.target_entity_id_from_path() return state
Adds the frontend idp entity id to state See super class satosa.frontends.saml2.SAMLFrontend#save_state :type context: satosa.context.Context :type resp_args: dict[str, str | saml2.samlp.NameIDPolicy] :type relay_state: str :rtype: dict[str, dict[str, str] | str]
def send_command_w_enter(self, *args, **kwargs): """ For 'show run-config' Cisco WLC adds a 'Press Enter to continue...' message Even though pagination is disabled show run-config also has excessive delays in the output which requires special handling. Arguments are the same as send_command_timing() method """ if len(args) > 1: raise ValueError("Must pass in delay_factor as keyword argument") # If no delay_factor use 1 for default value delay_factor = kwargs.get("delay_factor", 1) kwargs["delay_factor"] = self.select_delay_factor(delay_factor) output = self.send_command_timing(*args, **kwargs) if "Press any key" in output or "Press Enter to" in output: new_args = list(args) if len(args) == 1: new_args[0] = self.RETURN else: kwargs["command_string"] = self.RETURN if not kwargs.get("max_loops"): kwargs["max_loops"] = 150 # Send an 'enter' output = self.send_command_timing(*new_args, **kwargs) # WLC has excessive delay after this appears on screen if "802.11b Advanced Configuration" in output: # Defaults to 30 seconds time.sleep(kwargs["delay_factor"] * 30) not_done = True i = 1 while not_done and i <= 150: time.sleep(kwargs["delay_factor"] * 3) i += 1 new_data = "" new_data = self.read_channel() if new_data: output += new_data else: not_done = False strip_prompt = kwargs.get("strip_prompt", True) if strip_prompt: # Had to strip trailing prompt twice. output = self.strip_prompt(output) output = self.strip_prompt(output) return output
For 'show run-config' Cisco WLC adds a 'Press Enter to continue...' message Even though pagination is disabled show run-config also has excessive delays in the output which requires special handling. Arguments are the same as send_command_timing() method
def interactive(f): """Decorator for making functions appear as interactively defined. This results in the function being linked to the user_ns as globals() instead of the module globals(). """ # build new FunctionType, so it can have the right globals # interactive functions never have closures, that's kind of the point if isinstance(f, FunctionType): mainmod = __import__('__main__') f = FunctionType(f.__code__, mainmod.__dict__, f.__name__, f.__defaults__, ) # associate with __main__ for uncanning f.__module__ = '__main__' return f
Decorator for making functions appear as interactively defined. This results in the function being linked to the user_ns as globals() instead of the module globals().
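A short sketch of the observable effect when run as a script: the rebound function resolves its globals through __main__ rather than its defining module.

from types import FunctionType

def helper():
    return "rebound to __main__"

helper = interactive(helper)
main_ns = vars(__import__("__main__"))
print(helper.__module__)                  # -> '__main__'
print(helper.__globals__ is main_ns)      # -> True
print(isinstance(helper, FunctionType))   # -> True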
def sanitizer(name, replacements=[(':','_'), ('/','_'), ('\\','_')]): """ String sanitizer to avoid problematic characters in filenames. """ for old,new in replacements: name = name.replace(old,new) return name
String sanitizer to avoid problematic characters in filenames.
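For example, with the default replacement table and with a custom one:

print(sanitizer("hazard:flood/2024\\v1"))            # -> 'hazard_flood_2024_v1'
print(sanitizer("a:b", replacements=[(":", "-")]))   # -> 'a-b'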
def settings_module(self): """Gets SETTINGS_MODULE variable""" settings_module = parse_conf_data( os.environ.get( self.ENVVAR_FOR_DYNACONF, self.SETTINGS_FILE_FOR_DYNACONF ), tomlfy=True, ) if settings_module != getattr(self, "SETTINGS_MODULE", None): self.set("SETTINGS_MODULE", settings_module) return self.SETTINGS_MODULE
Gets SETTINGS_MODULE variable
def cupy_wrapper(func):
    """A wrapper function that converts numpy ndarray arguments to cupy
    arrays, and converts any cupy arrays returned by the wrapped function
    into numpy ndarrays.
    """
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        args = list(args)
        for n, a in enumerate(args):
            if isinstance(a, np.ndarray):
                args[n] = cp.asarray(a)
        for k, v in kwargs.items():
            if isinstance(v, np.ndarray):
                kwargs[k] = cp.asarray(v)
        rtn = func(*args, **kwargs)
        if isinstance(rtn, (list, tuple)):
            # Rebuild the sequence: tuples do not support item assignment
            rtn = type(rtn)([cp.asnumpy(a) if isinstance(a, cp.core.core.ndarray)
                             else a for a in rtn])
        else:
            if isinstance(rtn, cp.core.core.ndarray):
                rtn = cp.asnumpy(rtn)
        return rtn
    return wrapped
A wrapper function that converts numpy ndarray arguments to cupy arrays, and converts any cupy arrays returned by the wrapped function into numpy ndarrays.
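A possible usage sketch; it requires a working CuPy/CUDA installation, and the wrapped function is a toy example:

import numpy as np

@cupy_wrapper
def double(x):
    return 2 * x                       # x arrives as a cupy array on the GPU

out = double(np.arange(4))
print(type(out).__module__, out)       # -> numpy [0 2 4 6]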