code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def setValidityErrorHandler(self, err_func, warn_func, arg=None):
    """Register error and warning handlers for DTD validation.

    The handlers will be called back as ``f(msg, arg)``.

    :param err_func: callable invoked for each validation error message
    :param warn_func: callable invoked for each validation warning message
    :param arg: opaque value passed through unchanged to both callbacks
    """
    # Delegate to the C-level binding; self._o is the underlying libxml2 object.
    libxml2mod.xmlSetValidErrors(self._o, err_func, warn_func, arg)
Register error and warning handlers for DTD validation. These will be called back as f(msg,arg)
def get_parent_path(index=2):
    # type: (int) -> str
    """Return the absolute parent directory of the caller's path.

    Falls back to the parent of the current working directory when the
    caller cannot be resolved (e.g. a CLI invoked through stdin).
    """
    try:
        base = _caller_path(index)
    except RuntimeError:
        # No resolvable caller frame/file: use the working directory instead.
        base = os.getcwd()
    return os.path.abspath(os.path.join(base, os.pardir))
Get the caller's parent path to sys.path If the caller is a CLI through stdin, the parent of the current working directory is used
def match_in(grammar, text):
    """Determine if there is a match for grammar in text."""
    # scanString lazily yields matches; any() stops at the first one.
    matches = grammar.parseWithTabs().scanString(text)
    return any(True for _ in matches)
Determine if there is a match for grammar in text.
def process_request(self, request, response):
    """Logs the basic endpoint requested"""
    # Build the log line first, then hand it to the configured logger.
    line = 'Requested: {0} {1} {2}'.format(
        request.method, request.relative_uri, request.content_type)
    self.logger.info(line)
Logs the basic endpoint requested
def get_headers_from_environ(environ):
    """Get a wsgiref.headers.Headers object with headers from the environment.

    Headers in environ are prefixed with 'HTTP_', are all uppercase, and have
    had dashes replaced with underscores.  This strips the HTTP_ prefix and
    changes underscores back to dashes before adding them to the returned set
    of headers.

    Args:
      environ: An environ dict for the request as defined in PEP-333.

    Returns:
      A wsgiref.headers.Headers object that's been filled in with any HTTP
      headers found in environ.
    """
    headers = wsgiref.headers.Headers([])
    # BUG FIX: iteritems() is Python 2 only; items() works on both 2 and 3.
    for header, value in environ.items():
        if header.startswith('HTTP_'):
            headers[header[5:].replace('_', '-')] = value
    # Content-Type is special; it does not start with 'HTTP_'.
    if 'CONTENT_TYPE' in environ:
        headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']
    return headers
Get a wsgiref.headers.Headers object with headers from the environment. Headers in environ are prefixed with 'HTTP_', are all uppercase, and have had dashes replaced with underscores. This strips the HTTP_ prefix and changes underscores back to dashes before adding them to the returned set of headers. Args: environ: An environ dict for the request as defined in PEP-333. Returns: A wsgiref.headers.Headers object that's been filled in with any HTTP headers found in environ.
def cancel_task(all, task_id):
    """
    Executor for `globus task cancel`

    Cancels either every in-progress task (--all) or one given task ID.
    """
    # Exactly one of --all / TASK_ID must be supplied (XOR via bool addition).
    if bool(all) + bool(task_id) != 1:
        raise click.UsageError(
            "You must pass EITHER the special --all flag "
            "to cancel all in-progress tasks OR a single "
            "task ID to cancel."
        )

    client = get_client()

    if all:
        from sys import maxsize

        # Collect the IDs of every in-progress TRANSFER/DELETE task.
        task_ids = [
            task_row["task_id"]
            for task_row in client.task_list(
                filter="type:TRANSFER,DELETE/status:ACTIVE,INACTIVE",
                fields="task_id",
                num_results=maxsize,  # FIXME want to ask for "unlimited" set
            )
        ]
        task_count = len(task_ids)
        if not task_ids:
            raise click.ClickException("You have no in-progress tasks.")

        def cancellation_iterator():
            # Lazily cancel each task, yielding (task_id, response_data);
            # consuming this iterator is what performs the cancellations.
            for i in task_ids:
                yield (i, client.cancel_task(i).data)

        def json_converter(res):
            return {
                "results": [x for i, x in cancellation_iterator()],
                "task_ids": task_ids,
            }

        def _custom_text(res):
            for (i, (task_id, data)) in enumerate(cancellation_iterator(), start=1):
                safeprint(
                    u"{} ({} of {}): {}".format(task_id, i, task_count, data["message"])
                )

        # FIXME: this is kind of an abuse of formatted_print because the
        # text format and json converter are doing their own thing, not really
        # interacting with the "response data" (None). Is there a better way of
        # handling this?
        formatted_print(None, text_format=_custom_text, json_converter=json_converter)
    else:
        res = client.cancel_task(task_id)
        formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
Executor for `globus task cancel`
def zero_year_special_case(from_date, to_date, start, end):
    """strptime does not resolve a 0000 year, we must handle this.

    Decide whether ``from_date``/``to_date`` form a valid earlier-to-later
    interval when one or both dates carry the unparseable year 0000.

    :param from_date: date string: YYYY, YYYY-MM, or YYYY-MM-DD
    :param to_date: date string: YYYY, YYYY-MM, or YYYY-MM-DD
    :param start: 'pos' or 'neg', sign of the start year
    :param end: 'pos' or 'neg', sign of the end year
    :return: True if the interval runs from earlier to later; False
        otherwise.  (Falls through to None for combinations the original
        logic never handled, e.g. pos/pos with no 0000 year.)
    :raises ValueError: if a 0000-year date has an unsupported length
    """
    def _month_day(date):
        # Fill a missing month/day with the first possibility (Jan / the 1st).
        if len(date) == 4:           # YYYY
            return 1, 1
        if len(date) == 7:           # YYYY-MM
            return int(date[5:7]), 1
        if len(date) == 10:          # YYYY-MM-DD
            return int(date[5:7]), int(date[8:10])
        # BUG FIX: the original silently left fm/fd (tm/td) unbound here,
        # causing a NameError later; fail loudly instead.
        raise ValueError("unsupported date format: %r" % (date,))

    if start == 'pos' and end == 'pos':
        # always interval from earlier to later
        if from_date.startswith('0000') and not to_date.startswith('0000'):
            return True
        # always interval from later to earlier
        if not from_date.startswith('0000') and to_date.startswith('0000'):
            return False
        # an interval from 0000-MM-DD/0000-MM-DD: compare month/day only
        if from_date.startswith('0000') and to_date.startswith('0000'):
            if from_date == to_date:
                return True
            # BUG FIX: compare (month, day) lexicographically.  The old code
            # required fd <= td even when fm < tm, so e.g.
            # 0000-01-31/0000-02-01 was wrongly rejected.
            return _month_day(from_date) <= _month_day(to_date)
    # these cases are always one way or the other
    # "-0000" is an invalid edtf
    elif start == 'neg' and end == 'neg':
        return False
    # False unless start is not "0000"
    elif start == 'neg' and end == 'pos':
        return not from_date.startswith("0000")
strptime does not resolve a 0000 year, we must handle this.
def params(self, **kwargs):
    """
    Specify query params to be used when executing the search.

    All the keyword arguments will override the current values.  See
    https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
    for all available parameters.

    Example::

        s = Search()
        s = s.params(routing='user-1', preference='local')
    """
    # Never mutate self: work on a clone and return it (immutable-style API).
    clone = self._clone()
    clone._params.update(kwargs)
    return clone
Specify query params to be used when executing the search. All the keyword arguments will override the current values. See https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search for all available parameters. Example:: s = Search() s = s.params(routing='user-1', preference='local')
def get_key_value_pairs(self, subsystem, filename):
    """
    Read the lines of the given file from the given subsystem
    and split the lines into key-value pairs.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.

    :param subsystem: cgroup subsystem name; must be present in this object
        (enforced by the assert below)
    :param filename: file name suffix; the actual file read is named
        "<subsystem>.<filename>" inside the subsystem's directory
    :return: key-value pairs as produced by
        util.read_key_value_pairs_from_file
    """
    assert subsystem in self
    return util.read_key_value_pairs_from_file(self.per_subsystem[subsystem], subsystem + '.' + filename)
Read the lines of the given file from the given subsystem and split the lines into key-value pairs. Do not include the subsystem name in the option name. Only call this method if the given subsystem is available.
def get_item(self, key):
    """
    Returns the value associated with the key.

    :param key: key to look up
    :return: the evaluated value, or None if the key is missing or the
        stored text cannot be evaluated (an error message is printed in
        both cases)
    """
    keys = list(self.keys())

    # make sure it exists
    if key not in keys:  # idiom fix: `key not in` instead of `not key in`
        self.print_message("ERROR: '"+str(key)+"' not found.")
        return None

    raw = self.get_value(1, keys.index(key))
    try:
        # SECURITY NOTE: eval() executes arbitrary code; this is only safe
        # if the stored values come from a trusted source.
        return eval(raw)
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        self.print_message("ERROR: '"+str(raw)+"' cannot be evaluated.")
        return None
Returns the value associated with the key.
def cli_aliases(self):
    r"""Developer script aliases.

    Builds a mapping of command-line names to CLI classes.  A top-level
    alias maps directly to its class; an alias that belongs to a scripting
    group maps to a nested ``{alias: class}`` dict under the group name.
    Raises ``Exception`` on any alias collision.
    """
    scripting_groups = []
    aliases = {}
    for cli_class in self.cli_classes:
        instance = cli_class()
        if getattr(instance, "alias", None):
            scripting_group = getattr(instance, "scripting_group", None)
            if scripting_group:
                scripting_groups.append(scripting_group)
                entry = (scripting_group, instance.alias)
                # A bare alias with the same name as this group is ambiguous.
                if (scripting_group,) in aliases:
                    message = "alias conflict between scripting group"
                    message += " {!r} and {}"
                    message = message.format(
                        scripting_group, aliases[(scripting_group,)].__name__
                    )
                    raise Exception(message)
                # Two classes may not share (group, alias).
                if entry in aliases:
                    message = "alias conflict between {} and {}"
                    message = message.format(
                        aliases[entry].__name__, cli_class.__name__
                    )
                    raise Exception(message)
                aliases[entry] = cli_class
            else:
                entry = (instance.alias,)
                # A bare alias may not collide with an existing group name.
                if entry in scripting_groups:
                    message = "alias conflict between {}"
                    message += " and scripting group {!r}"
                    message = message.format(cli_class.__name__, instance.alias)
                    raise Exception(message)
                if entry in aliases:
                    message = "alias conflict be {} and {}"
                    message = message.format(cli_class.__name__, aliases[entry])
                    raise Exception(message)
                aliases[(instance.alias,)] = cli_class
        else:
            # No alias: the class is registered under its program name.
            if instance.program_name in scripting_groups:
                message = "Alias conflict between {}"
                message += " and scripting group {!r}"
                message = message.format(cli_class.__name__, instance.program_name)
                raise Exception(message)
            aliases[(instance.program_name,)] = cli_class
    # Flatten the (group, alias) tuples into the nested output mapping.
    alias_map = {}
    for key, value in aliases.items():
        if len(key) == 1:
            alias_map[key[0]] = value
        else:
            if key[0] not in alias_map:
                alias_map[key[0]] = {}
            alias_map[key[0]][key[1]] = value
    return alias_map
r"""Developer script aliases.
def author_to_dict(obj):
    """Who needs a switch/case statement when you can instead use this easy
    to comprehend drivel?

    Converts a GitAuthor/InputGitAuthor object into a plain dict; raises
    RuntimeError for any other type.
    """
    type_name = type(obj).__name__
    # GitAuthor has name,email,date properties
    if type_name == 'GitAuthor':
        return {'name': obj.name, 'email': obj.email}
    # InputGitAuthor only has _identity, which returns a dict
    # XXX consider trying to rationalize this upstream...
    if type_name == 'InputGitAuthor':
        return obj._identity
    raise RuntimeError("unsupported type {t}".format(t=type_name))
Who needs a switch/case statement when you can instead use this easy to comprehend drivel?
def make_number(value, lineno, type_=None):
    """ Wrapper: creates a constant number node.

    :param value: numeric constant value
    :param lineno: source line number, used for error reporting
    :param type_: optional explicit type for the constant
    :return: a symbols.NUMBER AST node
    """
    return symbols.NUMBER(value, type_=type_, lineno=lineno)
Wrapper: creates a constant number node.
def model(x_train, y_train, x_test, y_test):
    """Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    (The ``{{...}}`` expressions below are hyperas template placeholders
    that get substituted before this function is actually executed — the
    body is not valid Python until that substitution happens.)

    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    """
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    keras_model = Sequential()
    keras_model.add(Dense(512, input_shape=(784,)))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense({{choice([256, 512, 1024])}}))
    keras_model.add(Activation('relu'))
    keras_model.add(Dropout({{uniform(0, 1)}}))
    keras_model.add(Dense(10))
    keras_model.add(Activation('softmax'))

    rms = RMSprop()
    keras_model.compile(loss='categorical_crossentropy', optimizer=rms,
                        metrics=['acc'])

    keras_model.fit(x_train, y_train,
                    batch_size={{choice([64, 128])}},
                    epochs=1,
                    verbose=2,
                    validation_data=(x_test, y_test))
    score, acc = keras_model.evaluate(x_test, y_test, verbose=0)
    print('Test accuracy:', acc)
    # Weights are pickled separately because to_yaml() only serializes the
    # architecture, not the trained parameters.
    return {'loss': -acc, 'status': STATUS_OK, 'model': keras_model.to_yaml(),
            'weights': pickle.dumps(keras_model.get_weights())}
Model providing function: Create Keras model with double curly brackets dropped-in as needed. Return value has to be a valid python dictionary with two customary keys: - loss: Specify a numeric evaluation metric to be minimized - status: Just use STATUS_OK and see hyperopt documentation if not feasible The last one is optional, though recommended, namely: - model: specify the model just created so that we can later use it again.
def average_colors(c1, c2):
    '''Average the values of two colors together.

    Each color is an (r, g, b) indexable; the result is the component-wise
    integer mean as an (r, g, b) tuple.
    '''
    return tuple(int((c1[i] + c2[i]) / 2) for i in range(3))
Average the values of two colors together
def get_font_glyph_data(font):
    """Return information for each glyph in a font

    :param font: a fontTools TTFont-like object
    :return: list of dicts with 'unicode', 'name' and 'contours' keys, or
        None if no usable cmap subtable could be read
    """
    from fontbakery.constants import (PlatformID,
                                      WindowsEncodingID)
    font_data = []

    try:
        subtable = font['cmap'].getcmap(PlatformID.WINDOWS,
                                        WindowsEncodingID.UNICODE_BMP)
        if not subtable:
            # Well... Give it a chance here...
            # It may be using a different Encoding_ID value
            subtable = font['cmap'].tables[0]
        cmap = subtable.cmap
    except Exception:
        # BUG FIX: narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt and SystemExit.
        return None

    # Reverse the mapping to look up codepoints by glyph name.
    cmap_reversed = dict(zip(cmap.values(), cmap.keys()))

    for glyph_name in font.getGlyphSet().keys():
        if glyph_name in cmap_reversed:
            uni_glyph = cmap_reversed[glyph_name]
            contours = glyph_contour_count(font, glyph_name)
            font_data.append({
                'unicode': uni_glyph,
                'name': glyph_name,
                # Kept as a single-element set to preserve the existing
                # output shape consumers rely on.
                'contours': {contours}
            })
    return font_data
Return information for each glyph in a font
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, on_error='raise', **kwargs): """Query the segment database for each flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. Entries in this dict will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of known segments during which to query, if not given, existing known segments for flags will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with each flag, default: `True`. on_error : `str` how to handle an error querying for one flag, one of - `'raise'` (default): raise the Exception - `'warn'`: print a warning - `'ignore'`: move onto the next flag as if nothing happened **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. 
Returns ------- self : `DataQualityDict` a reference to the modified DataQualityDict """ # check on_error flag if on_error not in ['raise', 'warn', 'ignore']: raise ValueError("on_error must be one of 'raise', 'warn', " "or 'ignore'") # format source source = urlparse(source) # perform query for all segments if source.netloc and segments is not None: segments = SegmentList(map(Segment, segments)) tmp = type(self).query(self.keys(), segments, url=source.geturl(), on_error=on_error, **kwargs) elif not source.netloc: tmp = type(self).read(source.geturl(), **kwargs) # apply padding and wrap to given known segments for key in self: if segments is None and source.netloc: try: tmp = {key: self[key].query( self[key].name, self[key].known, **kwargs)} except URLError as exc: if on_error == 'ignore': pass elif on_error == 'warn': warnings.warn('Error querying for %s: %s' % (key, exc)) else: raise continue self[key].known &= tmp[key].known self[key].active = tmp[key].active if pad: self[key] = self[key].pad(inplace=True) if segments is not None: self[key].known &= segments self[key].active &= segments return self
Query the segment database for each flag's active segments. This method assumes all of the metadata for each flag have been filled. Minimally, the following attributes must be filled .. autosummary:: ~DataQualityFlag.name ~DataQualityFlag.known Segments will be fetched from the database, with any :attr:`~DataQualityFlag.padding` added on-the-fly. Entries in this dict will be modified in-place. Parameters ---------- source : `str` source of segments for this flag. This must be either a URL for a segment database or a path to a file on disk. segments : `SegmentList`, optional a list of known segments during which to query, if not given, existing known segments for flags will be used. pad : `bool`, optional, default: `True` apply the `~DataQualityFlag.padding` associated with each flag, default: `True`. on_error : `str` how to handle an error querying for one flag, one of - `'raise'` (default): raise the Exception - `'warn'`: print a warning - `'ignore'`: move onto the next flag as if nothing happened **kwargs any other keyword arguments to be passed to :meth:`DataQualityFlag.query` or :meth:`DataQualityFlag.read`. Returns ------- self : `DataQualityDict` a reference to the modified DataQualityDict
def _have_conf(self, magic_hash=None): """Get the daemon current configuration state If the daemon has received a configuration from its arbiter, this will return True If a `magic_hash` is provided it is compared with the one included in the daemon configuration and this function returns True only if they match! :return: boolean indicating if the daemon has a configuration :rtype: bool """ self.app.have_conf = getattr(self.app, 'cur_conf', None) not in [None, {}] if magic_hash is not None: # Beware, we got an str in entry, not an int magic_hash = int(magic_hash) # I've got a conf and a good one return self.app.have_conf and self.app.cur_conf.magic_hash == magic_hash return self.app.have_conf
Get the daemon current configuration state If the daemon has received a configuration from its arbiter, this will return True If a `magic_hash` is provided it is compared with the one included in the daemon configuration and this function returns True only if they match! :return: boolean indicating if the daemon has a configuration :rtype: bool
def main():
    """The command line interface for the ``pip-accel`` program.

    Parses ``sys.argv`` and either delegates straight to ``pip`` (when no
    ``install`` subcommand is present) or runs the accelerated install.
    Every path ends by exiting or replacing the process.
    """
    arguments = sys.argv[1:]
    # If no arguments are given, the help text of pip-accel is printed.
    if not arguments:
        usage()
        sys.exit(0)
    # If no install subcommand is given we pass the command line straight
    # to pip without any changes and exit immediately afterwards.
    if 'install' not in arguments:
        # This will not return (execvp replaces the current process).
        os.execvp('pip', ['pip'] + arguments)
    else:
        # Drop the 'install' token; the accelerator implies it.
        arguments = [arg for arg in arguments if arg != 'install']
        config = Config()
        # Initialize logging output.
        coloredlogs.install(
            fmt=config.log_format,
            level=config.log_verbosity,
        )
        # Adjust verbosity based on -v, -q, --verbose, --quiet options.
        for argument in list(arguments):
            if match_option(argument, '-v', '--verbose'):
                coloredlogs.increase_verbosity()
            elif match_option(argument, '-q', '--quiet'):
                coloredlogs.decrease_verbosity()
        # Perform the requested action(s).
        try:
            accelerator = PipAccelerator(config)
            accelerator.install_from_arguments(arguments)
        except NothingToDoError as e:
            # Don't print a traceback for this (it's not very user friendly) and
            # exit with status zero to stay compatible with pip. For more details
            # please refer to https://github.com/paylogic/pip-accel/issues/47.
            logger.warning("%s", e)
            sys.exit(0)
        except Exception:
            logger.exception("Caught unhandled exception!")
            sys.exit(1)
The command line interface for the ``pip-accel`` program.
def _build_amps_list(self, amp_value, processlist): """Return the AMPS process list according to the amp_value Search application monitored processes by a regular expression """ ret = [] try: # Search in both cmdline and name (for kernel thread, see #1261) for p in processlist: add_it = False if (re.search(amp_value.regex(), p['name']) is not None): add_it = True else: for c in p['cmdline']: if (re.search(amp_value.regex(), c) is not None): add_it = True break if add_it: ret.append({'pid': p['pid'], 'cpu_percent': p['cpu_percent'], 'memory_percent': p['memory_percent']}) except (TypeError, KeyError) as e: logger.debug("Can not build AMPS list ({})".format(e)) return ret
Return the AMPS process list according to the amp_value Search application monitored processes by a regular expression
def _process_thread(self, client):
    """Process a single client.

    Launches a GRR FileFinder flow that downloads ``self.files`` from the
    client, waits for it, and appends ``(fqdn, downloaded_path)`` to
    ``self.state.output`` when anything was collected.

    Args:
      client: GRR client object to act on.
    """
    file_list = self.files
    if not file_list:
        # Nothing to collect for this client.
        return
    print('Filefinder to collect {0:d} items'.format(len(file_list)))

    # Build a FileFinder flow that downloads every requested path.
    flow_action = flows_pb2.FileFinderAction(
        action_type=flows_pb2.FileFinderAction.DOWNLOAD)
    flow_args = flows_pb2.FileFinderArgs(
        paths=file_list,
        action=flow_action,)
    flow_id = self._launch_flow(client, 'FileFinder', flow_args)
    # Block until the flow finishes, then fetch its results.
    self._await_flow(client, flow_id)
    collected_flow_data = self._download_files(client, flow_id)
    if collected_flow_data:
        print('{0!s}: Downloaded: {1:s}'.format(flow_id, collected_flow_data))
        fqdn = client.data.os_info.fqdn.lower()
        self.state.output.append((fqdn, collected_flow_data))
Process a single client. Args: client: GRR client object to act on.
def symlink(source, link_name):
    """ Method to allow creating symlinks on Windows

    Uses ``os.symlink`` when available, otherwise falls back to the Win32
    ``CreateSymbolicLinkW`` API via ctypes.  A no-op if ``link_name``
    already points at ``source``.

    :param source: target path the link will point to
    :param link_name: path of the symlink to create
    :raises OSError: (via ``ctypes.WinError()``) if the Win32 call fails
    """
    # Already points at the right target -> nothing to do.
    if os.path.islink(link_name) and os.readlink(link_name) == source:
        return
    os_symlink = getattr(os, "symlink", None)
    if callable(os_symlink):
        os_symlink(source, link_name)
    else:
        # os.symlink unavailable (older Windows Pythons): call the Win32 API.
        import ctypes
        csl = ctypes.windll.kernel32.CreateSymbolicLinkW
        csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
        csl.restype = ctypes.c_ubyte
        # Flag 1 = directory symlink, 0 = file symlink.
        flags = 1 if os.path.isdir(source) else 0
        if csl(link_name, source, flags) == 0:
            raise ctypes.WinError()
Method to allow creating symlinks on Windows
def current_op(self, include_all=False):
    """Get information on operations currently running.

    :Parameters:
      - `include_all` (optional): if ``True`` also list currently
        idle operations in the result
    """
    cmd = SON([("currentOp", 1), ("$all", include_all)])
    with self.__client._socket_for_writes() as sock_info:
        if sock_info.max_wire_version >= 4:
            # Newer servers support currentOp as a real command.
            return sock_info.command("admin", cmd)
        else:
            # Older servers: query the virtual $cmd.sys.inprog collection.
            spec = {"$all": True} if include_all else {}
            # NOTE(review): this method mixes self.__client (above) and
            # self.client (below) — presumably both resolve to the same
            # client object; confirm.
            x = helpers._first_batch(sock_info, "admin", "$cmd.sys.inprog",
                                     spec, -1, True, self.codec_options,
                                     ReadPreference.PRIMARY, cmd,
                                     self.client._event_listeners)
            return x.get('data', [None])[0]
Get information on operations currently running. :Parameters: - `include_all` (optional): if ``True`` also list currently idle operations in the result
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    Implements equation 3.5.1-1 page 148 for mean value and equation
    3.5.5-2 page 151 for total standard deviation.

    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # d=0 here — the meaning of the d term is defined by _get_mean;
    # TODO(review): confirm against the parent implementation.
    mean = self._get_mean(imt, rup.mag, rup.hypo_depth, dists.rrup, d=0)
    stddevs = self._get_stddevs(stddev_types, dists.rrup)
    # Apply the site amplification for the given vs30 values.
    mean = self._apply_amplification_factor(mean, sites.vs30)
    return mean, stddevs
Implements equation 3.5.1-1 page 148 for mean value and equation 3.5.5-2 page 151 for total standard deviation. See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
def levenshtein(s1, s2, allow_substring=False):
    """Return the Levenshtein distance between two strings.

    The Levenshtein distance (a.k.a "edit difference") is the number of
    characters that need to be substituted, inserted or deleted to
    transform s1 into s2.

    Setting the `allow_substring` parameter to True allows s1 to be a
    substring of s2, so that, for example, "hello" and "hello there" would
    have a distance of zero.

    :param string s1: The first string
    :param string s2: The second string
    :param bool allow_substring: Whether to allow s1 to be a substring of s2
    :returns: Levenshtein distance.
    :rtype int
    """
    len1, len2 = len(s1), len(s2)
    # Row for the empty prefix of s1.  With substring matching, s2 may be
    # consumed for free before the match starts, so that row is all zeros.
    previous = [0 if allow_substring else j for j in range(len2 + 1)]
    for i in range(1, len1 + 1):
        current = [i] + [0] * len2
        for j in range(1, len2 + 1):
            current[j] = min(previous[j] + 1,          # deletion
                             current[j - 1] + 1,       # insertion
                             previous[j - 1] + (s1[i - 1] != s2[j - 1]))
        previous = current
    # With substring matching, the match may also end anywhere in s2.
    return min(previous) if allow_substring else previous[len2]
Return the Levenshtein distance between two strings. The Levenshtein distance (a.k.a "edit difference") is the number of characters that need to be substituted, inserted or deleted to transform s1 into s2. Setting the `allow_substring` parameter to True allows s1 to be a substring of s2, so that, for example, "hello" and "hello there" would have a distance of zero. :param string s1: The first string :param string s2: The second string :param bool allow_substring: Whether to allow s1 to be a substring of s2 :returns: Levenshtein distance. :rtype int
def is_valid(number):
    """determines whether the card number is valid."""
    digits = str(number)
    # Non-numeric input can never be valid; otherwise the last digit must
    # equal the check digit computed from the rest.
    return digits.isdigit() and int(digits[-1]) == get_check_digit(digits[:-1])
determines whether the card number is valid.
def _getDict(j9Page):
    """Parses a Journal Title Abbreviations page

    Note the pages are not well formatted HTML as the <DT> tags are not
    closed, so HTML parsers (Beautiful Soup) do not work. This is a simple
    parser that only works on these webpages and may fail if they are
    changed.

    For Backend

    :param j9Page: an open, readable page object yielding UTF-8 bytes
    :return: dict mapping journal tag -> list of journal names
    """
    slines = j9Page.read().decode('utf-8').split('\n')
    # Skip everything before the opening <DL>; raises IndexError if absent.
    while slines.pop(0) != "<DL>":
        pass
    # The first entry uses a slightly different marker than later ones.
    currentName = slines.pop(0).split('"></A><DT>')[1]
    currentTag = slines.pop(0).split("<B><DD>\t")[1]
    j9Dict = {}
    while True:
        try:
            j9Dict[currentTag].append(currentName)
        except KeyError:
            j9Dict[currentTag] = [currentName]
        try:
            currentName = slines.pop(0).split('</B><DT>')[1]
            currentTag = slines.pop(0).split("<B><DD>\t")[1]
        except IndexError:
            # Ran out of lines, or a line without the expected marker:
            # treated as the end of the listing.
            break
    return j9Dict
Parses a Journal Title Abbreviations page. Note: the pages are not well formatted HTML as the <DT> tags are not closed, so HTML parsers (Beautiful Soup) do not work. This is a simple parser that only works on these webpages and may fail if they are changed. For Backend
def list_all_commands(self):
    """ Returns a list of all the Workbench commands"""
    # Public commands are all callable members whose name is not
    # underscore-prefixed.
    members = inspect.getmembers(self, predicate=inspect.isroutine)
    return [name for name, _unused in members if not name.startswith('_')]
Returns a list of all the Workbench commands
def _nextPage(self, offset):
    """Retrieves the next set of results from the service.

    Issues a Solr facet query starting at ``offset`` and stores the
    resulting facet list in ``self.res`` (empty list on failure), resetting
    ``self.index`` to 0.
    """
    self.logger.debug("Iterator crecord=%s" % str(self.crecord))
    # Facet-only query: rows=0 means no documents, just facet counts.
    params = {
        'q': self.q,
        'rows': '0',
        'facet': 'true',
        'facet.field': self.field,
        'facet.limit': str(self.pagesize),
        'facet.offset': str(offset),
        'facet.zeros': 'false',
        'wt': 'python',
    }
    if self.fq is not None:
        params['fq'] = self.fq
    request = urllib.parse.urlencode(params, doseq=True)
    rsp = self.client.doPost(
        self.client.solrBase + '', request, self.client.formheaders
    )
    # SECURITY NOTE: eval() of the raw 'wt=python' response executes
    # arbitrary code; this assumes a fully trusted Solr server.
    data = eval(rsp.read())
    try:
        self.res = data['facet_counts']['facet_fields'][self.field]
        self.logger.debug(self.res)
    except Exception:
        # Missing facet structure in the response -> no more results.
        self.res = []
    self.index = 0
Retrieves the next set of results from the service.
def update_confirmation_comment(self, confirmation_comment_id, confirmation_comment_dict):
    """
    Updates a confirmation comment

    :param confirmation_comment_id: the confirmation comment id
    :param confirmation_comment_dict: dict
    :return: dict
    """
    request_kwargs = {
        'resource': CONFIRMATION_COMMENTS,
        'billomat_id': confirmation_comment_id,
        'send_data': confirmation_comment_dict,
    }
    return self._create_put_request(**request_kwargs)
Updates a confirmation comment :param confirmation_comment_id: the confirmation comment id :param confirmation_comment_dict: dict :return: dict
def get_column_cursor_position(self, column):
    """
    Return the relative cursor position for this column at the current
    line.  The target column is clamped to the boundaries of the line
    before the offset is computed.
    """
    clamped = min(max(column, 0), len(self.current_line))
    return clamped - self.cursor_position_col
Return the relative cursor position for this column at the current line. (It will stay between the boundaries of the line in case of a larger number.)
def register(linter):
    """required method to auto register this checker

    :param linter: the pylint linter instance to register the checkers with
    """
    linter.register_checker(ClassChecker(linter))
    linter.register_checker(SpecialMethodsChecker(linter))
required method to auto register this checker
def mul_inv(a, b):
    """
    Modular inversion a mod b via the extended Euclidean algorithm.

    :param a: value to invert
    :param b: modulus
    :return: x such that (a * x) % b == 1
    """
    modulus = b
    coeff_prev, coeff = 0, 1
    if b == 1:
        return 1
    while a > 1:
        quotient, remainder = divmod(a, b)
        a, b = b, remainder
        coeff_prev, coeff = coeff - quotient * coeff_prev, coeff_prev
    # Normalize into [0, modulus).
    if coeff < 0:
        coeff += modulus
    return coeff
Modular inversion a mod b :param a: :param b: :return:
def get_member_profile(self, member_id): ''' a method to retrieve member profile details :param member_id: integer with member id from member profile :return: dictionary with member profile details inside [json] key profile_details = self.objects.profile.schema ''' # https://www.meetup.com/meetup_api/docs/members/:member_id/#get title = '%s.get_member_profile' % self.__class__.__name__ # validate inputs input_fields = { 'member_id': member_id } for key, value in input_fields.items(): if value: object_title = '%s(%s=%s)' % (title, key, str(value)) self.fields.validate(value, '.%s' % key, object_title) # construct member id if not member_id: raise IndexError('%s requires member id argument.' % title) # compose request fields url = '%s/members/%s' % (self.endpoint, str(member_id)) params = { 'fields': 'gender,birthday,last_event,messaging_pref,next_event,other_services,privacy,self,stats' } # send requests profile_details = self._get_request(url, params=params) # construct method output if profile_details['json']: profile_details['json'] = self._reconstruct_member(profile_details['json']) return profile_details
a method to retrieve member profile details :param member_id: integer with member id from member profile :return: dictionary with member profile details inside [json] key profile_details = self.objects.profile.schema
def _execute(self, execute_inputs, execute_outputs, backward_execution=False):
    """Calls the custom execute function of the script.py of the state

    :param execute_inputs: input data handed to the user script
    :param execute_outputs: output data dict filled by the user script
    :param backward_execution: True when the state is executed backwards
    :return: the resulting Outcome, or None for backward execution
    """
    self._script.build_module()

    outcome_item = self._script.execute(self, execute_inputs, execute_outputs, backward_execution)

    # in the case of backward execution the outcome is not relevant
    if backward_execution:
        return

    # If the state was preempted, the state must be left on the preempted outcome
    if self.preempted:
        return Outcome(-2, "preempted")

    # Outcome id was returned
    if outcome_item in self.outcomes:
        return self.outcomes[outcome_item]

    # Outcome name was returned
    for outcome_id, outcome in self.outcomes.items():
        if outcome.name == outcome_item:
            return self.outcomes[outcome_id]

    # Unknown outcome: log it and fall back to the aborted outcome.
    logger.error("Returned outcome of {0} not existing: {1}".format(self, outcome_item))
    return Outcome(-1, "aborted")
Calls the custom execute function of the script.py of the state
def _get_esxcluster_proxy_details():
    '''
    Returns the running esxcluster's proxy details as a 10-tuple:
    (vcenter, username, password, protocol, port, mechanism, principal,
    domain, datacenter, cluster)
    '''
    details = __salt__['esxcluster.get_details']()
    keys = ('vcenter', 'username', 'password', 'protocol', 'port',
            'mechanism', 'principal', 'domain', 'datacenter', 'cluster')
    return tuple(details.get(key) for key in keys)
Returns the running esxcluster's proxy details
def SpinBasisKet(*numer_denom, hs):
    """Constructor for a :class:`BasisKet` for a :class:`SpinSpace`

    For a half-integer spin system::

        >>> hs = SpinSpace('s', spin=(3, 2))
        >>> assert SpinBasisKet(1, 2, hs=hs) == BasisKet("+1/2", hs=hs)

    For an integer spin system::

        >>> hs = SpinSpace('s', spin=1)
        >>> assert SpinBasisKet(1, hs=hs) == BasisKet("+1", hs=hs)

    Note that ``BasisKet(1, hs=hs)`` with an integer index (which would
    hypothetically refer to ``BasisKet("0", hs=hs)`` is not allowed for spin
    systems::

        >>> BasisKet(1, hs=hs)
        Traceback (most recent call last):
        ...
        TypeError: label_or_index must be an instance of one of str, SpinIndex; not int

    Raises:
        TypeError: if `hs` is not a :class:`SpinSpace` or the wrong number of
            positional arguments is given
        ValueError: if any of the positional arguments are out range for the
            given `hs`
    """
    # hs.spin exposes as_numer_denom(); only a SpinSpace-like hs has it
    try:
        spin_numer, spin_denom = hs.spin.as_numer_denom()
    except AttributeError:
        raise TypeError(
            "hs=%s for SpinBasisKet must be a SpinSpace instance" % hs)
    assert spin_denom in (1, 2)
    if spin_denom == 1:  # integer spin
        if len(numer_denom) != 1:
            raise TypeError(
                "SpinBasisKet requires exactly one positional argument for an "
                "integer-spin Hilbert space")
        numer = numer_denom[0]
        if numer < -spin_numer or numer > spin_numer:
            raise ValueError(
                "spin quantum number %s must be in range (%s, %s)"
                % (numer, -spin_numer, spin_numer))
        # positive quantum numbers carry an explicit sign in the label, e.g. "+1"
        label = str(numer)
        if numer > 0:
            label = "+" + label
        return BasisKet(label, hs=hs)
    else:  # half-integer spin
        if len(numer_denom) != 2:
            raise TypeError(
                "SpinBasisKet requires exactly two positional arguments for a "
                "half-integer-spin Hilbert space")
        numer, denom = numer_denom
        numer = int(numer)
        denom = int(denom)
        if denom != 2:
            raise ValueError(
                "The second positional argument (denominator of the spin "
                "quantum number) must be 2, not %s" % denom)
        if numer < -spin_numer or numer > spin_numer:
            raise ValueError(
                "spin quantum number %s/%s must be in range (%s/2, %s/2)"
                % (numer, denom, -spin_numer, spin_numer))
        # half-integer labels look like "+1/2", "-3/2"
        label = str(numer)
        if numer > 0:
            label = "+" + label
        label = label + "/2"
        return BasisKet(label, hs=hs)
Constructor for a :class:`BasisKet` for a :class:`SpinSpace` For a half-integer spin system:: >>> hs = SpinSpace('s', spin=(3, 2)) >>> assert SpinBasisKet(1, 2, hs=hs) == BasisKet("+1/2", hs=hs) For an integer spin system:: >>> hs = SpinSpace('s', spin=1) >>> assert SpinBasisKet(1, hs=hs) == BasisKet("+1", hs=hs) Note that ``BasisKet(1, hs=hs)`` with an integer index (which would hypothetically refer to ``BasisKet("0", hs=hs)`` is not allowed for spin systems:: >>> BasisKet(1, hs=hs) Traceback (most recent call last): ... TypeError: label_or_index must be an instance of one of str, SpinIndex; not int Raises: TypeError: if `hs` is not a :class:`SpinSpace` or the wrong number of positional arguments is given ValueError: if any of the positional arguments are out range for the given `hs`
def file_stat(self, filters=all_true):
    """Count the files and directories under this directory (recursing into
    sub-folders) and sum up the file sizes.

    :returns: a dict like ``{"file": number of files,
      "dir": number of directories, "size": total size in bytes}``
    """
    self.assert_is_dir_and_exists()

    counters = {"file": 0, "dir": 0, "size": 0}
    for entry in self.select(filters=filters, recursive=True):
        if entry.is_file():
            counters["file"] += 1
            counters["size"] += entry.size
        elif entry.is_dir():
            counters["dir"] += 1
    return counters
Find out how many files and directories there are, and the total size (including files in sub-folders). :returns: stat, a dict like ``{"file": number of files, "dir": number of directories, "size": total size in bytes}`` **中文文档** 返回一个目录中的文件, 文件夹, 大小的统计数据。
def path_to_tuple(path, windows=False):
    """
    Split `chan_path` into individual parts and form a tuple (used as key).
    """
    separator = '\\' if windows else '/'
    parts = path.split(separator)
    # Normalize UTF-8 encoding to consistent form so cache lookups will work, see
    # https://docs.python.org/3.6/library/unicodedata.html#unicodedata.normalize
    return tuple(normalize('NFD', part) for part in parts)
Split `chan_path` into individual parts and form a tuple (used as key).
def clean_extra(self):
    """Clean extra files/directories specified by get_extra_paths()"""
    for extra_path in self.get_extra_paths():
        # silently skip paths that are already gone
        if not os.path.exists(extra_path):
            continue
        cleaner = self._clean_directory if os.path.isdir(extra_path) else self._clean_file
        cleaner(extra_path)
Clean extra files/directories specified by get_extra_paths()
def _get_resource_per_page(self, resource, per_page=1000, page=1, params=None):
    """Fetch one page of `resource`, requesting `per_page` items on page `page`.

    Extra query parameters may be passed via `params`; the pagination keys
    always override caller-supplied keys of the same name.
    """
    assert isinstance(resource, str)
    pagination = {'per_page': per_page, 'page': page}
    if params:
        params.update(pagination)
    else:
        params = pagination
    return self._create_get_request(resource=resource, params=params)
Gets specific data per resource page and per page
def value(self):
    """Value of a reference property.

    You can set the reference with a Part, Part id or None value.
    Ensure that the model of the provided part, matches the configured model

    :return: a :class:`Part` or None
    :raises APIError: When unable to find the associated :class:`Part`

    Example
    -------
    Get the wheel reference property

    >>> part = project.part('Bike')
    >>> wheels_ref_property = part.property('Wheels')
    >>> isinstance(wheels_ref_property, MultiReferenceProperty)
    True

    The value returns a list of Parts or is an empty list

    >>> type(wheels_ref_property.value) in (list, tuple)
    True

    Get the selection of wheel instances:

    >>> wheel_choices = wheels_ref_property.choices()

    Choose random wheel from the wheel_choices:

    >>> from random import choice
    >>> wheel_choice_1 = choice(wheel_choices)
    >>> wheel_choice_2 = choice(wheel_choices)

    Set chosen wheel
    1: provide a single wheel:

    >>> wheels_ref_property.value = [wheel_choice_1]

    2: provide multiple wheels:

    >>> wheels_ref_property.value = [wheel_choice_1, wheel_choice_2]

    """
    # nothing referenced yet (None or empty) -> no Parts to resolve
    if not self._value:
        return None
    # lazily resolve the stored raw references (dicts with an 'id' key) to
    # Part objects with a single client call, and memoise the result
    if not self._cached_values and isinstance(self._value, (list, tuple)):
        ids = [v.get('id') for v in self._value]
        self._cached_values = list(self._client.parts(id__in=','.join(ids), category=None))
    return self._cached_values
Value of a reference property. You can set the reference with a Part, Part id or None value. Ensure that the model of the provided part, matches the configured model :return: a :class:`Part` or None :raises APIError: When unable to find the associated :class:`Part` Example ------- Get the wheel reference property >>> part = project.part('Bike') >>> wheels_ref_property = part.property('Wheels') >>> isinstance(wheels_ref_property, MultiReferenceProperty) True The value returns a list of Parts or is an empty list >>> type(wheels_ref_property.value) in (list, tuple) True Get the selection of wheel instances: >>> wheel_choices = wheels_ref_property.choices() Choose random wheel from the wheel_choices: >>> from random import choice >>> wheel_choice_1 = choice(wheel_choices) >>> wheel_choice_2 = choice(wheel_choices) Set chosen wheel 1: provide a single wheel: >>> wheels_ref_property.value = [wheel_choice_1] 2: provide multiple wheels: >>> wheels_ref_property.value = [wheel_choice_1, wheel_choice_2]
def alltoall(self, x, mesh_axis, split_axis, concat_axis):
    """Grouped alltoall.

    Args:
      x: a LaidOutTensor
      mesh_axis: an integer the mesh axis along which to group
      split_axis: an integer (the Tensor axis along which to split)
      concat_axis: an integer (the Tensor axis along which to concatenate)
    Returns:
      a LaidOutTensor
    """
    # bind the tensor axes up-front; the collective supplies the group args
    ring_fn = functools.partial(
        alltoall_ring, split_axis=split_axis, concat_axis=concat_axis)
    return self._collective_with_groups(x, [mesh_axis], ring_fn)
Grouped alltoall. Args: x: a LaidOutTensor mesh_axis: an integer the mesh axis along which to group split_axis: an integer (the Tensor axis along which to split) concat_axis: an integer (the Tensor axis along which to concatenate) Returns: a LaidOutTensor
def request(self, rule, view_class, annotation):
    """Make a request against the app.

    This attempts to use the schema to replace any url params in the path
    pattern. If there are any unused parameters in the schema, after
    substituting the ones in the path, they will be sent as query string
    parameters or form parameters. The substituted values are taken from
    the "example" value in the schema.

    Returns a dict with the following keys:

    - **url** -- Example URL, with url_prefix added to the path pattern,
      and the example values substituted in for URL params.
    - **method** -- HTTP request method (e.g. "GET").
    - **params** -- A dictionary of query string or form parameters.
    - **response** -- The text response to the request.

    :param route: Werkzeug Route object.
    :param view_class: View class for the annotated method.
    :param annotation: Annotation for the method to be requested.
    :type annotation: doctor.resource.ResourceAnnotation
    :returns: dict
    """
    headers = self._get_headers(rule, annotation)
    example_values = self._get_example_values(rule, annotation)
    # If any of the example values for DELETE/GET HTTP methods are dicts
    # or lists, we will need to json dump them before building the rule,
    # otherwise the query string parameter won't get parsed correctly
    # by doctor.
    if annotation.http_method.upper() in ('DELETE', 'GET'):
        # iterate over a copy since values are replaced in-place
        for key, value in list(example_values.items()):
            if isinstance(value, (dict, list)):
                example_values[key] = json.dumps(value)
    # substitute the example values into the URL pattern; unknown keys are
    # appended as query string parameters (append_unknown=True)
    _, path = rule.build(example_values, append_unknown=True)
    if annotation.http_method.upper() not in ('DELETE', 'GET'):
        # body-carrying methods: strip the query string off and send the
        # example values in the request body instead
        parsed_path = parse.urlparse(path)
        path = parsed_path.path
        params = example_values
    else:
        params = {}
    # dispatch through the test client method matching the HTTP verb
    method_name = annotation.http_method.lower()
    method = getattr(self.test_client, method_name)
    if method_name in ('post', 'put'):
        response = method(path, data=json.dumps(params),
                          headers=headers,
                          content_type='application/json')
    else:
        response = method(path, data=params, headers=headers)
    return {
        'url': '/'.join([self.url_prefix, path.lstrip('/')]),
        'method': annotation.http_method.upper(),
        'params': params,
        'response': response.data,
    }
Make a request against the app. This attempts to use the schema to replace any url params in the path pattern. If there are any unused parameters in the schema, after substituting the ones in the path, they will be sent as query string parameters or form parameters. The substituted values are taken from the "example" value in the schema. Returns a dict with the following keys: - **url** -- Example URL, with url_prefix added to the path pattern, and the example values substituted in for URL params. - **method** -- HTTP request method (e.g. "GET"). - **params** -- A dictionary of query string or form parameters. - **response** -- The text response to the request. :param route: Werkzeug Route object. :param view_class: View class for the annotated method. :param annotation: Annotation for the method to be requested. :type annotation: doctor.resource.ResourceAnnotation :returns: dict
def get_download_urls(self, package_name, version="", pkg_type="all"):
    """Query PyPI for pkg download URI for a packge

    :param package_name: name of the package on PyPI
    :param version: exact version to query; when empty, every released
        version is checked
    :param pkg_type: "source", "egg" or "all" -- which distribution types
        to include
    :return: list of download URLs
    """
    if version:
        versions = [version]
    else:
        # If they don't specify version, show em all.
        (package_name, versions) = self.query_versions_pypi(package_name)

    all_urls = []
    for ver in versions:
        metadata = self.release_data(package_name, ver)
        for urls in self.release_urls(package_name, ver):
            if pkg_type == "source" and urls['packagetype'] == "sdist":
                all_urls.append(urls['url'])
            elif pkg_type == "egg" and \
                    urls['packagetype'].startswith("bdist"):
                all_urls.append(urls['url'])
            elif pkg_type == "all":
                # All
                all_urls.append(urls['url'])

        # Try the package's metadata directly in case there's nothing
        # returned by XML-RPC's release_urls()
        # FIX: dict.has_key() was removed in Python 3; `in` works in both
        # Python 2 and 3.
        if metadata and 'download_url' in metadata and \
                metadata['download_url'] != "UNKNOWN" and \
                metadata['download_url'] is not None:
            if metadata['download_url'] not in all_urls:
                if pkg_type != "all":
                    url = filter_url(pkg_type, metadata['download_url'])
                    if url:
                        all_urls.append(url)
    return all_urls
Query PyPI for the download URIs of a package
def barcode(self, code, bc, width=255, height=2, pos='below', font='a'):
    """ Print Barcode

    :param code: barcode data to print
    :param bc: barcode type (UPC-A, UPC-E, EAN13, EAN8, CODE39, ITF, NW7)
    :param width: module width (1-255)
    :param height: barcode height (2-6)
    :param pos: HRI text position: 'above', 'below', 'both' or 'off'
    :param font: HRI font, 'a' or 'b'
    :raises BarcodeSizeError: if width or height is out of range
    :raises BarcodeTypeError: if bc is not a supported symbology
    :raises BarcodeCodeError: if no code data was supplied
    """
    # Align Bar Code()
    self._raw(TXT_ALIGN_CT)
    # Height
    # FIX: the original condition `height >= 2 or height <= 6` was always
    # true, so out-of-range heights were never rejected.
    if 2 <= height <= 6:
        self._raw(BARCODE_HEIGHT)
    else:
        raise BarcodeSizeError()
    # Width
    # FIX: same always-true `or` condition as height; validate the range.
    if 1 <= width <= 255:
        self._raw(BARCODE_WIDTH)
    else:
        raise BarcodeSizeError()
    # Font
    if font.upper() == "B":
        self._raw(BARCODE_FONT_B)
    else:  # DEFAULT FONT: A
        self._raw(BARCODE_FONT_A)
    # Position
    if pos.upper() == "OFF":
        self._raw(BARCODE_TXT_OFF)
    elif pos.upper() == "BOTH":
        self._raw(BARCODE_TXT_BTH)
    elif pos.upper() == "ABOVE":
        self._raw(BARCODE_TXT_ABV)
    else:  # DEFAULT POSITION: BELOW
        self._raw(BARCODE_TXT_BLW)
    # Type
    if bc.upper() == "UPC-A":
        self._raw(BARCODE_UPC_A)
    elif bc.upper() == "UPC-E":
        self._raw(BARCODE_UPC_E)
    elif bc.upper() == "EAN13":
        self._raw(BARCODE_EAN13)
    elif bc.upper() == "EAN8":
        self._raw(BARCODE_EAN8)
    elif bc.upper() == "CODE39":
        self._raw(BARCODE_CODE39)
    elif bc.upper() == "ITF":
        self._raw(BARCODE_ITF)
    elif bc.upper() == "NW7":
        self._raw(BARCODE_NW7)
    else:
        raise BarcodeTypeError()
    # Print Code
    if code:
        self._raw(code)
    else:
        # FIX: raised consistently with the other errors in this method
        # (was `exception.BarcodeCodeError()`)
        raise BarcodeCodeError()
Print Barcode
def _enter(ins):
    """ Enter function sequence for doing a function start
    ins.quad[1] contains size (in bytes) of local variables
    Use '__fastcall__' as 1st parameter to prepare a fastcall function
    (no local variables).
    """
    # fastcall functions get no frame at all
    if ins.quad[1] == '__fastcall__':
        return []

    # set up IX as the frame pointer
    output = ['push ix', 'ld ix, 0', 'add ix, sp']

    local_bytes = int(ins.quad[1])
    if not local_bytes:
        return output

    if local_bytes < 7:
        # small frames: push a zeroed HL once per word, plus one byte if odd
        output.append('ld hl, 0')
        output.extend(['push hl'] * (local_bytes >> 1))
        if local_bytes & 1:
            output.append('push hl')
            output.append('inc sp')
    else:
        # larger frames: move SP down by local_bytes and clear with LDIR
        output.append('ld hl, -%i' % local_bytes)
        output.append('add hl, sp')
        output.append('ld sp, hl')
        output.append('ld (hl), 0')
        output.append('ld bc, %i' % (local_bytes - 1))
        output.append('ld d, h')
        output.append('ld e, l')
        output.append('inc de')
        output.append('ldir')

    return output
Enter function sequence for doing a function start ins.quad[1] contains size (in bytes) of local variables Use '__fastcall__' as 1st parameter to prepare a fastcall function (no local variables).
def crop(img, i, j, h, w):
    """Crop the given PIL Image.

    Args:
        img (PIL Image): Image to be cropped.
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.

    Returns:
        PIL Image: Cropped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    # PIL expects a (left, upper, right, lower) box
    left, upper = j, i
    right, lower = j + w, i + h
    return img.crop((left, upper, right, lower))
Crop the given PIL Image. Args: img (PIL Image): Image to be cropped. i (int): i in (i,j) i.e coordinates of the upper left corner. j (int): j in (i,j) i.e coordinates of the upper left corner. h (int): Height of the cropped image. w (int): Width of the cropped image. Returns: PIL Image: Cropped image.
def show(self, uuid=None, term=None):
    """Show the information related to unique identities.

    This method prints information related to unique identities such as
    identities or enrollments.

    When <uuid> is given, it will only show information about the unique
    identity related to <uuid>.

    When <term> is set, it will only show information about those unique
    identities that have any attribute (name, email, username, source)
    which match with the given term. This parameter does not have any
    effect when <uuid> is set.

    :param uuid: unique identifier
    :param term: term to match with unique identities data
    """
    try:
        if uuid:
            # exact lookup by unique identifier
            uidentities = api.unique_identities(self.db, uuid)
        elif term:
            # free-text search across identity attributes
            uidentities = api.search_unique_identities(self.db, term)
        else:
            # no filter: list every unique identity
            uidentities = api.unique_identities(self.db)

        for uid in uidentities:
            # Add enrollments to a new property 'roles'
            enrollments = api.enrollments(self.db, uid.uuid)
            uid.roles = enrollments

        self.display('show.tmpl', uidentities=uidentities)
    except NotFoundError as e:
        self.error(str(e))
        return e.code

    return CMD_SUCCESS
Show the information related to unique identities. This method prints information related to unique identities such as identities or enrollments. When <uuid> is given, it will only show information about the unique identity related to <uuid>. When <term> is set, it will only show information about those unique identities that have any attribute (name, email, username, source) which match with the given term. This parameter does not have any effect when <uuid> is set. :param uuid: unique identifier :param term: term to match with unique identities data
def nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):
    """Train a one-hidden-layer neural network by gradient descent.

    Arguments:
    X -- dataset of shape (2, number of examples)
    Y -- labels of shape (1, number of examples)
    n_h -- size of the hidden layer
    num_iterations -- Number of iterations in gradient descent loop
    print_cost -- if True, print the cost every 1000 iterations

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(3)
    layer_dims = layer_sizes(X, Y)
    n_x, n_y = layer_dims[0], layer_dims[2]

    # Initialize parameters (W1, b1, W2, b2) from the layer sizes.
    parameters = initialize_parameters(n_x, n_h, n_y)

    # Gradient descent: forward pass, cost, backward pass, parameter update.
    for iteration in range(num_iterations):
        A2, cache = forward_propagation(X, parameters)
        cost = compute_cost(A2, Y, parameters)
        grads = backward_propagation(parameters, cache, X, Y)
        parameters = update_parameters(parameters, grads)

        # Print the cost every 1000 iterations
        if print_cost and iteration % 1000 == 0:
            print("Cost after iteration %i: %f" % (iteration, cost))

    return parameters
Arguments: X -- dataset of shape (2, number of examples) Y -- labels of shape (1, number of examples) n_h -- size of the hidden layer num_iterations -- Number of iterations in gradient descent loop print_cost -- if True, print the cost every 1000 iterations Returns: parameters -- parameters learnt by the model. They can then be used to predict.
def __pauli_meas_gates(circuit, qreg, op):
    """
    Add state measurement gates to a circuit.
    """
    if op not in ['X', 'Y', 'Z']:
        raise QiskitError("There's no X, Y or Z basis for this Pauli "
                          "measurement")

    # Rotate the measurement basis onto Z; a Z measurement needs no rotation.
    basis_rotation = {
        "X": (0., np.pi),        # u2(0, pi) == Hadamard
        "Y": (0., 0.5 * np.pi),
    }
    if op in basis_rotation:
        phi, lam = basis_rotation[op]
        circuit.u2(phi, lam, qreg)
Add state measurement gates to a circuit.
def _loadFromHStruct(self, dtype: HdlType, bitAddr: int):
    """
    Parse HStruct type to this transaction template instance

    :param dtype: HStruct type whose fields are laid out sequentially
    :param bitAddr: bit address at which this struct starts
    :return: address of it's end
    """
    for f in dtype.fields:
        t = f.dtype
        origin = f
        # fields without a name are padding: they only advance the address
        isPadding = f.name is None
        if isPadding:
            width = t.bit_length()
            bitAddr += width
        else:
            # named field: build a child template and continue after its end
            fi = TransTmpl(t, bitAddr, parent=self, origin=origin)
            self.children.append(fi)
            bitAddr = fi.bitAddrEnd

    return bitAddr
Parse HStruct type to this transaction template instance :return: address of it's end
def filter_catalog(catalog, **kwargs):
    """ Create a new catalog selected from input based on photometry.

    Parameters
    ----------
    bright_limit : float
        Fraction of catalog based on brightness that should be retained.
        Value of 1.00 means full catalog.

    max_bright : int
        Maximum number of sources to keep regardless of `bright_limit`.

    min_bright : int
        Minimum number of sources to keep regardless of `bright_limit`.

    colname : str
        Name of column to use for selection/sorting.

    Returns
    -------
    new_catalog : `~astropy.table.Table`
        New table which only has the sources that meet the selection criteria.
    """
    # interpret input pars
    bright_limit = kwargs.get('bright_limit', 1.00)
    max_bright = kwargs.get('max_bright', None)
    min_bright = kwargs.get('min_bright', 20)
    colname = kwargs.get('colname', 'vegamag')

    # order sources from brightest (smallest magnitude) onward
    brightness = catalog[colname]
    num_sources = len(brightness)
    order = np.argsort(brightness)

    if max_bright is None:
        max_bright = num_sources

    # keep the requested fraction, but never fewer than min_bright nor more
    # than max_bright or the catalog size
    keep = max(int(num_sources * bright_limit), min_bright)
    keep = min(max_bright, keep, num_sources)

    # extract the selected sources
    return catalog[order[:keep]]
Create a new catalog selected from input based on photometry. Parameters ---------- bright_limit : float Fraction of catalog based on brightness that should be retained. Value of 1.00 means full catalog. max_bright : int Maximum number of sources to keep regardless of `bright_limit`. min_bright : int Minimum number of sources to keep regardless of `bright_limit`. colname : str Name of column to use for selection/sorting. Returns ------- new_catalog : `~astropy.table.Table` New table which only has the sources that meet the selection criteria.
def _edge_group_substitution(
    self, ndid, nsplit, idxs, sr_tab, ndoffset, ed_remove, into_or_from
):
    """
    Reconnect edges.

    :param ndid: id of low resolution edges
    :param nsplit: number of split
    :param idxs: indexes of low resolution
    :param sr_tab: lookup table mapping an edge direction to the new
        high-resolution node indexes (relative; shifted by ndoffset)
    :param ndoffset: offset added to the node ids taken from sr_tab
    :param ed_remove: list collecting edge ids to be removed; appended to
        in-place and also returned
    :param into_or_from: if zero, connection of input edges is done. If one,
        connection of output edges is performed.
    :return: ed_remove, with ids of replaced low-resolution edges appended
    """
    # NOTE(review): both `nm` and `np` are used below -- presumably both
    # alias numpy; confirm against the module imports.
    # this is useful for type(idxs) == np.ndarray
    eidxs = idxs[nm.where(self.edges[idxs, 1 - into_or_from] == ndid)[0]]
    # selected_edges = self.edges[idxs, 1 - into_or_from]
    # selected_edges == ndid
    # whre = nm.where(self.edges[idxs, 1 - into_or_from] == ndid)
    # whre0 = (nm.where(self.edges[idxs, 1 - into_or_from] == ndid) == ndid)[0]
    # eidxs = [idxs[i] for i in idxs]

    for igrp in self.edges_by_group(eidxs):
        if igrp.shape[0] > 1:
            # high resolution block to high resolution block
            # all directions are the same
            directions = self.edge_dir[igrp[0]]
            edge_indexes = sr_tab[directions, :].T.flatten() + ndoffset
            # debug code
            # if len(igrp) != len(edge_indexes):
            #     print("Problem ")
            # NOTE(review): writes column 1 regardless of into_or_from,
            # unlike the low-res branch below -- confirm this is intended.
            self.edges[igrp, 1] = edge_indexes
            if self._edge_weight_table is not None:
                self.edges_weights[igrp] = self._edge_weight_table[1, directions]
        else:
            # low res block to hi res block, if into_or_from is set to 0
            # hig res block to low res block, if into_or_from is set to 1
            ed_remove.append(igrp[0])
            # number of new edges is equal to number of pixels on one side of the box (in 2D and D too)
            nnewed = np.power(nsplit, self.data.ndim - 1)
            # copy the low-res edge multipletime
            muleidxs = nm.tile(igrp, nnewed)
            newed = self.edges[muleidxs, :]
            neweddir = self.edge_dir[muleidxs]
            local_node_ids = sr_tab[
                self.edge_dir[igrp] + self.data.ndim * into_or_from, :
            ].T.flatten()
            # first or second (the actual) node id is substitued by new node indexes
            newed[:, 1 - into_or_from] = local_node_ids + ndoffset
            if self._edge_weight_table is not None:
                self.add_edges(
                    newed, neweddir, self.edge_group[igrp], edge_low_or_high=1
                )
            else:
                self.add_edges(
                    newed, neweddir, self.edge_group[igrp], edge_low_or_high=None
                )

    return ed_remove
Reconnect edges. :param ndid: id of low resolution edges :param nsplit: number of split :param idxs: indexes of low resolution :param sr_tab: :param ndoffset: :param ed_remove: :param into_or_from: if zero, connection of input edges is done. If one, connection of output edges is performed. :return:
def gpp(argv=None):
    """Shortcut function for running the previewing command."""
    args = sys.argv[1:] if argv is None else argv
    # prepend the 'preview' subcommand (mutates a caller-supplied list,
    # matching the existing behavior)
    args.insert(0, 'preview')
    return main(args)
Shortcut function for running the previewing command.
def observe(self, terminal, reward, index=0):
    """
    Observe experience from the environment to learn from. Optionally
    pre-processes rewards
    Child classes should call super to get the processed reward
    EX: terminal, reward = super()...

    Args:
        terminal (bool): boolean indicating if the episode terminated after the observation.
        reward (float): scalar reward that resulted from executing the action.
        index (int): buffer slot the observation belongs to -- presumably
            one per parallel environment; confirm against callers.
    """
    self.current_terminal = terminal
    self.current_reward = reward

    if self.batched_observe:
        # Batched observe for better performance with Python.
        self.observe_terminal[index].append(self.current_terminal)
        self.observe_reward[index].append(self.current_reward)

        # flush the buffered observations to the model at episode end or
        # once the buffer reaches the batching capacity
        if self.current_terminal or len(self.observe_terminal[index]) >= self.batching_capacity:
            self.episode = self.model.observe(
                terminal=self.observe_terminal[index],
                reward=self.observe_reward[index],
                index=index
            )
            self.observe_terminal[index] = list()
            self.observe_reward[index] = list()

    else:
        # unbatched: forward the single observation to the model immediately
        self.episode = self.model.observe(
            terminal=self.current_terminal,
            reward=self.current_reward
        )
Observe experience from the environment to learn from. Optionally pre-processes rewards Child classes should call super to get the processed reward EX: terminal, reward = super()... Args: terminal (bool): boolean indicating if the episode terminated after the observation. reward (float): scalar reward that resulted from executing the action.
def load(self, filename):
    """
    Load a npz file. Supports only files previously saved by
    :meth:`pypianoroll.Multitrack.save`.

    Notes
    -----
    Attribute values will all be overwritten.

    Parameters
    ----------
    filename : str
        The name of the npz file to be loaded.
    """
    def reconstruct_sparse(target_dict, name):
        """Return a reconstructed instance of `scipy.sparse.csc_matrix`."""
        # the CSC components are stored as separate arrays keyed by `name`
        return csc_matrix((target_dict[name+'_csc_data'],
                           target_dict[name+'_csc_indices'],
                           target_dict[name+'_csc_indptr']),
                          shape=target_dict[name+'_csc_shape']).toarray()

    with np.load(filename) as loaded:
        if 'info.json' not in loaded:
            raise ValueError("Cannot find 'info.json' in the npz file.")
        # non-array metadata (name, resolution, per-track info) is stored
        # as UTF-8 JSON bytes under 'info.json'
        info_dict = json.loads(loaded['info.json'].decode('utf-8'))
        self.name = info_dict['name']
        self.beat_resolution = info_dict['beat_resolution']

        self.tempo = loaded['tempo']
        # downbeat is optional in the archive
        if 'downbeat' in loaded.files:
            self.downbeat = loaded['downbeat']
        else:
            self.downbeat = None

        # tracks are stored under consecutive integer string keys '0', '1', ...
        idx = 0
        self.tracks = []
        while str(idx) in info_dict:
            pianoroll = reconstruct_sparse(
                loaded, 'pianoroll_{}'.format(idx))
            track = Track(pianoroll, info_dict[str(idx)]['program'],
                          info_dict[str(idx)]['is_drum'],
                          info_dict[str(idx)]['name'])
            self.tracks.append(track)
            idx += 1

    self.check_validity()
Load a npz file. Supports only files previously saved by :meth:`pypianoroll.Multitrack.save`. Notes ----- Attribute values will all be overwritten. Parameters ---------- filename : str The name of the npz file to be loaded.
def refresh_db(**kwargs): ''' Check the yum repos for updated packages Returns: - ``True``: Updates are available - ``False``: An error occurred - ``None``: No updates are available repo Refresh just the specified repo disablerepo Do not refresh the specified repo enablerepo Refresh a disabled repo using this option branch Add the specified branch when refreshing disableexcludes Disable the excludes defined in your config files. Takes one of three options: - ``all`` - disable all excludes - ``main`` - disable excludes defined in [main] in yum.conf - ``repoid`` - disable excludes defined for that repo setopt A comma-separated or Python list of key=value options. This list will be expanded and ``--setopt`` prepended to each in the yum/dnf command that is run. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' pkg.refresh_db ''' # Remove rtag file to keep multiple refreshes from happening in pkg states salt.utils.pkg.clear_rtag(__opts__) retcodes = { 100: True, 0: None, 1: False, } ret = True check_update_ = kwargs.pop('check_update', True) options = _get_options(**kwargs) clean_cmd = ['--quiet', '--assumeyes', 'clean', 'expire-cache'] clean_cmd.extend(options) _call_yum(clean_cmd, ignore_retcode=True) if check_update_: update_cmd = ['--quiet', '--assumeyes', 'check-update'] if (__grains__.get('os_family') == 'RedHat' and __grains__.get('osmajorrelease') == 7): # This feature is disabled because it is not used by Salt and adds a # lot of extra time to the command with large repos like EPEL update_cmd.append('--setopt=autocheck_running_kernel=false') update_cmd.extend(options) ret = retcodes.get(_call_yum(update_cmd, ignore_retcode=True)['retcode'], False) return ret
Check the yum repos for updated packages Returns: - ``True``: Updates are available - ``False``: An error occurred - ``None``: No updates are available repo Refresh just the specified repo disablerepo Do not refresh the specified repo enablerepo Refresh a disabled repo using this option branch Add the specified branch when refreshing disableexcludes Disable the excludes defined in your config files. Takes one of three options: - ``all`` - disable all excludes - ``main`` - disable excludes defined in [main] in yum.conf - ``repoid`` - disable excludes defined for that repo setopt A comma-separated or Python list of key=value options. This list will be expanded and ``--setopt`` prepended to each in the yum/dnf command that is run. .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' pkg.refresh_db
def do_implicit_flow_authorization(self, session):
    """ Standard OAuth2 authorization method. It's used for getting access token
    More info: https://vk.com/dev/implicit_flow_user

    :param session: HTTP session already logged in to VK
    :return: dict of query parameters parsed from the redirect URL
        (contains 'access_token' on success)
    :raises VkAuthError: when the access grant could not be obtained
    """
    logger.info('Doing implicit flow authorization, app_id=%s', self.app_id)
    auth_data = {
        'client_id': self.app_id,
        'display': 'mobile',
        'response_type': 'token',
        'scope': self.scope,
        'redirect_uri': 'https://oauth.vk.com/blank.html',
        'v': self.api_version
    }
    response = session.post(url=self.AUTHORIZE_URL,
                            data=stringify_values(auth_data))
    url_query_params = parse_url_query_params(response.url)

    if 'expires_in' in url_query_params:
        logger.info('Token will be expired in %s sec.' % url_query_params['expires_in'])
    if 'access_token' in url_query_params:
        # already authorized: the token is present in the redirect URL
        return url_query_params

    # Permissions are needed
    logger.info('Getting permissions')
    action_url = parse_form_action_url(response.text)
    logger.debug('Response form action: %s', action_url)
    if action_url:
        # submit the permission form and read the token from the redirect URL
        response = session.get(action_url)
        url_query_params = parse_url_query_params(response.url)
        return url_query_params

    # no permission form found: interpret the response as an error payload
    try:
        response_json = response.json()
    except ValueError:  # not JSON in response
        error_message = 'OAuth2 grant access error'
        logger.error(response.text)
    else:
        error_message = 'VK error: [{}] {}'.format(
            response_json['error'], response_json['error_description'])
    # NOTE(review): logs 'Permissions obtained' at ERROR level immediately
    # before raising -- the message and level look wrong; confirm intent.
    logger.error('Permissions obtained')
    raise VkAuthError(error_message)
Standard OAuth2 authorization method. It's used for getting access token More info: https://vk.com/dev/implicit_flow_user
def invert_if_negative(self):
    """
    |True| if a point having a value less than zero should appear with a
    fill different than those with a positive value. |False| if the fill
    should be the same regardless of the bar's value. When |True|, a bar
    with a solid fill appears with white fill; in a bar with gradient
    fill, the direction of the gradient is reversed, e.g. dark -> light
    instead of light -> dark. The term "invert" here should be understood
    to mean "invert the *direction* of the *fill gradient*".
    """
    element_setting = self._element.invertIfNegative
    # an absent element means the default, which is True
    return True if element_setting is None else element_setting.val
|True| if a point having a value less than zero should appear with a fill different than those with a positive value. |False| if the fill should be the same regardless of the bar's value. When |True|, a bar with a solid fill appears with white fill; in a bar with gradient fill, the direction of the gradient is reversed, e.g. dark -> light instead of light -> dark. The term "invert" here should be understood to mean "invert the *direction* of the *fill gradient*".
def resource_type(self):
    """
    Get the CoRE Link Format rt attribute of the resource.

    :return: the CoRE Link Format rt attribute, formatted as
        ``rt="<value>"``, or an empty string when not set
    """
    rt = self._attributes.get("rt")
    if rt is None:
        return ""
    return 'rt="' + str(rt) + '"'
Get the CoRE Link Format rt attribute of the resource. :return: the CoRE Link Format rt attribute
def _create_dictionary_of_IFS(
        self):
    """*Generate the list of dictionaries containing all the rows in the IFS stream*

    **Return:**
        - ``dictList`` - a list of dictionaries containing all the rows in the IFS stream

    **Usage:**

        .. code-block:: python

            from sherlock.imports import IFS
            stream = IFS(
                log=log,
                settings=settings
            )
            dictList = stream._create_dictionary_of_IFS()
    """
    self.log.debug(
        'starting the ``_create_dictionary_of_IFS`` method')

    # GRAB THE CONTENT OF THE IFS CSV
    try:
        response = requests.get(
            url=self.settings["ifs galaxies url"],
        )
        thisData = response.content
        thisData = thisData.split("\n")
        status_code = response.status_code
    except requests.exceptions.RequestException:
        # NOTE: Python 2 print statement -- this module targets Python 2
        print 'HTTP Request failed'
        sys.exit(0)

    dictList = []
    columns = ["name", "raDeg", "decDeg", "z"]

    # Rows are pipe-delimited: name | RA (sexegesimal) | Dec (sexegesimal) | z
    for line in thisData:
        thisDict = {}
        line = line.strip()
        line = line.replace("\t", " ")
        values = line.split("|")
        if len(values) > 3:
            thisDict["name"] = values[0].strip()

            # ASTROCALC UNIT CONVERTER OBJECT
            converter = unit_conversion(
                log=self.log
            )
            try:
                raDeg = converter.ra_sexegesimal_to_decimal(
                    ra=values[1].strip()
                )
                thisDict["raDeg"] = raDeg
                decDeg = converter.dec_sexegesimal_to_decimal(
                    dec=values[2].strip()
                )
                thisDict["decDeg"] = decDeg
            except:
                # rows with unparsable coordinates are skipped entirely
                name = thisDict["name"]
                self.log.warning(
                    'Could not convert the coordinates for IFS source %(name)s. Skipping import of this source.' % locals())
                continue
            try:
                # only strictly-positive redshifts are considered valid
                z = float(values[3].strip())
                if z > 0.:
                    thisDict["z"] = float(values[3].strip())
                else:
                    thisDict["z"] = None
            except:
                thisDict["z"] = None
            dictList.append(thisDict)

    self.log.debug(
        'completed the ``_create_dictionary_of_IFS`` method')
    return dictList
*Generate the list of dictionaries containing all the rows in the IFS stream* **Return:** - ``dictList`` - a list of dictionaries containing all the rows in the IFS stream **Usage:** .. code-block:: python from sherlock.imports import IFS stream = IFS( log=log, settings=settings ) dictList = stream._create_dictionary_of_IFS()
def process_waypoint_request(self, m, master):
    '''process a waypoint request from the master

    Replies to a MISSION_REQUEST message ``m`` by sending the requested
    waypoint from the local waypoint loader, and marks the upload finished
    once the final waypoint has been sent.
    '''
    # Only answer requests during an active upload; a stale upload times
    # out after 10 seconds of inactivity.
    if (not self.loading_waypoints or
        time.time() > self.loading_waypoint_lasttime + 10.0):
        self.loading_waypoints = False
        self.console.error("not loading waypoints")
        return
    if m.seq >= self.wploader.count():
        self.console.error("Request for bad waypoint %u (max %u)" %
                           (m.seq, self.wploader.count()))
        return
    wp = self.wploader.wp(m.seq)
    # Re-target the waypoint at the connected vehicle before sending.
    # NOTE(review): assumes wploader.wp() returns the stored object (not a
    # copy), so these assignments affect the message sent below -- confirm.
    wp.target_system = self.target_system
    wp.target_component = self.target_component
    self.master.mav.send(self.wploader.wp(m.seq))
    self.loading_waypoint_lasttime = time.time()
    self.console.writeln("Sent waypoint %u : %s" % (m.seq, self.wploader.wp(m.seq)))
    if m.seq == self.wploader.count() - 1:
        # the vehicle just requested the last waypoint -- upload complete
        self.loading_waypoints = False
        self.console.writeln("Sent all %u waypoints" % self.wploader.count())
process a waypoint request from the master
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    item = super(ReferenceAnalysesView, self).folderitem(obj, item, index)
    if not item:
        return None
    # NOTE(review): accessed without parentheses -- presumably ``obj`` is a
    # catalog brain exposing these as plain metadata attributes; confirm.
    item["Category"] = obj.getCategoryTitle
    ref_analysis = api.get_object(obj)
    ws = ref_analysis.getWorksheet()
    if not ws:
        logger.warn(
            "No Worksheet found for ReferenceAnalysis {}"
            .format(obj.getId))
    else:
        item["Worksheet"] = ws.Title()
        # render the Worksheet cell as a link to the worksheet itself
        anchor = "<a href='%s'>%s</a>" % (ws.absolute_url(), ws.Title())
        item["replace"]["Worksheet"] = anchor
    # Add the analysis to the QC Chart
    self.chart.add_analysis(obj)
    return item
Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item
def modifyInPlace(self, *, sort=None, purge=False, done=None):
    """In-place counterpart of :meth:`modify`: replaces this instance's
    database with the modified one instead of returning it."""
    updated = self.modify(sort=sort, purge=purge, done=done)
    self.data = updated
Like Model.modify, but changes existing database instead of returning a new one.
def finish_review(self, success=True, error=False):
    """Publish the final commit status for this review, when status
    reporting is enabled.

    ``error`` wins over ``success``: an analysis crash reports "error",
    a clean run reports "success", and found issues report "failure".
    """
    if not self.set_status:
        return
    if error:
        state = "error"
        description = "Static analysis error! inline-plz failed to run."
    elif success:
        state = "success"
        description = "Static analysis complete! No errors found in your PR."
    else:
        state = "failure"
        description = "Static analysis complete! Found errors in your PR."
    self.github_repo.create_status(
        state=state,
        description=description,
        context="inline-plz",
        sha=self.last_sha,
    )
Mark our review as finished.
def get_match_details(self, match_id=None, **kwargs):
    """Returns a dictionary containing the details for a Dota 2 match

    :param match_id: (int, optional)
    :return: dictionary of matches, see :doc:`responses </responses>`;
        returns None implicitly when the HTTP status indicates an error
    """
    # allow the id to arrive either positionally or as a keyword argument
    if 'match_id' not in kwargs:
        kwargs['match_id'] = match_id
    url = self.__build_url(urls.GET_MATCH_DETAILS, **kwargs)
    req = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    # only build a response object when the request did not fail
    if not self.__check_http_err(req.status_code):
        return response.build(req, url, self.raw_mode)
Returns a dictionary containing the details for a Dota 2 match :param match_id: (int, optional) :return: dictionary of matches, see :doc:`responses </responses>`
def to_dict(self):
    """Convert the instance to a serializable mapping of its public,
    non-callable attributes."""
    result = {}
    for name in dir(self):
        if name.startswith('_'):
            continue
        value = getattr(self, name)
        if hasattr(value, '__call__'):
            continue
        result[name] = value
    return result
Convert instance to a serializable mapping.
def mangle_command(command, name_max=255, has_variables=False):
    """Mangle a command line into a string usable as a file basename.

    Strips a leading /bin, /sbin, /usr/bin or /usr/sbin directory from the
    command, collapses runs of unusual characters into single underscores,
    turns slashes into dots, trims stray separator characters from both
    ends and truncates to ``name_max`` characters.

    When ``has_variables`` is true, curly braces (``{`` and ``}``) survive
    the mangling so that ``{name}`` variable references are preserved;
    otherwise braces are replaced with underscores like any other unusual
    character.  This mirrors the name insights-client gives the captured
    output of a command, so the server side can locate that output file
    inside an archive.
    """
    if has_variables:
        unusual = r"[^\w\-\.\/{}]+"
    else:
        unusual = r"[^\w\-\.\/]+"
    name = re.sub(r"^/(usr/|)(bin|sbin)/", "", command)
    name = re.sub(unusual, "_", name)
    name = name.replace("/", ".").strip(" ._-")
    return name[:name_max]
Mangle a command line string into something suitable for use as the basename of a filename. At minimum this function must remove slashes, but it also does other things to clean up the basename: removing directory names from the command name, replacing many non-typical characters with underscores, in addition to replacing slashes with dots. By default, curly braces, '{' and '}', are replaced with underscore, set 'has_variables' to leave curly braces alone. This function was copied from the function that insights-client uses to create the name it uses to capture the output of the command. Here, server side, it is used to figure out what file in the archive contains the output of a command. Server side, the command may contain references to variables (names within matching curly braces) that will be expanded before the name is actually used as a file name. To completely mimic the insights-client behavior, curly braces need to be replaced with underscores. If the command has variable references, the curly braces must be left alone. Set has_variables, to leave curly braces alone. This implementation of 'has_variables' assumes that variable names only contain characters that are not replaced by mangle_command.
def f_rollup(items, times, freq):
    """
    Use :func:`groupby_freq` to rollup items

    :param items: items in timeseries
    :param times: times corresponding to items
    :param freq: One of the ``dateutil.rrule`` frequency constants
    :type freq: str
    :return: 1-D :class:`numpy.ndarray` with one summed value per group
    """
    # FIX: the original passed a generator expression to ``np.sum``, which
    # does not iterate generators (NumPy wraps them in a 0-d object array /
    # raises TypeError in recent versions) and so never produced the
    # intended per-group total.  The builtin ``sum`` consumes the
    # generator correctly.
    rollup = [sum(item for __, item in ts)
              for _, ts in groupby_freq(items, times, freq)]
    return np.array(rollup)
Use :func:`groupby_freq` to rollup items :param items: items in timeseries :param times: times corresponding to items :param freq: One of the ``dateutil.rrule`` frequency constants :type freq: str
def get_string(self, input_string):
    """Return the string value supplied on the command line for the flag
    named by ``input_string``.

    :param input_string: one of ``--input``, ``--outname``, ``--framework``
    :return: the flag's value (absolute path for ``--input``), or None when
        an optional flag was not provided (or the flag name is unknown)

    Prints a short help message and exits when a required flag is missing
    or a flag was given without a value.
    """
    if input_string in ('--input', '--outname', '--framework'):
        # was the flag set?
        try:
            index = self.args.index(input_string) + 1
        except ValueError:
            # it wasn't, so if it's required, exit
            if input_string in self.required:
                # FIX: both messages below formatted a named {flag}
                # placeholder with a positional argument, which raised
                # KeyError instead of printing the message.
                print("\n {flag} is required".format(flag=input_string))
                print_short_help()
                sys.exit(1)
            # it wasn't, if its optional, return the default
            else:
                return None
        # the flag was set, so check if a value was set, otherwise exit
        try:
            if self.args[index] in self.flags:
                print("\n {flag} was set but a value was not specified".format(flag=input_string))
                print_short_help()
                sys.exit(1)
        except IndexError:
            print("\n {flag} was set but a value was not specified".format(flag=input_string))
            print_short_help()
            sys.exit(1)
        # a value was set, so check and assign the appropriate value or exit
        if input_string == '--input':
            return os.path.abspath(self.args[index])
        elif input_string == '--outname':
            return format(self.args[index])
Return string type user input
def to_dict(self):
    """
    Return the user as a dict, with public keys base64-encoded.
    """
    encoded_keys = [key.b64encoded for key in self.public_keys]
    return {
        'name': self.name,
        'passwd': self.passwd,
        'uid': self.uid,
        'gid': self.gid,
        'gecos': self.gecos,
        'home_dir': self.home_dir,
        'shell': self.shell,
        'public_keys': encoded_keys,
    }
Return the user as a dict.
def deserialize(self, d):
    """
    Recursively rebuild a Q object from its (possibly nested) dict form.
    """
    rebuilt_children = [
        self.deserialize(child) if isinstance(child, dict)
        else self.prepare_value(child)
        for child in d.pop('children')
    ]
    query = Q()
    query.children = rebuilt_children
    query.connector = d['connector']
    query.negated = d['negated']
    if 'subtree_parents' in d:
        query.subtree_parents = d['subtree_parents']
    return query
De-serialize a Q object from a (possibly nested) dict.
def exists(self, names):
    """Checks if the given file list exists in the current directory
    level.

    in names of type str
        The names to check.

    return exists of type str
        The names which exist.
    """
    if not isinstance(names, list):
        raise TypeError("names can only be an instance of type list")
    # Spot-check only the first 10 entries -- convention of the generated
    # VirtualBox API bindings; full validation happens on the service side.
    for a in names[:10]:
        if not isinstance(a, basestring):
            raise TypeError(
                "array can only contain objects of type basestring")
    # delegate to the underlying managed-object call
    exists = self._call("exists",
                 in_p=[names])
    return exists
Checks if the given file list exists in the current directory level. in names of type str The names to check. return exists of type str The names which exist.
def set_query(self, value):
    """
    Convert a dict form of query into a string if needed and store the
    query string.

    Args:
        value -- A query string or a dict with query xpaths as keys and
            text or nested query dicts as values.

    Raises:
        TypeError: when value is neither a string, a dict, nor None.
    """
    if isinstance(value, basestring) or value is None:
        self._content['query'] = value
    elif hasattr(value, 'keys'):
        self._content['query'] = query.terms_from_dict(value)
    else:
        # FIX: the original concatenated a ``type`` object onto a str,
        # which itself raised TypeError and hid the intended message.
        raise TypeError("Query must be a string or dict. Got: {0} instead!"
                        .format(type(value)))
Convert a dict form of query in a string of needed and store the query string. Args: value -- A query string or a dict with query xpaths as keys and text or nested query dicts as values.
def split(self, url):
    """
    Split *url* into its I{protocol} and I{location} parts.

    @param url: A URL.
    @type url: str
    @return: (I{protocol}, I{location})
    @rtype: tuple
    """
    pieces = url.split('://', 1)
    if len(pieces) == 1:
        return (None, url)
    return pieces
Split the url into I{protocol} and I{location} @param url: A URL. @param url: str @return: (I{url}, I{location}) @rtype: tuple
def log(self, *args):
    """stdout and stderr for the link: print a left-justified tag followed
    by the stringified arguments."""
    joined = " ".join(str(arg) for arg in args)
    print("%s %s" % (str(self).ljust(8), joined))
stdout and stderr for the link
def labels(self):
    """
    Return the taxon set of the tree (identical to the label- or leaf-set)
    """
    return {leaf.taxon.label for leaf in self._tree.leaf_nodes()}
Returns the taxon set of the tree (same as the label- or leaf-set)
def parallel_tfa_lcdir(lcdir,
                       templateinfo,
                       lcfileglob=None,
                       timecols=None,
                       magcols=None,
                       errcols=None,
                       lcformat='hat-sql',
                       lcformatdir=None,
                       interp='nearest',
                       sigclip=5.0,
                       mintemplatedist_arcmin=10.0,
                       nworkers=NCPUS,
                       maxworkertasks=1000):
    '''This applies TFA in parallel to all LCs in a directory.

    Parameters
    ----------

    lcdir : str
        This is the directory containing the light curve files to process..

    templateinfo : dict or str
        This is either the dict produced by `tfa_templates_lclist` or the
        pickle produced by the same function.

    lcfileglob : str or None
        The UNIX file glob to use when searching for light curve files in
        `lcdir`. If None, the default file glob associated with registered LC
        format provided is used.

    timecols : list of str or None
        The timecol keys to use from the lcdict in applying TFA corrections.

    magcols : list of str or None
        The magcol keys to use from the lcdict in applying TFA corrections.

    errcols : list of str or None
        The errcol keys to use from the lcdict in applying TFA corrections.

    lcformat : str
        This is the `formatkey` associated with your light curve format, which
        you previously passed in to the `lcproc.register_lcformat`
        function. This will be used to look up how to find and read the light
        curves specified in `basedir` or `use_list_of_filenames`.

    lcformatdir : str or None
        If this is provided, gives the path to a directory when you've stored
        your lcformat description JSONs, other than the usual directories lcproc
        knows to search for them in. Use this along with `lcformat` to specify
        an LC format JSON file that's not currently registered with lcproc.

    interp : str
        This is passed to scipy.interpolate.interp1d as the kind of
        interpolation to use when reforming the light curves to the timebase of
        the TFA templates.

    sigclip : float or sequence of two floats or None
        This is the sigma clip to apply to the light curves before running TFA
        on it.

    mintemplatedist_arcmin : float
        This sets the minimum distance required from the target object for
        objects in the TFA template ensemble. Objects closer than this distance
        will be removed from the ensemble.

    nworkers : int
        The number of parallel workers to launch

    maxworkertasks : int
        The maximum number of tasks per worker allowed before it's replaced by
        a fresh one.

    Returns
    -------

    dict
        Contains the input file names and output TFA light curve filenames per
        input file organized by each `magcol` in `magcols`.

    '''

    # open the templateinfo first
    if isinstance(templateinfo, str) and os.path.exists(templateinfo):
        with open(templateinfo, 'rb') as infd:
            templateinfo = pickle.load(infd)

    # resolve the registered light curve format description
    try:
        formatinfo = get_lcformat(lcformat,
                                  use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc,
             dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None

    # find all the files matching the lcglob in lcdir
    if lcfileglob is None:
        lcfileglob = dfileglob

    lclist = sorted(glob.glob(os.path.join(lcdir, lcfileglob)))

    return parallel_tfa_lclist(
        lclist,
        templateinfo,
        timecols=timecols,
        magcols=magcols,
        errcols=errcols,
        lcformat=lcformat,
        # FIX: the original hard-coded lcformatdir=None here, silently
        # dropping the caller-supplied lcformatdir argument
        lcformatdir=lcformatdir,
        interp=interp,
        sigclip=sigclip,
        mintemplatedist_arcmin=mintemplatedist_arcmin,
        nworkers=nworkers,
        maxworkertasks=maxworkertasks
    )
This applies TFA in parallel to all LCs in a directory. Parameters ---------- lcdir : str This is the directory containing the light curve files to process.. templateinfo : dict or str This is either the dict produced by `tfa_templates_lclist` or the pickle produced by the same function. lcfileglob : str or None The UNIX file glob to use when searching for light curve files in `lcdir`. If None, the default file glob associated with registered LC format provided is used. timecols : list of str or None The timecol keys to use from the lcdict in applying TFA corrections. magcols : list of str or None The magcol keys to use from the lcdict in applying TFA corrections. errcols : list of str or None The errcol keys to use from the lcdict in applying TFA corrections. lcformat : str This is the `formatkey` associated with your light curve format, which you previously passed in to the `lcproc.register_lcformat` function. This will be used to look up how to find and read the light curves specified in `basedir` or `use_list_of_filenames`. lcformatdir : str or None If this is provided, gives the path to a directory when you've stored your lcformat description JSONs, other than the usual directories lcproc knows to search for them in. Use this along with `lcformat` to specify an LC format JSON file that's not currently registered with lcproc. interp : str This is passed to scipy.interpolate.interp1d as the kind of interpolation to use when reforming the light curves to the timebase of the TFA templates. sigclip : float or sequence of two floats or None This is the sigma clip to apply to the light curves before running TFA on it. mintemplatedist_arcmin : float This sets the minimum distance required from the target object for objects in the TFA template ensemble. Objects closer than this distance will be removed from the ensemble. 
nworkers : int The number of parallel workers to launch maxworkertasks : int The maximum number of tasks per worker allowed before it's replaced by a fresh one. Returns ------- dict Contains the input file names and output TFA light curve filenames per input file organized by each `magcol` in `magcols`.
def zscale(data,contrast,min=100,max=60000): """Scale the data cube into the range 0-255""" ## pic 100 random elements along each dimension ## use zscale (see the IRAF display man page or ## http://iraf.net/article.php/20051205162333315 import random x=[] for i in random.sample(xrange(data.shape[0]),50): for j in random.sample(xrange(data.shape[1]),50): x.append(data[i,j]) yl=numarray.sort(numarray.clip(x,min,max)) n=len(yl) ym=sum(yl)/float(n) xl=numarray.array(range(n)) xm=sum(xl)/float(n) ss_xx=sum((xl-xm)*(xl-xm)) ss_yy=sum((yl-ym)*(yl-ym)) ss_xy=sum((xl-xm)*(yl-ym)) b=ss_xy/ss_xx a=ym-b*xm z1=yl[n/2] + (b/contrast)*(1-n/2) z2=yl[n/2] + (b/contrast)*(n-n/2) ## Now put the data inbetween Z1 and Z2 high=data-z1 z2=z2-z1 high=numarray.clip(high,0,z2) ## and change that to 0-255 high= 256-256*high/z2 ### send back the scalled data return high
Scale the data cube into the range 0-255
def skip_if_needed(self, job_record):
    """ method is called from abstract_state_machine.manage_job to notify
    about job's failed processing.  When should_skip_tree_node returns
    True, the node's job_record is transferred to STATE_SKIPPED """
    tree = self.get_tree(job_record.process_name)
    tree_node = tree.get_node(job_record.process_name, job_record.timeperiod)
    if not tree.should_skip_tree_node(tree_node):
        return
    self.skip_tree_node(tree_node)
method is called from abstract_state_machine.manage_job to notify about job's failed processing if should_skip_node returns True - the node's job_record is transferred to STATE_SKIPPED
def sync_local_order(self):
    """!
    @brief Calculates current level of local (partial) synchronization in the network.

    @return (double) Level of local (partial) synchronization.

    @see sync_order()

    """
    if self._ccore_network_pointer is None:
        return order_estimator.calculate_local_sync_order(self._phases, self)

    return wrapper.sync_local_order(self._ccore_network_pointer)
! @brief Calculates current level of local (partial) synchronization in the network. @return (double) Level of local (partial) synchronization. @see sync_order()
def framewise(self):
    """
    Property to determine whether the current frame should have
    framewise normalization enabled. Required for bokeh plotting
    classes to determine whether to send updated ranges for each
    frame.
    """
    # Collect every Element inside the current frame of every view in this
    # (possibly composite) plot; views without a current frame contribute
    # nothing.
    current_frames = [el for f in self.traverse(lambda x: x.current_frame)
                      for el in (f.traverse(lambda x: x, [Element])
                                 if f else [])]
    current_frames = util.unique_iterator(current_frames)
    # Framewise normalization applies when ANY element requests it through
    # its 'norm' option group.
    return any(self.lookup_options(frame, 'norm').options.get('framewise')
               for frame in current_frames)
Property to determine whether the current frame should have framewise normalization enabled. Required for bokeh plotting classes to determine whether to send updated ranges for each frame.
def relative_strength_index(data, period):
    """
    Relative Strength Index.

    Formula:
    RSI = 100 - (100 / 1 + (prevGain/prevLoss))
    """
    catch_errors.check_for_period_error(data, period)

    period = int(period)
    # per-step price changes: data[i+1] - data[i]
    changes = [data_tup[1] - data_tup[0] for data_tup in zip(data[::1], data[1::1])]

    # gains keep positive changes; negative changes contribute 0
    filtered_gain = [val < 0 for val in changes]
    gains = [0 if filtered_gain[idx] is True else changes[idx]
             for idx in range(0, len(filtered_gain))]

    # losses keep the magnitude of negative changes; positive ones are 0
    filtered_loss = [val > 0 for val in changes]
    losses = [0 if filtered_loss[idx] is True else abs(changes[idx])
              for idx in range(0, len(filtered_loss))]

    # seed averages with a simple mean over the first window
    avg_gain = np.mean(gains[:period])
    avg_loss = np.mean(losses[:period])

    rsi = []
    if avg_loss == 0:
        # all-gain window: RSI saturates at 100 (avoids division by zero)
        rsi.append(100)
    else:
        rs = avg_gain / avg_loss
        rsi.append(100 - (100 / (1 + rs)))
    # Wilder-style smoothing of the averages for the remaining values
    for idx in range(1, len(data) - period):
        avg_gain = ((avg_gain * (period - 1) +
                    gains[idx + (period - 1)]) / period)
        avg_loss = ((avg_loss * (period - 1) +
                    losses[idx + (period - 1)]) / period)
        if avg_loss == 0:
            rsi.append(100)
        else:
            rs = avg_gain / avg_loss
            rsi.append(100 - (100 / (1 + rs)))
    # pad the leading, non-computable positions so the result matches
    # len(data)
    rsi = fill_for_noncomputable_vals(data, rsi)
    return rsi
Relative Strength Index. Formula: RSI = 100 - (100 / 1 + (prevGain/prevLoss))
def __we_c(cls, calib, tc, temp, we_v, ae_v):
    """
    Compute weC from sensor temperature compensation of weV, aeV.
    Electronic zero offsets (stored in mV on the calibration record) are
    removed from both electrode voltages before correction.
    """
    we_zeroed = we_v - (calib.we_elc_mv / 1000.0)       # remove electronic we zero
    ae_zeroed = ae_v - (calib.ae_elc_mv / 1000.0)       # remove electronic ae zero

    return tc.correct(calib, temp, we_zeroed, ae_zeroed)
Compute weC from sensor temperature compensation of weV, aeV
def _format_download_uri_for_extension(etextno, extension, mirror=None):
    """Return the download URI on a Project Gutenberg mirror for a given
    text number and file extension.

    The list of available extensions for a given text can be found via the
    formaturi metadata extractor.
    """
    base = (mirror or _GUTENBERG_MIRROR).strip().rstrip('/')
    subdirectory = _etextno_to_uri_subdirectory(etextno)
    return '{root}/{path}/{etextno}{extension}'.format(
        root=base, path=subdirectory, etextno=etextno, extension=extension)
Returns the download location on the Project Gutenberg servers for a given text and extension. The list of available extensions for a given text can be found via the formaturi metadata extractor.
def owner(self, data):
    """Set the Owner payload value for this resource request; warn and do
    nothing when no owner was provided."""
    if data is None:
        self.tcex.log.warn(u'Provided owner was invalid. ({})'.format(data))
        return
    self._request.add_payload('owner', data)
The Owner payload value for this resource request.
def init():
    """ Initializes the preprocessor: resets all module-level state so the
    preprocessor can be run again cleanly within the same process. """
    global OUTPUT
    global INCLUDED
    global CURRENT_DIR
    global ENABLED
    global INCLUDEPATH
    global IFDEFS
    global ID_TABLE
    global CURRENT_FILE

    global_.FILENAME = '(stdin)'
    OUTPUT = ''
    INCLUDED = {}
    CURRENT_DIR = ''
    pwd = get_include_path()
    # default include search path: the bundled BASIC and asm libraries
    INCLUDEPATH = [os.path.join(pwd, 'library'), os.path.join(pwd, 'library-asm')]
    ENABLED = True
    IFDEFS = []
    global_.has_errors = 0
    global_.error_msg_cache.clear()
    # drop memoized parser state from any previous run
    parser.defaulted_states = {}
    ID_TABLE = DefinesTable()
    # clear in place so the list object's identity is preserved for any
    # other holders of the reference
    del CURRENT_FILE[:]
Initializes the preprocessor
def set_permissions(obj_name, principal, permissions, access_mode='grant', applies_to=None, obj_type='file', reset_perms=False, protected=None): ''' Set the permissions of an object. This can be a file, folder, registry key, printer, service, etc... Args: obj_name (str): The object for which to set permissions. This can be the path to a file or folder, a registry key, printer, etc. For more information about how to format the name see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx principal (str): The name of the user or group for which to set permissions. Can also pass a SID. permissions (str, list): The type of permissions to grant/deny the user. Can be one of the basic permissions, or a list of advanced permissions. access_mode (Optional[str]): Whether to grant or deny user the access. Valid options are: - grant (default): Grants the user access - deny: Denies the user access applies_to (Optional[str]): The objects to which these permissions will apply. Not all these options apply to all object types. Defaults to 'this_folder_subfolders_files' obj_type (Optional[str]): The type of object for which to set permissions. Default is 'file' reset_perms (Optional[bool]): True will overwrite the permissions on the specified object. False will append the permissions. Default is False protected (Optional[bool]): True will disable inheritance for the object. False will enable inheritance. None will make no change. Default is None. Returns: bool: True if successful, raises an error otherwise Usage: .. 
code-block:: python salt.utils.win_dacl.set_permissions( 'C:\\Temp', 'jsnuffy', 'full_control', 'grant') ''' # Set up applies_to defaults used by registry and file types if applies_to is None: if 'registry' in obj_type.lower(): applies_to = 'this_key_subkeys' elif obj_type.lower() == 'file': applies_to = 'this_folder_subfolders_files' # If you don't pass `obj_name` it will create a blank DACL # Otherwise, it will grab the existing DACL and add to it if reset_perms: obj_dacl = dacl(obj_type=obj_type) else: obj_dacl = dacl(obj_name, obj_type) obj_dacl.rm_ace(principal, access_mode) obj_dacl.add_ace(principal, access_mode, permissions, applies_to) obj_dacl.order_acl() obj_dacl.save(obj_name, protected) return True
Set the permissions of an object. This can be a file, folder, registry key, printer, service, etc... Args: obj_name (str): The object for which to set permissions. This can be the path to a file or folder, a registry key, printer, etc. For more information about how to format the name see: https://msdn.microsoft.com/en-us/library/windows/desktop/aa379593(v=vs.85).aspx principal (str): The name of the user or group for which to set permissions. Can also pass a SID. permissions (str, list): The type of permissions to grant/deny the user. Can be one of the basic permissions, or a list of advanced permissions. access_mode (Optional[str]): Whether to grant or deny user the access. Valid options are: - grant (default): Grants the user access - deny: Denies the user access applies_to (Optional[str]): The objects to which these permissions will apply. Not all these options apply to all object types. Defaults to 'this_folder_subfolders_files' obj_type (Optional[str]): The type of object for which to set permissions. Default is 'file' reset_perms (Optional[bool]): True will overwrite the permissions on the specified object. False will append the permissions. Default is False protected (Optional[bool]): True will disable inheritance for the object. False will enable inheritance. None will make no change. Default is None. Returns: bool: True if successful, raises an error otherwise Usage: .. code-block:: python salt.utils.win_dacl.set_permissions( 'C:\\Temp', 'jsnuffy', 'full_control', 'grant')
def findspans(self, type,set=None):
    """Yields span annotation elements of the specified type that include this word.

    Arguments:
        type: The annotation type, can be passed as using any of the :class:`AnnotationType` member, or by passing the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class.
        set (str or None): Constrain by set

    Example::

        for chunk in word.findspans(folia.Chunk):
            print(" Chunk class=", chunk.cls, " words=")
            for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part)
                print(word2, end="")
            print()

    Yields:
        Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`)
    """
    # Resolve the annotation layer class that holds spans of this type
    if issubclass(type, AbstractAnnotationLayer):
        layerclass = type
    else:
        layerclass = ANNOTATIONTYPE2LAYERCLASS[type.ANNOTATIONTYPE]
    # Walk up to the top-most ancestor so all enclosing layers are searched
    e = self
    while True:
        if not e.parent: break
        e = e.parent
    for layer in e.select(layerclass,set,False):
        if type is layerclass:
            # caller asked for the layer class itself: yield spans of any
            # kind found in it
            for e2 in layer.select(AbstractSpanAnnotation,set,True, (True, Word, Morpheme)):
                # span roles are excluded; only spans referencing this word
                if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
                    yield e2
        else:
            for e2 in layer.select(type,set,True, (True, Word, Morpheme)):
                if not isinstance(e2, AbstractSpanRole) and self in e2.wrefs():
                    yield e2
Yields span annotation elements of the specified type that include this word. Arguments: type: The annotation type, can be passed as using any of the :class:`AnnotationType` member, or by passing the relevant :class:`AbstractSpanAnnotation` or :class:`AbstractAnnotationLayer` class. set (str or None): Constrain by set Example:: for chunk in word.findspans(folia.Chunk): print(" Chunk class=", chunk.cls, " words=") for word2 in chunk.wrefs(): #print all words in the chunk (of which the word is a part) print(word2, end="") print() Yields: Matching span annotation instances (derived from :class:`AbstractSpanAnnotation`)
def get_weather(self):
    """
    Return an instance of the Weather Service.
    """
    # imported lazily so the dependency is only loaded when used
    import predix.data.weather
    return predix.data.weather.WeatherForecast()
Returns an instance of the Weather Service.
def bg(func):
    """Decorator that runs *func* on a daemon background thread, so the
    call will not block the main thread's exit (thread.daemon=True).

    The wrapped call starts the thread and immediately returns the started
    ``Thread`` object.

    ::

        from torequests.utils import bg, print_info
        import time

        def test1(n):
            time.sleep(n)
            print_info(n, 'done')

        @bg
        def test2(n):
            time.sleep(n)
            print_info(n, 'done')

        test3 = bg(test1)

        test2(1)
        test3(1)
        print_info('not be blocked')
        time.sleep(2)
    """

    @wraps(func)
    def runner(*args, **kwargs):
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.daemon = True
        worker.start()
        return worker

    return runner
Run a function in background, will not block main thread's exit.(thread.daemon=True) :: from torequests.utils import bg, print_info import time def test1(n): time.sleep(n) print_info(n, 'done') @bg def test2(n): time.sleep(n) print_info(n, 'done') test3 = bg(test1) test2(1) test3(1) print_info('not be blocked') time.sleep(2) # [2018-06-12 23:46:19](L81): not be blocked # [2018-06-12 23:46:20](L81): 1 done # [2018-06-12 23:46:20](L81): 1 done
def put(self, url, data):
    """
    Save ``data`` (a dictionary) to ``url`` via an HTTP PUT request and
    return the handled response.
    """
    result = self._run_method('PUT', url, data=data)
    log.debug("OUTPUT: %s" % result.content)
    return self._handle_response(url, result)
Make a PUT request to save data. data should be a dictionary.
def read_nonblocking (self, size = 1, timeout = -1):
    """This reads at most size bytes from the child application. It includes
    a timeout. If the read does not complete within the timeout period then a
    TIMEOUT exception is raised. If the end of file is read then an EOF
    exception will be raised. If a log file was set using setlog() then all
    data will also be written to the log file.

    If timeout is None then the read may block indefinitely. If timeout is -1
    then the self.timeout value is used. If timeout is 0 then the child is
    polled and if there was no data immediately ready then this will raise a
    TIMEOUT exception.

    The timeout refers only to the amount of time to read at least one
    character. This is not effected by the 'size' parameter, so if you call
    read_nonblocking(size=100, timeout=30) and only one character is available
    right away then one character will be returned immediately. It will not
    wait for 30 seconds for another 99 characters to come in.

    This is a wrapper around os.read(). It uses select.select() to implement
    the timeout.
    """

    if self.closed:
        raise ValueError ('I/O operation on closed file in read_nonblocking().')

    if timeout == -1:
        timeout = self.timeout

    # Note that some systems such as Solaris do not give an EOF when
    # the child dies. In fact, you can still try to read
    # from the child_fd -- it will block forever or until TIMEOUT.
    # For this case, I test isalive() before doing any reading.
    # If isalive() is false, then I pretend that this is the same as EOF.
    if not self.isalive():
        r,w,e = self.__select([self.child_fd], [], [], 0) # timeout of 0 means "poll"
        if not r:
            self.flag_eof = True
            raise EOF ('End Of File (EOF) in read_nonblocking(). Braindead platform.')
    elif self.__irix_hack:
        # This is a hack for Irix. It seems that Irix requires a long delay before checking isalive.
        # This adds a 2 second delay, but only when the child is terminated.
        r, w, e = self.__select([self.child_fd], [], [], 2)
        if not r and not self.isalive():
            self.flag_eof = True
            raise EOF ('End Of File (EOF) in read_nonblocking(). Pokey platform.')

    r,w,e = self.__select([self.child_fd], [], [], timeout)

    if not r:
        if not self.isalive():
            # Some platforms, such as Irix, will claim that their processes are alive;
            # then timeout on the select; and then finally admit that they are not alive.
            self.flag_eof = True
            raise EOF ('End of File (EOF) in read_nonblocking(). Very pokey platform.')
        else:
            raise TIMEOUT ('Timeout exceeded in read_nonblocking().')

    if self.child_fd in r:
        try:
            s = os.read(self.child_fd, size)
        except OSError:
            # Linux does this at EOF instead of returning an empty read.
            # BUGFIX: was the Python-2-only form "except OSError, e:" (a
            # SyntaxError on Python 3); the bound exception was never used.
            self.flag_eof = True
            raise EOF ('End Of File (EOF) in read_nonblocking(). Exception style platform.')
        if s == b'': # BSD style
            self.flag_eof = True
            raise EOF ('End Of File (EOF) in read_nonblocking(). Empty string style platform.')

        s2 = self._cast_buffer_type(s)
        if self.logfile is not None:
            self.logfile.write(s2)
            self.logfile.flush()
        if self.logfile_read is not None:
            self.logfile_read.write(s2)
            self.logfile_read.flush()
        return s

    raise ExceptionPexpect ('Reached an unexpected state in read_nonblocking().')
This reads at most size bytes from the child application. It includes a timeout. If the read does not complete within the timeout period then a TIMEOUT exception is raised. If the end of file is read then an EOF exception will be raised. If a log file was set using setlog() then all data will also be written to the log file. If timeout is None then the read may block indefinitely. If timeout is -1 then the self.timeout value is used. If timeout is 0 then the child is polled and if there was no data immediately ready then this will raise a TIMEOUT exception. The timeout refers only to the amount of time to read at least one character. This is not effected by the 'size' parameter, so if you call read_nonblocking(size=100, timeout=30) and only one character is available right away then one character will be returned immediately. It will not wait for 30 seconds for another 99 characters to come in. This is a wrapper around os.read(). It uses select.select() to implement the timeout.
def run(command, timeout=None, cwd=None, env=None, debug=None):
    """Run ``command`` on the system, streaming output as it is produced.

    Thin convenience wrapper that forwards every argument unchanged to
    :meth:`Command.run`.

    :param command: list containing the command and its arguments; the
        only whitespace allowed inside an element is in backslash-escaped
        paths.
    :param timeout: seconds after which the command and its child
        processes are terminated; default ``None``.
    :param cwd: directory to change to before execution.  Note this is
        not considered when locating the executable, so the program's
        path cannot be given relative to ``cwd``.
    :param env: dict of extra environment variables merged into the OS
        environment; commands often need this, as they do not inherit
        your interactive shell's variables.
    :param debug: callable receiving each line of output as it appears
        on stdout — handy for logging long-running commands without
        waiting for completion.
    :return: :class:`Command.Response` holding the exit code and output.
    """
    response = Command.run(
        command, timeout=timeout, cwd=cwd, env=env, debug=debug)
    return response
Runs a given command on the system within a set time period, providing an easy way to access command output as it happens without waiting for the command to finish running. :type list :param command: Should be a list that contains the command that should be run on the given system. The only whitespace that can occur is for paths that use a backslash to escape it appropriately :type int :param timeout: Specified in seconds. If a command outruns the timeout then the command and its child processes will be terminated. The default is to run :type string :param cwd: If cwd is set then the current directory will be changed to cwd before it is executed. Note that this directory is not considered when searching the executable, so you can’t specify the program’s path relative to cwd. :type dict :param env: A dict of any ENV variables that should be combined into the OS ENV that will help the command to run successfully. Note that more often than not the command run does not have the same ENV variables available as your shell by default and as such require some assistance. :type function :param debug: A function (also a class function) can be passed in here and all output, line by line, from the command being run will be passed to it as it gets outputted to stdout. This allows for things such as logging (using the built in python logging lib) what is happening on long running commands or redirect output of a tail -f call as lines get outputted without having to wait till the command finishes. :return returns :class:`Command.Response` that contains the exit code and the output from the command
def _prune_components(self): """ Remove components for which the remote party did not provide any candidates. This can only be determined after end-of-candidates. """ seen_components = set(map(lambda x: x.component, self._remote_candidates)) missing_components = self._components - seen_components if missing_components: self.__log_info('Components %s have no candidate pairs' % missing_components) self._components = seen_components
Remove components for which the remote party did not provide any candidates. This can only be determined after end-of-candidates.
def GetValues(self, fd):
    """Return the values for this attribute as stored in an AFF4Object.

    Values are yielded first from fd.new_attributes (pending values), then
    from fd.synced_attributes (stored values, converted via ToRDFValue()).
    If neither source produced anything, the attribute's default (if any)
    is yielded instead.  When self.field_names is set, sub-fields are
    interpolated out of each value and yielded individually in place of
    the value itself.

    Args:
      fd: The AFF4Object whose attribute values should be read.

    Yields:
      The stored values (or their sub-fields when field_names is set),
      falling back to the default value when nothing was stored.
    """
    # 'result' doubles as a sentinel: it stays None only when both loops
    # below iterate zero times (or the last synced value converted to
    # None), in which case the default is consulted.
    result = None
    for result in fd.new_attributes.get(self, []):
        # We need to interpolate sub fields in this rdfvalue.
        if self.field_names:
            for x in self.GetSubFields(result, self.field_names):
                yield x
        else:
            yield result

    for result in fd.synced_attributes.get(self, []):
        # Synced values are stored serialized; convert before use.
        result = result.ToRDFValue()

        # We need to interpolate sub fields in this rdfvalue.
        if result is not None:
            if self.field_names:
                for x in self.GetSubFields(result, self.field_names):
                    yield x
            else:
                yield result

    if result is None:
        default = self.GetDefault(fd)
        if default is not None:
            yield default
Return the values for this attribute as stored in an AFF4Object.
def get_cert_contents(kwargs):
    """Build upload_server_certificate parameters from cert file contents.

    Args:
        kwargs(dict): The keyword args passed to ensure_server_cert_exists,
            optionally containing "cert_name" and the paths (or file-like
            objects) under "path_to_certificate", "path_to_private_key"
            and "path_to_chain".  Missing paths are prompted for on stdin;
            entering "skip" or a blank line omits that part.

    Returns:
        dict: Parameters suitable for upload_server_certificate.
    """
    paths = {
        "certificate": kwargs.get("path_to_certificate"),
        "private_key": kwargs.get("path_to_private_key"),
        "chain": kwargs.get("path_to_chain"),
    }

    # Interactively fill in any path the caller did not supply.
    for key, value in paths.items():
        if value is not None:
            continue
        path = input("Path to %s (skip): " % (key,))
        if path == "skip" or not path.strip():
            continue
        paths[key] = path

    # Which upload parameter each piece of the certificate maps to.
    param_names = {
        "certificate": "CertificateBody",
        "private_key": "PrivateKey",
        "chain": "CertificateChain",
    }
    parameters = {
        "ServerCertificateName": kwargs.get("cert_name"),
    }
    for key, path in paths.items():
        if not path:
            continue
        # Allow passing of file like object for tests
        try:
            contents = path.read()
        except AttributeError:
            with open(utils.full_path(path)) as read_file:
                contents = read_file.read()
        parameters[param_names[key]] = contents

    return parameters
Builds parameters with server cert file contents. Args: kwargs(dict): The keyword args passed to ensure_server_cert_exists, optionally containing the paths to the cert, key and chain files. Returns: dict: A dictionary containing the appropriate parameters to supply to upload_server_certificate. An empty dictionary if there is a problem.
def get_network_channel(self):
    """Get a reasonable 'default' network channel.

    When configuring/examining network configuration, it's desirable to
    find the correct channel.  Here we run with the 'real' number of the
    current channel if it is a LAN channel, otherwise it evaluates all of
    the channels to find the first workable LAN channel and returns that.

    The result is cached in self._netchannel, so probing happens at most
    once per session.
    """
    if self._netchannel is None:
        # 0xe means "the channel this request arrived on"; it is tried
        # first so the channel currently in use wins, before probing
        # candidate channels 1 through 0xb.
        for channel in chain((0xe,), range(1, 0xc)):
            try:
                # IPMI Get Channel Info (NetFn 6, command 0x42).
                rsp = self.xraw_command(
                    netfn=6, command=0x42, data=(channel,))
            except exc.IpmiException as ie:
                if ie.ipmicode == 0xcc:
                    # We have hit an invalid channel, move on to next
                    # candidate
                    continue
                else:
                    raise
            # Low 7 bits of the second response byte carry the channel
            # medium type; 4 and 6 are LAN media (TODO confirm against
            # the IPMI spec table for this BMC family).
            chantype = ord(rsp['data'][1]) & 0b1111111
            if chantype in (4, 6):
                try:
                    # Some implementations denote an inactive channel
                    # by refusing to do parameter retrieval
                    if channel != 0xe:
                        # skip checking if channel is active if we are
                        # actively using the channel
                        self.xraw_command(
                            netfn=0xc, command=2, data=(channel, 5, 0, 0))
                    # If still here, the channel seems serviceable...
                    # However some implementations may still have
                    # ambiguous channel info, that will need to be
                    # picked up on an OEM extension...
                    # Low 4 bits of the first byte are the actual channel
                    # number, resolving the 0xe alias to a real channel.
                    self._netchannel = ord(rsp['data'][0]) & 0b1111
                    break
                except exc.IpmiException as ie:
                    # This means the attempt to fetch parameter 5 failed,
                    # therefore move on to next candidate channel
                    continue
    return self._netchannel
Get a reasonable 'default' network channel. When configuring/examining network configuration, it's desirable to find the correct channel. Here we run with the 'real' number of the current channel if it is a LAN channel, otherwise it evaluates all of the channels to find the first workable LAN channel and returns that
def _subspace_process(streams, lowcut, highcut, filt_order, sampling_rate,
                      multiplex, align, shift_len, reject, no_missed=True,
                      stachans=None, parallel=False, plot=False, cores=1):
    """
    Process stream data, internal function.

    :type streams: list
    :param streams: List of obspy.core.stream.Stream to be used to \
        generate the subspace detector. These should be pre-clustered \
        and aligned.
    :type lowcut: float
    :param lowcut: Lowcut in Hz, can be None to not apply filter
    :type highcut: float
    :param highcut: Highcut in Hz, can be None to not apply filter
    :type filt_order: int
    :param filt_order: Number of corners for filter.
    :type sampling_rate: float
    :param sampling_rate: Desired sampling rate in Hz
    :type multiplex: bool
    :param multiplex: Whether to multiplex the data or not. Data are \
        multiplexed according to the method of Harris, see the multi \
        function for details.
    :type stachans: list of tuple
    :param stachans: list of tuples of (station, channel) to use.
    :type align: bool
    :param align: Whether to align the data or not - needs to be done \
        at some point
    :type shift_len: float
    :param shift_len: Maximum shift allowed for alignment in seconds.
    :type reject: float
    :param reject: Minimum correlation for traces, only used if align=True.
    :type no_missed: bool
    :param: no_missed: Reject streams with missed traces, defaults to True. \
        A missing trace from lots of events will reduce the quality of the \
        subspace detector if multiplexed. Only used when multi is set to True.
    :type plot: bool
    :param plot: Passed down to align traces - used to check alignment \
        process.

    :return: Processed streams
    :rtype: list
    :return: Station, channel pairs in order
    :rtype: list of tuple
    :return: List of delays
    :rtype: list
    """
    from multiprocessing import Pool, cpu_count
    processed_streams = []
    if not stachans:
        input_stachans = list(set([(tr.stats.station, tr.stats.channel)
                                   for st in streams for tr in st.sort()]))
    else:
        input_stachans = stachans
    input_stachans.sort()  # Make sure stations and channels are in order
    # Check that all channels are the same length in seconds
    first_length = len(streams[0][0].data) /\
        streams[0][0].stats.sampling_rate
    for st in streams:
        for tr in st:
            if not len(tr) / tr.stats.sampling_rate == first_length:
                msg = 'All channels of all streams must be the same length'
                raise IOError(msg)
    for st in streams:
        if not parallel:
            processed_stream = Stream()
            for stachan in input_stachans:
                # _internal_process returns (index, trace); the index is
                # only needed for ordering in the parallel branch.
                dummy, tr = _internal_process(
                    st=st, lowcut=lowcut, highcut=highcut,
                    filt_order=filt_order, sampling_rate=sampling_rate,
                    first_length=first_length, stachan=stachan, debug=0)
                processed_stream += tr
            processed_streams.append(processed_stream)
        else:
            # Fan the per-channel processing of this stream out to a pool,
            # then re-assemble the results in input_stachans order.
            pool = Pool(processes=min(cores, cpu_count()))
            results = [pool.apply_async(
                _internal_process, (st,),
                {'lowcut': lowcut, 'highcut': highcut,
                 'filt_order': filt_order, 'sampling_rate': sampling_rate,
                 'first_length': first_length, 'stachan': stachan,
                 'debug': 0, 'i': i})
                for i, stachan in enumerate(input_stachans)]
            pool.close()
            try:
                processed_stream = [p.get() for p in results]
            except KeyboardInterrupt as e:  # pragma: no cover
                pool.terminate()
                raise e
            pool.join()
            # Restore channel order: results arrive tagged with their index.
            processed_stream.sort(key=lambda tup: tup[0])
            processed_stream = Stream([p[1] for p in processed_stream])
            processed_streams.append(processed_stream)
        if no_missed and multiplex:
            # An all-zero trace marks a channel with no data; discard the
            # whole event stream, as gaps corrupt multiplexed detectors.
            for tr in processed_stream:
                if np.count_nonzero(tr.data) == 0:
                    processed_streams.remove(processed_stream)
                    print('Removed stream with empty trace')
                    break
    if align:
        processed_streams = align_design(
            design_set=processed_streams, shift_len=shift_len,
            reject=reject, multiplex=multiplex, plot=plot,
            no_missed=no_missed)
    output_streams = []
    for processed_stream in processed_streams:
        if len(processed_stream) == 0:
            # If we have removed all of the traces then onwards!
            continue
        # Need to order the stream according to input_stachans
        _st = Stream()
        for stachan in input_stachans:
            tr = processed_stream.select(
                station=stachan[0], channel=stachan[1])
            if len(tr) >= 1:
                _st += tr[0]
            elif multiplex and len(tr) == 0:
                raise IndexError(
                    'Missing data for %s.%s' % (stachan[0], stachan[1]))
        if multiplex:
            st = multi(stream=_st)
            st = Stream(Trace(st))
            st[0].stats.station = 'Multi'
            st[0].stats.sampling_rate = sampling_rate
        else:
            st = _st
        for tr in st:
            # Normalize the data
            norm = np.linalg.norm(tr.data)
            if not norm == 0:
                tr.data /= norm
        output_streams.append(st)
    return output_streams, input_stachans
Process stream data, internal function. :type streams: list :param streams: List of obspy.core.stream.Stream to be used to \ generate the subspace detector. These should be pre-clustered \ and aligned. :type lowcut: float :param lowcut: Lowcut in Hz, can be None to not apply filter :type highcut: float :param highcut: Highcut in Hz, can be None to not apply filter :type filt_order: int :param filt_order: Number of corners for filter. :type sampling_rate: float :param sampling_rate: Desired sampling rate in Hz :type multiplex: bool :param multiplex: Whether to multiplex the data or not. Data are \ multiplexed according to the method of Harris, see the multi \ function for details. :type stachans: list of tuple :param stachans: list of tuples of (station, channel) to use. :type align: bool :param align: Whether to align the data or not - needs to be done \ at some point :type shift_len: float :param shift_len: Maximum shift allowed for alignment in seconds. :type reject: float :param reject: Minimum correlation for traces, only used if align=True. :type no_missed: bool :param: no_missed: Reject streams with missed traces, defaults to True. \ A missing trace from lots of events will reduce the quality of the \ subspace detector if multiplexed. Only used when multi is set to True. :type plot: bool :param plot: Passed down to align traces - used to check alignment process. :return: Processed streams :rtype: list :return: Station, channel pairs in order :rtype: list of tuple :return: List of delays :rtype: list