def convolve2d_disk(fn, r, sig, nstep=200):
    """Evaluate the convolution f'(r) = f(r) * g(r) where f(r) is an
    azimuthally symmetric function in two dimensions and g is a step
    function given by:

    g(r) = H(1 - r/s)

    Parameters
    ----------
    fn : function
        Input function that takes a single radial coordinate parameter.

    r : `~numpy.ndarray`
        Array of points at which the convolution is to be evaluated.

    sig : float
        Radius parameter of the step function.

    nstep : int
        Number of sampling points for numeric integration.
    """
    r = np.array(r, ndmin=1)
    sig = np.array(sig, ndmin=1)

    rmin = r - sig
    rmax = r + sig
    rmin[rmin < 0] = 0
    delta = (rmax - rmin) / nstep

    redge = (rmin[..., np.newaxis] +
             delta[..., np.newaxis] * np.linspace(0, nstep, nstep + 1))
    rp = 0.5 * (redge[..., 1:] + redge[..., :-1])
    dr = redge[..., 1:] - redge[..., :-1]
    fnv = fn(rp)

    r = r.reshape(r.shape + (1,))
    cphi = -np.ones(dr.shape)
    m = ((rp + r) / sig < 1) | (r == 0)

    rrp = r * rp
    sx = r ** 2 + rp ** 2 - sig ** 2
    cphi[~m] = sx[~m] / (2 * rrp[~m])
    dphi = 2 * np.arccos(cphi)
    v = rp * fnv * dphi * dr / (np.pi * sig * sig)
    s = np.sum(v, axis=-1)

    return s
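A quick sanity check for the routine above (a minimal sketch, assuming numpy is imported as np): convolving the constant function f(r) = 1 with the normalized disk kernel should return values close to 1.0 at any radius, since the kernel integrates to one.

import numpy as np

# Both printed values should be close to 1.0.
print(convolve2d_disk(lambda rp: np.ones_like(rp), [0.0, 0.5], 1.0))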
def delete(self, route: str, callback: object):
    """
    Binds a DELETE route with the given callback
    :rtype: object
    """
    self.__set_route('delete', {route: callback})
    return RouteMapping
def request_connect(self, act, coro):
    """Requests a connect for the `coro` coroutine, with parameters and
    completion passed via `act`."""
    result = self.try_run_act(act, perform_connect)
    if result:
        return result, coro
    else:
        self.add_token(act, coro, perform_connect)
def has_connection(self, i, j):
    """!
    @brief Returns True if there is a connection between oscillators i and j,
           and False if the connection doesn't exist.

    @param[in] i (uint): index of an oscillator in the network.
    @param[in] j (uint): index of an oscillator in the network.

    """
    if (self._ccore_network_pointer is not None) and (self._osc_conn is None):
        self._osc_conn = wrapper.sync_connectivity_matrix(self._ccore_network_pointer)

    return super().has_connection(i, j)
def _get_jamo_short_name(jamo):
    """
    Function for taking a Unicode scalar value representing a Jamo and
    determining the correct value for its Jamo_Short_Name property. For more
    information on the Jamo_Short_Name property see the Unicode Standard,
    ch. 03, section 3.12, Conjoining Jamo Behavior.
    https://www.unicode.org/versions/latest/ch03.pdf

    :param jamo: Unicode scalar value representing a Jamo
    :return: Returns a string representing its Jamo_Short_Name property
    """
    if not _is_jamo(jamo):
        raise ValueError("Value 0x%0.4x passed in does not represent a Jamo!" % jamo)
    if not _jamo_short_names:
        _load_jamo_short_names()
    return _jamo_short_names[jamo]
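An illustrative call (hedged: it assumes the module's lazily loaded name tables are available). Per the Unicode Jamo.txt data file, U+1100 HANGUL CHOSEONG KIYEOK has Jamo_Short_Name "G":

print(_get_jamo_short_name(0x1100))  # 'G'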
def p_elision(self, p):
    """elision : COMMA
               | elision COMMA
    """
    if len(p) == 2:
        p[0] = [ast.Elision(p[1])]
    else:
        p[1].append(ast.Elision(p[2]))
        p[0] = p[1]
def create(self, basedir, outdir, name, prefix=None):
    """
    :API: public
    """
    zippath = os.path.join(outdir, '{}.{}'.format(name, self.extension))
    with open_zip(zippath, 'w', compression=self.compression) as zip:
        # For symlinks, we want to archive the actual content of linked files
        # but under the relpath derived from the symlink.
        for root, _, files in safe_walk(basedir, followlinks=True):
            root = ensure_text(root)
            for file in files:
                file = ensure_text(file)
                full_path = os.path.join(root, file)
                relpath = os.path.relpath(full_path, basedir)
                if prefix:
                    relpath = os.path.join(ensure_text(prefix), relpath)
                zip.write(full_path, relpath)
    return zippath
def _job_statistics(self):
    """Helper for job-type specific statistics-based properties."""
    statistics = self._properties.get("statistics", {})
    return statistics.get(self._JOB_TYPE, {})
def _add(self, codeobj):
    """Add a child (value) to this object."""
    assert isinstance(codeobj, CodeExpression.TYPES)
    self.value = codeobj
def _get_exception_class_from_status_code(status_code):
    """
    Utility function that accepts a status code, and spits out a reference
    to the correct exception class to raise.

    :param str status_code: The status code to return an exception class for.
    :rtype: PetfinderAPIError or None
    :returns: The appropriate PetfinderAPIError subclass. If the status code
        is not an error, return ``None``.
    """
    if status_code == '100':
        return None

    exc_class = STATUS_CODE_MAPPING.get(status_code)
    if not exc_class:
        # No status code match, return the "I don't know wtf this is"
        # exception class.
        return STATUS_CODE_MAPPING['UNKNOWN']
    else:
        # Match found, yay.
        return exc_class
def get_all_adv_settings() -> Dict[str, Dict[str, Union[str, bool, None]]]:
    """
    :return: a dict of settings keyed by setting ID, where each value is a
        dict with keys "id", "title", "description", and "value"
    """
    settings_file = CONFIG['feature_flags_file']
    values, _ = _read_settings_file(settings_file)
    return {
        key: {**settings_by_id[key].__dict__, 'value': value}
        for key, value in values.items()
    }
def as_dict(self):
    """
    Bson-serializable dict representation of the WeightedNbSetChemenvStrategy
    object.

    :return: Bson-serializable dict representation of the
        WeightedNbSetChemenvStrategy object.
    """
    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "additional_condition": self._additional_condition,
            "symmetry_measure_type": self.symmetry_measure_type,
            "nb_set_weights": [nb_set_weight.as_dict()
                               for nb_set_weight in self.nb_set_weights],
            "ce_estimator": self.ce_estimator}
def get_bac(age, weight, height, sex, volume, percent):
    """Returns the *Blood Alcohol Content* (rise) for a person (described by
    the given attributes) after a drink containing *volume* ml of alcohol
    with the given *percent* (vol/vol).
    """
    return gramm_to_promille(
        calculate_alcohol(volume, percent),
        age, weight, height, sex
    )
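To make the composition concrete, here is a minimal sketch with hypothetical stand-ins for the two helpers, based on the classic Widmark model (the real library's formulas may well differ):

def calculate_alcohol(volume, percent):
    # grams of pure ethanol: ml * (vol% / 100) * density of ethanol (~0.8 g/ml)
    return volume * percent / 100.0 * 0.8

def gramm_to_promille(gramm, age, weight, height, sex):
    # Widmark: promille = alcohol mass / (body weight * distribution factor)
    r = 0.68 if sex == 'male' else 0.55  # assumed distribution factors
    return gramm / (weight * r)

print(round(get_bac(30, 80, 180, 'male', 500, 5.0), 2))  # ~0.37 promille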
def get_full_angles(self):
    """Get the interpolated sun and satellite angles."""
    if (self.sun_azi is not None and self.sun_zen is not None and
            self.sat_azi is not None and self.sat_zen is not None):
        return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen

    self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen = self._get_full_angles()
    self.sun_azi = da.from_delayed(self.sun_azi,
                                   dtype=self["ANGULAR_RELATIONS"].dtype,
                                   shape=(self.scanlines, self.pixels))
    self.sun_zen = da.from_delayed(self.sun_zen,
                                   dtype=self["ANGULAR_RELATIONS"].dtype,
                                   shape=(self.scanlines, self.pixels))
    self.sat_azi = da.from_delayed(self.sat_azi,
                                   dtype=self["ANGULAR_RELATIONS"].dtype,
                                   shape=(self.scanlines, self.pixels))
    self.sat_zen = da.from_delayed(self.sat_zen,
                                   dtype=self["ANGULAR_RELATIONS"].dtype,
                                   shape=(self.scanlines, self.pixels))
    return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen
def save_thumbnail(self, thumbnail):
    """
    Save a thumbnail to the thumbnail_storage.

    Also triggers the ``thumbnail_created`` signal and caches the thumbnail
    values and dimensions for future lookups.
    """
    filename = thumbnail.name

    try:
        self.thumbnail_storage.delete(filename)
    except Exception:
        pass
    self.thumbnail_storage.save(filename, thumbnail)

    thumb_cache = self.get_thumbnail_cache(
        thumbnail.name, create=True, update=True)

    # Cache thumbnail dimensions.
    if settings.THUMBNAIL_CACHE_DIMENSIONS:
        dimensions_cache, created = (
            models.ThumbnailDimensions.objects.get_or_create(
                thumbnail=thumb_cache,
                defaults={'width': thumbnail.width,
                          'height': thumbnail.height}))
        if not created:
            dimensions_cache.width = thumbnail.width
            dimensions_cache.height = thumbnail.height
            dimensions_cache.save()

    signals.thumbnail_created.send(sender=thumbnail)
def set_portfast(self, name, value=None, default=False, disable=False):
    """Configures the portfast value for the specified interface

    Args:
        name (string): The interface identifier to configure.  The name
            must be the full interface name (eg Ethernet1, not Et1)

        value (bool): True if portfast is enabled otherwise False

        default (bool): Configures the portfast parameter to its default
            value using the EOS CLI default config command

        disable (bool): Negates the portfast parameter using the EOS CLI
            no config command

    Returns:
        True if the command succeeds, otherwise False

    Raises:
        ValueError: Raised if an invalid interface name is specified

        TypeError: Raised if the value keyword argument does not evaluate
            to a valid boolean
    """
    if value is False:
        disable = True
    string = 'spanning-tree portfast'
    cmds = self.command_builder(string, value=value, default=default,
                                disable=disable)
    return self.configure_interface(name, cmds)
def _make_weirdness_regex():
    """
    Creates a list of regexes that match 'weird' character sequences.
    The more matches there are, the weirder the text is.
    """
    groups = []

    # Match diacritical marks, except when they modify a non-cased letter or
    # another mark.
    #
    # You wouldn't put a diacritical mark on a digit or a space, for example.
    # You might put it on a Latin letter, but in that case there will almost
    # always be a pre-composed version, and we normalize to pre-composed
    # versions first. The cases that can't be pre-composed tend to be in
    # large scripts without case, which are in class C.
    groups.append('[^CM]M')

    # Match non-Latin characters adjacent to Latin characters.
    #
    # This is a simplification from ftfy version 2, which compared all
    # adjacent scripts. However, the ambiguities we need to resolve come from
    # encodings designed to represent Latin characters.
    groups.append('[Ll][AaC]')
    groups.append('[AaC][Ll]')

    # Match IPA letters next to capital letters.
    #
    # IPA uses lowercase letters only. Some accented capital letters next to
    # punctuation can accidentally decode as IPA letters, and an IPA letter
    # appearing next to a capital letter is a good sign that this happened.
    groups.append('[LA]i')
    groups.append('i[LA]')

    # Match non-combining diacritics. We've already set aside the common ones
    # like ^ (the CIRCUMFLEX ACCENT, repurposed as a caret, exponent sign,
    # or happy eye) and assigned them to category 'o'. The remaining ones,
    # like the diaeresis (¨), are pretty weird to see on their own instead
    # of combined with a letter.
    groups.append('2')

    # Match C1 control characters, which are almost always the result of
    # decoding Latin-1 that was meant to be Windows-1252.
    groups.append('X')

    # Match private use and unassigned characters.
    groups.append('P')
    groups.append('_')

    # Match adjacent characters from any different pair of these categories:
    # - Modifier marks (M)
    # - Letter modifiers (m)
    # - Miscellaneous numbers (N)
    # - Symbols (1 or 3, because 2 is already weird on its own)
    exclusive_categories = 'MmN13'
    for cat1 in exclusive_categories:
        others_range = ''.join(c for c in exclusive_categories if c != cat1)
        groups.append('{cat1}[{others_range}]'.format(
            cat1=cat1, others_range=others_range
        ))
    regex = '|'.join(groups)
    return re.compile(regex)
def _notebook_model_from_db(self, record, content):
    """
    Build a notebook model from database record.
    """
    path = to_api_path(record['parent_name'] + record['name'])
    model = base_model(path)
    model['type'] = 'notebook'
    model['last_modified'] = model['created'] = record['created_at']
    if content:
        content = reads_base64(record['content'])
        self.mark_trusted_cells(content, path)
        model['content'] = content
        model['format'] = 'json'
        self.validate_notebook_model(model)
    return model
def list_parameters(self, parameter_type=None, page_size=None):
    """
    Lists the parameters visible to this client.

    Parameters are returned in lexicographical order.

    :param str parameter_type: The type of parameter
    :rtype: :class:`.Parameter` iterator
    """
    params = {'details': True}
    if parameter_type is not None:
        params['type'] = parameter_type
    if page_size is not None:
        params['limit'] = page_size

    return pagination.Iterator(
        client=self._client,
        path='/mdb/{}/parameters'.format(self._instance),
        params=params,
        response_class=mdb_pb2.ListParametersResponse,
        items_key='parameter',
        item_mapper=Parameter,
    )
def _platform_patterns(self, platform='generic', compiled=False):
    """Return all the patterns for a specific platform."""
    patterns = (self._dict_compiled.get(platform, None) if compiled
                else self._dict_text.get(platform, None))
    if patterns is None:
        raise KeyError("Unknown platform: {}".format(platform))
    return patterns
def delete_maintenance_window(self, id, **kwargs):  # noqa: E501
    """Delete a specific maintenance window  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_maintenance_window(id, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str id: (required)
    :return: ResponseContainerMaintenanceWindow
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_maintenance_window_with_http_info(id, **kwargs)  # noqa: E501
    else:
        (data) = self.delete_maintenance_window_with_http_info(id, **kwargs)  # noqa: E501
        return data
def _tryConnect(src, unit, intfName):
    """
    Try to connect src to the interface of the specified name on unit.
    Ignore if the interface is not present or if it already has a driver.
    """
    try:
        dst = getattr(unit, intfName)
    except AttributeError:
        return
    if not dst._sig.drivers:
        connect(src, dst)
def _find_known(row):
    """Find variants present in known pathogenic databases."""
    out = []
    clinvar_no = set(["unknown", "untested", "non-pathogenic",
                      "probable-non-pathogenic", "uncertain_significance",
                      "not_provided", "benign", "likely_benign"])
    if row["cosmic_ids"] or row["cosmic_id"]:
        out.append("cosmic")
    if row["clinvar_sig"] and not row["clinvar_sig"].lower() in clinvar_no:
        out.append("clinvar")
    return out
def _add_zone(self, zone, name='', status=Zone.CLEAR, expander=False):
    """
    Adds a zone to the internal zone list.

    :param zone: zone number
    :type zone: int
    :param name: human readable zone name
    :type name: string
    :param status: zone status
    :type status: int
    :param expander: whether the zone belongs to an expander
    :type expander: bool
    """
    if zone not in self._zones:
        self._zones[zone] = Zone(zone=zone, name=name, status=None,
                                 expander=expander)

    self._update_zone(zone, status=status)
def bin_remove(self):
    """Remove Slackware packages."""
    packages = self.args[1:]
    options = ["-r", "--removepkg"]
    additional_options = ["--deps", "--check-deps", "--tag", "--checklist"]
    flag, extra = "", []
    flags = ["-warn", "-preserve", "-copy", "-keep"]

    # merge --check-deps and --deps options
    if (additional_options[1] in self.args and
            additional_options[0] not in self.args):
        self.args.append(additional_options[0])

    if len(self.args) > 1 and self.args[0] in options:
        for additional in additional_options:
            if additional in self.args:
                extra.append(additional)
                self.args.remove(additional)
                packages = self.args[1:]
        for fl in flags:
            if fl in self.args:
                flag = self.args[1]
                packages = self.args[2:]
        PackageManager(packages).remove(flag, extra)
    else:
        usage("")
def roles_remove(user, role):
    """Remove user from role."""
    user, role = _datastore._prepare_role_modify_args(user, role)
    if user is None:
        raise click.UsageError('Cannot find user.')
    if role is None:
        raise click.UsageError('Cannot find role.')
    if _datastore.remove_role_from_user(user, role):
        click.secho('Role "{0}" removed from user "{1}" '
                    'successfully.'.format(role, user), fg='green')
    else:
        raise click.UsageError('Cannot remove role from user.')
def url_defaults(self, func: Callable, name: AppOrBlueprintKey = None) -> Callable:
    """Add a url default preprocessor.

    This is designed to be used as a decorator. An example usage,

    .. code-block:: python

        @app.url_defaults
        def default(endpoint, values):
            ...
    """
    self.url_default_functions[name].append(func)
    return func
def get_mac_acl_for_intf_input_interface_name(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
    config = get_mac_acl_for_intf
    input = ET.SubElement(get_mac_acl_for_intf, "input")
    interface_name = ET.SubElement(input, "interface-name")
    interface_name.text = kwargs.pop('interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _prepare_nameparser_constants():
    """Prepare nameparser Constants.

    Remove nameparser's titles and use our own and add as suffixes the
    roman numerals. Configuration is the same for all names (i.e. instances).
    """
    constants = Constants()
    roman_numeral_suffixes = [u'v', u'vi', u'vii', u'viii', u'ix', u'x',
                              u'xii', u'xiii', u'xiv', u'xv']
    titles = [u'Dr', u'Prof', u'Professor', u'Sir', u'Editor', u'Ed', u'Mr',
              u'Mrs', u'Ms', u'Chair', u'Co-Chair', u'Chairs', u'co-Chairs']
    constants.titles.remove(*constants.titles).add(*titles)
    constants.suffix_not_acronyms.add(*roman_numeral_suffixes)
    return constants
def run_sequential(self):
    """Perform the computation sequentially, only holding two computed
    objects in memory at a time.
    """
    try:
        result = self.empty_result(*self.context)
        for obj in self.iterable:
            r = self.compute(obj, *self.context)
            result = self.process_result(r, result)
            self.progress.update(1)
            # Short-circuited?
            if self.done:
                break
    except Exception as e:
        raise e
    finally:
        self.progress.close()
    return result
def merge_plugin_from_baseline(baseline_plugins, args):
    """
    :type baseline_plugins: tuple of BasePlugin
    :param baseline_plugins: BasePlugin instances from baseline file

    :type args: dict
    :param args: dict of arguments parsed from usage

    param priority: input param > baseline param > default

    :returns: tuple of initialized plugins
    """
    def _remove_key(d, key):
        r = dict(d)
        r.pop(key)
        return r

    baseline_plugins_dict = {
        vars(plugin)["name"]: _remove_key(vars(plugin), "name")
        for plugin in baseline_plugins
    }

    # Use input plugin as starting point
    if args.use_all_plugins:
        # input param and default param are used
        plugins_dict = dict(args.plugins)

        # baseline param priority > default
        for plugin_name, param_name, param_value in _get_prioritized_parameters(
            baseline_plugins_dict,
            args.is_using_default_value,
            prefer_default=True,
        ):
            try:
                plugins_dict[plugin_name][param_name] = param_value
            except KeyError:
                log.warning(
                    'Baseline contains plugin %s which is not in all plugins! Ignoring...'
                    % (plugin_name),
                )

        return from_parser_builder(
            plugins_dict,
            exclude_lines_regex=args.exclude_lines,
        )

    # Use baseline plugin as starting point
    disabled_plugins = PluginOptions.get_disabled_plugins(args)
    plugins_dict = {
        plugin_name: plugin_params
        for plugin_name, plugin_params in baseline_plugins_dict.items()
        if plugin_name not in disabled_plugins
    }

    # input param priority > baseline
    input_plugins_dict = dict(args.plugins)
    for plugin_name, param_name, param_value in _get_prioritized_parameters(
        input_plugins_dict,
        args.is_using_default_value,
        prefer_default=False,
    ):
        try:
            plugins_dict[plugin_name][param_name] = param_value
        except KeyError:
            log.warning(
                '%s specified, but %s not configured! Ignoring...'
                % ("".join(["--", param_name.replace("_", "-")]), plugin_name),
            )

    return from_parser_builder(
        plugins_dict,
        exclude_lines_regex=args.exclude_lines,
    )
def _get_consecutive_portions_of_front(front):
    """
    Yields lists of the form [(f, s), (f, s)], one at a time from the given
    front (which is a list of the same form), such that each list yielded is
    consecutive in frequency.
    """
    last_f = None
    ls = []
    for f, s in front:
        if last_f is not None and f != last_f + 1:
            yield ls
            ls = []
        ls.append((f, s))
        last_f = f
    yield ls
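A small self-contained illustration of the generator above, with hypothetical (f, s) pairs; the front splits wherever the frequency index jumps:

front = [(1, 'a'), (2, 'b'), (5, 'c'), (6, 'd')]
print(list(_get_consecutive_portions_of_front(front)))
# [[(1, 'a'), (2, 'b')], [(5, 'c'), (6, 'd')]]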
def batch_per(hyps: Sequence[Sequence[T]], refs: Sequence[Sequence[T]]) -> float:
    """Calculates the phoneme error rate of a batch."""
    macro_per = 0.0
    for i in range(len(hyps)):
        ref = [phn_i for phn_i in refs[i] if phn_i != 0]
        hyp = [phn_i for phn_i in hyps[i] if phn_i != 0]
        macro_per += distance.edit_distance(ref, hyp) / len(ref)
    return macro_per / len(hyps)
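The `distance` module above is an external dependency. A minimal sketch with a stand-in Levenshtein implementation, injected into the same namespace (and assuming the typing imports used by the signature are in scope), shows the expected behaviour; zeros act as padding and are stripped:

import types

def _edit_distance(a, b):
    # Single-row dynamic-programming Levenshtein distance.
    dp = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        prev = dp[0]
        dp[0] = i
        for j, cb in enumerate(b, 1):
            cur = dp[j]
            dp[j] = min(dp[j] + 1, dp[j - 1] + 1, prev + (ca != cb))
            prev = cur
    return dp[-1]

distance = types.SimpleNamespace(edit_distance=_edit_distance)
print(batch_per([[1, 2, 3, 0]], [[1, 2, 4, 0]]))  # 1 edit / 3 phonemes = 0.333...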
def list_migration_issues_accounts(self, account_id, content_migration_id):
    """
    List migration issues.

    Returns paginated migration issues
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - account_id
    """ID"""
    path["account_id"] = account_id

    # REQUIRED - PATH - content_migration_id
    """ID"""
    path["content_migration_id"] = content_migration_id

    self.logger.debug("GET /api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/accounts/{account_id}/content_migrations/{content_migration_id}/migration_issues".format(**path), data=data, params=params, all_pages=True)
def read(self, size=None):
    """Read bytes from an iterator."""
    while size is None or len(self.buffer) < size:
        try:
            self.buffer += next(self.data_stream)
        except StopIteration:
            break

    sized_chunk = self.buffer[:size]
    if size is None:
        self.buffer = ""
    else:
        self.buffer = self.buffer[size:]
    return sized_chunk
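A minimal sketch of a wrapper class for the method above (the surrounding class is assumed; only `data_stream` and `buffer` attributes are required):

class IterStream:
    def __init__(self, data_stream):
        self.data_stream = data_stream
        self.buffer = ""

    read = read  # reuse the module-level function above as a method

s = IterStream(iter(["hel", "lo ", "world"]))
print(s.read(5))  # 'hello'
print(s.read())   # ' world'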
def number(type=None, length=None, prefixes=None):
    """
    Return a random credit card number.

    :param type: credit card type. Defaults to a random selection.
    :param length: length of the credit card number.
        Defaults to the length for the selected card type.
    :param prefixes: allowed prefixes for the card number.
        Defaults to prefixes for the selected card type.
    :return: credit card randomly generated number (int)
    """
    # select credit card type
    if type and type in CARDS:
        card = type
    else:
        card = random.choice(list(CARDS.keys()))

    # select a credit card number's prefix
    if not prefixes:
        prefixes = CARDS[card]['prefixes']
    prefix = random.choice(prefixes)

    # select length of the credit card number, if it's not set
    if not length:
        length = CARDS[card]['length']

    # generate all digits but the last one
    result = str(prefix)
    for d in range(length - len(str(prefix))):
        result += str(basic.number())

    last_digit = check_digit(int(result))
    return int(result[:-1] + str(last_digit))
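The `check_digit` helper is defined elsewhere; given the call site (it receives the full number and its result replaces the last digit), one plausible implementation is the standard Luhn check digit computed over everything except that last digit. A hedged sketch:

def check_digit(number):
    """Hypothetical Luhn check digit for `number`, ignoring its last digit."""
    digits = [int(d) for d in str(number)[:-1]]
    total = 0
    for i, d in enumerate(reversed(digits)):
        if i % 2 == 0:  # positions doubled once the check digit is appended
            d *= 2
            if d > 9:
                d -= 9
        total += d
    return (10 - total % 10) % 10

print(check_digit(79927398710))  # 3, for the classic Luhn payload 7992739871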
def record_exists(self, table, keys):
    """
    Checks if a record exists in Cassandra

    :param table: Target Cassandra table.
                  Use dot notation to target a specific keyspace.
    :type table: str
    :param keys: The keys and their values to check the existence.
    :type keys: dict
    """
    keyspace = self.keyspace
    if '.' in table:
        keyspace, table = table.split('.', 1)
    ks = " AND ".join("{}=%({})s".format(key, key) for key in keys.keys())
    cql = "SELECT * FROM {keyspace}.{table} WHERE {keys}".format(
        keyspace=keyspace, table=table, keys=ks)
    try:
        rs = self.get_conn().execute(cql, keys)
        return rs.one() is not None
    except Exception:
        return False
def iget(self, irods_path, attempts=1, pause=15):
    """Add an iget command to retrieve a file from iRODS.

    Parameters
    ----------
    irods_path: str
        Filepath which should be fetched using iget
    attempts: int (default: 1)
        Number of retries, if iRODS access fails
    pause: int (default: 15)
        Pause between two access attempts in seconds
    """
    if attempts > 1:
        cmd = """
            for i in {{1..{0}}}; do
                ret=$(iget -v {1} 2>&1)
                echo $ret
                if [[ $ret == *"ERROR"* ]]; then
                    echo "Attempt $i failed"
                else
                    break
                fi
                sleep {2}s
            done
        """
        cmd = lstrip(cmd)
        cmd = cmd.format(attempts, irods_path, pause)
        self.add(cmd)
    else:
        self.add('iget -v "{}"'.format(irods_path))
def openRtpPort(self):
    """Open an RTP socket bound to the specified port."""
    # Create a new datagram socket to receive RTP packets from the server
    # (assumes `import socket` at module level).
    self.rtpSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    # Set the timeout value of the socket to 0.5 sec.
    self.rtpSocket.settimeout(0.5)

    try:
        # Bind the socket to the address using the RTP port given by the
        # client user; rtpPort should be bigger than 1024.
        self.rtpSocket.bind((self.serverAddr, self.rtpPort))
        print("Bind RtpPort Success")
    except Exception:
        tkinter.messagebox.showwarning('Connection Failed',
                                       'Connection to rtpServer failed...')
def ClearAllVar(self):
    """Clear this Value."""
    self.value = None
    # Call OnClearAllVar on options.
    _ = [option.OnClearAllVar() for option in self.options]
def fetchall(self):
    """Fetch all rows."""
    result = self.query.result()
    return [row.values() for row in result]
def GaussianCdfInverse(p, mu=0, sigma=1):
    """Evaluates the inverse CDF of the gaussian distribution.

    See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function

    Args:
        p: float
        mu: mean parameter
        sigma: standard deviation parameter

    Returns:
        float
    """
    x = ROOT2 * erfinv(2 * p - 1)
    return mu + x * sigma
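A quick check of the quantile function above (a sketch assuming ROOT2 = sqrt(2) and erfinv from scipy.special, which the function requires):

import math
from scipy.special import erfinv

ROOT2 = math.sqrt(2)
print(GaussianCdfInverse(0.5))              # 0.0: the median is the mean
print(round(GaussianCdfInverse(0.975), 2))  # ~1.96: the familiar 97.5th percentile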
def get_index_translog_disable_flush(self):
    """Return a dictionary showing the position of the
    'translog.disable_flush' knob for each index in the cluster.
    The dictionary will look like this:

    {
        "index1": True,       # Autoflushing DISABLED
        "index2": False,      # Autoflushing ENABLED
        "index3": "unknown",  # Using default setting (probably enabled)
        ...
    }
    """
    disabled = {}
    settings = self.get('/_settings')
    setting_getters = [
        lambda s: s['index.translog.disable_flush'],
        lambda s: s['index']['translog']['disable_flush']]
    for idx in settings:
        idx_settings = settings[idx]['settings']
        for getter in setting_getters:
            try:
                disabled[idx] = booleanise(getter(idx_settings))
            except KeyError:
                pass
        if idx not in disabled:
            disabled[idx] = 'unknown'
    return disabled
def kakwani(values, ineq_axis, weights=None):
    """Computes the Kakwani index."""
    from scipy.integrate import simps
    if weights is None:
        weights = ones(len(values))

    # sign = -1
    # if tax == True:
    #     sign = -1
    # else:
    #     sign = 1

    PLCx, PLCy = pseudo_lorenz(values, ineq_axis, weights)
    LCx, LCy = lorenz(ineq_axis, weights)

    del PLCx
    return simps((LCy - PLCy), LCx)
def dequeue(self, destination):
    """
    Removes and returns an item from the queue (or C{None} if no items in queue).

    @param destination: The queue name (destination).
    @type destination: C{str}

    @return: The first frame in the specified queue, or C{None} if there are none.
    @rtype: C{stompclient.frame.Frame}
    """
    if not self.has_frames(destination):
        return None

    message_id = self.queue_metadata[destination]['frames'].pop()
    self.queue_metadata[destination]['dequeued'] += 1
    frame = self.frame_store[message_id]
    del self.frame_store[message_id]

    self._opcount += 1
    self._sync()

    return frame
def determine_extra_packages(self, packages):
    """
    Return all packages that are installed, but missing from "packages".

    Return value is a tuple of the package names.
    """
    args = [
        "pip",
        "freeze",
    ]
    installed = subprocess.check_output(args, universal_newlines=True)

    installed_list = set()
    lines = installed.strip().split('\n')
    for (package, version) in self._parse_requirements(lines):
        installed_list.add(package)

    package_list = set()
    for (package, version) in self._parse_requirements(packages.readlines()):
        package_list.add(package)

    removal_list = installed_list - package_list

    return tuple(removal_list)
def watch(static_root, watch_paths=None, on_reload=None, host='localhost',
          port=5555, server_base_path="/", watcher_interval=1.0,
          recursive=True, open_browser=True, open_browser_delay=1.0):
    """Initialises an HttpWatcherServer to watch the given path for changes.
    Watches until the IO loop is terminated, or a keyboard interrupt is
    intercepted.

    Args:
        static_root: The path whose contents are to be served and watched.
        watch_paths: The paths to be watched for changes. If not supplied,
            this defaults to the static root.
        on_reload: An optional callback to pass to the watcher server that
            will be executed just before the server triggers a reload in
            connected clients.
        host: The host to which to bind our server.
        port: The port to which to bind our server.
        server_base_path: If the content is to be served from a non-standard
            base path, specify it here.
        watcher_interval: The maximum refresh rate of the watcher server.
        recursive: Whether to monitor the watch path recursively.
        open_browser: Whether or not to automatically attempt to open the
            user's browser at the root URL of the project (default: True).
        open_browser_delay: The number of seconds to wait before attempting
            to open the user's browser.
    """
    server = httpwatcher.HttpWatcherServer(
        static_root,
        watch_paths=watch_paths,
        on_reload=on_reload,
        host=host,
        port=port,
        server_base_path=server_base_path,
        watcher_interval=watcher_interval,
        recursive=recursive,
        open_browser=open_browser,
        open_browser_delay=open_browser_delay
    )
    server.listen()

    try:
        tornado.ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        server.shutdown()
def later(timeout, f, *args, **kwargs):
    '''
    Sets a timer that will call the *f* function past *timeout* seconds.

    See example in :ref:`sample_inter`

    :return: :class:`Greenlet` new 'thread' which will perform the call
        when specified.
    '''
    def wrap(*args, **kwargs):
        sleep(timeout)
        return f(*args, **kwargs)
    return spawn(wrap, *args, **kwargs)
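A usage sketch (hedged: it assumes `sleep` and `spawn` come from gevent, which matches the Greenlet return type documented above):

from gevent import sleep, spawn

def greet(name):
    print('hello, %s' % name)

g = later(1.5, greet, 'world')  # schedules greet('world') in ~1.5 s
g.join()                        # wait for the timer to fire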
def _flow_check_handler_internal(self):
    """Periodic handler to check if installed flows are present.

    This handler runs periodically to check if installed flows are present.
    This function cannot detect and delete the stale flows, if present.
    It requires more complexity to delete stale flows. Generally, stale
    flows are not present. So, that logic is not put here.
    """
    integ_flow = self.integ_br_obj.dump_flows_for(
        in_port=self.int_peer_port_num)
    ext_flow = self.ext_br_obj.dump_flows_for(
        in_port=self.phy_peer_port_num)
    for net_uuid, lvm in six.iteritems(self.local_vlan_map):
        vdp_vlan = lvm.any_consistent_vlan()
        flow_required = False
        if not (vdp_vlan and ovs_lib.is_valid_vlan_tag(vdp_vlan)):
            return
        if not self._check_bridge_flow(integ_flow, vdp_vlan, lvm.lvid):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on Integ bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if not self._check_bridge_flow(ext_flow, lvm.lvid, vdp_vlan):
            LOG.error("Flow for VDP Vlan %(vdp_vlan)s, Local vlan "
                      "%(lvid)s not present on External bridge",
                      {'vdp_vlan': vdp_vlan, 'lvid': lvm.lvid})
            flow_required = True
        if flow_required:
            LOG.info("Programming flows for lvid %(lvid)s vdp vlan"
                     " %(vdp)s", {'lvid': lvm.lvid, 'vdp': vdp_vlan})
            self.program_vm_ovs_flows(lvm.lvid, 0, vdp_vlan)
def render(self, template, **data):
    """Render data with template, return html unicodes.

    Parameters
        template    str     the template's filename
        data        dict    the data to render
    """
    # make a copy and update the copy
    dct = self.global_data.copy()
    dct.update(data)
    try:
        html = self.env.get_template(template).render(**dct)
    except TemplateNotFound:
        raise JinjaTemplateNotFound
    return html
def delete_audio_mp3_profile(apps, schema_editor):
    """Delete the audio_mp3 profile."""
    Profile = apps.get_model('edxval', 'Profile')
    Profile.objects.filter(profile_name=AUDIO_MP3_PROFILE).delete()
def delete(self, moveFixIssuesTo=None, moveAffectedIssuesTo=None):
    """Delete this project version from the server.

    If neither of the arguments are specified, the version is removed from
    all issues it is attached to.

    :param moveFixIssuesTo: in issues for which this version is a fix
        version, add this argument version to the fix version list
    :param moveAffectedIssuesTo: in issues for which this version is an
        affected version, add this argument version to the affected
        version list
    """
    params = {}
    if moveFixIssuesTo is not None:
        params['moveFixIssuesTo'] = moveFixIssuesTo
    if moveAffectedIssuesTo is not None:
        params['moveAffectedIssuesTo'] = moveAffectedIssuesTo

    return super(Version, self).delete(params)
def _init_properties(self):
    """Init properties."""
    super(BaseCRUDView, self)._init_properties()
    # Reset init props
    self.related_views = self.related_views or []
    self._related_views = self._related_views or []
    self.description_columns = self.description_columns or {}
    self.validators_columns = self.validators_columns or {}
    self.formatters_columns = self.formatters_columns or {}
    self.add_form_extra_fields = self.add_form_extra_fields or {}
    self.edit_form_extra_fields = self.edit_form_extra_fields or {}
    self.show_exclude_columns = self.show_exclude_columns or []
    self.add_exclude_columns = self.add_exclude_columns or []
    self.edit_exclude_columns = self.edit_exclude_columns or []
    # Generate base props
    list_cols = self.datamodel.get_user_columns_list()
    self.list_columns = self.list_columns or [list_cols[0]]
    self._gen_labels_columns(self.list_columns)
    self.order_columns = (
        self.order_columns
        or self.datamodel.get_order_columns_list(list_columns=self.list_columns)
    )
    if self.show_fieldsets:
        self.show_columns = []
        for fieldset_item in self.show_fieldsets:
            self.show_columns = self.show_columns + list(
                fieldset_item[1].get("fields")
            )
    else:
        if not self.show_columns:
            self.show_columns = [
                x for x in list_cols if x not in self.show_exclude_columns
            ]
    if self.add_fieldsets:
        self.add_columns = []
        for fieldset_item in self.add_fieldsets:
            self.add_columns = self.add_columns + list(
                fieldset_item[1].get("fields")
            )
    else:
        if not self.add_columns:
            self.add_columns = [
                x for x in list_cols if x not in self.add_exclude_columns
            ]
    if self.edit_fieldsets:
        self.edit_columns = []
        for fieldset_item in self.edit_fieldsets:
            self.edit_columns = self.edit_columns + list(
                fieldset_item[1].get("fields")
            )
    else:
        if not self.edit_columns:
            self.edit_columns = [
                x for x in list_cols if x not in self.edit_exclude_columns
            ]
def crystal(positions, molecules, group, cellpar=[1.0, 1.0, 1.0, 90, 90, 90],
            repetitions=[1, 1, 1]):
    '''Build a crystal from atomic positions, space group and cell parameters.

    **Parameters**

    positions: list of coordinates
        A list of the atomic positions
    molecules: list of Molecule
        The molecules corresponding to the positions, the molecule will be
        translated in all the equivalent positions.
    group: int | str
        Space group given either as its number in International Tables or
        as its Hermann-Mauguin symbol.
    repetitions:
        Repetition of the unit cell in each direction
    cellpar:
        Unit cell parameters

    This function was taken and adapted from the *spacegroup* module found
    in `ASE <https://wiki.fysik.dtu.dk/ase/>`_. The *spacegroup* module was
    originally developed by Jesper Frills.
    '''
    sp = Spacegroup(group)
    sites, kind = sp.equivalent_sites(positions)

    nx, ny, nz = repetitions
    reptot = nx * ny * nz

    # Unit cell parameters
    a, b, c = cellpar_to_cell(cellpar)

    cry = System()

    i = 0
    with cry.batch() as batch:
        for x in range(nx):
            for y in range(ny):
                for z in range(nz):
                    for s, ki in zip(sites, kind):
                        tpl = molecules[ki]
                        tpl.move_to(s[0] * a + s[1] * b + s[2] * c +
                                    a * x + b * y + c * z)
                        batch.append(tpl.copy())

    # Computing the box_vectors
    cry.box_vectors = np.array([a * nx, b * ny, c * nz])
    return cry
def _Open(self, path_spec, mode='rb'):
    """Opens the file system object defined by path specification.

    Args:
      path_spec (PathSpec): path specification of the file system.
      mode (Optional[str]): file access mode. The default is 'rb' which
          represents read-only binary.

    Raises:
      AccessError: if the access to open the file was denied.
      IOError: if the file system object could not be opened.
      PathSpecError: if the path specification is incorrect.
      ValueError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')

    file_object = resolver.Resolver.OpenFileObject(
        path_spec.parent, resolver_context=self._resolver_context)

    try:
        zip_file = zipfile.ZipFile(file_object, 'r')
    except:
        file_object.close()
        raise

    self._file_object = file_object
    self._zip_file = zip_file
def analyze_xml(xml):
    """Analyzes `file` against packtools' XMLValidator."""
    f = StringIO(xml)
    try:
        xml = packtools.XMLValidator.parse(f, sps_version='sps-1.4')
    except packtools.exceptions.PacktoolsError as e:
        logger.exception(e)
        summary = {}
        summary['dtd_is_valid'] = False
        summary['sps_is_valid'] = False
        summary['is_valid'] = False
        summary['parsing_error'] = True
        summary['dtd_errors'] = []
        summary['sps_errors'] = []
        return summary
    except XMLSyntaxError as e:
        logger.exception(e)
        summary = {}
        summary['dtd_is_valid'] = False
        summary['sps_is_valid'] = False
        summary['is_valid'] = False
        summary['parsing_error'] = True
        summary['dtd_errors'] = [e.message]
        summary['sps_errors'] = []
        return summary
    else:
        summary = summarize(xml)
        return summary
def revoke(self, only_access=False):
    """Revoke the current Authorization.

    :param only_access: (Optional) When explicitly set to True, do not evict
        the refresh token if one is set.

    Revoking a refresh token will in-turn revoke all access tokens
    associated with that authorization.
    """
    if only_access or self.refresh_token is None:
        super(Authorizer, self).revoke()
    else:
        self._authenticator.revoke_token(self.refresh_token, "refresh_token")
        self._clear_access_token()
        self.refresh_token = None
def parse_quadrant_measurement(quad_azimuth):
    """
    Parses a quadrant measurement of the form "AxxB", where A and B are
    cardinal directions and xx is an angle measured relative to those
    directions.

    In other words, it converts a measurement such as E30N into an azimuth
    of 60 degrees, or W10S into an azimuth of 260 degrees. For ambiguous
    quadrant measurements such as "N30S", a ValueError is raised.

    Parameters
    ----------
    quad_azimuth : string
        An azimuth measurement in quadrant form.

    Returns
    -------
    azi : float
        An azimuth in degrees clockwise from north.

    See Also
    --------
    parse_azimuth
    """
    def rotation_direction(first, second):
        return np.cross(_azimuth2vec(first), _azimuth2vec(second))

    # Parse measurement
    quad_azimuth = quad_azimuth.strip()
    try:
        first_dir = quadrantletter_to_azimuth(quad_azimuth[0].upper())
        sec_dir = quadrantletter_to_azimuth(quad_azimuth[-1].upper())
    except KeyError:
        raise ValueError('{} is not a valid azimuth'.format(quad_azimuth))

    angle = float(quad_azimuth[1:-1])

    # Convert quadrant measurement into an azimuth
    direc = rotation_direction(first_dir, sec_dir)
    azi = first_dir + direc * angle

    # Catch ambiguous measurements such as N10S and raise an error
    if abs(direc) < 0.9:
        raise ValueError('{} is not a valid azimuth'.format(quad_azimuth))

    # Ensure that 0 <= azi <= 360
    if azi < 0:
        azi += 360
    elif azi > 360:
        azi -= 360

    return azi
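A runnable sketch of the two helpers this function relies on (hypothetical stand-ins; the real library's versions may differ), followed by the examples from the docstring:

import numpy as np

def quadrantletter_to_azimuth(letter):
    # Assumed mapping of cardinal letters to azimuths in degrees.
    return {'N': 0.0, 'E': 90.0, 'S': 180.0, 'W': 270.0}[letter]

def _azimuth2vec(azi):
    # Unit vector for an azimuth, so the cross product gives rotation sense.
    azi = np.radians(azi)
    return np.cos(azi), np.sin(azi)

print(parse_quadrant_measurement('E30N'))  # 60.0
print(parse_quadrant_measurement('W10S'))  # 260.0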
def _run_tRNA_scan(fasta_file):
    """Run tRNAscan-SE to predict tRNAs."""
    out_file = fasta_file + "_trnascan"
    se_file = fasta_file + "_second_str"
    cmd = "tRNAscan-SE -q -o {out_file} -f {se_file} {fasta_file}"
    run(cmd.format(**locals()))
    return out_file, se_file
def register(model, app=None, manager_name="history", records_class=None,
             table_name=None, **records_config):
    """
    Create historical model for `model` and attach history manager to `model`.

    Keyword arguments:
    app -- App to install historical model into (defaults to model.__module__)
    manager_name -- class attribute name to use for historical manager
    records_class -- class to use for history relation (defaults to
        HistoricalRecords)
    table_name -- Custom name for history table (defaults to
        'APPNAME_historicalMODELNAME')

    This method should be used as an alternative to attaching an
    `HistoricalManager` instance directly to `model`.
    """
    from . import models

    if records_class is None:
        records_class = models.HistoricalRecords

    records = records_class(**records_config)
    records.manager_name = manager_name
    records.table_name = table_name
    records.module = app and ("%s.models" % app) or model.__module__
    records.cls = model
    records.add_extra_methods(model)
    records.finalize(model)
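Typical usage, sketched for a third-party model you cannot edit directly (assuming a Django project with this history app installed):

from django.contrib.auth.models import User

# Track changes to User without touching its class definition.
register(User, app=__package__, manager_name='history')
# User instances now expose a `history` manager over their historical records.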
def _getZoomLevelRange(self, resolution, unit='meters'):
    """Return lower and higher zoom level given a resolution."""
    assert unit in ('meters', 'degrees')
    if unit == 'meters' and self.unit == 'degrees':
        resolution = resolution / self.metersPerUnit
    elif unit == 'degrees' and self.unit == 'meters':
        resolution = resolution * EPSG4326_METERS_PER_UNIT

    lo = 0
    hi = len(self.RESOLUTIONS)
    while lo < hi:
        mid = (lo + hi) // 2
        if resolution > self.RESOLUTIONS[mid]:
            hi = mid
        else:
            lo = mid + 1
    return lo, hi
def ReadUserDefinedFunction(self, udf_link, options=None):
    """Reads a user defined function.

    :param str udf_link: The link to the user defined function.
    :param dict options: The request options for the request.

    :return: The read UDF.
    :rtype: dict
    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(udf_link)
    udf_id = base.GetResourceIdOrFullNameFromLink(udf_link)
    return self.Read(path, 'udfs', udf_id, None, options)
async def rt_connect(self, loop):
    """Start subscription manager for real time data."""
    if self.sub_manager is not None:
        return
    self.sub_manager = SubscriptionManager(
        loop, "token={}".format(self._access_token), SUB_ENDPOINT
    )
    self.sub_manager.start()
def iteration(self, node_status=True):
    """
    Execute a single model iteration

    :return: Iteration_id, Incremental node status (dictionary node->status)
    """
    self.clean_initial_status(self.available_statuses.values())

    actual_status = {node: nstatus for node, nstatus
                     in future.utils.iteritems(self.status)}

    if self.actual_iteration == 0:
        self.actual_iteration += 1
        delta, node_count, status_delta = self.status_delta(actual_status)
        if node_status:
            return {"iteration": 0, "status": actual_status.copy(),
                    "node_count": node_count.copy(),
                    "status_delta": status_delta.copy()}
        else:
            return {"iteration": 0, "status": {},
                    "node_count": node_count.copy(),
                    "status_delta": status_delta.copy()}

    for u in self.graph.nodes():
        if actual_status[u] == 1:
            continue

        neighbors = list(self.graph.neighbors(u))
        if isinstance(self.graph, nx.DiGraph):
            neighbors = list(self.graph.predecessors(u))

        infected = 0
        for v in neighbors:
            infected += self.status[v]

        if len(neighbors) > 0:
            infected_ratio = float(infected) / len(neighbors)
            if infected_ratio >= self.params['nodes']['threshold'][u]:
                actual_status[u] = 1

    delta, node_count, status_delta = self.status_delta(actual_status)
    self.status = actual_status
    self.actual_iteration += 1

    if node_status:
        return {"iteration": self.actual_iteration - 1,
                "status": delta.copy(),
                "node_count": node_count.copy(),
                "status_delta": status_delta.copy()}
    else:
        return {"iteration": self.actual_iteration - 1, "status": {},
                "node_count": node_count.copy(),
                "status_delta": status_delta.copy()}
def on_connection_close(self) -> None:
    """Called in async handlers if the client closed the connection.

    Override this to clean up resources associated with long-lived
    connections. Note that this method is called only if the connection
    was closed during asynchronous processing; if you need to do cleanup
    after every request override `on_finish` instead.

    Proxies may keep a connection open for a time (perhaps indefinitely)
    after the client has gone away, so this method may not be called
    promptly after the end user closes their connection.
    """
    if _has_stream_request_body(self.__class__):
        if not self.request._body_future.done():
            self.request._body_future.set_exception(iostream.StreamClosedError())
            self.request._body_future.exception()
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
    """Returns a list of input readers for the input spec.

    Args:
      mapper_spec: The MapperSpec for this InputReader. Must contain
          'blob_keys' parameter with one or more blob keys.
      _reader: a callable that returns a file-like object for reading blobs.
          Used for dependency injection.

    Returns:
      A list of InputReaders spanning the subfiles within the blobs.
      There will be at least one reader per blob, but it will otherwise
      attempt to keep the expanded size even.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
        # This is a mechanism to allow multiple blob keys (which do not
        # contain commas) in a single string. It may go away.
        blob_keys = blob_keys.split(",")

    blob_files = {}
    total_size = 0
    for blob_key in blob_keys:
        zip_input = zipfile.ZipFile(_reader(blob_key))
        blob_files[blob_key] = zip_input.infolist()
        total_size += sum(x.file_size for x in blob_files[blob_key])

    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)

    # We can break on both blob key and file-within-zip boundaries.
    # A shard will span at minimum a single blob key, but may only
    # handle a few files within a blob.

    size_per_shard = total_size // shard_count

    readers = []
    for blob_key in blob_keys:
        bfiles = blob_files[blob_key]
        current_shard_size = 0
        start_file_index = 0
        next_file_index = 0
        for fileinfo in bfiles:
            next_file_index += 1
            current_shard_size += fileinfo.file_size
            if current_shard_size >= size_per_shard:
                readers.append(cls(blob_key, start_file_index,
                                   next_file_index, 0, _reader))
                current_shard_size = 0
                start_file_index = next_file_index
        if current_shard_size != 0:
            readers.append(cls(blob_key, start_file_index,
                               next_file_index, 0, _reader))

    return readers
def closeEvent(self, event):
    """Perform post-flight checks before closing

    Make sure processing of any kind is wrapped up before closing
    """
    # Make it snappy, but take care to clean it all up.
    # TODO(marcus): Enable GUI to return on problem, such
    # as asking whether or not the user really wants to quit
    # given there are things currently running.
    self.hide()

    if self.data["state"]["is_closing"]:
        # Explicitly clear potentially referenced data
        self.info(self.tr("Cleaning up models.."))
        for v in self.data["views"].values():
            v.model().deleteLater()
            v.setModel(None)

        self.info(self.tr("Cleaning up terminal.."))
        for item in self.data["models"]["terminal"].items:
            del item

        self.info(self.tr("Cleaning up controller.."))
        self.controller.cleanup()

        self.info(self.tr("All clean!"))
        self.info(self.tr("Good bye"))
        return super(Window, self).closeEvent(event)

    self.info(self.tr("Closing.."))

    def on_problem():
        self.heads_up("Warning", "Had trouble closing down. "
                      "Please tell someone and try again.")
        self.show()

    if self.controller.is_running:
        self.info(self.tr("..as soon as processing is finished.."))
        self.controller.is_running = False
        self.finished.connect(self.close)
        util.defer(2000, on_problem)
        return event.ignore()

    self.data["state"]["is_closing"] = True

    util.defer(200, self.close)
    return event.ignore()
Perform post-flight checks before closing Make sure processing of any kind is wrapped up before closing
def _setLocation(self, path): '''Set current location to *path*. *path* must be the same as root or under the root. .. note:: Comparisons are case-sensitive. If you set the root as 'D:/' then location can be set as 'D:/folder' *not* 'd:/folder'. ''' model = self._filesystemWidget.model() if not path.startswith(model.root.path): raise ValueError('Location must be root or under root.') # Ensure children for each segment in path are loaded. segments = self._segmentPath(path) for segment in reversed(segments): pathIndex = model.pathIndex(segment) model.fetchMore(pathIndex) self._filesystemWidget.setRootIndex(model.pathIndex(path)) self._locationWidget.clear() # Add history entry for each segment. for segment in segments: index = model.pathIndex(segment) if not index.isValid(): # Root item. icon = model.iconFactory.icon( riffle.icon_factory.IconType.Computer ) self._locationWidget.addItem( icon, model.root.path or model.root.name, model.root.path ) else: icon = model.icon(index) self._locationWidget.addItem(icon, segment, segment) if self._locationWidget.count() > 1: self._upButton.setEnabled(True) self._upShortcut.setEnabled(True) else: self._upButton.setEnabled(False) self._upShortcut.setEnabled(False)
Set current location to *path*. *path* must be the same as root or under the root. .. note:: Comparisons are case-sensitive. If you set the root as 'D:/' then location can be set as 'D:/folder' *not* 'd:/folder'.
def do_it(self, dbg): '''Starts a thread that will load values asynchronously''' try: var_objects = [] for variable in self.vars: variable = variable.strip() if len(variable) > 0: if '\t' in variable: # there are attributes beyond scope scope, attrs = variable.split('\t', 1) name = attrs[0] else: scope, attrs = (variable, None) name = scope var_obj = pydevd_vars.getVariable(dbg, self.thread_id, self.frame_id, scope, attrs) var_objects.append((var_obj, name)) t = GetValueAsyncThreadDebug(dbg, self.sequence, var_objects) t.start() except: exc = get_exception_traceback_str() sys.stderr.write('%s\n' % (exc,)) cmd = dbg.cmd_factory.make_error_message(self.sequence, "Error evaluating variable %s " % exc) dbg.writer.add_command(cmd)
Starts a thread that will load values asynchronously
def parse_uint(self, buff, start, end): ''' parse an integer from the buffer given the interval of bytes :param buff: :param start: :param end: ''' return struct.unpack_from(self.ustructmap[end - start], buff, start)[0]
parse an integer from the buffer given the interval of bytes :param buff: :param start: :param end:
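For context, a self-contained sketch of the width-to-format lookup that `parse_uint` appears to rely on; the contents of `USTRUCTMAP` below, including the little-endian byte order, are assumptions rather than values taken from the source.

import struct

# Assumed shape of the instance's `ustructmap`: field width in bytes mapped
# to a struct format string. Byte order is a guess; match it to the protocol.
USTRUCTMAP = {1: 'B', 2: '<H', 4: '<I', 8: '<Q'}

def parse_uint_standalone(buff, start, end):
    # The interval [start, end) fixes both the offset and the field width.
    return struct.unpack_from(USTRUCTMAP[end - start], buff, start)[0]

print(parse_uint_standalone(b'\x01\x02\x03\x04', 0, 2))  # 513 == 0x0201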
def average_dtu_configurations(list_of_objects): """Return DtuConfiguration instance with averaged values. Parameters ---------- list_of_objects : python list List of DtuConfiguration instances to be averaged. Returns ------- result : DtuConfiguration instance Object with averaged values. """ result = DtuConfiguration() if len(list_of_objects) == 0: return result list_of_members = result.__dict__.keys() # compute average of all the members of the class for member in list_of_members: result.__dict__[member] = np.mean( [tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects] ) return result
Return DtuConfiguration instance with averaged values. Parameters ---------- list_of_objects : python list List of DtuConfiguration instances to be averaged. Returns ------- result : DtuConfiguration instance Object with averaged values.
def getCachedOrUpdatedValue(self, key, channel=None): """ Gets the channel's value with the given key. If the key is not found in the cache, the value is queried from the host. If 'channel' is given, the respective channel's value is returned. """ if channel: return self._hmchannels[channel].getCachedOrUpdatedValue(key) try: return self._VALUES[key] except KeyError: value = self._VALUES[key] = self.getValue(key) return value
Gets the channel's value with the given key. If the key is not found in the cache, the value is queried from the host. If 'channel' is given, the respective channel's value is returned.
def get_iso_packet_buffer_list(transfer_p): """ Python-specific helper extracting a list of iso packet buffers. """ transfer = transfer_p.contents offset = 0 result = [] append = result.append for iso_transfer in _get_iso_packet_list(transfer): length = iso_transfer.length append(_get_iso_packet_buffer(transfer, offset, length)) offset += length return result
Python-specific helper extracting a list of iso packet buffers.
def fit(self, X=None, u=None):
    """Fit X into an embedded space.
    Inputs
    ----------
    X : array, shape (n_samples, n_features)
    u,s,v : svd decomposition of X (optional)

    Assigns
    ----------
    embedding : array-like, shape (n_samples, n_components)
        Stores the embedding vectors.
    u,sv,v : singular value decomposition of data S, potentially with smoothing
    isort1 : sorting along first dimension of matrix
    isort2 : sorting along second dimension of matrix (if n_Y > 0)
    cmap: correlation of each item with all locations in the embedding map (before upsampling)
    A: PC coefficients of each Fourier mode
    """
    X = X.copy()
    if self.mode == 'parallel':
        Xall = X.copy()
        X = np.reshape(Xall.copy(), (-1, Xall.shape[-1]))
    #X -= X.mean(axis=-1)[:,np.newaxis]
    if u is None:
        nmin = min(X.shape[0], X.shape[1])
        nmin = np.minimum(nmin - 1, self.nPC)
        u, sv, v = svdecon(np.float64(X), k=nmin)
        u = u * sv
    NN, self.nPC = u.shape
    self.u = u
    # now sort in X
    U = self._map(u.copy(), self.n_components, self.n_X, u.copy())
    return self
Fit X into an embedded space. Inputs ---------- X : array, shape (n_samples, n_features) u,s,v : svd decomposition of X (optional) Assigns ---------- embedding : array-like, shape (n_samples, n_components) Stores the embedding vectors. u,sv,v : singular value decomposition of data S, potentially with smoothing isort1 : sorting along first dimension of matrix isort2 : sorting along second dimension of matrix (if n_Y > 0) cmap: correlation of each item with all locations in the embedding map (before upsampling) A: PC coefficients of each Fourier mode
def cells_from_defaults(clz, jsonobj):
    """ Creates a referent instance of type `json.kind` and
    initializes it to default values. """
    # convert strings to dicts
    if isinstance(jsonobj, (str, unicode)):
        jsonobj = json.loads(jsonobj)

    assert 'cells' in jsonobj, "No cells in object"

    domain = TaxonomyCell.get_domain()
    cells = []
    for num, cell_dna in enumerate(jsonobj['cells']):
        assert 'kind' in cell_dna, "No type definition"
        classgenerator = domain.node[cell_dna['kind']]['class']
        cell = classgenerator()
        cell['num'].merge(num)
        for attr, val in cell_dna.items():
            if attr not in ('kind',):
                cell[attr].merge(val)
        cells.append(cell)
    return cells
Creates a referent instance of type `json.kind` and initializes it to default values.
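A hypothetical input showing the JSON shape this classmethod expects; the 'kind' values and the owning class name are invented for illustration.

import json

# 'kind' selects the node class from the taxonomy domain graph; every other
# key is merged into the freshly constructed cell.
dna = json.dumps({
    "cells": [
        {"kind": "person", "name": "Ada"},
        {"kind": "place", "name": "London"},
    ]
})
# cells = SomeReferent.cells_from_defaults(dna)  # SomeReferent is hypothetical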
def histogram(values, num_bins, bounds, normalized=True, plot=False, color='b'): """Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the percentage of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple of `:obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays. """ hist, bins = np.histogram(values, bins=num_bins, range=bounds) width = (bins[1] - bins[0]) if normalized: if np.sum(hist) > 0: hist = hist.astype(np.float32) / np.sum(hist) if plot: import matplotlib.pyplot as plt plt.bar(bins[:-1], hist, width=width, color=color) return hist, bins
Generate a histogram plot. Parameters ---------- values : :obj:`numpy.ndarray` An array of values to put in the histogram. num_bins : int The number equal-width bins in the histogram. bounds : :obj:`tuple` of float Two floats - a min and a max - that define the lower and upper ranges of the histogram, respectively. normalized : bool If True, the bins will show the percentage of elements they contain rather than raw counts. plot : bool If True, this function uses pyplot to plot the histogram. color : :obj:`str` The color identifier for the plotted bins. Returns ------- :obj:`tuple of `:obj:`numpy.ndarray` The values of the histogram and the bin edges as ndarrays.
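A quick usage sketch: with `normalized=True` the returned bins are fractions of the in-range samples, so they sum to one.

import numpy as np

values = np.random.default_rng(0).normal(0.0, 1.0, size=1000)
hist, bins = histogram(values, num_bins=20, bounds=(-4.0, 4.0), plot=False)
# Normalized bins are fractions of the counted samples, so they sum to ~1.
assert abs(hist.sum() - 1.0) < 1e-5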
def encode_aes256(key, plaintext): """ Utility method to encode some given plaintext with the given key. Important thing to note: This is not a general purpose encryption method - it has specific semantics (see below for details). Takes the given hex string key and converts it to a 256 bit binary blob. Then pads the given plaintext to AES block size which is always 16 bytes, regardless of AES key size. Then encrypts using AES-256-CBC using a random IV. Then converts both the IV and the ciphertext to hex. Finally returns the IV appended by the ciphertext. :param key: string, 64 hex chars long :param plaintext: string, any amount of data """ if len(key) != 64: raise TypeError("encode_aes256() expects a 256 bit key encoded as a 64 hex character string") # generate AES.block_size cryptographically secure random bytes for our IV (initial value) iv = os.urandom(AES.block_size) # set up an AES cipher object cipher = AES.new(binascii.unhexlify(key.encode('ascii')), mode=AES.MODE_CBC, IV=iv) # encrypt the plaintext after padding it ciphertext = cipher.encrypt(ensure_bytes(pad_aes256(plaintext))) # append the hexed IV and the hexed ciphertext iv_plus_encrypted = binascii.hexlify(iv) + binascii.hexlify(ciphertext) # return that return iv_plus_encrypted
Utility method to encode some given plaintext with the given key. Important thing to note: This is not a general purpose encryption method - it has specific semantics (see below for details). Takes the given hex string key and converts it to a 256 bit binary blob. Then pads the given plaintext to AES block size which is always 16 bytes, regardless of AES key size. Then encrypts using AES-256-CBC using a random IV. Then converts both the IV and the ciphertext to hex. Finally returns the IV appended by the ciphertext. :param key: string, 64 hex chars long :param plaintext: string, any amount of data
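Typical call pattern; splitting the result back apart follows from the layout described above, since the 16-byte IV always occupies the first 32 hex characters.

import os
import binascii

# A 256-bit key encoded as the required 64 hex characters.
key = binascii.hexlify(os.urandom(32)).decode('ascii')
token = encode_aes256(key, 'attack at dawn')

# Layout: hex(IV) + hex(ciphertext). A matching decoder would unhexlify both
# halves, then decrypt with AES-256-CBC and strip the padding.
iv_hex, ciphertext_hex = token[:32], token[32:]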
def build_package_from_pr_number(gh_token, sdk_id, pr_number, output_folder, *, with_comment=False):
    """Will clone the given PR branch and build the package with the given name."""
    con = Github(gh_token)
    repo = con.get_repo(sdk_id)
    sdk_pr = repo.get_pull(pr_number)
    # "get_files" of Github only downloads the first 300 files. Might not be enough.
    package_names = {f.filename.split('/')[0] for f in sdk_pr.get_files() if f.filename.startswith("azure")}
    absolute_output_folder = Path(output_folder).resolve()

    with tempfile.TemporaryDirectory() as temp_dir, \
            manage_git_folder(gh_token, Path(temp_dir) / Path("sdk"), sdk_id, pr_number=pr_number) as sdk_folder:

        for package_name in package_names:
            _LOGGER.debug("Build {}".format(package_name))
            execute_simple_command(
                ["python", "./build_package.py", "--dest", str(absolute_output_folder), package_name],
                cwd=sdk_folder
            )
            _LOGGER.debug("Build finished: {}".format(package_name))

    if with_comment:
        files = [f.name for f in absolute_output_folder.iterdir()]
        comment_message = None
        dashboard = DashboardCommentableObject(sdk_pr, "(message created by the CI based on PR content)")
        try:
            installation_message = build_installation_message(sdk_pr)
            download_message = build_download_message(sdk_pr, files)
            comment_message = installation_message + "\n\n" + download_message
            dashboard.create_comment(comment_message)
        except Exception:
            _LOGGER.critical("Unable to do PR comment:\n%s", comment_message)
Will clone the given PR branch and build the package with the given name.
def get_bgcolor(self, index): """Background color depending on value""" value = self.get_value(index) if index.column() < 3: color = ReadOnlyCollectionsModel.get_bgcolor(self, index) else: if self.remote: color_name = value['color'] else: color_name = get_color_name(value) color = QColor(color_name) color.setAlphaF(.2) return color
Background color depending on value
def configure_roles_on_host(api, host): """ Go through all the roles on this host, and configure them if they match the role types that we care about. """ for role_ref in host.roleRefs: # Mgmt service/role has no cluster name. Skip over those. if role_ref.get('clusterName') is None: continue # Get the role and inspect the role type role = api.get_cluster(role_ref['clusterName'])\ .get_service(role_ref['serviceName'])\ .get_role(role_ref['roleName']) LOG.debug("Evaluating %s (%s)" % (role.name, host.hostname)) config = None if role.type == 'DATANODE': config = DATANODE_CONF elif role.type == 'TASKTRACKER': config = TASKTRACKER_CONF elif role.type == 'REGIONSERVER': config = REGIONSERVER_CONF else: continue # Set the config LOG.info("Configuring %s (%s)" % (role.name, host.hostname)) role.update_config(config)
Go through all the roles on this host, and configure them if they match the role types that we care about.
def get_data(__pkg: str, __name: str) -> str: """Return top-most data file for given package. Args: __pkg: Package name __name: Data file name """ for dname in get_data_dirs(__pkg): test_path = path.join(dname, __name) if path.exists(test_path): return test_path raise FileNotFoundError('No data file {!r} for {!r}'.format(__name, __pkg))
Return top-most data file for given package. Args: __pkg: Package name __name: Data file name
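Usage is a plain lookup; `get_data_dirs` is assumed to yield candidate directories in priority order, so the first directory containing the file wins.

# The package and file names here are illustrative only.
try:
    config_path = get_data('mypkg', 'config.ini')
except FileNotFoundError:
    config_path = None  # fall back to built-in defaults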
def _wrap_OCLArray(cls): """ WRAPPER """ def prepare(arr): return np.require(arr, None, "C") @classmethod def from_array(cls, arr, *args, **kwargs): queue = get_device().queue return cl_array.to_device(queue, prepare(arr), *args, **kwargs) @classmethod def empty(cls, shape, dtype=np.float32): queue = get_device().queue return cl_array.empty(queue, shape, dtype) @classmethod def empty_like(cls, arr): return cls.empty(arr.shape, arr.dtype) @classmethod def zeros(cls, shape, dtype=np.float32): queue = get_device().queue return cl_array.zeros(queue, shape, dtype) @classmethod def zeros_like(cls, arr): queue = get_device().queue return cl_array.zeros_like(queue, arr) def copy_buffer(self, buf, **kwargs): queue = get_device().queue return cl.enqueue_copy(queue, self.data, buf.data, **kwargs) def write_array(self, data, **kwargs): queue = get_device().queue return cl.enqueue_copy(queue, self.data, prepare(data), **kwargs) def copy_image(self, img, **kwargs): queue = get_device().queue return cl.enqueue_copy(queue, self.data, img, offset=0, origin=(0,)*len(img.shape), region=img.shape, **kwargs) def copy_image_resampled(self, img, **kwargs): # if not self.dtype == img.dtype: # raise NotImplementedError("images have different dtype!") if self.dtype.type == np.float32: type_str = "float" elif self.dtype.type == np.complex64: type_str = "complex" else: raise NotImplementedError("only resampling of float32 and complex64 arrays possible ") kern_str = "img%dd_to_buf_%s" % (len(img.shape), type_str) OCLArray._resample_prog.run_kernel(kern_str, self.shape[::-1], None, img, self.data) def wrap_module_func(mod, f): def func(self, *args, **kwargs): return getattr(mod, f)(self, *args, **kwargs) return func cls.from_array = from_array cls.empty = empty cls.empty_like = empty_like cls.zeros = zeros cls.zeros_like = zeros_like cls.copy_buffer = copy_buffer cls.copy_image = copy_image cls.copy_image_resampled = copy_image_resampled cls.write_array = write_array cls._resample_prog = OCLProgram(abspath("kernels/copy_resampled.cl")) for f in ["sum", "max", "min", "dot", "vdot"]: setattr(cls, f, wrap_module_func(cl_array, f)) for f in dir(cl_math): if isinstance(getattr(cl_math, f), collections.Callable): setattr(cls, f, wrap_module_func(cl_math, f)) # cls.sum = sum cls.__name__ = str("OCLArray") return cls
WRAPPER
def rollback(self): """ Rollback of this current transaction. """ self._check_thread() if self.state not in (_STATE_ACTIVE, _STATE_PARTIAL_COMMIT): raise TransactionError("Transaction is not active.") try: if self.state != _STATE_PARTIAL_COMMIT: request = transaction_rollback_codec.encode_request(self.id, self.thread_id) self.client.invoker.invoke_on_connection(request, self.connection).result() self.state = _STATE_ROLLED_BACK finally: self._locals.transaction_exists = False
Rollback of this current transaction.
def parse_domains(self, domain, params): """ Parse a single Route53Domains domain """ domain_id = self.get_non_aws_id(domain['DomainName']) domain['name'] = domain.pop('DomainName') #TODO: Get Dnssec info when available #api_client = params['api_client'] #details = api_client.get_domain_detail(DomainName = domain['name']) #get_keys(details, domain, ['Dnssec']) self.domains[domain_id] = domain
Parse a single Route53Domains domain
def make_default_docstr(func, with_args=True, with_ret=True,
                        with_commandline=True, with_example=True,
                        with_header=False, with_debug=False):
    r""" Tries to make a sensible default docstr so the user
    can fill things in without typing too much

    # TODO: Interleave old documentation with new documentation

    Args:
        func (function): live python function
        with_args (bool):
        with_ret (bool): (Defaults to True)
        with_commandline (bool): (Defaults to True)
        with_example (bool): (Defaults to True)
        with_header (bool): (Defaults to False)
        with_debug (bool): (Defaults to False)

    Returns:
        tuple: (argname, val)

    Ignore:
        pass

    CommandLine:
        python -m utool.util_autogen --exec-make_default_docstr --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_autogen import *  # NOQA
        >>> import utool as ut
        >>> func = ut.make_default_docstr
        >>> #func = ut.make_args_docstr
        >>> #func = PythonStatement
        >>> func = auto_docstr
        >>> default_docstr = make_default_docstr(func)
        >>> result = str(default_docstr)
        >>> print(result)
    """
    import utool as ut
    #from utool import util_inspect
    funcinfo = ut.util_inspect.infer_function_info(func)

    argname_list = funcinfo.argname_list
    argtype_list = funcinfo.argtype_list
    argdesc_list = funcinfo.argdesc_list
    return_header = funcinfo.return_header
    return_type = funcinfo.return_type
    return_name = funcinfo.return_name
    return_desc = funcinfo.return_desc
    funcname = funcinfo.funcname
    modname = funcinfo.modname
    defaults = funcinfo.defaults
    num_indent = funcinfo.num_indent
    needs_surround = funcinfo.needs_surround
    funcname = funcinfo.funcname
    ismethod = funcinfo.ismethod
    va_name = funcinfo.va_name
    kw_name = funcinfo.kw_name
    kw_keys = funcinfo.kw_keys

    docstr_parts = []
    # Header part
    if with_header:
        header_block = funcname
        docstr_parts.append(header_block)

    # Args part
    if with_args and len(argname_list) > 0:
        argheader = 'Args'
        arg_docstr = make_args_docstr(argname_list, argtype_list, argdesc_list,
                                      ismethod, va_name, kw_name, kw_keys)
        argsblock = make_docstr_block(argheader, arg_docstr)
        docstr_parts.append(argsblock)

    # if False:
    #     with_kw = with_args
    #     if with_kw and len(kwarg_keys) > 0:
    #         #ut.embed()
    #         import textwrap
    #         kwargs_docstr = ', '.join(kwarg_keys)
    #         kwargs_docstr = '\n'.join(textwrap.wrap(kwargs_docstr))
    #         kwargsblock = make_docstr_block('Kwargs', kwargs_docstr)
    #         docstr_parts.append(kwargsblock)

    # Return / Yield part
    if with_ret and return_header is not None:
        if return_header is not None:
            return_doctr = make_returns_or_yeilds_docstr(return_type, return_name,
                                                         return_desc)
            returnblock = make_docstr_block(return_header, return_doctr)
            docstr_parts.append(returnblock)

    # Example part
    # try to generate a simple and unit testable example
    if with_commandline:
        cmdlineheader = 'CommandLine'
        cmdlinecode = make_cmdline_docstr(funcname, modname)
        cmdlineblock = make_docstr_block(cmdlineheader, cmdlinecode)
        docstr_parts.append(cmdlineblock)

    if with_example:
        exampleheader = 'Example'
        examplecode = make_example_docstr(funcname, modname, argname_list,
                                          defaults, return_type, return_name,
                                          ismethod)
        examplecode_ = ut.indent(examplecode, '>>> ')
        exampleblock = make_docstr_block(exampleheader, examplecode_)
        docstr_parts.append(exampleblock)

    # DEBUG part (in case something goes wrong)
    if with_debug:
        debugheader = 'Debug'
        debugblock = ut.codeblock(
            '''
            num_indent = {num_indent}
            '''
        ).format(num_indent=num_indent)
        debugblock = make_docstr_block(debugheader, debugblock)
        docstr_parts.append(debugblock)

    # Enclosure / Indentation Parts
    if needs_surround:
        docstr_parts = ['r"""'] + ['\n\n'.join(docstr_parts)] + ['"""']
        default_docstr = '\n'.join(docstr_parts)
    else:
        default_docstr = '\n\n'.join(docstr_parts)

    docstr_indent = ' ' * (num_indent + 4)
    default_docstr = ut.indent(default_docstr, docstr_indent)
    return default_docstr
r""" Tries to make a sensible default docstr so the user can fill things in without typing too much # TODO: Interleave old documentation with new documentation Args: func (function): live python function with_args (bool): with_ret (bool): (Defaults to True) with_commandline (bool): (Defaults to True) with_example (bool): (Defaults to True) with_header (bool): (Defaults to False) with_debug (bool): (Defaults to False) Returns: tuple: (argname, val) Ignore: pass CommandLine: python -m utool.util_autogen --exec-make_default_docstr --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_autogen import * # NOQA >>> import utool as ut >>> func = ut.make_default_docstr >>> #func = ut.make_args_docstr >>> #func = PythonStatement >>> func = auto_docstr >>> default_docstr = make_default_docstr(func) >>> result = str(default_docstr) >>> print(result)
def add_at_risk_counts(*fitters, **kwargs):
    """
    Add counts showing how many individuals were at risk at each time point in
    survival/hazard plots.

    Parameters
    ----------
    fitters:
      One or several fitters, for example KaplanMeierFitter,
      NelsonAalenFitter, etc...

    Returns
    --------
      ax:
        The axes which was used.

    Examples
    --------
    >>> # First train some fitters and plot them
    >>> fig = plt.figure()
    >>> ax = plt.subplot(111)
    >>>
    >>> f1 = KaplanMeierFitter()
    >>> f1.fit(data)
    >>> f1.plot(ax=ax)
    >>>
    >>> f2 = KaplanMeierFitter()
    >>> f2.fit(data)
    >>> f2.plot(ax=ax)
    >>>
    >>> # These are equivalent
    >>> add_at_risk_counts(f1, f2)
    >>> add_at_risk_counts(f1, f2, ax=ax, fig=fig)
    >>>
    >>> # This overrides the labels
    >>> add_at_risk_counts(f1, f2, labels=['fitter one', 'fitter two'])
    >>>
    >>> # This hides the labels
    >>> add_at_risk_counts(f1, f2, labels=None)
    """
    from matplotlib import pyplot as plt

    # Axes and Figure can't be None
    ax = kwargs.get("ax", None)
    if ax is None:
        ax = plt.gca()

    fig = kwargs.get("fig", None)
    if fig is None:
        fig = plt.gcf()

    if "labels" not in kwargs:
        labels = [f._label for f in fitters]
    else:
        # Allow None, in which case no labels should be used
        labels = kwargs["labels"]
        if labels is None:
            labels = [None] * len(fitters)

    # Create another axes where we can put size ticks
    ax2 = plt.twiny(ax=ax)
    # Move the ticks below existing axes
    # Appropriate length scaled for 6 inches. Adjust for figure size.
    ax2_ypos = -0.15 * 6.0 / fig.get_figheight()
    move_spines(ax2, ["bottom"], [ax2_ypos])
    # Hide all fluff
    remove_spines(ax2, ["top", "right", "bottom", "left"])
    # Set ticks and labels on bottom
    ax2.xaxis.tick_bottom()
    # Match tick numbers and locations
    ax2.set_xlim(ax.get_xlim())
    ax2.set_xticks(ax.get_xticks())
    # Remove ticks, need to do this AFTER moving the ticks
    remove_ticks(ax2, x=True, y=True)
    # Add population size at times
    ticklabels = []
    for tick in ax2.get_xticks():
        lbl = ""
        for f, l in zip(fitters, labels):
            # First tick is prepended with the label
            if tick == ax2.get_xticks()[0] and l is not None:
                if is_latex_enabled():
                    s = "\n{}\\quad".format(l) + "{}"
                else:
                    s = "\n{} ".format(l) + "{}"
            else:
                s = "\n{}"
            lbl += s.format(f.durations[f.durations >= tick].shape[0])
        ticklabels.append(lbl.strip())
    # Align labels to the right so numbers can be compared easily
    ax2.set_xticklabels(ticklabels, ha="right")

    # Add a descriptive headline.
    ax2.xaxis.set_label_coords(0, ax2_ypos)
    ax2.set_xlabel("At risk")

    plt.tight_layout()
    return ax
Add counts showing how many individuals were at risk at each time point in survival/hazard plots. Parameters ---------- fitters: One or several fitters, for example KaplanMeierFitter, NelsonAalenFitter, etc... Returns -------- ax: The axes which was used. Examples -------- >>> # First train some fitters and plot them >>> fig = plt.figure() >>> ax = plt.subplot(111) >>> >>> f1 = KaplanMeierFitter() >>> f1.fit(data) >>> f1.plot(ax=ax) >>> >>> f2 = KaplanMeierFitter() >>> f2.fit(data) >>> f2.plot(ax=ax) >>> >>> # These are equivalent >>> add_at_risk_counts(f1, f2) >>> add_at_risk_counts(f1, f2, ax=ax, fig=fig) >>> >>> # This overrides the labels >>> add_at_risk_counts(f1, f2, labels=['fitter one', 'fitter two']) >>> >>> # This hides the labels >>> add_at_risk_counts(f1, f2, labels=None)
def popen(self, stdout, stderr): """Build popen object to run :rtype: subprocess.Popen """ self.logger.info('Executing command: %s', self.command_str) return subprocess.Popen([self._executor_script], stdout=stdout, stderr=stderr)
Build popen object to run :rtype: subprocess.Popen
def perform(cls, entity_cls, usecase_cls, request_object_cls, payload: dict, raise_error=False): """ This method bundles all essential artifacts and initiates usecase execution. :param entity_cls: The entity class to be used for running the usecase :param usecase_cls: The usecase class that will be executed by the tasklet. :param request_object_cls: The request object to be used as input to the use case :type request_object_cls: protean.core.Request :param payload: The payload to be passed to the request object :type payload: dict :param raise_error: Raise error when a failure response is generated :type raise_error: bool """ # Initialize the use case and request objects use_case = usecase_cls() payload.update({'entity_cls': entity_cls}) request_object = request_object_cls.from_dict(payload) # Run the use case and return the response resp = use_case.execute(request_object) # If raise error is set then check the response and raise error if raise_error and isinstance(resp, ResponseFailure): raise UsecaseExecutionError( (resp.code, resp.value), orig_exc=getattr(resp, 'exc', None), orig_trace=getattr(resp, 'trace', None) ) return resp
This method bundles all essential artifacts and initiates usecase execution. :param entity_cls: The entity class to be used for running the usecase :param usecase_cls: The usecase class that will be executed by the tasklet. :param request_object_cls: The request object to be used as input to the use case :type request_object_cls: protean.core.Request :param payload: The payload to be passed to the request object :type payload: dict :param raise_error: Raise error when a failure response is generated :type raise_error: bool
def airspeed_ratio(VFR_HUD, ratio=2.0):
    '''recompute airspeed with a different ARSPD_RATIO'''
    from math import sqrt
    import mavutil
    mav = mavutil.mavfile_global
    # Assumption: the ratio the autopilot applied when it logged
    # VFR_HUD.airspeed is available as the ARSPD_RATIO parameter;
    # fall back to the requested ratio if the parameter is absent.
    old_ratio = mav.param('ARSPD_RATIO', ratio)
    # recover the differential pressure, then rescale with the new ratio
    airspeed_pressure = (VFR_HUD.airspeed**2) / old_ratio
    airspeed = sqrt(airspeed_pressure * ratio)
    return airspeed
recompute airspeed with a different ARSPD_RATIO
def get_last_doc(self): """Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback. """ try: result = self.elastic.search( index=self.meta_index_name, body={ "query": {"match_all": {}}, "sort": [{"_ts": "desc"}], }, size=1 )["hits"]["hits"] for r in result: r['_source']['_id'] = r['_id'] return r['_source'] except es_exceptions.RequestError: # no documents so ES returns 400 because of undefined _ts mapping return None
Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback.
def recurse(self, factory_meta, extras): """Recurse into a sub-factory call.""" return self.__class__(factory_meta, extras, strategy=self.strategy)
Recurse into a sub-factory call.
def on_menu_criteria_file(self, event):
    """ read pmag_criteria.txt file and open changecriteria dialog """
    if self.data_model == 3:
        default_file = "criteria.txt"
    else:
        default_file = "pmag_criteria.txt"
    read_success = False
    dlg = wx.FileDialog(
        self, message="choose pmag criteria file",
        defaultDir=self.WD, defaultFile=default_file,
        style=wx.FD_OPEN | wx.FD_CHANGE_DIR
    )
    if self.show_dlg(dlg) == wx.ID_OK:
        criteria_file = dlg.GetPath()
        print("-I- Read new criteria file: %s" % criteria_file)
        # check if this is a valid pmag_criteria file
        try:
            mag_meas_data, file_type = pmag.magic_read(criteria_file)
        except:
            dlg = wx.MessageDialog(
                self, caption="Error",
                message="not a valid pmag_criteria file",
                style=wx.OK)
            result = self.show_dlg(dlg)
            if result == wx.ID_OK:
                dlg.Destroy()
            dlg.Destroy()
            return
        # initialize criteria
        self.acceptance_criteria = self.read_criteria_file(criteria_file)
        read_success = True
    dlg.Destroy()
    if read_success:
        self.on_menu_change_criteria(None)
read pmag_criteria.txt file and open changecriteria dialog
def has_return_exprs(self, node):
    """Traverse the tree below node looking for 'return expr'.

    Return True if at least one 'return expr' is found, False if not.
    (If both 'return' and 'return expr' are found, return True.)
    """
    results = {}
    if self.return_expr.match(node, results):
        return True
    for child in node.children:
        if child.type not in (syms.funcdef, syms.classdef):
            if self.has_return_exprs(child):
                return True
    return False
Traverse the tree below node looking for 'return expr'. Return True if at least one 'return expr' is found, False if not. (If both 'return' and 'return expr' are found, return True.)
def check_dependency(self, operation, dependency): """ Enhances default behavior of method by checking dependency for matching operation. """ if isinstance(dependency[1], SQLBlob): # NOTE: we follow the sort order created by `assemble_changes` so we build a fixed chain # of operations. thus we should match exact operation here. return dependency[3] == operation return super(MigrationAutodetector, self).check_dependency(operation, dependency)
Enhances default behavior of method by checking dependency for matching operation.
def load(self): """ Load publish info from remote """ publish = self._get_publish() self.architectures = publish['Architectures'] for source in publish['Sources']: component = source['Component'] snapshot = source['Name'] self.publish_snapshots.append({ 'Component': component, 'Name': snapshot }) snapshot_remote = self._find_snapshot(snapshot) for source in self._get_source_snapshots(snapshot_remote, fallback_self=True): self.add(source, component)
Load publish info from remote
def single_gene_deletion(model, gene_list=None, method="fba", solution=None, processes=None, **kwargs): """ Knock out each gene from a given list. Parameters ---------- model : cobra.Model The metabolic model to perform deletions in. gene_list : iterable ``cobra.Gene``s to be deleted. If not passed, all the genes from the model are used. method: {"fba", "moma", "linear moma", "room", "linear room"}, optional Method used to predict the growth rate. solution : cobra.Solution, optional A previous solution to use as a reference for (linear) MOMA or ROOM. processes : int, optional The number of parallel processes to run. Can speed up the computations if the number of knockouts to perform is large. If not passed, will be set to the number of CPUs found. kwargs : Keyword arguments are passed on to underlying simulation functions such as ``add_room``. Returns ------- pandas.DataFrame A representation of all single gene deletions. The columns are 'growth' and 'status', where index : frozenset([str]) The gene identifier that was knocked out. growth : float The growth rate of the adjusted model. status : str The solution's status. """ return _multi_deletion( model, 'gene', element_lists=_element_lists(model.genes, gene_list), method=method, solution=solution, processes=processes, **kwargs)
Knock out each gene from a given list. Parameters ---------- model : cobra.Model The metabolic model to perform deletions in. gene_list : iterable ``cobra.Gene``s to be deleted. If not passed, all the genes from the model are used. method: {"fba", "moma", "linear moma", "room", "linear room"}, optional Method used to predict the growth rate. solution : cobra.Solution, optional A previous solution to use as a reference for (linear) MOMA or ROOM. processes : int, optional The number of parallel processes to run. Can speed up the computations if the number of knockouts to perform is large. If not passed, will be set to the number of CPUs found. kwargs : Keyword arguments are passed on to underlying simulation functions such as ``add_room``. Returns ------- pandas.DataFrame A representation of all single gene deletions. The columns are 'growth' and 'status', where index : frozenset([str]) The gene identifier that was knocked out. growth : float The growth rate of the adjusted model. status : str The solution's status.
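A minimal invocation sketch; it assumes the textbook E. coli model available through cobrapy's classic test helpers, which may differ across versions.

import cobra.test

model = cobra.test.create_test_model("textbook")  # availability is version-dependent
results = single_gene_deletion(model, gene_list=model.genes[:5], processes=1)
print(results[["growth", "status"]])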
def parse(content): """ Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values. """ values = {} for line in content.splitlines(): lexer = shlex.shlex(line, posix=True) tokens = list(lexer) # parses the assignment statement if len(tokens) < 3: continue name, op = tokens[:2] value = ''.join(tokens[2:]) if op != '=': continue if not re.match(r'[A-Za-z_][A-Za-z_0-9]*', name): continue value = value.replace(r'\n', '\n') value = value.replace(r'\t', '\t') values[name] = value return values
Parse the content of a .env file (a line-delimited KEY=value format) into a dictionary mapping keys to values.
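Example of the accepted syntax; lines that do not tokenize to a KEY=value triple (comments, blank lines) are skipped by the token-count and operator checks.

content = (
    "FOO=bar\n"
    "GREETING='hello world'\n"
    "# comments tokenize to nothing and are skipped\n"
)
print(parse(content))  # {'FOO': 'bar', 'GREETING': 'hello world'}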
def ensure_unicoded_and_unique(args_list, application): """ Iterate over args_list, make it unicode if needed and ensure that there are no duplicates. Returns list of unicoded arguments in the same order. """ unicoded_args = [] for argument in args_list: argument = (six.u(argument) if not isinstance(argument, six.text_type) else argument) if argument not in unicoded_args or argument == application: unicoded_args.append(argument) return unicoded_args
Iterate over args_list, make it unicode if needed and ensure that there are no duplicates. Returns list of unicoded arguments in the same order.
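The application name is deliberately exempt from deduplication, which the `argument == application` branch makes explicit:

args = ['run', 'run', 'myapp', '--verbose', '--verbose', 'myapp']
print(ensure_unicoded_and_unique(args, 'myapp'))
# ['run', 'myapp', '--verbose', 'myapp'] -- duplicates dropped except the app name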
def add_category(self, category): """Add a category assigned to this message :rtype: Category """ self._categories = self._ensure_append(category, self._categories)
Add a category assigned to this message :rtype: Category
def extract_request_details(request_object, session_object=None): ''' a method for extracting request details from request and session objects NOTE: method is also a placeholder funnel for future validation processes, request logging, request context building and counter-measures for the nasty web :param request_object: request object generated by flask from request route :param session_object: session object generated by flask from client cookie :return: dictionary with request details ''' request_details = { 'error': '', 'status': 'ok', 'code': 200, 'method': request_object.method, 'session': {}, 'root': request_object.url_root, 'route': request_object.path, 'headers': {}, 'form': {}, 'params': {}, 'json': {}, 'data': '' } # automatically add header and query field data request_details['headers'].update(**request_object.headers) for key in request_object.args.keys(): request_details['params'][key] = request_object.args.get(key) # retrieve session details if session_object: request_details['session'].update(**session_object) # add data based upon type if request_object.is_json: try: json_details = request_object.get_json(silent=True) if isinstance(json_details, dict): request_details['json'] = json_details except: pass else: try: from base64 import b64encode request_details['data'] = b64encode(request_object.data).decode() except: pass try: for key, value in request_object.form.items(): request_details['form'][key] = value except: pass # TODO: handle non-json data parsing (such as by mimetype and request.files) # TODO: check content type against buffer values # TODO: status code and error handling return request_details
a method for extracting request details from request and session objects NOTE: method is also a placeholder funnel for future validation processes, request logging, request context building and counter-measures for the nasty web :param request_object: request object generated by flask from request route :param session_object: session object generated by flask from client cookie :return: dictionary with request details
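Wiring the helper into a route is the intended use; the endpoint and secret below are illustrative only.

from flask import Flask, jsonify, request, session

app = Flask(__name__)
app.secret_key = 'change-me'  # required before the session cookie is readable

@app.route('/echo', methods=['GET', 'POST'])
def echo():
    details = extract_request_details(request, session)
    return jsonify(details), details['code']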