code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def _stringify_column(self, column_index):
    '''Same as _stringify_row but for columns.'''
    # View the table column-wise so we can index it like a row.
    column = TableTranspose(self.table)[column_index]
    previous = None
    for row_index in range(self.start[0], self.end[0]):
        interpreted, was_changed = self._check_interpret_cell(
            column[row_index], previous, row_index, column_index)
        if was_changed:
            column[row_index] = interpreted
        previous = interpreted
Same as _stringify_row but for columns.
def _swap_slice_indices(self, slc, make_slice=False):
    '''Swap slice indices.

    Change slice indices from Verilog slicing (e.g. IEEE 1800-2012,
    where ``[msb:lsb]`` runs high-to-low) to Python slicing
    (``[start:stop]``, low-to-high, stop exclusive).

    :param slc: a slice object, or a plain integer index
    :param make_slice: when ``slc`` is an integer, return a one-element
        Python slice instead of the bare (normalized) index
    :return: a Python-style slice, or the original integer index when
        ``slc`` is not a slice and ``make_slice`` is False
    '''
    try:
        start = slc.start
        stop = slc.stop
        slc_step = slc.step
    except AttributeError:
        # slc is a plain integer, not a slice.
        if make_slice:
            if slc < 0:
                # Normalize a negative index against the vector length.
                slc += self.length()
            return slice(slc, slc + 1)
        else:
            return slc
    else:
        # Verilog's msb (slc.start) becomes Python's exclusive stop.
        # 'not start and start != 0' distinguishes a missing bound (None)
        # from an explicit 0.
        if not start and start != 0:
            slc_stop = self.length()
        elif start < 0:
            slc_stop = self.length() + start + 1
        else:
            slc_stop = start + 1
        # Verilog's lsb (slc.stop) becomes Python's inclusive start.
        if not stop and stop != 0:
            slc_start = 0
        elif stop < 0:
            slc_start = self.length() + stop
        else:
            slc_start = stop
        return slice(slc_start, slc_stop, slc_step)
Swap slice indices Change slice indices from Verilog slicing (e.g. IEEE 1800-2012) to Python slicing.
def node_to_evenly_discretized(node):
    """
    Parses the evenly discretized mfd node to an instance of the :class:
    openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD, or
    to None if not all parameters are available.

    :param node: node object with an ``attrib`` mapping (expects the
        "minMag" and "binWidth" keys) and a ``nodes`` sequence whose
        first element's ``text`` holds the whitespace-separated rates
    :returns: an ``EvenlyDiscretizedMFD`` instance, or None
    """
    # Use .get/guards so genuinely missing parameters return None (as the
    # contract states) instead of raising KeyError / IndexError.
    min_mag = node.attrib.get("minMag")
    bin_width = node.attrib.get("binWidth")
    rates_text = node.nodes[0].text if node.nodes else None
    if not (min_mag and bin_width and rates_text):
        return None
    # Text to float
    rates = [float(x) for x in rates_text.split()]
    return mfd.evenly_discretized.EvenlyDiscretizedMFD(
        float(min_mag), float(bin_width), rates)
Parses the evenly discretized mfd node to an instance of the :class: openquake.hazardlib.mfd.evenly_discretized.EvenlyDiscretizedMFD, or to None if not all parameters are available
def repr_size(n_bytes):
    """Render a byte count as a human-readable size string.

    >>> repr_size(1000)
    '1000 Bytes'
    >>> repr_size(8257332324597)
    '7.5 TiB'
    """
    if n_bytes < 1024:
        return '{0} Bytes'.format(n_bytes)
    # Repeatedly divide by 1024, tracking which SI prefix we reached.
    size = n_bytes
    prefix_index = -1
    while size > 1023:
        size /= 1024.0
        prefix_index += 1
    return '{0} {1}iB'.format(round(size, 1), si_prefixes[prefix_index])
>>> repr_size(1000) '1000 Bytes' >>> repr_size(8257332324597) '7.5 TiB'
def rbac_policy_update(request, policy_id, **kwargs):
    """Update a RBAC Policy.

    :param request: request context
    :param policy_id: target policy id
    :param target_tenant: target tenant of the policy
    :return: RBACPolicy object
    """
    # The neutron API expects the update fields wrapped under 'rbac_policy'.
    response = neutronclient(request).update_rbac_policy(
        policy_id, body={'rbac_policy': kwargs})
    return RBACPolicy(response.get('rbac_policy'))
Update a RBAC Policy. :param request: request context :param policy_id: target policy id :param target_tenant: target tenant of the policy :return: RBACPolicy object
def dockprep(self, force_rerun=False):
    """Prepare a PDB file for docking by first converting it to mol2 format.

    Writes a small Chimera script, runs ``chimera --nogui`` on the
    structure, and records the resulting mol2 file in
    ``self.dockprep_path``.

    Args:
        force_rerun (bool): If method should be rerun even if output file exists
    """
    log.debug('{}: running dock preparation...'.format(self.id))
    # Output mol2 and the temporary Chimera driver script.
    prep_mol2 = op.join(self.dock_dir, '{}_prep.mol2'.format(self.id))
    prep_py = op.join(self.dock_dir, "prep.py")
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=prep_mol2):
        # Generate the Chimera script that runs DockPrep and saves mol2.
        with open(prep_py, "w") as f:
            f.write('import chimera\n')
            f.write('from DockPrep import prep\n')
            f.write('models = chimera.openModels.list(modelTypes=[chimera.Molecule])\n')
            f.write('prep(models)\n')
            f.write('from WriteMol2 import writeMol2\n')
            f.write('writeMol2(models, "{}")\n'.format(prep_mol2))
        cmd = 'chimera --nogui {} {}'.format(self.structure_path, prep_py)
        os.system(cmd)
        # Clean up the script and its compiled .pyc ('{}c') byproduct.
        os.remove(prep_py)
        os.remove('{}c'.format(prep_py))
    # Only record the result if Chimera actually produced a non-empty file.
    if ssbio.utils.is_non_zero_file(prep_mol2):
        self.dockprep_path = prep_mol2
        log.debug('{}: successful dockprep execution'.format(self.dockprep_path))
    else:
        log.critical('{}: dockprep failed to run on PDB file'.format(self.structure_path))
Prepare a PDB file for docking by first converting it to mol2 format. Args: force_rerun (bool): If method should be rerun even if output file exists
def sync_memoize(f):
    """
    Like memoize, but guarantees that decorated function is only called
    once, even when multiple threads are calling the decorating function
    with multiple parameters.
    """
    # TODO: Think about an f that is recursive
    cache = {}
    cache_lock = Lock()

    @wraps(f)
    def wrapper(*args):
        # Fast path: lock-free read for already-computed results.
        try:
            return cache[args]
        except KeyError:
            pass
        # Slow path: double-checked under the lock so f runs at most once
        # per distinct argument tuple.
        with cache_lock:
            try:
                return cache[args]
            except KeyError:
                result = f(*args)
                cache[args] = result
                return result
    return wrapper
Like memoize, but guarantees that decorated function is only called once, even when multiple threads are calling the decorating function with multiple parameters.
def cmd_up(self, args):
    '''adjust TRIM_PITCH_CD up by 5 degrees (or by args[0] degrees)'''
    # Default adjustment is +5 degrees; an explicit argument overrides it.
    if len(args) == 0:
        adjust = 5.0
    else:
        adjust = float(args[0])
    old_trim = self.get_mav_param('TRIM_PITCH_CD', None)
    if old_trim is None:
        print("Existing trim value unknown!")
        return
    # TRIM_PITCH_CD is in centidegrees, hence the *100 scaling.
    new_trim = int(old_trim + (adjust*100))
    # Refuse adjustments larger than 10 degrees (1000 centidegrees) as a
    # safety limit.
    if math.fabs(new_trim - old_trim) > 1000:
        print("Adjustment by %d too large (from %d to %d)" % (adjust*100, old_trim, new_trim))
        return
    print("Adjusting TRIM_PITCH_CD from %d to %d" % (old_trim, new_trim))
    self.param_set('TRIM_PITCH_CD', new_trim)
adjust TRIM_PITCH_CD up by 5 degrees
def MoveToAttribute(self, name):
    """Moves the position of the current instance to the attribute with
    the specified qualified name.

    Returns the underlying libxml2 status code.
    """
    # Thin delegation to the libxml2 text-reader binding.
    return libxml2mod.xmlTextReaderMoveToAttribute(self._o, name)
Moves the position of the current instance to the attribute with the specified qualified name.
def in6_chksum(nh, u, p):
    """
    As Specified in RFC 2460 - 8.1 Upper-Layer Checksums

    Performs IPv6 Upper Layer checksum computation. Provided parameters are:
    - 'nh' : value of upper layer protocol
    - 'u'  : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be
             provided with all under layers (IPv6 and all extension headers,
             for example)
    - 'p'  : the payload of the upper layer provided as a string

    Functions operate by filling a pseudo header class instance
    (PseudoIPv6) with
    - Next Header value
    - the address of _final_ destination (if some Routing Header with non
      segleft field is present in underlayer classes, last address is used.)
    - the address of _real_ source (basically the source address of an
      IPv6 class instance available in the underlayer or the source address
      in HAO option if some Destination Option header found in underlayer
      includes this option).
    - the length is the length of provided payload string ('p')
    """
    ph6 = PseudoIPv6()
    ph6.nh = nh
    rthdr = 0
    hahdr = 0
    final_dest_addr_found = 0
    # Walk down the layer stack until we hit the IPv6 header, collecting
    # routing-header / HAO overrides along the way.
    while u is not None and not isinstance(u, IPv6):
        if (isinstance(u, IPv6ExtHdrRouting) and
                u.segleft != 0 and len(u.addresses) != 0 and
                final_dest_addr_found == 0):
            # Classic Routing Header: final destination is the last address.
            rthdr = u.addresses[-1]
            final_dest_addr_found = 1
        elif (isinstance(u, IPv6ExtHdrSegmentRouting) and
                u.segleft != 0 and len(u.addresses) != 0 and
                final_dest_addr_found == 0):
            # Segment Routing Header: final destination is the first address.
            rthdr = u.addresses[0]
            final_dest_addr_found = 1
        elif (isinstance(u, IPv6ExtHdrDestOpt) and (len(u.options) == 1) and
                isinstance(u.options[0], HAO)):
            # Home Address Option overrides the pseudo-header source.
            hahdr = u.options[0].hoa
        u = u.underlayer
    if u is None:
        warning("No IPv6 underlayer to compute checksum. Leaving null.")
        return 0
    # Apply the overrides found above, falling back to the IPv6 header.
    if hahdr:
        ph6.src = hahdr
    else:
        ph6.src = u.src
    if rthdr:
        ph6.dst = rthdr
    else:
        ph6.dst = u.dst
    ph6.uplen = len(p)
    ph6s = raw(ph6)
    return checksum(ph6s + p)
As Specified in RFC 2460 - 8.1 Upper-Layer Checksums Performs IPv6 Upper Layer checksum computation. Provided parameters are: - 'nh' : value of upper layer protocol - 'u' : upper layer instance (TCP, UDP, ICMPv6*, ). Instance must be provided with all under layers (IPv6 and all extension headers, for example) - 'p' : the payload of the upper layer provided as a string Functions operate by filling a pseudo header class instance (PseudoIPv6) with - Next Header value - the address of _final_ destination (if some Routing Header with non segleft field is present in underlayer classes, last address is used.) - the address of _real_ source (basically the source address of an IPv6 class instance available in the underlayer or the source address in HAO option if some Destination Option header found in underlayer includes this option). - the length is the length of provided payload string ('p')
def _print_divide(self): """Prints all those table line dividers.""" for space in self.AttributesLength: self.StrTable += "+ " + "- " * space self.StrTable += "+" + "\n"
Prints all those table line dividers.
def data_to_imagesurface (data, **kwargs):
    """Turn arbitrary data values into a Cairo ImageSurface.

    The method and arguments are the same as data_to_argb32, except that
    the data array will be treated as 2D, and higher dimensionalities are
    not allowed. The return value is a Cairo ImageSurface object.

    Combined with the write_to_png() method on ImageSurfaces, this is an
    easy way to quickly visualize 2D data.
    """
    import cairo

    # Promote scalars/1D to 2D, but reject anything higher-dimensional.
    data = np.atleast_2d (data)
    if data.ndim != 2:
        raise ValueError ('input array may not have more than 2 dimensions')

    argb32 = data_to_argb32 (data, **kwargs)

    format = cairo.FORMAT_ARGB32
    height, width = argb32.shape
    # Cairo may require row padding; our array must already match its
    # expected stride since create_for_data shares the buffer in place.
    stride = cairo.ImageSurface.format_stride_for_width (format, width)

    if argb32.strides[0] != stride:
        raise ValueError ('stride of data array not compatible with ARGB32')

    return cairo.ImageSurface.create_for_data (argb32, format, width, height, stride)
Turn arbitrary data values into a Cairo ImageSurface. The method and arguments are the same as data_to_argb32, except that the data array will be treated as 2D, and higher dimensionalities are not allowed. The return value is a Cairo ImageSurface object. Combined with the write_to_png() method on ImageSurfaces, this is an easy way to quickly visualize 2D data.
def rvs(self, size=1, param=None):
    """Gives a set of random values drawn from the kde.

    Parameters
    ----------
    size : {1, int}
        The number of values to generate; default is 1.
    param : {None, string}
        If provided, will just return values for the given parameter.
        Otherwise, returns random values for each parameter.

    Returns
    -------
    structured array
        The random values in a numpy structured array. If a param was
        specified, the array will only have an element corresponding to the
        given parameter. Otherwise, the array will have an element for each
        parameter in self's params.
    """
    if param is not None:
        dtype = [(param, float)]
    else:
        dtype = [(p, float) for p in self.params]
    size = int(size)
    arr = numpy.zeros(size, dtype=dtype)
    # resample returns one row per parameter; re-key rows by name.
    draws = self._kde.resample(size)
    draws = {param: draws[ii,:] for ii,param in enumerate(self.params)}
    for (param,_) in dtype:
        try:
            # transform back to param space; parameters without a
            # registered transform raise KeyError and are used as-is.
            tparam = self._tparams[param]
            tdraws = {tparam: draws[param]}
            draws[param] = self._transforms[tparam].inverse_transform(
                tdraws)[param]
        except KeyError:
            pass
        arr[param] = draws[param]
    return arr
Gives a set of random values drawn from the kde. Parameters ---------- size : {1, int} The number of values to generate; default is 1. param : {None, string} If provided, will just return values for the given parameter. Otherwise, returns random values for each parameter. Returns ------- structured array The random values in a numpy structured array. If a param was specified, the array will only have an element corresponding to the given parameter. Otherwise, the array will have an element for each parameter in self's params.
def push(self, next_dfa, next_state, node_type, lineno, column):
    """Push a terminal and adjust the current state."""
    top_dfa, _old_state, top_node = self.stack[-1]
    child = Node(node_type, None, [], lineno, column)
    # Advance the current frame to its next state, then open a new frame
    # for the child node with state 0.
    self.stack[-1] = (top_dfa, next_state, top_node)
    self.stack.append((next_dfa, 0, child))
Push a terminal and adjust the current state.
def to_match(self):
    """Return a unicode object with the MATCH representation of this BetweenClause."""
    field_match = self.field.to_match()
    lower_match = self.lower_bound.to_match()
    upper_match = self.upper_bound.to_match()
    return u'({0} BETWEEN {1} AND {2})'.format(field_match, lower_match, upper_match)
Return a unicode object with the MATCH representation of this BetweenClause.
def check_auth(name, sock_dir=None, queue=None, timeout=300):
    '''
    This function is called from a multiprocess instance, to wait for a
    minion to become available to receive salt commands

    name: minion id to wait for
    sock_dir: master event socket directory
    queue: multiprocessing queue the minion id is put on once it starts
    timeout: overall seconds to wait before giving up
    '''
    event = salt.utils.event.SaltEvent('master', sock_dir, listen=True)
    starttime = time.mktime(time.localtime())
    newtimeout = timeout
    log.debug('In check_auth, waiting for %s to become available', name)
    while newtimeout > 0:
        # Recompute remaining time each pass; get_event blocks between polls.
        newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
        ret = event.get_event(full=True)
        if ret is None:
            continue
        if ret['tag'] == 'salt/minion/{0}/start'.format(name):
            queue.put(name)
            # Force loop exit on the next condition check.
            newtimeout = 0
            log.debug('Minion %s is ready to receive commands', name)
This function is called from a multiprocess instance, to wait for a minion to become available to receive salt commands
def change_email(self, email):
    """
    Change user's login email

    :param email: the new email address to set
    :return: result of the ``signals.user_update`` dispatch
    """
    def cb():
        # Validate before mutating; invalid addresses abort the update.
        if not utils.is_email_valid(email):
            raise exceptions.AuthError("Email address invalid")
        self.user.change_email(email)
        return email
    # NOTE(review): the payload uses self.email, which is evaluated when
    # user_update is called -- presumably the pre-change address; confirm
    # whether the signal should carry the old or the new email.
    return signals.user_update(self, ACTIONS["EMAIL"], cb, {"email": self.email})
Change user's login email :param user: AuthUser :param email: :return:
def create_venv(local='y', test='y', general='y'):
    """Create virtualenv w/requirements. Specify y/n for local/test/general
    to control installation."""
    # Bootstrap the virtualenv (and pip inside it) only if it doesn't exist.
    if not path.isdir(project_paths.venv):
        execute('virtualenv', '--distribute', '--no-site-packages', project_paths.venv)
        project.execute_python('-m', 'easy_install', 'pip')
    # Each requirements file is installed only when both requested ('y')
    # and configured in project_paths.
    if local.lower() == 'y' and project_paths.local_requirements:
        project.execute_pip('install', '--upgrade', '-r', project_paths.local_requirements)
    if test.lower() == 'y' and project_paths.test_requirements:
        project.execute_pip('install', '--upgrade', '-r', project_paths.test_requirements)
    if general.lower() == 'y' and project_paths.requirements_txt:
        project.execute_pip('install', '--upgrade', '-r', project_paths.requirements_txt)
Create virtualenv w/requirements. Specify y/n for local/test/general to control installation.
def _dump_query_timestamps(self, current_time: float): """Output the number of GraphQL queries grouped by their query_hash within the last time.""" windows = [10, 11, 15, 20, 30, 60] print("GraphQL requests:", file=sys.stderr) for query_hash, times in self._graphql_query_timestamps.items(): print(" {}".format(query_hash), file=sys.stderr) for window in windows: reqs_in_sliding_window = sum(t > current_time - window * 60 for t in times) print(" last {} minutes: {} requests".format(window, reqs_in_sliding_window), file=sys.stderr)
Output the number of GraphQL queries grouped by their query_hash within the last time.
def notify(self, message, priority='normal', timeout=0, block=False):
    """
    opens notification popup.

    :param message: message to print
    :type message: str
    :param priority: priority string, used to format the popup: currently,
                     'normal' and 'error' are defined. If you use 'X' here,
                     the attribute 'global_notify_X' is used to format the
                     popup.
    :type priority: str
    :param timeout: seconds until message disappears. Defaults to the value
                    of 'notify_timeout' in the general config section.
                    A negative value means never time out.
    :type timeout: int
    :param block: this notification blocks until a keypress is made
    :type block: bool
    :returns: an urwid widget (this notification) that can be handed to
              :meth:`clear_notify` for removal
    """
    def build_line(msg, prio):
        # One notification line, themed via 'global_notify_<prio>'.
        cols = urwid.Columns([urwid.Text(msg)])
        att = settings.get_theming_attribute('global', 'notify_' + prio)
        return urwid.AttrMap(cols, att)
    msgs = [build_line(message, priority)]
    # Append to an existing notification bar, or create a new one.
    if not self._notificationbar:
        self._notificationbar = urwid.Pile(msgs)
    else:
        newpile = self._notificationbar.widget_list + msgs
        self._notificationbar = urwid.Pile(newpile)
    self.update()

    def clear(*_):
        self.clear_notify(msgs)

    if block:
        # put "cancel to continue" widget as overlay on main widget
        txt = build_line('(escape continues)', priority)
        overlay = urwid.Overlay(txt, self.root_widget,
                                ('fixed left', 0),
                                ('fixed right', 0),
                                ('fixed bottom', 0),
                                None)
        self.show_as_root_until_keypress(overlay, 'esc', afterwards=clear)
    else:
        # Non-blocking: schedule automatic removal unless timeout < 0.
        if timeout >= 0:
            if timeout == 0:
                timeout = settings.get('notify_timeout')
            self.mainloop.set_alarm_in(timeout, clear)
    return msgs[0]
opens notification popup. :param message: message to print :type message: str :param priority: priority string, used to format the popup: currently, 'normal' and 'error' are defined. If you use 'X' here, the attribute 'global_notify_X' is used to format the popup. :type priority: str :param timeout: seconds until message disappears. Defaults to the value of 'notify_timeout' in the general config section. A negative value means never time out. :type timeout: int :param block: this notification blocks until a keypress is made :type block: bool :returns: an urwid widget (this notification) that can be handed to :meth:`clear_notify` for removal
def verify_ticket(self, ticket, **kwargs):
    """Verifies CAS 3.0+ XML-based authentication ticket and returns
    extended attributes.

    @date: 2011-11-30
    @author: Carlos Gonzalez Vila <carlewis@gmail.com>

    Returns username and attributes on success and None,None on failure.
    """
    try:
        from xml.etree import ElementTree
    except ImportError:
        # Pre-stdlib fallback for very old Python installs.
        from elementtree import ElementTree

    page = self.fetch_saml_validation(ticket)

    try:
        user = None
        attributes = {}
        response = page.content
        tree = ElementTree.fromstring(response)
        # Find the authentication status
        success = tree.find('.//' + SAML_1_0_PROTOCOL_NS + 'StatusCode')
        if success is not None and success.attrib['Value'].endswith(':Success'):
            # User is validated
            name_identifier = tree.find('.//' + SAML_1_0_ASSERTION_NS + 'NameIdentifier')
            if name_identifier is not None:
                user = name_identifier.text
            attrs = tree.findall('.//' + SAML_1_0_ASSERTION_NS + 'Attribute')
            for at in attrs:
                # The attribute matching the configured username attribute
                # overrides the NameIdentifier-derived user.
                if self.username_attribute in list(at.attrib.values()):
                    user = at.find(SAML_1_0_ASSERTION_NS + 'AttributeValue').text
                    attributes['uid'] = user
                values = at.findall(SAML_1_0_ASSERTION_NS + 'AttributeValue')
                if len(values) > 1:
                    # Multi-valued attributes are collected into a list.
                    values_array = []
                    for v in values:
                        values_array.append(v.text)
                    attributes[at.attrib['AttributeName']] = values_array
                else:
                    attributes[at.attrib['AttributeName']] = values[0].text
        # NOTE(review): on failure this returns (None, {}, None), not the
        # literal None,None the docstring describes -- confirm callers.
        return user, attributes, None
    finally:
        page.close()
Verifies CAS 3.0+ XML-based authentication ticket and returns extended attributes. @date: 2011-11-30 @author: Carlos Gonzalez Vila <carlewis@gmail.com> Returns username and attributes on success and None,None on failure.
def contains_point(self, x, y, d=2):
    """Returns true when x, y is on the path stroke outline.

    A point counts as "on the outline" when it is inside the path but at
    least one of the eight surrounding probe points at distance ``d`` falls
    outside -- i.e. the point is near the path edge rather than deep inside.

    :param x: x coordinate to test
    :param y: y coordinate to test
    :param d: probe distance around (x, y)
    """
    # Idiom fix: 'self.path != None' replaced with an identity check.
    if self.path is None or len(self.path) < 2 or not self.path.contains(x, y):
        return False
    # If all points around the mouse are also part of the path, we are
    # somewhere INSIDE the path. Only points near the edge (i.e. on the
    # outline stroke) should propagate.
    neighbours = ((x + d, y), (x, y + d), (x - d, y), (x, y - d),
                  (x + d, y + d), (x - d, y - d), (x + d, y - d), (x - d, y + d))
    return any(not self.path.contains(nx, ny) for nx, ny in neighbours)
Returns true when x, y is on the path stroke outline.
def make_ar_transition_matrix(coefficients):
    """Build transition matrix for an autoregressive StateSpaceModel.

    When applied to a vector of previous values, this matrix computes
    the expected new value (summing the previous states according to the
    autoregressive coefficients) in the top dimension of the state space,
    and moves all previous values down by one dimension, 'forgetting' the
    final (least recent) value. That is, it looks like this:

    ```
    ar_matrix = [ coefs[0], coefs[1], ..., coefs[order]
                  1.,       0 ,       ..., 0.
                  0.,       1.,       ..., 0.
                  ...
                  0.,       0.,  ...,  1., 0.            ]
    ```

    Args:
      coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`.

    Returns:
      ar_matrix: float `Tensor` with shape `concat([batch_shape,
        [order, order]])`.
    """
    # First row holds the AR coefficients themselves.
    top_row = tf.expand_dims(coefficients, -2)
    coef_shape = dist_util.prefer_static_shape(coefficients)
    batch_shape, order = coef_shape[:-1], coef_shape[-1]
    # Remaining rows: a shifted identity (shape [order-1, order-1]) padded
    # with a zero column, so each state moves down one slot per step.
    remaining_rows = tf.concat([
        tf.eye(order - 1, dtype=coefficients.dtype, batch_shape=batch_shape),
        tf.zeros(tf.concat([batch_shape, (order - 1, 1)], axis=0),
                 dtype=coefficients.dtype)
    ], axis=-1)
    ar_matrix = tf.concat([top_row, remaining_rows], axis=-2)
    return ar_matrix
Build transition matrix for an autoregressive StateSpaceModel. When applied to a vector of previous values, this matrix computes the expected new value (summing the previous states according to the autoregressive coefficients) in the top dimension of the state space, and moves all previous values down by one dimension, 'forgetting' the final (least recent) value. That is, it looks like this: ``` ar_matrix = [ coefs[0], coefs[1], ..., coefs[order] 1., 0 , ..., 0. 0., 1., ..., 0. ... 0., 0., ..., 1., 0. ] ``` Args: coefficients: float `Tensor` of shape `concat([batch_shape, [order]])`. Returns: ar_matrix: float `Tensor` with shape `concat([batch_shape, [order, order]])`.
def get_service(self, name):
    """
    Locates a remote service by name. The name can be a glob-like pattern
    (``"project.worker.*"``). If multiple services match the given name, a
    random instance will be chosen. There might be multiple services that
    match a given name if there are multiple services with the same name
    running, or when the pattern matches multiple different services.

    .. todo::
        Make this use self.io_loop to resolve the request. The current
        implementation is blocking and slow

    :param name: a pattern for the searched service.
    :return: a :py:class:`gemstone.RemoteService` instance
    :raises ValueError: when the service can not be located
    :raises ServiceConfigurationError: when there is no configured
        discovery strategy
    """
    if not self.discovery_strategies:
        raise ServiceConfigurationError("No service registry available")

    # Serve from cache when a previous lookup already resolved this name.
    cached = self.remote_service_cache.get_entry(name)
    if cached:
        return cached.remote_service

    for strategy in self.discovery_strategies:
        endpoints = strategy.locate(name)
        if not endpoints:
            continue
        # Randomize so repeated lookups spread across matching instances.
        random.shuffle(endpoints)
        for url in endpoints:
            try:
                service = get_remote_service_instance_for_url(url)
                self.remote_service_cache.add_entry(name, service)
                return service
            except ConnectionError:
                continue  # could not establish connection, try next
    raise ValueError("Service could not be located")
Locates a remote service by name. The name can be a glob-like pattern (``"project.worker.*"``). If multiple services match the given name, a random instance will be chosen. There might be multiple services that match a given name if there are multiple services with the same name running, or when the pattern matches multiple different services. .. todo:: Make this use self.io_loop to resolve the request. The current implementation is blocking and slow :param name: a pattern for the searched service. :return: a :py:class:`gemstone.RemoteService` instance :raises ValueError: when the service can not be located :raises ServiceConfigurationError: when there is no configured discovery strategy
def min(a, axis=None):
    """
    Request the minimum of an Array over any number of axes.

    .. note:: Currently limited to operating on a single axis.

    Parameters
    ----------
    a : Array object
        The object whose minimum is to be found.
    axis : None, or int, or iterable of ints
        Axis or axes along which the operation is performed. The default
        (axis=None) is to perform the operation over all the dimensions of
        the input array. The axis may be negative, in which case it counts
        from the last to the first axis. If axis is a tuple of ints, the
        operation is performed over multiple axes.

    Returns
    -------
    out : Array
        The Array representing the requested minimum.

    """
    axes = _normalise_axis(axis, a)
    # Multi-axis reduction is not yet supported (see note above).
    assert axes is not None and len(axes) == 1
    return _Aggregation(a, axes[0],
                        _MinStreamsHandler, _MinMaskedStreamsHandler,
                        a.dtype, {})
Request the minimum of an Array over any number of axes. .. note:: Currently limited to operating on a single axis. Parameters ---------- a : Array object The object whose minimum is to be found. axis : None, or int, or iterable of ints Axis or axes along which the operation is performed. The default (axis=None) is to perform the operation over all the dimensions of the input array. The axis may be negative, in which case it counts from the last to the first axis. If axis is a tuple of ints, the operation is performed over multiple axes. Returns ------- out : Array The Array representing the requested mean.
def _find_by_name(tree_data, name, is_dir, start_at): """return data entry matching the given name and tree mode or None. Before the item is returned, the respective data item is set None in the tree_data list to mark it done""" try: item = tree_data[start_at] if item and item[2] == name and S_ISDIR(item[1]) == is_dir: tree_data[start_at] = None return item except IndexError: pass # END exception handling for index, item in enumerate(tree_data): if item and item[2] == name and S_ISDIR(item[1]) == is_dir: tree_data[index] = None return item # END if item matches # END for each item return None
return data entry matching the given name and tree mode or None. Before the item is returned, the respective data item is set None in the tree_data list to mark it done
def relativefrom(base, path):
    # type: (Text, Text) -> Text
    """Return a path relative from a given base path.

    Insert backrefs as appropriate to reach the path from the base.

    Arguments:
        base (str): Path to a directory.
        path (str): Path to make relative.

    Returns:
        str: the path to ``base`` from ``path``.

    >>> relativefrom("foo/bar", "baz/index.html")
    '../../baz/index.html'

    """
    base_parts = list(iteratepath(base))
    path_parts = list(iteratepath(path))

    # Count the shared leading components of both paths.
    shared = 0
    for base_component, path_component in zip(base_parts, path_parts):
        if base_component != path_component:
            break
        shared += 1

    # One '..' per unshared base component, then the rest of the target.
    backrefs = [".."] * (len(base_parts) - shared)
    return "/".join(backrefs + path_parts[shared:])
Return a path relative from a given base path. Insert backrefs as appropriate to reach the path from the base. Arguments: base (str): Path to a directory. path (str): Path to make relative. Returns: str: the path to ``base`` from ``path``. >>> relativefrom("foo/bar", "baz/index.html") '../../baz/index.html'
def convert_to_argument(self):
    '''
    Convert the Argument object to a tuple use in
    :meth:`~argparse.ArgumentParser.add_argument` calls on the parser
    '''
    optional_fields = (
        "action", "nargs", "const", "default", "type",
        "choices", "required", "help", "metavar", "dest",
    )
    # Only fields that were actually set (non-None) become keyword options.
    options = {}
    for field in optional_fields:
        value = getattr(self, field)
        if value is not None:
            options[field] = value
    return self.name, options
Convert the Argument object to a tuple use in :meth:`~argparse.ArgumentParser.add_argument` calls on the parser
def _removePunctuation(text_string):
    """
    Removes punctuation symbols from a string.

    :param text_string: A string.
    :type text_string: str.

    :returns: The input ``text_string`` with punctuation symbols removed.
    :rtype: str.

    >>> from rnlp.textprocessing import _removePunctuation
    >>> example = 'Hello, World!'
    >>> _removePunctuation(example)
    'Hello World'
    """
    try:
        # Python 2 signature: translate(table, deletechars).
        return text_string.translate(None, _punctuation)
    except TypeError:
        # Python 3: build a deletion table with str.maketrans instead.
        deletion_table = str.maketrans('', '', _punctuation)
        return text_string.translate(deletion_table)
Removes punctuation symbols from a string. :param text_string: A string. :type text_string: str. :returns: The input ``text_string`` with punctuation symbols removed. :rtype: str. >>> from rnlp.textprocessing import _removePunctuation >>> example = 'Hello, World!' >>> _removePunctuation(example) 'Hello World'
def compare(self,
            reference_ids: Iterable,
            query_profiles: Iterable[Iterable],
            method: Optional) -> SimResult:
    """
    Given two lists of entities (classes, individuals), resolves them to
    some type (phenotypes, go terms, etc) and returns their similarity.

    Abstract stub: concrete subclasses must implement the actual
    resolution and similarity computation.

    NOTE(review): 'method: Optional' is a bare Optional with no inner
    type -- presumably Optional[str] or a method enum; confirm intent.
    """
    pass
Given two lists of entities (classes, individuals), resolves them to some type (phenotypes, go terms, etc) and returns their similarity
def email_link_expired(self, now=None):
    """
    Check if the email confirmation link has expired.

    :param now: reference datetime; defaults to the current UTC time
    :return: True if ``self.email_link_expires`` is earlier than ``now``
    """
    # Idiom fix: compare against None explicitly rather than truthiness.
    if now is None:
        # NOTE: utcnow() is naive; email_link_expires is presumably stored
        # as naive UTC as well -- confirm before moving to aware datetimes.
        now = datetime.datetime.utcnow()
    return self.email_link_expires < now
Check if email link expired
def is_armed(self):
    """Return True or False if the system is armed in any way (fully or
    partially)."""
    # Either arm state counts as armed.
    return self.get_armed_status() in (YALE_STATE_ARM_FULL,
                                       YALE_STATE_ARM_PARTIAL)
Return True or False if the system is armed in any way
def isosceles(cls, origin=None, base=1, alpha=90):
    '''
    :origin: optional Point
    :base: optional float describing triangle base length
    :alpha: optional float -- NOTE(review): accepted but never used by
            the construction below; confirm intended apex angle handling
    :return: Triangle initialized with points comprising a isosceles
             triangle.

    XXX isosceles triangle definition
    '''
    o = Point(origin)
    # NOTE(review): 'base' is rebound to the absolute x coordinate
    # o.x + base, and then reused for both the apex x (base / 2) and the
    # apex height (o.y + base). For a non-zero origin the apex is not
    # offset consistently -- looks like a bug; verify before relying on
    # this for origins other than (0, 0).
    base = o.x + base
    return cls(o, [base, o.y], [base / 2, o.y + base])
:origin: optional Point :base: optional float describing triangle base length :return: Triangle initialized with points comprising an isosceles triangle. XXX isosceles triangle definition
def add_quality_score_vs_no_of_observations_section(self):
    """ Add a section for the quality score vs number of observations line plot """
    sample_data = []
    data_labels = []
    # One Count dataset and one Percent dataset per recalibration table type.
    for rt_type_name, rt_type in recal_table_type._asdict().items():
        sample_tables = self.gatk_base_recalibrator[rt_type]['quality_quantization_map']
        if len(sample_tables) == 0:
            continue
        # Absolute counts: quality score -> observation count, per sample.
        sample_data.append({
            sample: {int(x): int(y) for x, y in zip(table['QualityScore'], table['Count'])}
            for sample, table in sample_tables.items()
        })
        # Total observations per sample, used to normalize to proportions.
        sample_y_sums = {
            sample: sum(int(y) for y in table['Count'])
            for sample, table in sample_tables.items()
        }
        # Proportions: quality score -> fraction of the sample's observations.
        sample_data.append({
            sample: {
                int(x): float(y) / sample_y_sums[sample]
                for x, y in zip(table['QualityScore'], table['Count'])
            }
            for sample, table in sample_tables.items()
        })
        # Max proportion across all samples, used as the Percent y-axis cap.
        flat_proportions = [float(y) / sample_y_sums[sample]
                            for sample, table in sample_tables.items()
                            for y in table['Count']]
        prop_ymax = max(flat_proportions)
        data_labels.append({'name': "{} Count".format(rt_type_name.capitalize().replace('_', '-')),
                            'ylab': 'Count'})
        data_labels.append({'ymax': prop_ymax,
                            'name': "{} Percent".format(rt_type_name.capitalize().replace('_', '-')),
                            'ylab': 'Percent'})
    plot = linegraph.plot(
        sample_data,
        pconfig={
            'title': "Observed Quality Score Counts",
            'id': 'gatk-base-recalibrator-quality-score-vs-number-of-observations',
            'xlab': 'Observed Quality Score',
            'ylab': 'Count',
            'xDecimals': False,
            'data_labels': data_labels,
        })
    # Reported vs empirical quality scores
    self.add_section(
        name='Observed Quality Scores',
        description=(
            'This plot shows the distribution of base quality scores in each sample before and '
            'after base quality score recalibration (BQSR). Applying BQSR should broaden the '
            'distribution of base quality scores.'
        ),
        helptext=(
            'For more information see '
            '[the Broad\'s description of BQSR]'
            '(https://gatkforums.broadinstitute.org/gatk/discussion/44/base-quality-score-recalibration-bqsr)'
            '.'
        ),
        plot=plot,
    )
Add a section for the quality score vs number of observations line plot
def add_column(self, column):
    """Add a new column along with a formatting function.

    Records the column's name and value-extraction path; when the column
    carries a mask, it is added to the set of mask parts."""
    name, func, mask = column.name, column.path, column.mask
    self.columns.append(name)
    self.column_funcs.append(func)
    if mask is not None:
        self.mask_parts.add(mask)
Add a new column along with a formatting function.
def auth(view, **kwargs):
    """
    Plugin that adds user authentication endpoints (login / logout / signup /
    oauth / password reset / account settings) to a view class.

    kwargs:
        - model: module holding the user model (required; ``model.User`` used)
        - signin_view: endpoint to redirect to after sign-in
        - signout_view: endpoint to redirect to after sign-out
        - template_dir: directory containing the account templates
        - menu: kwargs forwarded to @menu to group the auth menu entries
            - name
            - group_name
            - ...

    Usage::

        @plugin(user.login, model=model.User)
        class MyAccount(Juice):
            pass
    """
    # Endpoints are namespaced under the decorated view's class name,
    # e.g. "MyAccount:login".
    endpoint_namespace = view.__name__ + ":%s"
    view_name = view.__name__

    UserModel = kwargs.pop("model")
    User = UserModel.User

    login_view = endpoint_namespace % "login"
    on_signin_view = kwargs.get("signin_view", "Index:index")
    on_signout_view = kwargs.get("signout_view", "Index:index")
    template_dir = kwargs.get("template_dir", "Juice/Plugin/User/Account")
    template_page = template_dir + "/%s.html"

    # Flask-Login setup: unauthenticated requests get bounced to login_view.
    login_manager = LoginManager()
    login_manager.login_view = login_view
    login_manager.login_message_category = "error"
    init_app(login_manager.init_app)

    # If a menu config was passed, group all auth menu entries under it.
    menu_context = view
    _menu = kwargs.get("menu", {})
    if _menu:
        @menu(**_menu)
        class UserAccountMenu(object): pass
        menu_context = UserAccountMenu

    @login_manager.user_loader
    def load_user(userid):
        return User.get(userid)

    View.g(__USER_AUTH_ENABLED__=True)

    class Auth(object):
        # Everything requires login unless explicitly @no_login_required.
        decorators = view.decorators + [login_required]

        SESSION_KEY_SET_EMAIL_DATA = "set_email_tmp_data"
        TEMP_DATA_KEY = "login_tmp_data"

        # Session-backed scratch space carrying oauth signup data between
        # oauth_login and setup_login.
        @property
        def tmp_data(self):
            return session[self.TEMP_DATA_KEY]

        @tmp_data.setter
        def tmp_data(self, data):
            session[self.TEMP_DATA_KEY] = data

        # --- config gates: abort unless the corresponding feature is on ---
        def _login_enabled(self):
            if self.get_config("USER_AUTH_ALLOW_LOGIN") is not True:
                abort("UserLoginDisabledError")

        def _signup_enabled(self):
            if self.get_config("USER_AUTH_ALLOW_SIGNUP") is not True:
                abort("UserSignupDisabledError")

        def _oauth_enabled(self):
            if self.get_config("USER_AUTH_ALLOW_OAUTH") is not True:
                abort("UserOAuthDisabledError")

        def _send_reset_password(self, user):
            # Either mail a time-limited token link ("TOKEN" method) or
            # generate a new random password and mail that instead.
            delivery = self.get_config("USER_AUTH_PASSWORD_RESET_METHOD")
            token_reset_ttl = self.get_config("USER_AUTH_TOKEN_RESET_TTL", 60)
            new_password = None
            if delivery.upper() == "TOKEN":
                token = user.set_temp_login(token_reset_ttl)
                url = url_for(endpoint_namespace % "reset_password",
                              token=token,
                              _external=True)
            else:
                new_password = user.set_password(password=None, random=True)
                url = url_for(endpoint_namespace % "login", _external=True)
            mail.send(template="reset-password.txt",
                      method_=delivery,
                      to=user.email,
                      name=user.email,
                      url=url,
                      new_password=new_password)

        @classmethod
        def login_user(cls, user):
            # Wraps flask_login's login_user and stamps the visit times.
            login_user(user)
            now = datetime.datetime.now()
            user.update(last_login=now, last_visited=now)

        @menu("Login", endpoint=endpoint_namespace % "login", visible_with_auth_user=False, extends=menu_context)
        @template(template_page % "login", endpoint_namespace=endpoint_namespace)
        @route("login/", methods=["GET", "POST"], endpoint=endpoint_namespace % "login")
        @no_login_required
        def login(self):
            """ Login page """
            self._login_enabled()
            # Any previous session is discarded before a login attempt.
            logout_user()
            self.tmp_data = None
            self.meta_tags(title="Login")

            if request.method == "POST":
                email = request.form.get("email").strip()
                password = request.form.get("password").strip()

                if not email or not password:
                    flash("Email or Password is empty", "error")
                    return redirect(url_for(login_view, next=request.form.get("next")))

                user = User.get_by_email(email)
                if user and user.password_hash and user.password_matched(password):
                    self.login_user(user)
                    return redirect(request.form.get("next") or url_for(on_signin_view))
                else:
                    flash("Email or Password is invalid", "error")
                    return redirect(url_for(login_view, next=request.form.get("next")))

            # NOTE(review): oauth_enabled reads USER_AUTH_ALLOW_LOGIN, not
            # USER_AUTH_ALLOW_OAUTH — confirm this is intended.
            return dict(login_url_next=request.args.get("next", ""),
                        login_url_default=url_for(on_signin_view),
                        signup_enabled=self.get_config("USER_AUTH_ALLOW_SIGNUP"),
                        oauth_enabled=self.get_config("USER_AUTH_ALLOW_LOGIN"))

        @menu("Logout", endpoint=endpoint_namespace % "logout", visible_with_auth_user=True, order=100, extends=menu_context)
        @route("logout/", endpoint=endpoint_namespace % "logout")
        @no_login_required
        def logout(self):
            logout_user()
            return redirect(url_for(on_signout_view or login_view))

        @menu("Signup", endpoint=endpoint_namespace % "signup", visible_with_auth_user=False, extends=menu_context)
        @template(template_page % "signup", endpoint_namespace=endpoint_namespace)
        @route("signup/", methods=["GET", "POST"], endpoint=endpoint_namespace % "signup")
        @no_login_required
        def signup(self):
            """
            For Email Signup
            :return:
            """
            self._login_enabled()
            self._signup_enabled()
            self.meta_tags(title="Signup")
            if request.method == "POST":
                # reCaptcha
                if not recaptcha.verify():
                    flash("Invalid Security code", "error")
                    return redirect(url_for(endpoint_namespace % "signup",
                                            next=request.form.get("next")))
                try:
                    name = request.form.get("name")
                    email = request.form.get("email")
                    password = request.form.get("password")
                    password2 = request.form.get("password2")
                    profile_image_url = request.form.get("profile_image_url", None)
                    if not name:
                        raise UserError("Name is required")
                    elif not utils.is_valid_email(email):
                        raise UserError("Invalid email address '%s'" % email)
                    elif not password.strip() or password.strip() != password2.strip():
                        raise UserError("Passwords don't match")
                    elif not utils.is_valid_password(password):
                        raise UserError("Invalid password")
                    else:
                        new_account = User.new(email=email,
                                               password=password.strip(),
                                               first_name=name,
                                               profile_image_url=profile_image_url,
                                               signup_method="email")
                        # New accounts are signed in immediately.
                        self.login_user(new_account)
                        return redirect(request.form.get("next") or url_for(on_signin_view))
                except ApplicationError as ex:
                    flash(ex.message, "error")
                    return redirect(url_for(endpoint_namespace % "signup",
                                            next=request.form.get("next")))

            logout_user()
            return dict(login_url_next=request.args.get("next", ""))

        @route("lost-password/", methods=["GET", "POST"], endpoint=endpoint_namespace % "lost_password")
        @template(template_page % "lost_password", endpoint_namespace=endpoint_namespace)
        @no_login_required
        def lost_password(self):
            self._login_enabled()
            logout_user()
            self.meta_tags(title="Lost Password")

            if request.method == "POST":
                email = request.form.get("email")
                user = User.get_by_email(email)
                if user:
                    self._send_reset_password(user)
                    flash("A new password has been sent to '%s'" % email,
                          "success")
                else:
                    flash("Invalid email address", "error")
                return redirect(url_for(login_view))
            else:
                return {}

        @menu("Account Settings", endpoint=endpoint_namespace % "account_settings", order=99, visible_with_auth_user=True, extends=menu_context)
        @template(template_page % "account_settings", endpoint_namespace=endpoint_namespace)
        @route("account-settings", methods=["GET", "POST"], endpoint=endpoint_namespace % "account_settings")
        @fresh_login_required
        def account_settings(self):
            self.meta_tags(title="Account Settings")

            if request.method == "POST":
                # Which settings form was submitted is carried in 'action'.
                action = request.form.get("action")

                try:
                    action = action.lower()
                    #
                    if action == "info":
                        first_name = request.form.get("first_name").strip()
                        last_name = request.form.get("last_name", "").strip()

                        data = {
                            "first_name": first_name,
                            "last_name": last_name
                        }
                        current_user.update(**data)
                        flash("Account info updated successfully!", "success")
                    #
                    elif action == "login":
                        # Email change requires re-entering the password.
                        confirm_password = request.form.get("confirm-password").strip()
                        if current_user.password_matched(confirm_password):
                            self.change_login_handler()
                            flash("Login Info updated successfully!", "success")
                        else:
                            flash("Invalid password", "error")
                    #
                    elif action == "password":
                        confirm_password = request.form.get("confirm-password").strip()
                        if current_user.password_matched(confirm_password):
                            self.change_password_handler()
                            flash("Password updated successfully!", "success")
                        else:
                            flash("Invalid password", "error")

                    elif action == "profile-photo":
                        file = request.files.get("file")
                        if file:
                            prefix = "profile-photos/%s/" % current_user.id
                            extensions = ["jpg", "jpeg", "png", "gif"]
                            my_photo = storage.upload(file,
                                                      prefix=prefix,
                                                      allowed_extensions=extensions)
                            if my_photo:
                                url = my_photo.url
                                current_user.update(profile_image_url=url)
                                flash("Profile Image updated successfully!", "success")
                    else:
                        raise UserError("Invalid action")

                except Exception as e:
                    flash(e.message, "error")

                return redirect(url_for(endpoint_namespace % "account_settings"))

            return {}

        @classmethod
        def change_login_handler(cls, user_context=None, email=None):
            # Defaults to the currently logged-in user and the posted form.
            if not user_context:
                user_context = current_user
            if not email:
                email = request.form.get("email").strip()

            if not utils.is_valid_email(email):
                raise UserWarning("Invalid email address '%s'" % email)
            else:
                if email != user_context.email and User.get_by_email(email):
                    raise UserWarning("Email exists already '%s'" % email)
                elif email != user_context.email:
                    user_context.update(email=email)
                    return True
            # Returns False when the email was unchanged.
            return False

        @classmethod
        def change_password_handler(cls, user_context=None, password=None,
                                    password2=None):
            # Defaults to the currently logged-in user and the posted form.
            if not user_context:
                user_context = current_user
            if not password:
                password = request.form.get("password").strip()
            if not password2:
                password2 = request.form.get("password2").strip()

            if password:
                if password != password2:
                    raise UserWarning("Password don't match")
                elif not utils.is_valid_password(password):
                    raise UserWarning("Invalid password")
                else:
                    user_context.set_password(password)
                    return True
            else:
                raise UserWarning("Password is empty")

        # OAUTH Login
        @route("oauth-login/<provider>", methods=["GET", "POST"], endpoint=endpoint_namespace % "oauth_login")
        @template(template_page % "oauth_login", endpoint_namespace=endpoint_namespace)
        @no_login_required
        def oauth_login(self, provider):
            """ Login via oauth providers """

            self._login_enabled()
            self._oauth_enabled()

            provider = provider.lower()
            result = oauth.login(provider)
            response = oauth.response

            # Instructions for the popup window's JS on what to do next.
            popup_js_custom = {
                "action": "",
                "url": ""
            }

            if result:
                if result.error:
                    pass

                elif result.user:
                    # Refresh the provider's user info, then look up a local
                    # account linked to this provider identity.
                    result.user.update()
                    oauth_user = result.user
                    user = User.get_by_oauth(provider=provider,
                                             provider_user_id=oauth_user.id)

                    if not user:
                        if oauth_user.email and User.get_by_email(oauth_user.email):
                            flash("Account already exists with this email '%s'. "
                                  "Try to login or retrieve your password " % oauth_user.email, "error")
                            popup_js_custom.update({
                                "action": "redirect",
                                "url": url_for(login_view, next=request.form.get("next"))
                            })
                        else:
                            tmp_data = {
                                "is_oauth": True,
                                "provider": provider,
                                "id": oauth_user.id,
                                "name": oauth_user.name,
                                "picture": oauth_user.picture,
                                "first_name": oauth_user.first_name,
                                "last_name": oauth_user.last_name,
                                "email": oauth_user.email,
                                "link": oauth_user.link
                            }
                            if not oauth_user.email:
                                # Provider gave no email: stash the oauth
                                # data and send the user to setup_login.
                                self.tmp_data = tmp_data
                                popup_js_custom.update({
                                    "action": "redirect",
                                    "url": url_for(endpoint_namespace % "setup_login")
                                })
                            else:
                                try:
                                    picture = oauth_user.picture
                                    user = User.new(email=oauth_user.email,
                                                    name=oauth_user.name,
                                                    signup_method=provider,
                                                    profile_image_url=picture
                                                    )
                                    # NOTE(review): uses oauth_user.provider_id
                                    # here but oauth_user.id in get_by_oauth
                                    # above — confirm which is intended.
                                    user.add_oauth(provider,
                                                   oauth_user.provider_id,
                                                   name=oauth_user.name,
                                                   email=oauth_user.email,
                                                   profile_image_url=oauth_user.picture,
                                                   link=oauth_user.link)
                                except ModelError as e:
                                    flash(e.message, "error")
                                    popup_js_custom.update({
                                        "action": "redirect",
                                        "url": url_for(endpoint_namespace % "login")
                                    })
                    if user:
                        self.login_user(user)

                return dict(popup_js=result.popup_js(custom=popup_js_custom),
                            template_=template_page % "oauth_login")

            return response

        @template(template_page % "setup_login", endpoint_namespace=endpoint_namespace)
        @route("setup-login/", methods=["GET", "POST"], endpoint=endpoint_namespace % "setup_login")
        def setup_login(self):
            """
            Allows to setup a email password if it's not provided specially
            coming from oauth-login
            :return:
            """
            self._login_enabled()
            self.meta_tags(title="Setup Login")

            # Only user without email can set email
            if current_user.is_authenticated() and current_user.email:
                return redirect(url_for(endpoint_namespace % "account_settings"))

            if self.tmp_data:
                if request.method == "POST":
                    if not self.tmp_data["is_oauth"]:
                        # NOTE(review): redirects to the raw endpoint string
                        # without url_for — looks like a bug; confirm.
                        return redirect(endpoint_namespace % "login")

                    try:
                        email = request.form.get("email")
                        password = request.form.get("password")
                        password2 = request.form.get("password2")
                        if not utils.is_valid_email(email):
                            raise UserError("Invalid email address '%s'" % email)
                        elif User.get_by_email(email):
                            raise UserError("An account exists already with this email address '%s' " % email)
                        elif not password.strip() or password.strip() != password2.strip():
                            raise UserError("Passwords don't match")
                        elif not utils.is_valid_password(password):
                            raise UserError("Invalid password")
                        else:
                            user = User.new(email=email,
                                            password=password.strip(),
                                            name=self.tmp_data["name"],
                                            profile_image_url=self.tmp_data["picture"],
                                            signup_method=self.tmp_data["provider"])
                            user.add_oauth(self.tmp_data["provider"],
                                           self.tmp_data["id"],
                                           name=self.tmp_data["name"],
                                           email=email,
                                           profile_image_url=self.tmp_data["picture"],
                                           link=self.tmp_data["link"])
                            self.login_user(user)
                            self.tmp_data = None
                            return redirect(request.form.get("next") or url_for(on_signin_view))
                    except ApplicationError as ex:
                        flash(ex.message, "error")
                    return redirect(url_for(endpoint_namespace % "login"))
                return dict(provider=self.tmp_data)
            else:
                return redirect(url_for(endpoint_namespace % "login"))

        @route("reset-password/<token>", methods=["GET", "POST"], endpoint=endpoint_namespace % "reset_password")
        @template(template_page % "reset_password", endpoint_namespace=endpoint_namespace)
        @no_login_required
        def reset_password(self, token):
            self._login_enabled()
            logout_user()
            self.meta_tags(title="Reset Password")

            user = User.get_by_temp_login(token)
            if user:
                # A consumed/expired token sends the user straight on.
                if not user.has_temp_login:
                    return redirect(url_for(on_signin_view))
                if request.method == "POST":
                    try:
                        self.change_password_handler(user_context=user)
                        user.clear_temp_login()
                        flash("Password updated successfully!", "success")
                        return redirect(url_for(on_signin_view))
                    except Exception as ex:
                        flash("Error: %s" % ex.message, "error")
                        return redirect(url_for(endpoint_namespace % "reset_password",
                                                token=token))
                else:
                    return dict(token=token)
            else:
                abort(404, "Invalid token")

        @route("oauth-connect", methods=["POST"], endpoint="%s:oauth_connect" % endpoint_namespace)
        def oauth_connect(self):
            """ To login via social """
            email = request.form.get("email").strip()
            name = request.form.get("name").strip()
            provider = request.form.get("provider").strip()
            provider_user_id = request.form.get("provider_user_id").strip()
            image_url = request.form.get("image_url").strip()
            # NOTE(review): 'next' is read but never used (and shadows the
            # builtin) — confirm whether it should feed the redirect below.
            next = request.form.get("next", "")

            try:
                current_user.oauth_connect(provider=provider,
                                           provider_user_id=provider_user_id,
                                           email=email,
                                           name=name,
                                           image_url=image_url)
            except Exception as ex:
                flash("Unable to link your account", "error")

            return redirect(url_for(endpoint_namespace % "account_settings"))

    return Auth
This plugin allows a user to log in to the application

        kwargs:
            - signin_view
            - signout_view
            - template_dir
            - menu:
                - name
                - group_name
                - ...

        @plugin(user.login, model=model.User)
        class MyAccount(Juice):
            pass
def K_run_converging_Crane(D_run, D_branch, Q_run, Q_branch, angle=90):
    r'''Return the Crane-method loss coefficient for the run (straight leg)
    of a converging tee or wye [1]_, expressed with respect to the velocity
    and inside diameter of the combined-flow outlet.

    For branch angles of 75 degrees and above the dedicated 90-degree form
    is used:

    .. math::
        K_{run} = 1.55\left(\frac{Q_{branch}}{Q_{comb}} \right)
        - \left(\frac{Q_{branch}}{Q_{comb}}\right)^2

    Otherwise the general correlation applies with :math:`C=1`, :math:`D=0`,
    :math:`E=1`, and :math:`F` linearly interpolated over the tabulated
    angles (30°: 1.74, 45°: 1.41, 60°: 1.0; no clipping outside this range):

    .. math::
        K_{run} = C\left[1 + D\left(\frac{Q_{branch}}{Q_{comb}\cdot
        \beta_{branch}^2}\right)^2 - E\left(1 - \frac{Q_{branch}}{Q_{comb}}
        \right)^2 - \frac{F}{\beta_{branch}^2} \left(\frac{Q_{branch}}
        {Q_{comb}}\right)^2\right]

    .. math::
        \beta_{branch} = \frac{D_{branch}}{D_{comb}}

    Parameters
    ----------
    D_run : float
        Diameter of the straight-through inlet portion of the tee or wye [m]
    D_branch : float
        Diameter of the pipe attached at an angle to the straight-through, [m]
    Q_run : float
        Volumetric flow rate in the straight-through inlet of the tee or
        wye, [m^3/s]
    Q_branch : float
        Volumetric flow rate in the pipe attached at an angle to the
        straight-through, [m^3/s]
    angle : float, optional
        Angle the branch makes with the straight-through (tee=90, wye<90)
        [degrees]

    Returns
    -------
    K : float
        Loss coefficient of run with respect to the velocity and inside
        diameter of the combined flow outlet [-]

    Examples
    --------
    Example 7-35 of [1]_: a DN100 schedule 40 tee with 1135 liters/minute of
    water through the straight leg and 380 liters/minute converging through
    a 90 degree branch (loss coefficient approximately 0.3258):

    >>> K_run_converging_Crane(0.1023, 0.1023, 0.018917, 0.00633)
    0.32575847854551254

    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    '''
    flow_fraction = Q_branch/(Q_run + Q_branch)
    # At 75 degrees and above, Crane's special 90-degree expression is used.
    if angle >= 75.0:
        return 1.55*(flow_fraction) - flow_fraction*flow_fraction
    # General form with C=1, D=0, E=1; F interpolated from the angle table.
    diameter_ratio = D_branch/D_run
    beta2 = diameter_ratio*diameter_ratio
    C = 1.0
    D, E = 0.0, 1.0
    F = interp(angle, run_converging_Crane_angles, run_converging_Crane_Fs)
    return C*(1. + D*(flow_fraction/beta2)**2 - E*(1. - flow_fraction)**2
              - F/beta2*flow_fraction**2)
r'''Returns the loss coefficient for the run of a converging tee or wye according to the Crane method [1]_. .. math:: K_{branch} = C\left[1 + D\left(\frac{Q_{branch}}{Q_{comb}\cdot \beta_{branch}^2}\right)^2 - E\left(1 - \frac{Q_{branch}}{Q_{comb}} \right)^2 - \frac{F}{\beta_{branch}^2} \left(\frac{Q_{branch}} {Q_{comb}}\right)^2\right] .. math:: \beta_{branch} = \frac{D_{branch}}{D_{comb}} In the above equation, C=1, D=0, E=1. See the notes for definitions of F and also the special case of 90°. Parameters ---------- D_run : float Diameter of the straight-through inlet portion of the tee or wye [m] D_branch : float Diameter of the pipe attached at an angle to the straight-through, [m] Q_run : float Volumetric flow rate in the straight-through inlet of the tee or wye, [m^3/s] Q_branch : float Volumetric flow rate in the pipe attached at an angle to the straight- through, [m^3/s] angle : float, optional Angle the branch makes with the straight-through (tee=90, wye<90) [degrees] Returns ------- K : float Loss coefficient of run with respect to the velocity and inside diameter of the combined flow outlet [-] Notes ----- F is linearly interpolated from the table of angles below. There is no cutoff to prevent angles from being larger or smaller than 30 or 60 degrees. The switch to the special 90° happens at 75°. +-----------+------+ | Angle [°] | | +===========+======+ | 30 | 1.74 | +-----------+------+ | 45 | 1.41 | +-----------+------+ | 60 | 1 | +-----------+------+ For the special case of 90°, the formula used is as follows. .. math:: K_{run} = 1.55\left(\frac{Q_{branch}}{Q_{comb}} \right) - \left(\frac{Q_{branch}}{Q_{comb}}\right)^2 Examples -------- Example 7-35 of [1]_. A DN100 schedule 40 tee has 1135 liters/minute of water passing through the straight leg, and 380 liters/minute of water converging with it through a 90° branch. Calculate the loss coefficient in the run. The calculated value there is 0.03258. 
>>> K_run_converging_Crane(0.1023, 0.1023, 0.018917, 0.00633) 0.32575847854551254 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009.
def _get_compose_volumes(app_name, assembled_specs):
    """Build the docker-compose volume list for *app_name*.

    The list starts with the Dusty ``/cp`` volume (used by ``dusty cp`` for
    easy file transfers) followed by the app's own volume mounts, so local
    app and lib code shadows whatever code was baked into the docker image.
    """
    volumes = [_get_cp_volume_mount(app_name)]
    volumes.extend(get_app_volume_mounts(app_name, assembled_specs))
    return volumes
This returns formatted volume specifications for a docker-compose app. We mount the app as well as any libs it needs so that local code is used in our container, instead of whatever code was in the docker image. Additionally, we create a volume for the /cp directory used by Dusty to facilitate easy file transfers using `dusty cp`.
def validate_boundary(reference_intervals, estimated_intervals, trim):
    """Check that inputs to a segment boundary estimation metric (one that
    only takes in segment intervals) look like valid segment times, warning
    and raising helpful errors if not.

    Parameters
    ----------
    reference_intervals : np.ndarray, shape=(n, 2)
        reference segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    estimated_intervals : np.ndarray, shape=(m, 2)
        estimated segment intervals, in the format returned by
        :func:`mir_eval.io.load_intervals` or
        :func:`mir_eval.io.load_labeled_intervals`.
    trim : bool
        will the start and end events be trimmed?
    """
    # Trimming removes the first and last boundaries, so two intervals are
    # needed for anything to remain; otherwise a single interval suffices.
    min_size = 2 if trim else 1

    if len(reference_intervals) < min_size:
        warnings.warn("Reference intervals are empty.")
    if len(estimated_intervals) < min_size:
        warnings.warn("Estimated intervals are empty.")

    for intervals in (reference_intervals, estimated_intervals):
        util.validate_intervals(intervals)
Checks that the input annotations to a segment boundary estimation metric (i.e. one that only takes in segment intervals) look like valid segment times, and throws helpful errors if not. Parameters ---------- reference_intervals : np.ndarray, shape=(n, 2) reference segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. estimated_intervals : np.ndarray, shape=(m, 2) estimated segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. trim : bool will the start and end events be trimmed?
def draw_line(self, img, pixmapper, pt1, pt2, colour, linewidth):
    '''draw a line on the image'''
    # Map both endpoints to pixel coordinates.
    start = pixmapper(pt1)
    end = pixmapper(pt2)
    # Clip the segment to the image rectangle; nothing to draw if the
    # clipper returns None.
    clipped = cv.ClipLine((img.width, img.height), start, end)
    if clipped is None:
        return
    start, end = clipped
    cv.Line(img, start, end, colour, linewidth)
    # Emphasise the segment's end point with a small circle.
    cv.Circle(img, end, linewidth*2, colour)
draw a line on the image
def servo_output_raw_send(self, time_usec, port, servo1_raw, servo2_raw, servo3_raw, servo4_raw, servo5_raw, servo6_raw, servo7_raw, servo8_raw, force_mavlink1=False):
    '''
    Send a SERVO_OUTPUT_RAW message: the RAW values of the servo outputs
    (for RC input from the remote, use the RC_CHANNELS messages). The
    standard PPM modulation is as follows: 1000 microseconds: 0%, 2000
    microseconds: 100%.

    time_usec                 : Timestamp (microseconds since system boot) (uint32_t)
    port                      : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows to encode more than 8 servos. (uint8_t)
    servo1_raw .. servo8_raw  : Servo output values 1-8, in microseconds (uint16_t)
    '''
    # Encode the message first, then hand it to the transport layer.
    msg = self.servo_output_raw_encode(time_usec, port, servo1_raw,
                                       servo2_raw, servo3_raw, servo4_raw,
                                       servo5_raw, servo6_raw, servo7_raw,
                                       servo8_raw)
    return self.send(msg, force_mavlink1=force_mavlink1)
The RAW values of the servo outputs (for RC input from the remote, use the RC_CHANNELS messages). The standard PPM modulation is as follows: 1000 microseconds: 0%, 2000 microseconds: 100%. time_usec : Timestamp (microseconds since system boot) (uint32_t) port : Servo output port (set of 8 outputs = 1 port). Most MAVs will just use one, but this allows to encode more than 8 servos. (uint8_t) servo1_raw : Servo output 1 value, in microseconds (uint16_t) servo2_raw : Servo output 2 value, in microseconds (uint16_t) servo3_raw : Servo output 3 value, in microseconds (uint16_t) servo4_raw : Servo output 4 value, in microseconds (uint16_t) servo5_raw : Servo output 5 value, in microseconds (uint16_t) servo6_raw : Servo output 6 value, in microseconds (uint16_t) servo7_raw : Servo output 7 value, in microseconds (uint16_t) servo8_raw : Servo output 8 value, in microseconds (uint16_t)
def update(self, alert_condition_infra_id, policy_id, name, condition_type, alert_condition_configuration, enabled=True):
    """
    This API endpoint allows you to update an alert condition for infrastructure

    :type alert_condition_infra_id: int
    :param alert_condition_infra_id: Alert Condition Infra ID

    :type policy_id: int
    :param policy_id: Alert policy id

    :type name: str
    :param name: The name of the alert condition

    :type condition_type: str
    :param condition_type: The type of the alert condition can be
        infra_process_running, infra_metric or infra_host_not_reporting

    :type alert_condition_configuration: hash
    :param alert_condition_configuration: hash containing config for the alert;
        the caller's hash is not modified

    :type enabled: bool
    :param enabled: Whether to enable that alert condition

    :rtype: dict
    :return: The JSON response of the API

    ::

        {
            "data": {
                "id": "integer",
                "policy_id": "integer",
                "type": "string",
                "name": "string",
                "enabled": "boolean",
                "where_clause": "string",
                "comparison": "string",
                "filter": "hash",
                "critical_threshold": "hash",
                "event_type": "string",
                "process_where_clause": "string",
                "created_at_epoch_millis": "time",
                "updated_at_epoch_millis": "time"
            }
        }

    """
    # Copy the caller's configuration before injecting the required metadata
    # keys; the previous implementation wrote into the caller's dict.
    condition = dict(alert_condition_configuration)
    condition['type'] = condition_type
    condition['policy_id'] = policy_id
    condition['name'] = name
    condition['enabled'] = enabled

    return self._put(
        url='{0}alerts/conditions/{1}'.format(self.URL, alert_condition_infra_id),
        headers=self.headers,
        data={"data": condition}
    )
This API endpoint allows you to update an alert condition for infrastucture :type alert_condition_infra_id: int :param alert_condition_infra_id: Alert Condition Infra ID :type policy_id: int :param policy_id: Alert policy id :type name: str :param name: The name of the alert condition :type condition_type: str :param condition_type: The type of the alert condition can be infra_process_running, infra_metric or infra_host_not_reporting :type alert_condition_configuration: hash :param alert_condition_configuration: hash containing config for the alert :type enabled: bool :param enabled: Whether to enable that alert condition :rtype: dict :return: The JSON response of the API :: { "data": { "id": "integer", "policy_id": "integer", "type": "string", "name": "string", "enabled": "boolean", "where_clause": "string", "comparison": "string", "filter": "hash", "critical_threshold": "hash", "event_type": "string", "process_where_clause": "string", "created_at_epoch_millis": "time", "updated_at_epoch_millis": "time" } }
def set_chuid(ctx, management_key, pin):
    """
    Generate and set a CHUID (Card Holder Unique Identifier) on the YubiKey.
    """
    # PIV controller placed on the click context by the parent command group.
    controller = ctx.obj['controller']
    # Management key (and PIN, if required) authentication must succeed
    # before the CHUID object can be written.
    _ensure_authenticated(ctx, controller, pin, management_key)
    controller.update_chuid()
Generate and set a CHUID on the YubiKey.
def draw_char_screen(self):
    """
    Draws the output buffered in the char_buffer.
    """
    # NOTE(review): PIL's Image.new takes a (width, height) size, but this
    # passes (self.height, self.width) — presumably the attribute names are
    # swapped relative to PIL's convention; confirm before changing.
    self.screen = Image.new("RGB", (self.height, self.width))
    self.drawer = ImageDraw.Draw(self.screen)
    # Each buffer cell renders into a 6x9-pixel box; tinfo[0] is the
    # character and tinfo[1:] is its fill colour.
    for sy, line in enumerate(self.char_buffer):
        for sx, tinfo in enumerate(line):
            self.drawer.text((sx * 6, sy * 9), tinfo[0], fill=tinfo[1:])
    # Signal the attached output device that a new frame is ready.
    self.output_device.interrupt()
Draws the output buffered in the char_buffer.
def _cleanup_channel(self, channel_id): """Remove the the channel from the list of available channels. :param int channel_id: Channel id :return: """ with self.lock: if channel_id not in self._channels: return del self._channels[channel_id]
Remove the channel from the list of available channels.

        :param int channel_id: Channel id

        :return:
def words(ctx, input, output):
    """Read input document, and output words."""
    log.info('chemdataextractor.read.elements')
    log.info('Reading %s' % input.name)
    doc = Document.from_file(input)
    # One output line per sentence; tokens joined by single spaces.
    text_elements = (el for el in doc.elements if isinstance(el, Text))
    for element in text_elements:
        for sentence in element.sentences:
            output.write(u' '.join(sentence.raw_tokens))
            output.write(u'\n')
Read input document, and output words.
def before_create(self, context, resource):
    """
    Inspect a new resource (uploaded file or linked URL) before creation.

    The resource content is loaded and analysed to decide whether it could
    be a budget data package resource (all required headers plus any of the
    recommended headers present in the csv). Budget data package specific
    fields are then appended to the resource, which makes it possible to
    export the dataset as a budget data package.
    """
    upload = resource.get('upload', '')
    # An empty 'upload' value means the resource points at an external URL.
    if upload == '':
        self.data.load(resource['url'])
    else:
        self.data.load(upload.file)
    self.generate_budget_data_package(resource)
When triggered the resource which can either be uploaded or linked to will be parsed and analysed to see if it possibly is a budget data package resource (checking if all required headers and any of the recommended headers exist in the csv). The budget data package specific fields are then appended to the resource which makes it useful for export the dataset as a budget data package.
def remap( x, oMin, oMax, nMin, nMax ):
    """Linearly map *x* from the range [oMin, oMax] to [nMin, nMax],
    maintaining the ratio. Either range may be given reversed (min > max).

    If either range is degenerate (zero width), *x* is returned unchanged
    and a warning is logged.

    http://stackoverflow.com/questions/929103/convert-a-number-range-to-another-range-maintaining-ratio
    """
    # range check: a zero-width range cannot be rescaled
    if oMin == oMax:
        log.warning("Zero input range, unable to rescale")
        return x
    if nMin == nMax:
        log.warning("Zero output range, unable to rescale")
        return x

    # Normalise both ranges to ascending order, remembering the direction.
    oldMin, oldMax = min( oMin, oMax ), max( oMin, oMax )
    newMin, newMax = min( nMin, nMax ), max( nMin, nMax )
    reverseInput = oldMin != oMin
    reverseOutput = newMin != nMin

    # Distance of x from the logical start of the input range, scaled to
    # the width of the output range (computed once, on the correct side).
    if reverseInput:
        portion = (oldMax-x)*(newMax-newMin)/(oldMax-oldMin)
    else:
        portion = (x-oldMin)*(newMax-newMin)/(oldMax-oldMin)

    return newMax - portion if reverseOutput else portion + newMin
Linearly map a value from one numeric range to another, maintaining the ratio
        http://stackoverflow.com/questions/929103/convert-a-number-range-to-another-range-maintaining-ratio
def greenlet_logs(self):
    """
    This greenlet always runs in background to update current logs
    in MongoDB every 10 seconds

    Caution: it might get delayed when doing long blocking operations.
    Should we do this in a thread instead?
    """
    # Loop forever: flush, log (but swallow) any error, then sleep for the
    # configured interval. Note the "10 seconds" above is only the typical
    # value; the actual period comes from config["report_interval"].
    while True:
        try:
            self.flush_logs()
        except Exception as e:  # pylint: disable=broad-except
            self.log.error("When flushing logs: %s" % e)
        finally:
            # Sleep even after a failure so a broken flush can't spin.
            time.sleep(self.config["report_interval"])
This greenlet always runs in background to update current logs in MongoDB every 10 seconds. Caution: it might get delayed when doing long blocking operations. Should we do this in a thread instead?
def lu_companion(top_row, value):
    r"""Compute an LU-factored :math:`C - t I` and its 1-norm.

    The companion matrix :math:`C` is defined by ``top_row``; e.g. the
    polynomial :math:`t^3 + 3 t^2 - t + 2` has top row ``-3, 1, -2``.
    After cycling the first row to the bottom, row reduction of
    :math:`C - t I` leaves the bottom row holding exactly the
    intermediate values of a Horner's-method evaluation of the
    polynomial at ``value``.

    .. note::

       The output of this function is intended to be used with LAPACK's
       ``dgecon``, which expects both the 1-norm of the matrix and the
       matrix in already LU-factored form (as produced by ``dgetrf``).

    Args:
        top_row (numpy.ndarray): 1D array, top row of companion matrix.
        value (float): The :math:`t` value used to form :math:`C - t I`.

    Returns:
        Tuple[numpy.ndarray, float]: Pair of

        * 2D array of the LU-factored form of :math:`C - t I`, with the
          non-diagonal part of :math:`L` stored in the strictly lower
          triangle and :math:`U` stored in the upper triangle (the
          permutation matrix is skipped, as it won't impact the 1-norm)
        * the 1-norm of the matrix :math:`C - t I`
    """
    degree, = top_row.shape
    lu_mat = np.zeros((degree, degree), order="F")
    if degree == 1:
        # A 1x1 matrix is trivially its own LU factorization.
        lu_mat[0, 0] = top_row[0] - value
        return lu_mat, abs(lu_mat[0, 0])

    # Column 0: special case since it doesn't have ``-t`` above the diagonal.
    horner_curr = top_row[0] - value
    one_norm = 1.0 + abs(horner_curr)
    lu_mat[0, 0] = 1.0
    lu_mat[degree - 1, 0] = horner_curr
    # Columns 1-(end - 1): three values in LU and C - t I.
    abs_one_plus = 1.0 + abs(value)
    last_row = degree - 1
    # NOTE: builtin ``range`` replaces the former ``six.moves.xrange``;
    # it behaves identically for iteration on both Python 2 and 3.
    for col in range(1, degree - 1):
        curr_coeff = top_row[col]
        horner_curr = value * horner_curr + curr_coeff
        one_norm = max(one_norm, abs_one_plus + abs(curr_coeff))
        lu_mat[col - 1, col] = -value
        lu_mat[col, col] = 1.0
        lu_mat[last_row, col] = horner_curr
    # Last column: special case since it doesn't have ``-1`` on the diagonal.
    curr_coeff = top_row[last_row]
    horner_curr = value * horner_curr + curr_coeff
    one_norm = max(one_norm, abs(value) + abs(curr_coeff))
    lu_mat[last_row - 1, last_row] = -value
    lu_mat[last_row, last_row] = horner_curr
    return lu_mat, one_norm
r"""Compute an LU-factored :math:`C - t I` and its 1-norm. .. _dgecon: http://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga188b8d30443d14b1a3f7f8331d87ae60.html#ga188b8d30443d14b1a3f7f8331d87ae60 .. _dgetrf: http://www.netlib.org/lapack/explore-html/dd/d9a/group__double_g_ecomputational_ga0019443faea08275ca60a734d0593e60.html#ga0019443faea08275ca60a734d0593e60 .. note:: The output of this function is intended to be used with `dgecon`_ from LAPACK. ``dgecon`` expects both the 1-norm of the matrix and expects the matrix to be passed in an already LU-factored form (via `dgetrf`_). The companion matrix :math:`C` is given by the ``top_row``, for example, the polynomial :math:`t^3 + 3 t^2 - t + 2` has a top row of ``-3, 1, -2`` and the corresponding companion matrix is: .. math:: \left[\begin{array}{c c c} -3 & 1 & -2 \\ 1 & 0 & 0 \\ 0 & 1 & 0 \end{array}\right] After doing a full cycle of the rows (shifting the first to the last and moving all other rows up), row reduction of :math:`C - t I` yields .. math:: \left[\begin{array}{c c c} 1 & -t & 0 \\ 0 & 1 & -t \\ -3 - t & 1 & -2 \end{array}\right] = \left[\begin{array}{c c c} 1 & 0 & 0 \\ 0 & 1 & 0 \\ -3 - t & 1 + t(-3 - t) & 1 \end{array}\right] \left[\begin{array}{c c c} 1 & -t & 0 \\ 0 & 1 & -t \\ 0 & 0 & -2 + t(1 + t(-3 - t)) \end{array}\right] and in general, the terms in the bottom row correspond to the intermediate values involved in evaluating the polynomial via `Horner's method`_. .. _Horner's method: https://en.wikipedia.org/wiki/Horner%27s_method .. testsetup:: lu-companion import numpy as np import numpy.linalg from bezier._algebraic_intersection import lu_companion .. doctest:: lu-companion >>> top_row = np.asfortranarray([-3.0, 1.0, -2.0]) >>> t_val = 0.5 >>> lu_mat, one_norm = lu_companion(top_row, t_val) >>> lu_mat array([[ 1. , -0.5 , 0. ], [ 0. , 1. 
, -0.5 ], [-3.5 , -0.75 , -2.375]]) >>> one_norm 4.5 >>> l_mat = np.tril(lu_mat, k=-1) + np.eye(3) >>> u_mat = np.triu(lu_mat) >>> a_mat = l_mat.dot(u_mat) >>> a_mat array([[ 1. , -0.5, 0. ], [ 0. , 1. , -0.5], [-3.5, 1. , -2. ]]) >>> np.linalg.norm(a_mat, ord=1) 4.5 Args: top_row (numpy.ndarray): 1D array, top row of companion matrix. value (float): The :math:`t` value used to form :math:`C - t I`. Returns: Tuple[numpy.ndarray, float]: Pair of * 2D array of LU-factored form of :math:`C - t I`, with the non-diagonal part of :math:`L` stored in the strictly lower triangle and :math:`U` stored in the upper triangle (we skip the permutation matrix, as it won't impact the 1-norm) * the 1-norm the matrix :math:`C - t I` As mentioned above, these two values are meant to be used with `dgecon`_.
def _get_environ_vars(self): # type: () -> Iterable[Tuple[str, str]] """Returns a generator with all environmental vars with prefix PIP_""" for key, val in os.environ.items(): should_be_yielded = ( key.startswith("PIP_") and key[4:].lower() not in self._ignore_env_names ) if should_be_yielded: yield key[4:].lower(), val
Returns a generator with all environmental vars with prefix PIP_
def subscriber_choice_control(self):
    """Validate the subscriber selection for a migration operation.

    Stores the chosen subscribers and the selected command in
    ``task_data``.  When nothing was selected, ``option`` stays unset
    and an error message is recorded instead.
    """
    task_data = self.current.task_data
    task_data['option'] = None
    chosen, names = self.return_selected_form_items(
        self.input['form']['SubscriberList'])
    task_data['chosen_subscribers'] = chosen
    # Error message is recorded up front and removed again when the
    # selection turns out to be non-empty.
    task_data['msg'] = ("You should choose at least one subscriber "
                       "for migration operation.")
    if chosen:
        task_data['option'] = self.input['cmd']
        del task_data['msg']
It controls subscribers choice and generates error message if there is a non-choice.
def compare_ecp_pots(potential1, potential2, compare_meta=False, rel_tol=0.0):
    '''Compare two ECP potentials for approximate equality.

    Gaussian exponents and coefficients must agree within ``rel_tol``;
    r-exponents and angular momentum are compared exactly.  When
    ``compare_meta`` is True, the ``ecp_type`` metadata must also match
    exactly.
    '''
    if potential1['angular_momentum'] != potential2['angular_momentum']:
        return False

    # r exponents are integers: exact comparison.
    if potential1['r_exponents'] != potential2['r_exponents']:
        return False
    if not _compare_vector(potential1['gaussian_exponents'],
                           potential2['gaussian_exponents'], rel_tol):
        return False
    if not _compare_matrix(potential1['coefficients'],
                           potential2['coefficients'], rel_tol):
        return False
    if compare_meta and potential1['ecp_type'] != potential2['ecp_type']:
        return False
    return True
Compare two ecp potentials for approximate equality (exponents/coefficients are within a tolerance) If compare_meta is True, the metadata is also compared for exact equality.
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep,
                              shortTermSize, shortTermStep, outPutFile,
                              storeStFeatures=False, storeToCSV=False,
                              PLOT=False):
    """Wrapper that extracts mid-term audio features and stores them.

    a) reads the content of a WAV file
    b) performs mid-term feature extraction on that signal
    c) writes the mid-term feature sequences to a numpy file, and
       optionally to CSV, optionally together with the short-term
       features
    """
    [fs, x] = audioBasicIO.readAudioFile(fileName)
    x = audioBasicIO.stereo2mono(x)
    # Window/step sizes are given in seconds; convert to samples.
    mt_feats, st_feats, _ = mtFeatureExtraction(
        x, fs,
        round(fs * midTermSize), round(fs * midTermStep),
        round(fs * shortTermSize), round(fs * shortTermStep))

    # save mt features to numpy file
    numpy.save(outPutFile, mt_feats)
    if PLOT:
        print("Mid-term numpy file: " + outPutFile + ".npy saved")
    if storeToCSV:
        numpy.savetxt(outPutFile + ".csv", mt_feats.T, delimiter=",")
        if PLOT:
            print("Mid-term CSV file: " + outPutFile + ".csv saved")

    if storeStFeatures:
        # save st features to numpy file
        numpy.save(outPutFile + "_st", st_feats)
        if PLOT:
            print("Short-term numpy file: " + outPutFile + "_st.npy saved")
        if storeToCSV:
            # store st features to CSV file
            numpy.savetxt(outPutFile + "_st.csv", st_feats.T, delimiter=",")
            if PLOT:
                print("Short-term CSV file: " + outPutFile + "_st.csv saved")
This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file
def query_organism_host():
    """ Returns list of host organism by query parameters
    ---
    tags:
    - Query functions
    parameters:
    - name: taxid
      in: query
      type: integer
      required: false
      description: NCBI taxonomy identifier
      default: 9606
    - name: entry_name
      in: query
      type: string
      required: false
      description: UniProt entry name
      default: A4_HUMAN
    - name: limit
      in: query
      type: integer
      required: false
      description: limit of results numbers
      default: 10
    """
    # NOTE: the docstring above is a Swagger/YAML spec parsed at runtime
    # (flasgger-style) -- do not reformat or reword it.
    # Whitelist the accepted query-string parameters, coercing the
    # integer-valued ones.
    args = get_args(
        request_args=request.args,
        allowed_str_args=['entry_name'],
        allowed_int_args=['taxid', 'limit']
    )
    return jsonify(query.organism_host(**args))
Returns list of host organism by query parameters --- tags: - Query functions parameters: - name: taxid in: query type: integer required: false description: NCBI taxonomy identifier default: 9606 - name: entry_name in: query type: string required: false description: UniProt entry name default: A4_HUMAN - name: limit in: query type: integer required: false description: limit of results numbers default: 10
def use_plenary_agent_view(self):
    """Pass through to provider ResourceAgentSession.use_plenary_agent_view"""
    self._object_views['agent'] = PLENARY
    # Propagate the view setting to every tracked provider session;
    # sessions that do not support it are simply skipped.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_plenary_agent_view()
        except AttributeError:
            pass
Pass through to provider ResourceAgentSession.use_plenary_agent_view
def start_watching(self, cluster, callback):
    """
    Initiates the "watching" of a cluster's associated znode.

    This is done via kazoo's ChildrenWatch object. When a cluster's
    znode's child nodes are updated, a callback is fired and we update
    the cluster's `nodes` attribute based on the existing child znodes
    and fire a passed-in callback with no arguments once done.

    If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL`
    seconds before trying again as long as no ChildrenWatch exists for the
    given cluster yet and we are not in the process of shutting down.
    """
    logger.debug("starting to watch cluster %s", cluster.name)
    # Block until we are either connected to zookeeper or shutting down.
    wait_on_any(self.connected, self.shutdown)
    logger.debug("done waiting on (connected, shutdown)")
    znode_path = "/".join([self.base_path, cluster.name])
    # Per-znode event used to cancel this particular watch externally.
    self.stop_events[znode_path] = threading.Event()

    def should_stop():
        # Stop when our stop event was removed or set, or on global shutdown.
        return (
            znode_path not in self.stop_events
            or self.stop_events[znode_path].is_set()
            or self.shutdown.is_set()
        )

    # Poll until the cluster's znode exists (or we are told to stop).
    while not should_stop():
        try:
            if self.client.exists(znode_path):
                break
        except exceptions.ConnectionClosedError:
            break
        wait_on_any(
            self.stop_events[znode_path], self.shutdown,
            timeout=NO_NODE_INTERVAL
        )
    logger.debug("setting up ChildrenWatch for %s", znode_path)

    @self.client.ChildrenWatch(znode_path)
    def watch(children):
        # Returning False from a kazoo watch function cancels the watch.
        if should_stop():
            return False
        logger.debug("znode children changed! (%s)", znode_path)
        new_nodes = []
        for child in children:
            child_path = "/".join([znode_path, child])
            try:
                new_nodes.append(
                    Node.deserialize(self.client.get(child_path)[0])
                )
            except ValueError:
                # Skip malformed node payloads rather than aborting the watch.
                logger.exception("Invalid node at path '%s'", child)
                continue
        cluster.nodes = new_nodes
        callback()
Initiates the "watching" of a cluster's associated znode. This is done via kazoo's ChildrenWatch object. When a cluster's znode's child nodes are updated, a callback is fired and we update the cluster's `nodes` attribute based on the existing child znodes and fire a passed-in callback with no arguments once done. If the cluster's znode does not exist we wait for `NO_NODE_INTERVAL` seconds before trying again as long as no ChildrenWatch exists for the given cluster yet and we are not in the process of shutting down.
def copy_config_file(self, config_file, path=None, overwrite=False):
    """Copy a default config file into the active profile directory.

    Default configuration files are kept in :mod:`IPython.config.default`;
    when ``path`` is None that bundled directory is used as the source.
    Returns True when the file was copied, False when the destination
    already exists and ``overwrite`` is False.
    """
    destination = os.path.join(self.location, config_file)
    if not overwrite and os.path.isfile(destination):
        # Never clobber an existing profile file unless asked to.
        return False
    if path is None:
        path = os.path.join(get_ipython_package_dir(), u'config',
                            u'profile', u'default')
    shutil.copy(os.path.join(path, config_file), destination)
    return True
Copy a default config file into the active profile directory. Default configuration files are kept in :mod:`IPython.config.default`. This function moves these from that location to the working profile directory.
def deactivate_workflow_transitions(cr, model, transitions=None):
    """
    Disable workflow transitions for workflows on a given model.

    This can be necessary for automatic workflow transitions when writing
    to an object via the ORM in the post migration step.
    Returns a dictionary to be used on reactivate_workflow_transitions

    :param model: the model for which workflow transitions should be \
    deactivated
    :param transitions: a list of ('module', 'name') xmlid tuples of \
    transitions to be deactivated. Don't pass this if there's no specific \
    reason to do so, the default is to deactivate all transitions

    .. versionadded:: 7.0
    """
    transition_ids = []
    if transitions:
        # Resolve each ('module', 'name') xmlid to its database id.
        data_obj = RegistryManager.get(cr.dbname)['ir.model.data']
        for module, name in transitions:
            try:
                transition_ids.append(
                    data_obj.get_object_reference(
                        cr, SUPERUSER_ID, module, name)[1])
            except ValueError:
                # Unknown xmlid: skip it silently.
                continue
    else:
        # No explicit list given: collect every transition attached to
        # any activity of the model's workflows.
        cr.execute(
            '''select distinct t.id from wkf w join wkf_activity a on a.wkf_id=w.id join wkf_transition t on t.act_from=a.id or t.act_to=a.id where w.osv=%s''', (model,))
        transition_ids = [i for i, in cr.fetchall()]
    # Remember the original conditions so reactivate_workflow_transitions
    # can restore them later.
    # NOTE(review): an empty transition_ids produces SQL "in ()", which
    # PostgreSQL rejects -- confirm callers always match some transitions.
    cr.execute(
        'select id, condition from wkf_transition where id in %s',
        (tuple(transition_ids),))
    transition_conditions = dict(cr.fetchall())
    # Force every selected transition's condition to a constant False so
    # it can never fire.
    cr.execute(
        "update wkf_transition set condition = 'False' WHERE id in %s",
        (tuple(transition_ids),))
    return transition_conditions
Disable workflow transitions for workflows on a given model. This can be necessary for automatic workflow transitions when writing to an object via the ORM in the post migration step. Returns a dictionary to be used on reactivate_workflow_transitions :param model: the model for which workflow transitions should be \ deactivated :param transitions: a list of ('module', 'name') xmlid tuples of \ transitions to be deactivated. Don't pass this if there's no specific \ reason to do so, the default is to deactivate all transitions .. versionadded:: 7.0
def subject_sequence_retriever(fasta_handle, b6_handle, e_value,
                               *args, **kwargs):
    """Returns FASTA entries for subject sequences from BLAST hits

    Stores B6/M8 entries with E-Values below the e_value cutoff. Then
    iterates through the FASTA file and if an entry matches the subject
    of an B6/M8 entry, it's sequence is extracted and returned as a
    FASTA entry plus the E-Value.

    Args:
        fasta_handle (file): FASTA file handle, can technically be any
            iterable that returns FASTA "lines"

        b6_handle (file): B6/M8 file handle, can technically be any
            iterable that returns B6/M8 "lines"

        e_value (float): Max E-Value of entry to return

        *args: Variable length argument list for b6_iter

        **kwargs: Arbitrary keyword arguments for b6_iter

    Yields:
        FastaEntry: class containing all FASTA data

    Example:
        Note: These doctests will not pass, examples are only in doctest
        format as per convention. bio_utils uses pytests for testing.

        >>> fasta_handle = open('test.fasta')
        >>> b6_handle = open('test.b6')
        >>> for entry in subject_sequence_retriever(fasta_handle,
        ...                                         b6_handle, 1e5)
        ...     print(entry.sequence)  # Print aligned subject sequence
    """
    # Map each subject id to the (start, end, evalue) of its filtered hits.
    filtered_b6 = defaultdict(list)
    for entry in b6_evalue_filter(b6_handle, e_value, *args, **kwargs):
        filtered_b6[entry.subject].append(
            (entry.subject_start, entry.subject_end, entry._evalue_str))
    for fastaEntry in fasta_iter(fasta_handle):
        if fastaEntry.id in filtered_b6:
            for alignment in filtered_b6[fastaEntry.id]:
                # Convert 1-based alignment coordinates to 0-based.
                # NOTE(review): ``end`` is decremented and then used as an
                # exclusive slice bound, which drops the final base of the
                # alignment -- confirm the intended coordinate convention.
                start = alignment[0] - 1
                end = alignment[1] - 1

                # Get subject sequence
                if start < end:
                    subject_sequence = fastaEntry.sequence[start:end]
                elif start > end:
                    # Presumably a reverse-strand hit: slice then reverse.
                    subject_sequence = fastaEntry.sequence[end:start][::-1]
                else:
                    subject_sequence = fastaEntry.sequence[start]
                # NOTE(review): the entry's sequence is overwritten in
                # place, so with multiple alignments per subject, later
                # iterations slice the already-sliced sequence and the
                # description accumulates E-values -- looks unintended;
                # confirm upstream usage.
                fastaEntry.sequence = subject_sequence

                # Add E-value to FASTA/Q header
                if fastaEntry.description == '':
                    fastaEntry.description = 'E-value: '
                else:
                    fastaEntry.description += ' E-value: '
                fastaEntry.description += alignment[2]

                yield fastaEntry
Returns FASTA entries for subject sequences from BLAST hits Stores B6/M8 entries with E-Values below the e_value cutoff. Then iterates through the FASTA file and if an entry matches the subject of an B6/M8 entry, it's sequence is extracted and returned as a FASTA entry plus the E-Value. Args: fasta_handle (file): FASTA file handle, can technically be any iterable that returns FASTA "lines" b6_handle (file): B6/M8 file handle, can technically be any iterable that returns B6/M8 "lines" e_value (float): Max E-Value of entry to return *args: Variable length argument list for b6_iter **kwargs: Arbitrary keyword arguments for b6_iter Yields: FastaEntry: class containing all FASTA data Example: Note: These doctests will not pass, examples are only in doctest format as per convention. bio_utils uses pytests for testing. >>> fasta_handle = open('test.fasta') >>> b6_handle = open('test.b6') >>> for entry in subject_sequence_retriever(fasta_handle, ... b6_handle, 1e5) ... print(entry.sequence) # Print aligned subject sequence
def _check_iou_licence(self):
    """
    Checks for a valid IOU key in the iourc file (paranoid mode).

    Raises IOUError when the iourc file is missing/malformed or when the
    licence key does not match the one derived from this host.
    """
    try:
        license_check = self._config().getboolean("license_check", True)
    except ValueError:
        raise IOUError("Invalid licence check setting")
    if license_check is False:
        # Licence checking disabled by configuration: nothing to validate.
        return

    config = configparser.ConfigParser()
    try:
        with open(self.iourc_path, encoding="utf-8") as f:
            config.read_file(f)
    except OSError as e:
        raise IOUError("Could not open iourc file {}: {}".format(self.iourc_path, e))
    except configparser.Error as e:
        raise IOUError("Could not parse iourc file {}: {}".format(self.iourc_path, e))
    except UnicodeDecodeError as e:
        raise IOUError("Non ascii characters in iourc file {}, please remove them: {}".format(self.iourc_path, e))
    if "license" not in config:
        raise IOUError("License section not found in iourc file {}".format(self.iourc_path))
    hostname = socket.gethostname()
    if len(hostname) > 15:
        # The key is derived from the hostname; older IOU images truncate
        # long hostnames, so the stored key may not match.
        log.warning("Older IOU images may not boot because hostname '{}' length is above 15 characters".format(hostname))
    if hostname not in config["license"]:
        raise IOUError("Hostname \"{}\" not found in iourc file {}".format(hostname, self.iourc_path))
    user_ioukey = config["license"][hostname]
    # Keys are stored as 16 hex characters followed by a ';' terminator.
    if user_ioukey[-1:] != ';':
        raise IOUError("IOU key not ending with ; in iourc file {}".format(self.iourc_path))
    if len(user_ioukey) != 17:
        raise IOUError("IOU key length is not 16 characters in iourc file {}".format(self.iourc_path))
    user_ioukey = user_ioukey[:16]

    # We can't verify the key itself under test, since that would mean
    # distributing a valid licence key in tests or generating one.
    if not hasattr(sys, "_called_from_test"):
        try:
            hostid = (yield from gns3server.utils.asyncio.subprocess_check_output("hostid")).strip()
        except FileNotFoundError as e:
            raise IOUError("Could not find hostid: {}".format(e))
        except subprocess.SubprocessError as e:
            raise IOUError("Could not execute hostid: {}".format(e))
        try:
            ioukey = int(hostid, 16)
        except ValueError:
            raise IOUError("Invalid hostid detected: {}".format(hostid))
        for x in hostname:
            ioukey += ord(x)
        # Expected key: first 16 hex chars of an md5 over fixed padding,
        # the numeric hostid and the hostname sum.
        pad1 = b'\x4B\x58\x21\x81\x56\x7B\x0D\xF3\x21\x43\x9B\x7E\xAC\x1D\xE6\x8A'
        pad2 = b'\x80' + 39 * b'\0'
        ioukey = hashlib.md5(pad1 + pad2 + struct.pack('!I', ioukey) + pad1).hexdigest()[:16]
        if ioukey != user_ioukey:
            raise IOUError("Invalid IOU license key {} detected in iourc file {} for host {}".format(user_ioukey, self.iourc_path, hostname))
Checks for a valid IOU key in the iourc file (paranoid mode).
def determine_node(self):
    """
    Determines the type of node based on a combination of forwarding
    reachability and NAT type.

    Returns one of "simultaneous", "passive" or "active".
    """
    # Manually set node_type as simultaneous.
    if self.node_type == "simultaneous":
        if self.nat_type != "unknown":
            return "simultaneous"

    # Get IP of binding interface.
    unspecific_bind = ["0.0.0.0", "127.0.0.1", "localhost"]
    if self.passive_bind in unspecific_bind:
        lan_ip = get_lan_ip(self.interface)
    else:
        lan_ip = self.passive_bind

    # Passive node checks.
    if lan_ip is not None \
            and self.passive_port is not None and self.enable_forwarding:
        self.debug_print("Checking if port is forwarded.")

        # Check port isn't already forwarded.
        if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                             self.forwarding_servers):
            msg = "Port already forwarded. Skipping NAT traversal."
            self.debug_print(msg)
            self.forwarding_type = "forwarded"
            return "passive"
        else:
            self.debug_print("Port is not already forwarded.")

        # Most routers.
        try:
            self.debug_print("Trying UPnP")
            UPnP(self.interface).forward_port("TCP", self.passive_port,
                                              lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "UPnP"
                self.debug_print("Forwarded port with UPnP.")
            else:
                self.debug_print("UPnP failed to forward port.")
        except Exception as e:
            # Log exception.
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("UPnP failed to forward port.")

        # Apple devices.
        try:
            self.debug_print("Trying NATPMP.")
            NatPMP(self.interface).forward_port("TCP", self.passive_port,
                                                lan_ip)
            if is_port_forwarded(lan_ip, self.passive_port, "TCP",
                                 self.forwarding_servers):
                self.forwarding_type = "NATPMP"
                self.debug_print("Port forwarded with NATPMP.")
            else:
                self.debug_print("Failed to forward port with NATPMP.")
                self.debug_print("Falling back on TCP hole punching or"
                                 " proxying.")
        except Exception as e:
            # Log exception
            error = parse_exception(e)
            log_exception(self.error_log_path, error)
            self.debug_print("Failed to forward port with NATPMP.")

        # Check it worked.
        # NOTE(review): assumes forwarding_type starts out as "manual"
        # when no forwarding method succeeded -- confirm against __init__.
        if self.forwarding_type != "manual":
            return "passive"

    # Fail-safe node types.
    if self.nat_type != "unknown":
        return "simultaneous"
    else:
        return "active"
Determines the type of node based on a combination of forwarding reachability and NAT type.
def parse_node_response(self, response):
    """
    Update the object with the remote node object

    :param response: mapping of node attributes returned by the remote
        compute server
    """
    for key, value in response.items():
        # Well-known attributes are mirrored onto dedicated fields.
        if key == "console":
            self._console = value
        elif key == "node_directory":
            self._node_directory = value
        elif key == "command_line":
            self._command_line = value
        elif key == "status":
            self._status = value
        elif key == "console_type":
            self._console_type = value
        elif key == "name":
            self.name = value
        elif key in ["node_id", "project_id", "console_host",
                     "startup_config_content", "private_config_content",
                     "startup_script"]:
            # These keys are not generic properties; drop any stale copy.
            if key in self._properties:
                del self._properties[key]
        else:
            # Everything else is stored as a generic node property.
            self._properties[key] = value
    self._list_ports()
    # Notify every attached link so endpoints can refresh themselves.
    for link in self._links:
        yield from link.node_updated(self)
Update the object with the remote node object
def delete_collection_namespaced_replication_controller(self, namespace, **kwargs):  # noqa: E501
    """Delete a collection of ReplicationController objects.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_collection_namespaced_replication_controller(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param kwargs: standard Kubernetes list/delete-collection options
        (``pretty``, ``_continue``, ``field_selector``,
        ``label_selector``, ``limit``, ``resource_version``,
        ``timeout_seconds``, ``watch``, ``include_uninitialized``).
    :return: V1Status; if the method is called asynchronously, returns
        the request thread.
    """
    # Both the sync and the async path delegate to the *_with_http_info
    # variant; for async callers the returned thread is passed through
    # unchanged, for sync callers the variant already yields the data
    # because only the HTTP body is requested.
    kwargs['_return_http_data_only'] = True
    result = self.delete_collection_namespaced_replication_controller_with_http_info(namespace, **kwargs)  # noqa: E501
    return result
delete_collection_namespaced_replication_controller # noqa: E501 delete collection of ReplicationController # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_replication_controller(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param bool include_uninitialized: If true, partially initialized resources are included in the response. :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. 
Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. :param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. 
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
def _apply_dvportgroup_out_shaping(pg_name, out_shaping, out_shaping_conf):
    '''
    Copies out-shaping settings from a config dict onto a
    vim.DVSTrafficShapingPolicy object.

    pg_name
        The name of the portgroup (used for logging only)

    out_shaping
        The vim.DVSTrafficShapingPolicy to apply the config to

    out_shaping_conf
        The out shaping config dict
    '''
    log.trace('Building portgroup\'s \'%s\' out shaping policy', pg_name)
    # Long-valued policies are only applied when the config holds a truthy
    # value; each one is wrapped in a fresh vim.LongPolicy.
    long_policies = (('average_bandwidth', 'averageBandwidth'),
                     ('burst_size', 'burstSize'),
                     ('peak_bandwidth', 'peakBandwidth'))
    for conf_key, attr_name in long_policies:
        if out_shaping_conf.get(conf_key):
            policy = vim.LongPolicy()
            policy.value = out_shaping_conf[conf_key]
            setattr(out_shaping, attr_name, policy)
    # 'enabled' may legitimately be False, so test for key presence
    # rather than truthiness.
    if 'enabled' in out_shaping_conf:
        out_shaping.enabled = vim.BoolPolicy()
        out_shaping.enabled.value = out_shaping_conf['enabled']
Applies the values in out_shaping_conf to an out_shaping object pg_name The name of the portgroup out_shaping The vim.DVSTrafficShapingPolicy to apply the config to out_shaping_conf The out shaping config
def mean(self):
    """Compute a total score for each model over all the tests.

    Uses the `norm_score` attribute, since otherwise direct comparison
    across different kinds of scores would not be possible.
    """
    # Weighted sum of the normalized scores.
    normalized = np.asarray(self.norm_scores)
    return np.dot(normalized, self.weights)
Compute a total score for each model over all the tests. Uses the `norm_score` attribute, since otherwise direct comparison across different kinds of scores would not be possible.
def perform_permissions_check(self, user, obj, perms):
    """ Performs the permissions check. """
    # Moderation-queue access is decided solely by the forum permission
    # handler; the obj/perms arguments are not consulted for this view.
    handler = self.request.forum_permission_handler
    return handler.can_access_moderation_queue(user)
Performs the permissions check.
def dorun(method, platonics=None, nsnrs=20, noise_samples=30, sweeps=30, burn=15):
    """Sample comparison states for a set of platonic images over a range
    of noise levels (sigmas) and collect CRBs, sampled values and errors.

    Example::

        platonics = create_many_platonics(N=50)
        dorun(platonics)

    Args:
        method: featuring method forwarded to ``create_comparison_state``.
        platonics: iterable of ``(image, pos)`` pairs to process.
        nsnrs: number of log-spaced noise levels (sigmas) to scan.
        noise_samples: number of noise realizations sampled per sigma.
        sweeps: Monte-Carlo sweeps per sample.
        burn: burn-in sweeps discarded per sample.

    Returns:
        ``[crbs, vals, errs, poss, sigmas]`` with the first four reshaped
        to (nsnrs, n_images, ...) NumPy arrays.
    """
    sigmas = np.logspace(np.log10(1.0 / 2048), 0, nsnrs)
    crbs, vals, errs, poss = [], [], [], []

    for sigma in sigmas:
        # Fixed: the original Python 2 print statements are syntax errors
        # under Python 3; converted to print() calls with equivalent output.
        print("#### sigma:", sigma)

        for i, (image, pos) in enumerate(platonics):
            print('image', i, '|', end=' ')
            s, im = create_comparison_state(image, pos, method=method)

            # typical image
            set_image(s, im, sigma)
            crbs.append(crb(s))

            val, err = sample(s, im, sigma, N=noise_samples,
                              sweeps=sweeps, burn=burn)
            poss.append(pos)
            vals.append(val)
            errs.append(err)

    shape0 = (nsnrs, len(platonics), -1)
    shape1 = (nsnrs, len(platonics), noise_samples, -1)

    crbs = np.array(crbs).reshape(shape0)
    vals = np.array(vals).reshape(shape1)
    errs = np.array(errs).reshape(shape1)
    poss = np.array(poss).reshape(shape0)
    return [crbs, vals, errs, poss, sigmas]
platonics = create_many_platonics(N=50) dorun(platonics)
def save_dict_to_file(filename, dictionary):
    """Saves dictionary as CSV file.

    Each key/value pair becomes one two-column row; both cells are
    written via ``str()``.

    Args:
        filename: path of the CSV file to (over)write.
        dictionary: mapping to serialize.
    """
    # newline='' is the documented way to open files for the csv module;
    # it prevents spurious blank rows on Windows.
    with open(filename, 'w', newline='') as f:
        writer = csv.writer(f)
        # dict.items() replaces the Python-2-era six.iteritems helper.
        for k, v in dictionary.items():
            writer.writerow([str(k), str(v)])
Saves dictionary as CSV file.
def create_border(video, color="blue", border_percent=2):
  """Creates a border around each frame to differentiate input and target.

  Args:
    video: 5-D NumPy array, (batch, time, height, width, channels).
    color: string, "blue", "red" or "green".
    border_percent: Percentage of the frame covered by the border.
  Returns:
    video: 5-D NumPy array, modified in place.
  """
  # Do not create border if the video is not in RGB format
  if video.shape[-1] != 3:
    return video
  color_to_axis = {"blue": 2, "red": 0, "green": 1}
  axis = color_to_axis[color]
  _, _, height, width, _ = video.shape
  # Fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24;
  # the builtin int gives the same integer result for these scalars.
  border_height = np.ceil(border_percent * height / 100.0).astype(int)
  border_width = np.ceil(border_percent * width / 100.0).astype(int)
  # Saturate the chosen channel along all four edges of every frame.
  video[:, :, :border_height, :, axis] = 255
  video[:, :, -border_height:, :, axis] = 255
  video[:, :, :, :border_width, axis] = 255
  video[:, :, :, -border_width:, axis] = 255
  return video
Creates a border around each frame to differentiate input and target. Args: video: 5-D NumPy array. color: string, "blue", "red" or "green". border_percent: Percentage of the frame covered by the border. Returns: video: 5-D NumPy array.
def strip_water(self, os=None, o=None, on=None, compact=False,
                resn="SOL", groupname="notwater", **kwargs):
    """Write xtc and tpr with water (by resname) removed.

    :Keywords:
       *os*
          Name of the output tpr file; by default use the original but
          insert "nowater" before suffix.
       *o*
          Name of the output trajectory; by default use the original name
          but insert "nowater" before suffix.
       *on*
          Name of a new index file (without water).
       *compact*
          ``True``: write a compact and centered trajectory
          ``False``: use trajectory as it is [``False``]
       *centergroup*
          Index group used for centering ["Protein"]

          .. Note:: If *input* is provided (see below under *kwargs*)
             then *centergroup* is ignored and the group for centering
             is taken as the first entry in *input*.

       *resn*
          Residue name of the water molecules; all these residues are
          excluded.
       *groupname*
          Name of the group that is generated by subtracting all waters
          from the system.
       *force* : Boolean
         - ``True``: overwrite existing trajectories
         - ``False``: throw a IOError exception
         - ``None``: skip existing and log a warning [default]
       *kwargs*
          are passed on to :func:`gromacs.cbook.trj_compact` (unless the
          values have to be set to certain values such as s, f, n, o
          keywords). The *input* keyword is always mangled: Only the
          first entry (the group to centre the trajectory on) is kept,
          and as a second group (the output group) *groupname* is used.

    :Returns:
       dictionary with keys *tpr*, *xtc*, *ndx* which are the names of
       the the new files

    .. warning:: The input tpr file should *not* have *any position
                 restraints*; otherwise Gromacs will throw a hissy-fit
                 and say

                 *Software inconsistency error: Position restraint
                 coordinates are missing*

                 (This appears to be a bug in Gromacs 4.x.)
    """
    force = kwargs.pop('force', self.force)

    # Output names default to the originals with "_nowater" spliced in.
    newtpr = self.outfile(self.infix_filename(os, self.tpr, '_nowater'))
    newxtc = self.outfile(self.infix_filename(o, self.xtc, '_nowater'))
    newndx = self.outfile(self.infix_filename(on, self.tpr, '_nowater', 'ndx'))

    nowater_ndx = self._join_dirname(newtpr, "nowater.ndx")  # refers to original tpr

    if compact:
        TRJCONV = trj_compact
        # input overrides centergroup
        if kwargs.get('centergroup') is not None and 'input' in kwargs:
            logger.warn("centergroup = %r will be superceded by input[0] = %r",
                        kwargs['centergroup'], kwargs['input'][0])
        _input = kwargs.get('input', [kwargs.get('centergroup', 'Protein')])
        kwargs['input'] = [_input[0], groupname]  # [center group, write-out selection]
        del _input
        logger.info("Creating a compact trajectory centered on group %r",
                    kwargs['input'][0])
        logger.info("Writing %r to the output trajectory", kwargs['input'][1])
    else:
        TRJCONV = gromacs.trjconv
        kwargs['input'] = [groupname]
        logger.info("Writing %r to the output trajectory (no centering)",
                    kwargs['input'][0])

    # clean kwargs, only legal arguments for Gromacs tool trjconv should remain
    kwargs.pop("centergroup", None)

    NOTwater = "! r {resn!s}".format(**vars())  # make_ndx selection ("not water residues")

    with utilities.in_dir(self.dirname):
        # ugly because I cannot break from the block
        if not self.check_file_exists(newxtc, resolve="indicate", force=force):

            # make no-water index
            B = IndexBuilder(struct=self.tpr, selections=['@' + NOTwater],
                             ndx=self.ndx, out_ndx=nowater_ndx)
            B.combine(name_all=groupname, operation="|", defaultgroups=True)

            logger.debug("Index file for water removal: %r", nowater_ndx)
            logger.info("TPR file without water {newtpr!r}".format(**vars()))
            gromacs.tpbconv(s=self.tpr, o=newtpr, n=nowater_ndx, input=[groupname])

            logger.info("NDX of the new system %r", newndx)
            gromacs.make_ndx(f=newtpr, o=newndx, input=['q'],
                             stderr=False, stdout=False)
            # PROBLEM: If self.ndx contained a custom group required for
            # fitting then we are loosing this group here. We could try to
            # merge only this group but it is possible that atom indices
            # changed. The only way to solve this is to regenerate the group
            # with a selection or only use Gromacs default groups.

            logger.info("Trajectory without water {newxtc!r}".format(**vars()))
            kwargs['s'] = self.tpr
            kwargs['f'] = self.xtc
            kwargs['n'] = nowater_ndx
            kwargs['o'] = newxtc
            TRJCONV(**kwargs)

            logger.info("pdb and gro for visualization")
            for ext in 'pdb', 'gro':
                try:
                    # see warning in doc ... so we don't use the new xtc but the old one
                    kwargs['o'] = self.filename(newtpr, ext=ext)
                    TRJCONV(dump=0, stdout=False, stderr=False, **kwargs)  # silent
                except:
                    logger.exception("Failed building the water-less %(ext)s. "
                                     "Position restraints in tpr file (see docs)?" % vars())
    logger.info("strip_water() complete")

    self.nowater[self.rp(newxtc)] = Transformer(dirname=self.dirname, s=newtpr,
                                                f=newxtc, n=newndx, force=force)
    return {'tpr': self.rp(newtpr), 'xtc': self.rp(newxtc), 'ndx': self.rp(newndx)}
Write xtc and tpr with water (by resname) removed. :Keywords: *os* Name of the output tpr file; by default use the original but insert "nowater" before suffix. *o* Name of the output trajectory; by default use the original name but insert "nowater" before suffix. *on* Name of a new index file (without water). *compact* ``True``: write a compact and centered trajectory ``False``: use trajectory as it is [``False``] *centergroup* Index group used for centering ["Protein"] .. Note:: If *input* is provided (see below under *kwargs*) then *centergroup* is ignored and the group for centering is taken as the first entry in *input*. *resn* Residue name of the water molecules; all these residues are excluded. *groupname* Name of the group that is generated by subtracting all waters from the system. *force* : Boolean - ``True``: overwrite existing trajectories - ``False``: throw a IOError exception - ``None``: skip existing and log a warning [default] *kwargs* are passed on to :func:`gromacs.cbook.trj_compact` (unless the values have to be set to certain values such as s, f, n, o keywords). The *input* keyword is always mangled: Only the first entry (the group to centre the trajectory on) is kept, and as a second group (the output group) *groupname* is used. :Returns: dictionary with keys *tpr*, *xtc*, *ndx* which are the names of the the new files .. warning:: The input tpr file should *not* have *any position restraints*; otherwise Gromacs will throw a hissy-fit and say *Software inconsistency error: Position restraint coordinates are missing* (This appears to be a bug in Gromacs 4.x.)
def transform_case(self, description, case_type):
    """Transforms the case of the expression description, based on options

    Args:
        description: The description to transform
        case_type: The casing type that controls the output casing
    Returns:
        The transformed description with proper casing
    """
    # Guard against empty input: Sentence casing below indexes
    # description[0], which would raise IndexError on "".
    if not description:
        return description
    if case_type == CasingTypeEnum.Sentence:
        # Capitalize only the first character, preserving the rest.
        description = "{}{}".format(description[0].upper(), description[1:])
    elif case_type == CasingTypeEnum.Title:
        description = description.title()
    else:
        description = description.lower()
    return description
Transforms the case of the expression description, based on options Args: description: The description to transform case_type: The casing type that controls the output casing Returns: The transformed description with proper casing
def saved_xids(self):
    """Return previously saved xids (lazily loaded)."""
    if self._saved_xids is not None:
        return self._saved_xids
    self._saved_xids = []
    if self.debug:
        # In debug mode, reload any xids persisted by a previous run
        # into the temp directory.
        cache_file = os.path.join(self.tcex.args.tc_temp_path, 'xids-saved')
        if os.path.isfile(cache_file) and os.access(cache_file, os.R_OK):
            with open(cache_file) as handle:
                self._saved_xids = handle.read().splitlines()
    return self._saved_xids
Return previously saved xids.
def cloudInCells(x, y, bins, weights=None):
    """
    Use cloud-in-cells binning algorithm. Only valid for equal-spaced linear bins.

    http://ta.twi.tudelft.nl/dv/users/Lemmens/MThesis.TTH/chapter4.html#tth_sEc2
    http://www.gnu.org/software/archimedes/manual/html/node29.html

    INPUTS:
        x: array of x-values
        y: array of y-values
        bins: [bins_x, bins_y] format, where bins_x corresponds to the bin edges along x-axis
        weights[None]: optionally assign a weight to each entry
    OUTPUTS:
        histogram: 2D array of CIC-binned weights (rows = y, cols = x)
        bins_x: x bin edges, extended by one under/overflow bin on each side
        bins_y: y bin edges, extended by one under/overflow bin on each side
    """
    # For consistency, the variable names should be changed in this
    # function, but low priority...
    x_bins = np.array(bins[0])
    delta_x = x_bins[1] - x_bins[0]
    # Overflow and underflow bins so edge entries can spill outward.
    x_bins = np.insert(x_bins, 0, x_bins[0] - delta_x)
    x_bins = np.append(x_bins, x_bins[-1] + delta_x)
    y_bins = np.array(bins[1])
    delta_y = y_bins[1] - y_bins[0]
    y_bins = np.insert(y_bins, 0, y_bins[0] - delta_y)
    y_bins = np.append(y_bins, y_bins[-1] + delta_y)

    x_bound_cut = np.logical_and(x >= x_bins[0], x <= x_bins[-1])
    y_bound_cut = np.logical_and(y >= y_bins[0], y <= y_bins[-1])
    bound_cut = np.logical_and(x_bound_cut, y_bound_cut)

    # Fixed: the original test was ``not np.any(weights)``, which also
    # triggered for an explicit all-zero weight array and silently replaced
    # it with unit weights. Only substitute unit weights when no weights
    # were supplied at all.
    if weights is None:
        bound_weights = np.ones(len(x))[bound_cut]
    else:
        bound_weights = np.array(weights)[bound_cut]

    x_vals = np.array(x)[bound_cut]
    y_vals = np.array(y)[bound_cut]

    x_width = x_bins[1] - x_bins[0]
    y_width = y_bins[1] - y_bins[0]

    x_centers = x_bins[0: -1] + (0.5 * x_width)
    y_centers = y_bins[0: -1] + (0.5 * y_width)

    # Signed offset of each point from the center of its host cell.
    dx = x_vals - x_centers[np.digitize(x_vals, x_bins) - 1]
    dy = y_vals - y_centers[np.digitize(y_vals, y_bins) - 1]

    # Fractional weight assigned to the upper (u*) / lower (l*) neighbors.
    ux = ((dx / x_width) * (dx >= 0)) + \
         ((1. + (dx / x_width)) * (dx < 0))
    lx = 1. - ux

    uy = ((dy / y_width) * (dy >= 0)) + \
         ((1. + (dy / y_width)) * (dy < 0))
    ly = 1. - uy

    new_x_vals = []
    new_y_vals = []
    cell_weights = []

    # 4 corners: each point is smeared over the four neighboring cells.
    new_x_vals.append(x_vals + (0.5 * x_width))
    new_y_vals.append(y_vals + (0.5 * y_width))
    cell_weights.append(bound_weights * ux * uy)

    new_x_vals.append(x_vals + (0.5 * x_width))
    new_y_vals.append(y_vals - (0.5 * y_width))
    cell_weights.append(bound_weights * ux * ly)

    new_x_vals.append(x_vals - (0.5 * x_width))
    new_y_vals.append(y_vals + (0.5 * y_width))
    cell_weights.append(bound_weights * lx * uy)

    new_x_vals.append(x_vals - (0.5 * x_width))
    new_y_vals.append(y_vals - (0.5 * y_width))
    cell_weights.append(bound_weights * lx * ly)

    new_x_vals = np.concatenate(new_x_vals)
    new_y_vals = np.concatenate(new_y_vals)
    cell_weights = np.concatenate(cell_weights)

    result = np.histogram2d(new_x_vals, new_y_vals,
                            bins=[x_bins, y_bins],
                            weights=cell_weights)[0]

    # Strip the under/overflow rows and columns added above, transposing so
    # rows correspond to y and columns to x.
    result = np.transpose(result[1: result.shape[0] - 1])[1: result.shape[1] - 1]

    return result, x_bins, y_bins
Use cloud-in-cells binning algorithm. Only valid for equal-spaced linear bins. http://ta.twi.tudelft.nl/dv/users/Lemmens/MThesis.TTH/chapter4.html#tth_sEc2 http://www.gnu.org/software/archimedes/manual/html/node29.html INPUTS: x: array of x-values y: array or y-values bins: [bins_x, bins_y] format, where bins_x corresponds to the bin edges along x-axis weights[None]: optionally assign a weight to each entry OUTPUTS: histogram: bins_x: bins_y:
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position so it can be
    read again on redirect.
    """
    position = prepared_request._body_position
    body_seek = getattr(prepared_request.body, 'seek', None)
    # A rewind is only possible when the body is seekable and a starting
    # offset was recorded when the request was first prepared.
    if body_seek is None or not isinstance(position, integer_types):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")
    try:
        body_seek(position)
    except (IOError, OSError):
        raise UnrewindableBodyError("An error occurred when rewinding request "
                                    "body for redirect.")
Move file pointer back to its recorded starting position so it can be read again on redirect.
def _request_reports(self, resource_param_name, resources, endpoint_name): """Sends multiples requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of of the resources. endpoint_name: VirusTotal endpoint URL suffix. Returns: A list of the responses. """ params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources] return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
Sends multiple requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of the resources. endpoint_name: VirusTotal endpoint URL suffix. Returns: A list of the responses.
def contact_addresses(self):
    """
    Provides a reference to contact addresses used by this server.

    Obtain a reference to manipulate or iterate existing contact
    addresses::

        >>> from smc.elements.servers import ManagementServer
        >>> mgt_server = ManagementServer.objects.first()
        >>> for contact_address in mgt_server.contact_addresses:
        ...   contact_address
        ...
        ContactAddress(location=Default,addresses=[u'1.1.1.1'])
        ContactAddress(location=foolocation,addresses=[u'12.12.12.12'])

    :rtype: MultiContactAddress
    """
    relation_href = self.get_relation('contact_addresses')
    return MultiContactAddress(href=relation_href,
                               type=self.typeof,
                               name=self.name)
Provides a reference to contact addresses used by this server. Obtain a reference to manipulate or iterate existing contact addresses:: >>> from smc.elements.servers import ManagementServer >>> mgt_server = ManagementServer.objects.first() >>> for contact_address in mgt_server.contact_addresses: ... contact_address ... ContactAddress(location=Default,addresses=[u'1.1.1.1']) ContactAddress(location=foolocation,addresses=[u'12.12.12.12']) :rtype: MultiContactAddress
def reindex(self):
    '''reset counters and indexes'''
    total = self.rally_count()
    for idx in range(total):
        point = self.rally_points[idx]
        # rally_count() is re-queried per point, mirroring the original
        # behavior (the value is expected to be stable during the loop).
        point.count = self.rally_count()
        point.idx = idx
    self.last_change = time.time()
reset counters and indexes
def sanitize(self):
    '''
    Check if the current settings conform to the LISP specifications
    and fix them where possible.
    '''
    super(EncapsulatedControlMessage, self).sanitize()

    # S: the Security bit. When set to 1, authentication information
    # follows the Map-Reply; handling that data is not implemented.
    if not isinstance(self.security, bool):
        raise ValueError('Security flag must be a boolean')
    if self.security:
        raise NotImplementedError('Handling security data is not '
                                  'implemented yet')

    # The remaining header bits ("D" DDT-originated, "R" for-RTR and
    # "N" relayed-by-RTR) are plain booleans; validate each in turn.
    checks = ((self.ddt_originated, 'DDT originated flag must be a boolean'),
              (self.for_rtr, 'For-RTR flag must be a boolean'),
              (self.relayed_by_rtr, 'Relayed-by-RTR flag must be a boolean'))
    for value, message in checks:
        if not isinstance(value, bool):
            raise ValueError(message)
Check if the current settings conform to the LISP specifications and fix them where possible.
def _validate_list(self, input_list, schema_list, path_to_root, object_title=''):
    ''' a helper method for recursively validating items in a list

    :return: input_list
    '''
    # construct rules for list and items
    # (all concrete indexes are normalized to [0] so that every element of
    # a list shares the rules registered for the first element)
    rules_path_to_root = re.sub('\[\d+\]', '[0]', path_to_root)
    list_rules = self.keyMap[rules_path_to_root]
    initial_key = rules_path_to_root + '[0]'
    item_rules = self.keyMap[initial_key]

    # construct list error report template
    list_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': list_rules,
        'failed_test': 'value_datatype',
        'input_path': path_to_root,
        'error_value': 0,
        'error_code': 4001
    }

    # validate list rules
    if 'min_size' in list_rules.keys():
        if len(input_list) < list_rules['min_size']:
            list_error['failed_test'] = 'min_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4031
            raise InputValidationError(list_error)
    if 'max_size' in list_rules.keys():
        if len(input_list) > list_rules['max_size']:
            list_error['failed_test'] = 'max_size'
            list_error['error_value'] = len(input_list)
            list_error['error_code'] = 4032
            raise InputValidationError(list_error)

    # construct item error report template
    item_error = {
        'object_title': object_title,
        'model_schema': self.schema,
        'input_criteria': item_rules,
        'failed_test': 'value_datatype',
        'input_path': initial_key,
        'error_value': None,
        'error_code': 4001
    }

    # validate datatype of items
    for i in range(len(input_list)):
        input_path = path_to_root + '[%s]' % i
        item = input_list[i]
        item_error['input_path'] = input_path
        try:
            # an item whose class is not registered in _datatype_classes is
            # reported as a datatype failure
            item_index = self._datatype_classes.index(item.__class__)
        except:
            item_error['error_value'] = item.__class__.__name__
            raise InputValidationError(item_error)
        item_type = self._datatype_names[item_index]
        item_error['error_value'] = item
        if item_rules['value_datatype'] == 'null':
            # 'null' rules accept any datatype
            pass
        else:
            if item_type != item_rules['value_datatype']:
                raise InputValidationError(item_error)

        # call appropriate validation sub-routine for datatype of item
        # (results are written back, so items may be normalized in place)
        if item_type == 'boolean':
            input_list[i] = self._validate_boolean(item, input_path, object_title)
        elif item_type == 'number':
            input_list[i] = self._validate_number(item, input_path, object_title)
        elif item_type == 'string':
            input_list[i] = self._validate_string(item, input_path, object_title)
        elif item_type == 'map':
            input_list[i] = self._validate_dict(item, schema_list[0], input_path, object_title)
        elif item_type == 'list':
            input_list[i] = self._validate_list(item, schema_list[0], input_path, object_title)

    # validate unique values in list
    if 'unique_values' in list_rules.keys():
        if len(set(input_list)) < len(input_list):
            list_error['failed_test'] = 'unique_values'
            list_error['error_value'] = input_list
            list_error['error_code'] = 4033
            raise InputValidationError(list_error)

    # TODO: validate top-level item values against identical to reference

    # TODO: run lambda function and call validation url

    return input_list
a helper method for recursively validating items in a list :return: input_list
def stop(self):
    """Stops the instance.

    :raises RuntimeError: has not been started.
    :raises TypeError: :meth:`run` is not canonical.
    """
    if not self.is_running():
        raise RuntimeError('Not started')
    # Detach the generator before resuming it so the instance is marked
    # stopped even if run() misbehaves below.
    generator = self._running
    self._running = None
    try:
        next(generator)
    except StopIteration:
        # Expected: a canonical run() finishes right after its single yield.
        return
    raise TypeError('run() must yield just one time')
Stops the instance. :raises RuntimeError: has not been started. :raises TypeError: :meth:`run` is not canonical.
def double_hash_encode_ngrams(ngrams,    # type: Iterable[str]
                              keys,      # type: Sequence[bytes]
                              ks,        # type: Sequence[int]
                              l,         # type: int
                              encoding   # type: str
                              ):
    # type: (...) -> bitarray
    """ Computes the double hash encoding of the ngrams with the given keys.

    Using the method from:
    Schnell, R., Bachteler, T., & Reiher, J. (2011).
    A Novel Error-Tolerant Anonymous Linking Code.

    :param ngrams: list of n-grams to be encoded
    :param keys: hmac secret keys for md5 and sha1 as bytes
    :param ks: ks[i] is k value to use for ngram[i]
    :param l: length of the output bitarray
    :param encoding: the encoding to use when turning the ngrams to bytes

    :return: bitarray of length l with the bits set which correspond to
             the encoding of the ngrams
    """
    key_sha1, key_md5 = keys
    bf = bitarray(l)
    bf.setall(False)
    for ngram, k in zip(ngrams, ks):
        payload = ngram.encode(encoding=encoding)
        # Two independent base hashes; bit positions are derived as
        # h1 + i*h2 (mod l) for i in 0..k-1 (double hashing scheme).
        h1 = int(hmac.new(key_sha1, payload, sha1).hexdigest(), 16) % l
        h2 = int(hmac.new(key_md5, payload, md5).hexdigest(), 16) % l
        for i in range(k):
            bf[(h1 + i * h2) % l] = 1
    return bf
Computes the double hash encoding of the ngrams with the given keys. Using the method from: Schnell, R., Bachteler, T., & Reiher, J. (2011). A Novel Error-Tolerant Anonymous Linking Code. http://grlc.german-microsimulation.de/wp-content/uploads/2017/05/downloadwp-grlc-2011-02.pdf :param ngrams: list of n-grams to be encoded :param keys: hmac secret keys for md5 and sha1 as bytes :param ks: ks[i] is k value to use for ngram[i] :param l: length of the output bitarray :param encoding: the encoding to use when turning the ngrams to bytes :return: bitarray of length l with the bits set which correspond to the encoding of the ngrams
def difference(self, other, joinBy=None, exact=False):
    """ *Wrapper of* ``DIFFERENCE``

    DIFFERENCE is a binary, non-symmetric operator that produces one
    sample in the result for each sample of the first operand, by keeping
    the same metadata of the first operand sample and only those regions
    (with their schema and values) of the first operand sample which do
    not intersect with any region in the second operand sample (also known
    as negative regions)

    :param other: GMQLDataset
    :param joinBy: (optional) list of metadata attributes. It is used to
           extract subsets of samples on which to apply the operator:
           only those samples in the current and other dataset that have
           the same value for each specified attribute are considered
           when performing the operation
    :param exact: boolean. If true, the regions are considered as
           intersecting only if their coordinates are exactly the same
    :return: a new GMQLDataset

    Example of usage. We compute the exact difference between
    Example_Dataset_1 and Example_Dataset_2, considering only the samples
    with same `antibody`::

        import gmql as gl

        d1 = gl.get_example_dataset("Example_Dataset_1")
        d2 = gl.get_example_dataset("Example_Dataset_2")

        result = d1.difference(other=d2, exact=True, joinBy=['antibody'])
    """
    if isinstance(other, GMQLDataset):
        # NOTE: name mangling (self.__index / other.__index) works here
        # because both objects are GMQLDataset instances.
        other_idx = other.__index
    else:
        raise TypeError("other must be a GMQLDataset. "
                        "{} was provided".format(type(other)))

    if isinstance(joinBy, list) and \
            all([isinstance(x, str) for x in joinBy]):
        metaJoinCondition = Some(self.opmng.getMetaJoinCondition(joinBy))
    elif joinBy is None:
        metaJoinCondition = none()
    else:
        raise TypeError("joinBy must be a list of strings. "
                        "{} was provided".format(type(joinBy)))

    if not isinstance(exact, bool):
        raise TypeError("exact must be a boolean. "
                        "{} was provided".format(type(exact)))

    new_index = self.opmng.difference(self.__index, other_idx,
                                      metaJoinCondition, exact)

    new_local_sources, new_remote_sources = self.__combine_sources(self, other)
    new_location = self.__combine_locations(self, other)

    return GMQLDataset(index=new_index, location=new_location,
                       local_sources=new_local_sources,
                       remote_sources=new_remote_sources,
                       meta_profile=self.meta_profile)
*Wrapper of* ``DIFFERENCE`` DIFFERENCE is a binary, non-symmetric operator that produces one sample in the result for each sample of the first operand, by keeping the same metadata of the first operand sample and only those regions (with their schema and values) of the first operand sample which do not intersect with any region in the second operand sample (also known as negative regions) :param other: GMQLDataset :param joinBy: (optional) list of metadata attributes. It is used to extract subsets of samples on which to apply the operator: only those samples in the current and other dataset that have the same value for each specified attribute are considered when performing the operation :param exact: boolean. If true, the the regions are considered as intersecting only if their coordinates are exactly the same :return: a new GMQLDataset Example of usage. We compute the exact difference between Example_Dataset_1 and Example_Dataset_2, considering only the samples with same `antibody`:: import gmql as gl d1 = gl.get_example_dataset("Example_Dataset_1") d2 = gl.get_example_dataset("Example_Dataset_2") result = d1.difference(other=d2, exact=True, joinBy=['antibody'])
def _pack(formatstring, value):
    """Pack a value into a bytestring.

    Uses the built-in :mod:`struct` Python module.

    Args:
        * formatstring (str): String for the packing. See the :mod:`struct`
          module for details.
        * value (depends on formatstring): The value to be packed

    Returns:
        A bytestring (str).

    Raises:
        ValueError

    Note that the :mod:`struct` module produces byte buffers for Python3,
    but bytestrings for Python2. This is compensated for automatically.
    """
    _checkString(formatstring, description='formatstring', minlength=1)

    try:
        result = struct.pack(formatstring, value)
    # Fixed: a bare ``except:`` also swallowed KeyboardInterrupt and
    # SystemExit; Exception still covers struct.error and TypeError.
    except Exception:
        errortext = 'The value to send is probably out of range, as the num-to-bytestring conversion failed.'
        errortext += ' Value: {0!r} Struct format code is: {1}'
        raise ValueError(errortext.format(value, formatstring))

    if sys.version_info[0] > 2:
        return str(result, encoding='latin1')  # Convert types to make it Python3 compatible
    return result
Pack a value into a bytestring. Uses the built-in :mod:`struct` Python module. Args: * formatstring (str): String for the packing. See the :mod:`struct` module for details. * value (depends on formatstring): The value to be packed Returns: A bytestring (str). Raises: ValueError Note that the :mod:`struct` module produces byte buffers for Python3, but bytestrings for Python2. This is compensated for automatically.
def batch_run_many(player, positions, batch_size=100):
    """Run the network over ``positions`` in fixed-size batches.

    Used to avoid a memory overflow issue when running the network on too
    many positions at once.
    TODO: This should be a member function of player.network?"""
    prob_batches = []
    value_batches = []
    for start in range(0, len(positions), batch_size):
        probs, values = player.network.run_many(positions[start:start + batch_size])
        prob_batches.append(probs)
        value_batches.append(values)
    return (np.concatenate(prob_batches, axis=0),
            np.concatenate(value_batches, axis=0))
Used to avoid a memory overflow issue when running the network on too many positions. TODO: This should be a member function of player.network?
def on_channel_flow(self, method):
    """When RabbitMQ toggles channel flow, update the state and invoke the
    matching callback, if one is registered.

    :param pika.spec.Channel.Flow method: The Channel flow frame
    """
    if method.active:
        LOGGER.info('Channel flow is active (READY)')
        self.state = self.STATE_READY
        callback = self.on_ready
    else:
        LOGGER.warning('Channel flow is inactive (BLOCKED)')
        self.state = self.STATE_BLOCKED
        callback = self.on_unavailable
    if callback:
        callback(self)
When RabbitMQ indicates the connection is unblocked, set the state appropriately. :param pika.spec.Channel.Flow method: The Channel flow frame
def check_and_make_label(lbl, lineno):
    """ Checks if the given label (or line number) is valid and, if so,
    returns a label object.
    :param lbl: Line number of label (string)
    :param lineno: Line number in the basic source code for error reporting
    :return: Label object or None if error.
    """
    if isinstance(lbl, float):
        # Numeric line labels must be whole numbers; e.g. 10.5 is invalid.
        if lbl != int(lbl):
            syntax_error(lineno, 'Line numbers must be integers.')
            return None
        id_ = str(int(lbl))
    else:
        id_ = lbl
    return global_.SYMBOL_TABLE.access_label(id_, lineno)
Checks if the given label (or line number) is valid and, if so, returns a label object. :param lbl: Line number of label (string) :param lineno: Line number in the basic source code for error reporting :return: Label object or None if error.
def sum(arrays, masks=None, dtype=None, out=None, zeros=None, scales=None):
    """Combine arrays by addition, with masks and offsets.

    Arrays and masks are a list of array objects. All input arrays
    have the same shape. If present, the masks have the same shape
    also.

    The function returns an array with one more dimension than the
    inputs and with size (3, shape). out[0] contains the sum,
    out[1] the variance and out[2] the number of points used.

    :param arrays: a list of arrays
    :param masks: a list of mask arrays, True values are masked
    :param dtype: data type of the output
    :param out: optional output, with one more axis than the input arrays
    :return: sum, variance of the sum and number of points stored

    Example:
        >>> import numpy
        >>> image = numpy.array([[1., 3.], [1., -1.4]])
        >>> inputs = [image, image + 1]
        >>> sum(inputs)
        array([[[ 1.5,  3.5],
                [ 1.5, -0.9]],
        <BLANKLINE>
               [[ 0.5,  0.5],
                [ 0.5,  0.5]],
        <BLANKLINE>
               [[ 2. ,  2. ],
                [ 2. ,  2. ]]])

    """
    # NOTE: this function intentionally shadows the builtin ``sum`` within
    # this module; it delegates to the C-level sum combine method.
    return generic_combine(intl_combine.sum_method(), arrays, masks=masks,
                           dtype=dtype, out=out, zeros=zeros, scales=scales)
Combine arrays by addition, with masks and offsets. Arrays and masks are a list of array objects. All input arrays have the same shape. If present, the masks have the same shape also. The function returns an array with one more dimension than the inputs and with size (3, shape). out[0] contains the sum, out[1] the variance and out[2] the number of points used. :param arrays: a list of arrays :param masks: a list of mask arrays, True values are masked :param dtype: data type of the output :param out: optional output, with one more axis than the input arrays :return: sum, variance of the sum and number of points stored Example: >>> import numpy >>> image = numpy.array([[1., 3.], [1., -1.4]]) >>> inputs = [image, image + 1] >>> sum(inputs) array([[[ 1.5, 3.5], [ 1.5, -0.9]], <BLANKLINE> [[ 0.5, 0.5], [ 0.5, 0.5]], <BLANKLINE> [[ 2. , 2. ], [ 2. , 2. ]]])
def get_event_logs(self, request_filter=None, log_limit=20, iterator=True):
    """Retrieve event logs from the Event_Log service.

    Example::

        event_mgr = SoftLayer.EventLogManager(env.client)
        request_filter = event_mgr.build_filter(date_min="01/01/2019", date_max="02/01/2019")
        logs = event_mgr.get_event_logs(request_filter)
        for log in logs:
            print("Event Name: {}".format(log['eventName']))

    :param dict request_filter: filter dict
    :param int log_limit: number of results to fetch per API call
    :param bool iterator: when False, a single API call returns at most
        log_limit results; when True, API calls are repeated until every
        log has been retrieved (there may be a lot of these)
    :returns: list of event logs, or a python generator object when
        iterator=True
    """
    if not iterator:
        # one-shot request capped at log_limit results
        return self.client.call('Event_Log', 'getAllObjects',
                                filter=request_filter, limit=log_limit)
    # iter_call pages through every matching log and yields lazily
    return self.client.iter_call('Event_Log', 'getAllObjects',
                                 filter=request_filter, limit=log_limit)
Returns a list of event logs Example:: event_mgr = SoftLayer.EventLogManager(env.client) request_filter = event_mgr.build_filter(date_min="01/01/2019", date_max="02/01/2019") logs = event_mgr.get_event_logs(request_filter) for log in logs: print("Event Name: {}".format(log['eventName'])) :param dict request_filter: filter dict :param int log_limit: number of results to get in one API call :param bool iterator: False will only make one API call for log_limit results. True will keep making API calls until all logs have been retrieved. There may be a lot of these. :returns: List of event logs. If iterator=True, will return a Python generator object instead.
def _l2rgb(self, mode):
    """Expand a luminance image (L or LA) to RGB / RGBA.

    The single L band is replicated three times; when *mode* ends in
    "A" the alpha band is carried over as well. The result's ``bands``
    coordinate is set to the characters of *mode*.
    """
    self._check_modes(("L", "LA"))

    band_names = ["L", "L", "L"]
    if mode[-1] == "A":
        band_names.append("A")
    expanded = self.data.sel(bands=band_names)
    expanded["bands"] = list(mode)
    return expanded
Convert from L (black and white) to RGB.
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
    """
    Turns data filters off for particular analytes and samples.

    Parameters
    ----------
    filt : optional, str or array_like
        Name, partial name or list of names of filters. Supports
        partial matching. i.e. if 'cluster' is specified, all
        filters with 'cluster' in the name are deactivated.
        Defaults to all filters.
    analyte : optional, str or array_like
        Name or list of names of analytes. Defaults to all analytes.
    samples : optional, array_like or None
        Which samples to apply this filter to. If None, applies to all
        samples.
    subset : optional
        Subset to operate on; ignored when `samples` is given (samples
        takes precedence).
    show_status : bool
        If True, print the filter status after switching filters off.

    Returns
    -------
    None
    """
    if samples is not None:
        subset = self.make_subset(samples)
    samples = self._get_samples(subset)

    for s in samples:
        try:
            self.data[s].filt.off(analyte, filt)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed; failures stay best-effort.
            warnings.warn("filt.off failure in sample " + s)

    if show_status:
        self.filter_status(subset=subset)
    return
Turns data filters off for particular analytes and samples. Parameters ---------- filt : optional, str or array_like Name, partial name or list of names of filters. Supports partial matching. i.e. if 'cluster' is specified, all filters with 'cluster' in the name are deactivated. Defaults to all filters. analyte : optional, str or array_like Name or list of names of analytes. Defaults to all analytes. samples : optional, array_like or None Which samples to apply this filter to. If None, applies to all samples. Returns ------- None
def apply(self, func, ids=None, applyto='measurement', noneval=nan, setdata=False, output_format='dict', ID=None, **kwargs):
    '''
    Apply func to each of the specified measurements.

    Parameters
    ----------
    func : callable
        Accepts a Measurement object or a DataFrame.
    ids : hashable | iterable of hashables | None
        Keys of measurements to which func will be applied.
        If None is given apply to all measurements.
    applyto : 'measurement' | 'data'
        * 'measurement' : apply to measurements objects themselves.
        * 'data'        : apply to measurement associated data
    noneval : obj
        Value returned if applyto is 'data' but no data is available.
    setdata : bool
        Whether to set the data in the Measurement object.
        Used only if data is not already set.
    output_format : ['dict' | 'collection']
        * collection : keeps result as collection
        WARNING: For collection, func should return a copy of the
        measurement instance rather than the original measurement
        instance.
    ID : hashable, optional
        If given and output_format is 'collection', assigned as the
        new collection's ID.

    Returns
    -------
    Dictionary keyed by measurement keys containing the corresponding
    output of func or returns a collection (if output_format='collection').
    '''
    if ids is None:
        ids = self.keys()
    else:
        ids = to_list(ids)

    result = dict((i, self[i].apply(func, applyto, noneval, setdata))
                  for i in ids)

    if output_format != 'collection':
        # Default: plain dictionary of results
        return result

    can_keep_as_collection = all(
        isinstance(r, self._measurement_class) for r in result.values())
    if not can_keep_as_collection:
        raise TypeError(
            'Cannot turn output into a collection. The provided func must return results of type {}'.format(
                self._measurement_class))

    new_collection = self.copy()
    # Drop measurements that were not selected. NOTE: the original code
    # reused the name `ids` for this loop variable, clobbering the list
    # of selected keys; a distinct name is used here.
    for stale_key in [k for k in self.keys() if k not in ids]:
        new_collection.pop(stale_key)
    # Replace remaining entries with func's output
    for key in new_collection:
        new_collection[key] = result[key]
    if ID is not None:
        new_collection.ID = ID
    return new_collection
Apply func to each of the specified measurements. Parameters ---------- func : callable Accepts a Measurement object or a DataFrame. ids : hashable| iterable of hashables | None Keys of measurements to which func will be applied. If None is given apply to all measurements. applyto : 'measurement' | 'data' * 'measurement' : apply to measurements objects themselves. * 'data' : apply to measurement associated data noneval : obj Value returned if applyto is 'data' but no data is available. setdata : bool Whether to set the data in the Measurement object. Used only if data is not already set. output_format : ['dict' | 'collection'] * collection : keeps result as collection WARNING: For collection, func should return a copy of the measurement instance rather than the original measurement instance. Returns ------- Dictionary keyed by measurement keys containing the corresponding output of func or returns a collection (if output_format='collection').
def execute_task(f, args, kwargs, user_ns):
    """Execute ``f(*args, **kwargs)`` inside *user_ns* and return its result.

    The callable and its arguments are bound into the namespace under
    mangled ("parsl_"-prefixed) names, the call is evaluated with
    ``exec``, and the result is read back out of the namespace.

    :param f: callable to run
    :param args: positional arguments for *f*
    :param kwargs: keyword arguments for *f*
    :param user_ns: dict used as both globals and locals for the call
    :return: whatever *f* returned
    :raises: re-raises any exception raised during the call
    """
    # NOTE: the original assigned fname = getattr(f, '__name__', 'f') and
    # immediately overwrote it — the dead assignment has been removed.
    prefix = "parsl_"
    fname = prefix + "f"
    argname = prefix + "args"
    kwargname = prefix + "kwargs"
    resultname = prefix + "result"

    user_ns.update({fname: f,
                    argname: args,
                    kwargname: kwargs,
                    resultname: None})  # was seeded with the *string* name

    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname,
                                           argname, kwargname)
    try:
        exec(code, user_ns, user_ns)
    except Exception as e:
        logger.warning("Caught exception; will raise it: {}".format(e))
        raise  # bare raise preserves the original traceback intact
    else:
        return user_ns.get(resultname)
Deserialize the buffer and execute the task. Returns the result or raises the exception.
def _get_graph_title(self): """获取图像的title.""" start_time = datetime.fromtimestamp(int(self.timestamp_list[0])) end_time = datetime.fromtimestamp(int(self.timestamp_list[-1])) end_time = end_time.strftime('%H:%M:%S') title = "Timespan: %s —— %s" % (start_time, end_time) return title
获取图像的title.
def has_callback(obj, handle):
    """Return True if *handle* is currently registered as a callback on *obj*."""
    registered = obj._callbacks
    if not registered:
        return False
    # A bare Node means exactly one callback is registered
    if isinstance(registered, Node):
        return registered is handle
    return handle in registered
Return whether a callback is currently registered for an object.
def save_keywords(filename, xml):
    """Save keyword XML string to *filename*.

    The destination directory is created (including intermediate
    directories) if it does not already exist.

    :param filename: destination path for the XML file
    :param xml: XML document as a string
    """
    tmp_dir = os.path.dirname(filename)
    if tmp_dir:
        # exist_ok avoids the check-then-create race of the original
        # isdir()/mkdir() pair, and also handles nested directories.
        os.makedirs(tmp_dir, exist_ok=True)
    # Context manager guarantees the handle is closed even if write fails
    with open(filename, "w") as file_desc:
        file_desc.write(xml)
Save keyword XML to filename.
def set_Name(self, Name, SaveName=None, include=None, ForceUpdate=False):
    """Rename the instance, automatically refreshing its SaveName.

    The name should be a str without spaces or underscores (removed).
    Unless the SaveName was user-defined, it is regenerated whenever
    the Name changes.

    Parameters
    ----------
    Name : str
        Name of the instance, without ' ' or '_' (automatically removed)
    SaveName : None / str
        If provided, overrides the default name for saving
        (not recommended)
    include : list
        Controls how the default SaveName is generated; each element is
        a key str indicating whether that element should be present in
        the SaveName
    ForceUpdate : bool
        Force regeneration of the SaveName
    """
    # Record the new name, then delegate SaveName maintenance
    self._dall['Name'] = Name
    self.set_SaveName(SaveName=SaveName, include=include,
                      ForceUpdate=ForceUpdate)
Set the Name of the instance, automatically updating the SaveName The name should be a str without spaces or underscores (removed) When the name is changed, if SaveName (i.e. the name used for saving) was not user-defined, it is automatically updated Parameters ---------- Name : str Name of the instance, without ' ' or '_' (automatically removed) SaveName : None / str If provided, overrides the default name for saving (not recommended) include: list Controls how the default SaveName is generated Each element of the list is a key str indicating whether an element should be present in the SaveName
def complete_hit(self, text, line, begidx, endidx):
    """Tab-complete the hit command against the known hit subcommands."""
    matches = []
    for command in PsiturkNetworkShell.hit_commands:
        if command.startswith(text):
            matches.append(command)
    return matches
Tab-complete hit command.
def fill_extents(self):
    """Compute the user-space bounding box of the area that a
    :meth:`fill` would ink, given the current path and fill parameters.

    An empty current path yields ``(0, 0, 0, 0)``. Surface dimensions
    and clipping are not taken into account.

    Contrast with :meth:`path_extents`, which is similar but reports
    non-zero extents for some paths with no inked area (such as a
    simple line segment). Computing the precise inked area honours the
    fill rule and therefore costs more, so :meth:`path_extents` may be
    preferable for performance when raw path bounds suffice.

    See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`.

    :return: A ``(x1, y1, x2, y2)`` tuple of floats: the left, top,
        right and bottom of the resulting extents, respectively.
    """
    box = ffi.new('double[4]')
    # Hand cairo a pointer to each of the four output slots
    cairo.cairo_fill_extents(
        self._pointer, box + 0, box + 1, box + 2, box + 3)
    self._check_status()
    return tuple(box)
Computes a bounding box in user-space coordinates covering the area that would be affected, (the "inked" area), by a :meth:`fill` operation given the current path and fill parameters. If the current path is empty, returns an empty rectangle ``(0, 0, 0, 0)``. Surface dimensions and clipping are not taken into account. Contrast with :meth:`path_extents` which is similar, but returns non-zero extents for some paths with no inked area, (such as a simple line segment). Note that :meth:`fill_extents` must necessarily do more work to compute the precise inked areas in light of the fill rule, so :meth:`path_extents` may be more desirable for sake of performance if the non-inked path extents are desired. See :meth:`fill`, :meth:`set_fill_rule` and :meth:`fill_preserve`. :return: A ``(x1, y1, x2, y2)`` tuple of floats: the left, top, right and bottom of the resulting extents, respectively.
def setCentralWidget(self, widget):
    """
    Sets the central widget for this button, disabling the button
    when no widget is supplied.

    :param      widget | <QWidget> or None
    """
    has_widget = widget is not None
    self.setEnabled(has_widget)
    self._popupWidget.setCentralWidget(widget)
Sets the central widget for this button. :param widget | <QWidget>