def init_db_conn(connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None):
    """
    Initialize a redis connection for each connection string defined in the
    configuration file
    """
    rpool = redis.ConnectionPool(host=HOST, port=PORT, db=DB, password=PASSWORD)
    r = redis.Redis(connection_pool=rpool)
    redis_pool.connections[connection_name] = RedisClient(r)
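# Usage sketch (hypothetical): wiring up one pool per entry of a config dict.
# `redis_pool` and `RedisClient` come from the surrounding module; the config
# shape below is an assumption for illustration only.
REDIS_SERVERS = {
    'default': {'HOST': 'localhost', 'PORT': 6379, 'DB': 0, 'PASSWORD': None},
    'cache':   {'HOST': 'localhost', 'PORT': 6379, 'DB': 1, 'PASSWORD': None},
}
for name, conf in REDIS_SERVERS.items():
    init_db_conn(name, **conf)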
def build_seasonal_transition_matrix(
    num_seasons, is_last_day_of_season, dtype,
    basis_change_matrix=None, basis_change_matrix_inv=None):
  """Build a function computing transitions for a seasonal effect model."""
  with tf.compat.v1.name_scope('build_seasonal_transition_matrix'):
    # If the season is changing, the transition matrix permutes the latent
    # state to shift all seasons up by a dimension, and sends the current
    # season's effect to the bottom.
    seasonal_permutation = np.concatenate(
        [np.arange(1, num_seasons), [0]], axis=0)
    seasonal_permutation_matrix = tf.constant(
        np.eye(num_seasons)[seasonal_permutation], dtype=dtype)

    # Optionally transform the transition matrix into a reparameterized space,
    # enforcing the zero-sum constraint for ConstrainedSeasonalStateSpaceModel.
    if basis_change_matrix is not None:
      seasonal_permutation_matrix = tf.matmul(
          basis_change_matrix,
          tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv))

    identity_matrix = tf.eye(
        tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype)

    def seasonal_transition_matrix(t):
      return tf.linalg.LinearOperatorFullMatrix(
          matrix=dist_util.pick_scalar_condition(
              is_last_day_of_season(t),
              seasonal_permutation_matrix,
              identity_matrix))

  return seasonal_transition_matrix
# NOTE: assumed decorator (from contextlib); the yield/try/finally pattern
# implies this function is used as a context manager.
@contextmanager
def tee_output_python():
    """Duplicate sys.stdout and sys.stderr to a new StringIO."""
    buffer = StringIO()
    out = CapturedStdout(buffer)
    orig_stdout, orig_stderr = sys.stdout, sys.stderr
    flush()
    sys.stdout = TeeingStreamProxy(sys.stdout, buffer)
    sys.stderr = TeeingStreamProxy(sys.stderr, buffer)
    try:
        yield out
    finally:
        flush()
        out.finalize()
        sys.stdout, sys.stderr = orig_stdout, orig_stderr
def to_slice(arr):
    """Test whether `arr` is an integer array that can be replaced by a slice

    Parameters
    ----------
    arr: numpy.array
        Numpy integer array

    Returns
    -------
    slice or None
        If `arr` could be converted to a slice, the slice is returned,
        otherwise `None` is returned

    See Also
    --------
    get_index_from_coord"""
    if isinstance(arr, slice):
        return arr
    if len(arr) == 1:
        return slice(arr[0], arr[0] + 1)
    step = np.unique(arr[1:] - arr[:-1])
    if len(step) == 1:
        return slice(arr[0], arr[-1] + step[0], step[0])
    return None  # non-uniform spacing cannot be expressed as a slice
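# A quick sanity check of the three cases, assuming numpy is available:
import numpy as np

assert to_slice(np.array([3])) == slice(3, 4)
assert to_slice(np.array([2, 4, 6])) == slice(2, 8, 2)  # uniform step of 2
assert to_slice(np.array([0, 1, 3])) is None            # irregular spacing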
def b58ToC32(b58check, version=-1):
    """
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d')
    'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7'
    >>> b58ToC32('3GgUssdoWh5QkoUDXKqT6LMESBDf8aqp2y')
    'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G'
    >>> b58ToC32('mvWRFPELmpCHSkFQ7o9EVdCd9eXeUTa9T8')
    'ST2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQYAC0RQ'
    >>> b58ToC32('2N8EgwcZq89akxb6mCTTKiHLVeXRpxjuy98')
    'SN2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKP6D2ZK9'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 22)
    'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 0)
    'S02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 31)
    'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 20)
    'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 26)
    'ST2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQYAC0RQ'
    >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 21)
    'SN2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKP6D2ZK9'
    """
    addr_version_byte, addr_bin, addr_checksum = keylib.b58check.b58check_unpack(b58check)
    addr_version = ord(addr_version_byte)
    addr_hash160 = addr_bin.encode('hex')

    stacks_version = None
    if version < 0:
        stacks_version = addr_version
        if ADDR_BITCOIN_TO_STACKS.get(addr_version) is not None:
            stacks_version = ADDR_BITCOIN_TO_STACKS[addr_version]
    else:
        stacks_version = version

    return c32address(stacks_version, addr_hash160)
def reset_kernel(self):
    """Reset kernel of current client."""
    client = self.get_current_client()
    if client is not None:
        self.switch_to_plugin()
        client.reset_namespace()
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """Conjugate gradient method for solving Ax = b (Demmel p 312)."""
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)

    fmtstr = "%10i %10.3g %10.3g"
    titlestr = "%10s %10s %10s"
    if verbose:
        print(titlestr % ("iter", "residual norm", "soln norm"))

    for i in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose:
            print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr
        p = r + mu * p

        rdotr = newrdotr
        if rdotr < residual_tol:
            break

    if callback is not None:
        callback(x)
    if verbose:
        print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))  # pylint: disable=W0631
    return x
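# Minimal usage sketch: `f_Ax` only needs to compute the matrix-vector
# product, so the matrix never has to be materialized explicitly.
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive-definite
b = np.array([1.0, 2.0])
x = cg(lambda p: A.dot(p), b, cg_iters=10)
assert np.allclose(A.dot(x), b)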
def spladder(job, inputs, bam_id, bai_id):
    """
    Run SplAdder to detect and quantify alternative splicing events

    :param JobFunctionWrappingJob job: passed by Toil automatically
    :param Namespace inputs: Stores input arguments (see main)
    :param str bam_id: FileStore ID of bam
    :param str bai_id: FileStore ID of bam index file
    :return: FileStore ID of SplAdder tarball
    :rtype: str
    """
    job.fileStore.logToMaster('SplAdder: {}'.format(inputs.uuid))
    work_dir = job.fileStore.getLocalTempDir()
    # Pull in alignment.bam from fileStore
    job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam'))
    job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai'))
    # Download input files
    download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf')
    download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle')
    # Call SplAdder
    command = ['--insert_ir=y', '--insert_es=y', '--insert_ni=y',
               '--remove_se=n', '--validate_sg=n',
               '-b', 'alignment.bam',
               '-o', '/data',
               '-a', 'annotation.gtf',
               '-v', 'y',
               '-c', '3',
               '-M', 'single',
               '-T', 'n',
               '-n', '50',
               '-P', 'y',
               '-p', 'n',
               '--sparse_bam', 'y']
    docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo,
                tool='jvivian/spladder:1.0')
    # Write output to fileStore and return ids
    output_pickle = os.path.join(work_dir, 'spladder', 'genes_graph_conf3.alignment.pickle')
    if not os.path.exists(output_pickle):
        # Fall back to searching the work dir for the genes graph pickle
        matches = []
        for root, dirnames, filenames in os.walk(work_dir):
            for filename in fnmatch.filter(filenames, '*genes_graph*'):
                matches.append(os.path.join(root, filename))
        if matches:
            output_pickle = matches[0]
        else:
            raise RuntimeError("Couldn't find genes file!")
    output_filt = os.path.join(work_dir, 'alignment.filt.hdf5')
    output = os.path.join(work_dir, 'alignment.hdf5')
    print os.listdir(work_dir)
    tarball_files('spladder.tar.gz', file_paths=[output_pickle, output_filt, output],
                  output_dir=work_dir)
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz'))
def get_stp_mst_detail_output_msti_port_interface_type(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_stp_mst_detail = ET.Element("get_stp_mst_detail")
    config = get_stp_mst_detail
    output = ET.SubElement(get_stp_mst_detail, "output")
    msti = ET.SubElement(output, "msti")
    instance_id_key = ET.SubElement(msti, "instance-id")
    instance_id_key.text = kwargs.pop('instance_id')
    port = ET.SubElement(msti, "port")
    interface_type = ET.SubElement(port, "interface-type")
    interface_type.text = kwargs.pop('interface_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def getAttributeName(self, index):
    """
    Returns the String which represents the attribute name
    """
    offset = self._get_attribute_offset(index)
    name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME]

    res = self.sb[name]
    # If the result is a (null) string, we need to look it up.
    if not res:
        attr = self.m_resourceIDs[name]
        if attr in public.SYSTEM_RESOURCES['attributes']['inverse']:
            res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr]
        else:
            # Attach the HEX Number, so for multiple missing attributes we do not run
            # into problems.
            res = 'android:UNKNOWN_SYSTEM_ATTRIBUTE_{:08x}'.format(attr)
    return res
def load_script(browser, url):
    """Ensure that JavaScript at a given URL is available to the browser."""
    if browser.current_url.startswith('file:'):
        url = 'https:' + url
    browser.execute_script("""
        var script_tag = document.createElement("script");
        script_tag.setAttribute("type", "text/javascript");
        script_tag.setAttribute("src", arguments[0]);
        document.getElementsByTagName("head")[0].appendChild(script_tag);
    """, url)
    sleep(1)
def logout(current):
    """
    Log out view. Simply deletes the session object.

    To show a logout message, the 'show_logout_message' field should be True
    in current.task_data; the message is passed in current.task_data under the
    'logout_message' field and its title under the 'logout_title' field.

        current.task_data['show_logout_message'] = True
        current.task_data['logout_title'] = 'Message Title'
        current.task_data['logout_message'] = 'Message'

    Args:
        current: :attr:`~zengine.engine.WFCurrent` object.
    """
    current.user.is_online(False)
    current.session.delete()
    current.output['cmd'] = 'logout'
    if current.task_data.get('show_logout_message', False):
        current.output['title'] = current.task_data.get('logout_title', None)
        current.output['msg'] = current.task_data.get('logout_message', None)
def partition_asymmetry(bif_point):
    '''Calculate the partition asymmetry at a bifurcation point
    as defined in https://www.ncbi.nlm.nih.gov/pubmed/18568015

    The number of nodes in each child tree is counted. The partition
    is defined as the ratio of the absolute difference and the sum
    of the number of bifurcations in the two daughter subtrees
    at each branch point.'''
    assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children'
    n = float(sum(1 for _ in bif_point.children[0].ipreorder()))
    m = float(sum(1 for _ in bif_point.children[1].ipreorder()))
    if n == m:
        return 0.0
    return abs(n - m) / abs(n + m)
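# Worked example: if the two daughter subtrees contain n = 6 and m = 2 nodes,
# the partition asymmetry is |6 - 2| / (6 + 2) = 0.5. The value is 0 for a
# perfectly balanced split and approaches 1 for a maximally unbalanced one.
n, m = 6.0, 2.0
assert abs(n - m) / abs(n + m) == 0.5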
def unsubscribe(self):
    """
    Completely stop all pubnub operations.
    """
    _LOGGER.info("PubNub unsubscribing")
    self._pubnub.unsubscribe_all()
    self._pubnub.stop()
    self._pubnub = None
def dump(context=os.environ):
    """Dump current environment as a dictionary

    Arguments:
        context (dict, optional): Current context, defaults to
            the current environment.

    """
    output = {}
    for key, value in context.iteritems():
        if not key.startswith("BE_"):
            continue
        output[key[3:].lower()] = value
    return output
def directories_in_directory(db, user_id, db_dirname):
    """
    Return subdirectories of a directory.
    """
    fields = _directory_default_fields()
    rows = db.execute(
        select(
            fields,
        ).where(
            _is_in_directory(directories, user_id, db_dirname),
        )
    )
    return [to_dict_no_content(fields, row) for row in rows]
# NOTE: assumed decorator (from contextlib); the yield/finally pattern and the
# ``ContextManager`` return annotation imply this is a context manager.
@contextmanager
def env(**kwargs: Union[Dict[str, str], None]) -> ContextManager:
    """Context handler to temporarily alter environment.

    If you supply a value of ``None``, then the associated key will be
    deleted from the environment.

    Args:
        kwargs: Environment variables to override

    Yields:
        Execution context with modified environment
    """
    old = os.environ.copy()
    try:
        os.environ.clear()
        # This apparent duplication is because ``putenv`` doesn’t update
        # ``os.environ``, and ``os.environ`` changes aren’t propagated to
        # subprocesses.
        for key, value in old.items():
            os.environ[key] = value  # NOQA: B003
            os.putenv(key, value)
        for key, value in kwargs.items():
            if value is None:
                del os.environ[key]
            else:
                os.environ[key] = value  # NOQA: B003
                os.putenv(key, value)
        yield
    finally:
        os.environ.clear()
        for key, value in old.items():
            os.environ[key] = value  # NOQA: B003
            os.putenv(key, value)
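# Usage sketch (assuming the context-manager decorator noted above): override
# one variable and drop another for the duration of the block; everything is
# restored afterwards.
import os

os.environ.update(KEEP='original', DROP='x')
with env(KEEP='overridden', DROP=None):
    assert os.environ['KEEP'] == 'overridden'
    assert 'DROP' not in os.environ
assert os.environ['KEEP'] == 'original' and os.environ['DROP'] == 'x'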
def printTPRegionParams(tpregion):
    """
    Note: assumes we are using TemporalMemory/TPShim in the TPRegion
    """
    tm = tpregion.getSelf()._tfdr
    print "------------PY TemporalMemory Parameters ------------------"
    print "numberOfCols             =", tm.columnDimensions
    print "cellsPerColumn           =", tm.cellsPerColumn
    print "minThreshold             =", tm.minThreshold
    print "activationThreshold      =", tm.activationThreshold
    print "newSynapseCount          =", tm.maxNewSynapseCount
    print "initialPerm              =", tm.initialPermanence
    print "connectedPerm            =", tm.connectedPermanence
    print "permanenceInc            =", tm.permanenceIncrement
    print "permanenceDec            =", tm.permanenceDecrement
    print "predictedSegmentDecrement=", tm.predictedSegmentDecrement
    print
def forward(self, X, training=False, device='cpu'):
    """Gather and concatenate the output from forward call with input data.

    The outputs from ``self.module_.forward`` are gathered on the
    compute device specified by ``device`` and then concatenated using
    PyTorch :func:`~torch.cat`. If multiple outputs are returned by
    ``self.module_.forward``, each one of them must be able to be
    concatenated this way.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, you should be able to pass:

        * numpy arrays
        * torch tensors
        * pandas DataFrame or Series
        * scipy sparse CSR matrices
        * a dictionary of the former three
        * a list/tuple of the former three
        * a Dataset

      If this doesn't work with your data, you have to pass a
      ``Dataset`` that can deal with the data.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      The device to store each inference result on.
      This defaults to CPU memory since there is generally more memory
      available there. For performance reasons this might be changed to
      a specific CUDA device, e.g. 'cuda:0'.

    Returns
    -------
    y_infer : torch tensor
      The result from the forward step.

    """
    y_infer = list(self.forward_iter(X, training=training, device=device))

    is_multioutput = len(y_infer) > 0 and isinstance(y_infer[0], tuple)
    if is_multioutput:
        return tuple(map(torch.cat, zip(*y_infer)))
    return torch.cat(y_infer)
def allocate(self):
    """Builds the context and the Hooks."""
    self.logger.debug("Allocating environment.")
    self._allocate()
    self.logger.debug("Environment successfully allocated.")
def read_lsm_floatpairs(fh):
    """Read LSM sequence of float pairs from file and return as list."""
    size = struct.unpack('<i', fh.read(4))[0]
    return fh.read_array('<2f8', count=size)
def attr_membership(attr_val, value_set, attr_type=basestring, modifier_fn=lambda x: x):
    """
    Helper function passed to netCDF4.Dataset.get_attributes_by_value
    Checks that `attr_val` exists, has the same type as `attr_type`,
    and is contained in `value_set`

    attr_val: The value of the attribute being checked
    attr_type: A type object that the `attr_val` is expected to have the same
               type as. If the type is not the same, a warning is issued and
               the code attempts to cast `attr_val` to the expected type.
    value_set: The set against which membership for `attr_val` is tested
    modifier_fn: A function to apply to attr_val prior to applying the set
                 membership test
    """
    if attr_val is None:
        return False

    if not isinstance(attr_val, attr_type):
        warnings.warn("Attribute is of type {}, {} expected. "
                      "Attempting to cast to expected type.".format(type(attr_val),
                                                                    attr_type))
        try:
            # if the expected type is basestring, try casting to unicode type
            # since basestring can't be instantiated
            if attr_type is basestring:
                new_attr_val = six.text_type(attr_val)
            else:
                new_attr_val = attr_type(attr_val)
        # catch casting errors
        except (ValueError, UnicodeEncodeError):
            warnings.warn("Could not cast to type {}".format(attr_type))
            return False
    else:
        new_attr_val = attr_val

    try:
        is_in_set = modifier_fn(new_attr_val) in value_set
    except Exception as e:
        warnings.warn('Could not apply modifier function {} to value: '
                      ' {}'.format(modifier_fn, e))
        return False

    return is_in_set
def next_child(self, child_pid):
    """Get the next child PID in the PID relation."""
    relation = self._get_child_relation(child_pid)
    if relation.index is not None:
        return self.children.filter(
            PIDRelation.index > relation.index
        ).ordered(ord='asc').first()
    else:
        return None
def battery_voltage(self):
    """ Returns voltage in mV """
    msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_MSB_REG)
    lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_LSB_REG)
    voltage_bin = msb << 4 | lsb & 0x0f
    return voltage_bin * 1.1
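# Worked example of the 12-bit ADC packing: the high byte holds the top 8
# bits, the low byte's bottom 4 bits complete the value, and each step is
# 1.1 mV. With illustrative register reads msb = 0xE1 and lsb = 0x05:
msb, lsb = 0xE1, 0x05
raw = msb << 4 | lsb & 0x0f          # 0xE15 = 3605
assert round(raw * 1.1, 1) == 3965.5  # ~3.97 V battery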
def getfield(f):
    """convert values from cgi.Field objects to plain values."""
    if isinstance(f, list):
        return [getfield(x) for x in f]
    else:
        return f.value
def get_matches(self, code, start=0, end=None, skip=None):
    """Search for `code` in source and yield `Match`\es

    `code` can contain wildcards.  ``${name}`` matches normal names and
    ``${?name}`` can match any expression.  You can use `Match.get_ast()`
    for getting the node that has matched a given pattern.
    """
    if end is None:
        end = len(self.source)
    for match in self._get_matched_asts(code):
        match_start, match_end = match.get_region()
        if start <= match_start and match_end <= end:
            if skip is not None and (skip[0] < match_end and skip[1] > match_start):
                continue
            yield match
def build_request_relationship(type, ids):
    """Build a relationship list.

    A relationship list is used to update relationships between two
    resources. Setting sensors on a label, for example, uses this
    function to construct the list of sensor ids to pass to the Helium
    API.

    Args:
        type(string): The resource type for the ids in the relationship
        ids([uuid] or uuid): Just one or a list of resource uuids to use
            in the relationship

    Returns:
        A ready to use relationship JSON object.
    """
    if ids is None:
        return {'data': None}
    elif isinstance(ids, str):
        return {'data': {'id': ids, 'type': type}}
    else:
        return {"data": [{"id": id, "type": type} for id in ids]}
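# The three input shapes and the payloads they produce:
assert build_request_relationship('sensor', None) == {'data': None}
assert build_request_relationship('sensor', 'abc-123') == \
    {'data': {'id': 'abc-123', 'type': 'sensor'}}
assert build_request_relationship('sensor', ['a', 'b']) == \
    {'data': [{'id': 'a', 'type': 'sensor'}, {'id': 'b', 'type': 'sensor'}]}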
def build_body(self):
    '''
    Builds the body of a syslog-ng configuration object.
    '''
    _increase_indent()
    body_array = [x.build() for x in self.iterable]
    nl = '\n' if self.append_extra_newline else ''
    if len(self.iterable) >= 1:
        body = self.join_body_on.join(body_array) + nl
    else:
        body = ''
    _decrease_indent()
    return body
def format_docstring(*args, **kwargs):
    """ Decorator for clean docstring formatting """
    def decorator(func):
        func.__doc__ = getdoc(func).format(*args, **kwargs)
        return func
    return decorator
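# Usage sketch: fill placeholders in a docstring at definition time. Assumes
# `getdoc` is `inspect.getdoc`, as imported by the surrounding module.
@format_docstring(default=10, unit='seconds')
def wait(t=10):
    """Wait for ``t`` {unit} (default: {default})."""

assert wait.__doc__ == 'Wait for ``t`` seconds (default: 10).'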
def do_find(self, params):
    """
\x1b[1mNAME\x1b[0m
        find - Find znodes whose path matches a given text

\x1b[1mSYNOPSIS\x1b[0m
        find [path] [match]

\x1b[1mOPTIONS\x1b[0m
        * path: the path (default: cwd)
        * match: the string to match in the paths (default: '')

\x1b[1mEXAMPLES\x1b[0m
        > find / foo
        /foo2
        /fooish/wayland
        /fooish/xorg
        /copy/foo

    """
    for path in self._zk.find(params.path, params.match, 0):
        self.show_output(path)
def _get_or_add(self, prop_name):
    """
    Return element returned by 'get_or_add_' method for *prop_name*.
    """
    get_or_add_method_name = 'get_or_add_%s' % prop_name
    get_or_add_method = getattr(self, get_or_add_method_name)
    element = get_or_add_method()
    return element
def prettyname(cls, attrib_name):
    """
    Returns the "pretty name" (capitalized, etc) of an attribute, by
    looking it up in ``cls.COLUMN_NAMES`` if it exists there.

    :param attrib_name: An attribute name.
    :type attrib_name: ``str``
    :rtype: ``str``
    """
    if attrib_name.startswith('tags.'):
        tagname = attrib_name[len('tags.'):]
        return '{} (tag)'.format(tagname)
    elif attrib_name in cls.COLUMN_NAMES:
        return cls.COLUMN_NAMES[attrib_name]
    else:
        return attrib_name
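# Behaviour sketch for the three branches. `prettyname` is presumably a
# classmethod in its original class; here it is called as a plain function
# against a hypothetical class carrying a COLUMN_NAMES mapping.
class Item(object):
    COLUMN_NAMES = {'created_at': 'Created At'}

assert prettyname(Item, 'tags.env') == 'env (tag)'
assert prettyname(Item, 'created_at') == 'Created At'
assert prettyname(Item, 'unknown') == 'unknown'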
def in_memory(self, value):
    """Add or remove self from global memory.

    :param bool value: if True(False) ensure self is(is not) in memory.
    """
    self_class = self.__class__
    memory = Annotation.__ANNOTATIONS_IN_MEMORY__
    if value:
        annotations_memory = memory.setdefault(self_class, set())
        annotations_memory.add(self)
    else:
        if self_class in memory:
            annotations_memory = memory[self_class]
            while self in annotations_memory:
                annotations_memory.remove(self)
            if not annotations_memory:
                del memory[self_class]
def set_ntp_servers(primary_server=None, secondary_server=None, deploy=False):
    '''
    Set the NTP servers of the Palo Alto proxy minion. A commit will be
    required before this is processed.

    CLI Example:

    Args:
        primary_server(str): The primary NTP server IP address or FQDN.

        secondary_server(str): The secondary NTP server IP address or FQDN.

        deploy (bool): If true then commit the full candidate configuration,
            if false only set pending change.

    .. code-block:: bash

        salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org
        salt '*' ntp.set_servers primary_server=0.pool.ntp.org secondary_server=1.pool.ntp.org
        salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org deploy=True

    '''
    ret = {}

    if primary_server:
        query = {'type': 'config',
                 'action': 'set',
                 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/'
                          'primary-ntp-server',
                 'element': '<ntp-server-address>{0}</ntp-server-address>'.format(primary_server)}
        ret.update({'primary_server': __proxy__['panos.call'](query)})

    if secondary_server:
        query = {'type': 'config',
                 'action': 'set',
                 'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/ntp-servers/'
                          'secondary-ntp-server',
                 'element': '<ntp-server-address>{0}</ntp-server-address>'.format(secondary_server)}
        ret.update({'secondary_server': __proxy__['panos.call'](query)})

    if deploy is True:
        ret.update(commit())

    return ret
def browserfamilies(self, tag=None, fromdate=None, todate=None):
    """Gets an overview of the browsers used to open links in your emails.
    This is only recorded when Link Tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/clicks/browserfamilies",
                     tag=tag, fromdate=fromdate, todate=todate)
async def article(
        self, title, description=None,
        *, url=None, thumb=None, content=None,
        id=None, text=None, parse_mode=(), link_preview=True,
        geo=None, period=60, contact=None, game=False, buttons=None):
    """
    Creates new inline result of article type.

    Args:
        title (`str`):
            The title to be shown for this result.

        description (`str`, optional):
            Further explanation of what this result means.

        url (`str`, optional):
            The URL to be shown for this result.

        thumb (:tl:`InputWebDocument`, optional):
            The thumbnail to be shown for this result.
            For now it has to be a :tl:`InputWebDocument` if present.

        content (:tl:`InputWebDocument`, optional):
            The content to be shown for this result.
            For now it has to be a :tl:`InputWebDocument` if present.
    """
    # TODO Does 'article' work always?
    # article, photo, gif, mpeg4_gif, video, audio,
    # voice, document, location, venue, contact, game
    result = types.InputBotInlineResult(
        id=id or '',
        type='article',
        send_message=await self._message(
            text=text, parse_mode=parse_mode, link_preview=link_preview,
            geo=geo, period=period,
            contact=contact,
            game=game,
            buttons=buttons
        ),
        title=title,
        description=description,
        url=url,
        thumb=thumb,
        content=content
    )
    if id is None:
        result.id = hashlib.sha256(bytes(result)).hexdigest()
    return result
def _getCellForNewSegment(self, colIdx):
    """
    Return the index of a cell in this column which is a good candidate
    for adding a new segment.

    When we have fixed size resources in effect, we ensure that we pick a
    cell which does not already have the max number of allowed segments. If
    none exists, we choose the least used segment in the column to
    re-allocate.

    :param colIdx which column to look at
    :returns: cell index
    """
    # Not fixed size CLA, just choose a cell randomly
    if self.maxSegmentsPerCell < 0:
        if self.cellsPerColumn > 1:
            # Don't ever choose the start cell (cell # 0) in each column
            i = self._random.getUInt32(self.cellsPerColumn - 1) + 1
        else:
            i = 0
        return i

    # Fixed size CLA, choose from among the cells that are below the maximum
    # number of segments.
    # NOTE: It is important NOT to always pick the cell with the fewest number
    # of segments. The reason is that if we always do that, we are more likely
    # to run into situations where we choose the same set of cell indices to
    # represent an 'A' in both context 1 and context 2. This is because the
    # cell indices we choose in each column of a pattern will advance in
    # lockstep (i.e. we pick cell indices of 1, then cell indices of 2, etc.).
    candidateCellIdxs = []
    if self.cellsPerColumn == 1:
        minIdx = 0
        maxIdx = 0
    else:
        minIdx = 1  # Don't include startCell in the mix
        maxIdx = self.cellsPerColumn - 1
    for i in xrange(minIdx, maxIdx + 1):
        numSegs = len(self.cells[colIdx][i])
        if numSegs < self.maxSegmentsPerCell:
            candidateCellIdxs.append(i)

    # If we found one, return with it. Note we need to use _random to maintain
    # correspondence with CPP code.
    if len(candidateCellIdxs) > 0:
        # candidateCellIdx = random.choice(candidateCellIdxs)
        candidateCellIdx = (
            candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))])
        if self.verbosity >= 5:
            print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (
                colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx]))
        return candidateCellIdx

    # All cells in the column are full, find a segment to free up
    candidateSegment = None
    candidateSegmentDC = 1.0
    # For each cell in this column
    for i in xrange(minIdx, maxIdx + 1):
        # For each segment in this cell
        for s in self.cells[colIdx][i]:
            dc = s.dutyCycle()
            if dc < candidateSegmentDC:
                candidateCellIdx = i
                candidateSegmentDC = dc
                candidateSegment = s

    # Free up the least used segment
    if self.verbosity >= 5:
        print ("Deleting segment #%d for cell[%d,%d] to make room for new "
               "segment" % (candidateSegment.segID, colIdx, candidateCellIdx))
        candidateSegment.debugPrint()
    self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment)
    self.cells[colIdx][candidateCellIdx].remove(candidateSegment)
    return candidateCellIdx
def get_op_traceback(self, op_name):
  """Get the traceback of an op in the latest version of the TF graph.

  Args:
    op_name: Name of the op.

  Returns:
    Creation traceback of the op, in the form of a list of 2-tuples:
      (file_path, lineno)

  Raises:
    ValueError: If the op with the given name cannot be found in the latest
      version of the graph that this SourceManager instance has received, or
      if this SourceManager instance has not received any graph traceback yet.
  """
  if not self._graph_traceback:
    raise ValueError('No graph traceback has been received yet.')

  for op_log_entry in self._graph_traceback.log_entries:
    if op_log_entry.name == op_name:
      return self._code_def_to_traceback_list(op_log_entry.code_def)
  raise ValueError(
      'No op named "%s" can be found in the graph of the latest version '
      '(%d).' % (op_name, self._graph_version))
def get_pending_reboot():
    '''
    Determine whether there is a reboot pending.

    .. versionadded:: 2016.11.0

    Returns:
        bool: ``True`` if the system is pending reboot, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        salt '*' system.get_pending_reboot
    '''
    # Order the checks for reboot pending in most to least likely.
    checks = (get_pending_update,
              get_pending_file_rename,
              get_pending_servermanager,
              get_pending_component_servicing,
              get_reboot_required_witnessed,
              get_pending_computer_name,
              get_pending_domain_join)

    for check in checks:
        if check():
            return True

    return False
def get_all_security_groups(groupnames=None, group_ids=None, filters=None,
                            region=None, key=None, keyid=None, profile=None):
    '''
    Return a list of all Security Groups matching the given criteria and
    filters.

    Note that the 'groupnames' argument only functions correctly for EC2
    Classic and default VPC Security Groups. To find groups by name in other
    VPCs you'll want to use the 'group-name' filter instead.

    Valid keys for the filters argument are:

    description - The description of the security group.
    egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service
        to which the security group allows access.
    group-id - The ID of the security group.
    group-name - The name of the security group.
    ip-permission.cidr - A CIDR range that has been granted permission.
    ip-permission.from-port - The start of port range for the TCP and UDP
        protocols, or an ICMP type number.
    ip-permission.group-id - The ID of a security group that has been granted
        permission.
    ip-permission.group-name - The name of a security group that has been
        granted permission.
    ip-permission.protocol - The IP protocol for the permission (tcp | udp |
        icmp or a protocol number).
    ip-permission.to-port - The end of port range for the TCP and UDP
        protocols, or an ICMP code.
    ip-permission.user-id - The ID of an AWS account that has been granted
        permission.
    owner-id - The AWS account ID of the owner of the security group.
    tag-key - The key of a tag assigned to the security group.
    tag-value - The value of a tag assigned to the security group.
    vpc-id - The ID of the VPC specified when the security group was created.

    CLI example::

        salt myminion boto_secgroup.get_all_security_groups filters='{group-name: mygroup}'
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)

    if isinstance(groupnames, six.string_types):
        groupnames = [groupnames]
    if isinstance(group_ids, six.string_types):
        group_ids = [group_ids]

    interesting = ['description', 'id', 'instances', 'name', 'owner_id',
                   'region', 'rules', 'rules_egress', 'tags', 'vpc_id']
    ret = []
    try:
        r = conn.get_all_security_groups(groupnames=groupnames,
                                         group_ids=group_ids,
                                         filters=filters)
        for g in r:
            n = {}
            for a in interesting:
                v = getattr(g, a, None)
                if a == 'region':
                    v = v.name
                elif a in ('rules', 'rules_egress'):
                    v = _parse_rules(g, v)
                elif a == 'instances':
                    v = [i.id for i in v()]
                n[a] = v
            ret += [n]
        return ret
    except boto.exception.BotoServerError as e:
        log.debug(e)
        return []
def _unpack_token_json(token):
    """
    Unpack a JSON-serialized JWT
    Returns (headers, payload, signatures, signing_inputs) on success
    Raises DecodeError on bad input
    """
    if not isinstance(token, dict):
        raise DecodeError("Not a dict")

    if not token.has_key('payload'):
        raise DecodeError("Missing 'payload' field")

    for k in ['header', 'signature']:
        if not token.has_key(k):
            raise DecodeError("Missing '{}' field".format(k))

        if not isinstance(token[k], list):
            raise DecodeError("Field '{}' is not a list".format(k))

    headers = []
    signatures = []
    signing_inputs = []
    payload = None

    try:
        headers = [base64url_decode(str(h)) for h in token['header']]
    except (TypeError, binascii.Error):
        raise DecodeError("Invalid header padding")

    try:
        payload_data = base64url_decode(str(token['payload']))
    except (TypeError, binascii.Error):
        raise DecodeError("Invalid payload padding")

    try:
        payload = json.loads(payload_data.decode('utf-8'))
    except ValueError as e:
        raise DecodeError('Invalid payload string: {}'.format(e))

    try:
        signatures = [base64url_decode(str(s)) for s in token['signature']]
    except (TypeError, binascii.Error):
        raise DecodeError("Invalid crypto padding")

    for header_b64 in token['header']:
        signing_inputs.append(b'{}.{}'.format(header_b64, token['payload']))

    return (headers, payload, signatures, signing_inputs)
def FunctionTimer(on_done=None):
    '''
    To check execution time of a function
    borrowed from https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d

    >>> def logger(details, args, kwargs):  # some function that uses the time output
    ...     print(details)
    ...
    >>> @FunctionTimer(on_done=logger)
    ... def foo(t=10):
    ...     print('foo executing...')
    ...     time.sleep(t)
    ...
    >>> @FunctionTimer(on_done=logger)
    ... def bar(t, n):
    ...     for i in range(n):
    ...         print('bar executing...')
    ...         time.sleep(1)
    ...     foo(t)
    ...
    >>> bar(3, 2)
    bar executing...
    bar executing...
    foo executing...
    ('foo', 3)
    ('bar', 5)
    '''
    def decfn(fn):
        def timed(*args, **kwargs):
            ts = time.time()
            result = fn(*args, **kwargs)
            te = time.time()
            if on_done:
                on_done((fn.__name__, int(te - ts)), args, kwargs)
            else:
                print('%r %d sec(s)' % (fn.__name__, (te - ts)))
            return result
        return timed
    return decfn
def checkPermissions(permissions=[], obj=None):
    """
    Checks if a user has permissions for a given object.

    Args:
        permissions: The permissions the current user must be compliant with
        obj: The object for which the permissions apply

    Returns:
        True if the user complies with all the permissions for the given
        object. Otherwise, it returns an empty value.
    """
    if not obj:
        return False
    sm = getSecurityManager()
    for perm in permissions:
        if not sm.checkPermission(perm, obj):
            return ''
    return True
def getcolor(spec):
    """
    Turn optional color string spec into an array.
    """
    if isinstance(spec, str):
        from matplotlib import colors
        return asarray(colors.hex2color(colors.cnames[spec]))
    else:
        return spec
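# Quick demo: named colors resolve to RGB triples; anything else passes
# through unchanged. Assumes matplotlib is installed and `asarray` is
# numpy's, as imported at module level.
print(getcolor('red'))            # -> array([1., 0., 0.])
print(getcolor([0.2, 0.4, 0.6]))  # -> [0.2, 0.4, 0.6], returned as-is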
def ensure_session_key(request):
    """
    Given a request return a session key that will be used. There may
    already be a session key associated, but if there is not, we force
    the session to create itself and persist between requests for the
    client behind the given request.
    """
    key = request.session.session_key
    if key is None:
        # @@@ Django forces us to handle session key collision amongst
        # multiple processes (not handled)
        request.session.save()
        # force session to persist for client
        request.session.modified = True
        key = request.session.session_key
    return key
def get(self, obj, key):
    """
    Retrieve 'key' from an instance of a class which previously exposed it.

    @param key: a hashable object, previously passed to L{Exposer.expose}.

    @return: the object which was exposed with the given name on obj's key.

    @raise MethodNotExposed: when the key in question was not exposed with
        this exposer.
    """
    if key not in self._exposed:
        raise MethodNotExposed()
    rightFuncs = self._exposed[key]
    T = obj.__class__
    seen = {}
    for subT in inspect.getmro(T):
        for name, value in subT.__dict__.items():
            for rightFunc in rightFuncs:
                if value is rightFunc:
                    if name in seen:
                        raise MethodNotExposed()
                    return value.__get__(obj, T)
            seen[name] = True
    raise MethodNotExposed()
def get_storage_credentials(key, read_only=False):
    """Authenticates a service account for reading and/or writing on a bucket.

    This uses the `google.oauth2.service_account` module to obtain "scoped
    credentials". These can be used with the `google.storage` module.

    TODO: docstring"""
    if read_only:
        scopes = ['https://www.googleapis.com/auth/devstorage.read_only']
    else:
        scopes = ['https://www.googleapis.com/auth/devstorage.read_write']
    credentials = service_account.Credentials.from_service_account_info(key)
    scoped_credentials = credentials.with_scopes(scopes)
    return scoped_credentials
def set_from_json(self, obj, json, models=None, setter=None):
    ''' Sets the value of this property from a JSON value.

    This method first separately extracts and removes any ``units`` field
    in the JSON, and sets the associated units property directly. The
    remaining JSON is then passed to the superclass ``set_from_json`` to
    be handled.

    Args:
        obj: (HasProps) : instance to set the property value on

        json: (JSON-value) : value to set to the attribute to

        models (dict or None, optional) :
            Mapping of model ids to models (default: None)

            This is needed in cases where the attributes to update also
            have values that have references.

        setter (ClientSession or ServerSession or None, optional) :
            This is used to prevent "boomerang" updates to Bokeh apps.
            (default: None)

            In the context of a Bokeh server application, incoming updates
            to properties will be annotated with the session that is
            doing the updating. This value is propagated through any
            subsequent change notifications that the update triggers.
            The session can compare the event setter to itself, and
            suppress any updates that originate from itself.

    Returns:
        None

    '''
    json = self._extract_units(obj, json)
    super(UnitsSpecPropertyDescriptor, self).set_from_json(obj, json, models, setter)
def stop(self, bIgnoreExceptions=True):
    """
    Stops debugging all processes.

    If the kill on exit mode is on, debugged processes are killed when the
    debugger is stopped. Otherwise when the debugger stops it detaches from
    all debugged processes and leaves them running (default). For more
    details see: L{__init__}

    @note: This method is better than L{detach_from_all} because it can
        gracefully handle the last debugging event before detaching.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching.
    """

    # Determine if we have a last debug event that we need to continue.
    try:
        event = self.lastEvent
        has_event = bool(event)
    except Exception:
        if not bIgnoreExceptions:
            raise
        e = sys.exc_info()[1]
        warnings.warn(str(e), RuntimeWarning)
        has_event = False

    # If we do...
    if has_event:

        # Disable all breakpoints in the process before resuming execution.
        try:
            pid = event.get_pid()
            self.disable_process_breakpoints(pid)
        except Exception:
            if not bIgnoreExceptions:
                raise
            e = sys.exc_info()[1]
            warnings.warn(str(e), RuntimeWarning)

        # Disable all breakpoints in the thread before resuming execution.
        try:
            tid = event.get_tid()
            self.disable_thread_breakpoints(tid)
        except Exception:
            if not bIgnoreExceptions:
                raise
            e = sys.exc_info()[1]
            warnings.warn(str(e), RuntimeWarning)

        # Resume execution.
        try:
            event.continueDebugEvent = win32.DBG_CONTINUE
            self.cont(event)
        except Exception:
            if not bIgnoreExceptions:
                raise
            e = sys.exc_info()[1]
            warnings.warn(str(e), RuntimeWarning)

    # Detach from or kill all debuggees.
    try:
        if self.__bKillOnExit:
            self.kill_all(bIgnoreExceptions)
        else:
            self.detach_from_all(bIgnoreExceptions)
    except Exception:
        if not bIgnoreExceptions:
            raise
        e = sys.exc_info()[1]
        warnings.warn(str(e), RuntimeWarning)

    # Cleanup the process snapshots.
    try:
        self.system.clear()
    except Exception:
        if not bIgnoreExceptions:
            raise
        e = sys.exc_info()[1]
        warnings.warn(str(e), RuntimeWarning)

    # Close all Win32 handles the Python garbage collector failed to close.
    self.force_garbage_collection(bIgnoreExceptions)
def get_history_item_for_tree_iter(self, child_tree_iter):
    """Returns the history item for a tree iter, compensating when the tree item is a dummy item

    :param Gtk.TreeIter child_tree_iter: Tree iter of row
    :rtype rafcon.core.execution.execution_history.HistoryItem:
    :return history tree item:
    """
    history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID]
    if history_item is None:  # is dummy item
        if self.history_tree_store.iter_n_children(child_tree_iter) > 0:
            child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0)
            history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID]
        else:
            logger.debug("A dummy history item should contain a respective real call element.")
    return history_item
def get_form(self, request, obj=None, **kwargs):
    """Returns modified form for TreeItem model.
    'Parent' field choices are built by sitetree itself.
    """
    if obj is not None and obj.parent is not None:
        self.previous_parent = obj.parent
        previous_parent_id = self.previous_parent.id
    else:
        previous_parent_id = None

    my_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id)
    form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs)
    my_choice_field.label = form.base_fields['parent'].label
    my_choice_field.help_text = form.base_fields['parent'].help_text
    my_choice_field.widget = form.base_fields['parent'].widget
    # Replace 'parent' TreeItem field with new appropriate one
    form.base_fields['parent'] = my_choice_field

    # Try to resolve all currently registered url names including those in namespaces.
    if not getattr(self, 'known_url_names', False):
        self.known_url_names = []
        self.known_url_rules = []
        resolver = get_resolver(get_urlconf())
        for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items():
            if ns != 'admin':
                self._stack_known_urls(ns_resolver.reverse_dict, ns)
        self._stack_known_urls(resolver.reverse_dict)
        self.known_url_rules = sorted(self.known_url_rules)

    form.known_url_names_hint = _(
        'You are seeing this warning because "URL as Pattern" option is active and pattern entered above '
        'seems to be invalid. Currently registered URL pattern names and parameters: ')
    form.known_url_names = self.known_url_names
    form.known_url_rules = self.known_url_rules
    return form
def load_file(self, filename):
    """Load file into treeview"""
    self.counter.clear()
    # python2 issues
    try:
        etree = ET.parse(filename)
    except ET.ParseError:
        parser = ET.XMLParser(encoding='UTF-8')
        etree = ET.parse(filename, parser)
    eroot = etree.getroot()

    self.remove_all()
    self.previewer.remove_all()
    self.widget_editor.hide_all()
    self.previewer.resource_paths.append(os.path.dirname(filename))
    for element in eroot:
        self.populate_tree('', eroot, element, from_file=True)

    children = self.treeview.get_children('')
    for child in children:
        self.draw_widget(child)
    self.previewer.show_selected(None, None)
def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None):
    """ Evaluate Henry's constant H at temperature T

    Parameters
    ----------
    T : float
        Temperature (with units), assumed to be in Kelvin if ``units == None``
    H : float
        Henry's constant
    Tderiv : float (optional)
        dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``.
    T0 : float
        Reference temperature, assumed to be in Kelvin if ``units == None``
        default: 298.15 K
    units : object (optional)
        object with attributes: kelvin (e.g. chempy.units.default_units)
    backend : module (optional)
        module with "exp", default: numpy, math

    """
    be = get_backend(backend)
    if units is None:
        K = 1
    else:
        K = units.Kelvin
    if T0 is None:
        T0 = 298.15*K
    return H * be.exp(Tderiv*(1/T - 1/T0))
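# Worked example of the extrapolation formula H(T) = H0 * exp(C * (1/T - 1/T0)),
# using plain floats (units=None). The numbers are illustrative, not tabulated
# reference data.
from math import exp

H0, C, T0 = 1.3e-3, 1700.0, 298.15  # hypothetical H at T0, and dln(H)/d(1/T)
T = 310.0
H_at_T = H0 * exp(C * (1/T - 1/T0))  # matches Henry_H_at_T(T, H0, C) for unitless input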
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    paramValue = ct.POINTER(ct.c_char)()
    ret = c_GetStringParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode)

    a = bytearray()
    if ret == 0:
        i = 0
        while paramValue[i] != b'\0':
            if sys.version_info[0] == 3:
                a.append(int.from_bytes(paramValue[i], 'big'))
            else:
                a.append(paramValue[i])
            i = i + 1
    if sys.version_info[0] == 3:
        a = str(a, 'utf-8')
    else:
        a = str(a)
    return ret, a
def get_recent_files(self):
    """Return a list of files opened by the project."""
    try:
        recent_files = self.CONF[WORKSPACE].get('main', 'recent_files',
                                                default=[])
    except EnvironmentError:
        return []
    for recent_file in recent_files[:]:
        if not os.path.isfile(recent_file):
            recent_files.remove(recent_file)
    return list(OrderedDict.fromkeys(recent_files))
def load_file(self):
    '''
    Loads combined SAR format logfile in ASCII format.

    :return: ``True`` if loading and parsing of file went fine, \
        ``False`` if it failed (at any point)
    '''
    daychunks = self.__split_file()
    if (daychunks):
        maxcount = len(self.__splitpointers)
        for i in range(maxcount):
            start = self.__splitpointers[i]
            end = None
            if (i < (maxcount - 1)):
                end = self.__splitpointers[i + 1]
            chunk = self.__get_chunk(start, end)
            parser = sarparse.Parser()
            cpu_usage, mem_usage, swp_usage, io_usage = \
                parser._parse_file(parser._split_file(chunk))
            self.__sarinfos[self.__get_part_date(chunk)] = {
                "cpu": cpu_usage,
                "mem": mem_usage,
                "swap": swp_usage,
                "io": io_usage
            }
            del(cpu_usage)
            del(mem_usage)
            del(swp_usage)
            del(io_usage)
            del(parser)
    return(True)
def get_loggers(self): '''Return a list of the logger methods: (debug, info, warn, error)''' return self.log.debug, self.log.info, self.log.warn, self.log.error
Return a list of the logger methods: (debug, info, warn, error)
def update(self, action: torch.Tensor) -> 'ChecklistStatelet': """ Takes an action index, updates checklist and returns an updated state. """ checklist_addition = (self.terminal_actions == action).float() new_checklist = self.checklist + checklist_addition new_checklist_state = ChecklistStatelet(terminal_actions=self.terminal_actions, checklist_target=self.checklist_target, checklist_mask=self.checklist_mask, checklist=new_checklist, terminal_indices_dict=self.terminal_indices_dict) return new_checklist_state
Takes an action index, updates checklist and returns an updated state.
def delete_by_hash(self, file_hash):
    """
    Remove file/archive by its `file_hash`.

    Args:
        file_hash (str): Hash, which is used to find the file in storage.

    Raises:
        IOError: If the file for given `file_hash` was not found in \
            storage.
    """
    full_path = self.file_path_from_hash(file_hash)

    return self.delete_by_path(full_path)
Remove file/archive by its `file_hash`.

Args:
    file_hash (str): Hash, which is used to find the file in storage.

Raises:
    IOError: If the file for given `file_hash` was not found in \
        storage.
def _cram_to_fastq_regions(regions, cram_file, dirs, data): """Convert CRAM files to fastq, potentially within sub regions. Returns multiple fastq files that can be merged back together. """ base_name = utils.splitext_plus(os.path.basename(cram_file))[0] work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep", "%s-parts" % base_name)) fnames = run_multicore(_cram_to_fastq_region, [(cram_file, work_dir, base_name, region, data) for region in regions], data["config"]) # check if we have paired or single end data if any(not _is_gzip_empty(p1) for p1, p2, s in fnames): out = [[p1, p2] for p1, p2, s in fnames] else: out = [[s] for p1, p2, s in fnames] return out, work_dir
Convert CRAM files to fastq, potentially within sub regions. Returns multiple fastq files that can be merged back together.
def dtypes(self): """ Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object """ from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object
def _read_as_dict(self): """ Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data """ data = list() for row in self._rows: row_data = OrderedDict() for i, header in enumerate(self.headers): row_data[header.cget('text')] = row[i].get() data.append(row_data) return data
Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data
def epoch_to_human_time(epoch_time): """Converts an epoch timestamp to human readable time. This essentially converts an output of get_current_epoch_time to an output of get_current_human_time Args: epoch_time: An integer representing an epoch timestamp in milliseconds. Returns: A time string representing the input time. None if input param is invalid. """ if isinstance(epoch_time, int): try: d = datetime.datetime.fromtimestamp(epoch_time / 1000) return d.strftime("%m-%d-%Y %H:%M:%S ") except ValueError: return None
Converts an epoch timestamp to human readable time. This essentially converts an output of get_current_epoch_time to an output of get_current_human_time Args: epoch_time: An integer representing an epoch timestamp in milliseconds. Returns: A time string representing the input time. None if input param is invalid.
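A quick sketch of the behavior above; the rendered string depends on the local timezone, so the output shown is illustrative:

ts_ms = 1609459200000  # 2021-01-01 00:00:00 UTC, in epoch milliseconds
print(epoch_to_human_time(ts_ms))   # e.g. '12-31-2020 16:00:00 ' in US/Pacific
print(epoch_to_human_time("oops"))  # None: non-int input falls through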
def execute_sync(self, message): """ Respond when the server indicates that the client is out of sync. The server can request a sync when this client sends a message that fails the check() on the server. If the reason for the failure isn't very serious, then the server can decide to send it as usual in the interest of a smooth gameplay experience. When this happens, the server sends out an extra response providing the clients with the information they need to resync themselves. """ info("synchronizing message: {message}") # Synchronize the world. with self.world._unlock_temporarily(): message._sync(self.world) self.world._react_to_sync_response(message) # Synchronize the tokens. for actor in self.actors: actor._react_to_sync_response(message)
Respond when the server indicates that the client is out of sync. The server can request a sync when this client sends a message that fails the check() on the server. If the reason for the failure isn't very serious, then the server can decide to send it as usual in the interest of a smooth gameplay experience. When this happens, the server sends out an extra response providing the clients with the information they need to resync themselves.
def create_assessment(self, assessment_form): """Creates a new ``Assessment``. arg: assessment_form (osid.assessment.AssessmentForm): the form for this ``Assessment`` return: (osid.assessment.Assessment) - the new ``Assessment`` raise: IllegalState - ``assessment_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``assessment_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_form`` did not originate from ``get_assessment_form_for_create()`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceAdminSession.create_resource_template collection = JSONClientValidated('assessment', collection='Assessment', runtime=self._runtime) if not isinstance(assessment_form, ABCAssessmentForm): raise errors.InvalidArgument('argument type is not an AssessmentForm') if assessment_form.is_for_update(): raise errors.InvalidArgument('the AssessmentForm is for update only, not create') try: if self._forms[assessment_form.get_id().get_identifier()] == CREATED: raise errors.IllegalState('assessment_form already used in a create transaction') except KeyError: raise errors.Unsupported('assessment_form did not originate from this session') if not assessment_form.is_valid(): raise errors.InvalidArgument('one or more of the form elements is invalid') insert_result = collection.insert_one(assessment_form._my_map) self._forms[assessment_form.get_id().get_identifier()] = CREATED result = objects.Assessment( osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy) return result
Creates a new ``Assessment``. arg: assessment_form (osid.assessment.AssessmentForm): the form for this ``Assessment`` return: (osid.assessment.Assessment) - the new ``Assessment`` raise: IllegalState - ``assessment_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``assessment_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_form`` did not originate from ``get_assessment_form_for_create()`` *compliance: mandatory -- This method must be implemented.*
def set_token(self, token):
    """
    Set token in authentication for next requests

    :param token: str. token to set in auth. If None, reinit auth
    """
    if token:
        auth = HTTPBasicAuth(token, '')
        self._token = token
        self.authenticated = True
        # TODO: Remove this parameter
        self.session.auth = auth
        logger.debug("Using session token: %s", token)
    else:
        self._token = None
        self.authenticated = False
        self.session.auth = None
        logger.debug("Session token/auth reinitialised")
Set token in authentication for next requests

:param token: str. token to set in auth. If None, reinit auth
def event_info(self, event):
    """Get a dict of info for the artist selected by "event"."""
    def default_func(event):
        return {}
    registry = {
        AxesImage: [pick_info.image_props],
        PathCollection: [pick_info.scatter_props, self._contour_info,
                         pick_info.collection_props],
        Line2D: [pick_info.line_props, pick_info.errorbar_props],
        LineCollection: [pick_info.collection_props, self._contour_info,
                         pick_info.errorbar_props],
        PatchCollection: [pick_info.collection_props, self._contour_info],
        PolyCollection: [pick_info.collection_props, pick_info.scatter_props],
        QuadMesh: [pick_info.collection_props],
        Rectangle: [pick_info.rectangle_props],
    }
    x, y = event.mouseevent.xdata, event.mouseevent.ydata
    props = dict(x=x, y=y, label=event.artist.get_label(), event=event)
    props['ind'] = getattr(event, 'ind', None)
    props['point_label'] = self._point_label(event)

    funcs = registry.get(type(event.artist), [default_func])
    # 3D artists don't share inheritance. Fall back to naming convention.
    if '3D' in type(event.artist).__name__:
        funcs += [pick_info.three_dim_props]
    for func in funcs:
        props.update(func(event))
    return props
Get a dict of info for the artist selected by "event".
def redirect( to, headers=None, status=302, content_type="text/html; charset=utf-8" ): """Abort execution and cause a 302 redirect (by default). :param to: path or fully qualified URL to redirect to :param headers: optional dict of headers to include in the new request :param status: status code (int) of the new request, defaults to 302 :param content_type: the content type (string) of the response :returns: the redirecting Response """ headers = headers or {} # URL Quote the URL before redirecting safe_to = quote_plus(to, safe=":/%#?&=@[]!$&'()*+,;") # According to RFC 7231, a relative URI is now permitted. headers["Location"] = safe_to return HTTPResponse( status=status, headers=headers, content_type=content_type )
Abort execution and cause a 302 redirect (by default). :param to: path or fully qualified URL to redirect to :param headers: optional dict of headers to include in the new request :param status: status code (int) of the new request, defaults to 302 :param content_type: the content type (string) of the response :returns: the redirecting Response
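A sketch of typical use inside a Sanic-style async handler; the ``app`` object and route below are assumptions for illustration:

@app.route("/old-docs")
async def old_docs(request):
    # Use 301 for a permanent move; the default is a 302 temporary redirect.
    return redirect("/docs", status=301)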
def csv( self, filepath=None ): """*Render the data in CSV format* **Key Arguments:** - ``filepath`` -- path to the file to write the csv content to. Default *None* **Return:** - ``renderedData`` -- the data rendered in csv format **Usage:** To render the data set as csv: .. code-block:: python print dataSet.csv() .. code-block:: text owner,pet,address daisy,dog,"belfast, uk" john,snake,the moon susan,crocodile,larne and to save the csv rendering to file: .. code-block:: python dataSet.csv("/path/to/myfile.csv") """ self.log.debug('starting the ``csv`` method') renderedData = self._list_of_dictionaries_to_csv("machine") if filepath and renderedData != "NO MATCH": # RECURSIVELY CREATE MISSING DIRECTORIES if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``csv`` method') return renderedData
*Render the data in CSV format* **Key Arguments:** - ``filepath`` -- path to the file to write the csv content to. Default *None* **Return:** - ``renderedData`` -- the data rendered in csv format **Usage:** To render the data set as csv: .. code-block:: python print dataSet.csv() .. code-block:: text owner,pet,address daisy,dog,"belfast, uk" john,snake,the moon susan,crocodile,larne and to save the csv rendering to file: .. code-block:: python dataSet.csv("/path/to/myfile.csv")
def wait(timeout=None, flush=True): """Wait for an event. Args: timeout (Optional[int]): The time in seconds that this function will wait before giving up and returning None. With the default value of None, this will block forever. flush (bool): If True a call to :any:`tdl.flush` will be made before listening for events. Returns: Type[Event]: An event, or None if the function has timed out. Anything added via :any:`push` will also be returned. """ if timeout is not None: timeout = timeout + _time.clock() # timeout at this time while True: if _eventQueue: return _eventQueue.pop(0) if flush: # a full 'round' of events need to be processed before flushing _tdl.flush() if timeout and _time.clock() >= timeout: return None # return None on timeout _time.sleep(0.001) # sleep 1ms _processEvents()
Wait for an event. Args: timeout (Optional[int]): The time in seconds that this function will wait before giving up and returning None. With the default value of None, this will block forever. flush (bool): If True a call to :any:`tdl.flush` will be made before listening for events. Returns: Type[Event]: An event, or None if the function has timed out. Anything added via :any:`push` will also be returned.
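A typical polling loop built on the function above (a sketch, assuming a tdl console is already initialized and that returned events carry a ``type`` attribute):

while True:
    event = wait(timeout=0.5)
    if event is None:
        continue  # timed out; do idle work here
    if event.type == 'QUIT':
        break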
def cancel(self, workflow_id): """Cancels a running workflow. Args: workflow_id (str): Workflow id. Returns: Nothing """ self.logger.debug('Canceling workflow: ' + workflow_id) url = '%(wf_url)s/%(wf_id)s/cancel' % { 'wf_url': self.workflows_url, 'wf_id': workflow_id } r = self.gbdx_connection.post(url, data='') r.raise_for_status()
Cancels a running workflow. Args: workflow_id (str): Workflow id. Returns: Nothing
def get_running_race(self, race_id):
    """
    Gets a running race for a given identifier.

    http://strava.github.io/api/v3/running_races/#list

    :param race_id: id for the race

    :rtype: :class:`stravalib.model.RunningRace`
    """
    raw = self.protocol.get('/running_races/{id}', id=race_id)
    return model.RunningRace.deserialize(raw, bind_client=self)
Gets a running race for a given identifier.

http://strava.github.io/api/v3/running_races/#list

:param race_id: id for the race

:rtype: :class:`stravalib.model.RunningRace`
def as_hyperbola(self, rotated=False): """ Hyperbolic error area """ idx = N.diag_indices(3) _ = 1/self.covariance_matrix[idx] d = list(_) d[-1] *= -1 arr = N.identity(4)*-1 arr[idx] = d hyp = conic(arr) if rotated: R = augment(self.axes) hyp = hyp.transform(R) return hyp
Hyperbolic error area
def chmod(path, mode=None, user=None, group=None, other=None, recursive=False):
    """Changes file mode permissions.

    >>> if chmod('/tmp/one', 0o755):
    ...     print('OK')
    OK

    NOTE: The ``0o`` prefix is required when using a numerical mode.
    """
    successful = True
    mode = _ops_mode(mode)
    if user is not None:
        mode.user = user
    if group is not None:
        mode.group = group
    if other is not None:
        mode.other = other
    if recursive:
        for p in find(path, no_peek=True):
            successful = _chmod(p, mode) and successful
    else:
        successful = _chmod(path, mode)
    return successful
Changes file mode permissions.

>>> if chmod('/tmp/one', 0o755):
...     print('OK')
OK

NOTE: The ``0o`` prefix is required when using a numerical mode.
def rgamma(alpha, beta, size=None): """ Random gamma variates. """ return np.random.gamma(shape=alpha, scale=1. / beta, size=size)
Random gamma variates.
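A short check of the parameterization above: numpy's ``shape``/``scale`` convention means the mean of Gamma(alpha, beta) comes out to alpha/beta.

import numpy as np

np.random.seed(0)
samples = rgamma(alpha=3.0, beta=2.0, size=100_000)
print(samples.mean())  # ~1.5, i.e. alpha / beta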
def initialize_ui(self): """ Initializes the Component ui. :return: Method success. :rtype: bool """ LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__)) self.__model = ProjectsProxyModel(self) self.__model.setSourceModel(self.__script_editor.model) self.__delegate = RichText_QStyledItemDelegate(self, self.__style) self.Projects_Explorer_treeView.setParent(None) self.Projects_Explorer_treeView = Projects_QTreeView(self, self.__model) self.Projects_Explorer_treeView.setItemDelegate(self.__delegate) self.Projects_Explorer_treeView.setObjectName("Projects_Explorer_treeView") self.Projects_Explorer_treeView.setContextMenuPolicy(Qt.ActionsContextMenu) self.Projects_Explorer_dockWidgetContents_gridLayout.addWidget(self.Projects_Explorer_treeView, 0, 0) self.__view = self.Projects_Explorer_treeView self.__view_add_actions() self.__add_actions() # Signals / Slots. self.__view.expanded.connect(self.__view__expanded) self.__view.doubleClicked.connect(self.__view__doubleClicked) self.__view.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged) self.__script_editor.Script_Editor_tabWidget.currentChanged.connect( self.__script_editor_Script_Editor_tabWidget__currentChanged) self.__script_editor.model.project_registered.connect(self.__script_editor_model__project_registered) self.initialized_ui = True return True
Initializes the Component ui. :return: Method success. :rtype: bool
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep, delete_existing=1): """Prepend path elements to the path 'name' in the 'ENV' dictionary for this environment. Will only add any particular path once, and will normpath and normcase all paths to help assure this. This can also handle the case where the env variable is a list instead of a string. If delete_existing is 0, a newpath which is already in the path will not be moved to the front (it will be left where it is). """ orig = '' if envname in self._dict and name in self._dict[envname]: orig = self._dict[envname][name] nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing, canonicalize=self._canonicalize) if envname not in self._dict: self._dict[envname] = {} self._dict[envname][name] = nv
Prepend path elements to the path 'name' in the 'ENV' dictionary for this environment. Will only add any particular path once, and will normpath and normcase all paths to help assure this. This can also handle the case where the env variable is a list instead of a string. If delete_existing is 0, a newpath which is already in the path will not be moved to the front (it will be left where it is).
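A usage sketch inside an SConstruct file, where ``Environment`` is in scope; the directory path is a placeholder:

env = Environment()
env.PrependENVPath('PATH', '/opt/tools/bin')
# With delete_existing=0, an entry already on the path keeps its position
# instead of being moved to the front.
env.PrependENVPath('PATH', '/opt/tools/bin', delete_existing=0)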
def initialize(self, config): """ Create the physical storage for this job store, allocate a workflow ID and persist the given Toil configuration to the store. :param toil.common.Config config: the Toil configuration to initialize this job store with. The given configuration will be updated with the newly allocated workflow ID. :raises JobStoreExistsException: if the physical storage for this job store already exists """ assert config.workflowID is None config.workflowID = str(uuid4()) logger.debug("The workflow ID is: '%s'" % config.workflowID) self.__config = config self.writeConfig()
Create the physical storage for this job store, allocate a workflow ID and persist the given Toil configuration to the store. :param toil.common.Config config: the Toil configuration to initialize this job store with. The given configuration will be updated with the newly allocated workflow ID. :raises JobStoreExistsException: if the physical storage for this job store already exists
def climb_stairs(n): """ :type n: int :rtype: int """ arr = [1, 1] for _ in range(1, n): arr.append(arr[-1] + arr[-2]) return arr[-1]
:type n: int :rtype: int
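The recurrence above is just Fibonacci: ways(n) = ways(n-1) + ways(n-2). A quick check:

for n in range(1, 6):
    print(n, climb_stairs(n))
# 1 1
# 2 2
# 3 3
# 4 5
# 5 8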
def surface(self, param): """Return the detector surface point corresponding to ``param``. For parameter value ``p``, the surface point is given by :: surf = p[0] * axes[0] + p[1] * axes[1] Parameters ---------- param : `array-like` or sequence Parameter value(s) at which to evaluate. A sequence of parameters must have length 2. Returns ------- point : `numpy.ndarray` Vector(s) pointing from the origin to the detector surface point at ``param``. If ``param`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``. Examples -------- The method works with a single parameter, resulting in a single vector: >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10)) >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)]) >>> det.surface([0, 0]) array([ 0., 0., 0.]) >>> det.surface([0, 1]) array([ 0., 0., 1.]) >>> det.surface([1, 1]) array([ 1., 0., 1.]) It is also vectorized, i.e., it can be called with multiple parameters at once (or n-dimensional arrays of parameters): >>> # 3 pairs of parameters, resulting in 3 vectors >>> det.surface([[0, 0, 1], ... [0, 1, 1]]) array([[ 0., 0., 0.], [ 0., 0., 1.], [ 1., 0., 1.]]) >>> # Pairs of parameters in a (4, 5) array each >>> param = (np.zeros((4, 5)), np.zeros((4, 5))) >>> det.surface(param).shape (4, 5, 3) >>> # Using broadcasting for "outer product" type result >>> param = (np.zeros((4, 1)), np.zeros((1, 5))) >>> det.surface(param).shape (4, 5, 3) """ squeeze_out = (np.broadcast(*param).shape == ()) param_in = param param = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in param) if self.check_bounds and not is_inside_bounds(param, self.params): raise ValueError('`param` {} not in the valid range ' '{}'.format(param_in, self.params)) # Compute outer product of the i-th spatial component of the # parameter and sum up the contributions surf = sum(np.multiply.outer(p, ax) for p, ax in zip(param, self.axes)) if squeeze_out: surf = surf.squeeze() return surf
Return the detector surface point corresponding to ``param``. For parameter value ``p``, the surface point is given by :: surf = p[0] * axes[0] + p[1] * axes[1] Parameters ---------- param : `array-like` or sequence Parameter value(s) at which to evaluate. A sequence of parameters must have length 2. Returns ------- point : `numpy.ndarray` Vector(s) pointing from the origin to the detector surface point at ``param``. If ``param`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``. Examples -------- The method works with a single parameter, resulting in a single vector: >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10)) >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)]) >>> det.surface([0, 0]) array([ 0., 0., 0.]) >>> det.surface([0, 1]) array([ 0., 0., 1.]) >>> det.surface([1, 1]) array([ 1., 0., 1.]) It is also vectorized, i.e., it can be called with multiple parameters at once (or n-dimensional arrays of parameters): >>> # 3 pairs of parameters, resulting in 3 vectors >>> det.surface([[0, 0, 1], ... [0, 1, 1]]) array([[ 0., 0., 0.], [ 0., 0., 1.], [ 1., 0., 1.]]) >>> # Pairs of parameters in a (4, 5) array each >>> param = (np.zeros((4, 5)), np.zeros((4, 5))) >>> det.surface(param).shape (4, 5, 3) >>> # Using broadcasting for "outer product" type result >>> param = (np.zeros((4, 1)), np.zeros((1, 5))) >>> det.surface(param).shape (4, 5, 3)
def roll_mean(input, window):
    '''Apply a rolling mean function to an array.
    This is a simple rolling aggregation.'''
    nobs, j, sum_x = 0, 0, 0.
    N = len(input)
    if window > N:
        raise ValueError('Out of bounds')
    output = np.ndarray(N-window+1, dtype=input.dtype)

    for val in input[:window]:
        if val == val:  # NaN check: NaN != NaN
            nobs += 1
            sum_x += val

    output[j] = NaN if not nobs else sum_x / nobs

    for val in input[window:]:
        prev = input[j]
        if prev == prev:
            sum_x -= prev
            nobs -= 1
        if val == val:
            nobs += 1
            sum_x += val
        j += 1
        output[j] = NaN if not nobs else sum_x / nobs

    return output
Apply a rolling mean function to an array. This is a simple rolling aggregation.
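A small demonstration, assuming the ``NaN`` symbol the function above relies on is bound to ``np.nan``:

import numpy as np
NaN = np.nan  # the symbol roll_mean refers to

x = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
print(roll_mean(x, 2))  # [1.5 2.  4.  4.5] -- NaNs are skipped, not propagated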
def _generate_limit_items(lower, upper): """Yield key, value pairs for limits dictionary. Yield pairs of key, value where key is ``lower``, ``upper`` or ``fixed``. A key, value pair is emitted if the bounds are not None. """ # Use value + 0 to convert any -0.0 to 0.0 which looks better. if lower is not None and upper is not None and lower == upper: yield 'fixed', upper + 0 else: if lower is not None: yield 'lower', lower + 0 if upper is not None: yield 'upper', upper + 0
Yield key, value pairs for limits dictionary. Yield pairs of key, value where key is ``lower``, ``upper`` or ``fixed``. A key, value pair is emitted if the bounds are not None.
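A few illustrative calls to the generator above:

print(dict(_generate_limit_items(1.0, 5.0)))    # {'lower': 1.0, 'upper': 5.0}
print(dict(_generate_limit_items(2.0, 2.0)))    # {'fixed': 2.0}
print(dict(_generate_limit_items(None, -0.0)))  # {'upper': 0.0} -- "+ 0" drops -0.0
print(dict(_generate_limit_items(None, None)))  # {}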
def generate_matrices(dim = 40): """ Generates the matrices that positive and negative samples are multiplied with. The matrix for positive samples is randomly drawn from a uniform distribution, with elements in [-1, 1]. The matrix for negative examples is the sum of the positive matrix with a matrix drawn from a normal distribution with mean 0 variance 1. """ positive = numpy.random.uniform(-1, 1, (dim, dim)) negative = positive + numpy.random.normal(0, 1, (dim, dim)) return positive, negative
Generates the matrices that positive and negative samples are multiplied with. The matrix for positive samples is randomly drawn from a uniform distribution, with elements in [-1, 1]. The matrix for negative examples is the sum of the positive matrix with a matrix drawn from a normal distribution with mean 0 variance 1.
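A quick sanity check of the construction above (seeded for reproducibility):

import numpy

numpy.random.seed(42)
positive, negative = generate_matrices(dim=5)
print(positive.shape, negative.shape)         # (5, 5) (5, 5)
# Elementwise |difference| averages around sqrt(2/pi) ~= 0.8, the mean of |N(0, 1)|.
print(numpy.abs(negative - positive).mean())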
def list(self, id=None): """ List all running jobs :param id: optional ID for the job to list """ args = {'id': id} self._job_chk.check(args) return self._client.json('job.list', args)
List all running jobs :param id: optional ID for the job to list
def wait_command(self, start_func, turns=1, end_func=None): """Call ``start_func``, wait ``turns``, and then call ``end_func`` if provided Disables input for the duration. :param start_func: function to call just after disabling input :param turns: number of turns to wait :param end_func: function to call just before re-enabling input :return: ``None`` """ self.disable_input() start_func() self.app.wait_turns(turns, cb=partial(self.enable_input, end_func))
Call ``start_func``, wait ``turns``, and then call ``end_func`` if provided Disables input for the duration. :param start_func: function to call just after disabling input :param turns: number of turns to wait :param end_func: function to call just before re-enabling input :return: ``None``
def add_to_dumper(dumper: Type, classes: List[Type]) -> None: """Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a \ YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add. """ if not isinstance(classes, list): classes = [classes] # type: ignore for class_ in classes: if issubclass(class_, enum.Enum): dumper.add_representer(class_, EnumRepresenter(class_)) elif issubclass(class_, str) or issubclass(class_, UserString): dumper.add_representer(class_, UserStringRepresenter(class_)) else: dumper.add_representer(class_, Representer(class_))
Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a \ YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add.
def get_wrapped_stream(stream, encoding=None, errors="replace"): """ Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream. :param stream: A stream instance to wrap :param str encoding: The encoding to use for the stream :param str errors: The error handler to use, default "replace" :returns: A new, wrapped stream :rtype: :class:`StreamWrapper` """ if stream is None: raise TypeError("must provide a stream to wrap") stream = _get_binary_buffer(stream) if stream is not None and encoding is None: encoding = "utf-8" if not encoding: encoding = get_output_encoding(stream) else: encoding = get_canonical_encoding_name(encoding) return StreamWrapper(stream, encoding, errors, line_buffering=True)
Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream. :param stream: A stream instance to wrap :param str encoding: The encoding to use for the stream :param str errors: The error handler to use, default "replace" :returns: A new, wrapped stream :rtype: :class:`StreamWrapper`
def get_aggregates(self, request):
    """
    Implements Get aggregates (total number of objects filtered);
    maps to PATCH /api/object_name/get_aggregates/ in REST semantics

    :param request: rip.Request
    :return: rip.Response
    """
    pipeline = crud_pipeline_factory.get_aggregates_pipeline(
        configuration=self.configuration)
    return pipeline(request=request)
Implements Get aggregates (total number of objects filtered);
maps to PATCH /api/object_name/get_aggregates/ in REST semantics

:param request: rip.Request
:return: rip.Response
def raise_from_response(resp): """Turn a failed request response into a BackendError. Handy for reflecting HTTP errors from farther back in the call chain. Parameters ---------- resp: :class:`requests.Response` Raises ------ :class:`apikit.BackendError` If `resp.status_code` is equal to or greater than 400. """ if resp.status_code < 400: # Request was successful. Or at least, not a failure. return raise BackendError(status_code=resp.status_code, reason=resp.reason, content=resp.text)
Turn a failed request response into a BackendError. Handy for reflecting HTTP errors from farther back in the call chain. Parameters ---------- resp: :class:`requests.Response` Raises ------ :class:`apikit.BackendError` If `resp.status_code` is equal to or greater than 400.
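A sketch of typical use with requests; the endpoint URL is a placeholder:

import requests

resp = requests.get("https://backend.example.com/api/thing")  # hypothetical endpoint
raise_from_response(resp)  # no-op below 400, raises BackendError otherwise
data = resp.json()         # safe to consume the body past this point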
def add_alarm_action(self, action_arn=None):
    """
    Adds an alarm action, represented as an SNS topic, to this alarm.
    What to do when the alarm is triggered.

    :type action_arn: str
    :param action_arn: SNS topics to which notification should be
        sent if the alarm goes to state ALARM.
    """
    if not action_arn:
        return  # Raise exception instead?
    self.actions_enabled = 'true'
    self.alarm_actions.append(action_arn)
Adds an alarm action, represented as an SNS topic, to this alarm.
What to do when the alarm is triggered.

:type action_arn: str
:param action_arn: SNS topics to which notification should be
    sent if the alarm goes to state ALARM.
def get_time_slide_id(xmldoc, time_slide, create_new = None, superset_ok = False, nonunique_ok = False):
    """
    Return the time_slide_id corresponding to the offset vector
    described by time_slide, a dictionary of instrument/offset pairs.

    Example:

    >>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
    'time_slide:time_slide_id:10'

    This function is a wrapper around the .get_time_slide_id() method
    of the pycbc_glue.ligolw.lsctables.TimeSlideTable class.  See the
    documentation for that class for the meaning of the create_new,
    superset_ok and nonunique_ok keyword arguments.

    This function requires the document to contain exactly one
    time_slide table.  If the document does not contain exactly one
    time_slide table then ValueError is raised, unless the optional
    create_new argument is not None.  In that case a new table is
    created.  This effect of the create_new argument is in addition to
    the effects described by the TimeSlideTable class.
    """
    try:
        tisitable = lsctables.TimeSlideTable.get_table(xmldoc)
    except ValueError:
        # table not found
        if create_new is None:
            raise
        tisitable = lsctables.New(lsctables.TimeSlideTable)
        xmldoc.childNodes[0].appendChild(tisitable)
    # make sure the next_id attribute is correct
    tisitable.sync_next_id()
    # get the id
    return tisitable.get_time_slide_id(time_slide, create_new = create_new, superset_ok = superset_ok, nonunique_ok = nonunique_ok)
Return the time_slide_id corresponding to the offset vector
described by time_slide, a dictionary of instrument/offset pairs.

Example:

>>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0})
'time_slide:time_slide_id:10'

This function is a wrapper around the .get_time_slide_id() method
of the pycbc_glue.ligolw.lsctables.TimeSlideTable class.  See the
documentation for that class for the meaning of the create_new,
superset_ok and nonunique_ok keyword arguments.

This function requires the document to contain exactly one
time_slide table.  If the document does not contain exactly one
time_slide table then ValueError is raised, unless the optional
create_new argument is not None.  In that case a new table is
created.  This effect of the create_new argument is in addition to
the effects described by the TimeSlideTable class.
def get_evpn_table(self): """Returns global EVPN table. Creates the table if it does not exist. """ evpn_table = self._global_tables.get(RF_L2_EVPN) # Lazy initialization of the table. if not evpn_table: evpn_table = EvpnTable(self._core_service, self._signal_bus) self._global_tables[RF_L2_EVPN] = evpn_table self._tables[(None, RF_L2_EVPN)] = evpn_table return evpn_table
Returns global EVPN table. Creates the table if it does not exist.
def delete(self): """Del or Backspace pressed. Delete selection""" with self._qpart: for cursor in self.cursors(): if cursor.hasSelection(): cursor.deleteChar()
Del or Backspace pressed. Delete selection
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False):
    """
    Query for similarity matrix between groups of subjects

    Return a list of intersection result objects with keys:
     - x : term from x
     - y : term from y
     - shared : set of terms in the intersection
     - c : count of intersection
     - j : jaccard score
    """
    if x_subjects is None:
        x_subjects = []
    if y_subjects is None:
        y_subjects = []

    xset = set(x_subjects)
    yset = set(y_subjects)
    zset = xset.union(yset)

    # First build a map of gene -> term closure.
    # This could be calculated ahead of time for all g,
    # but this may be space-expensive. TODO: benchmark
    gmap = {}
    for z in zset:
        gmap[z] = self.inferred_types(z)

    ilist = []
    for x in x_subjects:
        for y in y_subjects:
            if not symmetric or x < y:
                shared = gmap[x].intersection(gmap[y])
                union = gmap[x].union(gmap[y])
                j = 0
                if len(union) > 0:
                    j = len(shared) / len(union)
                ilist.append({'x': x, 'y': y, 'shared': shared,
                              'c': len(shared), 'j': j})

    return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)
Query for similarity matrix between groups of subjects

Return a list of intersection result objects with keys:
 - x : term from x
 - y : term from y
 - shared : set of terms in the intersection
 - c : count of intersection
 - j : jaccard score
def build_groetzch_graph(): """Makes a new Groetzsch graph. Ref: http://mathworld.wolfram.com/GroetzschGraph.html""" # Because the graph is so complicated, we want to # build it via adjacency matrix specification # -- Initialize the matrix to all zeros adj = [[0 for _ in range(11)] for _ in range(11)] # -- Add individual edge connections row_connections = [] row_connections.append( (1,2,7,10) ) row_connections.append( (0,3,6,9) ) row_connections.append( (0,4,6,8) ) row_connections.append( (1,4,8,10) ) row_connections.append( (2,3,7,9) ) row_connections.append( (6,7,8,9,10) ) row_connections.append( (1,2,5) ) row_connections.append( (0,4,5) ) row_connections.append( (2,3,5) ) row_connections.append( (1,4,5) ) row_connections.append( (0,3,5) ) for j, tpl in enumerate(row_connections): for i in tpl: adj[j][i] = 1 adj[i][j] = 1 # Debug print the adjacency matrix #for row in adj: # print row graph, _ = create_graph_from_adjacency_matrix(adj) return graph
Makes a new Groetzsch graph. Ref: http://mathworld.wolfram.com/GroetzschGraph.html
def cprint(self, cstr): """ Clear line, then reprint on same line :param cstr: string to print on current line """ cstr = str(cstr) # Force it to be a string cstr_len = len(cstr) prev_cstr_len = len(self._prev_cstr) num_spaces = 0 if cstr_len < prev_cstr_len: num_spaces = abs(prev_cstr_len - cstr_len) try: print(cstr + " " * num_spaces, end='\r') self._prev_cstr = cstr except UnicodeEncodeError: print('Processing...', end='\r') self._prev_cstr = 'Processing...'
Clear line, then reprint on same line :param cstr: string to print on current line
def generate_idx(maxlen, nedit):
    """ generate all possible nedit edits of a string. each item has the form
    ((index1, index2), 'A', 'G')  for nedit=2
    index1 will be replaced by 'A', index2 by 'G'
    this covers all edits < nedit as well since some of the specified
    substitutions will not change the base
    """
    ALPHABET = ["A", "C", "G", "T", "N"]
    ALPHABETS = [ALPHABET for x in range(nedit)]
    return list(itertools.product(itertools.combinations(range(maxlen), nedit),
                                  *ALPHABETS))
generate all possible nedit edits of a string. each item has the form ((index1, index2), 'A', 'G') for nedit=2 index1 will be replaced by 'A', index2 by 'G' this covers all edits < nedit as well since some of the specified substitutions will not change the base
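For a concrete feel, a two-character string with a single edit position:

import itertools

edits = generate_idx(maxlen=2, nedit=1)
print(len(edits))  # 10 == 2 positions * 5 bases (A, C, G, T, N)
print(edits[:3])   # [((0,), 'A'), ((0,), 'C'), ((0,), 'G')]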
def mt_modelform_register_clean_method(form_self, field_name, func, nomaster=False): """ You can add clean_<field_name> for each translated field. For example: class MyModelForm(models.ModelForm): class Meta: model = MyModel _mt_fields = mt_fields(('title', 'description'), nomaster=True) fields = _mt_fields def __init__(self, *args, **kwargs): super(ItemCategoryEditForm, self).__init__(*args, **kwargs) mt_modelform_register_clean_method(self, 'title', self.mt_clean_title, nomaster=True) def mt_clean_title(self, field_name, lang): value = self.cleaned_data[field_name] # validation here return value """ args_list = [('{}_{}'.format(field_name, lang), lang) for lang in AVAILABLE_LANGUAGES] if not nomaster: args_list.append((field_name, None)) def _get_mt_clean_method(args): def _mt_clean_method(): return func(*args) return _mt_clean_method for item_args in args_list: method_name = 'clean_{}'.format(item_args[0]) setattr(form_self, method_name, _get_mt_clean_method(item_args))
You can add clean_<field_name> for each translated field. For example: class MyModelForm(models.ModelForm): class Meta: model = MyModel _mt_fields = mt_fields(('title', 'description'), nomaster=True) fields = _mt_fields def __init__(self, *args, **kwargs): super(ItemCategoryEditForm, self).__init__(*args, **kwargs) mt_modelform_register_clean_method(self, 'title', self.mt_clean_title, nomaster=True) def mt_clean_title(self, field_name, lang): value = self.cleaned_data[field_name] # validation here return value
def on_batch_end(self, iteration:int, smooth_loss:TensorOrNumber, **kwargs:Any)->None: "Determine if loss has runaway and we should stop." if iteration==0 or smooth_loss < self.best_loss: self.best_loss = smooth_loss self.opt.lr = self.sched.step() if self.sched.is_done or (self.stop_div and (smooth_loss > 4*self.best_loss or torch.isnan(smooth_loss))): #We use the smoothed loss to decide on the stopping since it's less shaky. return {'stop_epoch': True, 'stop_training': True}
Determine if loss has runaway and we should stop.
def write(self, image, options, thumbnail): """ Wrapper for ``_write`` """ format_ = options['format'] quality = options['quality'] image_info = options.get('image_info', {}) # additional non-default-value options: progressive = options.get('progressive', settings.THUMBNAIL_PROGRESSIVE) raw_data = self._get_raw_data( image, format_, quality, image_info=image_info, progressive=progressive ) thumbnail.write(raw_data)
Wrapper for ``_write``