code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def loadFiles(self, fileNames, rtiClass=None):
    """ Load every file in fileNames into the repository.

        Each file becomes a repo tree item of class rtiClass. When
        rtiClass is None the item type is auto-detected from the
        file extension.
    """
    for path in fileNames:
        self.repo.loadFile(path, rtiClass=rtiClass)
def create_target_group(name,
                        protocol,
                        port,
                        vpc_id,
                        region=None,
                        key=None,
                        keyid=None,
                        profile=None,
                        health_check_protocol='HTTP',
                        health_check_port='traffic-port',
                        health_check_path='/',
                        health_check_interval_seconds=30,
                        health_check_timeout_seconds=5,
                        healthy_threshold_count=5,
                        unhealthy_threshold_count=2):
    '''
    Create target group if not present.

    name
        (string) - The name of the target group.
    protocol
        (string) - The protocol to use for routing traffic to the targets.
    port
        (int) - The port on which the targets receive traffic. This port is
        used unless you specify a port override when registering the traffic.
    vpc_id
        (string) - The identifier of the virtual private cloud (VPC).
    health_check_protocol
        (string) - Protocol the load balancer uses for target health checks.
        The default is HTTP.
    health_check_port
        (string) - Port used for health checks. The default 'traffic-port'
        means the port each target receives traffic on.
    health_check_path
        (string) - Ping path used for health checks. The default is /.
    health_check_interval_seconds
        (integer) - Approximate seconds between health checks (default 30).
    health_check_timeout_seconds
        (integer) - Seconds with no response before a check fails (default 5).
    healthy_threshold_count
        (integer) - Consecutive successes before an unhealthy target is
        considered healthy (default 5).
    unhealthy_threshold_count
        (integer) - Consecutive failures before a target is considered
        unhealthy (default 2).

    returns
        (bool) - True on success, False on failure.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elbv2.create_target_group learn1give1 protocol=HTTP port=54006 vpc_id=vpc-deadbeef
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Idempotence: an already-existing group counts as success.
    if target_group_exists(name, region, key, keyid, profile):
        return True
    try:
        alb = conn.create_target_group(Name=name, Protocol=protocol, Port=port,
                                       VpcId=vpc_id, HealthCheckProtocol=health_check_protocol,
                                       HealthCheckPort=health_check_port,
                                       HealthCheckPath=health_check_path,
                                       HealthCheckIntervalSeconds=health_check_interval_seconds,
                                       HealthCheckTimeoutSeconds=health_check_timeout_seconds,
                                       HealthyThresholdCount=healthy_threshold_count,
                                       UnhealthyThresholdCount=unhealthy_threshold_count)
        if alb:
            log.info('Created ALB %s: %s', name, alb['TargetGroups'][0]['TargetGroupArn'])
            return True
        else:
            log.error('Failed to create ALB %s', name)
            return False
    except ClientError as error:
        log.error(
            'Failed to create ALB %s: %s: %s',
            name, error.response['Error']['Code'],
            error.response['Error']['Message'],
            exc_info_on_loglevel=logging.DEBUG
        )
        # BUG FIX: previously fell through and implicitly returned None,
        # although the documented contract is a bool.
        return False
def isclose(a, b, align=False, rtol=1.e-5, atol=1.e-8):
    """Compare two molecules for numerical equality.

    Args:
        a (Cartesian):
        b (Cartesian):
        align (bool): If True, a and b are prealigned along their principal
            axes of inertia and moved to their barycenters before comparing.
        rtol (float): Relative tolerance for the numerical equality
            comparison; see :func:`numpy.isclose` for further explanation.
        atol (float): Absolute tolerance for the numerical equality
            comparison; see :func:`numpy.isclose` for further explanation.
            (The original doc mislabelled this as a relative tolerance.)

    Returns:
        Boolean entries in a frame shaped like ``a._frame`` (the 'atom'
        column is set to True; coordinate columns hold the comparison).

    Raises:
        ValueError: If a and b do not share the same atoms and labels.
    """
    coords = ['x', 'y', 'z']
    # BUG FIX: np.alltrue was deprecated and removed in NumPy 2.0 -- use np.all.
    if not (set(a.index) == set(b.index)
            and np.all(a.loc[:, 'atom'] == b.loc[a.index, 'atom'])):
        message = 'Can only compare molecules with the same atoms and labels'
        raise ValueError(message)
    if align:
        a = a.get_inertia()['transformed_Cartesian']
        b = b.get_inertia()['transformed_Cartesian']
    # Reindex b by a's labels so rows are compared atom-by-atom.
    A, B = a.loc[:, coords], b.loc[a.index, coords]
    out = a._frame.copy()
    out['atom'] = True
    out.loc[:, coords] = np.isclose(A, B, rtol=rtol, atol=atol)
    return out
def resource_from_etree(self, etree, resource_class):
    """Construct a Resource from an etree.

    Parameters:
        etree - the etree to parse
        resource_class - class of Resource object to create

    The parsing is properly namespace aware but we search just
    for the elements wanted and leave everything else alone. Will
    raise an error if there are multiple <loc> or multiple <lastmod>
    elements. Otherwise, provided there is a <loc> element then will
    go ahead and extract as much as possible.

    All errors raised are SitemapParseError with messages intended
    to help debug problematic sitemap XML.
    """
    loc_elements = etree.findall('{' + SITEMAP_NS + "}loc")
    if len(loc_elements) > 1:
        raise SitemapParseError(
            "Multiple <loc> elements while parsing <url> in sitemap")
    elif len(loc_elements) == 0:
        raise SitemapParseError(
            "Missing <loc> element while parsing <url> in sitemap")
    loc = loc_elements[0].text
    if loc is None or loc == '':
        raise SitemapParseError(
            "Bad <loc> element with no content while parsing <url> in sitemap")
    # must at least have a URI, make this object
    resource = resource_class(uri=loc)
    # and hopefully a lastmod datetime (but none is OK)
    lastmod_elements = etree.findall('{' + SITEMAP_NS + "}lastmod")
    if len(lastmod_elements) > 1:
        raise SitemapParseError(
            "Multiple <lastmod> elements while parsing <url> in sitemap")
    elif len(lastmod_elements) == 1:
        resource.lastmod = lastmod_elements[0].text
    # proceed to look for other resource attributes in an rs:md element
    md_elements = etree.findall('{' + RS_NS + "}md")
    if len(md_elements) > 1:
        # BUG FIX: the message was never interpolated -- the argument tuple
        # was passed as a second positional arg instead of %-formatting.
        raise SitemapParseError(
            "Found multiple (%d) <rs:md> elements for %s" % (len(md_elements), loc))
    elif len(md_elements) == 1:
        # have one element, look at attributes
        md = self.md_from_etree(md_elements[0], context=loc)
        # simple attributes that map directly to Resource object attributes
        for att in ('capability', 'change', 'length', 'path', 'mime_type'):
            if att in md:
                setattr(resource, att, md[att])
        # The ResourceSync beta spec lists md5, sha-1 and sha-256 fixity
        # digest types. Parse and warn of errors ignored.
        if 'hash' in md:
            try:
                resource.hash = md['hash']
            except ValueError as e:
                self.logger.warning("%s in <rs:md> for %s" % (str(e), loc))
    # look for rs:ln elements (optional)
    ln_elements = etree.findall('{' + RS_NS + "}ln")
    if len(ln_elements) > 0:
        resource.ln = [self.ln_from_etree(ln_element, loc)
                       for ln_element in ln_elements]
    return resource
def temp_shell_task(cls, inp, mpi_procs=1, workdir=None, manager=None):
    """
    Build a Task with a temporary workdir, executed via the shell.

    Mainly used for invoking Abinit to get important parameters needed to
    prepare the real task.

    Args:
        inp: Input for the task.
        mpi_procs: Number of MPI processes to use.
        workdir: Working directory; a temporary one is created when None.
        manager: TaskManager; the user-config manager is used when None.
    """
    import tempfile
    if workdir is None:
        workdir = tempfile.mkdtemp()
    if manager is None:
        manager = TaskManager.from_user_config()
    # Run the job in a shell subprocess via a simplified manager.
    shell_manager = manager.to_shell_manager(mpi_procs=mpi_procs)
    task = cls.from_input(inp, workdir=workdir, manager=shell_manager)
    task.set_name('temp_shell_task')
    return task
def pdf_Gates_Gaudin_Schuhman(d, d_characteristic, m):
    r'''Probability density of a particle distribution following the
    Gates, Gaudin and Schuhman (GGS) model.

    .. math::
        q(d) = \frac{m}{d}\left(\frac{d}{d_{characteristic}}\right)^m
        \text{ if } d \le d_{characteristic} \text{ else } 0

    Parameters
    ----------
    d : float
        Specified particle diameter, [m]
    d_characteristic : float
        Characteristic particle diameter; in this model, the largest
        particle size diameter in the distribution, [m]
    m : float
        Particle size distribution exponent, [-]

    Returns
    -------
    pdf : float
        GGS probability density function, [-]

    Notes
    -----
    The characteristic diameter can be in terms of number, length, surface
    area, or volume density; volume density is most often used.

    Examples
    --------
    >>> pdf_Gates_Gaudin_Schuhman(d=2E-4, d_characteristic=1E-3, m=2.3)
    283.8355768512045

    References
    ----------
    .. [1] Schuhmann, R., 1940. Principles of Comminution, I-Size Distribution
       and Surface Calculations. AIME Technical Publication 1189.
    .. [2] Bayat et al. "Particle Size Distribution Models, Their
       Characteristics and Fitting Capability." Journal of Hydrology 529 (2015).
    '''
    if d > d_characteristic:
        # No particles exist above the characteristic (maximum) diameter.
        return 0.0
    ratio = d/d_characteristic
    return (m/d)*ratio**m
def matches_prefix(ip, prefix):
    """
    Returns True if the given IP address falls inside the given network
    prefix, False otherwise.

    :type ip: string
    :param ip: An IP address.
    :type prefix: string
    :param prefix: An IP prefix.
    :rtype: bool
    :return: True if the IP is in the prefix, False otherwise.
    """
    network, pfxlen = parse_prefix(prefix)
    mask_int = pfxlen2mask_int(pfxlen)
    # The address is in the network iff the masked bits agree.
    return (ip2int(ip) & mask_int) == (ip2int(network) & mask_int)
def _flush(self):
    """Purges the buffer and commits all pending values into the estimator."""
    # Sort the pending samples so _replace_batch can merge them in order.
    self._buffer.sort()
    self._replace_batch()
    # Reset the staging buffer, then shrink the merged summary.
    # NOTE(review): order matters -- _replace_batch presumably reads
    # self._buffer before it is cleared; confirm before reordering.
    self._buffer = []
    self._compress()
def is_suicide_or_check_by_dropping_pawn(self, move):
    '''
    Return True when playing the given move would either leave our own
    king capturable (suicide) or deliver check by a dropped pawn.

    The move is pushed, the resulting position is inspected, and the move
    is popped again, so the board is left unchanged.
    '''
    self.push(move)
    suicide = self.was_suicide()
    pawn_drop_check = self.was_check_by_dropping_pawn(move)
    self.pop()
    return suicide or pawn_drop_check
def invert_hash(self, tok_hash):
    '''Get the strings that correspond to some hash.

    No string will correspond to :data:`DOCUMENT_HASH_KEY`; use
    :data:`DOCUMENT_HASH_KEY_REPLACEMENT` instead.

    :param int tok_hash: Murmur hash to query
    :return: list of :class:`unicode` strings
    '''
    # Scan the single-key range [tok_hash, tok_hash] of the index table.
    key_range = ((tok_hash,), (tok_hash,))
    rows = self.client.scan_keys(HASH_KEYWORD_INDEX_TABLE, key_range)
    return [encoded.decode('utf8') for _, encoded in rows]
def get_node_fact_by_name(api_url=None, node_name=None, fact_name=None, verify=False, cert=list()):
    """
    Returns the specified fact for a node.

    :param api_url: Base PuppetDB API url
    :param node_name: Name of node
    :param fact_name: Name of fact
    """
    # NOTE(review): cert=list() is a shared mutable default; kept verbatim
    # for interface compatibility (it is only passed through, never mutated).
    endpoint = '/nodes/{0}/facts/{1}'.format(node_name, fact_name)
    return utils._make_api_request(api_url, endpoint, verify, cert)
def libvlc_media_get_type(p_md):
    '''Get the media type of the media descriptor object.
    @param p_md: media descriptor object.
    @return: media type.
    @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
    '''
    # Reuse the cached ctypes binding when available; otherwise create it.
    f = _Cfunctions.get('libvlc_media_get_type', None)
    if not f:
        f = _Cfunction('libvlc_media_get_type', ((1,),), None,
                       MediaType, Media)
    return f(p_md)
def get_item(key):
    """Return the value cached under *key* as parsed JSON.

    The cache file lives in CURRENT_DIR and stores its payload under the
    "_" key. Returns None when the file is missing, unreadable, or does
    not contain valid JSON.
    """
    cached_key_file = os.path.join(CURRENT_DIR, key)
    try:
        # BUG FIX: the original leaked the file handle -- use a context
        # manager so the file is always closed.
        with open(cached_key_file, "rb") as fh:
            payload = fh.read().decode('UTF-8')
        return json.loads(payload)["_"]
    except (IOError, ValueError):
        return None
def start_tcp_server(self, port):
    """
    Starts the TCP server using the given port.

    :param port: Port.
    :type port: int
    :return: Method success.
    :rtype: bool
    """
    self.__tcp_server.port = port
    # Guard clause: refuse to start twice.
    if self.__tcp_server.online:
        self.__engine.notifications_manager.warnify(
            "{0} | TCP Server is already online!".format(self.__class__.__name__))
        return False
    if not self.__tcp_server.start():
        return False
    self.__engine.notifications_manager.notify(
        "{0} | TCP Server has started with '{1}' address on '{2}' port!".format(
            self.__class__.__name__,
            self.__address,
            self.__port))
    return True
def positions_to_contigs(positions):
    """Flattens and converts a positions array to a contigs array, if applicable.

    A new contig is started (label incremented) at every zero position;
    nonzero positions inherit the label of the previous entry.

    Returns the input unchanged (with a warning) when identical consecutive
    nonzero values are detected and no zero separator is present.
    """
    if isinstance(positions, np.ndarray):
        flattened_positions = positions.flatten()
    else:
        try:
            flattened_positions = np.array(
                [pos for contig in positions for pos in contig])
        except TypeError:
            flattened_positions = np.array(positions)
    # BUG FIX: the checks and the loop below previously indexed the raw
    # ``positions``, which is wrong when the input is nested per-contig
    # (positions[i] would be a whole contig, not a single position).
    if ((np.diff(flattened_positions) == 0).any()
            and 0 not in set(flattened_positions)):
        warnings.warn("I detected identical consecutive nonzero values.")
        return positions
    n = len(flattened_positions)
    contigs = np.ones(n)
    counter = 0
    for i in range(1, n):
        if flattened_positions[i] == 0:
            counter += 1
            contigs[i] += counter
        else:
            contigs[i] = contigs[i - 1]
    return contigs
def write_table(page, headers, data, cl=''):
    """
    Append an HTML table built from *headers* and *data* to *page*.

    When cl == 'list', each row pairs headers[i] with data[i]; otherwise a
    single header row is emitted followed by one row per entry of *data*
    (a flat *data* sequence is treated as a single row). Returns *page*.
    """
    page.table(class_=cl)
    if cl == 'list':
        # 'list' layout: one header/value pair per table row.
        for i, label in enumerate(headers):
            page.tr()
            page.th()
            page.add('%s' % label)
            page.th.close()
            page.td()
            page.add('%s' % data[i])
            page.td.close()
            page.tr.close()
    else:
        page.tr()
        for label in headers:
            page.th()
            page.add('%s' % label)
            page.th.close()
        page.tr.close()
        # Wrap a flat sequence so it renders as a single row.
        if data and not re.search('list', str(type(data[0]))):
            data = [data]
        for row in data:
            page.tr()
            for cell in row:
                page.td()
                page.add('%s' % cell)
                page.td.close()
            page.tr.close()
    page.table.close()
    return page
def _validate_minlength(self, min_length, field, value):
    """ {'type': 'integer'} """
    # NOTE: the docstring above is load-bearing -- Cerberus-style validators
    # parse it as the schema constraint for the 'minlength' rule. Do not edit.
    # Report an error only for sized iterables shorter than min_length.
    # NOTE(review): len() would raise on unsized iterables (e.g. generators);
    # presumably upstream guarantees a sized value -- confirm.
    if isinstance(value, Iterable) and len(value) < min_length:
        self._error(field, errors.MIN_LENGTH, len(value))
def shorten_go_name_ptbl3(self, name, dcnt):
    """Shorten GO description for Table 3 in manuscript.

    Replacements are applied in tiers: always, then progressively more
    aggressive abbreviations as the descendant count (dcnt) shrinks.
    """
    if self._keep_this(name):
        return name
    always = (
        ("positive regulation of immune system process", "+ reg. of immune sys. process"),
        ("positive regulation of immune response", "+ reg. of immune response"),
        ("positive regulation of cytokine production", "+ reg. of cytokine production"),
    )
    under_10 = (
        ("negative", "-"),
        ("positive", "+"),
        ("tumor necrosis factor production", "TNF production"),
    )
    under_4 = (
        ("regulation", "reg."),
        ("exogenous ", ""),
        (" via ", " w/"),
        ("T cell mediated cytotoxicity", "cytotoxicity via T cell"),
        ('involved in', 'in'),
        ('-positive', '+'),
    )
    for old, new in always:
        name = name.replace(old, new)
    if dcnt < 40:
        name = name.replace("antigen processing and presentation", "a.p.p.")
    if dcnt < 10:
        for old, new in under_10:
            name = name.replace(old, new)
    if dcnt < 4:
        for old, new in under_4:
            name = name.replace(old, new)
    return name
def make(data, samples):
    """ build a vcf file from the supercatg array and the cat.clust.gz output"""
    # NOTE: Python 2 only -- uses ``print >>file`` and backtick repr syntax.
    outfile = open(os.path.join(data.dirs.outfiles, data.name+".vcf"), 'w')
    inloci = os.path.join(data.dirs.outfiles, data.name+".loci")
    # Sample names define the per-sample genotype column order.
    names = [i.name for i in samples]
    names.sort()
    ## TODO: Get a real version number for the current sw stack
    version = "0.1"
    ## TODO: This is just reporting minimum depth per base. Would it be useful to
    ## report real depth of reads per base? YEAH, that's what supercatg is for.
    mindepth = data.paramsdict["mindepth_statistical"]
    # --- VCF header (fixed metadata + column line) ---
    print >>outfile, "##fileformat=VCFv4.1"
    print >>outfile, "##fileDate="+time.strftime("%Y%m%d")
    print >>outfile, "##source=ipyRAD.v."+version
    print >>outfile, "##reference=common_allele_at_each_locus"
    print >>outfile, "##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">"
    print >>outfile, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">"
    print >>outfile, "##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">"
    print >>outfile, "##INFO=<ID=AA,Number=1,Type=String,Description=\"Ancestral Allele\">"
    print >>outfile, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">"
    print >>outfile, "##FORMAT=<ID=GQ,Number=1,Type=Integer,Description=\"Genotype Quality\">"
    print >>outfile, "##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read Depth\">"
    print >>outfile, "\t".join(["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO ","FORMAT"]+list(names))
    # Loci are "|"-separated records in the .loci file; drop trailing empty.
    loci = open(inloci).read().split("|")[:-1]
    snps = 0
    vcflist = []
    for locusnumber in range(len(loci)):
        # Sample names (">"-prefixed) and their aligned sequences at this locus.
        samps = [i.split()[0][1:] for i in loci[locusnumber].strip().split("\n") if ">" in i]
        loc = np.array([tuple(i.split()[-1]) for i in loci[locusnumber].strip().split("\n") if ">" in i])
        NS = str(len(loc))
        DP = str(mindepth)
        for base in range(len(loc.T)):
            col = []
            site = list(loc.T[base])
            # Drop gaps and Ns before deciding the alleles at this column.
            site = list("".join(site).replace("-","").replace("N",""))
            if site:
                # Expand IUPAC ambiguity codes into their two alleles.
                for bb in site:
                    if bb in list("RKYSWM"):
                        col += unstruct(bb)[0]
                        col += unstruct(bb)[1]
                    else:
                        col += bb
                REF = most_common([i for i in col if i not in list("-RKYSWMN")])
                ALT = set([i for i in col if (i in list("ATGC-N")) and (i!=REF)])
                if ALT:
                    snps += 1
                    # Genotype indices: 0 = REF, 1.. = each ALT allele.
                    GENO = [REF]+list(ALT)
                    GENOS = []
                    for samp in names:
                        if samp in samps:
                            idx = samps.index(samp)
                            f = unstruct(loc.T[base][idx])
                            if ('-' in f) or ('N' in f):
                                GENOS.append("./.")
                            else:
                                GENOS.append(str(GENO.index(f[0]))+"|"+str(GENO.index(f[1])))
                        else:
                            GENOS.append("./.")
                    vcflist.append("\t".join([`locusnumber+1`, `base+1`, '.', REF, ",".join(ALT), "20", "PASS",
                                              ";".join(["NS="+NS, "DP="+DP]), "GT"]+GENOS))
        # Flush buffered records every 1000 loci to bound memory use.
        if not locusnumber % 1000:
            outfile.write( "\n".join(vcflist)+"\n" )
            vcflist = []
            #print >>outfile, "\t".join([`locusnumber+1`, `base+1`, '.', REF, ",".join(ALT), "20", "PASS",
            #                 ";".join(["NS="+NS, "DP="+DP]), "GT"]+GENOS)
    outfile.write( "\n".join(vcflist) )
    outfile.close()
def in_(self, *objs):
    """
    Build a SQL condition testing membership of this relation in *objs*.

    With no objects given, the returned condition is never true.
    """
    if not objs:
        # col != col can never hold -> matches nothing by construction.
        col = self.table.c[self.fielda]
        return col != col
    keys = get_objs_columns(objs, self.reference_fieldname)
    join_cond = ((self.table.c[self.fieldb] == self.reference_class.c[self.reference_fieldname])
                 & (self.table.c[self.fieldb].in_(keys)))
    sub_query = select([self.table.c[self.fielda]], join_cond)
    return self.model_class.c[self.reversed_fieldname].in_(sub_query)
def diff(cwd,
         item1=None,
         item2=None,
         opts='',
         git_opts='',
         user=None,
         password=None,
         no_index=False,
         cached=False,
         paths=None,
         output_encoding=None):
    '''
    .. versionadded:: 2015.8.12,2016.3.3,2016.11.0

    Interface to `git-diff(1)`_

    cwd
        The path to the git checkout

    item1 and item2
        Revision(s) to pass to ``git diff``. With ``cached=False`` and no
        revisions, the working tree is compared against the index; with two
        revisions, they are compared to each other. May be ignored depending
        on ``no_index``/``cached`` (see below).

    opts
        Additional options for the ``diff`` subcommand, in a single string.
        On the Salt CLI, precede dash-leading opts with ``opts=``.

    git_opts
        Additional options for git itself (not the ``diff`` subcommand), in
        a single string (e.g. ``-c`` for temporary config changes).
        Requires git 1.7.2+.

        .. versionadded:: 2017.7.0

    user
        User under which to run the git command (default: the minion user).

    password
        Windows only. Required when specifying ``user``.

        .. versionadded:: 2016.3.4

    no_index : False
        Set to ``True`` to diff two files in the same repo against each
        other rather than diffing revisions. Requires Git 1.5.1+; when
        ``True``, ``item1`` and ``item2`` are ignored.

    cached : False
        If ``True``, compare staged changes to ``item1`` (if specified),
        otherwise to the most recent commit. ``item2`` is then ignored.

    paths
        File paths to pass to ``git diff``, as a comma-separated list or a
        Python list.

    output_encoding
        Encoding used to decode git's output; rarely needed.

        .. versionadded:: 2018.3.1

    .. _`git-diff(1)`: http://git-scm.com/docs/git-diff

    CLI Example:

    .. code-block:: bash

        salt myminion git.diff /path/to/repo
        salt myminion git.diff /path/to/repo cached=True
        salt myminion git.diff /path/to/repo mybranch cached=True
        salt myminion git.diff /path/to/repo HEAD
        salt myminion git.diff /path/to/repo abcdef1 aabbccd
        salt myminion git.diff /path/to/repo abcdef1 aabbccd paths=path/to/file1,path/to/file2
        salt myminion git.diff /path/to/repo no_index=True paths=path/to/file1,/absolute/path/to/file2
    '''
    if no_index and cached:
        raise CommandExecutionError(
            'The \'no_index\' and \'cached\' options cannot be used together'
        )

    command = ['git'] + _format_git_opts(git_opts)
    command.append('diff')
    command.extend(_format_opts(opts))

    # Normalize paths to a list, tolerating non-string scalars.
    if paths is not None and not isinstance(paths, (list, tuple)):
        try:
            paths = paths.split(',')
        except AttributeError:
            paths = six.text_type(paths).split(',')

    ignore_retcode = False
    failhard = True

    if no_index:
        if _LooseVersion(version(versioninfo=False)) < _LooseVersion('1.5.1'):
            raise CommandExecutionError(
                'The \'no_index\' option is only supported in Git 1.5.1 and '
                'newer'
            )
        # git diff --no-index exits nonzero when files differ; don't fail.
        ignore_retcode = True
        failhard = False
        command.append('--no-index')
        for ignored in (item1, item2):
            if ignored:
                log.warning(
                    'Revision \'%s\' ignored in git diff, as revisions cannot be '
                    'used when no_index=True', ignored
                )
    elif cached:
        command.append('--cached')
        if item1:
            command.append(item1)
        if item2:
            log.warning(
                'Second revision \'%s\' ignored in git diff, at most one '
                'revision is considered when cached=True', item2
            )
    else:
        if item1:
            command.append(item1)
        if item2:
            command.append(item2)

    if paths:
        command.append('--')
        command.extend(paths)

    return _git_run(command,
                    cwd=cwd,
                    user=user,
                    password=password,
                    ignore_retcode=ignore_retcode,
                    failhard=failhard,
                    redirect_stderr=True,
                    output_encoding=output_encoding)['stdout']
def prev(self):
    """Return the segment immediately preceding this one.

    Raises:
        exceptions.NoMoreSegments: when no segment exists before this one.
    """
    previous = Segment(segment_t=idaapi.get_prev_seg(self.ea))
    # get_prev_seg wraps/stays put at the first segment, so detect that case.
    if not previous.ea < self.ea:
        raise exceptions.NoMoreSegments("This is the first segment. no segments exist before it.")
    return previous
def incr(self, key, value, default=0, time=1000000):
    """
    Increment the value stored at *key* on the server.

    If the key exists its value is increased by ``value`` and the new
    value is returned; otherwise ``default`` is used as the starting point.

    :param key: Key's name
    :type key: six.string_types
    :param value: Amount to add to the stored value
    :type value: int
    :param default: Starting value when the key does not exist yet.
    :type default: int
    :param time: Expiration time for the key, in seconds.
    :type time: int
    :return: Actual value of the key on server
    :rtype: int
    """
    return self._incr_decr('incr', key, value, default, time)
:param key: Key's name
:type key: six.string_types
:param value: Number to be incremented
:type value: int
:param default: Default value if key does not exist.
:type default: int
:param time: Time in seconds to expire key.
:type time: int
:return: Actual value of the key on server
:rtype: int |
def halt(self):
    """Halt the current endpoint.

    The underlying halt call is expected to fail with EBADMSG on success;
    any other IOError is propagated, and a clean return (no error at all)
    is reported as a ValueError.
    """
    got_expected_error = False
    try:
        self._halt()
    except IOError as exc:
        got_expected_error = True
        if exc.errno != errno.EBADMSG:
            raise
    if not got_expected_error:
        raise ValueError('halt did not return EBADMSG ?')
    self._halted = True
def play(events, speed_factor=1.0):
    """
    Replays a recorded sequence of events, keeping the original relative
    timing. A ``speed_factor`` <= 0 replays the events as fast as the OS
    allows. Pairs well with `record()`.

    Note: the current keyboard state is cleared at the beginning and restored at
    the end of the function.
    """
    saved_state = stash_state()
    previous_time = None
    for ev in events:
        # Sleep for the recorded gap between events, scaled by speed_factor.
        if previous_time is not None and speed_factor > 0:
            _time.sleep((ev.time - previous_time) / speed_factor)
        previous_time = ev.time
        target = ev.scan_code or ev.name
        if ev.event_type == KEY_DOWN:
            press(target)
        else:
            release(target)
    restore_modifiers(saved_state)
intervals. If speed_factor is <= 0 then the actions are replayed as fast
as the OS allows. Pairs well with `record()`.
Note: the current keyboard state is cleared at the beginning and restored at
the end of the function. |
def function(data, maxt=None):
    """
    Calculate the autocorrelation function for a 1D time series.

    Parameters
    ----------
    data : numpy.ndarray (N,)
        The time series.
    maxt : int, optional
        Maximum lag to compute. Defaults to the full series length.

    Returns
    -------
    rho : numpy.ndarray (N,)
        An autocorrelation function, normalized so that ``rho[0] == 1``.
    """
    series = np.atleast_1d(data)
    assert len(np.shape(series)) == 1, \
        "The autocorrelation function can only by computed " \
        + "on a 1D time series."
    n_lags = len(series) if maxt is None else maxt
    result = np.zeros(n_lags, dtype=float)
    # The C extension fills `result` in place.
    _acor.function(np.array(series, dtype=float), result)
    return result / result[0]
Parameters
----------
data : numpy.ndarray (N,)
The time series.
Returns
-------
rho : numpy.ndarray (N,)
An autocorrelation function. |
def hook_focus_events(self):
    """Install the hooks for focus events.

    Subclasses may override this method to customize focus handling.
    """
    target = self.widget
    target.focusInEvent = self.focusInEvent
    target.focusOutEvent = self.focusOutEvent
This method may be overridden by subclasses as needed. |
def _noise_dict_update(noise_dict):
"""
Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]:The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary
"""
# Create the default dictionary
default_dict = {'task_sigma': 0, 'drift_sigma': 0, 'auto_reg_sigma': 1,
'auto_reg_rho': [0.5], 'ma_rho': [0.0],
'physiological_sigma': 0, 'sfnr': 90, 'snr': 50,
'max_activity': 1000, 'voxel_size': [1.0, 1.0, 1.0],
'fwhm': 4, 'matched': 1}
# Check what noise is in the dictionary and add if necessary. Numbers
# determine relative proportion of noise
for default_key in default_dict:
if default_key not in noise_dict:
noise_dict[default_key] = default_dict[default_key]
return noise_dict | Update the noise dictionary parameters with default values, in case any
were missing
Parameters
----------
noise_dict : dict
A dictionary specifying the types of noise in this experiment. The
noise types interact in important ways. First, all noise types
ending with sigma (e.g. motion sigma) are mixed together in
_generate_temporal_noise. These values describe the proportion of
mixing of these elements. However critically, SFNR is the
parameter that describes how much noise these components contribute
to the brain. If you set the noise dict to matched then it will fit
the parameters to match the participant as best as possible.
The noise variables are as follows:
snr [float]: Ratio of MR signal to the spatial noise
sfnr [float]: Ratio of the MR signal to the temporal noise. This is the
total variability that the following sigmas 'sum' to:
task_sigma [float]: Size of the variance of task specific noise
drift_sigma [float]: Size of the variance of drift noise
auto_reg_sigma [float]: Size of the variance of autoregressive
noise. This is an ARMA process where the AR and MA components can be
separately specified
physiological_sigma [float]: Size of the variance of physiological
noise
auto_reg_rho [list]: The coefficients of the autoregressive
components you are modeling
ma_rho [list]:The coefficients of the moving average components you
are modeling
max_activity [float]: The max value of the averaged brain in order
to reference the template
voxel_size [list]: The mm size of the voxels
fwhm [float]: The gaussian smoothing kernel size (mm)
matched [bool]: Specify whether you are fitting the noise parameters
The volumes of brain noise that are generated have smoothness
specified by 'fwhm'
Returns
-------
noise_dict : dict
Updated dictionary |
def remove_nopairs(in_bam, out_dir, config):
    """Remove any reads without both pairs present in the file.
    """
    runner = broad.runner_from_config(config)
    base, ext = os.path.splitext(os.path.basename(in_bam))
    out_bam = os.path.join(out_dir, "{}-safepair{}".format(base, ext))
    if not utils.file_exists(out_bam):
        # First pass: count occurrences of each paired read name.
        pair_counts = collections.defaultdict(int)
        with pysam.Samfile(in_bam, "rb") as in_pysam:
            for read in in_pysam:
                if read.is_paired:
                    pair_counts[read.qname] += 1
        # Second pass: keep only reads whose mate is also present.
        with pysam.Samfile(in_bam, "rb") as in_pysam:
            with file_transaction(out_bam) as tx_out_bam:
                with pysam.Samfile(tx_out_bam, "wb", template=in_pysam) as out_pysam:
                    for read in in_pysam:
                        if pair_counts[read.qname] == 2:
                            out_pysam.write(read)
    return runner.run_fn("picard_sort", out_bam, "queryname")
def migrate_doc(self, doc):
    """
    Upgrade ``doc`` from its current version to the target version
    and return it.

    Each migration function is applied in order, and the document's
    version attribute is stamped after every step.
    """
    current_version = doc.get(self.version_attribute_name, 0)
    for migrate in self._get_migrate_funcs(current_version, self.target_version):
        migrate(self, doc)
        doc[self.version_attribute_name] = migrate.target
    return doc
and return it. |
def get(self, key):
    """
    Return the list of values associated with ``key``, or ``None`` if this
    multimap does not contain the key.

    **Warning:
    This method uses hashCode and equals of the binary form of the key, not the actual implementations of hashCode
    and equals defined in the key's class.**

    **Warning-2:
    The returned list is NOT backed by the multimap, so changes to the map
    are not reflected in the list, and vice-versa.**

    :param key: (object), the specified key.
    :return: (Sequence), the list of the values associated with the specified key.
    """
    check_not_none(key, "key can't be None")
    serialized_key = self._to_data(key)
    return self._encode_invoke_on_key(multi_map_get_codec, serialized_key,
                                      key=serialized_key, thread_id=thread_id())
**Warning:
This method uses hashCode and equals of the binary form of the key, not the actual implementations of hashCode
and equals defined in the key's class.**
**Warning-2:
The list is NOT backed by the multimap, so changes to the map are not reflected in the collection, and
vice-versa.**
:param key: (object), the specified key.
:return: (Sequence), the list of the values associated with the specified key. |
def location(args):
    """
    %prog location bedfile fastafile

    Given SNP locations, summarize the locations in the sequences. For example,
    find out if there are more 3`-SNPs than 5`-SNPs.
    """
    from jcvi.formats.bed import BedLine
    from jcvi.graphics.histogram import stem_leaf_plot
    p = OptionParser(location.__doc__)
    p.add_option("--dist", default=100, type="int",
                 help="Distance cutoff to call 5` and 3` [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    bedfile, fastafile = args
    dist = opts.dist
    sizes = Sizes(fastafile).mapping
    fiveprime = threeprime = total = 0
    percentages = []
    # Use a context manager so the bed file handle is always closed
    # (the original left it open).
    with open(bedfile) as fp:
        for row in fp:
            b = BedLine(row)
            pos = b.start
            size = sizes[b.seqid]
            if pos < dist:
                fiveprime += 1
            if size - pos < dist:
                threeprime += 1
            total += 1
            # Position expressed as a percentage along the sequence.
            percentages.append(100 * pos / size)
    m = "Five prime (within {0}bp of start codon): {1}\n".format(dist, fiveprime)
    m += "Three prime (within {0}bp of stop codon): {1}\n".format(dist, threeprime)
    m += "Total: {0}".format(total)
    print(m, file=sys.stderr)
    bins = 10
    title = "Locations within the gene [0=Five-prime, 100=Three-prime]"
    stem_leaf_plot(percentages, 0, 100, bins, title=title)
Given SNP locations, summarize the locations in the sequences. For example,
find out if there are more 3`-SNPs than 5`-SNPs. |
def get(self, *args, **kwargs):
    """
    Works just like the default Manager's :func:`get` method, but
    you can pass an additional keyword argument named ``path`` specifying
    the full path of the object you want to retrieve, e.g.
    ``"path/to/folder/readme.txt"``.
    """
    # Translate a `path` keyword into the equivalent filter arguments
    # before delegating to the standard Manager.get().
    if 'path' in kwargs:
        kwargs = self.get_filter_args_with_path(True, **kwargs)
    return super(FileNodeManager, self).get(*args, **kwargs)
def validate(self, institute, case, user, link, variant, validate_type):
    """Mark validation status for a variant.

    Arguments:
        institute (dict): A Institute object
        case (dict): Case object
        user (dict): A User object
        link (str): The url to be used in the event
        variant (dict): A variant object
        validate_type(str): The outcome of validation.
                            choices=('True positive', 'False positive')

    Returns:
        updated_variant(dict): the variant with its validation field set,
            or None when validate_type is not a recognized option.
    """
    # Refuse unknown validation outcomes instead of writing bad data.
    # (idiomatic `not in` rather than `not x in y`)
    if validate_type not in SANGER_OPTIONS:
        LOG.warning("Invalid validation string: %s", validate_type)
        LOG.info("Validation options: %s", ', '.join(SANGER_OPTIONS))
        return
    updated_variant = self.variant_collection.find_one_and_update(
        {'_id': variant['_id']},
        {'$set': {'validation': validate_type}},
        return_document=pymongo.ReturnDocument.AFTER
    )
    # Record the validation as an audit event linked to the variant.
    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='variant',
        verb='validate',
        variant=variant,
        subject=variant['display_name'],
    )
    return updated_variant
Arguments:
institute (dict): A Institute object
case (dict): Case object
user (dict): A User object
link (str): The url to be used in the event
variant (dict): A variant object
validate_type(str): The outcome of validation.
choices=('True positive', 'False positive')
Returns:
updated_variant(dict) |
def get_subscriptions(self):
    """
    :calls: `GET /users/:user/subscriptions <http://developer.github.com/v3/activity/watching>`_
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
    """
    subscriptions_url = self.url + "/subscriptions"
    return github.PaginatedList.PaginatedList(
        github.Repository.Repository,
        self._requester,
        subscriptions_url,
        None
    )
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository` |
def is_domain_equal(self, other):
    """
    Return True when the two Partial Orderings have the same
    generalization structure, i.e. their domains compare equal.
    """
    # NOTE(review): this intentionally uses ``==`` (value equality), not
    # ``is`` — the original comment about "the same instance of memory"
    # overstated the check.
    return self.get_domain() == other.get_domain()
structure. |
def run(self, run_priority: Optional[int] = None):
    """
    Run a pyquil program on the QPU.

    This formats the classified data from the QPU server by stacking measured bits into
    an array of shape (trials, classical_addresses). The mapping of qubit to
    classical address is backed out from MEASURE instructions in the program, so
    only do measurements where there is a 1-to-1 mapping between qubits and classical
    addresses.

    :param run_priority: The priority with which to insert jobs into the QPU queue. Lower
                         integers correspond to higher priority. If not specified, the QPU
                         object's default priority is used.
    :return: The QPU object itself.
    """
    # This prevents a common error where users expect QVM.run()
    # and QPU.run() to be interchangeable. QPU.run() needs the
    # supplied executable to have been compiled, QVM.run() does not.
    if isinstance(self._executable, Program):
        raise TypeError("It looks like you have provided a Program where an Executable"
                        " is expected. Please use QuantumComputer.compile() to compile"
                        " your program.")
    super().run()
    # Package the compiled program with its patch values under a fresh job id.
    request = QPURequest(program=self._executable.program,
                         patch_values=self._build_patch_values(),
                         id=str(uuid.uuid4()))
    # An explicit run_priority takes precedence over the instance default.
    job_priority = run_priority if run_priority is not None else self.priority
    job_id = self.client.call('execute_qpu_request', request=request, user=self.user,
                              priority=job_priority)
    # Fetch result buffers for the submitted job; presumably blocks until
    # the job completes — confirm against the client implementation.
    results = self._get_buffers(job_id)
    ro_sources = self._executable.ro_sources
    if results:
        bitstrings = _extract_bitstrings(ro_sources, results)
    elif not ro_sources:
        # No readout sources means no MEASURE instructions: warn and
        # return an empty bitstring array rather than None.
        warnings.warn("You are running a QPU program with no MEASURE instructions. "
                      "The result of this program will always be an empty array. Are "
                      "you sure you didn't mean to measure some of your qubits?")
        bitstrings = np.zeros((0, 0), dtype=np.int64)
    else:
        # Readout sources exist but no results came back.
        bitstrings = None
    self._bitstrings = bitstrings
    self._last_results = results
    return self
This formats the classified data from the QPU server by stacking measured bits into
an array of shape (trials, classical_addresses). The mapping of qubit to
classical address is backed out from MEASURE instructions in the program, so
only do measurements where there is a 1-to-1 mapping between qubits and classical
addresses.
:param run_priority: The priority with which to insert jobs into the QPU queue. Lower
integers correspond to higher priority. If not specified, the QPU
object's default priority is used.
:return: The QPU object itself. |
def add_approximant_arg(parser, default=None, help=None):
    """Adds an approximant argument to the given parser.

    Parameters
    ----------
    parser : ArgumentParser
        The argument parser to add the argument to.
    default : {None, str}
        Specify a default for the approximant argument. Defaults to None.
    help : {None, str}
        Provide a custom help message. If None, will use a descriptive message
        on how to specify the approximant.
    """
    if help is None:
        # Default help text describing the conditional-approximant syntax.
        help = ("The approximant(s) to use. Multiple approximants to use "
                "in different regions may be provided. If multiple "
                "approximants are provided, every one but the last must be "
                "be followed by a conditional statement defining where that "
                "approximant should be used. Conditionals can be any boolean "
                "test understood by numpy. For example, 'Apprx:(mtotal > 4) & "
                "(mchirp <= 5)' would use approximant 'Apprx' where total mass "
                "is > 4 and chirp mass is <= 5. "
                "Conditionals are applied in order, with each successive one "
                "only applied to regions not covered by previous arguments. "
                "For example, `'TaylorF2:mtotal < 4' 'IMRPhenomD:mchirp < 3'` "
                "would result in IMRPhenomD being used where chirp mass is < 3 "
                "and total mass is >= 4. The last approximant given may use "
                "'else' as the conditional or include no conditional. In either "
                "case, this will cause the last approximant to be used in any "
                "remaning regions after all the previous conditionals have been "
                "applied. For the full list of possible parameters to apply "
                "conditionals to, see WaveformArray.default_fields(). Math "
                "operations may also be used on parameters; syntax is python, "
                "with any operation recognized by numpy.")
    parser.add_argument("--approximant", nargs='+', type=str, default=default,
                        metavar='APPRX[:COND]', help=help)
Parameters
----------
parser : ArgumentParser
The argument parser to add the argument to.
default : {None, str}
Specify a default for the approximant argument. Defaults to None.
help : {None, str}
Provide a custom help message. If None, will use a descriptive message
on how to specify the approximant. |
def SetTimezone(self, timezone):
    """Sets the timezone.

    Args:
        timezone (str): timezone name, e.g. 'UTC'.

    Raises:
        ValueError: if the timezone is not supported.
    """
    # An empty or None value leaves the current timezone untouched.
    if not timezone:
        return
    try:
        self._timezone = pytz.timezone(timezone)
    except pytz.UnknownTimeZoneError:
        raise ValueError('Unsupported timezone: {0:s}'.format(timezone))
Args:
timezone (str): timezone.
Raises:
ValueError: if the timezone is not supported. |
def to_dms(angle, style='dms'):
    """Convert a decimal angle to degrees, minutes and possibly seconds.

    Args:
        angle (float): Angle to convert
        style (str): 'dms' for (degrees, minutes, seconds);
            'dm' for (degrees, fractional minutes)

    Returns:
        tuple of int: Angle converted to degrees, minutes and possibly seconds

    Raises:
        ValueError: Unknown value for ``style``
    """
    negative = angle < 0
    sign = -1 if negative else 1
    # Work in absolute seconds, then split back out with divmod.
    total_seconds = abs(angle) * 3600
    minutes, seconds = divmod(total_seconds, 60)
    degrees, minutes = divmod(minutes, 60)
    if style == 'dms':
        return tuple(sign * abs(part)
                     for part in (int(degrees), int(minutes), seconds))
    if style == 'dm':
        return tuple(sign * abs(part)
                     for part in (int(degrees), minutes + seconds / 60))
    raise ValueError('Unknown style type %r' % style)
Args:
angle (float): Angle to convert
style (str): Return fractional or whole minutes values
Returns:
tuple of int: Angle converted to degrees, minutes and possibly seconds
Raises:
ValueError: Unknown value for ``style`` |
def apply_weight_drop(block, local_param_regex, rate, axes=(),
                      weight_dropout_mode='training'):
    """Apply weight drop to the parameter of a block.

    Parameters
    ----------
    block : Block or HybridBlock
        The block whose parameter is to be applied weight-drop.
    local_param_regex : str
        The regex for parameter names used in the self.params.get(), such as 'weight'.
    rate : float
        Fraction of the input units to drop. Must be a number between 0 and 1.
    axes : tuple of int, default ()
        The axes on which dropout mask is shared. If empty, regular dropout is applied.
    weight_dropout_mode : {'training', 'always'}, default 'training'
        Whether the weight dropout should be applied only at training time, or always be applied.
    """
    if not rate:
        # A zero/None rate is a no-op; leave the block untouched.
        return
    existing_params = _find_params(block, local_param_regex)
    for (local_param_name, param), \
            (ref_params_list, ref_reg_params_list) in existing_params.items():
        dropped_param = WeightDropParameter(param, rate, weight_dropout_mode, axes)
        # Rewire every parameter dict that referenced the original parameter.
        for ref_params in ref_params_list:
            ref_params[param.name] = dropped_param
        for ref_reg_params in ref_reg_params_list:
            ref_reg_params[local_param_name] = dropped_param
        # Also patch any direct attribute on the block that holds the
        # parameter (scalar, list/tuple, or dict container).
        if hasattr(block, local_param_name):
            local_attr = getattr(block, local_param_name)
            if local_attr == param:
                local_attr = dropped_param
            elif isinstance(local_attr, (list, tuple)):
                if isinstance(local_attr, tuple):
                    local_attr = list(local_attr)
                for i, v in enumerate(local_attr):
                    if v == param:
                        local_attr[i] = dropped_param
            elif isinstance(local_attr, dict):
                # BUG FIX: iterating a dict yields keys only, so the
                # original ``for k, v in local_attr:`` unpacking was
                # broken. Use .items() to get key/value pairs.
                for k, v in local_attr.items():
                    if v == param:
                        local_attr[k] = dropped_param
            else:
                continue
            if local_attr:
                # Bypass Block.__setattr__ so re-assignment does not
                # re-trigger parameter registration.
                super(Block, block).__setattr__(local_param_name, local_attr)
Parameters
----------
block : Block or HybridBlock
The block whose parameter is to be applied weight-drop.
local_param_regex : str
The regex for parameter names used in the self.params.get(), such as 'weight'.
rate : float
Fraction of the input units to drop. Must be a number between 0 and 1.
axes : tuple of int, default ()
The axes on which dropout mask is shared. If empty, regular dropout is applied.
weight_drop_mode : {'training', 'always'}, default 'training'
Whether the weight dropout should be applied only at training time, or always be applied.
Examples
--------
>>> net = gluon.rnn.LSTM(10, num_layers=2, bidirectional=True)
>>> gluonnlp.model.apply_weight_drop(net, r'.*h2h_weight', 0.5)
>>> net.collect_params()
lstm0_ (
Parameter lstm0_l0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_i2h_weight (shape=(40, 0), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r0_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r0_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r0_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_l1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_l1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_l1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_i2h_weight (shape=(40, 20), dtype=<class 'numpy.float32'>)
WeightDropParameter lstm0_r1_h2h_weight (shape=(40, 10), dtype=<class 'numpy.float32'>, \
rate=0.5, mode=training)
Parameter lstm0_r1_i2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
Parameter lstm0_r1_h2h_bias (shape=(40,), dtype=<class 'numpy.float32'>)
)
>>> ones = mx.nd.ones((3, 4, 5))
>>> net.initialize()
>>> with mx.autograd.train_mode():
... net(ones).max().asscalar() != net(ones).max().asscalar()
True |
def _write_local_schema_file(self, cursor):
"""
Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format.
"""
schema = []
tmp_schema_file_handle = NamedTemporaryFile(delete=True)
for name, type in zip(cursor.column_names, cursor.column_types):
schema.append(self.generate_schema_dict(name, type))
json_serialized_schema = json.dumps(schema).encode('utf-8')
tmp_schema_file_handle.write(json_serialized_schema)
return {self.schema_filename: tmp_schema_file_handle} | Takes a cursor, and writes the BigQuery schema for the results to a
local file system.
:return: A dictionary where key is a filename to be used as an object
name in GCS, and values are file handles to local files that
contains the BigQuery schema fields in .json format. |
def _reorient_3d(image):
    """
    Reorganize the data for a 3d nifti.

    Builds a new volume whose x, y, z axes correspond to the LR (sagittal),
    PA (coronal) and IS (axial) directions of the input image, filled one
    axial slice at a time.
    """
    # Create empty array where x,y,z correspond to LR (sagittal), PA (coronal), IS (axial) directions and the size
    # of the array in each direction is the same with the corresponding direction of the input image.
    new_image = numpy.zeros([image.dimensions[image.sagittal_orientation.normal_component],
                             image.dimensions[image.coronal_orientation.normal_component],
                             image.dimensions[image.axial_orientation.normal_component]],
                            dtype=image.nifti_data.dtype)
    # Fill the new image with the values of the input image but with matching the orientation with x,y,z
    if image.coronal_orientation.y_inverted:
        # NOTE(review): axial slices are read in reverse order here —
        # presumably to undo the inverted coronal Y axis; confirm against
        # the orientation/get_slice implementation.
        for i in range(new_image.shape[2]):
            new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
                                                                            new_image.shape[2] - 1 - i).original_data))
    else:
        for i in range(new_image.shape[2]):
            new_image[:, :, i] = numpy.fliplr(numpy.squeeze(image.get_slice(SliceType.AXIAL,
                                                                            i).original_data))
    return new_image
def richardson(vals, k, c=None):
    """Richardson extrapolation with parameter estimation.

    When ``c`` is not supplied it is estimated from ``vals`` via
    ``richardson_parameter``.
    """
    factor = richardson_parameter(vals, k) if c is None else c
    return vals[k] - (vals[k] - vals[k - 1]) / factor
def _parse_block(self, block, node):
'''
Return:
Node
'''
assert block[self.get_key()] == 'Block'
if self.is_compact_ast:
statements = block['statements']
else:
statements = block[self.get_children('children')]
for statement in statements:
node = self._parse_statement(statement, node)
return node | Return:
Node |
def _check_iso9660_filename(fullname, interchange_level):
    # type: (bytes, int) -> None
    '''
    A function to check that a file identifier conforms to the ISO9660 rules
    for a particular interchange level.

    Parameters:
     fullname - The name to check.
     interchange_level - The interchange level to check against.
    Returns:
     Nothing.
    '''
    # Split per Ecma-119 7.5 into (name, extension, version).
    name, extension, version = _split_iso9660_filename(fullname)
    # Ecma-119 requires a trailing ';version', but discs in the wild
    # (Ubuntu 14.04 Desktop i386, FreeBSD 10.1 amd64) omit it, so an empty
    # version is tolerated; when present it must be in the valid range.
    if version != b'' and (int(version) < 1 or int(version) > 32767):
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a version between 1 and 32767')
    # Ecma-119 7.5.1: at least one of name/extension must be non-empty.
    if not name and not extension:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must have a non-empty name or extension')
    # The single separating semicolon was consumed by the split above, so
    # any remaining one makes the name invalid.
    if b';' in name or b';' in extension:
        raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames must contain exactly one semicolon')
    if interchange_level == 1:
        # Ecma-119 10.1: level 1 allows at most 8 characters in the name
        # and 3 in the extension.
        if len(name) > 8 or len(extension) > 3:
            raise pycdlibexception.PyCdlibInvalidInput('ISO9660 filenames at interchange level 1 cannot have more than 8 characters or 3 characters in the extension')
    # Length limits for other levels (Ecma-119 7.5.2) are deliberately not
    # enforced: real discs exceed them.
    if interchange_level < 4:
        # Names may only contain d1-characters (empirically A-Z, 0-9, _).
        _check_d1_characters(name)
        _check_d1_characters(extension)
for a particular interchange level.
Parameters:
fullname - The name to check.
interchange_level - The interchange level to check against.
Returns:
Nothing. |
def _link_bam_file(in_file, new_dir, data):
    """Provide symlinks of BAM file and existing indexes if needed.
    """
    new_dir = utils.safe_makedir(new_dir)
    out_file = os.path.join(new_dir, os.path.basename(in_file))
    if utils.file_exists(out_file):
        return out_file
    out_file = os.path.join(new_dir, "%s-prealign.bam" % dd.get_sample_name(data))
    if data.get("cwl_keys"):
        if utils.file_exists(in_file + ".bai"):
            # Has indexes, we're okay to go with the original file
            out_file = in_file
        else:
            utils.copy_plus(in_file, out_file)
    else:
        utils.symlink_plus(in_file, out_file)
    return out_file
def fig3(args):
"""
%prog fig3 chrA02,A02,C2,chrC02 chr.sizes all.bed data
Napus Figure 3 displays alignments between quartet chromosomes, inset
with read histograms.
"""
from jcvi.formats.bed import Bed
p = OptionParser(fig3.__doc__)
p.add_option("--gauge_step", default=10000000, type="int",
help="Step size for the base scale")
opts, args, iopts = p.set_image_options(args, figsize="12x9")
if len(args) != 4:
sys.exit(not p.print_help())
chrs, sizes, bedfile, datadir = args
gauge_step = opts.gauge_step
diverge = iopts.diverge
rr, gg = diverge
chrs = [[x] for x in chrs.split(",")]
sizes = Sizes(sizes).mapping
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
chr_sizes, chr_sum_sizes, ratio = calc_ratio(chrs, sizes)
# Synteny panel
seqidsfile = make_seqids(chrs)
klayout = make_layout(chrs, chr_sum_sizes, ratio, template_f3a, shift=.05)
height = .07
r = height / 4
K = Karyotype(fig, root, seqidsfile, klayout, gap=gap,
height=height, lw=2, generank=False, sizes=sizes,
heightpad=r, roundrect=True, plot_label=False)
# Chromosome labels
for kl in K.layout:
if kl.empty:
continue
lx, ly = kl.xstart, kl.y
if lx < .11:
lx += .1
ly += .06
label = kl.label
root.text(lx - .015, ly, label, fontsize=15,
ha="right", va="center")
# Inset with datafiles
datafiles = ("chrA02.bzh.forxmgr", "parent.A02.per10kb.forxmgr",
"parent.C2.per10kb.forxmgr", "chrC02.bzh.forxmgr")
datafiles = [op.join(datadir, x) for x in datafiles]
tracks = K.tracks
hlfile = op.join(datadir, "bzh.regions.forhaibao")
xy_axes = []
for t, datafile in zip(tracks, datafiles):
ax = make_affix_axis(fig, t, -r, height=2 * r)
xy_axes.append(ax)
chr = t.seqids[0]
xy = XYtrack(ax, datafile, color="lightslategray")
start, end = 0, t.total
xy.interpolate(end)
xy.cap(ymax=40)
xy.import_hlfile(hlfile, chr, diverge=diverge)
xy.draw()
ax.set_xlim(start, end)
gauge_ax = make_affix_axis(fig, t, -r)
adjust_spines(gauge_ax, ["bottom"])
setup_gauge_ax(gauge_ax, start, end, gauge_step)
# Converted gene tracks
ax_Ar = make_affix_axis(fig, tracks[1], r, height=r/2)
ax_Co = make_affix_axis(fig, tracks[2], r, height=r/2)
order = Bed(bedfile).order
for asterisk in (False, True):
conversion_track(order, "data/Genes.Converted.seuil.0.6.AtoC.txt",
0, "A02", ax_Ar, rr, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.AtoC.txt",
1, "C2", ax_Co, gg, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.CtoA.txt",
0, "A02", ax_Ar, gg, ypos=1, asterisk=asterisk)
conversion_track(order, "data/Genes.Converted.seuil.0.6.CtoA.txt",
1, "C2", ax_Co, rr, ypos=1, asterisk=asterisk)
Ar, Co = xy_axes[1:3]
annotations = ((Ar, "Bra028920 Bra028897", "center", "1DAn2+"),
(Ar, "Bra020081 Bra020171", "right", "2DAn2+"),
(Ar, "Bra020218 Bra020286", "left", "3DAn2+"),
(Ar, "Bra008143 Bra008167", "left", "4DAn2-"),
(Ar, "Bra029317 Bra029251", "right", "5DAn2+ (GSL)"),
(Co, "Bo2g001000 Bo2g001300", "left", "1DCn2-"),
(Co, "Bo2g018560 Bo2g023700", "right", "2DCn2-"),
(Co, "Bo2g024450 Bo2g025390", "left", "3DCn2-"),
(Co, "Bo2g081060 Bo2g082340", "left", "4DCn2+"),
(Co, "Bo2g161510 Bo2g164260", "right", "5DCn2-"))
for ax, genes, ha, label in annotations:
g1, g2 = genes.split()
x1, x2 = order[g1][1].start, order[g2][1].start
if ha == "center":
x = (x1 + x2) / 2 * .8
elif ha == "left":
x = x2
else:
x = x1
label = r"\textit{{{0}}}".format(label)
color = rr if "+" in label else gg
ax.text(x, 30, label, color=color, fontsize=9, ha=ha, va="center")
ax_Ar.set_xlim(0, tracks[1].total)
ax_Ar.set_ylim(-1, 1)
ax_Co.set_xlim(0, tracks[2].total)
ax_Co.set_ylim(-1, 1)
# Plot coverage in resequencing lines
gstep = 5000000
order = "swede,kale,h165,yudal,aviso,abu,bristol".split(",")
labels_dict = {"h165": "Resynthesized (H165)", "abu": "Aburamasari"}
hlsuffix = "regions.forhaibao"
chr1, chr2 = "chrA02", "chrC02"
t1, t2 = tracks[0], tracks[-1]
s1, s2 = sizes[chr1], sizes[chr2]
canvas1 = (t1.xstart, .75, t1.xend - t1.xstart, .2)
c = Coverage(fig, root, canvas1, chr1, (0, s1), datadir,
order=order, gauge=None, plot_chr_label=False,
gauge_step=gstep, palette="gray",
cap=40, hlsuffix=hlsuffix, labels_dict=labels_dict,
diverge=diverge)
yys = c.yys
x1, x2 = .37, .72
tip = .02
annotations = ((x1, yys[2] + .3 * tip, tip, tip / 2, "FLC"),
(x1, yys[3] + .6 * tip, tip, tip / 2, "FLC"),
(x1, yys[5] + .6 * tip, tip, tip / 2, "FLC"),
(x2, yys[0] + .9 * tip, -1.2 * tip, 0, "GSL"),
(x2, yys[4] + .9 * tip, -1.2 * tip, 0, "GSL"),
(x2, yys[6] + .9 * tip, -1.2 * tip, 0, "GSL"))
arrowprops=dict(facecolor='black', shrink=.05, frac=.5,
width=1, headwidth=4)
for x, y, dx, dy, label in annotations:
label = r"\textit{{{0}}}".format(label)
root.annotate(label, xy=(x, y), xytext=(x + dx, y + dy),
arrowprops=arrowprops, color=rr, fontsize=9,
ha="center", va="center")
canvas2 = (t2.xstart, .05, t2.xend - t2.xstart, .2)
Coverage(fig, root, canvas2, chr2, (0, s2), datadir,
order=order, gauge=None, plot_chr_label=False,
gauge_step=gstep, palette="gray",
cap=40, hlsuffix=hlsuffix, labels_dict=labels_dict,
diverge=diverge)
pad = .03
labels = ((.1, .67, "A"), (t1.xstart - 3 * pad, .95 + pad, "B"),
(t2.xstart - 3 * pad, .25 + pad, "C"))
panel_labels(root, labels)
normalize_axes(root)
image_name = "napus-fig3." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts) | %prog fig3 chrA02,A02,C2,chrC02 chr.sizes all.bed data
Napus Figure 3 displays alignments between quartet chromosomes, inset
with read histograms. |
def send_command(self, cmd):
"""
Send a command to the remote SSH server.
:param cmd: The command to send
"""
logger.debug('Sending {0} command.'.format(cmd))
self.comm_chan.sendall(cmd + '\n') | Send a command to the remote SSH server.
:param cmd: The command to send |
def rgevolve(self, scale_out, **kwargs):
"""Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`."""
self._check_initial()
return rge.smeft_evolve(C_in=self.C_in,
scale_high=self.scale_high,
scale_in=self.scale_in,
scale_out=scale_out,
**kwargs) | Solve the SMEFT RGEs from the initial scale to `scale_out`.
Returns a dictionary with parameters and Wilson coefficients at
`scale_out`. Additional keyword arguments will be passed to
the ODE solver `scipy.integrate.odeint`. |
def create(self, rs_params):
"""create new replica set
Args:
rs_params - replica set configuration
Return repl_id which can use to take the replica set
"""
repl_id = rs_params.get('id', None)
if repl_id is not None and repl_id in self:
raise ReplicaSetError(
"replica set with id={id} already exists".format(id=repl_id))
repl = ReplicaSet(rs_params)
self[repl.repl_id] = repl
return repl.repl_id | create new replica set
Args:
rs_params - replica set configuration
Return repl_id which can use to take the replica set |
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
"""Return the product of ``self``.
See Also
--------
numpy.prod
sum
"""
return self.elem.__array_ufunc__(
np.multiply, 'reduce', self.elem,
axis=axis, dtype=dtype, out=(out,), keepdims=keepdims) | Return the product of ``self``.
See Also
--------
numpy.prod
sum |
async def update_firmware(port: str,
firmware_file_path: str,
loop: Optional[asyncio.AbstractEventLoop])\
-> Tuple[str, Tuple[bool, str]]:
"""
Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude.
"""
ports_before_update = await _discover_ports()
config_file_path = os.path.join(package_root,
'config', 'modules', 'avrdude.conf')
kwargs: Dict[str, Any] = {
'stdout': asyncio.subprocess.PIPE,
'stderr': asyncio.subprocess.PIPE
}
if loop:
kwargs['loop'] = loop
proc = await asyncio.create_subprocess_exec(
'avrdude', '-C{}'.format(config_file_path), '-v',
'-p{}'.format(PART_NO),
'-c{}'.format(PROGRAMMER_ID),
'-P{}'.format(port),
'-b{}'.format(BAUDRATE), '-D',
'-Uflash:w:{}:i'.format(firmware_file_path),
**kwargs)
await proc.wait()
_result = await proc.communicate()
result = _result[1].decode()
avrdude_res = _format_avrdude_response(result)
if avrdude_res[0]:
log.debug(result)
else:
log.error("Failed to update module firmware for {}: {}"
.format(port, avrdude_res[1]))
new_port = await _port_on_mode_switch(ports_before_update)
log.info("New port: {}".format(new_port))
return new_port, avrdude_res | Run avrdude firmware upload command. Switch back to normal module port
Note: For modules with old bootloader, the kernel could assign the module
a new port after the update (since the board is automatically reset).
Scan for such a port change and use the appropriate port.
Returns a tuple of the new port to communicate on (or None if it was not
found) and a tuple of success and message from avrdude. |
def _ParseShVariables(self, lines):
"""Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values.
"""
paths = {}
for line in lines:
for entry in line:
if "=" in entry:
# Pad out the list so that it's always 2 elements, even if the split
# failed.
target, vals = (entry.split("=", 1) + [""])[:2]
if vals:
path_vals = vals.split(":")
else:
path_vals = []
self._ExpandPath(target, path_vals, paths)
elif entry not in self._SH_CONTINUATION:
# Stop processing the line unless the entry might allow paths to still
# be set, e.g.
# reserved words: "export"
# conditions: { PATH=VAL } && PATH=:$PATH || PATH=.
break
return paths | Extract env_var and path values from sh derivative shells.
Iterates over each line, word by word searching for statements that set the
path. These are either variables, or conditions that would allow a variable
to be set later in the line (e.g. export).
Args:
lines: A list of lines, each of which is a list of space separated words.
Returns:
a dictionary of path names and values. |
def button_number(self):
"""The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.Event.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError
"""
if self.type != EventType.TABLET_PAD_BUTTON:
raise AttributeError(_wrong_prop.format(self.type))
return self._libinput.libinput_event_tablet_pad_get_button_number(
self._handle) | The button number that triggered this event, starting at 0.
For events that are not of type
:attr:`~libinput.constant.Event.TABLET_PAD_BUTTON`,
this property raises :exc:`AttributeError`.
Note that the number returned is a generic sequential button number
and not a semantic button code as defined in ``linux/input.h``.
See `Tablet pad button numbers`_ for more details.
Returns:
int: The button triggering this event.
Raises:
AttributeError |
def surfacemass(self,R,log=False):
"""
NAME:
surfacemass
PURPOSE:
return the surface density profile at this R
INPUT:
R - Galactocentric radius (/ro)
log - if True, return the log (default: False)
OUTPUT:
Sigma(R)
HISTORY:
2010-03-26 - Written - Bovy (NYU)
"""
if log:
return -R/self._params[0]
else:
return sc.exp(-R/self._params[0]) | NAME:
surfacemass
PURPOSE:
return the surface density profile at this R
INPUT:
R - Galactocentric radius (/ro)
log - if True, return the log (default: False)
OUTPUT:
Sigma(R)
HISTORY:
2010-03-26 - Written - Bovy (NYU) |
def __runTaskMainLoop(self, numIters, learningOffAt=None):
""" Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number
"""
## Reset sequence states in the model, so it starts looking for a new
## sequence
self._model.resetSequenceStates()
self._currentRecordIndex = -1
while True:
# If killed by a terminator, stop running
if self._isKilled:
break
# If job stops or hypersearch ends, stop running
if self._isCanceled:
break
# If the process is about to be killed, set as orphaned
if self._isInterrupted.isSet():
self.__setAsOrphaned()
break
# If model is mature, stop running ONLY IF we are not the best model
# for the job. Otherwise, keep running so we can keep returning
# predictions to the user
if self._isMature:
if not self._isBestModel:
self._cmpReason = self._jobsDAO.CMPL_REASON_STOPPED
break
else:
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
# Turn off learning?
if learningOffAt is not None \
and self._currentRecordIndex == learningOffAt:
self._model.disableLearning()
# Read input record. Note that any failure here is a critical JOB failure
# and results in the job being immediately canceled and marked as
      # failed. The runModelXXX code in hypersearch.utils, if it sees an
# exception of type utils.JobFailException, will cancel the job and
# copy the error message into the job record.
try:
inputRecord = self._inputSource.getNextRecordDict()
if self._currentRecordIndex < 0:
self._inputSource.setTimeout(10)
except Exception, e:
raise utils.JobFailException(ErrorCodes.streamReading, str(e.args),
traceback.format_exc())
if inputRecord is None:
# EOF
self._cmpReason = self._jobsDAO.CMPL_REASON_EOF
break
if inputRecord:
# Process input record
self._currentRecordIndex += 1
result = self._model.run(inputRecord=inputRecord)
# Compute metrics.
result.metrics = self.__metricMgr.update(result)
# If there are None, use defaults. see MetricsManager.getMetrics()
# TODO remove this when JAVA API server is gone
if not result.metrics:
result.metrics = self.__metricMgr.getMetrics()
# Write the result to the output cache. Don't write encodings, if they
# were computed
if InferenceElement.encodings in result.inferences:
result.inferences.pop(InferenceElement.encodings)
result.sensorInput.dataEncodings = None
self._writePrediction(result)
# Run periodic activities
self._periodic.tick()
if numIters >= 0 and self._currentRecordIndex >= numIters-1:
break
else:
# Input source returned an empty record.
#
# NOTE: This is okay with Stream-based Source (when it times out
# waiting for next record), but not okay with FileSource, which should
# always return either with a valid record or None for EOF.
raise ValueError("Got an empty record from FileSource: %r" %
inputRecord) | Main loop of the OPF Model Runner.
Parameters:
-----------------------------------------------------------------------
recordIterator: Iterator for counting number of records (see _runTask)
learningOffAt: If not None, learning is turned off when we reach this
iteration number |
def create_task(self, list_id, title, assignee_id=None, completed=None, recurrence_type=None, recurrence_count=None, due_date=None, starred=None):
''' Creates a new task with the given information in the list with the given ID '''
return tasks_endpoint.create_task(self, list_id, title, assignee_id=assignee_id, completed=completed, recurrence_type=recurrence_type, recurrence_count=recurrence_count, due_date=due_date, starred=starred) | Creates a new task with the given information in the list with the given ID |
def delete(self, cluster):
"""Deletes the cluster from memory.
:param cluster: cluster to delete
:type cluster: :py:class:`elasticluster.cluster.Cluster`
"""
if cluster.name not in self.clusters:
raise ClusterNotFound(
"Unable to delete non-existent cluster %s" % cluster.name)
del self.clusters[cluster.name] | Deletes the cluster from memory.
:param cluster: cluster to delete
:type cluster: :py:class:`elasticluster.cluster.Cluster` |
def get_size(self, boundary):
"""Returns the size in bytes that this param will be when encoded
with the given boundary."""
if self.filesize is not None:
valuesize = self.filesize
else:
valuesize = len(self.value)
return len(self.encode_hdr(boundary)) + 2 + valuesize | Returns the size in bytes that this param will be when encoded
with the given boundary. |
def masses_of_galaxies_within_ellipses_in_units(self, major_axis : dim.Length, unit_mass='angular',
critical_surface_density=None):
"""Compute the total mass of all galaxies in this plane within a ellipse of specified major-axis.
See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details \
of how this is performed.
Parameters
----------
major_axis : float
The major-axis radius of the ellipse.
units_luminosity : str
The units the luminosity is returned in (eps | counts).
exposure_time : float
The exposure time of the observation, which converts luminosity from electrons per second units to counts.
"""
return list(map(lambda galaxy: galaxy.mass_within_ellipse_in_units(
major_axis=major_axis, unit_mass=unit_mass, kpc_per_arcsec=self.kpc_per_arcsec,
critical_surface_density=critical_surface_density),
self.galaxies)) | Compute the total mass of all galaxies in this plane within a ellipse of specified major-axis.
See *galaxy.angular_mass_within_ellipse* and *mass_profiles.angular_mass_within_ellipse* for details \
of how this is performed.
Parameters
----------
major_axis : float
The major-axis radius of the ellipse.
units_luminosity : str
The units the luminosity is returned in (eps | counts).
exposure_time : float
The exposure time of the observation, which converts luminosity from electrons per second units to counts. |
def _normalize_http_methods(http_method):
"""
    Normalizes Http Methods. Api Gateway allows an Http Method of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods)
"""
if http_method.upper() == 'ANY':
for method in SamApiProvider._ANY_HTTP_METHODS:
yield method.upper()
else:
        yield http_method.upper() | Normalizes Http Methods. Api Gateway allows an Http Method of ANY. This is a special verb to denote all
supported Http Methods on Api Gateway.
:param str http_method: Http method
:yield str: Either the input http_method or one of the _ANY_HTTP_METHODS (normalized Http Methods) |
def filename_items_for_filetype(filenames, filetype_info):
"""Iterator over the filenames matching *filetype_info*."""
matched_files = []
for pattern in filetype_info['file_patterns']:
for filename in match_filenames(filenames, pattern):
if filename in matched_files:
continue
try:
filename_info = parse(
pattern, get_filebase(filename, pattern))
except ValueError:
logger.debug("Can't parse %s with %s.", filename, pattern)
continue
matched_files.append(filename)
yield filename, filename_info | Iterator over the filenames matching *filetype_info*. |
def create(self, deviceType):
"""
Register one or more new device types, each request can contain a maximum of 512KB.
"""
r = self._apiClient.post("api/v0002/device/types", deviceType)
if r.status_code == 201:
return DeviceType(apiClient=self._apiClient, **r.json())
else:
raise ApiException(r) | Register one or more new device types, each request can contain a maximum of 512KB. |
def allocate(self, size, max_time_to_block_ms):
"""
Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes [ignored]
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
io.BytesIO
"""
with self._lock:
# check if we have a free buffer of the right size pooled
if self._free:
return self._free.popleft()
elif self._poolable_size == 0:
return io.BytesIO()
else:
# we are out of buffers and will have to block
buf = None
more_memory = threading.Condition(self._lock)
self._waiters.append(more_memory)
# loop over and over until we have a buffer or have reserved
# enough memory to allocate one
while buf is None:
start_wait = time.time()
more_memory.wait(max_time_to_block_ms / 1000.0)
end_wait = time.time()
if self.wait_time:
self.wait_time.record(end_wait - start_wait)
if self._free:
buf = self._free.popleft()
else:
self._waiters.remove(more_memory)
raise Errors.KafkaTimeoutError(
"Failed to allocate memory within the configured"
" max blocking time")
# remove the condition for this thread to let the next thread
# in line start getting memory
removed = self._waiters.popleft()
assert removed is more_memory, 'Wrong condition'
# signal any additional waiters if there is more memory left
# over for them
if self._free and self._waiters:
self._waiters[0].notify()
# unlock and return the buffer
return buf | Allocate a buffer of the given size. This method blocks if there is not
enough memory and the buffer pool is configured with blocking mode.
Arguments:
size (int): The buffer size to allocate in bytes [ignored]
max_time_to_block_ms (int): The maximum time in milliseconds to
block for buffer memory to be available
Returns:
io.BytesIO |
def prod(self):
"""Summary
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.aggr(
self.expr,
"*",
1,
self.weld_type
),
self.weld_type,
0
) | Summary
Returns:
TYPE: Description |
def update_ipsecpolicy(self, ipsecpolicy, body=None):
"""Updates an IPsecPolicy."""
return self.put(self.ipsecpolicy_path % (ipsecpolicy), body=body) | Updates an IPsecPolicy. |
def capabilities(self):
"""A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities.
"""
caps = []
for cap in DeviceCapability:
if self._libinput.libinput_device_has_capability(self._handle, cap):
caps.append(cap)
return tuple(caps) | A tuple of capabilities this device supports.
Returns:
(~libinput.constant.DeviceCapability): Device capabilities. |
def plos_doi_to_xmlurl(doi_string):
"""
Attempts to resolve a PLoS DOI into a URL path to the XML file.
"""
#Create URL to request DOI resolution from http://dx.doi.org
doi_url = 'http://dx.doi.org/{0}'.format(doi_string)
log.debug('DOI URL: {0}'.format(doi_url))
#Open the page, follow the redirect
try:
resolved_page = urllib.request.urlopen(doi_url)
except urllib.error.URLError as err:
print('Unable to resolve DOI URL, or could not connect')
raise err
else:
#Given the redirection, attempt to shape new request for PLoS servers
resolved_address = resolved_page.geturl()
log.debug('DOI resolved to {0}'.format(resolved_address))
parsed = urllib.parse.urlparse(resolved_address)
xml_url = '{0}://{1}'.format(parsed.scheme, parsed.netloc)
xml_url += '/article/fetchObjectAttachment.action?uri='
xml_path = parsed.path.replace(':', '%3A').replace('/', '%2F')
xml_path = xml_path.split('article%2F')[1]
xml_url += '{0}{1}'.format(xml_path, '&representation=XML')
log.debug('Shaped PLoS request for XML {0}'.format(xml_url))
#Return this url to the calling function
return xml_url | Attempts to resolve a PLoS DOI into a URL path to the XML file. |
def resize_hess(self, func):
"""
Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only.
"""
if func is None:
return None
@wraps(func)
def resized(*args, **kwargs):
out = func(*args, **kwargs)
# Make two dimensional, corresponding to a scalar function.
out = np.atleast_2d(np.squeeze(out))
mask = [p not in self._fixed_params for p in self.parameters]
return np.atleast_2d(out[mask, mask])
return resized | Removes values with identical indices to fixed parameters from the
output of func. func has to return the Hessian of a scalar function.
:param func: Hessian function to be wrapped. Is assumed to be the
Hessian of a scalar function.
:return: Hessian corresponding to free parameters only. |
def xlink_href_target(self, node, group=None):
"""
Return either:
        - a tuple (renderer, node) when the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs
"""
xlink_href = node.attrib.get('{http://www.w3.org/1999/xlink}href')
if not xlink_href:
return None
# First handle any raster embedded image data
match = re.match(r"^data:image/(jpeg|png);base64", xlink_href)
if match:
img_format = match.groups()[0]
image_data = base64.decodestring(xlink_href[(match.span(0)[1] + 1):].encode('ascii'))
file_indicator, path = tempfile.mkstemp(suffix='.%s' % img_format)
with open(path, 'wb') as fh:
fh.write(image_data)
# Close temporary file (as opened by tempfile.mkstemp)
os.close(file_indicator)
# this needs to be removed later, not here...
# if exists(path): os.remove(path)
return path
# From here, we can assume this is a path.
if '#' in xlink_href:
iri, fragment = xlink_href.split('#', 1)
else:
iri, fragment = xlink_href, None
if iri:
# Only local relative paths are supported yet
if not isinstance(self.source_path, str):
logger.error(
"Unable to resolve image path '%s' as the SVG source is not a file system path." % iri
)
return None
path = os.path.normpath(os.path.join(os.path.dirname(self.source_path), iri))
if not os.access(path, os.R_OK):
return None
if path == self.source_path:
# Self-referencing, ignore the IRI part
iri = None
if iri:
if path.endswith('.svg'):
if path in self._parent_chain:
logger.error("Circular reference detected in file.")
raise CircularRefError()
if path not in self._external_svgs:
self._external_svgs[path] = ExternalSVG(path, self)
ext_svg = self._external_svgs[path]
if ext_svg.root_node is not None:
if fragment:
ext_frag = ext_svg.get_fragment(fragment)
if ext_frag is not None:
return ext_svg.renderer, ext_frag
else:
return ext_svg.renderer, ext_svg.root_node
else:
# A raster image path
try:
# This will catch invalid images
PDFImage(path, 0, 0)
except IOError:
logger.error("Unable to read the image %s. Skipping..." % path)
return None
return path
elif fragment:
# A pointer to an internal definition
if fragment in self.definitions:
return self, self.definitions[fragment]
else:
# The missing definition should appear later in the file
self.waiting_use_nodes[fragment].append((node, group))
return DELAYED | Return either:
- a tuple (renderer, node) when the xlink:href attribute targets
a vector file or node
- the path to an image file for any raster image targets
- None if any problem occurs |
def build(self, builder):
"""Build XML by appending to builder"""
params = dict(StudyOID=self.study_oid,
MetaDataVersionOID=self.metadata_version_oid,
EffectiveDate=dt_to_iso8601(self.effective_date))
builder.start("MetaDataVersionRef", params)
builder.end("MetaDataVersionRef") | Build XML by appending to builder |
def exists(table_name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a table exists.
CLI Example:
.. code-block:: bash
salt myminion boto_dynamodb.exists table_name region=us-east-1
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.describe_table(table_name)
except JSONResponseError as e:
if e.error_code == 'ResourceNotFoundException':
return False
raise
return True | Check to see if a table exists.
CLI Example:
.. code-block:: bash
salt myminion boto_dynamodb.exists table_name region=us-east-1 |
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
"""Set standardized axis formatting for figure."""
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position(yticks_position)
ax.yaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.set_tick_params(which='both', direction='out', labelsize=fontsize,
pad=tick_padding, length=2, width=0.5)
ax.xaxis.labelpad = label_padding
ax.yaxis.labelpad = label_padding
ax.xaxis.label.set_size(fontsize)
ax.yaxis.label.set_size(fontsize) | Set standardized axis formatting for figure. |
def detect(self, tokens):
"""Return a (abbr, long) pair for each abbreviation definition."""
results = []
for abbr_span, long_span in self.detect_spans(tokens):
results.append((tokens[abbr_span[0]:abbr_span[1]], tokens[long_span[0]:long_span[1]]))
return results | Return a (abbr, long) pair for each abbreviation definition. |
def _from_dict(cls, mapping):
"""Create an instance from the corresponding state mapping.
:type mapping: dict
:param mapping: the instance state.
"""
if mapping.get("all"):
return cls(all_=True)
r_mappings = mapping.get("ranges", ())
ranges = [KeyRange(**r_mapping) for r_mapping in r_mappings]
return cls(keys=mapping.get("keys", ()), ranges=ranges) | Create an instance from the corresponding state mapping.
:type mapping: dict
:param mapping: the instance state. |
def _make_futures(futmap_keys, class_check, make_result_fn):
"""
Create futures and a futuremap for the keys in futmap_keys,
and create a request-level future to be bassed to the C API.
"""
futmap = {}
for key in futmap_keys:
if class_check is not None and not isinstance(key, class_check):
raise ValueError("Expected list of {}".format(type(class_check)))
futmap[key] = concurrent.futures.Future()
if not futmap[key].set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
# Create an internal future for the entire request,
# this future will trigger _make_..._result() and set result/exception
# per topic,future in futmap.
f = concurrent.futures.Future()
f.add_done_callback(lambda f: make_result_fn(f, futmap))
if not f.set_running_or_notify_cancel():
raise RuntimeError("Future was cancelled prematurely")
return f, futmap | Create futures and a futuremap for the keys in futmap_keys,
and create a request-level future to be passed to the C API. |
def _on_return(self, text):
"""Called when the user presses return on the send message widget."""
# Ignore if the user hasn't typed a message.
if not text:
return
elif text.startswith('/image') and len(text.split(' ')) == 2:
# Temporary UI for testing image uploads
filename = text.split(' ')[1]
image_file = open(filename, 'rb')
text = ''
else:
image_file = None
text = replace_emoticons(text)
segments = hangups.ChatMessageSegment.from_str(text)
self._coroutine_queue.put(
self._handle_send_message(
self._conversation.send_message(
segments, image_file=image_file
)
)
) | Called when the user presses return on the send message widget. |
def platform_mapped(func):
"""Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14", # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit (don't)
"amd64": "64bit",
},
}
"""
def inner(*args, **kwargs):
# Since platform is being used within config lazy import config to prevent
# circular dependencies
from rez.config import config
# Original result
result = func(*args, **kwargs)
# The function name is used as primary key
entry = config.platform_map.get(func.__name__)
if entry:
for key, value in entry.iteritems():
result, changes = re.subn(key, value, result)
if changes > 0:
break
return result
return inner | Decorates functions for lookups within a config.platform_map dictionary.
The first level key is mapped to the func.__name__ of the decorated function.
Regular expressions are used on the second level key, values.
Note that there is no guaranteed order within the dictionary evaluation. Only the first matching
regular expression is being used.
For example:
config.platform_map = {
"os": {
r"Scientific Linux-(.*)": r"Scientific-\1", # Scientific Linux-x.x -> Scientific-x.x
r"Ubuntu-14.\d": r"Ubuntu-14", # Any Ubuntu-14.x -> Ubuntu-14
},
"arch": {
"x86_64": "64bit", # Maps both x86_64 and amd64 -> 64bit (don't)
"amd64": "64bit",
},
} |
def _lons(self):
"""Return the longitudes (in degrees) of the gridded data."""
lons = _np.linspace(0.0, 360.0 - 360.0 / self.nlon, num=self.nlon)
return lons | Return the longitudes (in degrees) of the gridded data. |
def _patch_distribution_metadata():
    """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
    # Copy setuptools' richer implementations onto distutils' metadata class
    # so both code paths read/write the same (newer) metadata version.
    patched_attrs = ('write_pkg_file', 'read_pkg_file', 'get_metadata_version')
    for attr in patched_attrs:
        setattr(distutils.dist.DistributionMetadata, attr,
                getattr(setuptools.dist, attr))
def dump(obj, fp, **kwargs):
    """
    Serialize obj as a JSON formatted stream to fp (a .write()-supporting file-like object)

    Args:
        obj: The object to serialize; custom types are handled by
            :class:`BioCJSONEncoder`.
        fp: A ``.write()``-supporting file-like object.
        **kwargs: Extra keyword arguments forwarded to :func:`json.dump`
            (e.g. ``indent``). Do not pass ``cls``; it is fixed to
            :class:`BioCJSONEncoder` here.
    """
    return json.dump(obj, fp, cls=BioCJSONEncoder, **kwargs)
def spd_eig(W, epsilon=1e-10, method='QR', canonical_signs=False):
    """ Rank-reduced eigenvalue decomposition of symmetric positive definite matrix.

    Removes all negligible eigenvalues

    Parameters
    ----------
    W : ndarray((n, n), dtype=float)
        Symmetric positive-definite (spd) matrix.
    epsilon : float
        Truncation parameter. Eigenvalues with norms smaller than this cutoff will
        be removed.
    method : str
        Method to perform the decomposition of :math:`W` before inverting. Options are:

        * 'QR': QR-based robust eigenvalue decomposition of W
        * 'schur': Schur decomposition of W
    canonical_signs : boolean, default = False
        Fix signs in V, s. t. the largest element of in every row of V is positive.

    Returns
    -------
    s : ndarray(k)
        k non-negligible eigenvalues, sorted by descending norms
    V : ndarray(n, k)
        k leading eigenvectors

    Raises
    ------
    ValueError
        If `method` is neither 'QR' nor 'schur'.
    _ZeroRankError
        If every eigenvalue falls below the (possibly adjusted) cutoff, i.e.
        the truncated rank would be zero.
    """
    # check input
    assert _np.allclose(W.T, W), 'W is not a symmetric matrix'

    if method.lower() == 'qr':
        from .eig_qr.eig_qr import eig_qr
        s, V = eig_qr(W)
    # compute the Eigenvalues of C0 using Schur factorization
    elif method.lower() == 'schur':
        from scipy.linalg import schur
        S, V = schur(W)
        s = _np.diag(S)
    else:
        raise ValueError('method not implemented: ' + method)

    s, V = sort_by_norm(s, V)  # sort by descending eigenvalue norm

    # determine the cutoff. We know that C0 is an spd matrix,
    # so we select the truncation threshold such that everything that is negative vanishes
    evmin = _np.min(s)
    if evmin < 0:
        # raise the cutoff just above the most negative eigenvalue
        epsilon = max(epsilon, -evmin + 1e-16)

    # determine effective rank m and perform low-rank approximations.
    evnorms = _np.abs(s)
    n = _np.shape(evnorms)[0]
    # evnorms is sorted descending, so search its ascending reversed view;
    # m is the number of eigenvalues with norm >= epsilon.
    m = n - _np.searchsorted(evnorms[::-1], epsilon)
    if m == 0:
        raise _ZeroRankError('All eigenvalues are smaller than %g, rank reduction would discard all dimensions.'%epsilon)
    Vm = V[:, 0:m]
    sm = s[0:m]

    if canonical_signs:
        # enforce canonical eigenvector signs: flip each column so its
        # largest-magnitude entry is positive
        for j in range(m):
            jj = _np.argmax(_np.abs(Vm[:, j]))
            Vm[:, j] *= _np.sign(Vm[jj, j])

    return sm, Vm
Removes all negligible eigenvalues
Parameters
----------
W : ndarray((n, n), dtype=float)
Symmetric positive-definite (spd) matrix.
epsilon : float
Truncation parameter. Eigenvalues with norms smaller than this cutoff will
be removed.
method : str
Method to perform the decomposition of :math:`W` before inverting. Options are:
* 'QR': QR-based robust eigenvalue decomposition of W
* 'schur': Schur decomposition of W
canonical_signs : boolean, default = False
Fix signs in V, s. t. the largest element of in every row of V is positive.
Returns
-------
s : ndarray(k)
k non-negligible eigenvalues, sorted by descending norms
V : ndarray(n, k)
k leading eigenvectors |
def forwards(self, orm):
    """Backfill LocationSource.user from each source's first point.

    Sources that have no points cannot be attributed to a user and are
    deleted instead.
    """
    for source in orm.LocationSource.objects.all():
        try:
            first_point = source.points.all()[0]
        except IndexError:
            # No points for this source.
            source.delete()
        else:
            source.user = first_point.user
            source.save()
def finalize(self, **kwargs):
    """
    Finalize executes any subclass-specific axes finalization steps.
    The user calls poof and poof calls finalize.

    Parameters
    ----------
    kwargs: dict
        generic keyword arguments
    """
    # Title reflects the ranking algorithm used and the number of features.
    title = "{} Ranking of {} Features".format(
        self.ranking_.title(), len(self.features_)
    )
    self.set_title(title)
The user calls poof and poof calls finalize.
Parameters
----------
kwargs: dict
generic keyword arguments |
def is_compression_coordinate(ds, variable):
    '''
    Returns True if the variable is a coordinate variable that defines a
    compression scheme.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :param str variable: Variable name
    :rtype: bool
    '''
    # Must be a coordinate variable
    if not is_coordinate_variable(ds, variable):
        return False
    # must have a non-empty string attribute named "compress"
    # NOTE(review): `basestring` exists only on Python 2 -- confirm this
    # module is not expected to run on Python 3 (where this raises NameError).
    compress = getattr(ds.variables[variable], 'compress', None)
    if not isinstance(compress, basestring):
        return False
    if not compress:
        return False
    # A variable must not compress itself. This is a substring test, so any
    # variable whose name occurs anywhere in the compress string is rejected.
    if variable in compress:
        return False
    # Every whitespace-separated token in compress must name a dimension
    for dim in compress.split():
        if dim not in ds.dimensions:
            return False
    return True
compression scheme.
:param netCDF4.Dataset nc: An open netCDF dataset
:param str variable: Variable name |
def _init(self):
    """Read the machine's initial status block from the device.

    Reads, in order: the success/ready byte, the left and right hall
    sensor values (2 bytes each), then the carriage type and carriage
    position (1 byte each). Assumes ``self._file`` is positioned at the
    start of the status block.
    """
    self._ready = self._file.read(1)             # success/ready byte (raw bytes)
    self._hall_left = self._file.read(2)         # raw 2-byte hall sensor value
    self._hall_right = self._file.read(2)        # raw 2-byte hall sensor value
    self._carriage_type = self._file.read(1)[0]  # single int
    self._carriage_position = self._file.read(1)[0]  # single int
def AddDescriptor(self, desc):
    """Adds a Descriptor to the pool, non-recursively.

    If the Descriptor contains nested messages or enums, the caller must
    explicitly register them. This method also registers the FileDescriptor
    associated with the message.

    Args:
      desc: A Descriptor.

    Raises:
      TypeError: If desc is not a descriptor.Descriptor instance.
    """
    if isinstance(desc, descriptor.Descriptor):
        # Index by fully-qualified name and register the owning file.
        self._descriptors[desc.full_name] = desc
        self._AddFileDescriptor(desc.file)
    else:
        raise TypeError('Expected instance of descriptor.Descriptor.')
If the Descriptor contains nested messages or enums, the caller must
explicitly register them. This method also registers the FileDescriptor
associated with the message.
Args:
desc: A Descriptor. |
def analyze(self, handle, filename):
    """Submit a file for analysis.

    :type handle: File handle
    :param handle: Handle to file to upload for analysis.
    :type filename: str
    :param filename: File name.

    :rtype: str
    :return: Task ID as a string
    """
    # Rewind so the whole file is uploaded regardless of prior reads.
    handle.seek(0)
    response = self._request(
        "tasks/create/file",
        method='POST',
        files={"file": (filename, handle)},  # multipart upload
    )
    payload = json.loads(response.content.decode('utf-8'))
    # v1.3 of the API responds with "task_id"; v2.0 with a "task_ids" list.
    try:
        return str(payload["task_id"])
    except KeyError:
        return str(payload["task_ids"][0])
:type handle: File handle
:param handle: Handle to file to upload for analysis.
:type filename: str
:param filename: File name.
:rtype: str
:return: Task ID as a string |
def get_build_config_by_labels_filtered(self, label_selectors, filter_key, filter_value):
    """
    Returns a build config matching the given label selectors, filtering against
    another predetermined value. This method will raise OsbsException
    if not exactly one build config is found after filtering.
    """
    items = self.get_all_build_configs_by_labels(label_selectors)

    if filter_value is not None:
        # Keep only configs whose nested value at the dotted filter_key path
        # equals filter_value.
        path = filter_key.split('.')
        items = [bc for bc in items
                 if graceful_chain_get(bc, *path) == filter_value]

    if not items:
        raise OsbsException(
            "Build config not found for labels: %r" %
            (label_selectors, ))
    if len(items) > 1:
        raise OsbsException(
            "More than one build config found for labels: %r" %
            (label_selectors, ))

    return items[0]
another predetermined value. This method will raise OsbsException
if not exactly one build config is found after filtering. |
def download(self, filename, format='sdf', overwrite=False, resolvers=None, **kwargs):
    """ Download the resolved structure as a file

    Delegates to the module-level ``download`` function, using this
    result's original ``input`` as the identifier.

    :param string filename: Path of the file to write.
    :param string format: Output file format (default ``'sdf'``).
    :param bool overwrite: Whether to overwrite an existing file.
    :param resolvers: Resolvers to use; ``None`` for the default set.
    """
    download(self.input, filename, format, overwrite, resolvers, **kwargs)
def last_ser(self):
    """
    Return the last `<c:ser>` element in the last xChart element, based
    on series order (not necessarily the same element as document order).
    Returns None when that xChart contains no series.
    """
    series = self.xCharts[-1].sers
    return series[-1] if series else None
def stars_list(self, **kwargs) -> SlackResponse:
    """Lists stars for a user.

    Requires a user (xoxp) token, which is validated before the call.

    Args:
        **kwargs: Optional query parameters passed straight through to the
            ``stars.list`` Web API method (e.g. ``count``, ``page``).
    """
    self._validate_xoxp_token()
    return self.api_call("stars.list", http_verb="GET", params=kwargs)
def read_pickle(fn):
    """Load a GOParser object from a pickle file.

    The function automatically detects whether the file is compressed
    with gzip.

    Parameters
    ----------
    fn: str
        Path of the pickle file.

    Returns
    -------
    `GOParser`
        The GOParser object stored in the pickle file.
    """
    # open_plain_or_gzip transparently handles plain and gzipped files.
    with misc.open_plain_or_gzip(fn, 'rb') as fh:
        return pickle.load(fh)
The function automatically detects whether the file is compressed
with gzip.
Parameters
----------
fn: str
Path of the pickle file.
Returns
-------
`GOParser`
The GOParser object stored in the pickle file. |
def extracting(self, *names, **kwargs):
    """Asserts that val is collection, then extracts the named properties or named zero-arg methods into a list (or list of tuples if multiple names are given).

    Keyword Args:
        filter: optional pre-extraction filter; a name (item kept when that
            extracted value is truthy), a dict mapping names to required
            values, or a callable predicate.
        sort: optional pre-extraction sort key; a name, an iterable of
            names (compared as a tuple), or a callable key function.

    Returns:
        AssertionBuilder: a new builder wrapping the list of extracted values.

    Raises:
        TypeError: if val is not a non-string iterable.
        ValueError: if no names are given, or a name cannot be extracted.
    """
    if not isinstance(self.val, Iterable):
        raise TypeError('val is not iterable')
    if isinstance(self.val, str_types):
        raise TypeError('val must not be string')
    if len(names) == 0:
        raise ValueError('one or more name args must be given')

    def _extract(x, name):
        # Pull `name` out of x as a dict key, a sequence index, or an
        # attribute / zero-arg method -- tried in that order.
        if self._check_dict_like(x, check_values=False, return_as_bool=True):
            if name in x:
                return x[name]
            else:
                raise ValueError('item keys %s did not contain key <%s>' % (list(x.keys()), name))
        elif isinstance(x, Iterable):
            self._check_iterable(x, name='item')
            return x[name]
        elif hasattr(x, name):
            attr = getattr(x, name)
            if callable(attr):
                try:
                    return attr()
                except TypeError:
                    raise ValueError('val method <%s()> exists, but is not zero-arg method' % name)
            else:
                return attr
        else:
            raise ValueError('val does not have property or zero-arg method <%s>' % name)

    def _filter(x):
        # Evaluate the optional `filter` kwarg against item x; True keeps it.
        if 'filter' in kwargs:
            if isinstance(kwargs['filter'], str_types):
                return bool(_extract(x, kwargs['filter']))
            elif self._check_dict_like(kwargs['filter'], check_values=False, return_as_bool=True):
                for k in kwargs['filter']:
                    if isinstance(k, str_types):
                        if _extract(x, k) != kwargs['filter'][k]:
                            return False
                return True
            elif callable(kwargs['filter']):
                return kwargs['filter'](x)
            # unrecognized filter type: keep nothing
            return False
        return True

    def _sort(x):
        # Compute the optional sort key for item x (0 when no usable sort).
        if 'sort' in kwargs:
            if isinstance(kwargs['sort'], str_types):
                return _extract(x, kwargs['sort'])
            elif isinstance(kwargs['sort'], Iterable):
                items = []
                for k in kwargs['sort']:
                    if isinstance(k, str_types):
                        items.append(_extract(x, k))
                return tuple(items)
            elif callable(kwargs['sort']):
                return kwargs['sort'](x)
        return 0

    extracted = []
    for i in sorted(self.val, key=lambda x: _sort(x)):
        if _filter(i):
            items = [_extract(i, name) for name in names]
            # single name -> flat list; multiple names -> list of tuples
            extracted.append(tuple(items) if len(items) > 1 else items[0])

    return AssertionBuilder(extracted, self.description, self.kind)
def std(self, n, array=False):
    """Standard deviation of the close series over window n.

    Returns the full series when `array` is True, otherwise only the
    most recent value.
    """
    series = talib.STDDEV(self.close, n)
    return series if array else series[-1]
def cluster_health(index=None, level='cluster', local=False, hosts=None, profile=None):
    '''
    .. versionadded:: 2017.7.0

    Return Elasticsearch cluster health.

    index
        Limit the information returned to a specific index
    level
        Specify the level of detail for returned information, default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
    local
        Return local information, do not retrieve the state from master node

    CLI example::

        salt myminion elasticsearch.cluster_health
    '''
    es = _get_instance(hosts, profile)

    try:
        health = es.cluster.health(index=index, level=level, local=local)
    except elasticsearch.TransportError as e:
        # Surface transport failures as a salt execution error.
        raise CommandExecutionError("Cannot retrieve health information, server returned code {0} with message {1}".format(e.status_code, e.error))
    return health
Return Elasticsearch cluster health.
index
Limit the information returned to a specific index
level
Specify the level of detail for returned information, default 'cluster', valid choices are: 'cluster', 'indices', 'shards'
local
Return local information, do not retrieve the state from master node
CLI example::
salt myminion elasticsearch.cluster_health |
def read_cstring(self) -> bool:
    """
    read a double quoted string

    Read following BNF rule else return False::

        '"' -> ['\\' #char | ~'\\'] '"'
    """
    self._stream.save_context()
    # Opening quote, then consume up to an unescaped closing quote.
    # NOTE: the matched text was previously sliced out into an unused local
    # (`txt`); that dead code has been removed.
    if self.read_char("\"") and self.read_until("\"", "\\"):
        return self._stream.validate_context()
    return self._stream.restore_context()
Read following BNF rule else return False::
'"' -> ['\\' #char | ~'\\'] '"' |
def format_progress(self, width):
    """Create the formatted string that displays the progress."""
    # Distribute the available width among the chunks, then render each
    # chunk at its allotted size and wrap with the separators.
    widths = self._get_chunk_sizes(width)
    rendered = "".join(
        chunk.format_chunk(chunk_width)
        for chunk, chunk_width in zip(self._progress_chunks, widths)
    )
    return "{sep_start}{progress}{sep_end}".format(
        sep_start=self.sep_start,
        progress=rendered,
        sep_end=self.sep_end,
    )
def H13(self):
    "Information measure of correlation 2."
    # An imaginary result has been encountered once in the Matlab
    # version. The reason is unclear.
    inner = 1 - np.exp(-2 * (self.hxy2 - self.H9()))
    return np.sqrt(inner)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.