code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def convertforoutput(self,outputfile):
    """Convert from one of the accepted source formats into the target format.

    Relevant when converters are used in OutputTemplates. ``outputfile`` is a
    CLAMOutputFile instance describing the file to be generated here.
    """
    assert isinstance(outputfile, CLAMOutputFile)
    # The destination metadata class must be one this convertor supports.
    metaclass = outputfile.metadata.__class__
    if metaclass not in self.acceptforoutput:
        raise Exception("Convertor " + self.__class__.__name__ + " can not convert input files to " + metaclass.__name__ + "!")
    return []
def tag(self, tag):
    """Get a release by tag
    """
    # Base URL comes from this object's string representation.
    endpoint = '%s/tags/%s' % (self, tag)
    response = self.http.get(endpoint, auth=self.auth)
    response.raise_for_status()
    return response.json()
def sealedbox_encrypt(data, **kwargs):
    '''
    Encrypt data using a public key generated from `nacl.keygen`.
    The encryptd data can be decrypted using `nacl.sealedbox_decrypt` only with the secret key.

    CLI Examples:

    .. code-block:: bash

        salt-run nacl.sealedbox_encrypt datatoenc
        salt-call --local nacl.sealedbox_encrypt datatoenc pk_file=/etc/salt/pki/master/nacl.pub
        salt-call --local nacl.sealedbox_encrypt datatoenc pk='vrwQF7cNiNAVQVAiS3bvcbJUnF0cN6fU9YTZD9mBfzQ='
    '''
    # SealedBox.encrypt expects bytes; normalise the input first.
    raw = salt.utils.stringutils.to_bytes(data)
    box = libnacl.sealed.SealedBox(_get_pk(**kwargs))
    return base64.b64encode(box.encrypt(raw))
def addMenuItem( self, newItem, atItem ):
    """
    Inserts a new menu item into the tree relative to the given anchor item.

    :param newItem | <QTreeWidgetItem>
           atItem  | <QTreeWidgetItem>
    """
    tree = self.uiMenuTREE
    # No anchor item: append at the top level.
    if not atItem:
        tree.addTopLevelItem(newItem)
        return
    # Anchor is a menu: nest the new item inside it.
    if atItem.data(0, Qt.UserRole) == 'menu':
        atItem.addChild(newItem)
        return
    parent = atItem.parent()
    if parent:
        # Insert as the next sibling under the same parent.
        parent.insertChild(parent.indexOfChild(atItem) + 1, newItem)
    else:
        # Top-level anchor: insert right after it.
        tree.insertTopLevelItem(tree.indexOfTopLevelItem(atItem) + 1, newItem)
def simplex_projection(v, b=1):
    r"""Project a vector onto the simplex domain.

    Follows: "Efficient projections onto the l1-ball for learning in high
    dimensions", John Duchi et al., ICML 2008.
    Implementation Time: 2011 June 17 by Bin@libin AT pmail.ntu.edu.sg

    Solves: min_{w} \| w - v \|_{2}^{2}  s.t.  sum_{i=1}^{m} w_i = b, w_i >= 0

    Input: A vector v \in R^{m}, and a scalar b > 0 (default=1)
    Output: Projection vector w

    :Example:
    >>> proj = simplex_projection([.4 ,.3, -.4, .5])
    >>> proj # doctest: +NORMALIZE_WHITESPACE
    array([ 0.33333333, 0.23333333, 0. , 0.43333333])
    >>> print(proj.sum())
    1.0

    Original matlab implementation: John Duchi (jduchi@cs.berkeley.edu)
    Python-port: Copyright 2013 by Thomas Wiecki (thomas.wiecki@gmail.com).
    """
    v = np.asarray(v)
    m = len(v)
    # Clamp negative components to zero, then sort descending.
    v = np.where(v > 0, v, 0)
    u = np.sort(v)[::-1]
    cumsums = np.cumsum(u)
    # Largest index where the sorted value still exceeds the running threshold.
    rho = np.nonzero(u > (cumsums - b) / np.arange(1, m + 1))[0][-1]
    # Shrinkage amount (never negative).
    theta = np.max([0, (cumsums[rho] - b) / (rho + 1)])
    return np.maximum(v - theta, 0)
def nucnorm(x0, rho, gamma):
    """
    Proximal operator for the nuclear norm (sum of the singular values of a matrix)

    Parameters
    ----------
    x0 : array_like
        The starting or initial point used in the proximal update step
    rho : float
        Momentum parameter for the proximal step (larger value -> stays closer to x0)
    gamma : float
        A constant that weights how strongly to enforce the constraint

    Returns
    -------
    theta : array_like
        The parameter vector found after running the proximal update step
    """
    u, s, v = np.linalg.svd(x0, full_matrices=False)
    # Shrink each singular value toward zero by gamma/rho, flooring at 0.
    shrunk = np.maximum(s - gamma / float(rho), 0)
    # Rebuild the matrix from the thresholded spectrum.
    return u @ np.diag(shrunk) @ v
def add_rec_new(self, k, val):
    """Recursively add a new value and its children to me, and assign a
    variable to it.

    Args:
        k (str): The name of the variable to assign.
        val (LispVal): The value to be added and assigned.

    Returns:
        LispVal: The added value.
    """
    # Register val (and everything nested inside it) first, so the
    # environment never exposes a name bound to an unregistered value.
    self.rec_new(val)
    self[k] = val
    return val
def set_web_master(self):
    """Parse the feed's <webmaster> element and store its text on the
    instance, falling back to None when the element is missing."""
    try:
        value = self.soup.find('webmaster').string
    except AttributeError:
        # find() returned None: the feed has no webmaster tag.
        value = None
    self.web_master = value
def _set_mode(self, v, load=False):
    """
    Setter method for mode, mapped from YANG variable /interface/tunnel/mode (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mode() directly.
    """
    # Unwrap values that carry their own underlying type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the incoming value into the generated YANG container class;
        # TypeError/ValueError means it is not compatible with the schema.
        t = YANGDynClass(v,base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel encapsulation method', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """mode must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=mode.mode, is_container='container', presence=False, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Tunnel encapsulation method', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='container', is_config=True)""",
        })
    self.__mode = t
    # Give subclasses/backends a hook to react to the assignment.
    if hasattr(self, '_set'):
        self._set()
def _add_gmaf(self, variant_obj, info_dict):
    """Attach the GMAF frequency found on the variant's transcripts.

    Args:
        variant_obj (puzzle.models.Variant)
        info_dict (dict): A info dictionary
    """
    ##TODO search for max freq in info dict
    for transcript in variant_obj.transcripts:
        raw = transcript.GMAF
        if not raw:
            continue
        # GMAF comes as "allele:frequency"; keep the numeric part.
        frequency = float(raw.split(':')[-1])
        variant_obj.add_frequency('GMAF', frequency)
        # Use GMAF as a stand-in for the 1000G frequency when it is unset.
        if not variant_obj.thousand_g:
            variant_obj.thousand_g = frequency
def formatdt(date: datetime.date, include_time: bool = True) -> str:
    """
    Formats a ``datetime.date`` to ISO-8601 basic format, to minute accuracy
    with no timezone (or, if ``include_time`` is ``False``, omit the time).
    """
    # A plain date formatted with %H:%M yields 00:00.
    fmt = "%Y-%m-%dT%H:%M" if include_time else "%Y-%m-%d"
    return date.strftime(fmt)
def plot_oneD(dataset, vars, filename, bins=60):
    """Plot 1D marginalised posteriors for the 'vars' of interest.

    :param dataset: basename of the HDF5 samples file (``dataset + '.h5'``).
    :param vars: iterable of variable names, one subplot per name.
    :param filename: path the figure is saved to.
    :param bins: number of histogram bins per posterior.
    """
    n = len(vars)
    # squeeze=False guarantees a 2-D array of axes even when n == 1;
    # otherwise plt.subplots returns a bare Axes object and axes[i]
    # raises TypeError for a single variable.
    fig, axes = plt.subplots(nrows=n,
                             ncols=1,
                             sharex=False,
                             sharey=False,
                             squeeze=False)
    for i, x in enumerate(vars):
        ax = axes[i, 0]
        P = posterior.oneD(dataset+'.h5', x, limits=limits(x), bins=bins)
        P.plot(ax)
        ax.set_xlabel(labels(x))
        ax.set_yticklabels([])
    fig.set_size_inches(4, 4*n)
    fig.savefig(filename, dpi=200, bbox_inches='tight')
    plt.close(fig)
def load_observations((observations, regex, rename), path, filenames):
    """
    Returns a provisional name based dictionary of observations of the object.
    Each observations is keyed on the date. ie. a dictionary of dictionaries.

    @param path: the directory where filenames are.
    @type path str
    @param filenames: list of files in path.
    @type filenames list
    @rtype None
    """
    # NOTE(review): tuple-unpacking parameters are Python-2-only syntax
    # (removed by PEP 3113); this os.path.walk-style callback signature
    # will not parse under Python 3.
    for filename in filenames:
        if re.search(regex, filename) is None:
            logging.error("Skipping {}".format(filename))
            continue
        obs = mpc.MPCReader().read(os.path.join(path, filename))
        for ob in obs:
            # Keep only observatory-568 observations carrying an OSSOS
            # comment and taken after the start of 2013.
            if "568" not in ob.observatory_code:
                continue
            if not isinstance(ob.comment, mpc.OSSOSComment):
                continue
            if ob.date < Time("2013-01-01 00:00:00"):
                continue
            if rename:
                # New provisional name = source file basename up to the first dot.
                new_provisional_name = os.path.basename(filename)
                new_provisional_name = new_provisional_name[0:new_provisional_name.find(".")]
                rename_map[ob.provisional_name] = new_provisional_name
            try:
                # Frame id up to the first 'p' groups observations by exposure.
                key1 = ob.comment.frame.split('p')[0]
            except Exception as ex:
                logger.warning(str(ex))
                logger.warning(ob.to_string())
                continue
            key2 = ob.provisional_name
            if key1 not in observations:
                observations[key1] = {}
            if key2 in observations[key1]:
                if observations[key1][key2]:
                    continue
                # NOTE(review): `observation` is not defined in this scope, so
                # this branch would raise NameError if reached; it presumably
                # should test observations[key1][key2].null_observation —
                # TODO confirm before changing.
                if not observation.null_observation:
                    logger.error(filename)
                    logger.error(str(observations[key1][key2]))
                    raise ValueError("conflicting observations for {} in {}".format(key2, key1))
            observations[key1][key2] = ob
def configure_flair(self, subreddit, flair_enabled=False,
                    flair_position='right',
                    flair_self_assign=False,
                    link_flair_enabled=False,
                    link_flair_position='left',
                    link_flair_self_assign=False):
    """Configure the flair setting for the given subreddit.

    :returns: The json response from the server.
    """
    # The API expects 'on'/'off' strings rather than booleans.
    def switch(flag):
        return 'on' if flag else 'off'
    data = {'r': six.text_type(subreddit),
            'flair_enabled': switch(flair_enabled),
            'flair_position': flair_position,
            'flair_self_assign_enabled': switch(flair_self_assign),
            # An empty position disables link flair entirely.
            'link_flair_position': link_flair_position if link_flair_enabled else '',
            'link_flair_self_assign_enabled': switch(link_flair_self_assign)}
    return self.request_json(self.config['flairconfig'], data=data)
def notifyObservers(self, arg=None):
    '''If 'changed' indicates that this object
    has changed, notify all its observers, then
    call clearChanged(). Each observer has its
    update() called with two arguments: this
    observable object and the generic 'arg'.'''
    self.mutex.acquire()
    try:
        if not self.changed:
            return
        # Snapshot under the lock so observers added concurrently
        # during notification do not affect this round.
        snapshot = list(self.obs)
        self.clearChanged()
    finally:
        self.mutex.release()
    # Notify outside the lock to avoid deadlocks in update() callbacks.
    for observer in snapshot:
        observer.update(self, arg)
def sg_min(tensor, opt):
    r"""Computes the minimum of elements across axis of a tensor.

    See `tf.reduce_min()` in tensorflow.

    Args:
        tensor: A `Tensor` (automatically given by chain).
        opt:
            axis : A tuple/list of integers or an integer. The axis to reduce.
            keep_dims: If true, retains reduced dimensions with length 1.
            name: If provided, replace current tensor's name.

    Returns:
        A `Tensor`.
    """
    # Thin sugartensor wrapper: forward the chained options straight through.
    return tf.reduce_min(tensor,
                         axis=opt.axis,
                         keep_dims=opt.keep_dims,
                         name=opt.name)
def _verify_jws(self, payload, key):
    """Verify the given JWS payload with the given key and return the payload"""
    jws = JWS.from_compact(payload)
    try:
        alg = jws.signature.combined.alg.name
    except KeyError:
        raise SuspiciousOperation('No alg value found in header')
    # The token's algorithm must match what this client was configured for.
    if alg != self.OIDC_RP_SIGN_ALGO:
        raise SuspiciousOperation(
            "The provider algorithm {!r} does not match the client's "
            "OIDC_RP_SIGN_ALGO.".format(alg))
    if isinstance(key, six.string_types):
        # Use smart_bytes here since the key string comes from settings.
        jwk = JWK.load(smart_bytes(key))
    else:
        # The key is a json returned from the IDP JWKS endpoint.
        jwk = JWK.from_json(key)
    if not jws.verify(jwk):
        raise SuspiciousOperation('JWS token verification failed.')
    return jws.payload
def delete(self, worker_id):
    ''' Stop and remove a worker '''
    # Unknown job: report it without touching any state.
    if worker_id not in self.jobs:
        return flask.jsonify({'error': 'job {} unknown'.format(worker_id)}), 404
    # NOTE pop it if done ?
    # Terminate the underlying task before forgetting about it.
    self.jobs[worker_id]['worker'].revoke(terminate=True)
    report = {
        'id': worker_id,
        'revoked': True
        # FIXME Unable to serialize self.jobs[worker_id]
        # 'session': self.jobs.pop(worker_id)
    }
    self.jobs.pop(worker_id)
    return flask.jsonify(report), 200
def match(self, location):
    """
    Check if the given location "matches".

    :param location: The :class:`Location` object to try to match.
    :returns: :data:`True` if the two locations are on the same system and
              the :attr:`directory` can be matched as a filename pattern or
              a literal match on the normalized pathname.
    """
    # Never match locations on other systems.
    if self.ssh_alias != location.ssh_alias:
        return False
    if self.have_wildcards:
        # Match filename patterns using fnmatch().
        return fnmatch.fnmatch(location.directory, self.directory)
    # Compare normalized directory pathnames (without rebinding `self`).
    ours = os.path.normpath(self.directory)
    theirs = os.path.normpath(location.directory)
    return ours == theirs
async def get_user_groups(request):
    """Return the groups that the user in this request has access to.

    The user id is obtained from auth.get_auth and passed to the ACL
    callback function to resolve its group memberships.

    Args:
        request: aiohttp Request object

    Returns:
        None when the ACL callback returns None. Otherwise the set of
        groups provided by the callback plus Group.Everyone, and — when
        the user id is not None — Group.AuthenticatedUser and the user
        id itself.

    Raises:
        RuntimeError: If the ACL middleware is not installed
    """
    acl_callback = request.get(GROUPS_KEY)
    if acl_callback is None:
        raise RuntimeError('acl_middleware not installed')
    user_id = await get_auth(request)
    groups = await acl_callback(user_id)
    if groups is None:
        return None
    # Everyone is always added; authenticated users also carry their id.
    extra = [Group.Everyone]
    if user_id is not None:
        extra.extend((Group.AuthenticatedUser, user_id))
    return set(itertools.chain(groups, extra))
def get_range_slices(self, column_parent, predicate, range, consistency_level):
    """
    returns a subset of columns for a contiguous range of keys.

    Parameters:
    - column_parent
    - predicate
    - range
    - consistency_level
    """
    # Register a Deferred under a fresh sequence id; it fires when the
    # response matching this request arrives.
    self._seqid += 1
    deferred = self._reqs[self._seqid] = defer.Deferred()
    self.send_get_range_slices(column_parent, predicate, range, consistency_level)
    return deferred
def extract_ape (archive, compression, cmd, verbosity, interactive, outdir):
    """Decompress an APE archive to a WAV file."""
    # Build the command line: <cmd> <archive> <outfile> -d
    target = util.get_single_outfile(outdir, archive, extension=".wav")
    return [cmd, archive, target, '-d']
def get_pytorch_link(ft)->str:
    "Returns link to pytorch docs of `ft`."
    name = ft.__name__
    ext = '.html'
    # Hard-coded special cases first.
    if name == 'device': return f'{PYTORCH_DOCS}tensor_attributes{ext}#torch-device'
    if name == 'Tensor': return f'{PYTORCH_DOCS}tensors{ext}#torch-tensor'
    if name.startswith('torchvision'):
        doc_path = get_module_name(ft).replace('.', '/')
        # Module anchors use dashes instead of dots.
        if inspect.ismodule(ft): name = name.replace('.', '-')
        return f'{PYTORCH_DOCS}{doc_path}{ext}#{name}'
    if name.startswith('torch.nn') and inspect.ismodule(ft):
        # nn.functional is special case
        return f"{PYTORCH_DOCS}nn{ext}#{name.replace('.', '-')}"
    parts = get_module_name(ft).split('.')
    if len(parts) == 1: return f'{PYTORCH_DOCS}{parts[0]}{ext}#{parts[0]}.{name}'
    offset = 1 if parts[1] == 'utils' else 0  # utils is a pytorch special case
    doc_path = parts[1 + offset]
    if inspect.ismodule(ft): return f'{PYTORCH_DOCS}{doc_path}{ext}#module-{name}'
    anchor = '.'.join(parts[:(2 + offset)] + [name])
    return f'{PYTORCH_DOCS}{doc_path}{ext}#{anchor}'
def delete(self):
    """Deleting any existing copy of this object from the cache."""
    # The cache key is derived from all of this object's key fields.
    _cache.delete(self._key(self._all_keys()))
def write(
    self,
    mi_cmd_to_write,
    timeout_sec=DEFAULT_GDB_TIMEOUT_SEC,
    raise_error_on_timeout=True,
    read_response=True,
):
    """Write to gdb process. Block while parsing responses from gdb for a maximum of timeout_sec.

    Args:
        mi_cmd_to_write (str or list): String to write to gdb. If list, it is joined by newlines.
        timeout_sec (float): Maximum number of seconds to wait for response before exiting. Must be >= 0.
        raise_error_on_timeout (bool): If read_response is True, raise error if no response is received
        read_response (bool): Block and read response. If there is a separate thread running,
            this can be false, and the reading thread read the output.

    Returns:
        List of parsed gdb responses if read_response is True, otherwise []

    Raises:
        NoGdbProcessError if there is no gdb subprocess running
        TypeError if mi_cmd_to_write is not valid
    """
    self.verify_valid_gdb_subprocess()
    if timeout_sec < 0:
        # Clamp rather than raise: a negative timeout is treated as "no wait".
        self.logger.warning("timeout_sec was negative, replacing with 0")
        timeout_sec = 0
    # Ensure proper type of the mi command
    # NOTE(review): `unicode` only exists on Python 2 unless this module
    # defines a compatibility alias elsewhere — confirm before running on 3.
    if type(mi_cmd_to_write) in [str, unicode]:
        pass
    elif type(mi_cmd_to_write) == list:
        mi_cmd_to_write = "\n".join(mi_cmd_to_write)
    else:
        raise TypeError(
            "The gdb mi command must a be str or list. Got "
            + str(type(mi_cmd_to_write))
        )
    self.logger.debug("writing: %s", mi_cmd_to_write)
    # gdb only evaluates complete, newline-terminated lines.
    if not mi_cmd_to_write.endswith("\n"):
        mi_cmd_to_write_nl = mi_cmd_to_write + "\n"
    else:
        mi_cmd_to_write_nl = mi_cmd_to_write
    if USING_WINDOWS:
        # select not implemented in windows for pipes
        # assume it's always ready
        outputready = [self.stdin_fileno]
    else:
        _, outputready, _ = select.select([], self.write_list, [], timeout_sec)
    for fileno in outputready:
        if fileno == self.stdin_fileno:
            # ready to write
            self.gdb_process.stdin.write(mi_cmd_to_write_nl.encode())
            # don't forget to flush for Python3, otherwise gdb won't realize there is data
            # to evaluate, and we won't get a response
            self.gdb_process.stdin.flush()
        else:
            self.logger.error("got unexpected fileno %d" % fileno)
    if read_response is True:
        return self.get_gdb_response(
            timeout_sec=timeout_sec, raise_error_on_timeout=raise_error_on_timeout
        )
    else:
        return []
def names_in_chains(stream_item, aligner_data):
    '''
    Convert doc-level Rating object into a Label, and add that Label
    to all Token in all coref chains identified by
    aligner_data["chain_selector"]

    :param stream_item: document that has a doc-level Rating to translate into token-level Labels.
    :param aligner_data: dict containing:
      chain_selector: ALL or ANY
      annotator_id: string to find at stream_item.Ratings[i].annotator.annotator_id

    If chain_selector==ALL, then only apply Label to chains in which
    all of the Rating.mentions strings appear as substrings within at
    least one of the Token.token strings.

    If chain_selector==ANY, then apply Label to chains in which any of
    the Rating.mentions strings appear as a substring within at least
    one of the Token.token strings.

    If chain_selector==ANY_MULTI_TOKEN, then apply Label to chains in which all
    the names in any of the Rating.mentions strings appear as a substring within at least
    one of the Token.token strings.
    '''
    chain_selector = aligner_data.get('chain_selector', '')
    assert chain_selector in _CHAIN_SELECTORS, \
        'chain_selector: %r not in %r' % (chain_selector, _CHAIN_SELECTORS.keys())
    ## convert chain_selector to a function
    chain_selector = _CHAIN_SELECTORS[chain_selector]
    ## make inverted index equiv_id --> (names, tokens)
    equiv_ids = make_chains_with_names( stream_item.body.sentences )
    # When an annotator_id is configured, only that annotator's ratings
    # are translated into labels; otherwise all annotators are used.
    required_annotator_id = aligner_data.get('annotator_id')
    for annotator_id, ratings in stream_item.ratings.items():
        if (required_annotator_id is not None) and (annotator_id != required_annotator_id):
            continue
        else:
            for rating in ratings:
                label = Label(annotator=rating.annotator,
                              target=rating.target)
                # Label every token of every chain the selector accepts.
                for eqid, (chain_mentions, chain_tokens) in equiv_ids.items():
                    if chain_selector(rating.mentions, chain_mentions):
                        ## apply the label
                        for tok in chain_tokens:
                            add_annotation(tok, label)
def get_project_children(self, project_id, name_contains, exclude_response_fields=None):
    """
    Send GET to /projects/{project_id}/children filtering by a name.

    :param project_id: str uuid of the project
    :param name_contains: str name to filter folders by (if not None this method works recursively)
    :param exclude_response_fields: [str]: list of fields to exclude in the response items
    :return: requests.Response containing the successful result
    """
    # Delegate to the shared child-listing helper with the 'projects' prefix.
    return self._get_children('projects', project_id, name_contains,
                              exclude_response_fields)
def create_empty(self, name=None, renderers=None, RootNetworkList=None, verbose=False):
    """
    Create a new, empty network. The new network may be created as part of
    an existing network collection or a new network collection.

    :param name (string, optional): Enter the name of the new network.
    :param renderers (string, optional): Select the renderer to use for the
        new network view. By default, the standard Cytoscape 2D renderer (Ding)
        will be used = [''],
    :param RootNetworkList (string, optional): Choose the network collection
        the new network should be part of. If no network collection is selected,
        a new network collection is created. = [' -- Create new network collection --',
        'cy:command_documentation_generation']
    :param verbose: print more
    """
    # Pack only the parameters that were actually supplied.
    payload = set_param(["name", "renderers", "RootNetworkList"],
                        [name, renderers, RootNetworkList])
    return api(url=self.__url + "/create empty", PARAMS=payload,
               method="POST", verbose=verbose)
def visit_default(self, node):
    """check the node line number and check it if not yet done"""
    # Only statements carry meaningful line positions for this check.
    if not node.is_statement:
        return
    if not node.root().pure_python:
        return  # XXX block visit of child nodes
    prev_sibl = node.previous_sibling()
    if prev_sibl is not None:
        prev_line = prev_sibl.fromlineno
    else:
        # The line on which a finally: occurs in a try/finally
        # is not directly represented in the AST. We infer it
        # by taking the last line of the body and adding 1, which
        # should be the line of finally:
        if (
            isinstance(node.parent, nodes.TryFinally)
            and node in node.parent.finalbody
        ):
            prev_line = node.parent.body[0].tolineno + 1
        else:
            prev_line = node.parent.statement().fromlineno
    line = node.fromlineno
    assert line, node
    # Two statements starting on the same line: flag it once
    # (a visited-mark of 2 means it was already reported).
    if prev_line == line and self._visited_lines.get(line) != 2:
        self._check_multi_statement_line(node, line)
        return
    if line in self._visited_lines:
        return
    try:
        tolineno = node.blockstart_tolineno
    except AttributeError:
        tolineno = node.tolineno
    assert tolineno, node
    lines = []
    # Mark every physical line of the block header as visited and
    # collect its (rstripped) text; missing lines become empty strings.
    for line in range(line, tolineno + 1):
        self._visited_lines[line] = 1
        try:
            lines.append(self._lines[line].rstrip())
        except KeyError:
            lines.append("")
def rollback(self):
    """
    Rollback this context:
    - Position the consumer at the initial offsets.
    """
    # Restore the offsets captured when the context was entered.
    offsets = self.initial_offsets
    self.logger.info("Rolling back context: %s", offsets)
    self.update_consumer_offsets(offsets)
def map(source, func, *more_sources, ordered=True, task_limit=None):
    """Apply a given function to the elements of one or several
    asynchronous sequences.

    Each element is used as a positional argument, using the same order as
    their respective sources. The generation continues until the shortest
    sequence is exhausted. The function can either be synchronous or
    asynchronous (coroutine function).

    The results can either be returned in or out of order, depending on
    the corresponding ``ordered`` argument. This argument is ignored if the
    provided function is synchronous.

    The coroutines run concurrently but their amount can be limited using
    the ``task_limit`` argument. A value of ``1`` will cause the coroutines
    to run sequentially. This argument is ignored if the provided function
    is synchronous.

    If more than one sequence is provided, they're also awaited concurrently,
    so that their waiting times don't add up.

    It might happen that the provided function returns a coroutine but is not
    a coroutine function per se. In this case, one can wrap the function with
    ``aiostream.async_`` in order to force ``map`` to await the resulting
    coroutine. The following example illustrates the use ``async_`` with a
    lambda function::

        from aiostream import stream, async_
        ...
        ys = stream.map(xs, async_(lambda ms: asyncio.sleep(ms / 1000)))
    """
    # Coroutine functions go through the asynchronous implementation,
    # which honours ordering and concurrency limits; plain callables use
    # the synchronous implementation where those options do not apply.
    if asyncio.iscoroutinefunction(func):
        return amap.raw(source, func, *more_sources,
                        ordered=ordered, task_limit=task_limit)
    return smap.raw(source, func, *more_sources)
def get_symbol(units) -> str:
    """Return the default LaTeX symbol for the given units.

    Parameters
    ----------
    units : string
        Units.

    Returns
    -------
    string
        LaTeX formatted symbol.
    """
    units_kind = kind(units)
    if units_kind == "energy":
        # per-unit symbols for energy-like axes; generic "E" otherwise
        energy_symbols = {
            "nm": r"\lambda",
            "wn": r"\bar\nu",
            "eV": r"\hslash\omega",
            "Hz": r"f",
            "THz": r"f",
            "GHz": r"f",
        }
        return energy_symbols.get(units, "E")
    # non-energy kinds map directly; unknown kinds fall through unchanged
    fixed_symbols = {
        "delay": r"\tau",
        "fluence": r"\mathcal{F}",
        "pulse_width": r"\sigma",
        "temperature": r"T",
    }
    return fixed_symbols.get(units_kind, units_kind)
Parameters
----------
units_str : string
Units.
Returns
-------
string
LaTeX formatted symbol. |
def tohdf5(table, source, where=None, name=None, create=False, drop=False,
           description=None, title='', filters=None, expectedrows=10000,
           chunkshape=None, byteorder=None, createparents=False,
           sample=1000):
    """
    Write to an HDF5 table. If `create` is `False`, assumes the table
    already exists, and attempts to truncate it before loading. If `create`
    is `True`, a new table will be created, and if `drop` is True,
    any existing table will be dropped first. If `description` is `None`,
    the description will be guessed (a numpy dtype inferred from the first
    `sample` rows of the data). E.g.::

        >>> import petl as etl
        >>> table1 = (('foo', 'bar'),
        ...           (1, b'asdfgh'),
        ...           (2, b'qwerty'),
        ...           (3, b'zxcvbn'))
        >>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
        ...            drop=True, create=True, createparents=True)
        >>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
        +-----+-----------+
        | foo | bar       |
        +=====+===========+
        |   1 | b'asdfgh' |
        +-----+-----------+
        |   2 | b'qwerty' |
        +-----+-----------+
        |   3 | b'zxcvbn' |
        +-----+-----------+

    """
    import tables
    it = iter(table)
    if create:
        with _get_hdf5_file(source, mode='a') as h5file:
            if drop:
                # EAFP: probe for an existing node and remove it only when
                # it is actually there
                try:
                    h5file.get_node(where, name)
                except tables.NoSuchNodeError:
                    pass
                else:
                    h5file.remove_node(where, name)
            # determine datatype
            if description is None:
                # peek at the first `sample` rows without consuming the
                # iterator (iterpeek returns a fresh equivalent iterator)
                peek, it = iterpeek(it, sample)
                # use a numpy dtype
                description = infer_dtype(peek)
            # create the table
            h5file.create_table(where, name, description,
                                title=title,
                                filters=filters,
                                expectedrows=expectedrows,
                                chunkshape=chunkshape,
                                byteorder=byteorder,
                                createparents=createparents)
    with _get_hdf5_table(source, where, name, mode='a') as h5table:
        # truncate the existing table
        h5table.truncate(0)
        # load the data
        _insert(it, h5table)
already exists, and attempts to truncate it before loading. If `create`
is `True`, a new table will be created, and if `drop` is True,
any existing table will be dropped first. If `description` is `None`,
the description will be guessed. E.g.::
>>> import petl as etl
>>> table1 = (('foo', 'bar'),
... (1, b'asdfgh'),
... (2, b'qwerty'),
... (3, b'zxcvbn'))
>>> etl.tohdf5(table1, 'example.h5', '/testgroup', 'testtable',
... drop=True, create=True, createparents=True)
>>> etl.fromhdf5('example.h5', '/testgroup', 'testtable')
+-----+-----------+
| foo | bar |
+=====+===========+
| 1 | b'asdfgh' |
+-----+-----------+
| 2 | b'qwerty' |
+-----+-----------+
| 3 | b'zxcvbn' |
+-----+-----------+ |
def get_ids(a):
    """
    make copy of sequences with short identifier

    Writes two files next to ``a``:
      * ``<base>.id.fa``     -- fasta with each header replaced by a short id
      * ``<base>.id.lookup`` -- tab-separated id / cleaned-name / original header
    and returns both paths. If the id fasta already exists (per ``check``),
    the existing files are reused.
    """
    base = a.rsplit('.', 1)[0]
    a_id = '%s.id.fa' % base
    a_id_lookup = '%s.id.lookup' % base
    if check(a_id) is True:
        return a_id, a_id_lookup
    # set gives O(1) collision checks (was an O(n) scan of a list per id)
    ids = set()
    # context managers ensure all three handles are flushed and closed
    # (the originals were never closed, risking truncated output)
    with open(a) as fasta, \
            open(a_id, 'w') as a_id_f, \
            open(a_id_lookup, 'w') as a_id_lookup_f:
        for seq in parse_fasta(fasta):
            id = id_generator()
            while id in ids:
                id = id_generator()
            ids.add(id)
            header = seq[0].split('>')[1]
            name = remove_bad(header)
            seq[0] = '>%s %s' % (id, header)
            print('\n'.join(seq), file=a_id_f)
            print('%s\t%s\t%s' % (id, name, header), file=a_id_lookup_f)
    return a_id, a_id_lookup
def subtask(self, func, *args, **kw):
    """
    Run ``func(*args, **kw)`` as a tracked subtask.

    Bookkeeping wrapper for tasks that need to run subthreads: marks the
    subtask as started, invokes the callable, logs (and swallows) any
    exception it raises, and always marks the subtask finished.
    Example starting a simple.Task():
        backend.subtask(task.ignore, 1, 'a', verbose=False)
    """
    self.started_task()
    try:
        func(*args, **kw)
    except Exception:
        failure_msg = 'Subtask %s(*%s, **%s) failed!' % (func, args, kw)
        self.log(ERROR, failure_msg)
    finally:
        self.finished_task()
remembering that the subtask is running, and of cleaning up after it.
Example starting a simple.Task():
backend.subtask(task.ignore, 1, 'a', verbose=False)
*args and **kw will be propagated to `func`. |
def view_change_started(self, viewNo: int):
    """
    Tell the primary decider that the view has changed so it can prepare
    for an election; the election itself is started externally via
    decidePrimaries().

    Returns False (after logging a warning) when ``viewNo`` does not
    advance past the current view; otherwise records the outgoing master
    primary, clears every replica's primary name and returns True.
    """
    if not viewNo > self.viewNo:
        logger.warning("{}Provided view no {} is not greater"
                       " than the current view no {}"
                       .format(VIEW_CHANGE_PREFIX, viewNo, self.viewNo))
        return False
    # remember who was master before the change, then reset all replicas
    self.previous_master_primary = self.node.master_primary_name
    for rep in self.replicas.values():
        rep.primaryName = None
    return True
prepare for election, which then will be started from outside by
calling decidePrimaries() |
def _write_with_fallback(s, write, fileobj):
    """Write the supplied string with the given write function like
    ``write(s)``, but fall back to an encoding-aware writer on
    UnicodeEncodeError: first the locale's preferred encoding (or
    ``_DEFAULT_ENCODING`` when that codec is unknown), and finally
    'latin-1'. Returns the write function that ultimately succeeded so
    callers can reuse it for subsequent writes.
    """
    if IPythonIOStream is not None and isinstance(fileobj, IPythonIOStream):
        # If the output stream is an IPython.utils.io.IOStream object that's
        # not going to be very helpful to us since it doesn't raise any
        # exceptions when an error occurs writing to its underlying stream.
        # There's no advantage to us using IOStream.write directly though;
        # instead just write directly to its underlying stream:
        write = fileobj.stream.write

    # first attempt: the caller-supplied writer as-is
    try:
        write(s)
        return write
    except UnicodeEncodeError:
        # Let's try the next approach...
        pass

    # second attempt: wrap the raw file object in a codec writer for the
    # locale's preferred encoding
    enc = locale.getpreferredencoding()
    try:
        Writer = codecs.getwriter(enc)
    except LookupError:
        # unknown locale codec -- use the module-level default instead
        Writer = codecs.getwriter(_DEFAULT_ENCODING)

    f = Writer(fileobj)
    write = f.write

    try:
        write(s)
        return write
    except UnicodeEncodeError:
        # last resort: latin-1 can encode any codepoint < 256
        Writer = codecs.getwriter('latin-1')
        f = Writer(fileobj)
        write = f.write

    # If this doesn't work let the exception bubble up; I'm out of ideas
    write(s)
    return write
return write | Write the supplied string with the given write function like
``write(s)``, but use a writer for the locale's preferred encoding in case
of a UnicodeEncodeError. Failing that attempt to write with 'utf-8' or
'latin-1'. |
def tag_exists(self, version):
    """Return True when a tag named after ``version`` has already been
    created, False otherwise.
    """
    return any(tag == version for tag in self.available_tags())
version. |
def delete_tenant(self, tenant):
    """
    ADMIN ONLY. Permanently removes the given tenant from the system.
    There is no way to undo this, so be certain the specified tenant is
    the one you intend to delete.

    Raises TenantNotFound when the service reports a 404.
    """
    uri = "tenants/%s" % utils.get_id(tenant)
    resp, resp_body = self.method_delete(uri)
    if resp.status_code == 404:
        raise exc.TenantNotFound("Tenant '%s' does not exist." % tenant)
available, so you should be certain that the tenant specified is the
tenant you wish to delete. |
def group_children( index, shared, min_kids=10, stop_types=STOP_TYPES, delete_children=True ):
    """Collect like-type children into sub-groups of objects for objects with long children-lists

    Only group if:

        * there are more than X children of type Y
        * children are "simple"
        * individual children have no children themselves
        * individual children have no other parents...

    For each qualifying (parent, type) pair a synthetic MANY_TYPE node is
    inserted into ``index`` that stands in for the grouped children; when
    ``delete_children`` is true the individual child records are removed
    from ``index`` and ``shared``. NOTE(review): Python 2 syntax
    (``except KeyError, err``).
    """
    # first pass: collect candidates without mutating the index we iterate
    to_compress = []
    for to_simplify in list(iterindex( index )):
        if not isinstance( to_simplify, dict ):
            continue
        for typ,kids in children_types( to_simplify, index, stop_types=stop_types ).items():
            # keep only truthy children that are "simple" per the helper
            kids = [k for k in kids if k and simple(k,shared, to_simplify)]
            if len(kids) >= min_kids:
                # we can group and compress out...
                to_compress.append( (to_simplify,typ,kids))
    # second pass: replace each group with a single synthetic node
    for to_simplify,typ,kids in to_compress:
        typ_address = new_address(index)
        kid_addresses = [k['address'] for k in kids]
        index[typ_address] = {
            'address': typ_address,
            'type': MANY_TYPE,
            'name': typ,
            # aggregate size of all grouped children
            'size': sum( [k.get('size',0) for k in kids], 0),
            'parents': [to_simplify['address']],
        }
        shared[typ_address] = index[typ_address]['parents']
        # parent now references only the synthetic group node
        to_simplify['refs'][:] = [typ_address]
        if delete_children:
            for address in kid_addresses:
                try:
                    del index[address]
                except KeyError, err:
                    pass # already compressed out
                try:
                    del shared[address]
                except KeyError, err:
                    pass # already compressed out
            index[typ_address]['refs'] = []
        else:
            # keep the children reachable through the group node
            index[typ_address]['refs'] = kid_addresses
Only group if:
* there are more than X children of type Y
* children are "simple"
* individual children have no children themselves
* individual children have no other parents... |
def from_array(array):
    """
    Deserialize a new StickerMessage from a given dictionary.

    :param array: dictionary to deserialize; None/empty yields None.
    :return: new StickerMessage instance.
    :rtype: StickerMessage
    """
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    data = {}
    if isinstance(array.get('sticker'), InputFile):
        data['sticker'] = InputFile.from_array(array.get('sticker'))
    elif isinstance(array.get('sticker'), str):
        data['sticker'] = u(array.get('sticker'))
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if
    if array.get('chat_id') is None:
        data['receiver'] = None
    # BUGFIX: the generated code had ``elif isinstance(array.get('chat_id'), None)``,
    # which raises ``TypeError: isinstance() arg 2 must be a type`` for every
    # non-None chat_id, so str/int receivers could never be deserialized.
    # The bogus branch (and its dead ``None(...)`` call) is removed.
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        data['reply_id'] = DEFAULT_MESSAGE_ID(array.get('reply_to_message_id'))
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    if array.get('reply_markup') is None:
        data['reply_markup'] = None
    elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
        data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
        data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
        data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ForceReply):
        data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
    else:
        raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
    # end if
    return StickerMessage(**data)
:return: new StickerMessage instance.
:rtype: StickerMessage |
def _fitImg(self, img):
    '''
    fit perspective and size of the input image to the reference image

    Pipeline: load as grayscale, optionally subtract the stored background
    and apply lens-distortion correction, find the homography to the
    reference, then warp the image into the reference geometry.

    Returns (fit, img, H, H_inv, n_matches) where ``fit`` is the warped
    image and ``img`` the (background-/lens-corrected) input.
    '''
    img = imread(img, 'gray')
    if self.bg is not None:
        # remove the static background before feature matching
        img = cv2.subtract(img, self.bg)
    if self.lens is not None:
        img = self.lens.correct(img, keepSize=True)
    (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
    H_inv = self.invertHomography(H)
    s = self.obj_shape
    # cv2.warpPerspective takes the destination size as (width, height)
    fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
    return fit, img, H, H_inv, n_matches
def _init_libcrypto():
    '''
    Set up libcrypto argtypes and initialize the library.

    Declares ctypes signatures for the RSA/BIO/PEM functions used by this
    module and runs OpenSSL's initialization (with a fallback for
    OpenSSL < 1.1). Returns the configured library handle.
    '''
    libcrypto = _load_libcrypto()

    try:
        libcrypto.OPENSSL_init_crypto()
    except AttributeError:
        # Support for OpenSSL < 1.1 (OPENSSL_API_COMPAT < 0x10100000L)
        libcrypto.OPENSSL_no_config()
        libcrypto.OPENSSL_add_all_algorithms_noconf()

    libcrypto.RSA_new.argtypes = ()
    libcrypto.RSA_new.restype = c_void_p
    libcrypto.RSA_free.argtypes = (c_void_p, )
    # BUGFIX: was ``RSA_size.argtype = (c_void_p)`` -- ``argtype`` is not a
    # ctypes attribute (the assignment was a silent no-op) and
    # ``(c_void_p)`` is not a tuple.
    libcrypto.RSA_size.argtypes = (c_void_p, )
    libcrypto.BIO_new_mem_buf.argtypes = (c_char_p, c_int)
    libcrypto.BIO_new_mem_buf.restype = c_void_p
    libcrypto.BIO_free.argtypes = (c_void_p, )
    libcrypto.PEM_read_bio_RSAPrivateKey.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
    libcrypto.PEM_read_bio_RSAPrivateKey.restype = c_void_p
    libcrypto.PEM_read_bio_RSA_PUBKEY.argtypes = (c_void_p, c_void_p, c_void_p, c_void_p)
    libcrypto.PEM_read_bio_RSA_PUBKEY.restype = c_void_p
    libcrypto.RSA_private_encrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
    libcrypto.RSA_public_decrypt.argtypes = (c_int, c_char_p, c_char_p, c_void_p, c_int)
    return libcrypto
async def upload_file(
        self, file, *, part_size_kb=None, file_name=None, use_cache=None,
        progress_callback=None):
    """
    Uploads the specified file and returns a handle (an instance of
    :tl:`InputFile` or :tl:`InputFileBig`, as required) which can be
    later used before it expires (they are usable during less than a day).

    Uploading a file will simply return a "handle" to the file stored
    remotely in the Telegram servers, which can be later used on. This
    will **not** upload the file to your own chat or any chat at all.

    Args:
        file (`str` | `bytes` | `file`):
            The path of the file, byte array, or stream that will be sent.
            Note that if a byte array or a stream is given, a filename
            or its type won't be inferred, and it will be sent as an
            "unnamed application/octet-stream".

        part_size_kb (`int`, optional):
            Chunk size when uploading files. The larger, the less
            requests will be made (up to 512KB maximum).

        file_name (`str`, optional):
            The file name which will be used on the resulting InputFile.
            If not specified, the name will be taken from the ``file``
            and if this is not a ``str``, it will be ``"unnamed"``.

        use_cache (`type`, optional):
            The type of cache to use (currently either :tl:`InputDocument`
            or :tl:`InputPhoto`). If present and the file is small enough
            to need the MD5, it will be checked against the database,
            and if a match is found, the upload won't be made. Instead,
            an instance of type ``use_cache`` will be returned.

        progress_callback (`callable`, optional):
            A callback function accepting two parameters:
            ``(sent bytes, total)``.

    Returns:
        :tl:`InputFileBig` if the file size is larger than 10MB,
        `telethon.tl.custom.inputsizedfile.InputSizedFile`
        (subclass of :tl:`InputFile`) otherwise.
    """
    if isinstance(file, (types.InputFile, types.InputFileBig)):
        return file  # Already uploaded

    if not file_name and getattr(file, 'name', None):
        file_name = file.name

    if isinstance(file, str):
        file_size = os.path.getsize(file)
    elif isinstance(file, bytes):
        file_size = len(file)
    else:
        # Stream input: read everything into memory, restoring the read
        # position afterwards when the stream supports seeking.
        if isinstance(file, io.IOBase) and file.seekable():
            pos = file.tell()
        else:
            pos = None

        # TODO Don't load the entire file in memory always
        data = file.read()
        if pos is not None:
            file.seek(pos)

        file = data
        file_size = len(file)

    # File will now either be a string or bytes
    if not part_size_kb:
        part_size_kb = utils.get_appropriated_part_size(file_size)

    if part_size_kb > 512:
        raise ValueError('The part size must be less or equal to 512KB')

    part_size = int(part_size_kb * 1024)
    if part_size % 1024 != 0:
        raise ValueError(
            'The part size must be evenly divisible by 1024')

    # Set a default file name if None was specified
    file_id = helpers.generate_random_long()
    if not file_name:
        if isinstance(file, str):
            file_name = os.path.basename(file)
        else:
            file_name = str(file_id)

    # If the file name lacks extension, add it if possible.
    # Else Telegram complains with `PHOTO_EXT_INVALID_ERROR`
    # even if the uploaded image is indeed a photo.
    if not os.path.splitext(file_name)[-1]:
        file_name += utils._get_extension(file)

    # Determine whether the file is too big (over 10MB) or not
    # Telegram does make a distinction between smaller or larger files
    is_large = file_size > 10 * 1024 * 1024
    hash_md5 = hashlib.md5()
    if not is_large:
        # Calculate the MD5 hash before anything else.
        # As this needs to be done always for small files,
        # might as well do it before anything else and
        # check the cache.
        if isinstance(file, str):
            with open(file, 'rb') as stream:
                file = stream.read()
        hash_md5.update(file)
        if use_cache:
            cached = self.session.get_file(
                hash_md5.digest(), file_size, cls=_CacheType(use_cache)
            )
            if cached:
                return cached

    # ceiling division: number of chunks needed to cover file_size
    part_count = (file_size + part_size - 1) // part_size
    self._log[__name__].info('Uploading file of %d bytes in %d chunks of %d',
                             file_size, part_count, part_size)

    with open(file, 'rb') if isinstance(file, str) else BytesIO(file)\
            as stream:
        for part_index in range(part_count):
            # Read the file by in chunks of size part_size
            part = stream.read(part_size)

            # The SavePartRequest is different depending on whether
            # the file is too large or not (over or less than 10MB)
            if is_large:
                request = functions.upload.SaveBigFilePartRequest(
                    file_id, part_index, part_count, part)
            else:
                request = functions.upload.SaveFilePartRequest(
                    file_id, part_index, part)

            result = await self(request)
            if result:
                self._log[__name__].debug('Uploaded %d/%d',
                                          part_index + 1, part_count)
                if progress_callback:
                    progress_callback(stream.tell(), file_size)
            else:
                raise RuntimeError(
                    'Failed to upload file part {}.'.format(part_index))

    if is_large:
        return types.InputFileBig(file_id, part_count, file_name)
    else:
        return custom.InputSizedFile(
            file_id, part_count, file_name, md5=hash_md5, size=file_size
        )
:tl:`InputFile` or :tl:`InputFileBig`, as required) which can be
later used before it expires (they are usable during less than a day).
Uploading a file will simply return a "handle" to the file stored
remotely in the Telegram servers, which can be later used on. This
will **not** upload the file to your own chat or any chat at all.
Args:
file (`str` | `bytes` | `file`):
The path of the file, byte array, or stream that will be sent.
Note that if a byte array or a stream is given, a filename
or its type won't be inferred, and it will be sent as an
"unnamed application/octet-stream".
part_size_kb (`int`, optional):
Chunk size when uploading files. The larger, the less
requests will be made (up to 512KB maximum).
file_name (`str`, optional):
The file name which will be used on the resulting InputFile.
If not specified, the name will be taken from the ``file``
and if this is not a ``str``, it will be ``"unnamed"``.
use_cache (`type`, optional):
The type of cache to use (currently either :tl:`InputDocument`
or :tl:`InputPhoto`). If present and the file is small enough
to need the MD5, it will be checked against the database,
and if a match is found, the upload won't be made. Instead,
an instance of type ``use_cache`` will be returned.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(sent bytes, total)``.
Returns:
:tl:`InputFileBig` if the file size is larger than 10MB,
`telethon.tl.custom.inputsizedfile.InputSizedFile`
(subclass of :tl:`InputFile`) otherwise. |
def commit(self, sha):
    """Get a single (repo) commit. See :func:`git_commit` for the Git Data
    Commit.

    :param str sha: (required), sha of the commit
    :returns: :class:`RepoCommit <github3.repos.commit.RepoCommit>` if
        successful, otherwise None
    """
    commit_url = self._build_url('commits', sha, base_url=self._api)
    payload = self._json(self._get(commit_url), 200)
    if not payload:
        return None
    return RepoCommit(payload, self)
Commit.
:param str sha: (required), sha of the commit
:returns: :class:`RepoCommit <github3.repos.commit.RepoCommit>` if
successful, otherwise None |
def pttl(self, key):
    """
    Emulate pttl (time to live in milliseconds).

    :param key: key for which pttl is requested.
    :returns: the number of milliseconds till timeout, None if the key does not exist or if the
              key has no timeout(as per the redis-py lib behavior).
    """
    # NOTE: a stray second string-literal "docstring" used to follow the real
    # docstring here; it was a dead expression statement referencing a
    # nonexistent ``output_ms`` parameter, so it has been removed.
    key = self._encode(key)
    if key not in self.redis:
        # as of redis 2.8, -2 is returned if the key does not exist
        return long(-2) if self.strict else None
    if key not in self.timeouts:
        # as of redis 2.8, -1 is returned if the key is persistent
        # redis-py returns None; command docs say -1
        return long(-1) if self.strict else None
    time_to_live = get_total_milliseconds(self.timeouts[key] - self.clock.now())
    return long(max(-1, time_to_live))
:param key: key for which pttl is requested.
:returns: the number of milliseconds till timeout, None if the key does not exist or if the
key has no timeout(as per the redis-py lib behavior). |
def get_realtime_alarm(username, auth, url):
    """Takes in no param as input to fetch RealTime Alarms from HP IMC RESTFUL API

    :param username OpeatorName, String type. Required. Default Value "admin". Checks the operator
    has the privileges to view the Real-Time Alarms.

    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class

    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass

    :return:list of dictionaries where each element of the list represents a single alarm as
    pulled from the the current list of realtime alarms in the HPE IMC Platform

    :rtype: list

    >>> from pyhpeimc.auth import *

    >>> from pyhpeimc.plat.alarms import *

    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")

    >>> real_time_alarm = get_realtime_alarm('admin', auth.creds, auth.url)

    >>> assert type(real_time_alarm) is list

    >>> assert 'faultDesc' in real_time_alarm[0]
    """
    f_url = url + "/imcrs/fault/faultRealTime?operatorName=" + username
    try:
        # BUGFIX: requests.get is the call that raises RequestException, so it
        # must be inside the try block (it was outside, which made the handler
        # unreachable for network failures).
        response = requests.get(f_url, auth=auth, headers=HEADERS)
        realtime_alarm_list = json.loads(response.text)
        return realtime_alarm_list['faultRealTime']['faultRealTimeList']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' get_realtime_alarm: An Error has occured'
:param username OpeatorName, String type. Required. Default Value "admin". Checks the operator
has the privileges to view the Real-Time Alarms.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:list of dictionaries where each element of the list represents a single alarm as
pulled from the the current list of realtime alarms in the HPE IMC Platform
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.alarms import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> real_time_alarm = get_realtime_alarm('admin', auth.creds, auth.url)
>>> assert type(real_time_alarm) is list
>>> assert 'faultDesc' in real_time_alarm[0] |
def run_duplicated_samples(in_prefix, in_type, out_prefix, base_dir, options):
    """Runs step1 (duplicated samples).

    :param in_prefix: the prefix of the input files.
    :param in_type: the type of the input files.
    :param out_prefix: the output prefix.
    :param base_dir: the output directory.
    :param options: the options needed.

    :type in_prefix: str
    :type in_type: str
    :type out_prefix: str
    :type base_dir: str
    :type options: list

    :returns: a :py:class:`_StepResult` containing the prefix of the output
              files (the input prefix for the next script) and the type of
              the output files (``tfile``), along with the LaTeX summary
              path and step descriptions.

    This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples`
    module. The required file type for this module is ``tfile``, hence the need
    to use the :py:func:`check_input_files` to check if the file input file
    type is the good one, or to create it if needed.
    """
    # Creating the output directory
    os.mkdir(out_prefix)

    # We know we need tfile
    required_type = "tfile"
    check_input_files(in_prefix, in_type, required_type)

    # We need to inject the name of the input file and the name of the output
    # prefix
    script_prefix = os.path.join(out_prefix, "dup_samples")
    options += ["--{}".format(required_type), in_prefix,
                "--out", script_prefix]

    # We run the script
    try:
        duplicated_samples.main(options)
    except duplicated_samples.ProgramError as e:
        msg = "duplicated_samples: {}".format(e)
        raise ProgramError(msg)

    # Reading the number of duplicated samples (keyed by (FID, IID))
    duplicated_count = defaultdict(int)
    if os.path.isfile(script_prefix + ".duplicated_samples.tfam"):
        with open(script_prefix + ".duplicated_samples.tfam", "r") as i_file:
            duplicated_count = Counter([
                tuple(createRowFromPlinkSpacedOutput(line)[:2])
                for line in i_file
            ])

    # Counting the number of zeroed out genotypes per duplicated sample
    zeroed_out = defaultdict(int)
    if os.path.isfile(script_prefix + ".zeroed_out"):
        with open(script_prefix + ".zeroed_out", "r") as i_file:
            zeroed_out = Counter([
                tuple(line.rstrip("\r\n").split("\t")[:2])
                for line in i_file.read().splitlines()[1:]
            ])
    nb_zeroed_out = sum(zeroed_out.values())

    # Checking the not good enough samples
    not_good_enough = set()
    if os.path.isfile(script_prefix + ".not_good_enough"):
        with open(script_prefix + ".not_good_enough", "r") as i_file:
            not_good_enough = {
                tuple(line.rstrip("\r\n").split("\t")[:4])
                for line in i_file.read().splitlines()[1:]
            }

    # Checking which samples were chosen
    chosen_sample = set()
    if os.path.isfile(script_prefix + ".chosen_samples.info"):
        with open(script_prefix + ".chosen_samples.info", "r") as i_file:
            chosen_sample = {
                tuple(line.rstrip("\r\n").split("\t"))
                for line in i_file.read().splitlines()[1:]
            }

    # Finding if some 'not_good_enough' samples were chosen
    not_good_still = {s[2:] for s in chosen_sample & not_good_enough}

    # We create a LaTeX summary
    latex_file = os.path.join(script_prefix + ".summary.tex")
    try:
        with open(latex_file, "w") as o_file:
            print >>o_file, latex_template.subsection(
                duplicated_samples.pretty_name
            )
            text = (
                "A total of {:,d} duplicated sample{} {} found.".format(
                    len(duplicated_count),
                    "s" if len(duplicated_count) > 1 else "",
                    "were" if len(duplicated_count) > 1 else "was",
                )
            )
            print >>o_file, latex_template.wrap_lines(text)

            # The detailed table is only generated when duplicates exist
            if len(duplicated_count) > 0:
                text = (
                    "While merging duplicates, a total of {:,d} genotype{} {} "
                    "zeroed out. A total of {:,d} sample{} {} found to be not "
                    "good enough for duplicate completion.".format(
                        nb_zeroed_out,
                        "s" if nb_zeroed_out > 1 else "",
                        "were" if nb_zeroed_out > 1 else "was",
                        len(not_good_enough),
                        "s" if len(not_good_enough) > 1 else "",
                        "were" if len(not_good_enough) > 1 else "was",
                    )
                )
                print >>o_file, latex_template.wrap_lines(text)

                # path separators are invalid in LaTeX labels
                table_label = re.sub(
                    r"[/\\]",
                    "_",
                    script_prefix,
                ) + "_dup_samples"
                text = (
                    r"Table~\ref{" + table_label + "} summarizes the number "
                    "of each duplicated sample with some characteristics."
                )
                print >>o_file, latex_template.wrap_lines(text)

                if len(not_good_still) > 0:
                    text = latex_template.textbf(
                        "There {} {:,d} sample{} that {} not good due to low "
                        "completion or concordance, but {} still selected as "
                        "the best duplicate (see Table~{}).".format(
                            "were" if len(not_good_still) > 1 else "was",
                            len(not_good_still),
                            "s" if len(not_good_still) > 1 else "",
                            "were" if len(not_good_still) > 1 else "was",
                            "were" if len(not_good_still) > 1 else "was",
                            r"~\ref{" + table_label + "}",
                        )
                    )
                    print >>o_file, latex_template.wrap_lines(text)

                # Getting the template
                longtable_template = latex_template.jinja2_env.get_template(
                    "longtable_template.tex",
                )

                # The table caption
                table_caption = (
                    "Summary of the {:,d} duplicated sample{}. The number of "
                    "duplicates and the total number of zeroed out genotypes "
                    "are shown.".format(
                        len(duplicated_count),
                        "s" if len(duplicated_count) > 1 else "",
                    )
                )

                if len(not_good_still) > 0:
                    table_caption += (
                        " A total of {:,d} sample{} (highlighted) {} not good "
                        "enough for completion, but {} chosen as the best "
                        "duplicate, and {} still in the final "
                        "dataset).".format(
                            len(not_good_still),
                            "s" if len(not_good_still) > 1 else "",
                            "were" if len(not_good_still) > 1 else "was",
                            "were" if len(not_good_still) > 1 else "was",
                            "are" if len(not_good_still) > 1 else "is",
                        )
                    )

                duplicated_samples_list = duplicated_count.most_common()

                print >>o_file, longtable_template.render(
                    table_caption=table_caption,
                    table_label=table_label,
                    nb_col=4,
                    col_alignments="llrr",
                    text_size="scriptsize",
                    header_data=[("FID", 1), ("IID", 1), ("Nb Duplicate", 1),
                                 ("Nb Zeroed", 1)],
                    tabular_data=[
                        [latex_template.sanitize_tex(fid),
                         latex_template.sanitize_tex(iid),
                         "{:,d}".format(nb),
                         "{:,d}".format(zeroed_out[(fid, iid)])]
                        for (fid, iid), nb in duplicated_samples_list
                    ],
                    highlighted=[
                        (fid, iid) in not_good_still
                        for fid, iid in [i[0] for i in duplicated_samples_list]
                    ],
                )

    except IOError:
        msg = "{}: cannot write LaTeX summary".format(latex_file)
        raise ProgramError(msg)

    # Writing the summary results (appended to the run-wide summary file)
    with open(os.path.join(base_dir, "results_summary.txt"), "a") as o_file:
        print >>o_file, "# {}".format(script_prefix)
        counter = Counter(duplicated_count.values()).most_common()
        if counter:
            print >>o_file, "Number of replicated samples"
        else:
            print >>o_file, "Number of replicated samples\t0"
        for rep_type, rep_count in counter:
            print >>o_file, " - x{}\t{:,d}\t\t-{:,d}".format(
                rep_type,
                rep_count,
                (rep_count * rep_type) - rep_count,
            )
        print >>o_file, ("Poorly chosen replicated "
                         "samples\t{:,d}".format(len(not_good_still)))
        print >>o_file, "---"

    # We know this step does produce a new data set (tfile), so we return it
    return _StepResult(
        next_file=os.path.join(out_prefix, "dup_samples.final"),
        next_file_type="tfile",
        latex_summary=latex_file,
        description=duplicated_samples.desc,
        long_description=duplicated_samples.long_desc,
        graph_path=None,
    )
:param in_prefix: the prefix of the input files.
:param in_type: the type of the input files.
:param out_prefix: the output prefix.
:param base_dir: the output directory.
:param options: the options needed.
:type in_prefix: str
:type in_type: str
:type out_prefix: str
:type base_dir: str
:type options: list
:returns: a tuple containing the prefix of the output files (the input
prefix for the next script) and the type of the output files
(``tfile``).
This function calls the :py:mod:`pyGenClean.DupSamples.duplicated_samples`
module. The required file type for this module is ``tfile``, hence the need
to use the :py:func:`check_input_files` to check if the file input file
type is the good one, or to create it if needed. |
def validate_request_table(self, request):
    '''
    Validates that all requests have the same table name. Set the table
    name if it is the first request for the batch operation.

    request:
        the request to insert, update or delete entity
    '''
    if not self.batch_table:
        # first request of the batch fixes the table name
        self.batch_table = self.get_request_table(request)
    elif self.get_request_table(request) != self.batch_table:
        raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity |
def __run_pre_all(self):
    """Run each delta directory's pre-all.py / pre-all.sql hooks, if present.

    Directories are visited in reverse order so that, given
    [delta1, delta2], delta2's pre scripts execute before delta1's.
    """
    for delta_dir in reversed(self.dirs):
        py_hook = os.path.join(delta_dir, 'pre-all.py')
        sql_hook = os.path.join(delta_dir, 'pre-all.sql')
        if os.path.isfile(py_hook):
            print('   Applying pre-all.py...', end=' ')
            self.__run_py_file(py_hook, 'pre-all')
            print('OK')
        if os.path.isfile(sql_hook):
            print('   Applying pre-all.sql...', end=' ')
            self.__run_sql_file(sql_hook)
            print('OK')
def plotstat(data, circleinds=None, crossinds=None, edgeinds=None, url_path=None, fileroot=None,
             tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
    """ Make a light-weight stat figure (spectral std vs. image kurtosis).

    :param data: dict of per-candidate columns; must contain at least
        'specstd', 'imkur', 'sizes', 'colors', 'snrs' and 'key'.
    :param circleinds: indices plotted as plain circles (defaults to all).
    :param crossinds: indices plotted as crosses (optional).
    :param edgeinds: indices plotted as edged circles (optional).
    :param url_path, fileroot: when both are given, tapping a glyph opens
        '{url_path}/cands_{fileroot}_<key>.png'.
    :returns: a bokeh Figure.
    """
    fields = ['imkur', 'specstd', 'sizes', 'colors', 'snrs', 'key']

    # Normalize the index collections: the concatenation and membership
    # tests below raise TypeError if any of them is left as None.
    if not circleinds: circleinds = range(len(data['snrs']))
    if crossinds is None: crossinds = []
    if edgeinds is None: edgeinds = []

    # set axis ranges from all plotted points
    inds = circleinds + crossinds + edgeinds
    specstd = [data['specstd'][i] for i in inds]
    specstd_min = min(specstd)
    specstd_max = max(specstd)
    imkur = [data['imkur'][i] for i in inds]
    imkur_min = min(imkur)
    imkur_max = max(imkur)

    source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
                                           for (key, value) in data.iteritems() if key in fields}))
    stat = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='Spectral std',
                  y_axis_label='Image kurtosis', x_range=(specstd_min, specstd_max),
                  y_range=(imkur_min, imkur_max), tools=tools, output_backend='webgl')
    stat.circle('specstd', 'imkur', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)

    if crossinds:
        sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
                                                  for (key, value) in data.iteritems() if key in fields}))
        stat.cross('specstd', 'imkur', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
    if edgeinds:
        sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
                                                   for (key, value) in data.iteritems() if key in fields}))
        stat.circle('specstd', 'imkur', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)

    hover = stat.select(dict(type=HoverTool))
    hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
    if url_path and fileroot:
        url = '{}/cands_{}_@key.png'.format(url_path, fileroot)
        taptool = stat.select(type=TapTool)
        taptool.callback = OpenURL(url=url)

    return stat
def request_camera_sensors(blink, network, camera_id):
    """Request camera sensor info for one camera.

    :param blink: Blink instance.
    :param network: Sync module network id.
    :param camera_id: Camera ID of camera to request sensor info from.
    """
    endpoint = "{}/network/{}/camera/{}/signals".format(
        blink.urls.base_url, network, camera_id)
    return http_get(blink, endpoint)
:param blink: Blink instance.
:param network: Sync module network id.
:param camera_id: Camera ID of camera to request sensor info from. |
def prompt_save_images(args):
    """Ask the user whether to save images when crawling to pdf/HTML.

    Mutates ``args`` in place, setting the complementary 'images' and
    'no_images' flags. Does nothing if either flag is already set, or if
    the chosen format/crawl combination does not involve images.
    """
    if args['images'] or args['no_images']:
        return
    wants_media_format = args['pdf'] or args['html']
    is_crawling = args['crawl'] or args['crawl_all']
    if not (wants_media_format and is_crawling):
        return
    prompt = ('Choosing to save images will greatly slow the'
              ' crawling process.\nSave images anyways? (y/n): ')
    try:
        choice = utils.confirm_input(input(prompt))
    except (KeyboardInterrupt, EOFError):
        # Treat an aborted prompt as "leave both flags unset".
        return
    args['images'] = choice
    args['no_images'] = not choice
def build_calmjs_artifacts(dist, key, value, cmdclass=BuildCommand):
    """
    Hook the calmjs artifact build step into setuptools' 'build' command.

    No-op unless ``value`` is exactly True. Logs an error (and does
    nothing) when the distribution's 'build' command is not the expected
    ``cmdclass``.
    """
    if value is not True:
        return

    build_cmd = dist.get_command_obj('build')
    if isinstance(build_cmd, cmdclass):
        build_cmd.sub_commands.append((key, has_calmjs_artifact_declarations))
        return

    logger.error(
        "'build' command in Distribution is not an instance of "
        "'%s:%s' (got %r instead)",
        cmdclass.__module__, cmdclass.__name__, build_cmd)
def update(self, client=None, unique_writer_identity=False):
    """API call: update sink configuration via a PUT request

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update

    :type client: :class:`~google.cloud.logging.client.Client` or
                  ``NoneType``
    :param client: the client to use.  If not passed, falls back to the
                   ``client`` stored on the current sink.

    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of
                                   IAM identity returned as
                                   writer_identity in the new sink.
    """
    api_client = self._require_client(client)
    server_resource = api_client.sinks_api.sink_update(
        self.project, self.name, self.filter_, self.destination,
        unique_writer_identity=unique_writer_identity)
    # Sync local state with what the server now holds for this sink.
    self._update_from_api_repr(server_resource)
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current sink.
:type unique_writer_identity: bool
:param unique_writer_identity: (Optional) determines the kind of
IAM identity returned as
writer_identity in the new sink. |
def get_signature(func):
    """
    Compute (and cache on *func*) a canonical signature string of the
    form ``"required|optional|wildcard"``: the sorted, comma-joined
    required and defaulted argument names, and ``'*'`` when ``**kwargs``
    is accepted. Returns None for non-introspectable callables.

    :type func: Callable
    :rtype: str
    """
    try:
        # Cached from a previous call?
        return object.__getattribute__(func, '__tri_declarative_signature')
    except AttributeError:
        pass

    try:
        if sys.version_info[0] < 3:  # pragma: no mutate
            names, _, varkw, defaults = inspect.getargspec(func)  # pragma: no mutate
        else:
            names, _, varkw, defaults, _, _, _ = inspect.getfullargspec(func)  # pragma: no coverage
    except TypeError:
        # Builtins and other C-level callables can't be introspected.
        return None

    skip = 1 if inspect.ismethod(func) else 0  # don't count bound 'self'
    n_defaults = len(defaults) if defaults else 0
    if n_defaults:
        required_names = names[skip:-n_defaults]
        optional_names = names[-n_defaults:]
    else:
        required_names = names[skip:]
        optional_names = []

    signature = '|'.join((
        ','.join(sorted(required_names)),
        ','.join(sorted(optional_names)),
        '*' if varkw is not None else '',
    ))

    try:
        object.__setattr__(func, '__tri_declarative_signature', signature)
    except TypeError:
        # Classes reject object.__setattr__; go through type instead.
        type.__setattr__(func, '__tri_declarative_signature', signature)
    except AttributeError:
        pass
    return signature
:rtype: str |
def contact(request):
    """Display the contact form and send the email.

    On a valid POST, sends the message to ``settings.CONTACTFORM_RECIPIENTS``
    (plus the sender when "cc myself" is checked) and renders the thanks
    page; otherwise (re-)renders the form.
    """
    form = ContactForm(request.POST or None)
    if form.is_valid():
        subject = form.cleaned_data['subject']
        message = form.cleaned_data['message']
        sender = form.cleaned_data['sender']
        cc_myself = form.cleaned_data['cc_myself']

        # Copy the configured list: appending the sender to the settings
        # object itself would permanently grow it across requests.
        recipients = list(settings.CONTACTFORM_RECIPIENTS)
        if cc_myself:
            recipients.append(sender)

        send_mail(getattr(settings, "CONTACTFORM_SUBJECT_PREFIX", '') + subject,
                  message, sender, recipients)
        return render(request, 'contactform/thanks.html')
    return render( request, 'contactform/contact.html', {'form': form})
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Calculate precision and recall based on evaluation code of PASCAL VOC.

    This function calculates precision and recall of predicted bounding
    boxes obtained from a dataset which has :math:`N` images. The code is
    based on the evaluation code used in the PASCAL VOC Challenge.

    Args:
        gt_boxlists: per-image ground-truth BoxLists; each must expose
            ``bbox`` and the fields ``labels`` and ``difficult``.
        pred_boxlists: per-image predicted BoxLists; each must expose
            ``bbox`` and the fields ``labels`` and ``scores``.
        iou_thresh: minimum IoU for a prediction to match a ground truth.

    Returns:
        (prec, rec): lists indexed by class label; each entry is a
        cumulative precision / recall array, or None for absent classes
        (rec[l] also stays None when the class has no positives).
    """
    n_pos = defaultdict(int)
    score = defaultdict(list)
    match = defaultdict(list)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.numpy()
        pred_label = pred_boxlist.get_field("labels").numpy()
        pred_score = pred_boxlist.get_field("scores").numpy()
        gt_bbox = gt_boxlist.bbox.numpy()
        gt_label = gt_boxlist.get_field("labels").numpy()
        gt_difficult = gt_boxlist.get_field("difficult").numpy()
        # Evaluate every class present in either predictions or ground truth.
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            # sort by score
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]
            # "difficult" ground truths don't count toward the positive total.
            n_pos[l] += np.logical_not(gt_difficult_l).sum()
            score[l].extend(pred_score_l)
            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                # No ground truth of this class: every prediction is a miss.
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue
            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou
            # Greedy matching in descending score order; `selec` marks
            # ground-truth boxes already claimed (each counts at most once).
            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:
                if gt_idx >= 0:
                    if gt_difficult_l[gt_idx]:
                        # Matches to "difficult" boxes are ignored entirely.
                        match[l].append(-1)
                    else:
                        if not selec[gt_idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                    selec[gt_idx] = True
                else:
                    match[l].append(0)
    n_fg_class = max(n_pos.keys()) + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class
    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)
        order = score_l.argsort()[::-1]
        match_l = match_l[order]
        tp = np.cumsum(match_l == 1)
        fp = np.cumsum(match_l == 0)
        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]
    return prec, rec | Calculate precision and recall based on evaluation code of PASCAL VOC.
This function calculates precision and recall of
predicted bounding boxes obtained from a dataset which has :math:`N`
images.
The code is based on the evaluation code used in PASCAL VOC Challenge. |
def add_file(self, filepath, gzip=False, cache_name=None):
    """Load a static file in the cache.

    .. note:: Items are stored with the filepath as is (relative or absolute) as the key.

    :param str|unicode filepath:
    :param bool gzip: Use gzip compression.
    :param str|unicode cache_name: If not set, default will be used.
    """
    command = 'load-file-in-cache-gzip' if gzip else 'load-file-in-cache'
    value = '%s %s' % (cache_name or '', filepath)
    self._set(command, value.strip(), multi=True)
    return self._section
.. note:: Items are stored with the filepath as is (relative or absolute) as the key.
:param str|unicode filepath:
:param bool gzip: Use gzip compression.
:param str|unicode cache_name: If not set, default will be used. |
def _get_mixing_indices(size, seed=None, name=None):
    """Generates an array of indices suitable for mutation operation.

    The mutation operation in differential evolution requires that for every
    element of the population, three distinct other elements be chosen to
    produce a trial candidate. This function generates an array of shape
    [size, 3] satisfying the properties that:
      (a). array[i, :] does not contain the index 'i'.
      (b). array[i, :] does not contain any overlapping indices.
      (c). All elements in the array are between 0 and size - 1 inclusive.

    Args:
      size: Scalar integer `Tensor`. The number of samples as well as a the range
        of the indices to sample from.
      seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
        applied.
        Default value: `None`.
      name: Python `str` name prefixed to Ops created by this function.
        Default value: 'get_mixing_indices'.

    Returns:
      sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing
        samples without replacement between 0 and size - 1 (inclusive) with the
        `i`th row not including the number `i`.
    """
    with tf.compat.v1.name_scope(
        name, default_name='get_mixing_indices', values=[size]):
        size = tf.convert_to_tensor(value=size)
        dtype = size.dtype
        seed_stream = distributions.SeedStream(seed, salt='get_mixing_indices')
        # Draw from progressively smaller ranges (size-1, size-2, size-3);
        # the conditional shifts below map the three draws onto mutually
        # distinct indices in [0, size - 1).
        first = tf.random.uniform([size],
                                  maxval=size-1,
                                  dtype=dtype,
                                  seed=seed_stream())
        second = tf.random.uniform([size],
                                   maxval=size-2,
                                   dtype=dtype,
                                   seed=seed_stream())
        third = tf.random.uniform([size],
                                  maxval=size-3,
                                  dtype=dtype,
                                  seed=seed_stream())

        # Shift second if it is on top of or to the right of first
        second = tf.where(first < second, x=second, y=second + 1)
        smaller = tf.math.minimum(first, second)
        larger = tf.math.maximum(first, second)
        # Shift the third one so it does not coincide with either the first or the
        # second number. Assuming first < second, shift by 1 if the number is in
        # [first, second) and by 2 if the number is greater than or equal to the
        # second.
        third = tf.where(third < smaller, x=third, y=third + 1)
        third = tf.where(third < larger, x=third, y=third + 1)
        sample = tf.stack([first, second, third], axis=1)
        # Finally, shift every index at or above its own row number so that
        # row i can never contain i (property (a) above).
        to_avoid = tf.expand_dims(tf.range(size), axis=-1)
        sample = tf.where(sample < to_avoid, x=sample, y=sample + 1)
        return sample | Generates an array of indices suitable for mutation operation.
The mutation operation in differential evolution requires that for every
element of the population, three distinct other elements be chosen to produce
a trial candidate. This function generates an array of shape [size, 3]
satisfying the properties that:
(a). array[i, :] does not contain the index 'i'.
(b). array[i, :] does not contain any overlapping indices.
(c). All elements in the array are between 0 and size - 1 inclusive.
Args:
size: Scalar integer `Tensor`. The number of samples as well as a the range
of the indices to sample from.
seed: `int` or None. The random seed for this `Op`. If `None`, no seed is
applied.
Default value: `None`.
name: Python `str` name prefixed to Ops created by this function.
Default value: 'get_mixing_indices'.
Returns:
sample: A `Tensor` of shape [size, 3] and same dtype as `size` containing
samples without replacement between 0 and size - 1 (inclusive) with the
`i`th row not including the number `i`. |
def post_translation(self, query, bug):
    """
    Convert the results of getbug back to the ancient RHBZ value
    formats
    """
    ignore = query

    # RHBZ _still_ returns component and version as lists, which deviates
    # from upstream. Mirror each list into its plural key and keep the
    # scalar key pointing at the first entry.
    for singular, plural in (('component', 'components'),
                             ('version', 'versions')):
        if singular in bug and plural not in bug:
            val = bug[singular]
            bug[plural] = val if isinstance(val, list) and val else [val]
            bug[singular] = bug[plural][0]

    # sub_components isn't too friendly of a format; flatten it into a
    # simpler space-joined sub_component string.
    if 'sub_components' in bug and 'sub_component' not in bug:
        val = bug['sub_components']
        bug['sub_component'] = ""
        if isinstance(val, dict):
            flattened = []
            for sublist in val.values():
                flattened += sublist
            bug['sub_component'] = " ".join(flattened)
formats |
def setValue(self, key, value):
    """
    Sets the value for the given key to the input value.

    :param key | <str>
           value | <variant>
    """
    if not self._customFormat:
        # No custom format: store via the base QSettings, wrapping the
        # value for Qt variant storage.
        super(XSettings, self).setValue(key, wrapVariant(value))
    else:
        self._customFormat.setValue(key, value)
:param key | <str>
value | <variant> |
def getXY(self, debug=False):
        '''
        Returns the I{screen} coordinates of this C{View}.

        Walks up the parent chain accumulating offsets, then corrects for
        window position and (when applicable) the status bar.

        WARNING: Don't call self.getX() or self.getY() inside this method
        or it will enter an infinite loop

        @return: The I{screen} coordinates of this C{View}
        '''

        if DEBUG_COORDS or debug:
            try:
                _id = self.getId()
            except:
                _id = "NO_ID"
            print >> sys.stderr, "getXY(%s %s ## %s)" % (self.getClass(), _id, self.getUniqueId())

        x = self.__getX()
        y = self.__getY()
        if self.useUiAutomator:
            # UiAutomator already reports absolute screen coordinates.
            return (x, y)

        parent = self.parent
        if DEBUG_COORDS: print >> sys.stderr, "   getXY: x=%s y=%s parent=%s" % (x, y, parent.getUniqueId() if parent else "None")
        hx = 0
        ''' Hierarchy accumulated X '''
        hy = 0
        ''' Hierarchy accumulated Y '''

        if DEBUG_COORDS: print >> sys.stderr, "   getXY: not using UiAutomator, calculating parent coordinates"
        # Accumulate every ancestor's offset, optionally skipping decor /
        # action-bar containers whose offsets are already accounted for.
        while parent != None:
            if DEBUG_COORDS: print >> sys.stderr, "      getXY: parent: %s %s <<<<" % (parent.getClass(), parent.getId())
            if SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED:
                if parent.getClass() in [ 'com.android.internal.widget.ActionBarView',
                                   'com.android.internal.widget.ActionBarContextView',
                                   'com.android.internal.view.menu.ActionMenuView',
                                   'com.android.internal.policy.impl.PhoneWindow$DecorView' ]:
                    if DEBUG_COORDS: print >> sys.stderr, "   getXY: skipping %s %s (%d,%d)" % (parent.getClass(), parent.getId(), parent.__getX(), parent.__getY())
                    parent = parent.parent
                    continue
            if DEBUG_COORDS: print >> sys.stderr, "      getXY: parent=%s x=%d hx=%d y=%d hy=%d" % (parent.getId(), x, hx, y, hy)
            hx += parent.__getX()
            hy += parent.__getY()
            parent = parent.parent

        # Add the containing window's position on screen.
        (wvx, wvy) = self.__dumpWindowsInformation(debug=debug)
        if DEBUG_COORDS or debug:
            print >>sys.stderr, "   getXY: wv=(%d, %d) (windows information)" % (wvx, wvy)
        try:
            if self.windowId:
                fw = self.windows[self.windowId]
            else:
                fw = self.windows[self.currentFocus]
            if DEBUG_STATUSBAR:
                print >> sys.stderr, "    getXY: focused window=", fw
                print >> sys.stderr, "    getXY: deciding whether to consider statusbar offset because current focused windows is at", (fw.wvx, fw.wvy), "parent", (fw.px, fw.py)
        except KeyError:
            fw = None
        (sbw, sbh) = self.__obtainStatusBarDimensionsIfVisible()
        if DEBUG_COORDS or debug:
            print >>sys.stderr, "   getXY: sb=(%d, %d) (statusbar dimensions)" % (sbw, sbh)
        statusBarOffset = 0
        pwx = 0
        pwy = 0

        if fw:
            if DEBUG_COORDS:
                print >>sys.stderr, "    getXY: focused window=", fw, "sb=", (sbw, sbh)
            if fw.wvy <= sbh: # it's very unlikely that fw.wvy < sbh, that is a window over the statusbar
                if DEBUG_STATUSBAR: print >>sys.stderr, "        getXY: yes, considering offset=", sbh
                statusBarOffset = sbh
            else:
                if DEBUG_STATUSBAR: print >>sys.stderr, "        getXY: no, ignoring statusbar offset fw.wvy=", fw.wvy, ">", sbh

            if fw.py == fw.wvy:
                if DEBUG_STATUSBAR: print >>sys.stderr, "        getXY: but wait, fw.py == fw.wvy so we are adjusting by ", (fw.px, fw.py)
                pwx = fw.px
                pwy = fw.py
            else:
                if DEBUG_STATUSBAR: print >>sys.stderr, "    getXY: fw.py=%d <= fw.wvy=%d, no adjustment" % (fw.py, fw.wvy)

        if DEBUG_COORDS or DEBUG_STATUSBAR or debug:
            print >>sys.stderr, "   getXY: returning (%d, %d) ***" % (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
            print >>sys.stderr, "                     x=%d+%d+%d+%d" % (x,hx,wvx,pwx)
            print >>sys.stderr, "                     y=%d+%d+%d-%d+%d" % (y,hy,wvy,statusBarOffset,pwy)
        return (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy) | Returns the I{screen} coordinates of this C{View}.
WARNING: Don't call self.getX() or self.getY() inside this method
or it will enter an infinite loop
@return: The I{screen} coordinates of this C{View} |
def patch_config(config, data):
    """Recursively merge ``data`` into ``config`` in place.

    A value of None deletes the corresponding key; nested dicts are
    merged recursively; any other value overwrites the existing one
    (values are compared via their str() representation).

    :returns: `!True` if the `config` was changed
    """
    changed = False
    for key, new_value in data.items():
        if new_value is None:
            # Deletion request; pop() so a missing key is not a change.
            if config.pop(key, None) is not None:
                changed = True
            continue
        if key not in config:
            config[key] = new_value
            changed = True
            continue
        old_value = config[key]
        if isinstance(new_value, dict) and isinstance(old_value, dict):
            if patch_config(old_value, new_value):
                changed = True
        elif isinstance(new_value, dict) or str(old_value) != str(new_value):
            # Either a dict replacing a scalar, or a genuinely new value.
            config[key] = new_value
            changed = True
    return changed
:returns: `!True` if the `config` was changed |
def do_filter(self, arg):
    """Sets the filter for the test cases to include in the plot/table by name. Only those
    test cases that include this text are included in plots, tables etc."""
    if arg == "list":
        # Display the currently active filters.
        msg.info("TEST CASE FILTERS")
        for current in self.curargs["tfilter"]:
            msg.info("   * (default, matches all)" if current == "*"
                     else "   " + current)
    elif arg not in self.curargs["tfilter"]:
        # Register the new filter, then echo the updated list.
        self.curargs["tfilter"].append(arg)
        self.do_filter("list")
test cases that include this text are included in plots, tables etc. |
def ListLanguageIdentifiers(self):
    """Lists the language identifiers."""
    # Two-column table: identifier plus its human-readable language name.
    table_view = views.ViewsFactory.GetTableView(
        self._views_format_type, column_names=['Identifier', 'Language'],
        title='Language identifiers')
    rows = sorted(language_ids.LANGUAGE_IDENTIFIERS.items())
    for identifier, values in rows:
        table_view.AddRow([identifier, values[1]])
    table_view.Write(self._output_writer)
def _sanitize_column(self, key, value, **kwargs):
    """
    Creates a new SparseArray from the input value.

    Parameters
    ----------
    key : object
    value : scalar, Series, or array-like
    kwargs : dict

    Returns
    -------
    sanitized_column : SparseArray
    """
    # Helper: wrap ``x`` in a SparseArray using this frame's defaults.
    def sp_maker(x, index=None):
        return SparseArray(x, index=index,
                           fill_value=self._default_fill_value,
                           kind=self._default_kind)
    if isinstance(value, SparseSeries):
        # Align to our index and convert, keeping the frame's sparse defaults.
        clean = value.reindex(self.index).as_sparse_array(
            fill_value=self._default_fill_value, kind=self._default_kind)
    elif isinstance(value, SparseArray):
        if len(value) != len(self.index):
            raise ValueError('Length of values does not match '
                             'length of index')
        clean = value
    elif hasattr(value, '__iter__'):
        if isinstance(value, Series):
            # Dense Series: align first, then sparsify.
            clean = value.reindex(self.index)
            if not isinstance(value, SparseSeries):
                clean = sp_maker(clean)
        else:
            # Plain iterable: length must already match the index.
            if len(value) != len(self.index):
                raise ValueError('Length of values does not match '
                                 'length of index')
            clean = sp_maker(value)
    # Scalar
    else:
        clean = sp_maker(value, self.index)
    # always return a SparseArray!
    return clean | Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray |
def fit_mle(self, data, b=None):
    """%(super)s
    b : float
        The upper bound of the distribution. If None, fixed at sum(data)
    """
    observations = np.array(data)
    n_obs = len(observations)
    if not b:
        # Default (including b == 0): bound at the total of the data.
        b = np.sum(observations)
    return _trunc_logser_solver(n_obs, b), b
b : float
The upper bound of the distribution. If None, fixed at sum(data) |
def search(query, medium, credentials):
    """Searches MyAnimeList for a [medium] matching the keyword(s) given by query.

    :param query The keyword(s) to search with.
    :param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
    :param credentials MAL auth pair used for the API request and for any
           rescheduled retry of this search.
    :return A list of all items that are of type [medium] and match the
            given keywords, or, an empty list if none matched.
    :raise ValueError For bad arguments.
    """
    helpers.check_creds(credentials, header)
    if len(query) == 0:
        raise ValueError(constants.INVALID_EMPTY_QUERY)
    api_query = helpers.get_query_url(medium, query)
    if api_query is None:
        raise ValueError(constants.INVALID_MEDIUM)
    search_resp = requests.get(api_query, auth=credentials, headers=header)
    # 204 No Content (or a missing response object) means nothing matched.
    if search_resp is None or search_resp.status_code == 204:
        return []
    query_soup = BeautifulSoup(search_resp.text, 'lxml')
    if medium == tokens.Medium.ANIME:
        entries = query_soup.anime
        if entries is None:
            # Empty/throttled payload: retry this search after the default wait.
            return helpers.reschedule(search, constants.DEFAULT_WAIT_SECS, query,
                                      medium, credentials)
        return [objects.Anime(entry) for entry in entries.findAll('entry')]
    elif medium == tokens.Medium.MANGA:
        entries = query_soup.manga
        if entries is None:
            # FIX: previously `credentials` was omitted here, so the
            # rescheduled search() call would fail with a TypeError;
            # pass it exactly as the anime branch does.
            return helpers.reschedule(search, constants.DEFAULT_WAIT_SECS, query,
                                      medium, credentials)
        return [objects.Manga(entry) for entry in entries.findAll('entry')]
:param query The keyword(s) to search with.
:param medium Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
:return A list of all items that are of type [medium] and match the
given keywords, or, an empty list if none matched.
:raise ValueError For bad arguments. |
def discover_by_name(input_directory, output_directory):
    """
    Auto discover metric types from the files that exist in input_directory and return a list of metrics

    :param: input_directory: The location to scan for log files
    :param: output_directory: The location for the report
    """
    metric_list = []
    for log_file in os.listdir(input_directory):
        if log_file not in CONSTANTS.SUPPORTED_FILENAME_MAPPING:
            logger.warning('Unable to determine metric type for file: %s', log_file)
            continue
        metric_type = CONSTANTS.SUPPORTED_FILENAME_MAPPING[log_file]
        metric_list.append(initialize_metric(metric_type, [log_file], None, [], output_directory,
                                             CONSTANTS.RESOURCE_PATH, metric_type, None, None, {}, None, None, {}))
    return metric_list
:param: input_directory: The location to scan for log files
:param: output_directory: The location for the report |
def train(self, inputData, numIterations, reset=False):
    """
    Trains the SparseNet on the provided data.

    Set ``reset`` to False to continue a previously started training run
    without clearing the basis and history.

    :param inputData: (array) Input data, of dimension (inputDim, numPoints)
    :param numIterations: (int) Number of training iterations
    :param reset: (bool) If set to True, reset basis and history
    """
    if not isinstance(inputData, np.ndarray):
        inputData = np.array(inputData)
    if reset:
        self._reset()

    for _ in xrange(numIterations):
        self._iteration += 1
        batch = self._getDataBatch(inputData)
        # Check input dimension before encoding.
        if batch.shape[0] != self.filterDim:
            raise ValueError("Batches and filter dimesions don't match!")
        self._learn(batch, self.encode(batch))
        # Periodically decay the learning rate.
        if self._iteration % self.decayCycle == 0:
            self.learningRate *= self.learningRateDecay

    if self.verbosity >= 1:
        self.plotLoss()
        self.plotBasis()
The reset parameter can be set to False if the network should not be
reset before training (for example for continuing a previous started
training).
:param inputData: (array) Input data, of dimension (inputDim, numPoints)
:param numIterations: (int) Number of training iterations
:param reset: (bool) If set to True, reset basis and history |
def flip(self,inplace=False):
        """
        NAME:
           flip
        PURPOSE:
           'flip' an orbit's initial conditions such that the velocities are minus the original velocities; useful for quick backward integration; returns a new Orbit instance
        INPUT:
           inplace= (False) if True, flip the orbit in-place, that is, without returning a new instance and also flip the velocities of the integrated orbit (if it exists)
        OUTPUT:
           Orbit instance that has the velocities of the current orbit flipped (inplace=False) or just flips all velocities of current instance (inplace=True)
        HISTORY:
           2014-06-17 - Written - Bovy (IAS)
           2016-07-21 - Added inplace keyword - Bovy (UofT)
        """
        # NOTE(review): indices 1, 2 and 4 of vxvv are negated below, i.e.
        # they are assumed to hold the velocity components — confirm against
        # the Orbit phase-space layout.
        if inplace:
            self._orb.vxvv[1]= -self._orb.vxvv[1]
            if len(self._orb.vxvv) > 2:
                self._orb.vxvv[2]= -self._orb.vxvv[2]
            if len(self._orb.vxvv) > 4:
                self._orb.vxvv[4]= -self._orb.vxvv[4]
            if hasattr(self._orb,'orbit'):
                # Also flip the velocities of the integrated orbit.
                self._orb.orbit[:,1]= -self._orb.orbit[:,1]
                if len(self._orb.vxvv) > 2:
                    self._orb.orbit[:,2]= -self._orb.orbit[:,2]
                if len(self._orb.vxvv) > 4:
                    self._orb.orbit[:,4]= -self._orb.orbit[:,4]
            if hasattr(self._orb,"_orbInterp"):
                # Drop the cached interpolator; it no longer matches the data.
                delattr(self._orb,"_orbInterp")
            return None
        # Build a new Orbit carrying over the unit/position setup.
        orbSetupKwargs= {'ro':None,
                         'vo':None,
                         'zo':self._orb._zo,
                         'solarmotion':self._orb._solarmotion}
        if self._orb._roSet:
            orbSetupKwargs['ro']= self._orb._ro
        if self._orb._voSet:
            orbSetupKwargs['vo']= self._orb._vo
        # One case per phase-space dimensionality (2-6), negating the
        # velocity entries and copying the positional ones.
        if len(self._orb.vxvv) == 2:
            return Orbit(vxvv= [self._orb.vxvv[0],-self._orb.vxvv[1]],
                         **orbSetupKwargs)
        elif len(self._orb.vxvv) == 3:
            return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
                               -self._orb.vxvv[2]],**orbSetupKwargs)
        elif len(self._orb.vxvv) == 4:
            return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
                               -self._orb.vxvv[2],self._orb.vxvv[3]],
                         **orbSetupKwargs)
        elif len(self._orb.vxvv) == 5:
            return Orbit(vxvv=[self._orb.vxvv[0],-self._orb.vxvv[1],
                               -self._orb.vxvv[2],self._orb.vxvv[3],
                               -self._orb.vxvv[4]],**orbSetupKwargs)
        elif len(self._orb.vxvv) == 6:
            return Orbit(vxvv= [self._orb.vxvv[0],-self._orb.vxvv[1],
                                -self._orb.vxvv[2],self._orb.vxvv[3],
                                -self._orb.vxvv[4],self._orb.vxvv[5]],
                         **orbSetupKwargs) | NAME:
flip
PURPOSE:
'flip' an orbit's initial conditions such that the velocities are minus the original velocities; useful for quick backward integration; returns a new Orbit instance
INPUT:
inplace= (False) if True, flip the orbit in-place, that is, without returning a new instance and also flip the velocities of the integrated orbit (if it exists)
OUTPUT:
Orbit instance that has the velocities of the current orbit flipped (inplace=False) or just flips all velocities of current instance (inplace=True)
HISTORY:
2014-06-17 - Written - Bovy (IAS)
2016-07-21 - Added inplace keyword - Bovy (UofT) |
def replies(self):
        """Return a list of the comment replies to this comment.

        If the comment is not from a submission, :meth:`replies` will
        always be an empty list unless you call :meth:`refresh()
        before calling :meth:`replies` due to a limitation in
        reddit's API.
        """
        # Fetch lazily, at most once per instance.
        if self._replies is None or not self._has_fetched_replies:
            response = self.reddit_session.request_json(self._fast_permalink)
            if response[1]['data']['children']:
                # pylint: disable=W0212
                self._replies = response[1]['data']['children'][0]._replies
            else:
                # comment is "specially deleted", a reddit inconsistency;
                # see #519, #524, #535, #537, and #552 it needs to be
                # retreived via /api/info, but that's okay since these
                # specially deleted comments always have the same json
                # structure.
                msg = ("Comment {0} was deleted or removed, and had "
                       "no replies when such happened, so it still "
                       "has no replies".format(self.name))
                warn(msg, RuntimeWarning)
                self._replies = []
            # pylint: enable=W0212
            self._has_fetched_replies = True
            # Set the submission object if it is not set.
            if not self._submission:
                self._submission = response[0]['data']['children'][0]
        return self._replies | Return a list of the comment replies to this comment.
If the comment is not from a submission, :meth:`replies` will
always be an empty list unless you call :meth:`refresh()
before calling :meth:`replies` due to a limitation in
reddit's API. |
def transaction_search_query(self, transaction_type, **kwargs):
    """
    Query the Yelp Transaction Search API.

    documentation: https://www.yelp.com/developers/documentation/v3/transactions_search

    required parameters:
        * transaction_type - transaction type
        * one of either:
            * location - text specifying a location to search for
            * latitude and longitude
    """
    if not transaction_type:
        raise ValueError('A valid transaction type (parameter "transaction_type") must be provided.')
    # A textual location OR a complete lat/lon pair must be supplied.
    has_latlon = kwargs.get('latitude') and kwargs.get('longitude')
    if not kwargs.get('location') and not has_latlon:
        raise ValueError('A valid location (parameter "location") or latitude/longitude combination '
                         '(parameters "latitude" and "longitude") must be provided.')
    return self._query(TRANSACTION_SEARCH_API_URL.format(transaction_type), **kwargs)
documentation: https://www.yelp.com/developers/documentation/v3/transactions_search
required parameters:
* transaction_type - transaction type
* one of either:
* location - text specifying a location to search for
* latitude and longitude |
def get_group_members(group_name, region=None, key=None, keyid=None, profile=None):
    '''
    Get group information.

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.get_group mygroup
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        members = []
        marker = None
        # Page through the listing, up to 1000 users per request.
        while True:
            info = conn.get_group(group_name, marker=marker, max_items=1000)
            if not info:
                return False
            result = info['get_group_response']['get_group_result']
            members += result['users']
            # Continue only while the listing is truncated AND the service
            # handed back a continuation marker.
            if not bool(result['is_truncated']) or 'marker' not in result:
                return members
            marker = result['marker']
    except boto.exception.BotoServerError as e:
        log.debug(e)
        log.error('Failed to get members for IAM group %s.', group_name)
        return False
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt myminion boto_iam.get_group mygroup |
def set_sort_function(sortable, callback, column=0):
    """Install *callback* as the default sort function of *sortable*.

    *sortable* is a :class:`gtk.TreeSortable` instance.
    *callback* will be passed two items and must return a value like the
    built-in `cmp`.
    *column* is an integer addressing the column that holds items.
    This will re-sort even if *callback* is the same as before.

    .. note::
        When sorting a `ListStore` without a `TreeModelSort` you have to
        call `set_sort_column_id(-1, gtk.SORT_ASCENDING)` once, *after*
        this.
    """
    def _compare(model, iter_a, iter_b):
        # Pull the two items out of the addressed column and normalize
        # the cmp-style result for GTK.
        item_a = model.get_value(iter_a, column)
        item_b = model.get_value(iter_b, column)
        return _normalize(callback(item_a, item_b))

    sortable.set_default_sort_func(_compare)
*callback* will be passed two items and must return a value like
the built-in `cmp`.
*column* is an integer adressing the column that holds items.
This will re-sort even if *callback* is the same as before.
.. note::
When sorting a `ListStore` without a `TreeModelSort` you have to call
`set_sort_column_id(-1, gtk.SORT_ASCENDING)` once, *after* this. |
def user_post_save(sender, **kwargs):
    """
    Create an Account for every newly created User.

    Runs after ``User.save``. We only act on creation (the ``created``
    flag) to avoid an existence check on every save; fixture loading
    (``raw``) is skipped entirely. Creation can be suppressed per-user
    via the ``_disable_account_creation`` attribute or globally via
    ``settings.ACCOUNT_CREATE_ON_SAVE``.
    """
    # manage.py loaddata passes raw=True; never create accounts then.
    if kwargs.get("raw", False):
        return False
    user = kwargs["instance"]
    if not kwargs["created"]:
        return
    creation_disabled = getattr(
        user, "_disable_account_creation", not settings.ACCOUNT_CREATE_ON_SAVE
    )
    if not creation_disabled:
        Account.create(user=user)
we check if the User object wants account creation. If all passes we
create an Account object.
We only run on user creation to avoid having to check for existence on
each call to User.save. |
def experiments_fmri_upsert_property(self, experiment_id, properties):
    """Upsert properties of the fMRI data object attached to an experiment.

    Parameters
    ----------
    experiment_id : string
        Unique experiment identifier
    properties : Dictionary()
        Dictionary of property names and their new values.

    Returns
    -------
    FMRIDataHandle
        Handle for the updated object, or None if no fMRI object exists
        for the given experiment.

    Raises
    ------
    ValueError
        If the property dictionary results in an illegal operation.
    """
    # Resolve the experiment's fMRI object first; its identifier is what
    # the functional-data store keys updates on.
    fmri = self.experiments_fmri_get(experiment_id)
    if fmri is None:
        return None
    return self.funcdata.upsert_object_property(fmri.identifier, properties)
Raises ValueError if given property dictionary results in an illegal
operation.
Parameters
----------
experiment_id : string
Unique experiment identifier
properties : Dictionary()
Dictionary of property names and their new values.
Returns
-------
FMRIDataHandle
Handle for updated object of None if object doesn't exist |
def extend(self, definitions):
    """ Add several definitions at once, silently replacing any that
    already exist.

    :param definitions: The names and definitions.
    :type definitions: a :term:`mapping` or an :term:`iterable` with
        two-value :class:`tuple` s """
    # Normalize the input to a mapping, then delegate each entry to add().
    entries = dict(definitions)
    for name in entries:
        self.add(name, entries[name])
replaced silently.
:param definitions: The names and definitions.
:type definitions: a :term:`mapping` or an :term:`iterable` with
two-value :class:`tuple` s |
def create_new_client(self, filename=None, give_focus=True):
    """Create a new notebook or load a pre-existing one.

    If *filename* is None, a fresh untitled notebook file is written to
    the temporary notebook directory and opened; otherwise the given
    file is opened. If the notebook server cannot be started, a welcome
    client is shown instead and the method returns early.
    """
    # Generate the notebook name (in case of a new one)
    if not filename:
        if not osp.isdir(NOTEBOOK_TMPDIR):
            os.makedirs(NOTEBOOK_TMPDIR)
        nb_name = 'untitled' + str(self.untitled_num) + '.ipynb'
        filename = osp.join(NOTEBOOK_TMPDIR, nb_name)
        # Write an empty v4 notebook so the server has a file to serve.
        nb_contents = nbformat.v4.new_notebook()
        nbformat.write(nb_contents, filename)
        self.untitled_num += 1
    # Save spyder_pythonpath before creating a client
    # because it's needed by our kernel spec.
    if not self.testing:
        CONF.set('main', 'spyder_pythonpath',
                 self.main.get_spyder_pythonpath())
    # Open the notebook with nbopen and get the url we need to render
    try:
        server_info = nbopen(filename)
    except (subprocess.CalledProcessError, NBServerError):
        QMessageBox.critical(
            self,
            _("Server error"),
            _("The Jupyter Notebook server failed to start or it is "
              "taking too much time to do it. Please start it in a "
              "system terminal with the command 'jupyter notebook' to "
              "check for errors."))
        # Create a welcome widget
        # See issue 93
        self.untitled_num -= 1  # roll back the counter reserved above
        self.create_welcome_client()
        return
    welcome_client = self.create_welcome_client()
    client = NotebookClient(self, filename)
    self.add_tab(client)
    # Only persisted (non-temporary) notebooks belong in the recent list.
    if NOTEBOOK_TMPDIR not in filename:
        self.add_to_recent(filename)
        self.setup_menu_actions()
    client.register(server_info)
    client.load_notebook()
    if welcome_client and not self.testing:
        self.tabwidget.setCurrentIndex(0)
def read_json(fp, local_files, dir_files, name_bytes):
    """
    Read a JSON member out of a zip file.

    :param fp: a file pointer
    :param local_files: the local files structure (from parse_zip)
    :param dir_files: the directory headers (from parse_zip)
    :param name_bytes: the name of the json file to read
    :return: the json properties as a dictionary, or None if not found

    After this call the file pointer sits at a location following the
    local file entry.
    """
    if name_bytes not in dir_files:
        return None
    # dir_files maps the name to (header, key); local_files[key] holds
    # (..., data offset, data length).
    entry = local_files[dir_files[name_bytes][1]]
    offset, length = entry[1], entry[2]
    fp.seek(offset)
    raw = fp.read(length)
    return json.loads(raw.decode("utf-8"))
:param fp: a file pointer
:param local_files: the local files structure
:param dir_files: the directory headers
:param name: the name of the json file to read
:return: the json properites as a dictionary, if found
The file pointer will be at a location following the
local file entry after this method.
The local_files and dir_files should be passed from
the results of parse_zip. |
def is_eligible(self, segment):
    """
    Return True if *segment*'s child subsegments may be streamed.

    A segment qualifies only when it is present, sampled, and its total
    subsegment size breaches the configured streaming threshold.
    """
    if not segment:
        return False
    if not segment.sampled:
        return False
    return segment.get_total_subsegments_size() > self.streaming_threshold
if it is sampled and it breaches streaming threshold. |
def unflagging_question(self, id, attempt, validation_token, quiz_submission_id, access_code=None):
    """
    Unflagging a question.

    Remove the flag previously set on a quiz question after returning
    to it.

    :param id: question ID (path)
    :param attempt: the attempt number of the quiz submission being
        taken; must be the latest attempt index, since questions of
        earlier attempts can not be modified (form data)
    :param validation_token: the unique validation token received when
        the quiz submission was created (form data)
    :param quiz_submission_id: quiz submission ID (path)
    :param access_code: access code for the quiz, if any (form data)
    """
    path = {
        "quiz_submission_id": quiz_submission_id,
        "id": id,
    }
    data = {
        "attempt": attempt,
        "validation_token": validation_token,
    }
    if access_code is not None:
        data["access_code"] = access_code
    params = {}
    self.logger.debug("PUT /api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/quiz_submissions/{quiz_submission_id}/questions/{id}/unflag".format(**path), data=data, params=params, no_data=True)
Remove the flag that you previously set on a quiz question after you've
returned to it. |
def int_bytes(cls, string):
    '''Convert a size string, optionally suffixed with k or m, to int.'''
    suffix = string[-1]
    if suffix not in ('k', 'm'):
        return cls.int_0_inf(string)
    base = cls.int_0_inf(string[:-1])
    # k = kibi (2**10), m = mebi (2**20).
    multiplier = 2 ** 10 if suffix == 'k' else 2 ** 20
    return base * multiplier
def getaddrinfo(node, service=0, family=0, socktype=0, protocol=0, flags=0, timeout=30):
    """Resolve an Internet *node* name and *service* into a socket address.

    The *family*, *socktype* and *protocol* optional arguments specify the
    address family, socket type and protocol; *flags* further modifies the
    resolution. See :func:`socket.getaddrinfo` for a detailed description
    of these arguments.

    Returns a list of ``(family, socktype, proto, canonname, sockaddr)``
    tuples. The fifth element (``sockaddr``) is the socket address: a
    2-tuple ``(addr, port)`` for IPv4 and a 4-tuple
    ``(addr, port, flowinfo, scopeid)`` for IPv6.

    The address resolution is performed in the libuv thread pool.
    """
    hub = get_hub()
    with switch_back(timeout) as switcher:
        # Fire the async lookup and park this fiber until the callback
        # (or the timeout) switches us back in.
        request = pyuv.dns.getaddrinfo(
            hub.loop, node, service, family, socktype, protocol, flags,
            callback=switcher)
        switcher.add_cleanup(request.cancel)
        outcome = hub.switch()
    addrinfo, error = outcome[0]
    if error:
        raise pyuv.error.UVError(error, pyuv.errno.strerror(error))
    return addrinfo
The *family*, *socktype* and *protocol* are optional arguments that specify
the address family, socket type and protocol, respectively. The *flags*
argument allows you to pass flags to further modify the resolution process.
See the :func:`socket.getaddrinfo` function for a detailed description of
these arguments.
The return value is a list of ``(family, socktype, proto, canonname,
sockaddr)`` tuples. The fifth element (``sockaddr``) is the socket address.
It will be a 2-tuple ``(addr, port)`` for an IPv4 address, and a 4-tuple
``(addr, port, flowinfo, scopeid)`` for an IPv6 address.
The address resolution is performed in the libuv thread pool. |
def attached_socket(self, *args, **kwargs):
    """Opens a raw socket in a ``with`` block to write data to Splunk.

    The arguments are identical to those for :meth:`attach`. The socket is
    automatically closed at the end of the ``with`` block, even if an
    exception is raised in the block.

    :param host: The host value for events written to the stream.
    :type host: ``string``
    :param source: The source value for events written to the stream.
    :type source: ``string``
    :param sourcetype: The sourcetype value for events written to the
        stream.
    :type sourcetype: ``string``

    :returns: Nothing.

    **Example**::

        import splunklib.client as client
        s = client.connect(...)
        index = s.indexes['some_index']
        with index.attached_socket(sourcetype='test') as sock:
            sock.send('Test event\\r\\n')
    """
    # Acquire the socket *before* entering the try block: if attach()
    # itself raises, there is nothing to clean up, and the previous code
    # would have hit an UnboundLocalError on `sock` in the finally
    # clause, masking the real error.
    sock = self.attach(*args, **kwargs)
    try:
        yield sock
    finally:
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
The arguments are identical to those for :meth:`attach`. The socket is
automatically closed at the end of the ``with`` block, even if an
exception is raised in the block.
:param host: The host value for events written to the stream.
:type host: ``string``
:param source: The source value for events written to the stream.
:type source: ``string``
:param sourcetype: The sourcetype value for events written to the
stream.
:type sourcetype: ``string``
:returns: Nothing.
**Example**::
import splunklib.client as client
s = client.connect(...)
index = s.indexes['some_index']
with index.attached_socket(sourcetype='test') as sock:
sock.send('Test event\\r\\n') |
async def blow_out(self, mount):
    """
    Force any remaining liquid to dispense. The liquid will be dispensed at
    the current location of pipette

    :param mount: the mount whose attached pipette should blow out
    :raises PipetteNotAttachedError: if no pipette is attached to *mount*
    """
    this_pipette = self._attached_instruments[mount]
    if not this_pipette:
        raise top_types.PipetteNotAttachedError(
            "No pipette attached to {} mount".format(mount.name))
    # Raise the plunger axis current before moving so the blow-out stroke
    # uses the pipette's configured plunger current.
    self._backend.set_active_current(Axis.of_plunger(mount),
                                     this_pipette.config.plunger_current)
    try:
        # Drive the plunger to the configured blow-out position.
        await self._move_plunger(
            mount, this_pipette.config.blow_out)
    except Exception:
        self._log.exception('Blow out failed')
        raise
    finally:
        # The pipette is considered empty after a blow out, even if the
        # move failed partway through.
        this_pipette.set_current_volume(0)
the current location of pipette |
def hash_input(inpt, algo=HASH_SHA256):
    """Generate a hash from a given string with the specified algorithm
    and return the hash in hexadecimal form. Default: sha256.

    :param inpt: the string to hash
    :param algo: one of the HASH_* algorithm constants
    :raises ValueError: if *algo* is not a supported algorithm
    """
    if algo == HASH_MD5:
        hashcode = hashlib.md5()
    elif algo == HASH_SHA1:
        hashcode = hashlib.sha1()
    elif algo == HASH_SHA224:
        hashcode = hashlib.sha224()
    elif algo == HASH_SHA256:
        hashcode = hashlib.sha256()
    elif algo == HASH_SHA384:
        hashcode = hashlib.sha384()
    elif algo == HASH_SHA512:
        hashcode = hashlib.sha512()
    else:
        # The original code fell through here and crashed later with an
        # UnboundLocalError on `hashcode`; fail explicitly instead.
        raise ValueError("unsupported hash algorithm: {!r}".format(algo))
    # Encode to bytes: Python 2 strings are already byte strings.
    if sys.version_info.major == 2:
        inpt = bytes(inpt)
    else:
        inpt = bytes(inpt, "utf-8")
    hashcode.update(inpt)
    return hashcode.hexdigest()
with a specified algorithm and returns the
hash in hexadecimal form. Default: sha256. |
def profile_poly2o(data, mask):
    """Fit a 2D 2nd order polynomial to `data[mask]` and return the
    fitted background evaluated over the full `data` shape."""
    params = lmfit.Parameters()
    # Linear (mx, my), cross (mxy) and quadratic (ax, ay) coefficients
    # all start at zero; the offset starts at the masked mean.
    for name in ("mx", "my", "mxy", "ax", "ay"):
        params.add(name=name, value=0)
    params.add(name="off", value=np.average(data[mask]))
    fit_result = lmfit.minimize(poly2o_residual, params, args=(data, mask))
    return poly2o_model(fit_result.params, data.shape)
def to_dict(self):
    """
    Serialize this object as a plain dictionary.

    Returns:
        dict: parameters, unused parameters, execution time and the
        output section (accuracy, weights, splines).
    """
    output = {
        "accuracy": self.get_accuracy(),
        "weights": self.get_weights(),
        "splines": self._splines,
    }
    return {
        "param": self._param,
        "unused_param": self.unused_param,
        "execution_time": self._exec_time,
        "output": output,
    }
dict: Concise represented as a dictionary. |
def get_extended_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character plus '%')

    Special parsing routine that keeps % escapes inside a single string,
    so the whole run can be decoded as one unit later.
    """
    match = _non_extended_attribute_end_matcher(value)
    if not match:
        raise errors.HeaderParseError(
            "expected extended attrtext but found {!r}".format(value))
    text = match.group()
    remainder = value[len(text):]
    token = ValueTerminal(text, 'extended-attrtext')
    _validate_xtext(token)
    return token, remainder
This is a special parsing routine so that we get a value that
includes % escapes as a single string (which we decode as a single
string later). |
def add_column_xsd(self, tb, column, attrs):
    """ Add the XSD for a column to tb (a TreeBuilder).

    Dispatches on the SQLAlchemy column type: simple scalar and geometry
    types get a direct ``type`` attribute; enums, bounded numerics and
    length-limited strings are emitted as ``xsd:simpleType``
    restrictions. Raises ``UnsupportedColumnTypeError`` otherwise.
    """
    if column.nullable:
        # Nullable columns are optional in the schema and may be nil.
        attrs['minOccurs'] = str(0)
        attrs['nillable'] = 'true'
    # Simple scalar types with a one-to-one XSD equivalent.
    for cls, xsd_type in six.iteritems(self.SIMPLE_XSD_TYPES):
        if isinstance(column.type, cls):
            attrs['type'] = xsd_type
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
            return tb
    # Geometry columns map through the geometry-type lookup table.
    if isinstance(column.type, Geometry):
        geometry_type = column.type.geometry_type
        xsd_type = self.SIMPLE_GEOMETRY_XSD_TYPES[geometry_type]
        attrs['type'] = xsd_type
        with tag(tb, 'xsd:element', attrs) as tb:
            self.element_callback(tb, column)
        return tb
    # Enums become a string restriction enumerating the allowed values.
    if isinstance(column.type, sqlalchemy.Enum):
        with tag(tb, 'xsd:element', attrs) as tb:
            with tag(tb, 'xsd:simpleType') as tb:
                with tag(tb, 'xsd:restriction', {'base': 'xsd:string'}) \
                        as tb:
                    for enum in column.type.enums:
                        with tag(tb, 'xsd:enumeration', {'value': enum}):
                            pass
            self.element_callback(tb, column)
        return tb
    # Numerics: plain xsd:decimal when unbounded, otherwise a restriction
    # carrying fractionDigits (scale) and/or totalDigits (precision).
    if isinstance(column.type, sqlalchemy.Numeric):
        if column.type.scale is None and column.type.precision is None:
            attrs['type'] = 'xsd:decimal'
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
            return tb
        else:
            with tag(tb, 'xsd:element', attrs) as tb:
                with tag(tb, 'xsd:simpleType') as tb:
                    with tag(tb, 'xsd:restriction',
                             {'base': 'xsd:decimal'}) as tb:
                        if column.type.scale is not None:
                            with tag(tb, 'xsd:fractionDigits',
                                     {'value': str(column.type.scale)}) \
                                    as tb:
                                pass
                        if column.type.precision is not None:
                            precision = column.type.precision
                            with tag(tb, 'xsd:totalDigits',
                                     {'value': str(precision)}) \
                                    as tb:
                                pass
                self.element_callback(tb, column)
            return tb
    # Strings: plain xsd:string when unbounded, otherwise a restriction
    # with maxLength.
    if isinstance(column.type, sqlalchemy.String) \
            or isinstance(column.type, sqlalchemy.Text) \
            or isinstance(column.type, sqlalchemy.Unicode) \
            or isinstance(column.type, sqlalchemy.UnicodeText):
        if column.type.length is None:
            attrs['type'] = 'xsd:string'
            with tag(tb, 'xsd:element', attrs) as tb:
                self.element_callback(tb, column)
            return tb
        else:
            with tag(tb, 'xsd:element', attrs) as tb:
                with tag(tb, 'xsd:simpleType') as tb:
                    with tag(tb, 'xsd:restriction',
                             {'base': 'xsd:string'}) as tb:
                        with tag(tb, 'xsd:maxLength',
                                 {'value': str(column.type.length)}):
                            pass
                self.element_callback(tb, column)
            return tb
    raise UnsupportedColumnTypeError(column.type)
def find_environment(env_id=None, env_name=None):
    """
    Find an environment by id (takes priority) or by name.

    :param env_id: the environment id
    :param env_name: the environment name
    :return: the found Environment, or None if not found
    :raises exceptions.ArianeCallParametersError: if neither id nor name
        is provided
    """
    LOGGER.debug("EnvironmentService.find_environment")
    if not env_id and not env_name:
        raise exceptions.ArianeCallParametersError('id and name')

    if env_id and env_name:
        # Logger.warn is a deprecated alias of Logger.warning; the rest of
        # this function already uses LOGGER.warning.
        LOGGER.warning('Both id and name are defined. Will give you search on id.')
        env_name = None

    # At least one of the two is set here (we raised above otherwise).
    if env_id:
        params = {'id': env_id}
    else:
        params = {'name': env_name}

    ret = None
    args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
    response = EnvironmentService.requester.call(args)
    if response.rc == 0:
        ret = Environment.json_2_environment(response.response_content)
    elif response.rc != 404:
        # 404 simply means "not found" -> return None silently.
        err_msg = 'EnvironmentService.find_environment - Problem while finding environment (id:' + \
                  str(env_id) + ', name:' + str(env_name) + '). ' + \
                  'Reason: ' + str(response.response_content) + '-' + str(response.error_message) + \
                  " (" + str(response.rc) + ")"
        LOGGER.warning(
            err_msg
        )
    return ret
:param env_id: the environment id
:param env_name: the environment name
:return: found environment or None if not found |
def gen_sites(path):
    """ Yield Site objects discovered by walking *path* two levels deep. """
    for root, _dirs, _files in walklevel(path, 2):
        try:
            yield Site(root)
        except AssertionError:
            # Not a valid site directory; keep scanning.
            continue
def get_config_dict(config):
    '''
    Build a configuration dictionary from a config module.

    Every public attribute of the module (names not starting with an
    underscore) is copied into the result; the ``os`` import, if the
    module has one, is skipped.

    :param object config: configuration module
    :return: configuration dictionary
    :rtype: dict
    '''
    # The original used key_list.remove('os'), which raised ValueError
    # for config modules that never imported os; discard() is tolerant.
    names = set(dir(config))
    names.discard('os')
    return {
        key: value
        for key, value in vars(config).items()
        if key in names and not key.startswith('_')
    }
对传入的配置包进行格式化处理,生成一个字典对象
:param object config: 配置模块
:return: 配置数据字典
:rtype: dict |
def flatten(d, parent_key='', separator='__'):
    """
    Flatten a nested dictionary.

    Parameters
    ----------
    d: dict_like
        Dictionary to flatten.
    parent_key: string, optional
        Concatenated names of the parent keys.
    separator: string, optional
        Separator between the names of the each key.
        The default separator is '__'.

    Examples
    --------
    >>> d = {'alpha': 1, 'beta': {'a': 10, 'b': 42}}
    >>> flatten(d) == {'alpha': 1, 'beta__a': 10, 'beta__b': 42}
    True
    >>> flatten(d, separator='.') == {'alpha': 1, 'beta.a': 10, 'beta.b': 42}
    True
    """
    items = []
    for k, v in d.items():
        new_key = parent_key + separator + k if parent_key else k
        # OrderedDict is a dict subclass, so isinstance(v, dict) covers it.
        if isinstance(v, dict):
            items.extend(flatten(v, new_key, separator).items())
        else:
            items.append((new_key, v))
    return OrderedDict(items)
Parameters
----------
d: dict_like
Dictionary to flatten.
parent_key: string, optional
Concatenated names of the parent keys.
separator: string, optional
Separator between the names of the each key.
The default separator is '_'.
Examples
--------
>>> d = {'alpha': 1, 'beta': {'a': 10, 'b': 42}}
>>> flatten(d) == {'alpha': 1, 'beta_a': 10, 'beta_b': 42}
True
>>> flatten(d, separator='.') == {'alpha': 1, 'beta.a': 10, 'beta.b': 42}
True |
def ul(self, text):
    """*convert plain-text to MMD unordered list*

    **Key Arguments:**
        - ``text`` -- the text to convert to MMD unordered list

    **Return:**
        - ``ul`` -- the MMD unordered list

    **Usage:**

    To convert text to a MMD unordered list:

    .. code-block:: python

        ul = md.ul(" This is a list item ")
        print ul

        # OUTPUT:
        # * This is a list item
        #
    """
    matched = self.reWS.match(text)
    bullets = []
    for raw_line in matched.group(2).split("\n"):
        prefix, body, suffix = self._snip_whitespace(raw_line)
        # Re-attach the leading whitespace, bullet the line, keep one
        # trailing space.
        bullets.append("%s* %s " % (prefix, body))
    return "\n".join(bullets) + "\n\n"
**Key Arguments:**
- ``text`` -- the text to convert to MMD unordered list
**Return:**
- ``ul`` -- the MMD unordered list
**Usage:**
To convert text to a MMD unordered list:
.. code-block:: python
ul = md.ul(" This is a list item ")
print ul
# OUTPUT:
# * This is a list item
# |
def features(sender=''):
    '''Returns a list of signature features.'''
    def line_too_long(line):
        # Not from the paper; less aggressive than "line is too short".
        return 1 if len(line) > TOO_LONG_SIGNATURE_LINE else 0

    def punctuation_over_50(line):
        return 1 if punctuation_percent(line) > 50 else 0

    def punctuation_over_90(line):
        return 1 if punctuation_percent(line) > 90 else 0

    return [
        # Not from the paper: meant to match company names, sender's
        # names, addresses.
        many_capitalized_words,
        line_too_long,
        # Line contains an email pattern.
        binary_regex_search(RE_EMAIL),
        # Line contains a url.
        binary_regex_search(RE_URL),
        # Line contains a phone number pattern.
        binary_regex_search(RE_RELAX_PHONE),
        # Line matches the separator regex "^[\s]*---*[\s]*$".
        binary_regex_match(RE_SEPARATOR),
        # Line has a sequence of 10 or more special characters.
        binary_regex_search(RE_SPECIAL_CHARS),
        # Line contains typical signature words.
        binary_regex_search(RE_SIGNATURE_WORDS),
        # Line contains a name pattern like "Vitor R. Carvalho".
        binary_regex_search(RE_NAME),
        # Punctuation percentage thresholds.
        punctuation_over_50,
        punctuation_over_90,
        contains_sender_names(sender)
    ]
def alpha_beta(returns, factor_returns, risk_free=0.0, period=DAILY,
               annualization=None, out=None):
    """Calculates annualized alpha and beta.

    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in :func:`~empyrical.stats.cum_returns`.
    factor_returns : pd.Series
        Daily noncumulative returns of the factor to which beta is
        computed. Usually a benchmark such as the market.
        - This is in the same style as returns.
    risk_free : int, float, optional
        Constant risk-free return throughout the period. For example, the
        interest rate on a three month us treasury bill.
    period : str, optional
        Defines the periodicity of the 'returns' data for purposes of
        annualizing. Value ignored if `annualization` parameter is
        specified. Defaults are::

            'monthly':12
            'weekly': 52
            'daily': 252

    annualization : int, optional
        Used to suppress default values available in `period` to convert
        returns into annual returns. Value should be the annual frequency
        of `returns`.
    out : array-like, optional
        Array to use as output buffer.
        If not passed, a new array will be created.

    Returns
    -------
    alpha : float
    beta : float
    """
    # Align both series on a common index, then defer to the aligned
    # implementation.
    aligned_returns, aligned_factor = _aligned_series(returns, factor_returns)
    return alpha_beta_aligned(aligned_returns, aligned_factor,
                              risk_free=risk_free, period=period,
                              annualization=annualization, out=out)
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
factor_returns : pd.Series
Daily noncumulative returns of the factor to which beta is
computed. Usually a benchmark such as the market.
- This is in the same style as returns.
risk_free : int, float, optional
Constant risk-free return throughout the period. For example, the
interest rate on a three month us treasury bill.
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
annualization : int, optional
Used to suppress default values available in `period` to convert
returns into annual returns. Value should be the annual frequency of
`returns`.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
alpha : float
beta : float |
def get_note(self, noteid):
    """Fetch a single note.

    :param noteid: The UUID of the note
    :raises DeviantartError: if the client is not authenticated through
        the Authorization Code grant type
    """
    # The original used `is not "authorization_code"`, which compares
    # string *identity* and only happens to work for interned literals
    # (it is a SyntaxWarning on Python >= 3.8); use != for equality.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
    response = self._req('/notes/{}'.format(noteid))
    return response
:param folderid: The UUID of the note |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.