code | docstring
|---|---|
def format_citations(zid, url='https://zenodo.org', hits=10, tag_prefix='v'):
"""Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hits : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs
"""
# query for metadata
url = ('{url}/api/records/?'
'page=1&'
'size={hits}&'
'q=conceptrecid:"{id}"&'
'sort=-version&'
'all_versions=True'.format(id=zid, url=url, hits=hits))
metadata = requests.get(url).json()
lines = []
for i, hit in enumerate(metadata['hits']['hits']):
version = hit['metadata']['version'][len(tag_prefix):]
lines.append('-' * len(version))
lines.append(version)
lines.append('-' * len(version))
lines.append('')
lines.append('.. image:: {badge}\n'
' :target: {doi}'.format(**hit['links']))
if i < hits - 1:
lines.append('')
return '\n'.join(lines) | Query and format a citations page from Zenodo entries
Parameters
----------
zid : `int`, `str`
the Zenodo ID of the target record
url : `str`, optional
the base URL of the Zenodo host, defaults to ``https://zenodo.org``
hits : `int`, optional
the maximum number of hits to show, default: ``10``
tag_prefix : `str`, optional
the prefix for git tags. This is removed to generate the section
headers in the output RST
Returns
-------
rst : `str`
an RST-formatted string of DOI badges with URLs |
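For a record whose newest version tag is ``v2.0.1``, the returned RST would look like the following sketch (the badge and DOI URLs here are illustrative placeholders, not a real Zenodo record):

-----
2.0.1
-----

.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.0000000.svg
   :target: https://doi.org/10.5281/zenodo.0000000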
def convert_dotted(params):
""" Convert dotted keys in :params: dictset to a nested dictset.
E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}}
"""
if not isinstance(params, dictset):
params = dictset(params)
dotted_items = {k: v for k, v in params.items() if '.' in k}
if dotted_items:
dicts = [str2dict(key, val) for key, val in dotted_items.items()]
dotted = six.functools.reduce(merge_dicts, dicts)
params = params.subset(['-' + k for k in dotted_items.keys()])
params.update(dict(dotted))
return params | Convert dotted keys in :params: dictset to a nested dictset.
E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}} |
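``str2dict``, ``merge_dicts`` and ``dictset`` come from the surrounding library and are not shown here; a minimal standalone sketch of the same idea using plain dicts:

from functools import reduce

def str2dict(key, val):
    # 'a.b.c', 1 -> {'a': {'b': {'c': 1}}}
    out = val
    for part in reversed(key.split('.')):
        out = {part: out}
    return out

def merge_dicts(a, b):
    # recursively merge b into a copy of a
    out = dict(a)
    for k, v in b.items():
        if k in out and isinstance(out[k], dict) and isinstance(v, dict):
            out[k] = merge_dicts(out[k], v)
        else:
            out[k] = v
    return out

def convert_dotted_plain(params):
    dotted = {k: v for k, v in params.items() if '.' in k}
    flat = {k: v for k, v in params.items() if '.' not in k}
    if dotted:
        nested = reduce(merge_dicts, (str2dict(k, v) for k, v in dotted.items()))
        flat = merge_dicts(flat, nested)
    return flat

assert convert_dotted_plain({'settings.foo': 'bar', 'x': 1}) == \
    {'settings': {'foo': 'bar'}, 'x': 1}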
def reconstruct_from_shape(self, shape, optimize=False):
"""
    Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet).
    Known axes can be integers or symbols, but not Nones.
"""
axes_lengths = list(self.elementary_axes_lengths)
if self.ellipsis_positions != (math.inf, math.inf):
if len(shape) < len(self.input_composite_axes) - 1:
raise EinopsError('Expected at least {} dimensions, got {}'.format(
len(self.input_composite_axes) - 1, len(shape)))
else:
if len(shape) != len(self.input_composite_axes):
raise EinopsError('Expected {} dimensions, got {}'.format(len(self.input_composite_axes), len(shape)))
for input_axis, (known_axes, unknown_axes) in enumerate(self.input_composite_axes):
before_ellipsis = input_axis
after_ellipsis = input_axis + len(shape) - len(self.input_composite_axes)
if input_axis == self.ellipsis_positions[0]:
assert len(known_axes) == 0 and len(unknown_axes) == 1
unknown_axis, = unknown_axes
ellipsis_shape = shape[before_ellipsis:after_ellipsis + 1]
if any(d is None for d in ellipsis_shape):
raise EinopsError("Couldn't infer shape for one or more axes represented by ellipsis")
axes_lengths[unknown_axis] = _product(ellipsis_shape)
else:
if input_axis < self.ellipsis_positions[0]:
length = shape[before_ellipsis]
else:
length = shape[after_ellipsis]
known_product = 1
for axis in known_axes:
known_product *= axes_lengths[axis]
if len(unknown_axes) == 0:
if isinstance(length, int) and isinstance(known_product, int) and length != known_product:
raise EinopsError('Shape mismatch, {} != {}'.format(length, known_product))
else:
if isinstance(length, int) and isinstance(known_product, int) and length % known_product != 0:
raise EinopsError("Shape mismatch, can't divide axis of length {} in chunks of {}".format(
length, known_product))
unknown_axis, = unknown_axes
axes_lengths[unknown_axis] = length // known_product
init_shapes = axes_lengths
reduced_axes_lengths = [dim for i, dim in enumerate(axes_lengths) if i not in self.reduced_elementary_axes]
final_shapes = []
for output_axis, grouping in enumerate(self.output_composite_axes):
if output_axis == self.ellipsis_positions[1]:
final_shapes.extend(ellipsis_shape)
else:
lengths = [reduced_axes_lengths[elementary_axis] for elementary_axis in grouping]
if any(l is None for l in lengths):
final_shapes.append(None)
else:
final_shapes.append(_product(lengths))
reduced_axes = self.reduced_elementary_axes
axes_reordering = self.final_axes_grouping_flat
if optimize:
return _optimize_transformation(init_shapes, reduced_axes, axes_reordering, final_shapes)
else:
return init_shapes, reduced_axes, axes_reordering, final_shapes | Shape is a tuple that may contain integers, shape symbols (tf, keras, theano) and UnknownSize (keras, mxnet)
Known axes can be integers or symbols, but not Nones. |
def execute(self, *args, **kwargs):
"""Analogous to :any:`sqlite3.Cursor.execute`
:returns: self
"""
        with self:
            self._cursor.execute(*args, **kwargs)
        return self | Analogous to :any:`sqlite3.Cursor.execute`
:returns: self |
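With ``self`` returned as documented, calls can be chained; a hypothetical usage sketch (it assumes the wrapper also exposes a ``fetchone`` proxy, which is not shown above):

row = db.execute('SELECT 1').fetchone()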
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
raise AzureHttpError(message, http_error.status) | Simple error handler for azure. |
def selected_exercise(func):
"""
Passes the selected exercise as the first argument to func.
"""
@wraps(func)
def inner(*args, **kwargs):
exercise = Exercise.get_selected()
return func(exercise, *args, **kwargs)
return inner | Passes the selected exercise as the first argument to func. |
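The same injection pattern, reduced to a self-contained sketch (the ``Exercise`` model is replaced by a hypothetical ``get_value`` callable):

from functools import wraps

def inject_first(get_value):
    # decorator factory: passes get_value() as the first argument to func
    def decorator(func):
        @wraps(func)
        def inner(*args, **kwargs):
            return func(get_value(), *args, **kwargs)
        return inner
    return decorator

@inject_first(lambda: 'exercise-42')
def show(exercise, verbose=False):
    return exercise

assert show() == 'exercise-42'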
def fill_blind_pores(im):
r"""
Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels
"""
im = sp.copy(im)
holes = find_disconnected_voxels(im)
im[holes] = False
return im | Fills all pores that are not connected to the edges of the image.
Parameters
----------
im : ND-array
The image of the porous material
Returns
-------
image : ND-array
A version of ``im`` but with all the disconnected pores removed.
See Also
--------
find_disconnected_voxels |
def optimize(self, objective_fct, iterations=None, min_iterations=1,
args=(), verb_disp=None, logger=None, call_back=None):
"""find minimizer of `objective_fct`.
CAVEAT: the return value for `optimize` has changed to ``self``.
Arguments
---------
`objective_fct`
function to be minimized
`iterations`
number of (maximal) iterations, while ``not self.stop()``
`min_iterations`
minimal number of iterations, even if ``not self.stop()``
`args`
arguments passed to `objective_fct`
`verb_disp`
print to screen every `verb_disp` iteration, if ``None``
the value from ``self.logger`` is "inherited", if
available.
``logger``
a `BaseDataLogger` instance, which must be compatible
with the type of ``self``.
``call_back``
call back function called like ``call_back(self)`` or
a list of call back functions.
``return self``, that is, the `OOOptimizer` instance.
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5
... ).optimize(cma.fcts.rosen, verb_disp=100)
(4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0
2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0
3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0
100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3
200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5
300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8
400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
('termination by', {'tolfun': 1e-11})
('best f-value =', 1.1189867885201275e-14)
('solution =', array([ 1. , 1. , 1. , 0.99999999, 0.99999998,
0.99999996, 0.99999992]))
>>> print(es.result()[0])
array([ 1. 1. 1. 0.99999999 0.99999998 0.99999996
0.99999992])
"""
assert iterations is None or min_iterations <= iterations
if not hasattr(self, 'logger'):
self.logger = logger
logger = self.logger = logger or self.logger
if not isinstance(call_back, list):
call_back = [call_back]
citer = 0
while not self.stop() or citer < min_iterations:
if iterations is not None and citer >= iterations:
return self.result()
citer += 1
X = self.ask() # deliver candidate solutions
fitvals = [objective_fct(x, *args) for x in X]
self.tell(X, fitvals) # all the work is done here
self.disp(verb_disp)
for f in call_back:
f is None or f(self)
logger.add(self) if logger else None
# signal logger that we left the loop
# TODO: this is very ugly, because it assumes modulo keyword
# argument *and* modulo attribute to be available
try:
logger.add(self, modulo=bool(logger.modulo)) if logger else None
except TypeError:
print(' suppressing the final call of the logger in ' +
'OOOptimizer.optimize (modulo keyword parameter not ' +
'available)')
except AttributeError:
print(' suppressing the final call of the logger in ' +
'OOOptimizer.optimize (modulo attribute not ' +
'available)')
if verb_disp:
self.disp(1)
if verb_disp in (1, True):
print('termination by', self.stop())
print('best f-value =', self.result()[1])
print('solution =', self.result()[0])
return self | find minimizer of `objective_fct`.
CAVEAT: the return value for `optimize` has changed to ``self``.
Arguments
---------
`objective_fct`
function to be minimized
`iterations`
number of (maximal) iterations, while ``not self.stop()``
`min_iterations`
minimal number of iterations, even if ``not self.stop()``
`args`
arguments passed to `objective_fct`
`verb_disp`
print to screen every `verb_disp` iteration, if ``None``
the value from ``self.logger`` is "inherited", if
available.
``logger``
a `BaseDataLogger` instance, which must be compatible
with the type of ``self``.
``call_back``
call back function called like ``call_back(self)`` or
a list of call back functions.
``return self``, that is, the `OOOptimizer` instance.
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy(7 * [0.1], 0.5
... ).optimize(cma.fcts.rosen, verb_disp=100)
(4_w,9)-CMA-ES (mu_w=2.8,w_1=49%) in dimension 7 (seed=630721393)
Iterat #Fevals function value axis ratio sigma minstd maxstd min:sec
1 9 3.163954777181882e+01 1.0e+00 4.12e-01 4e-01 4e-01 0:0.0
2 18 3.299006223906629e+01 1.0e+00 3.60e-01 3e-01 4e-01 0:0.0
3 27 1.389129389866704e+01 1.1e+00 3.18e-01 3e-01 3e-01 0:0.0
100 900 2.494847340045985e+00 8.6e+00 5.03e-02 2e-02 5e-02 0:0.3
200 1800 3.428234862999135e-01 1.7e+01 3.77e-02 6e-03 3e-02 0:0.5
300 2700 3.216640032470860e-04 5.6e+01 6.62e-03 4e-04 9e-03 0:0.8
400 3600 6.155215286199821e-12 6.6e+01 7.44e-06 1e-07 4e-06 0:1.1
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
438 3942 1.187372505161762e-14 6.0e+01 3.27e-07 4e-09 9e-08 0:1.2
('termination by', {'tolfun': 1e-11})
('best f-value =', 1.1189867885201275e-14)
('solution =', array([ 1. , 1. , 1. , 0.99999999, 0.99999998,
0.99999996, 0.99999992]))
>>> print(es.result()[0])
array([ 1. 1. 1. 0.99999999 0.99999998 0.99999996
0.99999992]) |
def exprvar(name, index=None):
r"""Return a unique Expression variable.
A Boolean *variable* is an abstract numerical quantity that may assume any
value in the set :math:`B = \{0, 1\}`.
The ``exprvar`` function returns a unique Boolean variable instance
represented by a logic expression.
Variable instances may be used to symbolically construct larger expressions.
A variable is defined by one or more *names*,
and zero or more *indices*.
Multiple names establish hierarchical namespaces,
and multiple indices group several related variables.
If the ``name`` parameter is a single ``str``,
it will be converted to ``(name, )``.
The ``index`` parameter is optional;
when empty, it will be converted to an empty tuple ``()``.
If the ``index`` parameter is a single ``int``,
it will be converted to ``(index, )``.
Given identical names and indices, the ``exprvar`` function will always
return the same variable:
>>> exprvar('a', 0) is exprvar('a', 0)
True
To create several single-letter variables:
>>> a, b, c, d = map(exprvar, 'abcd')
To create variables with multiple names (inner-most first):
>>> fifo_push = exprvar(('push', 'fifo'))
>>> fifo_pop = exprvar(('pop', 'fifo'))
.. seealso::
For creating arrays of variables with incremental indices,
use the :func:`pyeda.boolalg.bfarray.exprvars` function.
"""
bvar = boolfunc.var(name, index)
try:
var = _LITS[bvar.uniqid]
except KeyError:
var = _LITS[bvar.uniqid] = Variable(bvar)
return var | Return a unique Expression variable.
A Boolean *variable* is an abstract numerical quantity that may assume any
value in the set :math:`B = \{0, 1\}`.
The ``exprvar`` function returns a unique Boolean variable instance
represented by a logic expression.
Variable instances may be used to symbolically construct larger expressions.
A variable is defined by one or more *names*,
and zero or more *indices*.
Multiple names establish hierarchical namespaces,
and multiple indices group several related variables.
If the ``name`` parameter is a single ``str``,
it will be converted to ``(name, )``.
The ``index`` parameter is optional;
when empty, it will be converted to an empty tuple ``()``.
If the ``index`` parameter is a single ``int``,
it will be converted to ``(index, )``.
Given identical names and indices, the ``exprvar`` function will always
return the same variable:
>>> exprvar('a', 0) is exprvar('a', 0)
True
To create several single-letter variables:
>>> a, b, c, d = map(exprvar, 'abcd')
To create variables with multiple names (inner-most first):
>>> fifo_push = exprvar(('push', 'fifo'))
>>> fifo_pop = exprvar(('pop', 'fifo'))
.. seealso::
For creating arrays of variables with incremental indices,
use the :func:`pyeda.boolalg.bfarray.exprvars` function. |
def position_fingerprint(
word, n_bits=16, most_common=MOST_COMMON_LETTERS_CG, bits_per_letter=3
):
"""Return the position fingerprint.
This is a wrapper for :py:meth:`Position.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
bits_per_letter : int
The bits to assign for letter position
Returns
-------
int
The position fingerprint
Examples
--------
>>> bin(position_fingerprint('hat'))
'0b1110100011111111'
>>> bin(position_fingerprint('niall'))
'0b1111110101110010'
>>> bin(position_fingerprint('colin'))
'0b1111111110010111'
>>> bin(position_fingerprint('atcg'))
'0b1110010001111111'
>>> bin(position_fingerprint('entreatment'))
'0b101011111111'
"""
return Position().fingerprint(word, n_bits, most_common, bits_per_letter) | Return the position fingerprint.
This is a wrapper for :py:meth:`Position.fingerprint`.
Parameters
----------
word : str
The word to fingerprint
n_bits : int
Number of bits in the fingerprint returned
most_common : list
The most common tokens in the target language, ordered by frequency
bits_per_letter : int
The bits to assign for letter position
Returns
-------
int
The position fingerprint
Examples
--------
>>> bin(position_fingerprint('hat'))
'0b1110100011111111'
>>> bin(position_fingerprint('niall'))
'0b1111110101110010'
>>> bin(position_fingerprint('colin'))
'0b1111111110010111'
>>> bin(position_fingerprint('atcg'))
'0b1110010001111111'
>>> bin(position_fingerprint('entreatment'))
'0b101011111111' |
def getByteStatistic(self, wanInterfaceId=1, timeout=1):
"""Execute GetTotalBytesSent&GetTotalBytesReceived actions to get WAN statistics.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
    :return: a list of two values, total bytes sent and total bytes received
    :rtype: list[int]
"""
namespace = Wan.getServiceType("getByteStatistic") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetTotalBytesSent", timeout=timeout)
results2 = self.execute(uri, namespace, "GetTotalBytesReceived", timeout=timeout)
return [int(results["NewTotalBytesSent"]),
int(results2["NewTotalBytesReceived"])] | Execute GetTotalBytesSent&GetTotalBytesReceived actions to get WAN statistics.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: a list of two values, total bytes sent and total bytes received
:rtype: list[int] |
def run_scratch(self, path_to_scratch, num_cores=1, outname=None, outdir=None, force_rerun=False):
"""Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns:
"""
if not outname:
outname = self.project_name
if not outdir:
outdir = ''
outname = op.join(outdir, outname)
self.out_sspro = '{}.ss'.format(outname)
self.out_sspro8 = '{}.ss8'.format(outname)
self.out_accpro = '{}.acc'.format(outname)
self.out_accpro20 = '{}.acc20'.format(outname)
# TODO: check for multiple output files in command_runner
ssbio.utils.command_runner(
shell_command='{} {} {} {}'.format(path_to_scratch, self.seq_file, outname, num_cores),
force_rerun_flag=force_rerun, outfile_checker='{}.ss'.format(outname)) | Run SCRATCH on the sequence_file that was loaded into the class.
Args:
path_to_scratch: Path to the SCRATCH executable, run_SCRATCH-1D_predictors.sh
outname: Prefix to name the output files
outdir: Directory to store the output files
force_rerun: Flag to force rerunning of SCRATCH even if the output files exist
Returns: |
def from_df(cls, df):
"""Creates an OrbitPopulation from a DataFrame.
:param df:
:class:`pandas.DataFrame` object. Must contain the following
columns: ``['M1','M2','P','ecc','mean_anomaly','obsx','obsy','obsz']``,
i.e., what is accessed via :attr:`OrbitPopulation.dataframe`.
:return:
:class:`OrbitPopulation`.
"""
return cls(df['M1'], df['M2'], df['P'],
ecc=df['ecc'], mean_anomaly=df['mean_anomaly'],
obsx=df['obsx'], obsy=df['obsy'], obsz=df['obsz']) | Creates an OrbitPopulation from a DataFrame.
:param df:
:class:`pandas.DataFrame` object. Must contain the following
columns: ``['M1','M2','P','ecc','mean_anomaly','obsx','obsy','obsz']``,
i.e., what is accessed via :attr:`OrbitPopulation.dataframe`.
:return:
:class:`OrbitPopulation`. |
def _htmlify_text(self, s):
"""Make text HTML-friendly."""
colored = self._handle_ansi_color_codes(html.escape(s))
return linkify(self._buildroot, colored, self._linkify_memo).replace('\n', '<br/>') | Make text HTML-friendly. |
def flush(self):
"""
Remove all items from the cache.
"""
if os.path.isdir(self._directory):
for root, dirs, files in os.walk(self._directory, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name)) | Remove all items from the cache. |
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape) | SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive. |
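The recurrence in equations (1)-(5) is easy to restate outside TensorFlow; a minimal NumPy sketch of a single SRU layer over a [time, channels] input, with a randomly initialized kernel standing in for the learned weights and the biases omitted:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sru_layer(x, rng):
    # x: [time, channels]; one matmul produces x', f and r (biases omitted)
    t, c = x.shape
    w = rng.standard_normal((c, 3 * c)) * 0.1
    xp, f, r = np.split(x @ w, 3, axis=-1)
    f, r = sigmoid(f), sigmoid(r)
    c_state = np.zeros(c)
    h = np.empty_like(x)
    for step in range(t):
        c_state = f[step] * c_state + (1 - f[step]) * xp[step]          # eq. (4)
        h[step] = r[step] * np.tanh(c_state) + (1 - r[step]) * x[step]  # eq. (5)
    return h

rng = np.random.default_rng(0)
out = sru_layer(rng.standard_normal((5, 4)), rng)
assert out.shape == (5, 4)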
def as_dict(self, cache=None, fetch=True):
"""Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached.
"""
if not self._fetched and fetch:
info = self.fetch(cache)
elif self._use_cache(cache):
info = self._attrs.copy()
else:
info = {}
info.update(url=self.url)
return info | Return torrent properties as a dictionary.
Set the cache flag to False to disable the cache. On the other hand,
set the fetch flag to False to avoid fetching data if it's not cached. |
def get_site_model(oqparam):
"""
Convert the NRML file into an array of site parameters.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:returns:
an array with fields lon, lat, vs30, ...
"""
req_site_params = get_gsim_lt(oqparam).req_site_params
arrays = []
for fname in oqparam.inputs['site_model']:
if isinstance(fname, str) and fname.endswith('.csv'):
sm = read_csv(fname)
if 'site_id' in sm.dtype.names:
raise InvalidFile('%s: you passed a sites.csv file instead of '
'a site_model.csv file!' % fname)
z = numpy.zeros(len(sm), sorted(sm.dtype.descr))
for name in z.dtype.names: # reorder the fields
z[name] = sm[name]
arrays.append(z)
continue
nodes = nrml.read(fname).siteModel
params = [valid.site_param(node.attrib) for node in nodes]
missing = req_site_params - set(params[0])
if 'vs30measured' in missing: # use a default of False
missing -= {'vs30measured'}
for param in params:
param['vs30measured'] = False
if 'backarc' in missing: # use a default of False
missing -= {'backarc'}
for param in params:
param['backarc'] = False
if missing:
raise InvalidFile('%s: missing parameter %s' %
(oqparam.inputs['site_model'],
', '.join(missing)))
# NB: the sorted in sorted(params[0]) is essential, otherwise there is
    # a heisenbug in scenario/test_case_4
site_model_dt = numpy.dtype([(p, site.site_param_dt[p])
for p in sorted(params[0])])
sm = numpy.array([tuple(param[name] for name in site_model_dt.names)
for param in params], site_model_dt)
arrays.append(sm)
return numpy.concatenate(arrays) | Convert the NRML file into an array of site parameters.
:param oqparam:
an :class:`openquake.commonlib.oqvalidation.OqParam` instance
:returns:
an array with fields lon, lat, vs30, ... |
def rpc(self, request, args):
"""RPC
:param request:
:args ???:
"""
if request.method != 'POST':
return self.error(405, request)
payload = request.get_data(as_text=True) or '{}'
request_method = request.args.get('method')
if not request_method:
return self.error(
400, request,
message="A query string parameter method= is missing."
)
name_map = self.service.__nirum_method_names__
try:
method_facial_name = name_map.behind_names[request_method]
except KeyError:
return self.error(
400,
request,
message="Service doesn't have procedure named '{}'.".format(
request_method
)
)
try:
service_method = getattr(self.service, method_facial_name)
except AttributeError:
return self.error(
400,
request,
message="Service has no procedure '{}'.".format(
request_method
)
)
if not callable(service_method):
return self.error(
400, request,
message="Remote procedure '{}' is not callable.".format(
request_method
)
)
try:
request_json = json.loads(payload)
except ValueError:
return self.error(
400,
request,
message="Invalid JSON payload: '{}'.".format(payload)
)
type_hints = self.service.__nirum_service_methods__[method_facial_name]
try:
arguments = self._parse_procedure_arguments(
type_hints,
request_json
)
except (NirumProcedureArgumentValueError,
NirumProcedureArgumentRequiredError) as e:
return self.error(400, request, message=str(e))
method_error_types = self.service.__nirum_method_error_types__
if not callable(method_error_types): # generated by older compiler
method_error_types = method_error_types.get
method_error = method_error_types(method_facial_name, ())
try:
result = service_method(**arguments)
except method_error as e:
return self._raw_response(400, serialize_meta(e))
return_type = type_hints['_return']
if type_hints.get('_v', 1) >= 2:
return_type = return_type()
if not self._check_return_type(return_type, result):
return self.error(
400,
request,
message="Incorrect return type '{0}' "
"for '{1}'. expected '{2}'.".format(
typing._type_repr(result.__class__),
request_method,
typing._type_repr(return_type)
)
)
else:
return self._raw_response(200, serialize_meta(result)) | RPC
:param request:
:args ???: |
def validate_fields_only_with_permissions(self, val, caller_permissions):
"""
To pass field validation, no required field should be missing.
This method assumes that the contents of each field have already been
validated on assignment, so it's merely a presence check.
Should only be called for callers with extra permissions.
"""
self.validate_fields_only(val)
# check if type has been patched
for extra_permission in caller_permissions.permissions:
all_field_names = '_all_{}_field_names_'.format(extra_permission)
for field_name in getattr(self.definition, all_field_names, set()):
if not hasattr(val, field_name):
raise ValidationError("missing required field '%s'" % field_name) | To pass field validation, no required field should be missing.
This method assumes that the contents of each field have already been
validated on assignment, so it's merely a presence check.
Should only be called for callers with extra permissions. |
def from_pyfile(self, filename: str, silent: bool=False) -> None:
"""Load the configuration from a Python cfg or py file.
See Python's ConfigParser docs for details on the cfg format.
It is a common practice to load the defaults from the source
using the :meth:`from_object` and then override with a cfg or
py file, for example
.. code-block:: python
app.config.from_object('config_module')
app.config.from_pyfile('production.cfg')
Arguments:
filename: The filename which when appended to
:attr:`root_path` gives the path to the file
"""
file_path = self.root_path / filename
try:
spec = importlib.util.spec_from_file_location("module.name", file_path) # type: ignore
if spec is None: # Likely passed a cfg file
parser = ConfigParser()
parser.optionxform = str # type: ignore # Prevents lowercasing of keys
with open(file_path) as file_:
config_str = '[section]\n' + file_.read()
parser.read_string(config_str)
self.from_mapping(parser['section'])
else:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
self.from_object(module)
except (FileNotFoundError, IsADirectoryError):
if not silent:
raise | Load the configuration from a Python cfg or py file.
See Python's ConfigParser docs for details on the cfg format.
It is a common practice to load the defaults from the source
using the :meth:`from_object` and then override with a cfg or
py file, for example
.. code-block:: python
app.config.from_object('config_module')
app.config.from_pyfile('production.cfg')
Arguments:
filename: The filename which when appended to
:attr:`root_path` gives the path to the file |
def enroll(self, uuid, organization, from_date=MIN_PERIOD_DATE, to_date=MAX_PERIOD_DATE,
merge=False):
"""Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False
"""
# Empty or None values for uuid and organizations are not allowed
if not uuid or not organization:
return CMD_SUCCESS
try:
api.add_enrollment(self.db, uuid, organization, from_date, to_date)
code = CMD_SUCCESS
except (NotFoundError, InvalidValueError) as e:
self.error(str(e))
code = e.code
except AlreadyExistsError as e:
if not merge:
msg_data = {
'uuid': uuid,
'org': organization,
'from_dt': str(from_date),
'to_dt': str(to_date)
}
msg = "enrollment for '%(uuid)s' at '%(org)s' (from: %(from_dt)s, to: %(to_dt)s) already exists in the registry"
msg = msg % msg_data
self.error(msg)
code = e.code
if not merge:
return code
try:
api.merge_enrollments(self.db, uuid, organization)
except (NotFoundError, InvalidValueError) as e:
# These exceptions were checked above. If any of these raises
# is due to something really wrong has happened
raise RuntimeError(str(e))
return CMD_SUCCESS | Enroll a unique identity in an organization.
This method adds a new relationship between the unique identity,
identified by <uuid>, and <organization>. Both entities must exist
on the registry before creating the new enrollment.
The period of the enrollment can be given with the parameters <from_date>
and <to_date>, where "from_date <= to_date". Default values for these
dates are '1900-01-01' and '2100-01-01'.
When "merge" parameter is set to True, those overlapped enrollments related
to <uuid> and <organization> found on the registry will be merged. The given
enrollment will be also merged.
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
:param merge: merge overlapped enrollments; by default, it is set to False |
def _copy_listed(self: T, names) -> T:
"""Create a new Dataset with the listed variables from this dataset and
all the relevant coordinates. Skips all validation.
"""
variables = OrderedDict() # type: OrderedDict[Any, Variable]
coord_names = set()
indexes = OrderedDict() # type: OrderedDict[Any, pd.Index]
for name in names:
try:
variables[name] = self._variables[name]
except KeyError:
ref_name, var_name, var = _get_virtual_variable(
self._variables, name, self._level_coords, self.dims)
variables[var_name] = var
if ref_name in self._coord_names or ref_name in self.dims:
coord_names.add(var_name)
if (var_name,) == var.dims:
indexes[var_name] = var.to_index()
needed_dims = set() # type: set
for v in variables.values():
needed_dims.update(v.dims)
dims = dict((k, self.dims[k]) for k in needed_dims)
for k in self._coord_names:
if set(self.variables[k].dims) <= needed_dims:
variables[k] = self._variables[k]
coord_names.add(k)
if k in self.indexes:
indexes[k] = self.indexes[k]
return self._replace(variables, coord_names, dims, indexes=indexes) | Create a new Dataset with the listed variables from this dataset and
all the relevant coordinates. Skips all validation. |
def get_group_policy(self, group_name, policy_name):
"""
Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get.
"""
params = {'GroupName' : group_name,
'PolicyName' : policy_name}
return self.get_response('GetGroupPolicy', params, verb='POST') | Retrieves the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The policy document to get. |
def lharmonicmean (inlist):
"""
Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist)
"""
sum = 0
for item in inlist:
sum = sum + 1.0/item
return len(inlist) / sum | Calculates the harmonic mean of the values in the passed list.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Assumes a '1D' list.
Usage: lharmonicmean(inlist) |
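A quick worked example: for [1, 2, 4] the sum of reciprocals is 1 + 0.5 + 0.25 = 1.75, so the harmonic mean is 3 / 1.75 ≈ 1.714:

assert abs(lharmonicmean([1, 2, 4]) - 3 / 1.75) < 1e-12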
def printf(format, *args):
"""Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args."""
sys.stdout.write(str(format) % args)
return if_(args, lambda: args[-1], lambda: format) | Format args with the first argument as format string, and write.
Return the last arg, or format itself if there are no args. |
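Usage sketch: the call below writes ``cart has 3 items`` to stdout and evaluates to 3 (the last argument), so it can be used inside expressions:

n = printf('%s has %d items\n', 'cart', 3)  # n == 3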
def get_setter(cls, prop_name, # @NoSelf
user_setter=None, setter_takes_name=False,
user_getter=None, getter_takes_name=False):
"""Similar to get_getter, but for setting property
        values. If user_getter is specified, then it may be used to
        get the old value of the property before setting it (this
        is the case in some derived classes' implementations). If
        getter_takes_name is True and user_getter is not None, then
        the property name is passed to the given getter to retrieve
        the property value."""
if user_setter:
if setter_takes_name:
# wraps the property name
def _setter(self, val):
return user_setter(self, prop_name, val)
else: _setter = user_setter
return _setter
def _setter(self, val): # @DuplicatedSignature
setattr(self, PROP_NAME % {'prop_name' : prop_name}, val)
return
return _setter | Similar to get_getter, but for setting property
values. If user_getter is specified, then it may be used to
get the old value of the property before setting it (this
is the case in some derived classes' implementations). If
getter_takes_name is True and user_getter is not None, then
the property name is passed to the given getter to retrieve
the property value. |
def load(fin, dtype=np.float32, max_vocab=None):
"""
Load word embedding file.
Args:
fin (File): File object to read. File should be open for reading ascii.
dtype (numpy.dtype): Element data type to use for the array.
max_vocab (int): Number of vocabulary to read.
Returns:
numpy.ndarray: Word embedding representation vectors
dict: Mapping from words to vector indices.
"""
vocab = {}
arr = None
i = 0
for line in fin:
if max_vocab is not None and i >= max_vocab:
break
try:
token, v = _parse_line(line, dtype)
except (ValueError, IndexError):
raise ParseError(b'Parsing error in line: ' + line)
if token in vocab:
parse_warn(b'Duplicated vocabulary ' + token)
continue
if arr is None:
arr = np.array(v, dtype=dtype).reshape(1, -1)
else:
if arr.shape[1] != len(v):
raise ParseError(b'Vector size did not match in line: ' + line)
arr = np.append(arr, [v], axis=0)
vocab[token] = i
i += 1
return arr, vocab | Load word embedding file.
Args:
fin (File): File object to read. File should be open for reading ascii.
dtype (numpy.dtype): Element data type to use for the array.
max_vocab (int): Number of vocabulary to read.
Returns:
numpy.ndarray: Word embedding representation vectors
dict: Mapping from words to vector indices. |
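``_parse_line`` is not shown above; for a plain text embedding file of the form ``token v1 v2 ...`` read in binary mode, it could plausibly look like this (a hypothetical sketch, not the library's actual parser):

import numpy as np

def _parse_line(line, dtype):
    # b'cat 0.1 0.2 0.3\n' -> (b'cat', array([0.1, 0.2, 0.3], dtype=dtype))
    parts = line.rstrip().split()
    token, values = parts[0], [float(v) for v in parts[1:]]
    if not values:
        raise ValueError('no vector components found')
    return token, np.array(values, dtype=dtype)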
def make_article_info_correspondences(self, article_info_div):
"""
Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content.
"""
corresps = self.article.root.xpath('./front/article-meta/author-notes/corresp')
if corresps:
corresp_div = etree.SubElement(article_info_div,
'div',
{'id': 'correspondence'})
for corresp in corresps:
sub_div = etree.SubElement(corresp_div,
'div',
{'id': corresp.attrib['id']})
append_all_below(sub_div, corresp) | Articles generally provide a first contact, typically an email address
for one of the authors. This will supply that content. |
def _expand_subsystems(self, scope_infos):
"""Add all subsystems tied to a scope, right after that scope."""
# Get non-global subsystem dependencies of the specified subsystem client.
def subsys_deps(subsystem_client_cls):
for dep in subsystem_client_cls.subsystem_dependencies_iter():
if dep.scope != GLOBAL_SCOPE:
yield self._scope_to_info[dep.options_scope]
for x in subsys_deps(dep.subsystem_cls):
yield x
for scope_info in scope_infos:
yield scope_info
if scope_info.optionable_cls is not None:
# We don't currently subclass GlobalOptionsRegistrar, and I can't think of any reason why
# we would, but might as well be robust.
if issubclass(scope_info.optionable_cls, GlobalOptionsRegistrar):
# We were asked for global help, so also yield for all global subsystems.
for scope, info in self._scope_to_info.items():
if info.category == ScopeInfo.SUBSYSTEM and enclosing_scope(scope) == GLOBAL_SCOPE:
yield info
for subsys_dep in subsys_deps(info.optionable_cls):
yield subsys_dep
elif issubclass(scope_info.optionable_cls, SubsystemClientMixin):
for subsys_dep in subsys_deps(scope_info.optionable_cls):
yield subsys_dep | Add all subsystems tied to a scope, right after that scope. |
def build_instance_name(inst, obj=None):
"""Return an instance name from an instance, and set instance.path """
if obj is None:
for _ in inst.properties.values():
inst.path.keybindings.__setitem__(_.name, _.value)
return inst.path
if not isinstance(obj, list):
return build_instance_name(inst, get_keys_from_class(obj))
keys = {}
for _ in obj:
if _ not in inst.properties:
raise pywbem.CIMError(pywbem.CIM_ERR_FAILED,
"Instance of %s is missing key property %s" \
%(inst.classname, _))
keys[_] = inst[_]
inst.path = pywbem.CIMInstanceName(classname=inst.classname,
keybindings=keys,
namespace=inst.path.namespace,
host=inst.path.host)
return inst.path | Return an instance name from an instance, and set instance.path |
def close_socket(sock):
'''Shutdown and close the socket.'''
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
except Exception:
pass
try:
sock.close()
except Exception:
pass | Shutdown and close the socket. |
def _merge_wf_outputs(new, cur, parallel):
"""Merge outputs for a sub-workflow, replacing variables changed in later steps.
ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps
"""
new_ids = set([])
out = []
for v in new:
outv = {}
outv["source"] = v["id"]
outv["id"] = "%s" % get_base_id(v["id"])
outv["type"] = v["type"]
if "secondaryFiles" in v:
outv["secondaryFiles"] = v["secondaryFiles"]
if tz.get_in(["outputBinding", "secondaryFiles"], v):
outv["secondaryFiles"] = tz.get_in(["outputBinding", "secondaryFiles"], v)
new_ids.add(outv["id"])
out.append(outv)
for outv in cur:
if outv["id"] not in new_ids:
out.append(outv)
return out | Merge outputs for a sub-workflow, replacing variables changed in later steps.
ignore_ids are those used internally in a sub-workflow but not exposed to subsequent steps |
def run(self, input_func=_stdin_):
"""Run the sections."""
# reset question count
self.qcount = 1
for section_name in self.survey:
self.run_section(section_name, input_func) | Run the sections. |
def getDigitalID(self,num):
"""
Reads the COMTRADE ID of a given channel number.
The number to be given is the same of the COMTRADE header.
"""
listidx = self.Dn.index(num) # Get the position of the channel number.
return self.Dch_id[listidx] | Reads the COMTRADE ID of a given channel number.
The number to be given is the same of the COMTRADE header. |
def get_knowledge_category_id(self):
"""Gets the grade ``Id`` associated with the knowledge dimension.
return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``has_knowledge_category()`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['knowledgeCategoryId']):
raise errors.IllegalState('this Objective has no knowledge_category')
else:
return Id(self._my_map['knowledgeCategoryId']) | Gets the grade ``Id`` associated with the knowledge dimension.
return: (osid.id.Id) - the grade ``Id``
raise: IllegalState - ``has_knowledge_category()`` is ``false``
*compliance: mandatory -- This method must be implemented.* |
def _display_big_warning(self, content):
""" Displays a BIG warning """
print("")
print(BOLD + WARNING + "--- WARNING ---" + ENDC)
print(WARNING + content + ENDC)
print("") | Displays a BIG warning |
def data_contains_key_builder(key: str) -> NodePredicate: # noqa: D202
"""Build a filter that passes only on nodes that have the given key in their data dictionary.
:param key: A key for the node's data dictionary
"""
def data_contains_key(_: BELGraph, node: BaseEntity) -> bool:
"""Pass only for a node that contains the enclosed key in its data dictionary.
:return: If the node contains the enclosed key in its data dictionary
"""
return key in node
return data_contains_key | Build a filter that passes only on nodes that have the given key in their data dictionary.
:param key: A key for the node's data dictionary |
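Usage sketch: since ``BaseEntity`` behaves like a mapping and the graph argument is ignored by the returned predicate, plain dicts and ``None`` stand in for them here:

has_name = data_contains_key_builder('name')
assert has_name(None, {'name': 'TP53'})         # graph argument is unused
assert not has_name(None, {'namespace': 'HGNC'})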
def get_languages_from_item(ct_item, item):
"""
Get the languages configured for the current item
:param ct_item:
:param item:
:return:
"""
try:
item_lan = TransItemLanguage.objects.filter(content_type__pk=ct_item.id, object_id=item.id).get()
languages = [lang.code for lang in item_lan.languages.all()]
return languages
except TransItemLanguage.DoesNotExist:
return [] | Get the languages configured for the current item
:param ct_item:
:param item:
:return: |
def setup_signals(self, ):
"""Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None
"""
prjlvl = self.prjbrws.get_level(0)
prjlvl.new_root.connect(self.update_browsers)
for rb in self._releasetype_button_mapping.values():
rb.toggled.connect(self.releasetype_btn_toggled)
shotdesclvl = self.shotbrws.get_level(3)
shotselcb = partial(self.selection_changed,
source=self.shotbrws,
update=self.shotverbrws,
commentbrowser=self.shotcommentbrws,
mapper=self.shot_info_mapper)
shotdesclvl.new_root.connect(shotselcb)
shotverlvl = self.shotverbrws.get_level(0)
shotverlvl.new_root.connect(self.shot_ver_sel_changed)
shotmappercb = partial(self.set_mapper_index, mapper=self.shot_info_mapper)
shotverlvl.new_root.connect(shotmappercb)
shotverlvl.new_root.connect(partial(self.shotcommentbrws.set_root, 0))
assetdesclvl = self.assetbrws.get_level(3)
assetselcb = partial(self.selection_changed,
source=self.assetbrws,
update=self.assetverbrws,
commentbrowser=self.assetcommentbrws,
mapper=self.asset_info_mapper)
assetdesclvl.new_root.connect(assetselcb)
assetverlvl = self.assetverbrws.get_level(0)
assetverlvl.new_root.connect(self.asset_ver_sel_changed)
assetmappercb = partial(self.set_mapper_index, mapper=self.asset_info_mapper)
assetverlvl.new_root.connect(assetmappercb)
assetverlvl.new_root.connect(partial(self.assetcommentbrws.set_root, 0))
self.current_pb.clicked.connect(self.set_to_current)
self.asset_open_path_tb.clicked.connect(self.open_asset_path)
self.shot_open_path_tb.clicked.connect(self.open_shot_path)
self.refresh_tb.clicked.connect(self.refresh) | Connect the signals with the slots to make the ui functional
:returns: None
:rtype: None
:raises: None |
def to_xml(self, xml_declaration=True):
"""
Return the contents of this verb as an XML string
:param bool xml_declaration: Include the XML declaration. Defaults to True
"""
xml = ET.tostring(self.xml()).decode('utf-8')
return '<?xml version="1.0" encoding="UTF-8"?>{}'.format(xml) if xml_declaration else xml | Return the contents of this verb as an XML string
:param bool xml_declaration: Include the XML declaration. Defaults to True |
def masters_by_queue(self, region, queue):
"""
Get the master league for a given queue.
:param string region: the region to execute this request on
:param string queue: the queue to get the master players for
:returns: LeagueListDTO
"""
url, query = LeagueApiV4Urls.master_by_queue(region=region, queue=queue)
return self._raw_request(self.masters_by_queue.__name__, region, url, query) | Get the master league for a given queue.
:param string region: the region to execute this request on
:param string queue: the queue to get the master players for
:returns: LeagueListDTO |
def check_email_status(mx_resolver, recipient_address, sender_address, smtp_timeout=10, helo_hostname=None):
"""
Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict
"""
domain = recipient_address[recipient_address.find('@') + 1:]
if helo_hostname is None:
helo_hostname = domain
ret = {'status': 101, 'extended_status': None, 'message': "The server is unable to connect."}
records = []
try:
records = mx_resolver.get_mx_records(helo_hostname)
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed in MX lookup."
smtp = smtplib.SMTP(timeout=smtp_timeout)
for mx in records:
try:
connection_status, connection_message = smtp.connect(mx.exchange)
if connection_status == 220:
smtp.helo(domain)
smtp.mail(sender_address)
status, message = smtp.rcpt(recipient_address)
ret['status'] = status
                pattern = re.compile(r'(\d+\.\d+\.\d+)')
matches = re.match(pattern, message)
if matches:
ret['extended_status'] = matches.group(1)
ret['message'] = message
smtp.quit()
break
except smtplib.SMTPConnectError:
ret['status'] = 111
ret['message'] = "Connection refused or unable to open an SMTP stream."
except smtplib.SMTPServerDisconnected:
ret['status'] = 111
ret['extended_status'] = "SMTP Server disconnected"
except socket.gaierror:
ret['status'] = 512
ret['extended_status'] = "5.1.2 Domain name address resolution failed."
return ret | Checks if an email might be valid by getting the status from the SMTP server.
:param mx_resolver: MXResolver
:param recipient_address: string
:param sender_address: string
:param smtp_timeout: integer
:param helo_hostname: string
:return: dict |
def get(self, key: Any, default: Any = None) -> Any:
"""
    Get the value for the given key from the cookie.
"""
if key in self:
return self[key].value
    return default | Get the value for the given key from the cookie. |
def _iter_candidate_groups(self, init_match, edges0, edges1):
"""Divide the edges into groups"""
# collect all end vertices0 and end vertices1 that belong to the same
# group.
sources = {}
for start_vertex0, end_vertex0 in edges0:
l = sources.setdefault(start_vertex0, [])
l.append(end_vertex0)
dests = {}
for start_vertex1, end_vertex1 in edges1:
start_vertex0 = init_match.reverse[start_vertex1]
l = dests.setdefault(start_vertex0, [])
l.append(end_vertex1)
for start_vertex0, end_vertices0 in sources.items():
end_vertices1 = dests.get(start_vertex0, [])
yield end_vertices0, end_vertices1 | Divide the edges into groups |
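The grouping trick with ``setdefault`` is worth seeing in isolation; a minimal sketch collecting edge end points by their start vertex:

edges = [(0, 'a'), (0, 'b'), (1, 'c')]
groups = {}
for start, end in edges:
    groups.setdefault(start, []).append(end)
assert groups == {0: ['a', 'b'], 1: ['c']}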
def AddBlob(self, blob_id, length):
"""Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_id: rdf_objects.BlobID object.
length: int length of blob
Raises:
IOError: if blob has been finalized.
"""
if self.finalized and length > 0:
raise IOError("Can't add blobs to finalized BlobImage")
self.content_dirty = True
self.index.seek(0, 2)
self.index.write(blob_id.AsBytes())
self.size += length
if length < self.chunksize:
self.finalized = True | Add another blob to this image using its hash.
Once a blob is added that is smaller than the chunksize we finalize the
file, since handling adding more blobs makes the code much more complex.
Args:
blob_id: rdf_objects.BlobID object.
length: int length of blob
Raises:
IOError: if blob has been finalized. |
def disconnect(self, chassis_list):
"""Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names)
"""
self._check_session()
if not isinstance(chassis_list, (list, tuple, set, dict, frozenset)):
chassis_list = (chassis_list,)
if len(chassis_list) == 1:
self._rest.delete_request('connections', chassis_list[0])
else:
params = {chassis: True for chassis in chassis_list}
params['action'] = 'disconnect'
self._rest.post_request('connections', None, params) | Remove connection with one or more chassis.
Arguments:
chassis_list -- List of chassis (IP addresses or DNS names) |
def cli(ctx, dname, site):
"""
Enable the <site> under the specified <domain>
"""
assert isinstance(ctx, Context)
dname = domain_parse(dname).hostname
domain = Session.query(Domain).filter(Domain.name == dname).first()
if not domain:
click.secho('No such domain: {dn}'.format(dn=dname), fg='red', bold=True, err=True)
return
site_name = site
site = Site.get(domain, site_name)
if not site:
click.secho('No such site: {site}'.format(site=site_name), fg='red', bold=True, err=True)
return
p = Echo('Constructing paths and configuration files...')
site.enable()
p.done()
# Restart Nginx
p = Echo('Restarting web server...')
FNULL = open(os.devnull, 'w')
subprocess.check_call(['service', 'nginx', 'restart'], stdout=FNULL, stderr=subprocess.STDOUT)
p.done() | Enable the <site> under the specified <domain> |
def unzoom(self, event=None, set_bounds=True):
""" zoom out 1 level, or to full data range """
lims = None
if len(self.conf.zoom_lims) > 1:
lims = self.conf.zoom_lims.pop()
ax = self.axes
if lims is None: # auto scale
self.conf.zoom_lims = [None]
xmin, xmax, ymin, ymax = self.data_range
lims = {self.axes: [xmin, xmax, ymin, ymax]}
self.set_viewlimits()
self.canvas.draw() | zoom out 1 level, or to full data range |
def item(self, current_item):
"""
Return the current item.
@param current_item: Current item
@type param: django.models
@return: Value and label of the current item
@rtype : dict
"""
return {
'value': text(getattr(current_item, self.get_field_name())),
'label': self.label(current_item)
} | Return the current item.
@param current_item: Current item
@type param: django.models
@return: Value and label of the current item
@rtype : dict |
def all(self):
""" Returns list with all indexed partitions. """
partitions = []
for partition in self.index.searcher().documents():
partitions.append(
PartitionSearchResult(dataset_vid=partition['dataset_vid'], vid=partition['vid'], score=1))
return partitions | Returns list with all indexed partitions. |
def auth_required(*auth_methods):
"""
    Decorator that protects endpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms.
"""
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
h = {}
mechanisms = [(method, login_mechanisms.get(method))
for method in auth_methods]
for method, mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
elif method == 'basic':
r = _security.default_http_auth_realm
h['WWW-Authenticate'] = 'Basic realm="%s"' % r
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response(headers=h)
return decorated_view
    return wrapper | Decorator that protects endpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms. |
def gen_passwd(self):
'''
    Resetting the password.
'''
post_data = self.get_post_data()
userinfo = MUser.get_by_name(post_data['u'])
sub_timestamp = int(post_data['t'])
cur_timestamp = tools.timestamp()
if cur_timestamp - sub_timestamp < 600 and cur_timestamp > sub_timestamp:
pass
else:
        kwd = {
            'info': 'Password reset has timed out!',
            'link': '/user/reset-password',
        }
        self.set_status(400)
        return self.render('misc/html/404.html',
                           kwd=kwd,
                           userinfo=self.userinfo)
hash_str = tools.md5(userinfo.user_name + post_data['t'] + userinfo.user_pass)
if hash_str == post_data['p']:
pass
else:
        kwd = {
            'info': 'Password reset verification failed!',
            'link': '/user/reset-password',
        }
        self.set_status(400)
        return self.render('misc/html/404.html',
                           kwd=kwd,
                           userinfo=self.userinfo, )
new_passwd = tools.get_uu8d()
MUser.update_pass(userinfo.uid, new_passwd)
kwd = {
'user_name': userinfo.user_name,
'new_pass': new_passwd,
}
self.render('user/user_show_pass.html',
cfg=config.CMS_CFG,
kwd=kwd,
userinfo=self.userinfo, ) | Resetting the password. |
def add_source(self, name, src_dict, free=None, init_source=True,
save_source_maps=True, use_pylike=True,
use_single_psf=False, **kwargs):
"""Add a source to the ROI model. This function may be called
either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or source object defining the source properties
(coordinates, spectral parameters, etc.).
free : bool
Initialize the source with a free normalization parameter.
use_pylike : bool
Create source maps with pyLikelihood.
use_single_psf : bool
Use the PSF model calculated for the ROI center. If false
then a new model will be generated using the position of
the source.
"""
if self.roi.has_source(name):
msg = 'Source %s already exists.' % name
self.logger.error(msg)
raise Exception(msg)
loglevel = kwargs.pop('loglevel', self.loglevel)
self.logger.log(loglevel, 'Adding source ' + name)
src = self.roi.create_source(name, src_dict, rescale=True)
self.make_template(src)
for c in self.components:
c.add_source(name, src_dict, free=free,
save_source_maps=save_source_maps,
use_pylike=use_pylike,
use_single_psf=use_single_psf)
if self._like is None:
return
if self.config['gtlike']['edisp'] and src.name not in \
self.config['gtlike']['edisp_disable']:
self.set_edisp_flag(src.name, True)
self.like.syncSrcParams(str(name))
self.like.model = self.like.components[0].model
# if free is not None:
# self.free_norm(name, free, loglevel=logging.DEBUG)
if init_source:
self._init_source(name)
self._update_roi()
if self._fitcache is not None:
self._fitcache.update_source(name) | Add a source to the ROI model. This function may be called
either before or after `~fermipy.gtanalysis.GTAnalysis.setup`.
Parameters
----------
name : str
Source name.
src_dict : dict or `~fermipy.roi_model.Source` object
Dictionary or source object defining the source properties
(coordinates, spectral parameters, etc.).
free : bool
Initialize the source with a free normalization parameter.
use_pylike : bool
Create source maps with pyLikelihood.
use_single_psf : bool
Use the PSF model calculated for the ROI center. If false
then a new model will be generated using the position of
the source. |
def from_datetime(self, dt):
"""
generates a UUID for a given datetime
:param dt: datetime
:type dt: datetime
:return:
"""
global _last_timestamp
epoch = datetime(1970, 1, 1, tzinfo=dt.tzinfo)
offset = epoch.tzinfo.utcoffset(epoch).total_seconds() if epoch.tzinfo else 0
timestamp = (dt - epoch).total_seconds() - offset
node = None
clock_seq = None
nanoseconds = int(timestamp * 1e9)
timestamp = int(nanoseconds // 100) + 0x01b21dd213814000
if clock_seq is None:
import random
clock_seq = random.randrange(1 << 14) # instead of stable storage
time_low = timestamp & 0xffffffff
time_mid = (timestamp >> 32) & 0xffff
time_hi_version = (timestamp >> 48) & 0x0fff
clock_seq_low = clock_seq & 0xff
clock_seq_hi_variant = (clock_seq >> 8) & 0x3f
if node is None:
node = getnode()
return pyUUID(fields=(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node), version=1) | Generates a version 1 UUID for a given datetime.
:param dt: datetime
:type dt: datetime
:return: a version 1 UUID corresponding to *dt* |
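The same construction as a self-contained sketch using only the standard library; 0x01b21dd213814000 is the offset, in 100-nanosecond ticks, between the Unix epoch and the RFC 4122 Gregorian epoch (1582-10-15):

import random
from datetime import datetime, timezone
from uuid import UUID, getnode

def uuid1_from_datetime(dt):
    # 100 ns intervals since 1582-10-15, as required by version 1 UUIDs
    ticks = int(dt.timestamp() * 1e7) + 0x01b21dd213814000
    clock_seq = random.randrange(1 << 14)  # random in lieu of stable storage
    return UUID(fields=(ticks & 0xffffffff,
                        (ticks >> 32) & 0xffff,
                        (ticks >> 48) & 0x0fff,
                        (clock_seq >> 8) & 0x3f,
                        clock_seq & 0xff,
                        getnode()),
                version=1)

print(uuid1_from_datetime(datetime(2020, 1, 1, tzinfo=timezone.utc)))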
def cancel(self):
'''
Cancel a running workflow.
Args:
None
Returns:
None
'''
if not self.id:
raise WorkflowError('Workflow is not running. Cannot cancel.')
if self.batch_values:
self.workflow.batch_workflow_cancel(self.id)
else:
self.workflow.cancel(self.id) | Cancel a running workflow.
Args:
None
Returns:
None |
def diagnose_embedding(emb, source, target):
"""A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains
"""
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y | A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains |
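For example, embedding the triangle K3 into a 4-cycle with one two-node chain produces no diagnostics, while a broken embedding would yield (ExceptionClass, args...) tuples:

import networkx as nx

source = nx.complete_graph(3)        # K3
target = nx.cycle_graph(4)           # C4
emb = {0: [0], 1: [1], 2: [2, 3]}    # source node 2 uses the chain {2, 3}

for error in diagnose_embedding(emb, source, target):
    print(error)                     # prints nothing: the embedding is valid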
def _index_range(self, version, symbol, from_version=None, **kwargs):
"""
Tuple describing range to read from the ndarray - closed:open
"""
from_index = None
if from_version:
from_index = from_version['up_to']
return from_index, None | Tuple describing range to read from the ndarray - closed:open |
def run_evaluate(self) -> None:
"""
Overrides the base evaluation to set the value to the evaluation result of the value
expression in the schema
"""
result = None
self.eval_error = False
if self._needs_evaluation:
result = self._schema.value.evaluate(self._evaluation_context)
self.eval_error = result is None
if self.eval_error:
return
# Only set the value if it conforms to the field type
if not self._schema.is_type_of(result):
try:
result = self._schema.type_object(result)
except Exception as err:
logging.debug('{} in casting {} to {} for field {}. Error: {}'.format(
type(err).__name__, result, self._schema.type,
self._schema.fully_qualified_name, err))
self.eval_error = True
return
try:
result = self._schema.sanitize_object(result)
except Exception as err:
logging.debug('{} in sanitizing {} of type {} for field {}. Error: {}'.format(
type(err).__name__, result, self._schema.type, self._schema.fully_qualified_name,
err))
self.eval_error = True
return
self.value = result | Overrides the base evaluation to set the value to the evaluation result of the value
expression in the schema |
def parse(value, pattern='{head}{padding}{tail} [{ranges}]'):
'''Parse *value* into a :py:class:`~clique.collection.Collection`.
Use *pattern* to extract information from *value*. It may make use of the
following keys:
* *head* - Common leading part of the collection.
* *tail* - Common trailing part of the collection.
* *padding* - Padding value in ``%0d`` format.
* *range* - Total range in the form ``start-end``.
* *ranges* - Comma separated ranges of indexes.
* *holes* - Comma separated ranges of missing indexes.
.. note::
*holes* only makes sense if *range* or *ranges* is also present.
'''
# Construct regular expression for given pattern.
expressions = {
'head': '(?P<head>.*)',
'tail': '(?P<tail>.*)',
'padding': r'%(?P<padding>\d*)d',
'range': r'(?P<range>\d+-\d+)?',
'ranges': r'(?P<ranges>[\d ,\-]+)?',
'holes': r'(?P<holes>[\d ,\-]+)'
}
pattern_regex = re.escape(pattern)
for key, expression in expressions.items():
pattern_regex = pattern_regex.replace(
r'\{{{0}\}}'.format(key),
expression
)
pattern_regex = '^{0}$'.format(pattern_regex)
# Match pattern against value and use results to construct collection.
match = re.search(pattern_regex, value)
if match is None:
raise ValueError('Value did not match pattern.')
groups = match.groupdict()
if 'padding' in groups and groups['padding']:
groups['padding'] = int(groups['padding'])
else:
groups['padding'] = 0
# Create collection and then add indexes.
collection = Collection(
groups.get('head', ''),
groups.get('tail', ''),
groups['padding']
)
if groups.get('range', None) is not None:
start, end = map(int, groups['range'].split('-'))
collection.indexes.update(range(start, end + 1))
if groups.get('ranges', None) is not None:
parts = [part.strip() for part in groups['ranges'].split(',')]
for part in parts:
index_range = list(map(int, part.split('-', 2)))
if len(index_range) > 1:
# Index range.
for index in range(index_range[0], index_range[1] + 1):
collection.indexes.add(index)
else:
# Single index.
collection.indexes.add(index_range[0])
if 'holes' in groups:
parts = [part.strip() for part in groups['holes'].split(',')]
for part in parts:
index_range = list(map(int, part.split('-', 2)))
if len(index_range) > 1:
# Index range.
for index in range(index_range[0], index_range[1] + 1):
collection.indexes.remove(index)
else:
# Single index.
collection.indexes.remove(index_range[0])
return collection | Parse *value* into a :py:class:`~clique.collection.Collection`.
Use *pattern* to extract information from *value*. It may make use of the
following keys:
* *head* - Common leading part of the collection.
* *tail* - Common trailing part of the collection.
* *padding* - Padding value in ``%0d`` format.
* *range* - Total range in the form ``start-end``.
* *ranges* - Comma separated ranges of indexes.
* *holes* - Comma separated ranges of missing indexes.
.. note::
*holes* only makes sense if *range* or *ranges* is also present. |
def get_title(self, obj):
"""Set search entry title for object"""
search_title = self.get_model_config_value(obj, 'search_title')
if not search_title:
return super().get_title(obj)
return search_title.format(**obj.__dict__) | Set search entry title for object |
def tcp_receive(self):
"""Receive data from TCP port."""
data = self.conn.recv(self.BUFFER_SIZE)
if not isinstance(data, str):
# Python 3 specific
data = data.decode("utf-8")
return str(data) | Receive data from TCP port. |
def compile_tilebus(files, env, outdir=None, header_only=False):
"""Given a path to a *.cdb file, process it and generate c tables and/or headers containing the information."""
if outdir is None:
dirs = env["ARCH"].build_dirs()
outdir = dirs['build']
cmdmap_c_path = os.path.join(outdir, 'command_map_c.c')
cmdmap_h_path = os.path.join(outdir, 'command_map_c.h')
config_c_path = os.path.join(outdir, 'config_variables_c.c')
config_h_path = os.path.join(outdir, 'config_variables_c.h')
if header_only:
return env.Command([cmdmap_h_path, config_h_path], files,
action=env.Action(tb_h_file_creation, "Creating header files from TileBus definitions"))
else:
env['MIBFILE'] = '#' + cmdmap_c_path
return env.Command([cmdmap_c_path, cmdmap_h_path, config_c_path, config_h_path], files,
action=env.Action(tb_c_file_creation, "Compiling TileBus commands and config variables")) | Given a path to a *.cdb file, process it and generate c tables and/or headers containing the information. |
def _delete(self, pos, idx):
"""
Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc.
"""
_maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index
keys_pos = _keys[pos]
lists_pos = _lists[pos]
del keys_pos[idx]
del lists_pos[idx]
self._len -= 1
len_keys_pos = len(keys_pos)
if len_keys_pos > self._half:
_maxes[pos] = keys_pos[-1]
if len(_index) > 0:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_keys) > 1:
if not pos:
pos += 1
prev = pos - 1
_keys[prev].extend(_keys[pos])
_lists[prev].extend(_lists[pos])
_maxes[prev] = _keys[prev][-1]
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:]
self._expand(prev)
elif len_keys_pos:
_maxes[pos] = keys_pos[-1]
else:
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:] | Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc. |
def image_import(infile, force):
"""Import image anchore data from a JSON file."""
ecode = 0
try:
with open(infile, 'r') as FH:
savelist = json.loads(FH.read())
except Exception as err:
anchore_print_err("could not load input file: " + str(err))
ecode = 1
if ecode == 0:
for record in savelist:
try:
imageId = record['image']['imageId']
if contexts['anchore_db'].is_image_present(imageId) and not force:
anchore_print("image ("+str(imageId)+") already exists in DB, skipping import.")
else:
imagedata = record['image']['imagedata']
try:
rc = contexts['anchore_db'].save_image_new(imageId, report=imagedata)
if not rc:
contexts['anchore_db'].delete_image(imageId)
raise Exception("save to anchore DB failed")
except Exception as err:
contexts['anchore_db'].delete_image(imageId)
raise err
except Exception as err:
anchore_print_err("could not store image ("+str(imageId)+") from import file: "+ str(err))
ecode = 1
sys.exit(ecode) | Import image anchore data from a JSON file. |
def convert_branch(self, old_node, new_node, ids_to_skip, comment_dict=None):
"""
Recursively walk an indicator logic tree, starting from an Indicator node.
Converts OpenIOC 1.1 Indicator/IndicatorItems to OpenIOC 1.0 and preserves order.
:param old_node: An Indicator node, which we walk down to convert
:param new_node: An Indicator node, to which we add new IndicatorItem and Indicator nodes
:param ids_to_skip: set of node @id values not to convert
:param comment_dict: maps ids to comment values. only applied to IndicatorItem nodes
:return: returns True upon completion.
:raises: DowngradeError if there is a problem during the conversion.
"""
expected_tag = 'Indicator'
if old_node.tag != expected_tag:
raise DowngradeError('old_node expected tag is [%s]' % expected_tag)
if not comment_dict:
comment_dict = {}
for node in old_node.getchildren():
node_id = node.get('id')
if node_id in ids_to_skip:
continue
if node.tag == 'IndicatorItem':
negation = node.get('negate')
condition = node.get('condition')
if 'true' in negation.lower():
new_condition = condition + 'not'
else:
new_condition = condition
document = node.xpath('Context/@document')[0]
search = node.xpath('Context/@search')[0]
content_type = node.xpath('Content/@type')[0]
content = node.findtext('Content')
context_type = node.xpath('Context/@type')[0]
new_ii_node = ioc_api.make_indicatoritem_node(condition=condition,
document=document,
search=search,
content_type=content_type,
content=content,
context_type=context_type,
nid=node_id)
# set condition
new_ii_node.attrib['condition'] = new_condition
# set comment
if node_id in comment_dict:
comment = comment_dict[node_id]
comment_node = et.Element('Comment')
comment_node.text = comment
new_ii_node.append(comment_node)
# remove preserver-case and negate
del new_ii_node.attrib['negate']
del new_ii_node.attrib['preserve-case']
new_node.append(new_ii_node)
elif node.tag == 'Indicator':
operator = node.get('operator')
if operator.upper() not in ['OR', 'AND']:
raise DowngradeError('Indicator@operator is not AND/OR. [%s] has [%s]' % (node_id, operator))
new_i_node = ioc_api.make_indicator_node(operator, node_id)
new_node.append(new_i_node)
self.convert_branch(node, new_i_node, ids_to_skip, comment_dict)
else:
# should never get here
raise DowngradeError('node is not an Indicator/IndicatorItem')
return True | Recursively walk an indicator logic tree, starting from an Indicator node.
Converts OpenIOC 1.1 Indicator/IndicatorItems to OpenIOC 1.0 and preserves order.
:param old_node: An Indicator node, which we walk down to convert
:param new_node: An Indicator node, to which we add new IndicatorItem and Indicator nodes
:param ids_to_skip: set of node @id values not to convert
:param comment_dict: maps ids to comment values. only applied to IndicatorItem nodes
:return: returns True upon completion.
:raises: DowngradeError if there is a problem during the conversion. |
def _from_dict(cls, _dict):
"""Initialize a DialogSuggestionValue object from a json dictionary."""
args = {}
if 'input' in _dict:
args['input'] = MessageInput._from_dict(_dict.get('input'))
if 'intents' in _dict:
args['intents'] = [
RuntimeIntent._from_dict(x) for x in (_dict.get('intents'))
]
if 'entities' in _dict:
args['entities'] = [
RuntimeEntity._from_dict(x) for x in (_dict.get('entities'))
]
return cls(**args) | Initialize a DialogSuggestionValue object from a json dictionary. |
def main(): # pragma: nocover
"""Return exit code of zero iff directory is not changed.
"""
p = argparse.ArgumentParser()
p.add_argument(
'directory',
help="Directory to check"
)
p.add_argument(
'--verbose', '-v', action='store_true',
help="increase verbosity"
)
args = p.parse_args()
import sys
_changed = changed(args.directory, args=args)
sys.exit(_changed) | Return exit code of zero iff directory is not changed. |
def fix_flags(self, flags):
"""Fixes standard TensorBoard CLI flags to parser."""
FlagsError = base_plugin.FlagsError
if flags.version_tb:
pass
elif flags.inspect:
if flags.logdir and flags.event_file:
raise FlagsError(
'Must specify either --logdir or --event_file, but not both.')
if not (flags.logdir or flags.event_file):
raise FlagsError('Must specify either --logdir or --event_file.')
elif not flags.db and not flags.logdir:
raise FlagsError('A logdir or db must be specified. '
'For example `tensorboard --logdir mylogdir` '
'or `tensorboard --db sqlite:~/.tensorboard.db`. '
'Run `tensorboard --helpfull` for details and examples.')
if flags.path_prefix.endswith('/'):
flags.path_prefix = flags.path_prefix[:-1] | Fixes standard TensorBoard CLI flags to parser. |
def get_members_of_group(self, gname):
"""Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the services in the group
:rtype: list[alignak.objects.service.Service]
"""
hostgroup = self.find_by_name(gname)
if hostgroup:
return hostgroup.get_services()
return [] | Get all members of a group which name is given in parameter
:param gname: name of the group
:type gname: str
:return: list of the services in the group
:rtype: list[alignak.objects.service.Service] |
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step):
"""
This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames: list of the processed WAVE file paths
"""
all_mt_feats = numpy.array([])
signal_idx = numpy.array([])
process_times = []
types = ('*.wav', '*.aif', '*.aiff', '*.ogg')
wav_file_list = []
for files in types:
wav_file_list.extend(glob.glob(os.path.join(dirName, files)))
wav_file_list = sorted(wav_file_list)
for i, wavFile in enumerate(wav_file_list):
[fs, x] = audioBasicIO.readAudioFile(wavFile)
if isinstance(x, int):
continue
x = audioBasicIO.stereo2mono(x)
[mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs),
round(mt_step * fs),
round(fs * st_win),
round(fs * st_step))
mt_term_feats = numpy.transpose(mt_term_feats)
if len(all_mt_feats) == 0: # append feature vector
all_mt_feats = mt_term_feats
signal_idx = numpy.zeros((mt_term_feats.shape[0], ))
else:
all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats))
signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], )))
return (all_mt_feats, signal_idx, wav_file_list) | This function extracts the mid-term features of the WAVE
files of a particular folder without averaging each file.
ARGUMENTS:
- dirName: the path of the WAVE directory
- mt_win, mt_step: mid-term window and step (in seconds)
- st_win, st_step: short-term window and step (in seconds)
RETURNS:
- X: A feature matrix
- Y: A matrix of file labels
- filenames: list of the processed WAVE file paths |
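A hedged usage sketch, assuming pyAudioAnalysis and its dependencies are installed; 1 s mid-term and 50 ms short-term windows, both without overlap:

X, idx, files = dirWavFeatureExtractionNoAveraging('wavs/', 1.0, 1.0, 0.05, 0.05)
print(X.shape)    # (total mid-term frames across all files, n_features)
print(idx.shape)  # one file index per mid-term frame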
def partition(predicate, iterable):
"""Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation.
"""
t1, t2 = tee(iterable)
return filterfalse(predicate, t1), filter(predicate, t2) | Use a predicate to partition true and false entries.
Reference
---------
Python itertools documentation. |
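A quick usage example; note that the first iterable holds the entries where the predicate is false:

from itertools import filterfalse, tee  # dependencies of partition above

odds, evens = partition(lambda n: n % 2 == 0, range(10))
print(list(odds))   # [1, 3, 5, 7, 9]
print(list(evens))  # [0, 2, 4, 6, 8]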
def write_version(name=None, path=None):
"""Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json
"""
# Written like this for coverage purposes.
# http://stackoverflow.com/questions/5850268/how-to-test-or-mock-if-name-main-contents/27084447#27084447
if name in (None, '__main__'):
path = path or os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"version.json")
contents = {
'version': __version__,
'version_string': __version_string__,
}
with open(path, 'w') as filehandle:
filehandle.write(json.dumps(contents, sort_keys=True, indent=4)) | Write the version info to ../version.json, for setup.py.
Args:
name (Optional[str]): this is for the ``write_version(name=__name__)``
below. That's one way to both follow the
``if __name__ == '__main__':`` convention but also allow for full
coverage without ignoring parts of the file.
path (Optional[str]): the path to write the version json to. Defaults
to ../version.json |
def check_voltage(grid, mode):
""" Checks for voltage stability issues at all nodes for MV or LV grid
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:any:`list` of :any:`GridDing0`
List of critical nodes, sorted descending by voltage difference.
Notes
-----
The examination is done in two steps, according to [#]_ :
1. It is checked #TODO: what?
2. #TODO: what's next?
References
----------
.. [#] dena VNS
"""
crit_nodes = {}
if mode == 'MV':
# load max. voltage difference for load and feedin case
mv_max_v_level_lc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_lc_diff_normal'))
mv_max_v_level_fc_diff_normal = float(cfg_ding0.get('mv_routing_tech_constraints',
'mv_max_v_level_fc_diff_normal'))
# check nodes' voltages
voltage_station = grid._station.voltage_res
for node in grid.graph_nodes_sorted():
try:
# compare node's voltage with max. allowed voltage difference for load and feedin case
if (abs(voltage_station[0] - node.voltage_res[0]) > mv_max_v_level_lc_diff_normal) or\
(abs(voltage_station[1] - node.voltage_res[1]) > mv_max_v_level_fc_diff_normal):
crit_nodes[node] = {'node': node,
'v_diff': max([abs(v2-v1) for v1, v2 in zip(node.voltage_res, voltage_station)])}
except Exception:
pass
elif mode == 'LV':
raise NotImplementedError
if crit_nodes:
logger.info('==> {} nodes have voltage issues.'.format(len(crit_nodes)))
return [_['node'] for _ in sorted(crit_nodes.values(), key=lambda _: _['v_diff'], reverse=True)] | Checks for voltage stability issues at all nodes for MV or LV grid
Parameters
----------
grid : GridDing0
Grid identifier.
mode : str
Kind of grid ('MV' or 'LV').
Returns
-------
:any:`list` of :any:`GridDing0`
List of critical nodes, sorted descending by voltage difference.
Notes
-----
The examination is done in two steps, according to [#]_ :
1. It is checked #TODO: what?
2. #TODO: what's next?
References
----------
.. [#] dena VNS |
def handler(self):
"""Run the required analyses"""
printtime('Creating and populating objects', self.start)
self.populate()
printtime('Populating {} sequence profiles'.format(self.analysistype), self.start)
self.profiler()
# Annotate sequences with prokka
self.annotatethreads()
# Run the analyses
self.cdsthreads()
# Find core coding features
self.cdssequencethreads()
# Extract the sequence for each coding feature
self.allelematchthreads()
# Determine sequence types from the analyses
printtime('Determining {} sequence types'.format(self.analysistype), self.start)
self.sequencetyper()
# Create reports
printtime('Creating {} reports'.format(self.analysistype), self.start)
self.reporter() | Run the required analyses |
def create(context, job_id, name, type, url, data):
"""create(context, job_id, name, type, url, data)
Create an analytic.
>>> dcictl analytic-create [OPTIONS]
:param string job-id: The job on which to attach the analytic
:param string name: Name of the analytic [required]
:param string type: Type of the analytic [required]
:param string url: Url of the bug [optional]
:param string data: JSON data of the analytic
"""
result = analytic.create(context, job_id=job_id, name=name, type=type,
url=url, data=data)
utils.format_output(result, context.format) | create(context, job_id, name, type, url, data)
Create an analytic.
>>> dcictl analytic-create [OPTIONS]
:param string job-id: The job on which to attach the analytic
:param string name: Name of the analytic [required]
:param string type: Type of the analytic [required]
:param string url: Url of the bug [optional]
:param string data: JSON data of the analytic |
def _append(lst, indices, value):
"""Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required.
"""
for i, idx in enumerate(indices):
# We need to loop because sometimes indices can increment by more than 1 due to missing tokens.
# Example: Sentence with no words after filtering words.
while len(lst) <= idx:
# Update max counts whenever a new sublist is created.
# There is no need to worry about indices beyond `i` since they will end up creating new lists as well.
lst.append([])
lst = lst[idx]
# Add token and update token max count.
lst.append(value) | Adds `value` to `lst` list indexed by `indices`. Will create sub lists as required. |
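For instance, indexing two levels deep creates the intermediate sublists on demand:

lst = []
_append(lst, (0, 2), 'x')   # builds the missing sublists on the way down
_append(lst, (0, 2), 'y')
print(lst)                  # [[[], [], ['x', 'y']]]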
def _imm_merge_class(cls, parent):
'''
_imm_merge_class(imm_class, parent) updates the given immutable class imm_class to have the
appropriate attributes of its given parent class. The parents should be passed through this
function in method-resolution order.
'''
# If this is not an immutable parent, ignore it
if not hasattr(parent, '_pimms_immutable_data_'): return cls
# otherwise, let's look at the data
cdat = cls._pimms_immutable_data_
pdat = parent._pimms_immutable_data_
# for params, values, and checks, we add them to cls only if they do not already exist in cls
cparams = cdat['params']
cvalues = cdat['values']
cconsts = cdat['consts']
for (param, (dflt, tx_fn, arg_lists, check_fns, deps)) in six.iteritems(pdat['params']):
if param not in cparams and param not in cvalues:
cparams[param] = (dflt, tx_fn, [], [], [])
for (value, (arg_list, calc_fn, deps)) in six.iteritems(pdat['values']):
if value in cparams:
raise ValueError('cannot convert value into parameter: %s' % value)
if value not in cvalues:
cvalues[value] = (arg_list, calc_fn, [])
if len(arg_list) == 0:
cconsts[value] = ([], [])
cchecks = cdat['checks']
for (check, (arg_list, check_fn)) in six.iteritems(pdat['checks']):
if check not in cchecks:
cchecks[check] = (arg_list, check_fn)
# That's it for now
return cls | _imm_merge_class(imm_class, parent) updates the given immutable class imm_class to have the
appropriate attributes of its given parent class. The parents should be passed through this
function in method-resolution order. |
def AuthenticateSessionId(self, username, password):
"""
Authenticate using a username and password.
The SenseApi object will store the obtained session_id internally until a call to LogoutSessionId is performed.
@param username (string) - CommonSense username
@param password (string) - MD5Hash of CommonSense password
@return (bool) - Boolean indicating whether AuthenticateSessionId was successful
"""
self.__setAuthenticationMethod__('authenticating_session_id')
parameters = {'username':username, 'password':password}
if self.__SenseApiCall__("/login.json", "POST", parameters = parameters):
try:
response = json.loads(self.__response__)
except:
self.__setAuthenticationMethod__('not_authenticated')
self.__error__ = "notjson"
return False
try:
self.__session_id__ = response['session_id']
self.__setAuthenticationMethod__('session_id')
return True
except:
self.__setAuthenticationMethod__('not_authenticated')
self.__error__ = "no session_id"
return False
else:
self.__setAuthenticationMethod__('not_authenticated')
self.__error__ = "api call unsuccessful"
return False | Authenticate using a username and password.
The SenseApi object will store the obtained session_id internally until a call to LogoutSessionId is performed.
@param username (string) - CommonSense username
@param password (string) - MD5Hash of CommonSense password
@return (bool) - Boolean indicating whether AuthenticateSessionId was successful |
def create_precursor_quant_lookup(quantdb, mzmlfn_feats, quanttype,
rttol, mztol, mztoltype):
"""Fills quant sqlite with precursor quant from:
features - generator of xml features from openms
"""
featparsermap = {'kronik': kronik_featparser,
'openms': openms_featparser,
}
features = []
mzmlmap = quantdb.get_mzmlfile_map()
for specfn, feat_element in mzmlfn_feats:
feat = featparsermap[quanttype](feat_element)
features.append((mzmlmap[specfn], feat['rt'], feat['mz'],
feat['charge'], feat['intensity'])
)
if len(features) == DB_STORE_CHUNK:
quantdb.store_ms1_quants(features)
features = []
quantdb.store_ms1_quants(features)
quantdb.index_precursor_quants()
align_quants_psms(quantdb, rttol, mztol, mztoltype) | Fills quant sqlite with precursor quant from:
features - generator of xml features from openms |
def decode(self, input, final=False):
"""Decode one chunk of the input.
:param input: A byte string.
:param final:
Indicate that no more input is available.
Must be :obj:`True` if this is the last call.
:returns: A Unicode string.
"""
decoder = self._decoder
if decoder is not None:
return decoder(input, final)
input = self._buffer + input
encoding, input = _detect_bom(input)
if encoding is None:
if len(input) < 3 and not final: # Not enough data yet.
self._buffer = input
return ''
else: # No BOM
encoding = self._fallback_encoding
decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
self._decoder = decoder
self.encoding = encoding
return decoder(input, final) | Decode one chunk of the input.
:param input: A byte string.
:param final:
Indicate that no more input is available.
Must be :obj:`True` if this is the last call.
:returns: A Unicode string. |
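A hypothetical calling pattern, assuming this method lives on an incremental-decoder class constructed with a fallback encoding; the BOM in the first chunk selects UTF-8:

decoder = IncrementalDecoder(fallback_encoding='utf-8')  # hypothetical class
text = decoder.decode(b'\xef\xbb\xbfhell')               # BOM detected
text += decoder.decode(b'o', final=True)
print(text)                                              # 'hello'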
def get_customer_transitions(self, issue_id_or_key):
"""
Returns a list of transitions that customers can perform on the request
:param issue_id_or_key: str
:return:
"""
url = 'rest/servicedeskapi/request/{}/transition'.format(issue_id_or_key)
return self.get(url, headers=self.experimental_headers) | Returns a list of transitions that customers can perform on the request
:param issue_id_or_key: str
:return: |
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices of the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# Preprocess to obtain features
F = self._preprocess()
# Normalize
F = msaf.utils.normalize(F, norm_type=self.config["bound_norm_feats"])
# Make sure that the M_gaussian is even
if self.config["M_gaussian"] % 2 == 1:
self.config["M_gaussian"] += 1
# Median filter
F = median_filter(F, M=self.config["m_median"])
#plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()
# Self similarity matrix
S = compute_ssm(F)
# Compute gaussian kernel
G = compute_gaussian_krnl(self.config["M_gaussian"])
#plt.imshow(S, interpolation="nearest", aspect="auto"); plt.show()
# Compute the novelty curve
nc = compute_nc(S, G)
# Find peaks in the novelty curve
est_idxs = pick_peaks(nc, L=self.config["L_peaks"])
# Add first and last frames
est_idxs = np.concatenate(([0], est_idxs, [F.shape[0] - 1]))
# Empty labels
est_labels = np.ones(len(est_idxs) - 1) * -1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
return est_idxs, est_labels | Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices of the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments. |
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels | Flattens predictions in the batch |
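A small shape check with PyTorch, marking one pixel with a hypothetical ignore label of 255:

import torch

probas = torch.rand(2, 3, 4, 4)              # B=2, C=3, H=W=4
labels = torch.randint(0, 3, (2, 4, 4))
labels[0, 0, 0] = 255                        # one ignored pixel
vprobas, vlabels = flatten_probas(probas, labels, ignore=255)
print(vprobas.shape, vlabels.shape)          # torch.Size([31, 3]) torch.Size([31])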
def init_app(state):
"""
Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance
"""
app = state.app
app.config.setdefault('SPLIT_ALLOW_MULTIPLE_EXPERIMENTS', False)
app.config.setdefault('SPLIT_DB_FAILOVER', False)
app.config.setdefault('SPLIT_IGNORE_IP_ADDRESSES', [])
app.config.setdefault('SPLIT_ROBOT_REGEX', r"""
(?i)\b(
Baidu|
Gigabot|
Googlebot|
libwww-perl|
lwp-trivial|
msnbot|
SiteUptime|
Slurp|
WordPress|
ZIBB|
ZyBorg
)\b
""")
app.jinja_env.globals.update({
'ab_test': ab_test,
'finished': finished
})
@app.template_filter()
def percentage(number):
number *= 100
if abs(number) < 10:
return "%.1f%%" % round(number, 1)
else:
return "%d%%" % round(number) | Prepare the Flask application for Flask-Split.
:param state: :class:`BlueprintSetupState` instance |
def hex_color(value):
'''Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0.
'''
r = ((value >> (8 * 2)) & 255) / 255.0
g = ((value >> (8 * 1)) & 255) / 255.0
b = ((value >> (8 * 0)) & 255) / 255.0
return (r, g, b) | Accepts a hexadecimal color `value` in the format ``0xrrggbb`` and
returns an (r, g, b) tuple where 0.0 <= r, g, b <= 1.0. |
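For example, pure orange decodes to full red, roughly half green, and no blue:

r, g, b = hex_color(0xff8000)
print(round(r, 3), round(g, 3), round(b, 3))  # 1.0 0.502 0.0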
def validate_request_method_to_operation(request_method, path_definition):
"""
Given a request method, validate that the request method is valid for the
api path.
If so, return the operation definition related to this request method.
"""
try:
operation_definition = path_definition[request_method]
except KeyError:
allowed_methods = set(REQUEST_METHODS).intersection(path_definition.keys())
raise ValidationError(
MESSAGES['request']['invalid_method'].format(
request_method, allowed_methods,
),
)
return operation_definition | Given a request method, validate that the request method is valid for the
api path.
If so, return the operation definition related to this request method. |
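A minimal sketch of the happy path, using a hypothetical two-method path definition; an unlisted method raises ValidationError naming the allowed methods:

path_definition = {
    'get': {'operationId': 'listUsers'},
    'post': {'operationId': 'createUser'},
}
print(validate_request_method_to_operation('get', path_definition))
# {'operationId': 'listUsers'}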
def _append_data(self, value, _file):
"""Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file
"""
# binascii.b2a_base64(value) -> plistlib.Data
# binascii.a2b_base64(Data) -> value(bytes)
_tabs = '\t' * self._tctr
_text = base64.b64encode(value).decode() # value.hex() # str(value)[2:-1]
_labs = '{tabs}<data>{text}</data>\n'.format(tabs=_tabs, text=_text)
# _labs = '{tabs}<data>\n'.format(tabs=_tabs)
# _list = []
# for _item in textwrap.wrap(value.hex(), 32):
# _text = ' '.join(textwrap.wrap(_item, 2))
# _item = '{tabs}\t{text}'.format(tabs=_tabs, text=_text)
# _list.append(_item)
# _labs += '\n'.join(_list)
# _data = [H for H in iter(
# functools.partial(io.StringIO(value.hex()).read, 2), '')
# ] # to split bytes string into length-2 hex string list
# _labs += '\n{tabs}</data>\n'.format(tabs=_tabs)
_file.write(_labs) | Call this function to write data contents.
Keyword arguments:
* value - dict, content to be dumped
* _file - FileIO, output file |
def run(self, job_list):
"""
Runs a job set which consists of the jobs in an iterable job list.
"""
if self._closed:
raise RuntimeError("master is closed")
return self._manager.add_job_set(job_list) | Runs a job set which consists of the jobs in an iterable job list. |
def NewFd(self, fd, URL, encoding, options):
"""Setup an xmltextReader to parse an XML from a file
descriptor. NOTE that the file descriptor will not be
closed when the reader is closed or reset. The parsing
flags @options are a combination of xmlParserOption. This
reuses the existing @reader xmlTextReader. """
ret = libxml2mod.xmlReaderNewFd(self._o, fd, URL, encoding, options)
return ret | Setup an xmltextReader to parse an XML from a file
descriptor. NOTE that the file descriptor will not be
closed when the reader is closed or reset. The parsing
flags @options are a combination of xmlParserOption. This
reuses the existing @reader xmlTextReader. |
def create_generate(kind, project, resource, offset):
"""A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'.
"""
generate = eval('Generate' + kind.title())
return generate(project, resource, offset) | A factory for creating `Generate` objects
`kind` can be 'variable', 'function', 'class', 'module' or
'package'. |
def create(blocks, mode='basic', inplanes=16, divisor=4, num_classes=1000):
""" Vel factory function """
block_dict = {
'basic': BasicBlock,
'bottleneck': Bottleneck
}
def instantiate(**_):
return ResNetV2(block_dict[mode], blocks, inplanes=inplanes, divisor=divisor, num_classes=num_classes)
return ModelFactory.generic(instantiate) | Vel factory function |
def _pybossa_req(method, domain, id=None, payload=None, params={},
headers={'content-type': 'application/json'},
files=None):
"""
Send a JSON request.
Returns the decoded JSON body (or True when the body is empty) if the
request succeeds, otherwise the decoded error body of the response.
"""
url = _opts['endpoint'] + '/api/' + domain
if id is not None:
url += '/' + str(id)
if 'api_key' in _opts:
params['api_key'] = _opts['api_key']
if method == 'get':
r = requests.get(url, params=params)
elif method == 'post':
if files is None and headers['content-type'] == 'application/json':
r = requests.post(url, params=params, headers=headers,
data=json.dumps(payload))
else:
r = requests.post(url, params=params, files=files, data=payload)
elif method == 'put':
r = requests.put(url, params=params, headers=headers,
data=json.dumps(payload))
elif method == 'delete':
r = requests.delete(url, params=params, headers=headers,
data=json.dumps(payload))
if r.status_code // 100 == 2:
if r.text and r.text != '""':
return json.loads(r.text)
else:
return True
else:
return json.loads(r.text) | Send a JSON request.
Returns the decoded JSON body (or True when the body is empty) if the
request succeeds, otherwise the decoded error body of the response. |
def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"):
"""
Run the actual request
"""
backend_url = "{}://{}{}".format(scheme, netloc, path)
try:
response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers))
self._return_response(response)
except Exception as e:
body = "Invalid response from backend: '{}' Server might be busy".format(e.message)
logging.debug(body)
self.send_error(httplib.SERVICE_UNAVAILABLE, body) | Run the actual request |
def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`BumpSequence`.
"""
bump_sequence_op = Xdr.types.BumpSequenceOp(self.bump_to)
self.body.type = Xdr.const.BUMP_SEQUENCE
self.body.bumpSequenceOp = bump_sequence_op
return super(BumpSequence, self).to_xdr_object() | Creates an XDR Operation object that represents this
:class:`BumpSequence`. |
def newChild(self, ns, name, content):
"""Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child list containing the TEXTs and ENTITY_REFs node will
be created. NOTE: @content is supposed to be a piece of XML
CDATA, so it allows entity references. XML special chars
must be escaped first by using
xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
be used. """
if ns is None: ns__o = None
else: ns__o = ns._o
ret = libxml2mod.xmlNewChild(self._o, ns__o, name, content)
if ret is None:raise treeError('xmlNewChild() failed')
__tmp = xmlNode(_obj=ret)
return __tmp | Creation of a new child element, added at the end of
@parent children list. @ns and @content parameters are
optional (None). If @ns is None, the newly created element
inherits the namespace of @parent. If @content is non None,
a child list containing the TEXTs and ENTITY_REFs node will
be created. NOTE: @content is supposed to be a piece of XML
CDATA, so it allows entity references. XML special chars
must be escaped first by using
xmlEncodeEntitiesReentrant(), or xmlNewTextChild() should
be used. |
def update_warning(self):
"""Update the warning label, buttons state and sequence text."""
new_qsequence = self.new_qsequence
new_sequence = self.new_sequence
self.text_new_sequence.setText(
new_qsequence.toString(QKeySequence.NativeText))
conflicts = self.check_conflicts()
if len(self._qsequences) == 0:
warning = SEQUENCE_EMPTY
tip = ''
icon = QIcon()
elif conflicts:
warning = SEQUENCE_CONFLICT
template = '<i>{0}<b>{1}</b>{2}</i>'
tip_title = _('The new shortcut conflicts with:') + '<br>'
tip_body = ''
for s in conflicts:
tip_body += ' - {0}: {1}<br>'.format(s.context, s.name)
tip_body = tip_body[:-4] # Removing last <br>
tip_override = '<br>Press <b>OK</b> to unbind '
tip_override += 'it' if len(conflicts) == 1 else 'them'
tip_override += ' and assign it to <b>{}</b>'.format(self.name)
tip = template.format(tip_title, tip_body, tip_override)
icon = get_std_icon('MessageBoxWarning')
elif new_sequence in BLACKLIST:
warning = IN_BLACKLIST
template = '<i>{0}<b>{1}</b></i>'
tip_title = _('Forbidden key sequence!') + '<br>'
tip_body = ''
use = BLACKLIST[new_sequence]
if use is not None:
tip_body = use
tip = template.format(tip_title, tip_body)
icon = get_std_icon('MessageBoxWarning')
elif self.check_singlekey() is False or self.check_ascii() is False:
warning = INVALID_KEY
template = '<i>{0}</i>'
tip = _('Invalid key sequence entered') + '<br>'
icon = get_std_icon('MessageBoxWarning')
else:
warning = NO_WARNING
tip = 'This shortcut is valid.'
icon = get_std_icon('DialogApplyButton')
self.warning = warning
self.conflicts = conflicts
self.helper_button.setIcon(icon)
self.button_ok.setEnabled(
self.warning in [NO_WARNING, SEQUENCE_CONFLICT])
self.label_warning.setText(tip)
# Every time the warning message is updated, recompute the label height
new_height = self.label_warning.sizeHint().height()
self.label_warning.setMaximumHeight(new_height) | Update the warning label, buttons state and sequence text. |
def _install(archive_filename, install_args=()):
"""Install Setuptools."""
with archive_context(archive_filename):
# installing
log.warn('Installing Setuptools')
if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
# exitcode will be 2
return 2 | Install Setuptools. |
def document_endpoint(endpoint):
"""Extract the full documentation dictionary from the endpoint."""
descr = clean_description(py_doc_trim(endpoint.__doc__))
docs = {
'name': endpoint._route_name,
'http_method': endpoint._http_method,
'uri': endpoint._uri,
'description': descr,
'arguments': extract_endpoint_arguments(endpoint),
'returns': format_endpoint_returns_doc(endpoint),
}
if hasattr(endpoint, "_success"):
docs["success"] = endpoint._success
if hasattr(endpoint, "_requires_permission"):
docs["requires_permission"] = endpoint._requires_permission
return docs | Extract the full documentation dictionary from the endpoint. |
def afx_adafactor():
"""Adafactor with recommended learning rate schedule."""
hparams = afx_adam()
hparams.optimizer = "Adafactor"
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.learning_rate_warmup_steps = 10000
return hparams | Adafactor with recommended learning rate schedule. |