code stringlengths 75 104k | docstring stringlengths 1 46.9k |
|---|---|
def suspendJustTabProviders(installation):
    """
    Replace every INavigableElement powerup of an installation with a
    SuspendedNavigableElement facade that marks it as suspended.

    :param installation: the installation whose powerups are swapped out.
    :raises RuntimeError: if the installation is already suspended.
    """
    if installation.suspended:
        raise RuntimeError("Installation already suspended")
    # Snapshot the powerups first, since we mutate the power-up set below.
    for powerup in list(installation.allPowerups):
        if not INavigableElement.providedBy(powerup):
            continue
        store = powerup.store
        store.powerDown(powerup, INavigableElement)
        facade = SuspendedNavigableElement(store=store, originalNE=powerup)
        store.powerUp(facade, INavigableElement)
        store.powerUp(facade, ISuspender)
    installation.suspended = True
def num_adjacent(self, i, j):
    """Count the 4-connected neighbors of a pixel that exceed the threshold.

    Parameters
    ----------
    i : int
        Row index of the query pixel.
    j : int
        Column index of the query pixel.

    Returns
    -------
    int
        Number of adjacent (up/down/left/right) pixels whose value is
        strictly greater than ``self._threshold``.

    Raises
    ------
    ValueError
        If the pixel lies on (or outside) the image border, where one of
        its four neighbors would be out of bounds.
    """
    # Reject border pixels on either axis.  (The original code used `and`
    # for the column test, which let out-of-range j values slip through
    # and silently wrap around via negative indexing.)
    if i < 1 or i > self.height - 2 or j < 1 or j > self.width - 2:
        raise ValueError('Pixels out of bounds')
    count = 0
    # 4-connectivity offsets: up, down, left, right.
    for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
        if self.data[i + di][j + dj] > self._threshold:
            count += 1
    return count
Parameters
----------
i : int
row index of query pixel
j : int
col index of query pixel
Returns
-------
int
number of adjacent nonzero pixels |
def sorted_timeseries(self, ascending=True):
    """Return a sorted copy of the TimeSeries, leaving the original untouched.

    The returned TimeSeries is assumed to lose its ordering as soon as a
    new value is added to it.

    :param boolean ascending: Determines if the TimeSeries will be ordered
        ascending or descending.

    :return: Returns a new TimeSeries instance sorted in the requested order.
    :rtype: TimeSeries
    """
    # Sort by timestamp (first tuple element); `reverse` handles descending
    # order and, like the stable sort itself, preserves insertion order
    # among equal timestamps.
    ordered = sorted(self._timeseriesData,
                     key=lambda entry: entry[0],
                     reverse=not ascending)
    result = TimeSeries(self._normalized)
    for entry in ordered:
        result.add_entry(*entry)
    result._sorted = ascending
    return result
As an assumption this new TimeSeries is not ordered anymore if a new value is added.
:param boolean ascending: Determines if the TimeSeries will be ordered ascending
or descending.
:return: Returns a new TimeSeries instance sorted in the requested order.
:rtype: TimeSeries |
def download_media(self, media_id):
    """
    Download a media file from the WeChat API.

    See http://mp.weixin.qq.com/wiki/10/78b15308b053286e2a66b33f0f0f5fb6.html

    :param media_id: ID of the media file to download.
    :return: the ``requests`` Response instance (streamed).
    """
    url = 'https://api.weixin.qq.com/cgi-bin/media/get'
    # Stream the response so large files are not buffered in memory.
    return self.request.get(url, params={'media_id': media_id}, stream=True)
详情请参考 http://mp.weixin.qq.com/wiki/10/78b15308b053286e2a66b33f0f0f5fb6.html
:param media_id: 媒体文件 ID
:return: requests 的 Response 实例 |
def stream(self, to=values.unset, from_=values.unset,
           date_sent_before=values.unset, date_sent=values.unset,
           date_sent_after=values.unset, limit=None, page_size=None):
    """
    Stream MessageInstance records from the API as a generator.

    Records are loaded lazily and as efficiently as possible until the
    limit is reached, which makes this operation memory efficient.

    :param unicode to: Filter by messages sent to this number
    :param unicode from_: Filter by from number
    :param datetime date_sent_before: Filter by date sent
    :param datetime date_sent: Filter by date sent
    :param datetime date_sent_after: Filter by date sent
    :param int limit: Upper limit for the number of records to return. stream()
                      guarantees to never return more than limit. Default is no limit
    :param int page_size: Number of records to fetch per request; when not set the
                          default of 50 records is used. If no page_size is defined
                          but a limit is, stream() will read the limit with the most
                          efficient page size, i.e. min(limit, 1000)

    :returns: Generator that will yield up to limit results
    :rtype: list[twilio.rest.api.v2010.account.message.MessageInstance]
    """
    # Resolve effective limit/page_size, then fetch the first page and let
    # the version object lazily walk the remaining pages.
    read_limits = self._version.read_limits(limit, page_size)
    first_page = self.page(
        to=to,
        from_=from_,
        date_sent_before=date_sent_before,
        date_sent=date_sent,
        date_sent_after=date_sent_after,
        page_size=read_limits['page_size'],
    )
    return self._version.stream(
        first_page, read_limits['limit'], read_limits['page_limit'])
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode to: Filter by messages sent to this number
:param unicode from_: Filter by from number
:param datetime date_sent_before: Filter by date sent
:param datetime date_sent: Filter by date sent
:param datetime date_sent_after: Filter by date sent
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.message.MessageInstance] |
def plot_wfdb(record=None, annotation=None, plot_sym=False,
              time_units='samples', title=None, sig_style=None,
              ann_style=None, ecg_grids=None, figsize=None, return_fig=False):
    """
    Subplot individual channels of a wfdb record and/or annotation.

    This function implements the base functionality of the `plot_items`
    function, while allowing direct input of wfdb objects.

    If the record object is input, the function will extract from it:
      - signal values, from the `p_signal` (priority) or `d_signal` attribute
      - sampling frequency, from the `fs` attribute
      - signal names, from the `sig_name` attribute
      - signal units, from the `units` attribute

    If the annotation object is input, the function will extract from it:
      - sample locations, from the `sample` attribute
      - symbols, from the `symbol` attribute
      - the annotation channels, from the `chan` attribute
      - the sampling frequency, from the `fs` attribute if present, and if fs
        was not already extracted from the `record` argument.

    Parameters
    ----------
    record : wfdb Record, optional
        The Record object to be plotted
    annotation : wfdb Annotation, optional
        The Annotation object to be plotted
    plot_sym : bool, optional
        Whether to plot the annotation symbols on the graph.
    time_units : str, optional
        The x axis unit. Allowed options are: 'samples', 'seconds',
        'minutes', and 'hours'.
    title : str, optional
        The title of the graph.
    sig_style : list, optional
        A list of strings, specifying the style of the matplotlib plot
        for each signal channel. The list length should match the number
        of signal channels. If the list has a length of 1, the style
        will be used for all channels. Defaults to [''].
    ann_style : list, optional
        A list of strings, specifying the style of the matplotlib plot
        for each annotation channel. The list length should match the
        number of annotation channels. If the list has a length of 1,
        the style will be used for all channels. Defaults to ['r*'].
    ecg_grids : list, optional
        A list of integers specifying channels in which to plot ecg grids. May
        also be set to 'all' for all channels. Major grids at 0.5mV, and minor
        grids at 0.125mV. All channels to be plotted with grids must have
        `sig_units` equal to 'uV', 'mV', or 'V'. Defaults to [] (no grids).
    figsize : tuple, optional
        Tuple pair specifying the width, and height of the figure. It is the
        'figsize' argument passed into matplotlib.pyplot's `figure` function.
    return_fig : bool, optional
        Whether the figure is to be returned as an output argument.

    Returns
    -------
    figure : matplotlib figure, optional
        The matplotlib figure generated. Only returned if the 'return_fig'
        option is set to True.

    Examples
    --------
    >>> record = wfdb.rdrecord('sample-data/100', sampto=3000)
    >>> annotation = wfdb.rdann('sample-data/100', 'atr', sampto=3000)

    >>> wfdb.plot_wfdb(record=record, annotation=annotation, plot_sym=True,
                       time_units='seconds', title='MIT-BIH Record 100',
                       figsize=(10,4), ecg_grids='all')

    """
    # Use None sentinels instead of mutable default arguments ([''], ['r*'],
    # []) which are shared across calls and could be mutated downstream.
    if sig_style is None:
        sig_style = ['']
    if ann_style is None:
        ann_style = ['r*']
    if ecg_grids is None:
        ecg_grids = []

    (signal, ann_samp, ann_sym, fs,
     ylabel, record_name) = get_wfdb_plot_items(record=record,
                                                annotation=annotation,
                                                plot_sym=plot_sym)

    return plot_items(signal=signal, ann_samp=ann_samp, ann_sym=ann_sym, fs=fs,
                      time_units=time_units, ylabel=ylabel,
                      title=(title or record_name),
                      sig_style=sig_style,
                      ann_style=ann_style, ecg_grids=ecg_grids,
                      figsize=figsize, return_fig=return_fig)
This function implements the base functionality of the `plot_items`
function, while allowing direct input of wfdb objects.
If the record object is input, the function will extract from it:
- signal values, from the `p_signal` (priority) or `d_signal` attribute
- sampling frequency, from the `fs` attribute
- signal names, from the `sig_name` attribute
- signal units, from the `units` attribute
If the annotation object is input, the function will extract from it:
- sample locations, from the `sample` attribute
- symbols, from the `symbol` attribute
- the annotation channels, from the `chan` attribute
- the sampling frequency, from the `fs` attribute if present, and if fs
was not already extracted from the `record` argument.
Parameters
----------
record : wfdb Record, optional
The Record object to be plotted
annotation : wfdb Annotation, optional
The Annotation object to be plotted
plot_sym : bool, optional
Whether to plot the annotation symbols on the graph.
time_units : str, optional
The x axis unit. Allowed options are: 'samples', 'seconds',
'minutes', and 'hours'.
title : str, optional
The title of the graph.
sig_style : list, optional
A list of strings, specifying the style of the matplotlib plot
for each signal channel. The list length should match the number
of signal channels. If the list has a length of 1, the style
will be used for all channels.
ann_style : list, optional
A list of strings, specifying the style of the matplotlib plot
for each annotation channel. The list length should match the
number of annotation channels. If the list has a length of 1,
the style will be used for all channels.
ecg_grids : list, optional
A list of integers specifying channels in which to plot ecg grids. May
also be set to 'all' for all channels. Major grids at 0.5mV, and minor
grids at 0.125mV. All channels to be plotted with grids must have
`sig_units` equal to 'uV', 'mV', or 'V'.
figsize : tuple, optional
Tuple pair specifying the width, and height of the figure. It is the
'figsize' argument passed into matplotlib.pyplot's `figure` function.
return_fig : bool, optional
Whether the figure is to be returned as an output argument.
Returns
-------
figure : matplotlib figure, optional
The matplotlib figure generated. Only returned if the 'return_fig'
option is set to True.
Examples
--------
>>> record = wfdb.rdrecord('sample-data/100', sampto=3000)
>>> annotation = wfdb.rdann('sample-data/100', 'atr', sampto=3000)
>>> wfdb.plot_wfdb(record=record, annotation=annotation, plot_sym=True
time_units='seconds', title='MIT-BIH Record 100',
figsize=(10,4), ecg_grids='all') |
def removeDataset(self, dataset):
    """
    Remove the specified dataset from this repository, cascading the
    removal to every item contained in the dataset.
    """
    matching = models.Dataset.select().where(
        models.Dataset.id == dataset.getId())
    for record in matching:
        # recursive=True cascades the delete to dependent rows.
        record.delete_instance(recursive=True)
a cascading removal of all items within this dataset. |
def invariant_image_similarity(image1, image2,
                               local_search_iterations=0, metric='MI',
                               thetas=np.linspace(0,360,5),
                               thetas2=np.linspace(0,360,5),
                               thetas3=np.linspace(0,360,5),
                               scale_image=1, do_reflection=False,
                               txfn=None, transform='Affine'):
    """
    Similarity metrics between two images as a function of geometry

    Compute similarity metric between two images as image is rotated about its
    center w/ or w/o optimization

    ANTsR function: `invariantImageSimilarity`

    Arguments
    ---------
    image1 : ANTsImage
        reference image
    image2 : ANTsImage
        moving image
    local_search_iterations : integer
        integer controlling local search in multistart
    metric : string
        which metric to use
            MI
            GC
    thetas : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees
    thetas2 : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees around principal axis 2 (3D)
    thetas3 : 1D-ndarray/list/tuple
        numeric vector of search angles in degrees around principal axis 3 (3D)
    scale_image : scalar
        global scale
    do_reflection : boolean
        whether to reflect image about principal axis
    txfn : string (optional)
        if present, write optimal tx to .mat file
    transform : string
        type of transform to use
            Rigid
            Similarity
            Affine

    Returns
    -------
    (pd.DataFrame, string)
        dataframe with metric values and transformation parameters, and the
        filename of the transform written

    Example
    -------
    >>> import ants
    >>> img1 = ants.image_read(ants.get_ants_data('r16'))
    >>> img2 = ants.image_read(ants.get_ants_data('r64'))
    >>> metric = ants.invariant_image_similarity(img1,img2)
    """
    if transform not in {'Rigid', 'Similarity', 'Affine'}:
        raise ValueError('transform must be one of Rigid/Similarity/Affine')

    if image1.pixeltype != 'float':
        image1 = image1.clone('float')
    if image2.pixeltype != 'float':
        image2 = image2.clone('float')

    if txfn is None:
        txfn = mktemp(suffix='.mat')

    # Convert thetas to radians. np.asarray accepts the documented
    # list/tuple inputs, which would otherwise fail on `thetas * math.pi`.
    thetain = (np.asarray(thetas) * math.pi) / 180.
    thetain2 = (np.asarray(thetas2) * math.pi) / 180.
    thetain3 = (np.asarray(thetas3) * math.pi) / 180.

    image1 = utils.iMath(image1, 'Normalize')
    image2 = utils.iMath(image2, 'Normalize')

    idim = image1.dimension
    fpname = ['FixedParam%i' % i for i in range(1, idim + 1)]
    libfn = utils.get_lib_fn('invariantImageSimilarity_%s%iD' % (transform, idim))

    def _run(reflection_flag, out_txfn):
        # One invocation of the compiled similarity routine; wraps the raw
        # result matrix in a DataFrame with named parameter columns.
        raw = np.asarray(libfn(image1.pointer,
                               image2.pointer,
                               list(thetain),
                               list(thetain2),
                               list(thetain3),
                               local_search_iterations,
                               metric,
                               scale_image,
                               reflection_flag,
                               out_txfn))
        pnames = ['Param%i' % i for i in range(1, raw.shape[1])]
        # The trailing idim parameters are the fixed parameters.
        pnames[(len(pnames) - idim):len(pnames)] = fpname
        return pd.DataFrame(raw, columns=['MetricValue'] + pnames)

    if not do_reflection:
        return _run(0, txfn), txfn

    # With reflection enabled, try all four reflection modes and keep the
    # one with the best (lowest) metric value.
    candidates = []
    for reflection_flag in range(4):
        out_txfn = mktemp(suffix='.mat')
        candidates.append((_run(reflection_flag, out_txfn), out_txfn))
    best = int(np.argmin([df.iloc[:, 0].min() for df, _ in candidates]))
    return candidates[best]
Compute similarity metric between two images as image is rotated about its
center w/ or w/o optimization
ANTsR function: `invariantImageSimilarity`
Arguments
---------
image1 : ANTsImage
reference image
image2 : ANTsImage
moving image
local_search_iterations : integer
integer controlling local search in multistart
metric : string
which metric to use
MI
GC
thetas : 1D-ndarray/list/tuple
numeric vector of search angles in degrees
thetas2 : 1D-ndarray/list/tuple
numeric vector of search angles in degrees around principal axis 2 (3D)
thetas3 : 1D-ndarray/list/tuple
numeric vector of search angles in degrees around principal axis 3 (3D)
scale_image : scalar
global scale
do_reflection : boolean
whether to reflect image about principal axis
txfn : string (optional)
if present, write optimal tx to .mat file
transform : string
type of transform to use
Rigid
Similarity
Affine
Returns
-------
pd.DataFrame
dataframe with metric values and transformation parameters
Example
-------
>>> import ants
>>> img1 = ants.image_read(ants.get_ants_data('r16'))
>>> img2 = ants.image_read(ants.get_ants_data('r64'))
>>> metric = ants.invariant_image_similarity(img1,img2) |
def setsebools(pairs, persist=False):
    '''
    Set the value of multiple booleans

    CLI Example:

    .. code-block:: bash

        salt '*' selinux.setsebools '{virt_use_usb: on, squid_use_tproxy: off}'
    '''
    # Only dict input is supported; anything else yields an empty result.
    if not isinstance(pairs, dict):
        return {}
    base = 'setsebool -P ' if persist else 'setsebool '
    # Preserves the original command shape (including the double space
    # after the base command).
    cmd = base + ''.join(
        ' {0}={1}'.format(name, state)
        for name, state in six.iteritems(pairs))
    # retcode 0 means success, so negate it to get a boolean.
    return not __salt__['cmd.retcode'](cmd, python_shell=False)
CLI Example:
.. code-block:: bash
salt '*' selinux.setsebools '{virt_use_usb: on, squid_use_tproxy: off}' |
def run(self, arguments=None, get_unknowns=False):
    """
    Entry point to execute the script.

    If an `arguments` string is given it is split and parsed; otherwise
    sys.argv is parsed. Any inheriting class should extend the run method
    (but first calling BaseCmdLineTool.run(self)).

    :param arguments: optional argument string to parse instead of sys.argv.
    :param get_unknowns: if True, unrecognized arguments are collected in
        ``self.unknown_args`` instead of causing a parse error.
    """
    # Restore default SIGPIPE handling so piping into e.g. `head` exits
    # quietly; SIGPIPE does not exist on Windows.
    if os.name != 'nt':
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    # argparse treats args=None as "parse sys.argv".
    argv = arguments.split() if arguments else None
    if get_unknowns:
        parsed, self.unknown_args = self.argparser.parse_known_args(args=argv)
    else:
        parsed = self.argparser.parse_args(args=argv)
    self.args = vars(parsed)

    # Progress bar only makes sense when not reading stdin and not disabled.
    self.progress_bar_enabled = (not (self.args['no_progressbar'] or
                                      self.is_stdin))
def search(self,
           start_predictions: torch.Tensor,
           start_state: StateType,
           step: StepFunctionType) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Given a starting state and a step function, apply beam search to find the
    most likely target sequences.

    Notes
    -----
    If your step function returns ``-inf`` for some log probabilities
    (like if you're using a masked log-softmax) then some of the "best"
    sequences returned may also have ``-inf`` log probability. Specifically
    this happens when the beam size is smaller than the number of actions
    with finite log probability (non-zero probability) returned by the step function.
    Therefore if you're using a mask you may want to check the results from ``search``
    and potentially discard sequences with non-finite log probability.

    Parameters
    ----------
    start_predictions : ``torch.Tensor``
        A tensor containing the initial predictions with shape ``(batch_size,)``.
        Usually the initial predictions are just the index of the "start" token
        in the target vocabulary.
    start_state : ``StateType``
        The initial state passed to the ``step`` function. Each value of the state dict
        should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
        number of dimensions.
    step : ``StepFunctionType``
        A function that is responsible for computing the next most likely tokens,
        given the current state and the predictions from the last time step.
        The function should accept two arguments. The first being a tensor
        of shape ``(group_size,)``, representing the index of the predicted
        tokens from the last time step, and the second being the current state.
        The ``group_size`` will be ``batch_size * beam_size``, except in the initial
        step, for which it will just be ``batch_size``.
        The function is expected to return a tuple, where the first element
        is a tensor of shape ``(group_size, target_vocab_size)`` containing
        the log probabilities of the tokens for the next step, and the second
        element is the updated state. The tensor in the state should have shape
        ``(group_size, *)``, where ``*`` means any other number of dimensions.

    Returns
    -------
    Tuple[torch.Tensor, torch.Tensor]
        Tuple of ``(predictions, log_probabilities)``, where ``predictions``
        has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
        has shape ``(batch_size, beam_size)``.
    """
    batch_size = start_predictions.size()[0]

    # List of (batch_size, beam_size) tensors. One for each time step. Does not
    # include the start symbols, which are implicit.
    predictions: List[torch.Tensor] = []

    # List of (batch_size, beam_size) tensors. One for each time step. None for
    # the first. Stores the index n for the parent prediction, i.e.
    # predictions[t-1][i][n], that it came from.
    backpointers: List[torch.Tensor] = []

    # Calculate the first timestep. This is done outside the main loop
    # because we are going from a single decoder input (the output from the
    # encoder) to the top `beam_size` decoder outputs. On the other hand,
    # within the main loop we are going from the `beam_size` elements of the
    # beam to `beam_size`^2 candidates from which we will select the top
    # `beam_size` elements for the next iteration.
    # shape: (batch_size, num_classes)
    start_class_log_probabilities, state = step(start_predictions, start_state)

    num_classes = start_class_log_probabilities.size()[1]

    # Make sure `per_node_beam_size` is not larger than `num_classes`.
    if self.per_node_beam_size > num_classes:
        raise ConfigurationError(f"Target vocab size ({num_classes:d}) too small "
                                 f"relative to per_node_beam_size ({self.per_node_beam_size:d}).\n"
                                 f"Please decrease beam_size or per_node_beam_size.")

    # shape: (batch_size, beam_size), (batch_size, beam_size)
    start_top_log_probabilities, start_predicted_classes = \
            start_class_log_probabilities.topk(self.beam_size)
    if self.beam_size == 1 and (start_predicted_classes == self._end_index).all():
        warnings.warn("Empty sequences predicted. You may want to increase the beam size or ensure "
                      "your step function is working properly.",
                      RuntimeWarning)
        return start_predicted_classes.unsqueeze(-1), start_top_log_probabilities

    # The log probabilities for the last time step.
    # shape: (batch_size, beam_size)
    last_log_probabilities = start_top_log_probabilities

    # shape: [(batch_size, beam_size)]
    predictions.append(start_predicted_classes)

    # Log probability tensor that mandates that the end token is selected.
    # shape: (batch_size * beam_size, num_classes)
    log_probs_after_end = start_class_log_probabilities.new_full(
            (batch_size * self.beam_size, num_classes),
            float("-inf")
    )
    log_probs_after_end[:, self._end_index] = 0.

    # Set the same state for each element in the beam.
    for key, state_tensor in state.items():
        _, *last_dims = state_tensor.size()
        # shape: (batch_size * beam_size, *)
        state[key] = state_tensor.\
                unsqueeze(1).\
                expand(batch_size, self.beam_size, *last_dims).\
                reshape(batch_size * self.beam_size, *last_dims)

    for timestep in range(self.max_steps - 1):
        # shape: (batch_size * beam_size,)
        last_predictions = predictions[-1].reshape(batch_size * self.beam_size)

        # If every predicted token from the last step is `self._end_index`,
        # then we can stop early.
        if (last_predictions == self._end_index).all():
            break

        # Take a step. This get the predicted log probs of the next classes
        # and updates the state.
        # shape: (batch_size * beam_size, num_classes)
        class_log_probabilities, state = step(last_predictions, state)

        # shape: (batch_size * beam_size, num_classes)
        last_predictions_expanded = last_predictions.unsqueeze(-1).expand(
                batch_size * self.beam_size,
                num_classes
        )

        # Here we are finding any beams where we predicted the end token in
        # the previous timestep and replacing the distribution with a
        # one-hot distribution, forcing the beam to predict the end token
        # this timestep as well.
        # shape: (batch_size * beam_size, num_classes)
        cleaned_log_probabilities = torch.where(
                last_predictions_expanded == self._end_index,
                log_probs_after_end,
                class_log_probabilities
        )

        # shape (both): (batch_size * beam_size, per_node_beam_size)
        top_log_probabilities, predicted_classes = \
            cleaned_log_probabilities.topk(self.per_node_beam_size)

        # Here we expand the last log probabilities to (batch_size * beam_size, per_node_beam_size)
        # so that we can add them to the current log probs for this timestep.
        # This lets us maintain the log probability of each element on the beam.
        # shape: (batch_size * beam_size, per_node_beam_size)
        expanded_last_log_probabilities = last_log_probabilities.\
                unsqueeze(2).\
                expand(batch_size, self.beam_size, self.per_node_beam_size).\
                reshape(batch_size * self.beam_size, self.per_node_beam_size)

        # shape: (batch_size * beam_size, per_node_beam_size)
        summed_top_log_probabilities = top_log_probabilities + expanded_last_log_probabilities

        # shape: (batch_size, beam_size * per_node_beam_size)
        reshaped_summed = summed_top_log_probabilities.\
                reshape(batch_size, self.beam_size * self.per_node_beam_size)

        # shape: (batch_size, beam_size * per_node_beam_size)
        reshaped_predicted_classes = predicted_classes.\
                reshape(batch_size, self.beam_size * self.per_node_beam_size)

        # Keep only the top `beam_size` beam indices.
        # shape: (batch_size, beam_size), (batch_size, beam_size)
        restricted_beam_log_probs, restricted_beam_indices = reshaped_summed.topk(self.beam_size)

        # Use the beam indices to extract the corresponding classes.
        # shape: (batch_size, beam_size)
        restricted_predicted_classes = reshaped_predicted_classes.gather(1, restricted_beam_indices)

        predictions.append(restricted_predicted_classes)

        # shape: (batch_size, beam_size)
        last_log_probabilities = restricted_beam_log_probs

        # The beam indices come from a `beam_size * per_node_beam_size` dimension where the
        # indices with a common ancestor are grouped together. Hence
        # dividing by per_node_beam_size gives the ancestor. Floor division
        # (`//`) keeps the result a LongTensor; plain `/` is true division
        # on modern PyTorch and would yield floats, breaking the `gather`
        # calls below.
        # shape: (batch_size, beam_size)
        backpointer = restricted_beam_indices // self.per_node_beam_size

        backpointers.append(backpointer)

        # Keep only the pieces of the state tensors corresponding to the
        # ancestors created this iteration.
        for key, state_tensor in state.items():
            _, *last_dims = state_tensor.size()
            # shape: (batch_size, beam_size, *)
            expanded_backpointer = backpointer.\
                    view(batch_size, self.beam_size, *([1] * len(last_dims))).\
                    expand(batch_size, self.beam_size, *last_dims)

            # shape: (batch_size * beam_size, *)
            state[key] = state_tensor.\
                    reshape(batch_size, self.beam_size, *last_dims).\
                    gather(1, expanded_backpointer).\
                    reshape(batch_size * self.beam_size, *last_dims)

    if not torch.isfinite(last_log_probabilities).all():
        warnings.warn("Infinite log probabilities encountered. Some final sequences may not make sense. "
                      "This can happen when the beam size is larger than the number of valid (non-zero "
                      "probability) transitions that the step function produces.",
                      RuntimeWarning)

    # Reconstruct the sequences.
    # shape: [(batch_size, beam_size, 1)]
    reconstructed_predictions = [predictions[-1].unsqueeze(2)]

    # shape: (batch_size, beam_size)
    cur_backpointers = backpointers[-1]

    for timestep in range(len(predictions) - 2, 0, -1):
        # shape: (batch_size, beam_size, 1)
        cur_preds = predictions[timestep].gather(1, cur_backpointers).unsqueeze(2)

        reconstructed_predictions.append(cur_preds)

        # shape: (batch_size, beam_size)
        cur_backpointers = backpointers[timestep - 1].gather(1, cur_backpointers)

    # shape: (batch_size, beam_size, 1)
    final_preds = predictions[0].gather(1, cur_backpointers).unsqueeze(2)

    reconstructed_predictions.append(final_preds)

    # shape: (batch_size, beam_size, max_steps)
    all_predictions = torch.cat(list(reversed(reconstructed_predictions)), 2)

    return all_predictions, last_log_probabilities
most likely target sequences.
Notes
-----
If your step function returns ``-inf`` for some log probabilities
(like if you're using a masked log-softmax) then some of the "best"
sequences returned may also have ``-inf`` log probability. Specifically
this happens when the beam size is smaller than the number of actions
with finite log probability (non-zero probability) returned by the step function.
Therefore if you're using a mask you may want to check the results from ``search``
and potentially discard sequences with non-finite log probability.
Parameters
----------
start_predictions : ``torch.Tensor``
A tensor containing the initial predictions with shape ``(batch_size,)``.
Usually the initial predictions are just the index of the "start" token
in the target vocabulary.
start_state : ``StateType``
The initial state passed to the ``step`` function. Each value of the state dict
should be a tensor of shape ``(batch_size, *)``, where ``*`` means any other
number of dimensions.
step : ``StepFunctionType``
A function that is responsible for computing the next most likely tokens,
given the current state and the predictions from the last time step.
The function should accept two arguments. The first being a tensor
of shape ``(group_size,)``, representing the index of the predicted
tokens from the last time step, and the second being the current state.
The ``group_size`` will be ``batch_size * beam_size``, except in the initial
step, for which it will just be ``batch_size``.
The function is expected to return a tuple, where the first element
is a tensor of shape ``(group_size, target_vocab_size)`` containing
the log probabilities of the tokens for the next step, and the second
element is the updated state. The tensor in the state should have shape
``(group_size, *)``, where ``*`` means any other number of dimensions.
Returns
-------
Tuple[torch.Tensor, torch.Tensor]
Tuple of ``(predictions, log_probabilities)``, where ``predictions``
has shape ``(batch_size, beam_size, max_steps)`` and ``log_probabilities``
has shape ``(batch_size, beam_size)``. |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkerChannelContext for this WorkerChannelInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelContext
"""
if self._context is None:
self._context = WorkerChannelContext(
self._version,
workspace_sid=self._solution['workspace_sid'],
worker_sid=self._solution['worker_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: WorkerChannelContext for this WorkerChannelInstance
:rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_channel.WorkerChannelContext |
def multi_select(self, elements_to_select):
    """
    Multi-select any number of elements.

    The first element receives a plain click to start the selection; every
    subsequent element is added with a modifier click. The input list is
    left unmodified (the previous implementation ``pop()``-ed the caller's
    list and, despite its comment, plain-clicked the *last* element).

    :param elements_to_select: list of WebElement instances
    :return: None
    """
    if not elements_to_select:
        return
    # Plain-click the first element to start the selection.
    self.click(elements_to_select[0])
    # Modifier-click the rest to add them to the selection.
    for element in elements_to_select[1:]:
        self.multi_click(element)
:param elements_to_select: list of WebElement instances
:return: None |
def prepend_items(self, items, **kwargs):
    """Prepend data to multiple :class:`~.Item` objects.

    For each item whose operation succeeded, its ``value`` is updated
    in-place by prefixing the requested fragment.

    .. seealso:: :meth:`append_items`
    """
    result = self.prepend_multi(items, **kwargs)
    for item, spec in items.dict.items():
        if item.success:
            item.value = spec['fragment'] + item.value
    return result
.. seealso:: :meth:`append_items` |
def _table_materialize(table):
"""
Force schema resolution for a joined table, selecting all fields from
all tables.
"""
if table._is_materialized():
return table
op = ops.MaterializedJoin(table)
return op.to_expr() | Force schema resolution for a joined table, selecting all fields from
all tables. |
def before_after_apply(self, before_fn, after_fn, leaf_fn=None):
    """Applies the functions to each node in a subtree using an traversal in which
    encountered twice: once right before its descendants, and once right
    after its last descendant
    """
    # Iterative pre-order walk; ``after_fn`` fires while climbing back up
    # from the last child of each internal node.
    stack = [self]
    while stack:
        node = stack.pop()
        if node.is_leaf:
            if leaf_fn:
                leaf_fn(node)
            # Climb the ancestors whose subtrees this leaf just completed,
            # invoking the "after" callback on each of them.
            while node.is_last_child_of_parent:
                node = node._parent
                if node:
                    after_fn(node)
                else:
                    break
        else:
            before_fn(node)
            # Reversed so children come off the stack left-to-right.
            stack.extend([i for i in reversed(node._children)]) | Applies the functions to each node in a subtree using an traversal in which
encountered twice: once right before its descendants, and once right
after its last descendant |
def generate_colormap(self, colormap=None, reverse=False):
    """Use one colormap for the whole ABF. You can change it!."""
    if colormap is None:
        colormap = pylab.cm.Dark2  # default palette
    self.cm = colormap
    # TODO: make this the only colormap
    sweeps = self.sweeps
    self.colormap = [colormap(i / sweeps) for i in range(sweeps)]
    if reverse:
        self.colormap.reverse()
def get_layer_modes(subcategory):
    """Return all layer modes for an exposure or hazard, sorted by key.

    :param subcategory: Hazard or Exposure key.
    :type subcategory: str

    :returns: List of layer mode definitions.
    :rtype: list
    """
    modes = definition(subcategory)['layer_modes']
    return sorted(modes, key=lambda mode: mode['key'])
:param subcategory: Hazard or Exposure key.
:type subcategory: str
:returns: List of layer modes definition.
:rtype: list |
def pieces(self):
    """
    Number of pieces the content is split into, or ``None`` if
    :attr:`piece_size` returns ``None``.
    """
    size_per_piece = self.piece_size
    if size_per_piece is None:
        return None
    return math.ceil(self.size / size_per_piece)
returns ``None`` |
def setPalette(self, palette):
    """
    Sets the palette for this node to the given palette.  If None is
    provided, then the scene's palette will be used for this node.

    :param palette | <XNodePalette> || None
    """
    if palette is None:
        self._palette = None
    else:
        self._palette = XNodePalette(palette)
    self.setDirty()
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None |
def _is_already_configured(configuration_details):
"""Returns `True` when alias already in shell config."""
path = Path(configuration_details.path).expanduser()
with path.open('r') as shell_config:
return configuration_details.content in shell_config.read() | Returns `True` when alias already in shell config. |
def fprocess(infilep, outfilep):
    """
    Scan an input file for LA equations between double square brackets,
    e.g. [[ M3_mymatrix = M3_anothermatrix^-1 ]], and replace each expression
    with a comment containing the equation followed by nested function calls
    that implement the equation as C code.  A trailing semi-colon is appended.

    The equation within [[ ]] should NOT end with a semicolon as that will
    raise a ParseException.  However, it is ok to have a semicolon after the
    right brackets.  Other text in the file is unaltered.

    The arguments are file objects (NOT file names) opened for reading and
    writing, respectively.
    """
    eqn_re = re.compile(r'\[\[\s*(.*?)\s*\]\]', re.DOTALL)

    def replace_eqn(match):
        # Translate one bracketed equation into C code.
        equation = match.group(1)
        ccode = parse(equation)
        return "/* %s */\n%s;\nLAParserBufferReset();\n" % (equation, ccode)

    text = infilep.read()
    outfilep.write(eqn_re.sub(replace_eqn, text))
e.g. [[ M3_mymatrix = M3_anothermatrix^-1 ]], and replaces the expression
with a comment containing the equation followed by nested function calls
that implement the equation as C code. A trailing semi-colon is appended.
The equation within [[ ]] should NOT end with a semicolon as that will raise
a ParseException. However, it is ok to have a semicolon after the right brackets.
Other text in the file is unaltered.
The arguments are file objects (NOT file names) opened for reading and
writing, respectively. |
def pbkdf2(hash_algorithm, password, salt, iterations, key_length):
    """
    Implements PBKDF2 from PKCS#5 v2.2 in pure Python
    :param hash_algorithm:
        The string name of the hash algorithm to use: "md5", "sha1", "sha224",
        "sha256", "sha384", "sha512"
    :param password:
        A byte string of the password to use an input to the KDF
    :param salt:
        A cryptographic random byte string
    :param iterations:
        The numbers of iterations to use when deriving the key
    :param key_length:
        The length of the desired key in bytes
    :return:
        The derived key as a byte string
    """
    # --- Argument validation ---
    if not isinstance(password, byte_cls):
        raise TypeError(pretty_message(
            '''
            password must be a byte string, not %s
            ''',
            type_name(password)
        ))
    if not isinstance(salt, byte_cls):
        raise TypeError(pretty_message(
            '''
            salt must be a byte string, not %s
            ''',
            type_name(salt)
        ))
    if not isinstance(iterations, int_types):
        raise TypeError(pretty_message(
            '''
            iterations must be an integer, not %s
            ''',
            type_name(iterations)
        ))
    if iterations < 1:
        raise ValueError(pretty_message(
            '''
            iterations must be greater than 0 - is %s
            ''',
            repr(iterations)
        ))
    if not isinstance(key_length, int_types):
        raise TypeError(pretty_message(
            '''
            key_length must be an integer, not %s
            ''',
            type_name(key_length)
        ))
    if key_length < 1:
        raise ValueError(pretty_message(
            '''
            key_length must be greater than 0 - is %s
            ''',
            repr(key_length)
        ))
    if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
        raise ValueError(pretty_message(
            '''
            hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
            "sha384", "sha512", not %s
            ''',
            repr(hash_algorithm)
        ))
    algo = getattr(hashlib, hash_algorithm)
    # Digest size in bytes for each supported algorithm (hLen in RFC 2898).
    hash_length = {
        'md5': 16,
        'sha1': 20,
        'sha224': 28,
        'sha256': 32,
        'sha384': 48,
        'sha512': 64
    }[hash_algorithm]
    # Number of hLen-sized blocks needed to cover key_length (ceiling).
    # NOTE(review): relies on true division; under Python 2 this needs
    # ``from __future__ import division`` at module top -- confirm.
    blocks = int(math.ceil(key_length / hash_length))
    # HMAC keyed with the password is the PRF; copy() avoids re-keying.
    original_hmac = hmac.new(password, None, algo)
    int_pack = struct.Struct(b'>I').pack
    output = b''
    for block in range(1, blocks + 1):
        # U_1 = PRF(password, salt || INT_32_BE(block))
        prf = original_hmac.copy()
        prf.update(salt + int_pack(block))
        last = prf.digest()
        u = int_from_bytes(last)
        # U_i = PRF(password, U_{i-1}); T = U_1 xor U_2 xor ... xor U_c
        for _ in range(2, iterations + 1):
            prf = original_hmac.copy()
            prf.update(last)
            last = prf.digest()
            u ^= int_from_bytes(last)
        t = int_to_bytes(u)
        output += t
    # Truncate the concatenated blocks to the requested key length.
    return output[0:key_length] | Implements PBKDF2 from PKCS#5 v2.2 in pure Python
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
A byte string of the password to use an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
The numbers of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:return:
The derived key as a byte string |
def do_kpl_on(self, args):
    """Turn on a KeypadLinc button.

    Usage:
        kpl_on address group

    :param args: command-line string of the form "address group"
    """
    params = args.split()
    address = None
    group = None
    try:
        address = params[0]
        group = int(params[1])
    except IndexError:
        # Fewer than two arguments were supplied.
        _LOGGING.error("Address and group are required")
        self.do_help('kpl_on')
    except (ValueError, TypeError):
        # int() raises ValueError for non-numeric strings; the original
        # TypeError-only handler never fired for bad text input.  The help
        # topic also pointed at 'kpl_status' (copy-paste error).
        _LOGGING.error("Group must be an integer")
        self.do_help('kpl_on')
    if address and group:
        self.tools.kpl_on(address, group)
Usage:
kpl_on address group |
def is_connected(self):
    """
    Return `True` if the Xmrs represents a connected graph.
    Subgraphs can be connected through things like arguments,
    QEQs, and label equalities.
    """
    nids = set(self._nodeids)  # the nids left to find
    if len(nids) == 0:
        raise XmrsError('Cannot compute connectedness of an empty Xmrs.')
    # build a basic dict graph of relations
    edges = []
    # label connections: every pair of EPs sharing a label is connected
    for lbl in self.labels():
        lblset = self.labelset(lbl)
        edges.extend((x, y) for x in lblset for y in lblset if x != y)
    # argument connections
    _vars = self._vars
    for nid in nids:
        for rarg, tgt in self.args(nid).items():
            if tgt not in _vars:
                continue  # constant or unknown argument target
            if IVARG_ROLE in _vars[tgt]['refs']:
                # target is the intrinsic variable of some EP(s)
                tgtnids = list(_vars[tgt]['refs'][IVARG_ROLE])
            elif tgt in self._hcons:
                # target is a hole; follow the QEQ to the low label's EPs
                tgtnids = list(self.labelset(self.hcon(tgt)[2]))
            elif 'LBL' in _vars[tgt]['refs']:
                # target is a label; connect to the EPs bearing it
                tgtnids = list(_vars[tgt]['refs']['LBL'])
            else:
                tgtnids = []
            # connections are bidirectional
            edges.extend((nid, t) for t in tgtnids if nid != t)
            edges.extend((t, nid) for t in tgtnids if nid != t)
    g = {nid: set() for nid in nids}
    for x, y in edges:
        g[x].add(y)
    # Connected iff a breadth-first search reaches every nodeid.
    connected_nids = _bfs(g)
    if connected_nids == nids:
        return True
    elif connected_nids.difference(nids):
        raise XmrsError(
            'Possibly bogus nodeids: {}'
            .format(', '.join(connected_nids.difference(nids)))
        )
    return False | Return `True` if the Xmrs represents a connected graph.
Subgraphs can be connected through things like arguments,
QEQs, and label equalities. |
def contains(self, other):
    """
    Estimate whether the bounding box contains a point.

    Parameters
    ----------
    other : tuple of number or imgaug.Keypoint
        Point to check for.

    Returns
    -------
    bool
        True if the point is contained in the bounding box, False otherwise.
    """
    if isinstance(other, tuple):
        point_x, point_y = other
    else:
        point_x, point_y = other.x, other.y
    within_x = self.x1 <= point_x <= self.x2
    within_y = self.y1 <= point_y <= self.y2
    return within_x and within_y
Parameters
----------
other : tuple of number or imgaug.Keypoint
Point to check for.
Returns
-------
bool
True if the point is contained in the bounding box, False otherwise. |
def as_nonlinear(self, params=None):
    """Return a `Model` equivalent to this object.

    The nonlinear solver is less efficient, but lets you freeze parameters,
    compute uncertainties, etc.

    If the `params` argument is provided, solve() will be called on the
    returned object with those parameters.  If it is `None` and this object
    has parameters in `self.params`, those will be used.  Otherwise, solve()
    will not be called on the returned object.
    """
    solve_params = self.params if params is None else params
    model = Model(None, self.data, self.invsigma)
    evaluate = lambda coeffs, xs: npoly.polyval(xs, coeffs)
    model.set_func(evaluate, self.pnames, args=(self.x,))
    if solve_params is not None:
        model.solve(solve_params)
    return model
efficient, but lets you freeze parameters, compute uncertainties, etc.
If the `params` argument is provided, solve() will be called on the
returned object with those parameters. If it is `None` and this object
has parameters in `self.params`, those will be used. Otherwise, solve()
will not be called on the returned object. |
def prepare_read(data, method='readlines', mode='r'):
    """Prepare various input types for parsing.

    Args:
        data (iter): Data to read
        method (str): Method to process data with
        mode (str): Custom mode to process with, if data is a file

    Returns:
        list: List suitable for parsing

    Raises:
        TypeError: Invalid value for data
    """
    if hasattr(data, 'readlines'):
        # File-like object: read via the requested method.
        data = getattr(data, method)()
    elif isinstance(data, list):
        if method == 'read':
            return ''.join(data)
    elif isinstance(data, basestring):
        # Treat the string as a file name; close the handle when done
        # (the previous implementation leaked the open file object).
        with open(data, mode) as data_file:
            data = getattr(data_file, method)()
    else:
        raise TypeError('Unable to handle data of type %r' % type(data))
    return data
Args:
data (iter): Data to read
method (str): Method to process data with
mode (str): Custom mode to process with, if data is a file
Returns:
list: List suitable for parsing
Raises:
TypeError: Invalid value for data |
def convert_to_array(pmap, nsites, imtls, inner_idx=0):
    """
    Convert the probability map into a composite array with header
    of the form PGA-0.1, PGA-0.2 ...

    :param pmap: probability map
    :param nsites: total number of sites
    :param imtls: a DictArray with IMT and levels
    :returns: a composite array of length nsites
    """
    # Build the export dtype, of the form PGA-0.1, PGA-0.2 ...
    fields = [('%s-%s' % (imt, iml), F32)
              for imt, imls in imtls.items() for iml in imls]
    curves = numpy.zeros(nsites, numpy.dtype(fields))
    for sid, pcurve in pmap.items():
        row = curves[sid]
        idx = 0
        for imt, imls in imtls.items():
            for iml in imls:
                row['%s-%s' % (imt, iml)] = pcurve.array[idx, inner_idx]
                idx += 1
    return curves
of the form PGA-0.1, PGA-0.2 ...
:param pmap: probability map
:param nsites: total number of sites
:param imtls: a DictArray with IMT and levels
:returns: a composite array of length nsites
def linked(prefix):
    """Return set of canonical names of linked packages in `prefix`."""
    logger.debug(str(prefix))
    meta_dir = join(prefix, 'conda-meta')
    # A missing prefix or conda-meta directory means nothing is linked.
    if not isdir(prefix) or not isdir(meta_dir):
        return set()
    return {fn[:-5] for fn in os.listdir(meta_dir) if fn.endswith('.json')}
def create_endpoint(port=0, service_name='unknown', ipv4=None, ipv6=None):
    """Create a zipkin Endpoint object.

    An Endpoint object holds information about the network context of a span.

    :param port: int value of the port. Defaults to 0
    :param service_name: service name as a str. Defaults to 'unknown'
    :param ipv4: ipv4 host address
    :param ipv6: ipv6 host address
    :returns: thrift Endpoint object
    """
    # Convert the addresses to network byte order.
    if ipv4:
        packed_v4 = socket.inet_pton(socket.AF_INET, ipv4)
        ipv4_int = struct.unpack('!i', packed_v4)[0]
    else:
        ipv4_int = 0
    ipv6_binary = socket.inet_pton(socket.AF_INET6, ipv6) if ipv6 else None
    # Zipkin passes unsigned values in signed types because Thrift has no
    # unsigned types, so reinterpret the unsigned port as a signed short.
    signed_port = struct.unpack('h', struct.pack('H', port))[0]
    return zipkin_core.Endpoint(
        ipv4=ipv4_int,
        ipv6=ipv6_binary,
        port=signed_port,
        service_name=service_name,
    )
An Endpoint object holds information about the network context of a span.
:param port: int value of the port. Defaults to 0
:param service_name: service name as a str. Defaults to 'unknown'
:param ipv4: ipv4 host address
:param ipv6: ipv6 host address
:returns: thrift Endpoint object |
def add_input(self, name, value=None):
    '''Create a new input variable called ``name`` for this process
    and initialize it with the given ``value``.

    Quantity is accessible in two ways:

    * as a process attribute, i.e. ``proc.name``
    * as a member of the input dictionary, i.e. ``proc.input['name']``

    Use the attribute form to set values, e.g. ``proc.name = value``.

    :param str name: name of diagnostic quantity to be initialized
    :param array value: initial value for quantity [default: None]
    '''
    self._input_vars.append(name)
    setattr(self, name, value)
and initialize it with the given ``value``.
Quantity is accessible in two ways:
* as a process attribute, i.e. ``proc.name``
* as a member of the input dictionary,
i.e. ``proc.input['name']``
Use attribute method to set values, e.g.
```proc.name = value ```
:param str name: name of diagnostic quantity to be initialized
:param array value: initial value for quantity [default: None] |
def all_requests_view(request):
    '''
    Show user a list of enabled request types, the number of requests of each
    type and a link to see them all.
    '''
    # Pseudo-dictionary, actually a list with items of form
    # (request_type.name.title(), number_of_type_requests, name, enabled,
    # glyphicon)
    types_dict = list()
    for request_type in RequestType.objects.all():
        requests = Request.objects.filter(request_type=request_type)
        # Hide the count for private requests
        # (managers of this request type see everything; others only see
        # their own private requests)
        if not request_type.managers.filter(incumbent__user=request.user):
            requests = requests.exclude(
                ~Q(owner__user=request.user), private=True,
            )
        number_of_requests = requests.count()
        types_dict.append((
            request_type.name.title(), number_of_requests,
            request_type.url_name, request_type.enabled,
            request_type.glyphicon,
        ))
    return render_to_response('all_requests.html', {
        'page_name': "Archives - All Requests",
        'types_dict': types_dict,
    }, context_instance=RequestContext(request)) | Show user a list of enabled request types, the number of requests of each
type and a link to see them all. |
def fallback(message: str, ex: Exception) -> None:
    """
    Fallback procedure when a cli command fails.
    :param message: message to be logged
    :param ex: Exception which caused the failure
    """
    logging.error('%s', message)
    # Record the full details of the causing exception.
    logging.exception('%s', ex)
    # Abort the CLI with a non-zero status.
    sys.exit(1) | Fallback procedure when a cli command fails.
:param message: message to be logged
:param ex: Exception which caused the failure |
def convert_upload_string_to_file(i):
    """
    Input: {
    file_content_base64 - string transmitted through Internet
    (filename) - file name to write (if empty, generate tmp file)
    }
    Output: {
    return - return code = 0, if successful
    > 0, if error
    (error) - error text if return > 0
    filename - filename with full path
    filename_ext - filename extension
    }
    """
    import base64
    x=i['file_content_base64']
    # Convert from unicode to str since base64 works on strings;
    # should be safe in Python 2.x and 3.x.
    fc=base64.urlsafe_b64decode(str(x))
    fn=i.get('filename','')
    if fn=='':
        # No name given: ask the helper for a temporary file name.
        rx=gen_tmp_file({'prefix':'tmp-'})
        if rx['return']>0: return rx
        px=rx['file_name']
    else:
        px=fn
    fn1, fne = os.path.splitext(px)
    # NOTE(review): this existence check also applies to generated tmp
    # names and runs only after the name was chosen -- confirm intended.
    if os.path.isfile(px):
        return {'return':1, 'error':'file already exists in the current directory'}
    try:
        fx=open(px, 'wb')
        fx.write(fc)
        fx.close()
    except Exception as e:
        return {'return':1, 'error':'problem writing file='+px+' ('+format(e)+')'}
    return {'return':0, 'filename':px, 'filename_ext': fne} | Input: {
file_content_base64 - string transmitted through Internet
(filename) - file name to write (if empty, generate tmp file)
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
filename - filename with full path
filename_ext - filename extension
} |
def create(self, data, resource='data'):
    """Create an object of resource:
        * data
        * project
        * processor
        * trigger
        * template

    :param data: Object values
    :type data: dict

    :param resource: Resource name
    :type resource: string

    :raises ValueError: if ``data`` is not a dict/str or ``resource`` is
        not one of the supported names
    """
    if isinstance(data, dict):
        data = json.dumps(data)
    if not isinstance(data, str):
        # The original passed the text as an unsupported ``mgs`` keyword,
        # which made raising the error itself fail with a TypeError.
        raise ValueError('data must be dict, str or unicode')

    resource = resource.lower()
    if resource not in ('data', 'project', 'processor', 'trigger', 'template'):
        raise ValueError('resource must be data, project, processor, trigger or template')

    # The API exposes projects under the legacy "case" endpoint.
    if resource == 'project':
        resource = 'case'

    url = urlparse.urljoin(self.url, '/api/v1/{}/'.format(resource))
    return requests.post(url,
                         data=data,
                         auth=self.auth,
                         headers={
                             'cache-control': 'no-cache',
                             'content-type': 'application/json',
                             'accept': 'application/json, text/plain, */*',
                             'referer': self.url,
                         })
* data
* project
* processor
* trigger
* template
:param data: Object values
:type data: dict
:param resource: Resource name
:type resource: string |
def stsci2(hdulist, filename):
    """For STScI GEIS files, need to do extra steps."""
    # Record the output file name in the primary header for WFPC2/FOC data.
    primary_header = hdulist[0].header
    if primary_header.get('INSTRUME', '') in ("WFPC2", "FOC"):
        primary_header['FILENAME'] = filename
def remove_permission(self, queue, label):
    """
    Remove a permission from a queue.

    :type queue: :class:`boto.sqs.queue.Queue`
    :param queue: The queue object

    :type label: str or unicode
    :param label: The unique label associated with the permission
        being removed.

    :rtype: bool
    :return: True if successful, False otherwise.
    """
    return self.get_status('RemovePermission', {'Label': label}, queue.id)
:type queue: :class:`boto.sqs.queue.Queue`
:param queue: The queue object
:type label: str or unicode
:param label: The unique label associated with the permission
being removed.
:rtype: bool
:return: True if successful, False otherwise. |
def characterize_local_files(filedir, max_bytes=MAX_FILE_DEFAULT):
    """
    Collate local file info as preparation for Open Humans upload.
    Note: Files with filesize > max_bytes are not included in returned info.
    :param filedir: This field is target directory to get files from.
    :param max_bytes: This field is the maximum file size to consider. Its
    default value is 128m.
    """
    file_data = {}
    logging.info('Characterizing files in {}'.format(filedir))
    for filename in os.listdir(filedir):
        filepath = os.path.join(filedir, filename)
        file_stats = os.stat(filepath)
        # ISO-8601 timestamp from st_ctime (change time on Unix).
        creation_date = arrow.get(file_stats.st_ctime).isoformat()
        file_size = file_stats.st_size
        if file_size <= max_bytes:
            # Stream the file in 4 KiB chunks to compute its MD5.
            file_md5 = hashlib.md5()
            with open(filepath, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    file_md5.update(chunk)
            md5 = file_md5.hexdigest()
            file_data[filename] = {
                'tags': guess_tags(filename),
                'description': '',
                'md5': md5,
                'creation_date': creation_date,
            }
    return file_data | Collate local file info as preperation for Open Humans upload.
Note: Files with filesize > max_bytes are not included in returned info.
:param filedir: This field is target directory to get files from.
:param max_bytes: This field is the maximum file size to consider. Its
default value is 128m. |
def parse_shifts(self):
    """
    Parse shifts from TOI report
    :returns: self if successful else None
    """
    lx_doc = self.html_doc()
    # Each player's shift table is introduced by a "playerHeading" cell.
    pl_heads = lx_doc.xpath('//td[contains(@class, "playerHeading")]')
    for pl in pl_heads:
        sh_sum = { }
        # Heading text is of the form "NUM LAST, FIRST".
        pl_text = pl.xpath('text()')[0]
        num_name = pl_text.replace(',','').split(' ')
        sh_sum['player_num'] = int(num_name[0]) if num_name[0].isdigit() else -1
        sh_sum['player_name'] = { 'first': num_name[2], 'last': num_name[1] }
        first_shift = pl.xpath('../following-sibling::tr')[1]
        sh_sum['shifts'], last_shift = self.__player_shifts(first_shift)
        # Advance past remaining rows until the per-period summary row.
        while ('Per' not in last_shift.xpath('.//text()')):
            last_shift = last_shift.xpath('following-sibling::tr')[0]
        per_summ = last_shift.xpath('.//tr')[0]
        sh_sum['by_period'], last_sum = self.__get_by_per_summ(per_summ)
        self.by_player[sh_sum['player_num']] = sh_sum
    return self if self.by_player else None | Parse shifts from TOI report
:returns: self if successful else None
def write_languages(f, l):
    """Write language information to *f* as a Python dict literal."""
    linesep = os.linesep
    f.write("Languages = {%s" % linesep)
    for code in sorted(l):
        f.write(" %r: %r,%s" % (code, l[code], linesep))
    f.write("}%s" % linesep)
def exclude_states(omega, gamma, r, Lij, states, excluded_states):
    """Exclude states from matrices.

    Removes the rows and columns corresponding to ``excluded_states`` from
    ``omega``, ``gamma``, ``r`` (one matrix per Cartesian component) and
    ``Lij``, and returns the reduced matrices with the remaining states.
    """
    total = len(omega)
    # Indices of the states that survive the exclusion.
    keep = [i for i in range(total) if states[i] not in excluded_states]
    omega_new = [[omega[i][j] for j in keep] for i in keep]
    gamma_new = [[gamma[i][j] for j in keep] for i in keep]
    Lij_new = [[Lij[i][j] for j in keep] for i in keep]
    r_new = [[[r[p][i][j] for j in keep] for i in keep] for p in range(3)]
    states_new = [states[i] for i in keep]
    return omega_new, gamma_new, r_new, Lij_new, states_new
This function takes the matrices and excludes the states listed in
excluded_states. |
def get_available_ip6_for_vip(self, id_evip, name):
    """
    Get and save an available IPv6 address in the network for a vip request.

    :param id_evip: Vip environment identifier. Integer value and greater than zero.
    :param name: Ip description

    :return: Dictionary with an ``'ip'`` key holding the eight address
        blocks (``'bloco1'`` .. ``'bloco8'``), ``'id'``, ``'networkipv6'``
        and ``'description'`` of the allocated address.

    :raise IpNotAvailableError: Network dont have available IP for vip environment.
    :raise EnvironmentVipNotFoundError: Vip environment not registered.
    :raise UserNotAuthorizedError: User dont have permission to perform operation.
    :raise InvalidParameterError: Vip environment identifier is none or invalid.
    :raise XMLError: Networkapi failed to generate the XML response.
    :raise DataBaseError: Networkapi failed to access the database.
    """
    if not is_valid_int_param(id_evip):
        raise InvalidParameterError(
            u'Vip environment identifier is invalid or was not informed.')

    url = 'ip/availableip6/vip/' + str(id_evip) + "/"
    ip_map = {'id_evip': id_evip, 'name': name}
    code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
    return self.response(code, xml)
:param id_evip: Vip environment identifier. Integer value and greater than zero.
:param name: Ip description
:return: Dictionary with the following structure:
::
{'ip': {'bloco1':<bloco1>,
'bloco2':<bloco2>,
'bloco3':<bloco3>,
'bloco4':<bloco4>,
'bloco5':<bloco5>,
'bloco6':<bloco6>,
'bloco7':<bloco7>,
'bloco8':<bloco8>,
'id':<id>,
'networkipv6':<networkipv6>,
'description':<description>}}
:raise IpNotAvailableError: Network dont have available IP for vip environment.
:raise EnvironmentVipNotFoundError: Vip environment not registered.
:raise UserNotAuthorizedError: User dont have permission to perform operation.
:raise InvalidParameterError: Vip environment identifier is none or invalid.
:raise XMLError: Networkapi failed to generate the XML response.
:raise DataBaseError: Networkapi failed to access the database. |
def sys_dup2(self, fd, newfd):
    """
    Duplicates an open fd to newfd. If newfd is open, it is first closed
    :rtype: int
    :param fd: the open file descriptor to duplicate.
    :param newfd: the file descriptor to alias the file described by fd.
    :return: newfd.
    """
    try:
        file = self._get_fd(fd)
    except FdError as e:
        logger.info("DUP2: Passed fd is not open. Returning EBADF")
        return -e.err
    # Respect the soft RLIMIT_NOFILE bound, as the real syscall does.
    soft_max, hard_max = self._rlimits[self.RLIMIT_NOFILE]
    if newfd >= soft_max:
        logger.info("DUP2: newfd is above max descriptor table size")
        return -errno.EBADF
    if self._is_fd_open(newfd):
        self._close(newfd)
    # Grow the descriptor table if newfd lies beyond its current end.
    if newfd >= len(self.files):
        self.files.extend([None] * (newfd + 1 - len(self.files)))
    self.files[newfd] = self.files[fd]
    logger.debug('sys_dup2(%d,%d) -> %d', fd, newfd, newfd)
    return newfd | Duplicates an open fd to newfd. If newfd is open, it is first closed
:rtype: int
:param fd: the open file descriptor to duplicate.
:param newfd: the file descriptor to alias the file described by fd.
:return: newfd. |
def make_valid_string(self, string=''):
    """ Inputting a value for the first time """
    if not self.is_valid_str(string):
        # Not an acceptable name: map it to a generated internal name,
        # refusing duplicates unless the sanitizer allows them.
        if string in self.val_map and not self.allow_dups:
            raise IndexError("Value {} has already been given to the sanitizer".format(string))
        internal_name = super(_NameSanitizer, self).make_valid_string()
        self.val_map[string] = internal_name
        return internal_name
    else:
        # Already valid: optionally record the identity mapping.
        if self.map_valid:
            self.val_map[string] = string
        return string | Inputting a value for the first time
def _getUserSid(user):
    '''
    return a state error dictionary, with 'sid' as a field if it could be returned
    if user is None, sid will also be None
    '''
    ret = {}
    # Matches a user given directly as an SID string, e.g. S-1-5-18.
    sid_pattern = r'^S-1(-\d+){1,}$'
    if user and re.match(sid_pattern, user, re.I):
        try:
            sid = win32security.GetBinarySid(user)
        except Exception as e:
            ret['result'] = False
            ret['comment'] = 'Unable to obtain the binary security identifier for {0}. The exception was {1}.'.format(
                user, e)
        else:
            # Verify the SID actually maps to an account.
            try:
                win32security.LookupAccountSid('', sid)
                ret['result'] = True
                ret['sid'] = sid
            except Exception as e:
                ret['result'] = False
                ret['comment'] = 'Unable to lookup the account for the security identifier {0}. The exception was {1}.'.format(
                    user, e)
    else:
        # Resolve an account name (or None) to its SID.
        try:
            sid = win32security.LookupAccountName('', user)[0] if user else None
            ret['result'] = True
            ret['sid'] = sid
        except Exception as e:
            ret['result'] = False
            ret['comment'] = 'Unable to obtain the security identifier for {0}. The exception was {1}.'.format(
                user, e)
    return ret | return a state error dictionary, with 'sid' as a field if it could be returned
if user is None, sid will also be None |
def numSteps(self, row):
    """Return the number of steps for the parameter at index *row*."""
    return self.nStepsForParam(self._parameters[row])
index *row* will yield
def submit_form_id(self, id_):
    """
    Submit the form with the given id (used to disambiguate between
    multiple forms).
    """
    xpath = str('id("{id}")'.format(id=id_))
    form = ElementSelector(world.browser, xpath)
    assert form, "Cannot find a form with ID '{}' on the page.".format(id_)
    form.submit()
forms). |
def get_hash(self, salt, plain_password):
    """Return the hashed password: SHA-256 of salt + plain password."""
    combined = salt.encode() + plain_password.encode()
    return hashlib.sha256(combined).hexdigest()
def cigarRead(fileHandleOrFile):
    """Reads a list of pairwise alignments into a pairwise alignment structure.
    Query and target are reversed!
    """
    fileHandle = _getFileHandle(fileHandleOrFile)
    # NOTE(review): ``p`` appears unused; the actual parsing happens in
    # cigarReadFromString -- confirm before removing.
    p = re.compile("cigar:\\s+(.+)\\s+([0-9]+)\\s+([0-9]+)\\s+([\\+\\-\\.])\\s+(.+)\\s+([0-9]+)\\s+([0-9]+)\\s+([\\+\\-\\.])\\s+([^\\s]+)(\\s+(.*)\\s*)*")
    # Yield one alignment per parseable line; unparseable lines are
    # silently skipped (cigarReadFromString returns None for them).
    line = fileHandle.readline()
    while line != '':
        pA = cigarReadFromString(line)
        if pA != None:
            yield pA
        line = fileHandle.readline()
    # Only close handles we opened ourselves (a file name was passed in).
    if isinstance(fileHandleOrFile, "".__class__):
        fileHandle.close() | Reads a list of pairwise alignments into a pairwise alignment structure.
Query and target are reversed! |
def process_shells_ordered(self, shells):
    """Processing a list of shells one after the other.

    Stops at the first failing shell and returns its accumulated output.
    """
    output = []
    for shell in shells:
        entry = shell['entry']
        config = ShellConfig(script=entry['script'], title=entry['title'] if 'title' in entry else '',
                             model=shell['model'], env=shell['env'], item=shell['item'],
                             dry_run=shell['dry_run'], debug=shell['debug'], strict=shell['strict'],
                             variables=shell['variables'],
                             temporary_scripts_path=shell['temporary_scripts_path'])
        result = Adapter(self.process_shell(get_creator_by_name(shell['creator']), entry, config))
        output += result.output
        # Capture the output into a named variable if the entry asks for it.
        self.__handle_variable(entry, result.output)
        if not result.success:
            # Fail fast: later shells are not executed.
            return {'success': False, 'output': output}
    return {'success': True, 'output': output} | Processing a list of shells one after the other.
async def remove(self, device, force=False, detach=False, eject=False,
                 lock=False):
    """
    Unmount or lock the device depending on device type.
    :param device: device object, block device path or mount path
    :param bool force: recursively remove all child devices
    :param bool detach: detach the root drive
    :param bool eject: remove media from the root drive
    :param bool lock: lock the associated LUKS cleartext slave
    :returns: whether all attempted operations succeeded
    """
    device = self._find_device(device)
    if device.is_filesystem:
        # NOTE(review): ``success`` is left unbound when a loop-backed
        # filesystem is skipped here (inner condition false) -- confirm.
        if device.is_mounted or not device.is_loop or detach is False:
            success = await self.unmount(device)
    elif device.is_crypto:
        # Tear down the cleartext holder before locking the container.
        if force and device.is_unlocked:
            await self.auto_remove(device.luks_cleartext_holder, force=True)
        success = await self.lock(device)
    elif (force
          and (device.is_partition_table or device.is_drive)
          and self.is_handleable(device)):
        # Recurse into every handleable child of this drive/partition table.
        kw = dict(force=True, detach=detach, eject=eject, lock=lock)
        tasks = [
            self.auto_remove(child, **kw)
            for child in self.get_all_handleable()
            if _is_parent_of(device, child)
        ]
        results = await gather(*tasks)
        success = all(results)
    else:
        self._log.info(_('not removing {0}: unhandled device', device))
        success = False
    # if these operations work, everything is fine, we can return True:
    if lock and device.is_luks_cleartext:
        device = device.luks_cleartext_slave
        if self.is_handleable(device):
            success = await self.lock(device)
    if eject:
        success = await self.eject(device)
    if (detach or detach is None) and device.is_loop:
        # Loop devices are removed by deleting the loop, not by detaching.
        success = await self.delete(device, remove=False)
    elif detach:
        success = await self.detach(device)
    return success | Unmount or lock the device depending on device type.
:param device: device object, block device path or mount path
:param bool force: recursively remove all child devices
:param bool detach: detach the root drive
:param bool eject: remove media from the root drive
:param bool lock: lock the associated LUKS cleartext slave
:returns: whether all attempted operations succeeded |
def event_payment(self, date, time, pid, commerce_id, transaction_id, request_ip, token, webpay_server):
    """Hook invoked to record a payment event.

    The official handler appends this information to a ``TBK_EVN%Y%m%d``
    file; concrete logging handlers must override this method.
    """
    raise NotImplementedError(
        "Logging Handler must implement event_payment")
Official handler writes this information to TBK_EVN%Y%m%d file. |
def update_constants(nmrstar2cfg="", nmrstar3cfg="", resonance_classes_cfg="", spectrum_descriptions_cfg=""):
    """Update constant variables.

    Loads the bundled default JSON configuration files and then applies any
    user-supplied override files on top of them, updating the module-level
    ``NMRSTAR_CONSTANTS``, ``RESONANCE_CLASSES`` and ``SPECTRUM_DESCRIPTIONS``
    dictionaries in place.

    :param str nmrstar2cfg: Path to a user JSON file overriding NMR-STAR 2 constants.
    :param str nmrstar3cfg: Path to a user JSON file overriding NMR-STAR 3 constants.
    :param str resonance_classes_cfg: Path to a user JSON file overriding resonance classes.
    :param str spectrum_descriptions_cfg: Path to a user JSON file overriding spectrum descriptions.
    :return: None
    :rtype: :py:obj:`None`
    """
    def _load_json(filepath):
        # Read and parse a single JSON configuration file.
        with open(filepath, "r") as jsonfile:
            return json.load(jsonfile)

    this_directory = os.path.dirname(__file__)
    nmrstar_constants = {
        "2": _load_json(os.path.join(this_directory, "conf/constants_nmrstar2.json")),
        "3": _load_json(os.path.join(this_directory, "conf/constants_nmrstar3.json")),
    }
    resonance_classes = _load_json(os.path.join(this_directory, "conf/resonance_classes.json"))
    spectrum_descriptions = _load_json(os.path.join(this_directory, "conf/spectrum_descriptions.json"))

    # BUG FIX: the original opened ``nmrstar2cfg`` in the nmrstar3 and
    # resonance-classes branches below; each branch now opens its own file.
    if nmrstar2cfg:
        nmrstar_constants["2"].update(_load_json(nmrstar2cfg))
    if nmrstar3cfg:
        nmrstar_constants["3"].update(_load_json(nmrstar3cfg))
    if resonance_classes_cfg:
        resonance_classes.update(_load_json(resonance_classes_cfg))
    if spectrum_descriptions_cfg:
        spectrum_descriptions.update(_load_json(spectrum_descriptions_cfg))

    NMRSTAR_CONSTANTS.update(nmrstar_constants)
    RESONANCE_CLASSES.update(resonance_classes)
    SPECTRUM_DESCRIPTIONS.update(spectrum_descriptions)
:return: None
:rtype: :py:obj:`None` |
def make_pattern(self, pattern, listsep=','):
    """Build the regular expression for a type with this cardinality.

    .. code-block:: python

        yes_no_pattern = r"yes|no"
        many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern)

    :param pattern: Regular expression for type (as string).
    :param listsep: List separator for multiple items (as string, optional)
    :return: Regular expression pattern for type with cardinality.
    """
    if self is Cardinality.zero_or_one:
        return self.schema % pattern
    if self is Cardinality.one:
        return pattern
    # one_or_more / zero_or_more: item, separator, item
    return self.schema % (pattern, listsep, pattern)
.. code-block:: python
yes_no_pattern = r"yes|no"
many_yes_no = Cardinality.one_or_more.make_pattern(yes_no_pattern)
:param pattern: Regular expression for type (as string).
:param listsep: List separator for multiple items (as string, optional)
:return: Regular expression pattern for type with cardinality. |
def migration(self, from_ver: int, to_ver: int):
    """Decorator factory that creates and registers a migration.

    >>> manager = MigrationManager()
    >>> @manager.migration(0, 1)
    ... def migrate(conn):
    ...     pass
    """
    def decorator(migration_func):
        # Wrap the function in a Migration record and register it, then
        # hand the function back unchanged so it stays usable directly.
        self.register_migration(Migration(from_ver, to_ver, migration_func))
        return migration_func
    return decorator
>>> manager = MigrationManager()
>>> @manager.migration(0, 1)
... def migrate(conn):
... pass |
def ekgi(selidx, row, element):
    """
    Return an element of an entry in a column of integer type in a specified
    row.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgi_c.html

    :param selidx: Index of parent column in SELECT clause.
    :type selidx: int
    :param row: Row to fetch from.
    :type row: int
    :param element: Index of element, within column entry, to fetch.
    :type element: int
    :return:
        Integer element of column entry,
        Flag indicating whether column entry was null.
    :rtype: tuple
    """
    # Marshal the scalar inputs to C ints and prepare output slots.
    c_selidx = ctypes.c_int(selidx)
    c_row = ctypes.c_int(row)
    c_element = ctypes.c_int(element)
    out_value = ctypes.c_int()
    out_null = ctypes.c_int()
    out_found = ctypes.c_int()
    libspice.ekgi_c(c_selidx, c_row, c_element,
                    ctypes.byref(out_value),
                    ctypes.byref(out_null),
                    ctypes.byref(out_found))
    return out_value.value, out_null.value, bool(out_found.value)
row.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekgi_c.html
:param selidx: Index of parent column in SELECT clause.
:type selidx: int
:param row: Row to fetch from.
:type row: int
:param element: Index of element, within column entry, to fetch.
:type element: int
:return:
Integer element of column entry,
Flag indicating whether column entry was null.
:rtype: tuple |
def chemical_symbols(self):
    """Chemical symbols char [number of atom species][symbol length]."""
    raw = self.read_value("chemical_symbols")
    # Each row is a sequence of single-byte characters; join, decode and
    # strip the padding to recover one symbol per species.
    return ["".join(ch.decode("utf-8") for ch in row).strip() for row in raw]
def force_log(self, logType, message, data=None, tback=None, stdout=True, file=True):
    """
    Force logging a message of a certain logtype whether logtype level is allowed or not.

    :Parameters:
        #. logType (string): A defined logging type.
        #. message (string): Any message to log.
        #. data: Extra data forwarded to the message formatter.
        #. tback (None, str, list): Stack traceback to print and/or write to
           log file. In general, this should be traceback.extract_stack
        #. stdout (boolean): Whether to force logging to standard output.
        #. file (boolean): Whether to force logging to file.
    """
    def _flush_stream(stream):
        # Best-effort flush + fsync. Failures (closed stream, stream
        # without a real file descriptor, ...) are deliberately ignored,
        # but only ordinary exceptions are swallowed.
        # BUG FIX: the original used bare ``except:`` which also caught
        # SystemExit/KeyboardInterrupt.
        try:
            stream.flush()
        except Exception:
            pass
        try:
            os.fsync(stream.fileno())
        except Exception:
            pass

    log = self._format_message(logType=logType, message=message, data=data, tback=tback)
    if stdout:
        # Wrap the formatted message in the logtype's (prefix, suffix) pair.
        fmt = self.__logTypeFormat[logType]
        self.__log_to_stdout(fmt[0] + log + fmt[1] + "\n")
        _flush_stream(self.__stdout)
    if file:
        # log to file
        self.__log_to_file(log)
        self.__log_to_file("\n")
        _flush_stream(self.__logFileStream)
    # set last logged message (per type and overall under key -1)
    self.__lastLogged[logType] = log
    self.__lastLogged[-1] = log
:Parameters:
#. logType (string): A defined logging type.
#. message (string): Any message to log.
#. tback (None, str, list): Stack traceback to print and/or write to
log file. In general, this should be traceback.extract_stack
#. stdout (boolean): Whether to force logging to standard output.
#. file (boolean): Whether to force logging to file. |
def config_param(self, conf_alias, param):
    """
    Fetch a configuration parameter from the (locally cached) server config.

    The configuration for ``conf_alias`` is flattened once into dotted keys
    (e.g. ``"section.key"``) and cached, so repeated lookups are cheap.

    :param conf_alias: alias of the configuration blob to read
    :param param: dotted path of the parameter inside that configuration
    :return: the parameter value
    :raises KeyError: if ``param`` is not present in the configuration
    """
    data = self.data_get(conf_alias)
    flat_cache = self.__data_get_flatten_cache.get(conf_alias)
    if flat_cache is None:
        flat_cache = self.__flatten_dict(data, '', '.')
        self.__data_get_flatten_cache[conf_alias] = flat_cache
    if param not in flat_cache:
        # BUG FIX: the original message only named the config alias and
        # omitted the key that was actually missing.
        raise KeyError("Key not found: " + conf_alias + "." + param)
    return flat_cache.get(param)
:param conf_alias:
:param param:
:return: |
def _parse_attributes(self, value):
    """Parse non standard atrributes.

    Decodes the proprietary TLV-style blob into a dict of named attributes,
    deriving a battery percentage when a battery voltage is present.
    """
    from zigpy.zcl import foundation as f
    attribute_names = {
        1: BATTERY_VOLTAGE_MV,
        3: TEMPERATURE,
        4: XIAOMI_ATTR_4,
        5: XIAOMI_ATTR_5,
        6: XIAOMI_ATTR_6,
        10: PATH,
    }
    # First pass: walk the buffer, pulling out (attribute id, typed value)
    # pairs until it is exhausted.
    raw = {}
    remaining = value
    while remaining:
        attr_id = int(remaining[0])
        typed_value, remaining = f.TypeValue.deserialize(remaining[1:])
        raw[attr_id] = typed_value.value
    # Second pass: map numeric ids onto readable names, falling back to a
    # "0xff01-<id>" label for unknown ids.
    attributes = {}
    for attr_id, attr_value in raw.items():
        name = attribute_names.get(attr_id, "0xff01-" + str(attr_id))
        attributes[name] = attr_value
    if BATTERY_VOLTAGE_MV in attributes:
        attributes[BATTERY_LEVEL] = int(
            self._calculate_remaining_battery_percentage(
                attributes[BATTERY_VOLTAGE_MV]
            )
        )
    return attributes
def collect(self):
    """
    Overrides the Collector.collect method
    """
    # Handle collection time intervals correctly: on the first run, pretend
    # the previous collection happened one interval ago.
    now = int(time.time())
    interval = float(self.config['interval'])
    if not self.LastCollectTime:
        self.LastCollectTime = now - interval

    celerymon_url = "http://%s:%s/api/task/?since=%i" % (
        self.config['host'], self.config['port'], self.LastCollectTime)
    celery_data = json.loads(urllib2.urlopen(celerymon_url).read())

    # Tally tasks per (name, state).
    counts = dict()
    total_messages = 0
    for entry in celery_data:
        task_name = str(entry[1]['name'])
        task_state = str(entry[1]['state'])
        state_counts = counts.setdefault(task_name, dict())
        state_counts[task_state] = state_counts.get(task_state, 0) + 1
        total_messages += 1

    # Publish Metric
    self.publish('total_messages', total_messages)
    for task_name in counts:
        for task_state in counts[task_name]:
            self.publish("%s.%s" % (task_name, task_state),
                         counts[task_name][task_state])
    self.LastCollectTime = now
def execute_dry_run(self, dialect=None, billing_tier=None):
    """Dry run a query, to check the validity of the query and return some useful statistics.

    Args:
      dialect : {'legacy', 'standard'}, default 'legacy'
          'legacy' : Use BigQuery's legacy SQL dialect.
          'standard' : Use BigQuery's standard SQL (beta), which is
          compliant with the SQL 2011 standard.
      billing_tier: Limits the billing tier for this job. Queries that have resource
          usage beyond this tier will fail (without incurring a charge). If unspecified, this
          will be set to your project default. This can also be used to override your
          project-wide default billing tier on a per-query basis.
    Returns:
      A dict with 'cacheHit' and 'totalBytesProcessed' fields.
    Raises:
      An exception if the query was malformed.
    """
    # BUG FIX: the original wrapped this call in
    # ``try/except Exception as e: raise e`` which only re-raised the same
    # exception (mangling the traceback in Python 2); let it propagate.
    query_result = self._api.jobs_insert_query(
        self._sql, self._code, self._imports,
        dry_run=True,
        table_definitions=self._external_tables,
        dialect=dialect, billing_tier=billing_tier)
    return query_result['statistics']['query']
Args:
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
A dict with 'cacheHit' and 'totalBytesProcessed' fields.
Raises:
An exception if the query was malformed. |
def log_stats(self):
    """Output the stats to the LOGGER."""
    counts = self.stats.get('counts')
    if not counts:
        # Nothing came back; only worth noting if children exist.
        if self.consumers:
            LOGGER.info('Did not receive any stats data from children')
        return
    unresponsive = self.poll_data['processes']
    if unresponsive:
        LOGGER.warning('%i process(es) did not respond with stats: %r',
                       len(unresponsive), unresponsive)
    if counts['processes'] > 1:
        LOGGER.info('%i consumers processed %i messages with %i errors',
                    counts['processes'], counts['processed'],
                    counts['failed'])
    for name, consumer in self.stats['consumers'].items():
        LOGGER.info('%i %s %s processed %i messages with %i errors',
                    consumer['processes'], name,
                    self.consumer_keyword(consumer),
                    consumer['processed'], consumer['failed'])
def from_headers(strheader):
    """Parse cookie data from a string in HTTP header (RFC 2616) format.

    @return: list of cookies
    @raises: ValueError for incomplete or invalid data
    """
    cookies = []
    headers = httplib.HTTPMessage(StringIO(strheader), seekable=True)
    if "Host" not in headers:
        raise ValueError("Required header 'Host:' missing")
    host = headers["Host"]
    path = headers.get("Path", "/")
    for header in headers.getallmatchingheaders("Set-Cookie"):
        # Strip the "Set-Cookie:" prefix, keep the raw value.
        headervalue = header.split(':', 1)[1]
        for pairs in cookielib.split_header_words([headervalue]):
            for name, value in pairs:
                cookies.append(requests.cookies.create_cookie(
                    name, value, domain=host, path=path))
    return cookies
@return: list of cookies
@raises: ValueError for incomplete or invalid data |
def affected_start(self):
    """Return affected start position in 0-based coordinates

    For SNVs, MNVs, and deletions, the behaviour is the start position.
    In the case of insertions, the position behind the insert position is
    returned, yielding a 0-length interval together with
    :py:meth:`~Record.affected_end`
    """
    alt_types = {alt.type for alt in self.ALT}
    # The original guard (non-empty intersection with {INS, SV, BND,
    # SYMBOLIC}, exactly one type, and that type being INS) reduces to:
    # every alternative allele is an insertion.
    if alt_types == {INS}:
        # Only insertions: 0-based position right of the first base.
        return self.POS
    # Otherwise: 0-based start position of the first REF base.
    return self.POS - 1
For SNVs, MNVs, and deletions, the behaviour is the start position.
In the case of insertions, the position behind the insert position is
returned, yielding a 0-length interval together with
:py:meth:`~Record.affected_end` |
def main():
    """Sanitizes the loaded *.ipynb."""
    path = sys.argv[1]
    with open(path, 'r') as nbfile:
        notebook = json.load(nbfile)
    # Drop the kernelspec (virtualenv-specific kernel names).
    notebook.get('metadata', {}).pop('kernelspec', None)
    # Strip outputs and metadata, reset execution counts on code cells.
    for cell in notebook['cells']:
        if cell.get('cell_type') == 'code':
            cell['outputs'] = []
            cell['execution_count'] = None
            cell['metadata'] = {}
    with open(path, 'w') as nbfile:
        json.dump(notebook, nbfile, indent=1)
def limit(self, limit):
    """ Limit the number of rows returned from the database.

    :param limit: The number of rows to return in the recipe. 0 will
       return all rows.
    :type limit: int
    """
    # Only mark the recipe dirty when the limit actually changes.
    if limit != self._limit:
        self._limit = limit
        self.dirty = True
    return self
:param limit: The number of rows to return in the recipe. 0 will
return all rows.
:type limit: int |
def evaluate(self, data):
    """Evaluate the code needed to compute a given Data object.

    Executes the workflow program attached to ``data.process``: for each
    step it resolves the referenced process (latest version by slug),
    evaluates the step's input expressions with the configured expression
    engine, creates a child ``Data`` object, records the dependency and
    copies permissions and collections. Finally the ids of all created
    objects are stored in ``data.output`` and the workflow is marked done.

    :param data: workflow ``Data`` object whose process defines the program
    :raises ExecutionError: if the program or any step definition is invalid
    """
    expression_engine = data.process.requirements.get('expression-engine', None)
    if expression_engine is not None:
        expression_engine = self.get_expression_engine(expression_engine)
    # Parse steps.
    steps = data.process.run.get('program', None)
    if steps is None:
        return
    if not isinstance(steps, list):
        raise ExecutionError('Workflow program must be a list of steps.')
    # Expression engine evaluation context.
    context = {
        'input': data.input,
        'steps': collections.OrderedDict(),
    }
    for index, step in enumerate(steps):
        try:
            step_id = step['id']
            step_slug = step['run']
        except KeyError as error:
            raise ExecutionError('Incorrect definition of step "{}", missing property "{}".'.format(
                step.get('id', index), error
            ))
        # Fetch target process.
        process = Process.objects.filter(slug=step_slug).order_by('-version').first()
        if not process:
            raise ExecutionError('Incorrect definition of step "{}", invalid process "{}".'.format(
                step_id, step_slug
            ))
        # Process all input variables.
        step_input = step.get('input', {})
        if not isinstance(step_input, dict):
            raise ExecutionError('Incorrect definition of step "{}", input must be a dictionary.'.format(
                step_id
            ))
        data_input = self._evaluate_expressions(expression_engine, step_id, step_input, context)
        # Create the data object.
        data_object = Data.objects.create(
            process=process,
            contributor=data.contributor,
            tags=data.tags,
            input=data_input,
        )
        DataDependency.objects.create(
            parent=data,
            child=data_object,
            kind=DataDependency.KIND_SUBPROCESS,
        )
        # Copy permissions.
        copy_permissions(data, data_object)
        # Copy collections.
        for collection in data.collection_set.all():
            collection.data.add(data_object)
        # Make this step's result available to later steps' expressions.
        context['steps'][step_id] = data_object.pk
    # Immediately set our status to done and output all data object identifiers.
    data.output = {
        'steps': list(context['steps'].values()),
    }
    data.status = Data.STATUS_DONE
    # NOTE(review): no ``data.save()`` here — presumably the caller persists
    # the updated object; confirm against call sites.
def delete_snapshot_range(self, start_id, end_id):
    """Starts deleting the specified snapshot range. This is limited to
    linear snapshot lists, which means there may not be any other child
    snapshots other than the direct sequence between the start and end
    snapshot. If the start and end snapshot point to the same snapshot this
    method is completely equivalent to :py:func:`delete_snapshot` . See
    :py:class:`ISnapshot` for an introduction to snapshots. The
    conditions and many details are the same as with
    :py:func:`delete_snapshot` .

    This operation is generally faster than deleting snapshots one by one
    and often also needs less extra disk space before freeing up disk space
    by deleting the removed disk images corresponding to the snapshot.

    This API method is right now not implemented!

    in start_id of type str
        UUID of the first snapshot to delete.

    in end_id of type str
        UUID of the last snapshot to delete.

    return progress of type :class:`IProgress`
        Progress object to track the operation completion.

    raises :class:`VBoxErrorInvalidVmState`
        The running virtual machine prevents deleting this snapshot. This
        happens only in very specific situations, usually snapshots can be
        deleted without trouble while a VM is running. The error message
        text explains the reason for the failure.

    raises :class:`OleErrorNotimpl`
        The method is not implemented yet.
    """
    # ``basestring``: this binding targets Python 2 (accepts str or unicode).
    if not isinstance(start_id, basestring):
        raise TypeError("start_id can only be an instance of type basestring")
    if not isinstance(end_id, basestring):
        raise TypeError("end_id can only be an instance of type basestring")
    # Delegate to the underlying managed-object call and wrap the raw
    # progress handle in the IProgress interface class.
    progress = self._call("deleteSnapshotRange",
                          in_p=[start_id, end_id])
    progress = IProgress(progress)
    return progress
linear snapshot lists, which means there may not be any other child
snapshots other than the direct sequence between the start and end
snapshot. If the start and end snapshot point to the same snapshot this
method is completely equivalent to :py:func:`delete_snapshot` . See
:py:class:`ISnapshot` for an introduction to snapshots. The
conditions and many details are the same as with
:py:func:`delete_snapshot` .
This operation is generally faster than deleting snapshots one by one
and often also needs less extra disk space before freeing up disk space
by deleting the removed disk images corresponding to the snapshot.
This API method is right now not implemented!
in start_id of type str
UUID of the first snapshot to delete.
in end_id of type str
UUID of the last snapshot to delete.
return progress of type :class:`IProgress`
Progress object to track the operation completion.
raises :class:`VBoxErrorInvalidVmState`
The running virtual machine prevents deleting this snapshot. This
happens only in very specific situations, usually snapshots can be
deleted without trouble while a VM is running. The error message
text explains the reason for the failure.
raises :class:`OleErrorNotimpl`
The method is not implemented yet. |
def OpenMessageDialog(self, Username, Text=u''):
    """Opens "Send an IM Message" dialog.

    :Parameters:
      Username : str
        Message target.
      Text : unicode
        Message text.
    """
    # Coerce the text to unicode before handing it to the generic dialog.
    message_text = tounicode(Text)
    self.OpenDialog('IM', Username, message_text)
:Parameters:
Username : str
Message target.
Text : unicode
Message text. |
def get_graph_url(self, target, graphite_url=None):
    """Get Graphite URL.

    Builds the (non-raw) render URL for the given target, optionally
    against an explicit Graphite base URL.
    """
    rendered = self._graphite_url(target, graphite_url=graphite_url,
                                  raw_data=False)
    return rendered
def image_present(name, visibility='public', protected=None,
                  checksum=None, location=None, disk_format='raw', wait_for=None,
                  timeout=30):
    '''
    Checks if given image is present with properties
    set as specified.

    An image should got through the stages 'queued', 'saving'
    before becoming 'active'. The attribute 'checksum' can
    only be checked once the image is active.
    If you don't specify 'wait_for' but 'checksum' the function
    will wait for the image to become active before comparing
    checksums. If you don't specify checksum either the function
    will return when the image reached 'saving'.
    The default timeout for both is 30 seconds.

    Supported properties:
      - visibility ('public' or 'private')
      - protected (bool)
      - checksum (string, md5sum)
      - location (URL, to copy from)
      - disk_format ('raw' (default), 'vhd', 'vhdx', 'vmdk', 'vdi', 'iso',
        'qcow2', 'aki', 'ari' or 'ami')
    '''
    # Standard Salt state return structure.
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': '',
           }
    acceptable = ['queued', 'saving', 'active']
    # Derive 'wait_for' from 'checksum': verifying a checksum requires the
    # image to be 'active'; otherwise reaching 'saving' is enough.
    if wait_for is None and checksum is None:
        wait_for = 'saving'
    elif wait_for is None and checksum is not None:
        wait_for = 'active'
    # Just pop states until we reach the
    # first acceptable one:
    while len(acceptable) > 1:
        if acceptable[0] == wait_for:
            break
        else:
            acceptable.pop(0)
    image, msg = _find_image(name)
    if image is False:
        # _find_image() signalled an error (msg explains it).
        if __opts__['test']:
            ret['result'] = None
        else:
            ret['result'] = False
        ret['comment'] = msg
        return ret
    log.debug(msg)
    # No image yet and we know where to get one
    if image is None and location is not None:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'glance.image_present would ' \
                'create an image from {0}'.format(location)
            return ret
        image = __salt__['glance.image_create'](name=name,
            protected=protected, visibility=visibility,
            location=location, disk_format=disk_format)
        log.debug('Created new image:\n%s', image)
        ret['changes'] = {
            name:
                {
                    'new':
                        {
                            'id': image['id']
                        },
                    'old': None
                }
            }
        timer = timeout
        # Kinda busy-loopy but I don't think the Glance
        # API has events we can listen for
        while timer > 0:
            if 'status' in image and \
                    image['status'] in acceptable:
                log.debug('Image %s has reached status %s',
                          image['name'], image['status'])
                break
            else:
                timer -= 5
                time.sleep(5)
                image, msg = _find_image(name)
                if not image:
                    ret['result'] = False
                    ret['comment'] += 'Created image {0} '.format(
                        name) + ' vanished:\n' + msg
                    return ret
        if timer <= 0 and image['status'] not in acceptable:
            ret['result'] = False
            ret['comment'] += 'Image didn\'t reach an acceptable '+\
                'state ({0}) before timeout:\n'.format(acceptable)+\
                '\tLast status was "{0}".\n'.format(image['status'])
    # There's no image but where would I get one??
    # NOTE(review): this branch also fires when the image already EXISTS
    # and no location was given, returning early and skipping the property
    # checks below — likely intended ``image is None and location is None``;
    # confirm against upstream.
    elif location is None:
        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'No location to copy image from specified,\n' +\
                'glance.image_present would not create one'
        else:
            ret['result'] = False
            ret['comment'] = 'No location to copy image from specified,\n' +\
                'not creating a new image.'
        return ret
    # If we've created a new image also return its last status:
    if name in ret['changes']:
        ret['changes'][name]['new']['status'] = image['status']
    if visibility:
        if image['visibility'] != visibility:
            old_value = image['visibility']
            if not __opts__['test']:
                image = __salt__['glance.image_update'](
                    id=image['id'], visibility=visibility)
            # Check if image_update() worked:
            if image['visibility'] != visibility:
                if not __opts__['test']:
                    ret['result'] = False
                elif __opts__['test']:
                    ret['result'] = None
                ret['comment'] += '"visibility" is {0}, '\
                    'should be {1}.\n'.format(image['visibility'],
                        visibility)
            else:
                if 'new' in ret['changes']:
                    ret['changes']['new']['visibility'] = visibility
                else:
                    ret['changes']['new'] = {'visibility': visibility}
                if 'old' in ret['changes']:
                    ret['changes']['old']['visibility'] = old_value
                else:
                    ret['changes']['old'] = {'visibility': old_value}
        else:
            ret['comment'] += '"visibility" is correct ({0}).\n'.format(
                visibility)
    if protected is not None:
        # XOR flags a mismatch; a non-bool 'protected' is always a failure.
        if not isinstance(protected, bool) or image['protected'] ^ protected:
            if not __opts__['test']:
                ret['result'] = False
            else:
                ret['result'] = None
            ret['comment'] += '"protected" is {0}, should be {1}.\n'.format(
                image['protected'], protected)
        else:
            ret['comment'] += '"protected" is correct ({0}).\n'.format(
                protected)
    if 'status' in image and checksum:
        if image['status'] == 'active':
            if 'checksum' not in image:
                # Refresh our info about the image
                image = __salt__['glance.image_show'](image['id'])
            if 'checksum' not in image:
                if not __opts__['test']:
                    ret['result'] = False
                else:
                    ret['result'] = None
                ret['comment'] += 'No checksum available for this image:\n' +\
                    '\tImage has status "{0}".'.format(image['status'])
            elif image['checksum'] != checksum:
                if not __opts__['test']:
                    ret['result'] = False
                else:
                    ret['result'] = None
                ret['comment'] += '"checksum" is {0}, should be {1}.\n'.format(
                    image['checksum'], checksum)
            else:
                ret['comment'] += '"checksum" is correct ({0}).\n'.format(
                    checksum)
        elif image['status'] in ['saving', 'queued']:
            ret['comment'] += 'Checksum won\'t be verified as image ' +\
                'hasn\'t reached\n\t "status=active" yet.\n'
    log.debug('glance.image_present will return: %s', ret)
    return ret
set as specified.
An image should got through the stages 'queued', 'saving'
before becoming 'active'. The attribute 'checksum' can
only be checked once the image is active.
If you don't specify 'wait_for' but 'checksum' the function
will wait for the image to become active before comparing
checksums. If you don't specify checksum either the function
will return when the image reached 'saving'.
The default timeout for both is 30 seconds.
Supported properties:
- visibility ('public' or 'private')
- protected (bool)
- checksum (string, md5sum)
- location (URL, to copy from)
- disk_format ('raw' (default), 'vhd', 'vhdx', 'vmdk', 'vdi', 'iso',
'qcow2', 'aki', 'ari' or 'ami') |
def runMultiplePass(df, model, nMultiplePass, nTrain):
  """
  Run the CLA model through data records 0:nTrain, nMultiplePass times.

  :param df: DataFrame holding the input records
  :param model: OPF model instance (with a configured predicted field)
  :param nMultiplePass: number of passes over the training slice
  :param nTrain: number of records per pass
  :returns: the trained model
  """
  predictedField = model.getInferenceArgs()['predictedField']
  print "run TM through the train data multiple times"
  for nPass in xrange(nMultiplePass):
    for j in xrange(nTrain):
      inputRecord = getInputRecord(df, predictedField, j)
      result = model.run(inputRecord)
      if j % 100 == 0:
        print " pass %i, record %i" % (nPass, j)
    # reset temporal memory between passes
    model._getTPRegion().getSelf()._tfdr.reset()
  return model
def change_default_radii(def_map):
    """Change the default radii

    :param def_map: mapping from atom type to the new default radius
    """
    system = current_system()
    representation = current_representation()
    # One radius per atom, looked up by the atom's type.
    new_defaults = [def_map[atom_type] for atom_type in system.type_array]
    representation.radii_state.default = new_defaults
    representation.radii_state.reset()
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  # pylint: disable=unused-argument
  """Function to get LoggingTensorHook.

  Args:
    every_n_iter: `int`, print the values of `tensors` once every N local
      steps taken on the current worker.
    tensors_to_log: List of tensor names or dictionary mapping labels to tensor
      names. If not set, log _TENSORS_TO_LOG by default.
    **kwargs: a dictionary of arguments to LoggingTensorHook.

  Returns:
    Returns a LoggingTensorHook with a standard set of tensors that will be
    printed to stdout.
  """
  tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
  return tf.train.LoggingTensorHook(tensors=tensors,
                                    every_n_iter=every_n_iter)
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout. |
def time(self, pattern='%H:%M:%S', end_datetime=None):
    """
    Get a time string (24h format by default)
    :param pattern format
    :example '15:02:34'
    """
    moment = self.date_time(end_datetime=end_datetime)
    return moment.time().strftime(pattern)
:param pattern format
:example '15:02:34' |
def make_subdirs(self):
    """Create, if necessary, all of the subdirectories leading up to the
    file about to be written."""
    # Pull off everything below the root.
    relative = self.full_path[len(self.context.root):]
    log.debug("make_subdirs: subpath is %s", relative)
    # Split on directory separators, but drop the last one, as it should
    # be the filename.
    directories = relative.split(os.sep)[:-1]
    log.debug("dirs is %s", directories)
    current = self.context.root
    for directory in directories:
        if not directory:
            continue
        current = os.path.join(current, directory)
        if os.path.isdir(current):
            log.debug("%s is already an existing directory", current)
        else:
            # Directories are created private to the owner.
            os.mkdir(current, 0o700)
subdirectories leading up to the file to the written. |
def and_(cls, obj, **kwargs):
    """Query an object

    :param obj:
        object to test
    :param kwargs: query specified in kwargssql
    :return:
        `True` if all `kwargs` expression are `True`, `False` otherwise.
    :rtype: bool
    """
    # Fold every kwargssql expression together with a logical AND.
    result = cls.__eval_seqexp(obj, operator.and_, **kwargs)
    return result
:param obj:
object to test
:param kwargs: query specified in kwargssql
:return:
`True` if all `kwargs` expression are `True`, `False` otherwise.
:rtype: bool |
def sender(self, jid: str):
    """
    Set jid of the sender

    Args:
        jid (str): jid of the sender
    """
    if jid is None:
        # Explicitly clear the sender.
        self._sender = None
        return
    if not isinstance(jid, str):
        raise TypeError("'sender' MUST be a string")
    self._sender = aioxmpp.JID.fromstr(jid)
Args:
jid (str): jid of the sender |
def versionString(version):
    """Create version string.

    For a sequence containing version information such as (2, 0, 0, 'pre'),
    this returns a printable string such as '2.0pre'.
    The micro version number is only excluded from the string if it is zero.
    """
    parts = [str(component) for component in version]
    # Keep major.minor, and the micro number only when it is non-zero.
    keep = 2 if parts[2] == '0' else 3
    return '.'.join(parts[:keep]) + '-'.join(parts[3:])
For a sequence containing version information such as (2, 0, 0, 'pre'),
this returns a printable string such as '2.0pre'.
The micro version number is only excluded from the string if it is zero. |
def disambiguate_text(self, text, language=None, entities=None):
    """ Call the disambiguation service in order to get meanings.

    Args:
        text (str): Text to be disambiguated.
        language (str): language of text (if known)
        entities (list): list of entities or mentions to be supplied by
            the user.

    Returns:
        dict, int: API response and API status.
    """
    query = {
        "text": text,
        "entities": entities if entities else [],
        "onlyNER": "false",
        "customisation": "generic",
    }
    if language:
        query['language'] = {"lang": language}
    response, status = self._process_query(query)
    if status != 200:
        logger.debug('Disambiguation failed.')
    return response, status
Args:
text (str): Text to be disambiguated.
language (str): language of text (if known)
entities (list): list of entities or mentions to be supplied by
the user.
Returns:
dict, int: API response and API status. |
def pubsub_sub(self, topic, discover=False, **kwargs):
    """Subscribe to messages on a given topic.

    Subscribing to a topic in IPFS means anytime
    a message is published to a topic, the subscribers
    will be notified of the publication.

    The connection with the pubsub topic is opened and read.
    The Subscription returned should be used inside a context
    manager to ensure that it is closed properly and not left
    hanging.

    .. code-block:: python

        >>> sub = c.pubsub_sub('testing')
        >>> with c.pubsub_sub('testing') as sub:
        # publish a message 'hello' to the topic 'testing'
        ... c.pubsub_pub('testing', 'hello')
        ... for message in sub:
        ...     print(message)
        ...     # Stop reading the subscription after
        ...     # we receive one publication
        ...     break
        {'from': '<base64encoded IPFS id>',
         'data': 'aGVsbG8=',
         'topicIDs': ['testing']}

        # NOTE: in order to receive published data
        # you must already be subscribed to the topic at publication
        # time.

    Parameters
    ----------
    topic : str
        Name of a topic to subscribe to
    discover : bool
        Try to discover other peers subscribed to the same topic
        (defaults to False)

    Returns
    -------
        Generator wrapped in a context
        manager that maintains a connection
        stream to the given topic.
    """
    # The daemon expects (topic, discover) as positional API arguments;
    # the response is a long-lived JSON stream wrapped by SubChannel.
    args = (topic, discover)
    return SubChannel(self._client.request('/pubsub/sub', args,
                                           stream=True, decoder='json'))
def recruit(self):
    """Recruit participants to the experiment as needed.

    This method runs whenever a participant successfully completes the
    experiment (participants who fail to finish successfully are
    automatically replaced). By default it recruits 1 participant at a time
    until all networks are full.
    """
    # Guard clause: while any network still has room, do nothing here.
    if self.networks(full=False):
        return
    self.log("All networks full: closing recruitment", "-----")
    self.recruiter.close_recruitment()
def displayMousePosition(xOffset=0, yOffset=0):
    """This function is meant to be run from the command line. It will
    automatically display the location and RGB of the mouse cursor.

    Runs until interrupted with Ctrl-C. ``xOffset``/``yOffset`` shift the
    *displayed* coordinates (useful when a region of the screen is treated
    as the origin).
    """
    print('Press Ctrl-C to quit.')
    if xOffset != 0 or yOffset != 0:
        print('xOffset: %s yOffset: %s' % (xOffset, yOffset))
    resolution = size()
    try:
        while True:
            # Get and print the mouse coordinates.
            x, y = position()
            positionStr = 'X: ' + str(x - xOffset).rjust(4) + ' Y: ' + str(y - yOffset).rjust(4)
            if (x - xOffset) < 0 or (y - yOffset) < 0 or (x - xOffset) >= resolution[0] or (y - yOffset) >= resolution[1]:
                # Offset-adjusted position is off-screen; no pixel to sample.
                pixelColor = ('NaN', 'NaN', 'NaN')
            else:
                # NOTE(review): bounds are checked on the offset-adjusted
                # coordinates, but the pixel is sampled at the raw (x, y).
                # Confirm this is intended when offsets are nonzero.
                pixelColor = pyscreeze.screenshot().getpixel((x, y))
            positionStr += ' RGB: (' + str(pixelColor[0]).rjust(3)
            positionStr += ', ' + str(pixelColor[1]).rjust(3)
            positionStr += ', ' + str(pixelColor[2]).rjust(3) + ')'
            sys.stdout.write(positionStr)
            # Backspace over what was just printed so the next iteration
            # overwrites the same terminal line.
            sys.stdout.write('\b' * len(positionStr))
            sys.stdout.flush()
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        sys.stdout.flush()
def get_identifier(self, origin=None):
    """Read the next token, requiring it to be an identifier.

    @raises dns.exception.SyntaxError:
    @rtype: string
    """
    tok = self.get().unescape()
    if tok.is_identifier():
        return tok.value
    raise dns.exception.SyntaxError('expecting an identifier')
def start_wsgi_server(port, addr='', registry=REGISTRY):
    """Starts a WSGI server for prometheus metrics as a daemon thread."""
    server = make_server(addr, port, make_wsgi_app(registry),
                         handler_class=_SilentHandler)
    worker = threading.Thread(target=server.serve_forever)
    # Daemonize so the metrics server never blocks interpreter shutdown.
    worker.daemon = True
    worker.start()
def remove_tag(tag_name, string):
    """
    Remove open and close tags - the tags themselves only - for the given
    tag name, using a non-greedy angle bracket pattern match.

    Fixes two defects of the previous pattern '</?tag.*?>':
    - the tag name was not escaped, so regex metacharacters in it broke
      (or widened) the match;
    - the name was not anchored, so removing tag 'b' also stripped
      unrelated tags such as '<br>'.
    """
    if not string:
        return string
    # \b anchors the tag name so 'b' does not also match 'br'; [^>]* keeps
    # the match inside a single tag.
    pattern = re.compile(r'</?' + re.escape(tag_name) + r'\b[^>]*>')
    return pattern.sub('', string)
def beautify(self, string):
    """
    Wraps together all actions needed to beautify a string, i.e.
    parse the string and then stringify the phrases (replace tags
    with formatting codes).

    Arguments:
        string (str): The string to beautify/parse.
    Returns:
        The parsed, stringified and ultimately beautified string.
    Raises:
        errors.ArgumentError if phrases were found, but not a single style
        (flag combination) was supplied.
    """
    if not string:
        return string
    # Parsing may alter the string (escaped characters are resolved).
    parsed, phrases = self.parse(string)
    if not phrases:
        return parsed
    if not (self.positional or self.always):
        raise errors.ArgumentError("Found phrases, but no styles "
                                   "were supplied!")
    return self.stringify(parsed, phrases)
def gcs_get_file(bucketname,
                 filename,
                 local_file,
                 altexts=None,
                 client=None,
                 service_account_json=None,
                 raiseonfail=False):
    """This gets a single file from a Google Cloud Storage bucket.

    Parameters
    ----------
    bucketname : str
        The name of the GCS bucket to download the file from.
    filename : str
        The full name of the file to download, including all prefixes.
    local_file : str
        Path to where the downloaded file will be stored.
    altexts : None or list of str
        If not None, this is a list of alternate extensions to try for the file
        other than the one provided in `filename`. For example, to get anything
        that's an .sqlite where .sqlite.gz is expected, use altexts=[''] to
        strip the .gz.
    client : google.cloud.storage.Client instance
        The instance of the Client to use to perform the download operation. If
        this is None, a new Client will be used. If this is None and
        `service_account_json` points to a downloaded JSON file with GCS
        credentials, a new Client with the provided credentials will be used. If
        this is not None, the existing Client instance will be used.
    service_account_json : str
        Path to a downloaded GCS credentials JSON file.
    raiseonfail : bool
        If True, will re-raise whatever Exception caused the operation to fail
        and break out immediately.

    Returns
    -------
    str
        Path to the downloaded filename or None if the download was
        unsuccessful.
    """
    if not client:
        if (service_account_json is not None and
                os.path.exists(service_account_json)):
            client = storage.Client.from_service_account_json(
                service_account_json
            )
        else:
            client = storage.Client()

    try:
        bucket = client.get_bucket(bucketname)
        blob = bucket.get_blob(filename)
        blob.download_to_filename(local_file)
        return local_file

    except Exception:

        # FIX: guard against altexts=None (the old code iterated it
        # unconditionally here and raised TypeError).
        for alt_extension in (altexts or []):

            split_ext = os.path.splitext(filename)
            check_file = split_ext[0] + alt_extension

            try:
                # FIX: look up by bucket *name*; the old code passed the
                # Bucket object back into get_bucket().
                bucket = client.get_bucket(bucketname)
                blob = bucket.get_blob(check_file)
                blob.download_to_filename(
                    local_file.replace(split_ext[-1], alt_extension)
                )
                return local_file.replace(split_ext[-1], alt_extension)
            except Exception:
                # best-effort: fall through to the next alternate extension
                pass

        # every attempt failed
        LOGEXCEPTION('could not download gs://%s/%s' % (bucketname, filename))
        if raiseonfail:
            raise

        return None
def dictionize(fields: Sequence, records: Sequence) -> Generator:
    """Yield one dict per record, pairing each field name with its value."""
    for row in records:
        yield dict(zip(fields, row))
def deterministic_crowding(self,parents,offspring,X_parents,X_offspring):
    """deterministic crowding implementation (for non-steady state).

    offspring compete against the parent they are most similar to, here defined as
    the parent they are most correlated with.
    the offspring only replace their parent if they are more fit.

    Returns a (survivors, survivor_index) pair where survivor_index entries
    < len(parents) refer to parents and entries >= len(parents) refer to
    offspring (offset by len(parents)).
    """
    # get children locations produced from crossover (two parent ids)
    cross_children = [i for i,o in enumerate(offspring) if len(o.parentid) > 1]
    # order offspring so that they are lined up with their most similar parent;
    # crossover children come in consecutive pairs, so walk them two at a time
    for c1,c2 in zip(cross_children[::2], cross_children[1::2]):
        # get parent locations
        p_loc = [j for j,p in enumerate(parents) if p.id in offspring[c1].parentid]
        if len(p_loc) != 2:
            continue
        # if swapping the pair increases total parent-child correlation
        # (r2_score used as the similarity measure), swap so each child
        # competes against its most similar parent
        if r2_score(X_parents[p_loc[0]],X_offspring[c1]) + r2_score(X_parents[p_loc[1]],X_offspring[c2]) < r2_score(X_parents[p_loc[0]],X_offspring[c2]) + r2_score(X_parents[p_loc[1]],X_offspring[c1]):
            # swap offspring
            offspring[c1],offspring[c2] = offspring[c2],offspring[c1]
    survivors = []
    survivor_index = []
    # pairwise tournament: the parent survives on ties (>=), otherwise
    # the offspring replaces it
    for i,(p,o) in enumerate(zip(parents,offspring)):
        if p.fitness >= o.fitness:
            survivors.append(copy.deepcopy(p))
            survivor_index.append(i)
        else:
            survivors.append(copy.deepcopy(o))
            survivor_index.append(i+len(parents))
    # return survivors along with their indices
    return survivors, survivor_index
def DisableCronJob(self, cronjob_id):
    """Disables a cronjob.

    Raises db.UnknownCronJobError if no job with that id is registered.
    """
    cron = self.cronjobs.get(cronjob_id)
    if cron is None:
        raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id)
    cron.enabled = False
def build(self, _resource, _cache=True, updatecontent=True, **kwargs):
    """Build a schema class from input _resource.

    :param _resource: object from where get the right schema.
    :param bool _cache: use _cache system.
    :param bool updatecontent: if True (default) update result.
    :rtype: Schema.
    :raises ValueError: if no registered builder can handle ``_resource``.
    """
    result = None
    if _cache and _resource in self._schemasbyresource:
        result = self._schemasbyresource[_resource]
    else:
        # Try registered builders in order; the first one that does not
        # raise wins.
        for builder in self._builders.values():
            try:
                result = builder.build(_resource=_resource, **kwargs)
            except Exception:
                # This builder does not support the resource; try the next.
                pass
            else:
                break
    if result is None:
        raise ValueError('No builder found for {0}'.format(_resource))
    if _cache:
        self._schemasbyresource[_resource] = result
    if updatecontent:
        # FIX: alias the import so it no longer shadows the
        # ``updatecontent`` boolean parameter of this method.
        from ..utils import updatecontent as _updatecontent
        _updatecontent(result, updateparents=False)
    return result
def nth(self, n, dropna=None):
    """
    Take the nth row from each group if n is an int, or a subset of rows
    if n is a list of ints.

    If dropna, will take the nth non-null row, dropna is either
    Truthy (if a Series) or 'all', 'any' (if a DataFrame);
    this is equivalent to calling dropna(how=dropna) before the
    groupby.

    Parameters
    ----------
    n : int or list of ints
        a single nth value for the row or a list of nth values
    dropna : None or str, optional
        apply the specified dropna operation before counting which row is
        the nth row. Needs to be None, 'any' or 'all'
    %(see_also)s
    Examples
    --------
    >>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
    ...                    'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
    >>> g = df.groupby('A')
    >>> g.nth(0)
         B
    A
    1  NaN
    2  3.0
    >>> g.nth(1)
         B
    A
    1  2.0
    2  5.0
    >>> g.nth(-1)
         B
    A
    1  4.0
    2  5.0
    >>> g.nth([0, 1])
         B
    A
    1  NaN
    1  2.0
    2  3.0
    2  5.0

    Specifying `dropna` allows count ignoring ``NaN``

    >>> g.nth(0, dropna='any')
         B
    A
    1  2.0
    2  3.0

    NaNs denote group exhausted when using dropna

    >>> g.nth(3, dropna='any')
        B
    A
    1 NaN
    2 NaN

    Specifying `as_index=False` in `groupby` keeps the original index.

    >>> df.groupby('A', as_index=False).nth(1)
       A    B
    1  1  2.0
    4  2  5.0
    """
    # Normalize n to an array of nth values; dropna is only supported for
    # a scalar n.
    if isinstance(n, int):
        nth_values = [n]
    elif isinstance(n, (set, list, tuple)):
        nth_values = list(set(n))
        if dropna is not None:
            raise ValueError(
                "dropna option with a list of nth values is not supported")
    else:
        raise TypeError("n needs to be an int or a list/set/tuple of ints")

    nth_values = np.array(nth_values, dtype=np.intp)
    self._set_group_selection()

    if not dropna:
        # Fast path: positions are selected with cumcount masks.
        # mask_left picks non-negative nth values counted from the front;
        # mask_right picks negative nth values counted from the back.
        mask_left = np.in1d(self._cumcount_array(), nth_values)
        mask_right = np.in1d(self._cumcount_array(ascending=False) + 1,
                             -nth_values)
        mask = mask_left | mask_right

        out = self._selected_obj[mask]
        if not self.as_index:
            return out

        # Replace the index with the group labels of the selected rows.
        ids, _, _ = self.grouper.group_info
        out.index = self.grouper.result_index[ids[mask]]

        return out.sort_index() if self.sort else out

    # dropna path: validate/normalize the dropna argument first.
    if dropna not in ['any', 'all']:
        if isinstance(self._selected_obj, Series) and dropna is True:
            warnings.warn("the dropna={dropna} keyword is deprecated,"
                          "use dropna='all' instead. "
                          "For a Series groupby, dropna must be "
                          "either None, 'any' or 'all'.".format(
                              dropna=dropna),
                          FutureWarning,
                          stacklevel=2)
            dropna = 'all'
        else:
            # Note: when agg-ing picker doesn't raise this,
            # just returns NaN
            raise ValueError("For a DataFrame groupby, dropna must be "
                             "either None, 'any' or 'all', "
                             "(was passed {dropna}).".format(
                                 dropna=dropna))

    # old behaviour, but with all and any support for DataFrames.
    # modified in GH 7559 to have better perf
    # max_len is the minimum group size needed for the nth row to exist.
    max_len = n if n >= 0 else - 1 - n
    dropped = self.obj.dropna(how=dropna, axis=self.axis)

    # get a new grouper for our dropped obj
    if self.keys is None and self.level is None:
        # we don't have the grouper info available
        # (e.g. we have selected out
        # a column that is not in the current object)
        axis = self.grouper.axis
        grouper = axis[axis.isin(dropped.index)]
    else:
        # create a grouper with the original parameters, but on the dropped
        # object
        from pandas.core.groupby.grouper import _get_grouper
        grouper, _, _ = _get_grouper(dropped, key=self.keys,
                                     axis=self.axis, level=self.level,
                                     sort=self.sort,
                                     mutated=self.mutated)

    grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
    sizes, result = grb.size(), grb.nth(n)
    # Groups too small to contain an nth row get NaN.
    mask = (sizes < max_len).values

    # set the results which don't meet the criteria
    if len(result) and mask.any():
        result.loc[mask] = np.nan

    # reset/reindex to the original groups
    if (len(self.obj) == len(dropped) or
            len(result) == len(self.grouper.result_index)):
        result.index = self.grouper.result_index
    else:
        result = result.reindex(self.grouper.result_index)

    return result
def validate_signature(self, signature, data, encoding='utf8'):
    """Validate the signature for the provided data.

    Args:
        signature (str or bytes or bytearray): Signature that was provided
            for the request.
        data (str or bytes or bytearray): Data string to validate against
            the signature.
        encoding (str, optional): If a string was provided for ``data`` or
            ``signature``, this is the character encoding.
    Returns:
        bool: Whether the signature is valid for the provided data.
    """
    if isinstance(data, string_types):
        data = bytearray(data, encoding)
    if isinstance(signature, string_types):
        signature = bytearray(signature, encoding)
    secret_key = bytearray(self.secret_key, 'utf8')
    hashed = hmac.new(secret_key, data, sha1)
    encoded = b64encode(hashed.digest())
    # FIX: use a constant-time comparison so the check does not leak
    # timing information about how much of the signature matched.
    return hmac.compare_digest(encoded.strip(), bytes(signature).strip())
def render_scene(self):
    """Render the scene exactly once to this canvas."""
    self.canvas.SetCurrent(self.context)
    self.renderer.render_scene()
    # Done rendering. FIX: the previous code swapped buffers twice per
    # frame on double-buffered canvases (once in an if-branch plus once
    # unconditionally) and left a debug print behind; swap exactly once.
    # TODO: SwapBuffers() seems required to show on desktop monitor,
    # but causes stalling when monitor is slower than VR headset
    self.canvas.SwapBuffers()
def add_atmost(self, lits, k, no_return=True):
    r"""
    This method is responsible for adding a new *native* AtMostK (see
    :mod:`pysat.card`) constraint into :class:`Minicard`.

    **Note that none of the other solvers supports native AtMostK
    constraints**.

    An AtMostK constraint is :math:`\sum_{i=1}^{n}{x_i}\leq k`. A
    native AtMostK constraint should be given as a pair ``lits`` and
    ``k``, where ``lits`` is a list of literals in the sum.

    :param lits: a list of literals.
    :param k: upper bound on the number of satisfied literals
    :param no_return: check solver's internal formula and return the
        result, if set to ``False``.

    :type lits: iterable(int)
    :type k: int
    :type no_return: bool

    :rtype: bool if ``no_return`` is set to ``False``.

    A usage example is the following:

    .. code-block:: python

        >>> s = Solver(name='mc', bootstrap_with=[[1], [2], [3]])
        >>> s.add_atmost(lits=[1, 2, 3], k=2, no_return=False)
        False
        >>> # the AtMostK constraint is in conflict with initial unit clauses
    """
    # Delegate to the backing solver if one exists; implicitly returns
    # None when there is no solver or when no_return is True.
    if self.solver:
        res = self.solver.add_atmost(lits, k, no_return)
        if not no_return:
            return res
def find_packages(self, root_target, chroot):
    """Detect packages, namespace packages and resources from an existing chroot.

    :returns: a tuple of:
      set(packages)
      set(namespace_packages)
      map(package => set(files))
    """
    base = os.path.join(chroot.path(), self.SOURCE_ROOT)
    packages, namespace_packages = set(), set()
    resources = defaultdict(set)

    def iter_files():
        # Yield (dotted module path, bare filename, absolute path) for
        # every file under the chroot's source root.
        for root, _, files in safe_walk(base):
            module = os.path.relpath(root, base).replace(os.path.sep, '.')
            for filename in files:
                yield module, filename, os.path.join(root, filename)

    # establish packages, namespace packages in first pass
    inits_to_check = {}
    for module, filename, real_filename in iter_files():
        if filename != '__init__.py':
            continue
        packages.add(module)
        inits_to_check[real_filename] = module
    namespace_packages = {inits_to_check[init]
                          for init in self.filter_namespace_packages(root_target,
                                                                     inits_to_check.keys())}

    # second pass establishes non-source content (resources); a .py file
    # outside any detected package is warned about and then treated as a
    # resource of its nearest enclosing package.
    for module, filename, real_filename in iter_files():
        if filename.endswith('.py'):
            if module not in packages:
                # TODO(wickman) Consider changing this to a full-on error as it could indicate bad BUILD
                # hygiene.
                # raise cls.UndefinedSource('{} is source but does not belong to a package!'
                #                           .format(filename))
                self.context.log.warn('{} is source but does not belong to a package.'
                                      .format(real_filename))
            else:
                continue
        submodule = self.nearest_subpackage(module, packages)
        if submodule == module:
            resources[submodule].add(filename)
        else:
            assert module.startswith(submodule + '.')
            # Store the resource path relative to its owning package.
            relative_module = module[len(submodule) + 1:]
            relative_filename = os.path.join(relative_module.replace('.', os.path.sep), filename)
            resources[submodule].add(relative_filename)
    return packages, namespace_packages, resources
def get_named_type(type_):  # noqa: F811
    """Unwrap possible wrapping type, returning the innermost named type."""
    if not type_:
        return None
    current = type_
    # Peel off NonNull/List wrappers until a named type remains.
    while is_wrapping_type(current):
        current = cast(GraphQLWrappingType, current).of_type
    return cast(GraphQLNamedType, current)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.