sentence1 stringlengths 52 3.87M | sentence2 stringlengths 1 47.2k | label stringclasses 1 value |
|---|---|---|
def _check_dna(self, dna):
""" Check that a DNA string only contains characters in ``GENETIC_MATERIAL_OPTIONS``. """
valid_chars = set(self.GENETIC_MATERIAL_OPTIONS)
assert all(char in valid_chars for char in dna) | Check that a DNA string only contains characters in ``GENETIC_MATERIAL_OPTIONS``. | entailment |
def mutate(self, p_mutate):
"""
Check each element for mutation, swapping "0" for "1" and vice-versa.
"""
new_dna = []
for bit in self.dna:
if random.random() < p_mutate:
bit = '1' if bit == '0' else '0'
new_dna.append(bit)
self.dna = ''.join(new_dna) | Check each element for mutation, swapping "0" for "1" and vice-versa. | entailment |
def deep_merge(base, extra):
"""
Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence.
"""
if extra is None:
return
for key, value in extra.items():
if value is None:
if key in base:
del base[key]
# If the key represents a dict on both given dicts, merge the sub-dicts
elif isinstance(base.get(key), dict) and isinstance(value, dict):
deep_merge(base[key], value)
else:
# Otherwise, set the key on the base to be the value of the extra.
base[key] = value | Deeply merge two dictionaries, overriding existing keys in the base.
:param base: The base dictionary which will be merged into.
:param extra: The dictionary to merge into the base. Keys from this
dictionary will take precedence. | entailment |
def make_client(api_version, session=None,
endpoint=None, service_type='monitoring'):
"""Returns an monitoring API client."""
client_cls = utils.get_client_class('monitoring', api_version, VERSION_MAP)
c = client_cls(
session=session,
service_type=service_type,
endpoint=endpoint,
app_name='monascaclient',
app_version=version.version_string,
)
return c | Returns an monitoring API client. | entailment |
def create_command_class(name, func_module):
"""Dynamically creates subclass of MigratingCommand.
Method takes name of the function, module it is part of
and builds the subclass of :py:class:`MigratingCommand`.
Having a subclass of :py:class:`cliff.command.Command` is mandatory
for the osc-lib integration.
:param name: name of the function
:type name: basestring
:param func_module: the module function is part of
:type func_module: module
:return: command name, subclass of :py:class:`MigratingCommand`
:rtype: tuple(basestring, class)
"""
cmd_name = name[3:].replace('_', '-')
callback = getattr(func_module, name)
desc = callback.__doc__ or ''
help = desc.strip().split('\n')[0]
arguments = getattr(callback, 'arguments', [])
body = {
'_args': arguments,
'_callback': staticmethod(callback),
'_description': desc,
'_epilog': desc,
'_help': help
}
claz = type('%sCommand' % cmd_name.title().replace('-', ''),
(MigratingCommand,), body)
return cmd_name, claz | Dynamically creates subclass of MigratingCommand.
Method takes name of the function, module it is part of
and builds the subclass of :py:class:`MigratingCommand`.
Having a subclass of :py:class:`cliff.command.Command` is mandatory
for the osc-lib integration.
:param name: name of the function
:type name: basestring
:param func_module: the module function is part of
:type func_module: module
:return: command name, subclass of :py:class:`MigratingCommand`
:rtype: tuple(basestring, class) | entailment |
def _session(kwargs):
"""Returns or reuses session.
Method takes care of providing instance of
session object for the client.
:param kwargs: all params (without api_version) client was initialized with
:type kwargs: dict
:returns: session object
:rtype keystoneauth1.session.Session
"""
if 'session' in kwargs:
LOG.debug('Reusing session')
sess = kwargs.get('session')
if not isinstance(sess, k_session.Session):
msg = ('session should be an instance of %s' % k_session.Session)
LOG.error(msg)
raise RuntimeError(msg)
else:
LOG.debug('Initializing new session')
auth = _get_auth_handler(kwargs)
sess = _get_session(auth, kwargs)
return sess | Returns or reuses session.
Method takes care of providing instance of
session object for the client.
:param kwargs: all params (without api_version) client was initialized with
:type kwargs: dict
:returns: session object
:rtype keystoneauth1.session.Session | entailment |
def load(cls, path):
"""
Loads an instance of the class from a file.
Parameters
----------
path : str
Path to an HDF5 file.
Examples
--------
This is an abstract data type, but let us say that ``Foo`` inherits
from ``Saveable``. To construct an object of this class from a file, we
do:
>>> foo = Foo.load('foo.h5') #doctest: +SKIP
"""
if path is None:
return cls.load_from_dict({})
else:
d = io.load(path)
return cls.load_from_dict(d) | Loads an instance of the class from a file.
Parameters
----------
path : str
Path to an HDF5 file.
Examples
--------
This is an abstract data type, but let us say that ``Foo`` inherits
from ``Saveable``. To construct an object of this class from a file, we
do:
>>> foo = Foo.load('foo.h5') #doctest: +SKIP | entailment |
def register(cls, name):
"""Decorator to register a class."""
def register_decorator(reg_cls):
def name_func(self):
return name
reg_cls.name = property(name_func)
assert issubclass(reg_cls, cls), \
"Must be subclass matching your NamedRegistry class"
cls.REGISTRY[name] = reg_cls
return reg_cls
return register_decorator | Decorator to register a class. | entailment |
def construct(cls, name, *args, **kwargs):
"""
Constructs an instance of an object given its name.
"""
return cls.REGISTRY[name](*args, **kwargs) | Constructs an instance of an object given its name. | entailment |
def config():
"""
Loads and returns a ConfigParser from ``~/.deepdish.conf``.
"""
conf = ConfigParser()
# Set up defaults
conf.add_section('io')
conf.set('io', 'compression', 'zlib')
conf.read(os.path.expanduser('~/.deepdish.conf'))
return conf | Loads and returns a ConfigParser from ``~/.deepdish.conf``. | entailment |
def resize_by_factor(im, factor):
"""
Resizes the image according to a factor. The image is pre-filtered
with a Gaussian and then resampled with bilinear interpolation.
This function uses scikit-image and essentially combines its
`pyramid_reduce` with `pyramid_expand` into one function.
Returns the same object if factor is 1, not a copy.
Parameters
----------
im : ndarray, ndim=2 or 3
Image. Either 2D or 3D with 3 or 4 channels.
factor : float
Resize factor, e.g. a factor of 0.5 will halve both sides.
"""
_import_skimage()
from skimage.transform.pyramids import pyramid_reduce, pyramid_expand
if factor < 1:
return pyramid_reduce(im, downscale=1/factor)
elif factor > 1:
return pyramid_expand(im, upscale=factor)
else:
return im | Resizes the image according to a factor. The image is pre-filtered
with a Gaussian and then resampled with bilinear interpolation.
This function uses scikit-image and essentially combines its
`pyramid_reduce` with `pyramid_expand` into one function.
Returns the same object if factor is 1, not a copy.
Parameters
----------
im : ndarray, ndim=2 or 3
Image. Either 2D or 3D with 3 or 4 channels.
factor : float
Resize factor, e.g. a factor of 0.5 will halve both sides. | entailment |
def asgray(im):
"""
Takes an image and returns its grayscale version by averaging the color
channels. if an alpha channel is present, it will simply be ignored. If a
grayscale image is given, the original image is returned.
Parameters
----------
image : ndarray, ndim 2 or 3
RGB or grayscale image.
Returns
-------
gray_image : ndarray, ndim 2
Grayscale version of image.
"""
if im.ndim == 2:
return im
elif im.ndim == 3 and im.shape[2] in (3, 4):
return im[..., :3].mean(axis=-1)
else:
raise ValueError('Invalid image format') | Takes an image and returns its grayscale version by averaging the color
channels. if an alpha channel is present, it will simply be ignored. If a
grayscale image is given, the original image is returned.
Parameters
----------
image : ndarray, ndim 2 or 3
RGB or grayscale image.
Returns
-------
gray_image : ndarray, ndim 2
Grayscale version of image. | entailment |
def crop(im, size):
"""
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping.
"""
diff = [im.shape[index] - size[index] for index in (0, 1)]
im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]]
return im2 | Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping. | entailment |
def crop_or_pad(im, size, value=0):
"""
Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping.
"""
diff = [im.shape[index] - size[index] for index in (0, 1)]
im2 = im[diff[0]//2:diff[0]//2 + size[0], diff[1]//2:diff[1]//2 + size[1]]
return im2 | Crops an image in the center.
Parameters
----------
size : tuple, (height, width)
Finally size after cropping. | entailment |
def load(path, dtype=np.float64):
"""
Loads an image from file.
Parameters
----------
path : str
Path to image file.
dtype : np.dtype
Defaults to ``np.float64``, which means the image will be returned as a
float with values between 0 and 1. If ``np.uint8`` is specified, the
values will be between 0 and 255 and no conversion cost will be
incurred.
"""
_import_skimage()
import skimage.io
im = skimage.io.imread(path)
if dtype == np.uint8:
return im
elif dtype in {np.float16, np.float32, np.float64}:
return im.astype(dtype) / 255
else:
raise ValueError('Unsupported dtype') | Loads an image from file.
Parameters
----------
path : str
Path to image file.
dtype : np.dtype
Defaults to ``np.float64``, which means the image will be returned as a
float with values between 0 and 1. If ``np.uint8`` is specified, the
values will be between 0 and 255 and no conversion cost will be
incurred. | entailment |
def load_raw(path):
"""
Load image using PIL/Pillow without any processing. This is particularly
useful for palette images, which will be loaded using their palette index
values as opposed to `load` which will convert them to RGB.
Parameters
----------
path : str
Path to image file.
"""
_import_pil()
from PIL import Image
return np.array(Image.open(path)) | Load image using PIL/Pillow without any processing. This is particularly
useful for palette images, which will be loaded using their palette index
values as opposed to `load` which will convert them to RGB.
Parameters
----------
path : str
Path to image file. | entailment |
def save(path, im):
"""
Saves an image to file.
If the image is type float, it will assume to have values in [0, 1].
Parameters
----------
path : str
Path to which the image will be saved.
im : ndarray (image)
Image.
"""
from PIL import Image
if im.dtype == np.uint8:
pil_im = Image.fromarray(im)
else:
pil_im = Image.fromarray((im*255).astype(np.uint8))
pil_im.save(path) | Saves an image to file.
If the image is type float, it will assume to have values in [0, 1].
Parameters
----------
path : str
Path to which the image will be saved.
im : ndarray (image)
Image. | entailment |
def integrate(ii, r0, c0, r1, c1):
"""
Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
r0, c0 : int
Top-left corner of block to be summed.
r1, c1 : int
Bottom-right corner of block to be summed.
Returns
-------
S : int
Integral (sum) over the given window.
"""
# This line is modified
S = np.zeros(ii.shape[-1])
S += ii[r1, c1]
if (r0 - 1 >= 0) and (c0 - 1 >= 0):
S += ii[r0 - 1, c0 - 1]
if (r0 - 1 >= 0):
S -= ii[r0 - 1, c1]
if (c0 - 1 >= 0):
S -= ii[r1, c0 - 1]
return S | Use an integral image to integrate over a given window.
Parameters
----------
ii : ndarray
Integral image.
r0, c0 : int
Top-left corner of block to be summed.
r1, c1 : int
Bottom-right corner of block to be summed.
Returns
-------
S : int
Integral (sum) over the given window. | entailment |
def offset(img, offset, fill_value=0):
"""
Moves the contents of image without changing the image size. The missing
values are given a specified fill value.
Parameters
----------
img : array
Image.
offset : (vertical_offset, horizontal_offset)
Tuple of length 2, specifying the offset along the two axes.
fill_value : dtype of img
Fill value. Defaults to 0.
"""
sh = img.shape
if sh == (0, 0):
return img
else:
x = np.empty(sh)
x[:] = fill_value
x[max(offset[0], 0):min(sh[0]+offset[0], sh[0]),
max(offset[1], 0):min(sh[1]+offset[1], sh[1])] = \
img[max(-offset[0], 0):min(sh[0]-offset[0], sh[0]),
max(-offset[1], 0):min(sh[1]-offset[1], sh[1])]
return x | Moves the contents of image without changing the image size. The missing
values are given a specified fill value.
Parameters
----------
img : array
Image.
offset : (vertical_offset, horizontal_offset)
Tuple of length 2, specifying the offset along the two axes.
fill_value : dtype of img
Fill value. Defaults to 0. | entailment |
def bounding_box(alpha, threshold=0.1):
"""
Returns a bounding box of the support.
Parameters
----------
alpha : ndarray, ndim=2
Any one-channel image where the background has zero or low intensity.
threshold : float
The threshold that divides background from foreground.
Returns
-------
bounding_box : (top, left, bottom, right)
The bounding box describing the smallest rectangle containing the
foreground object, as defined by the threshold.
"""
assert alpha.ndim == 2
# Take the bounding box of the support, with a certain threshold.
supp_axs = [alpha.max(axis=1-i) for i in range(2)]
# Check first and last value of that threshold
bb = [np.where(supp_axs[i] > threshold)[0][[0, -1]] for i in range(2)]
return (bb[0][0], bb[1][0], bb[0][1], bb[1][1]) | Returns a bounding box of the support.
Parameters
----------
alpha : ndarray, ndim=2
Any one-channel image where the background has zero or low intensity.
threshold : float
The threshold that divides background from foreground.
Returns
-------
bounding_box : (top, left, bottom, right)
The bounding box describing the smallest rectangle containing the
foreground object, as defined by the threshold. | entailment |
def bounding_box_as_binary_map(alpha, threshold=0.1):
"""
Similar to `bounding_box`, except returns the bounding box as a
binary map the same size as the input.
Same parameters as `bounding_box`.
Returns
-------
binary_map : ndarray, ndim=2, dtype=np.bool_
Binary map with True if object and False if background.
"""
bb = bounding_box(alpha)
x = np.zeros(alpha.shape, dtype=np.bool_)
x[bb[0]:bb[2], bb[1]:bb[3]] = 1
return x | Similar to `bounding_box`, except returns the bounding box as a
binary map the same size as the input.
Same parameters as `bounding_box`.
Returns
-------
binary_map : ndarray, ndim=2, dtype=np.bool_
Binary map with True if object and False if background. | entailment |
def extract_patches(images, patch_shape, samples_per_image=40, seed=0,
cycle=True):
"""
Takes a set of images and yields randomly chosen patches of specified size.
Parameters
----------
images : iterable
The images have to be iterable, and each element must be a Numpy array
with at least two spatial 2 dimensions as the first and second axis.
patch_shape : tuple, length 2
The spatial shape of the patches that should be extracted. If the
images have further dimensions beyond the spatial, the patches will
copy these too.
samples_per_image : int
Samples to extract before moving on to the next image.
seed : int
Seed with which to select the patches.
cycle : bool
If True, then the function will produce patches indefinitely, by going
back to the first image when all are done. If False, the iteration will
stop when there are no more images.
Returns
-------
patch_generator
This function returns a generator that will produce patches.
Examples
--------
>>> import deepdish as dd
>>> import matplotlib.pylab as plt
>>> import itertools
>>> images = ag.io.load_example('mnist')
Now, let us say we want to exact patches from the these, where each patch
has at least some activity.
>>> gen = dd.image.extract_patches(images, (5, 5))
>>> gen = (x for x in gen if x.mean() > 0.1)
>>> patches = np.array(list(itertools.islice(gen, 25)))
>>> patches.shape
(25, 5, 5)
>>> dd.plot.images(patches)
>>> plt.show()
"""
rs = np.random.RandomState(seed)
for Xi in itr.cycle(images):
# How many patches could we extract?
w, h = [Xi.shape[i]-patch_shape[i] for i in range(2)]
assert w > 0 and h > 0
# Maybe shuffle an iterator of the indices?
indices = np.asarray(list(itr.product(range(w), range(h))))
rs.shuffle(indices)
for x, y in indices[:samples_per_image]:
yield Xi[x:x+patch_shape[0], y:y+patch_shape[1]] | Takes a set of images and yields randomly chosen patches of specified size.
Parameters
----------
images : iterable
The images have to be iterable, and each element must be a Numpy array
with at least two spatial 2 dimensions as the first and second axis.
patch_shape : tuple, length 2
The spatial shape of the patches that should be extracted. If the
images have further dimensions beyond the spatial, the patches will
copy these too.
samples_per_image : int
Samples to extract before moving on to the next image.
seed : int
Seed with which to select the patches.
cycle : bool
If True, then the function will produce patches indefinitely, by going
back to the first image when all are done. If False, the iteration will
stop when there are no more images.
Returns
-------
patch_generator
This function returns a generator that will produce patches.
Examples
--------
>>> import deepdish as dd
>>> import matplotlib.pylab as plt
>>> import itertools
>>> images = ag.io.load_example('mnist')
Now, let us say we want to exact patches from the these, where each patch
has at least some activity.
>>> gen = dd.image.extract_patches(images, (5, 5))
>>> gen = (x for x in gen if x.mean() > 0.1)
>>> patches = np.array(list(itertools.islice(gen, 25)))
>>> patches.shape
(25, 5, 5)
>>> dd.plot.images(patches)
>>> plt.show() | entailment |
def bytesize(arr):
"""
Returns the memory byte size of a Numpy array as an integer.
"""
byte_size = np.prod(arr.shape) * np.dtype(arr.dtype).itemsize
return byte_size | Returns the memory byte size of a Numpy array as an integer. | entailment |
def apply_once(func, arr, axes, keepdims=True):
"""
Similar to `numpy.apply_over_axes`, except this performs the operation over
a flattened version of all the axes, meaning that the function will only be
called once. This only makes a difference for non-linear functions.
Parameters
----------
func : callback
Function that operates well on Numpy arrays and returns a single value
of compatible dtype.
arr : ndarray
Array to do operation over.
axes : int or iterable
Specifies the axes to perform the operation. Only one call will be made
to `func`, with all values flattened.
keepdims : bool
By default, this is True, so the collapsed dimensions remain with
length 1. This is simlar to `numpy.apply_over_axes` in that regard. If
this is set to False, the dimensions are removed, just like when using
for instance `numpy.sum` over a single axis. Note that this is safer
than subsequently calling squeeze, since this option will preserve
length-1 dimensions that were not operated on.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.uniform(size=(10, 3, 3))
Image that you have ten 3x3 images and you want to calculate each image's
intensity standard deviation:
>>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604,
0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635])
This is the same as ``x.std(1).std(1)``, which is not the standard
deviation of all 9 pixels together. To fix this we can flatten the pixels
and try again:
>>> x.reshape(10, 9).std(axis=1)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
This is exactly what this function does for you:
>>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
"""
all_axes = np.arange(arr.ndim)
if isinstance(axes, int):
axes = {axes}
else:
axes = set(axis % arr.ndim for axis in axes)
principal_axis = min(axes)
for i, axis in enumerate(axes):
axis0 = principal_axis + i
if axis != axis0:
all_axes[axis0], all_axes[axis] = all_axes[axis], all_axes[axis0]
transposed_arr = arr.transpose(all_axes)
new_shape = []
new_shape_keepdims = []
for axis, dim in enumerate(arr.shape):
if axis == principal_axis:
new_shape.append(-1)
elif axis not in axes:
new_shape.append(dim)
if axis in axes:
new_shape_keepdims.append(1)
else:
new_shape_keepdims.append(dim)
collapsed = np.apply_along_axis(func,
principal_axis,
transposed_arr.reshape(new_shape))
if keepdims:
return collapsed.reshape(new_shape_keepdims)
else:
return collapsed | Similar to `numpy.apply_over_axes`, except this performs the operation over
a flattened version of all the axes, meaning that the function will only be
called once. This only makes a difference for non-linear functions.
Parameters
----------
func : callback
Function that operates well on Numpy arrays and returns a single value
of compatible dtype.
arr : ndarray
Array to do operation over.
axes : int or iterable
Specifies the axes to perform the operation. Only one call will be made
to `func`, with all values flattened.
keepdims : bool
By default, this is True, so the collapsed dimensions remain with
length 1. This is simlar to `numpy.apply_over_axes` in that regard. If
this is set to False, the dimensions are removed, just like when using
for instance `numpy.sum` over a single axis. Note that this is safer
than subsequently calling squeeze, since this option will preserve
length-1 dimensions that were not operated on.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
>>> rs = np.random.RandomState(0)
>>> x = rs.uniform(size=(10, 3, 3))
Image that you have ten 3x3 images and you want to calculate each image's
intensity standard deviation:
>>> np.apply_over_axes(np.std, x, [1, 2]).ravel()
array([ 0.06056838, 0.08230712, 0.08135083, 0.09938963, 0.08533604,
0.07830725, 0.066148 , 0.07983019, 0.08134123, 0.01839635])
This is the same as ``x.std(1).std(1)``, which is not the standard
deviation of all 9 pixels together. To fix this we can flatten the pixels
and try again:
>>> x.reshape(10, 9).std(axis=1)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717])
This is exactly what this function does for you:
>>> dd.apply_once(np.std, x, [1, 2], keepdims=False)
array([ 0.17648981, 0.32849108, 0.29409526, 0.25547501, 0.23649064,
0.26928468, 0.20081239, 0.33052397, 0.29950855, 0.26535717]) | entailment |
def tupled_argmax(a):
"""
Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2)
"""
return np.unravel_index(np.argmax(a), np.shape(a)) | Argmax that returns an index tuple. Note that `numpy.argmax` will return a
scalar index as if you had flattened the array.
Parameters
----------
a : array_like
Input array.
Returns
-------
index : tuple
Tuple of index, even if `a` is one-dimensional. Note that this can
immediately be used to index `a` as in ``a[index]``.
Examples
--------
>>> import numpy as np
>>> import deepdish as dd
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> dd.tupled_argmax(a)
(1, 2) | entailment |
def timed(name=None, file=sys.stdout, callback=None, wall_clock=True):
"""
Context manager to make it easy to time the execution of a piece of code.
This timer will never run your code several times and is meant more for
simple in-production timing, instead of benchmarking. Reports the
wall-clock time (using `time.time`) and not the processor time.
Parameters
----------
name : str
Name of the timing block, to identify it.
file : file handler
Which file handler to print the results to. Default is standard output.
If a numpy array and size 1 is given, the time in seconds will be
stored inside it. Ignored if `callback` is set.
callback : callable
This offer even more flexibility than `file`. The callable will be
called at the end of the execution with a single floating point
argument with the elapsed time in seconds.
Examples
--------
>>> import deepdish as dd
>>> import time
The `timed` function is a context manager, so everything inside the
``with`` block will be timed. The results will be printed by default to
standard output:
>>> with dd.timed('Sleep'): # doctest: +SKIP
... time.sleep(1)
[timed] Sleep: 1.001035451889038 s
Using the `callback` parameter, we can accumulate multiple runs into a
list:
>>> times = []
>>> for i in range(3): # doctest: +SKIP
... with dd.timed(callback=times.append):
... time.sleep(1)
>>> times # doctest: +SKIP
[1.0035350322723389, 1.0035550594329834, 1.0039470195770264]
"""
start = time.time()
yield
end = time.time()
delta = end - start
if callback is not None:
callback(delta)
elif isinstance(file, np.ndarray) and len(file) == 1:
file[0] = delta
else:
name_str = ' {}'.format(name) if name is not None else ''
print(("[timed]{0}: {1} s".format(name_str, delta)), file=file) | Context manager to make it easy to time the execution of a piece of code.
This timer will never run your code several times and is meant more for
simple in-production timing, instead of benchmarking. Reports the
wall-clock time (using `time.time`) and not the processor time.
Parameters
----------
name : str
Name of the timing block, to identify it.
file : file handler
Which file handler to print the results to. Default is standard output.
If a numpy array and size 1 is given, the time in seconds will be
stored inside it. Ignored if `callback` is set.
callback : callable
This offer even more flexibility than `file`. The callable will be
called at the end of the execution with a single floating point
argument with the elapsed time in seconds.
Examples
--------
>>> import deepdish as dd
>>> import time
The `timed` function is a context manager, so everything inside the
``with`` block will be timed. The results will be printed by default to
standard output:
>>> with dd.timed('Sleep'): # doctest: +SKIP
... time.sleep(1)
[timed] Sleep: 1.001035451889038 s
Using the `callback` parameter, we can accumulate multiple runs into a
list:
>>> times = []
>>> for i in range(3): # doctest: +SKIP
... with dd.timed(callback=times.append):
... time.sleep(1)
>>> times # doctest: +SKIP
[1.0035350322723389, 1.0035550594329834, 1.0039470195770264] | entailment |
def pad(data, padwidth, value=0.0):
"""
Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[..., :] = value
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
return new_data | Pad an array with a specific value.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
value : data.dtype
The value with which to pad. Default is ``0.0``.
See also
--------
pad_to_size, pad_repeat_border, pad_repeat_border_corner
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((3, 3))
>>> dd.util.pad(x, (1, 2), value=0.0)
array([[ 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 1., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0.]]) | entailment |
def pad_to_size(data, shape, value=0.0):
"""
This is similar to `pad`, except you specify the final shape of the array.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning. If a dimension is specified as ``-1``, then it
will remain its current size along that dimension.
value : data.dtype
The value with which to pad. Default is ``0.0``. This can even be an
array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the
size of the padded array.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((4, 2))
>>> dd.util.pad_to_size(x, (5, 5))
array([[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
"""
shape = [data.shape[i] if shape[i] == -1 else shape[i]
for i in range(len(shape))]
new_data = np.empty(shape)
new_data[:] = value
II = [slice((shape[i] - data.shape[i])//2,
(shape[i] - data.shape[i])//2 + data.shape[i])
for i in range(len(shape))]
new_data[II] = data
return new_data | This is similar to `pad`, except you specify the final shape of the array.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning. If a dimension is specified as ``-1``, then it
will remain its current size along that dimension.
value : data.dtype
The value with which to pad. Default is ``0.0``. This can even be an
array, as long as ``pdata[:] = value`` is valid, where ``pdata`` is the
size of the padded array.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array with zeros.
>>> x = np.ones((4, 2))
>>> dd.util.pad_to_size(x, (5, 5))
array([[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 1., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]]) | entailment |
def pad_repeat_border(data, padwidth):
"""
Similar to `pad`, except the border value from ``data`` is used to pad.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its borders:
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border(x, 2)
array([[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 4, 4, 4, 5, 6, 7, 7, 7],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11]])
"""
data = np.asarray(data)
shape = data.shape
if isinstance(padwidth, int):
padwidth = (padwidth,)*len(shape)
padded_shape = tuple(map(lambda ix: ix[1]+padwidth[ix[0]]*2,
enumerate(shape)))
new_data = np.empty(padded_shape, dtype=data.dtype)
new_data[[slice(w, -w) if w > 0 else slice(None) for w in padwidth]] = data
for i, pw in enumerate(padwidth):
if pw > 0:
selection = [slice(None)] * data.ndim
selection2 = [slice(None)] * data.ndim
# Lower boundary
selection[i] = slice(0, pw)
selection2[i] = slice(pw, pw+1)
new_data[tuple(selection)] = new_data[tuple(selection2)]
# Upper boundary
selection[i] = slice(-pw, None)
selection2[i] = slice(-pw-1, -pw)
new_data[tuple(selection)] = new_data[tuple(selection2)]
return new_data | Similar to `pad`, except the border value from ``data`` is used to pad.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
padwidth : int or tuple
If int, it will pad using this amount at the beginning and end of all
dimensions. If it is a tuple (of same length as `ndim`), then the
padding amount will be specified per axis.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its borders:
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border(x, 2)
array([[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 0, 0, 0, 1, 2, 3, 3, 3],
[ 4, 4, 4, 5, 6, 7, 7, 7],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11],
[ 8, 8, 8, 9, 10, 11, 11, 11]]) | entailment |
def pad_repeat_border_corner(data, shape):
"""
Similar to `pad_repeat_border`, except the padding is always done on the
upper end of each axis and the target size is specified.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its upper borders.
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border_corner(x, (5, 5))
array([[ 0., 1., 2., 3., 3.],
[ 4., 5., 6., 7., 7.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.]])
"""
new_data = np.empty(shape)
new_data[[slice(upper) for upper in data.shape]] = data
for i in range(len(shape)):
selection = [slice(None)]*i + [slice(data.shape[i], None)]
selection2 = [slice(None)]*i + [slice(data.shape[i]-1, data.shape[i])]
new_data[selection] = new_data[selection2]
return new_data | Similar to `pad_repeat_border`, except the padding is always done on the
upper end of each axis and the target size is specified.
Parameters
----------
data : ndarray
Numpy array of any dimension and type.
shape : tuple
Final shape of padded array. Should be tuple of length ``data.ndim``.
If it has to pad unevenly, it will pad one more at the end of the axis
than at the beginning.
Examples
--------
>>> import deepdish as dd
>>> import numpy as np
Pad an array by repeating its upper borders.
>>> shape = (3, 4)
>>> x = np.arange(np.prod(shape)).reshape(shape)
>>> dd.util.pad_repeat_border_corner(x, (5, 5))
array([[ 0., 1., 2., 3., 3.],
[ 4., 5., 6., 7., 7.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.],
[ 8., 9., 10., 11., 11.]]) | entailment |
def _dict_native_ok(d):
"""
This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled.
"""
if len(d) >= 256:
return False
# All keys must be strings
for k in d:
if not isinstance(k, six.string_types):
return False
return True | This checks if a dictionary can be saved natively as HDF5 groups.
If it can't, it will be pickled. | entailment |
def _load_nonlink_level(handler, level, pathtable, pathname):
"""
Loads level and builds appropriate type, without handling softlinks
"""
if isinstance(level, tables.Group):
if _sns and (level._v_title.startswith('SimpleNamespace:') or
DEEPDISH_IO_ROOT_IS_SNS in level._v_attrs):
val = SimpleNamespace()
dct = val.__dict__
elif level._v_title.startswith('list:'):
dct = {}
val = []
else:
dct = {}
val = dct
# in case of recursion, object needs to be put in pathtable
# before trying to fully load it
pathtable[pathname] = val
# Load sub-groups
for grp in level:
lev = _load_level(handler, grp, pathtable)
n = grp._v_name
# Check if it's a complicated pair or a string-value pair
if n.startswith('__pair'):
dct[lev['key']] = lev['value']
else:
dct[n] = lev
# Load attributes
for name in level._v_attrs._f_list():
if name.startswith(DEEPDISH_IO_PREFIX):
continue
v = level._v_attrs[name]
dct[name] = v
if level._v_title.startswith('list:'):
N = int(level._v_title[len('list:'):])
for i in range(N):
val.append(dct['i{}'.format(i)])
return val
elif level._v_title.startswith('tuple:'):
N = int(level._v_title[len('tuple:'):])
lst = []
for i in range(N):
lst.append(dct['i{}'.format(i)])
return tuple(lst)
elif level._v_title.startswith('nonetype:'):
return None
elif is_pandas_dataframe(level):
assert _pandas, "pandas is required to read this file"
store = _HDFStoreWithHandle(handler)
return store.get(level._v_pathname)
elif level._v_title.startswith('sparse:'):
frm = level._v_attrs.format
if frm in ('csr', 'csc', 'bsr'):
shape = tuple(level.shape[:])
cls = {'csr': sparse.csr_matrix,
'csc': sparse.csc_matrix,
'bsr': sparse.bsr_matrix}
matrix = cls[frm](shape)
matrix.data = level.data[:]
matrix.indices = level.indices[:]
matrix.indptr = level.indptr[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'dia':
shape = tuple(level.shape[:])
matrix = sparse.dia_matrix(shape)
matrix.data = level.data[:]
matrix.offsets = level.offsets[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
elif frm == 'coo':
shape = tuple(level.shape[:])
matrix = sparse.coo_matrix(shape)
matrix.data = level.data[:]
matrix.col = level.col[:]
matrix.row = level.row[:]
matrix.maxprint = level._v_attrs.maxprint
return matrix
else:
raise ValueError('Unknown sparse matrix type: {}'.format(frm))
else:
return val
elif isinstance(level, tables.VLArray):
if level.shape == (1,):
return _load_pickled(level)
else:
return level[:]
elif isinstance(level, tables.Array):
if 'zeroarray_dtype' in level._v_attrs:
# Unpack zero-size arrays (shape is stored in an HDF5 array and
# type is stored in the attibute 'zeroarray_dtype')
dtype = level._v_attrs.zeroarray_dtype
sh = level[:]
return np.zeros(tuple(sh), dtype=dtype)
if 'strtype' in level._v_attrs:
strtype = level._v_attrs.strtype
itemsize = level._v_attrs.itemsize
if strtype == b'unicode':
return level[:].view(dtype=(np.unicode_, itemsize))
elif strtype == b'ascii':
return level[:].view(dtype=(np.string_, itemsize))
# This serves two purposes:
# (1) unpack big integers: the only time we save arrays like this
# (2) unpack non-deepdish "scalars"
if level.shape == ():
return level[()]
return level[:] | Loads level and builds appropriate type, without handling softlinks | entailment |
def _load_level(handler, level, pathtable):
"""
Loads level and builds appropriate type, handling softlinks if necessary
"""
if isinstance(level, tables.link.SoftLink):
# this is a link, so see if target is already loaded, return it
pathname = level.target
node = level()
else:
# not a link, but it might be a target that's already been
# loaded ... if so, return it
pathname = level._v_pathname
node = level
try:
return pathtable[pathname]
except KeyError:
pathtable[pathname] = _load_nonlink_level(handler, node, pathtable,
pathname)
return pathtable[pathname] | Loads level and builds appropriate type, handling softlinks if necessary | entailment |
def save(path, data, compression='default'):
"""
Save any Python structure to an HDF5 file. It is particularly suited for
Numpy arrays. This function works similar to ``numpy.save``, except if you
save a Python object at the top level, you do not need to issue
``data.flat[0]`` to retrieve it from inside a Numpy array of type
``object``.
Some types of objects get saved natively in HDF5. The rest get serialized
automatically. For most needs, you should be able to stick to the natively
supported types, which are:
* Dictionaries
* Short lists and tuples (<256 in length)
* Basic data types (including strings and None)
* Numpy arrays
* Scipy sparse matrices
* Pandas ``DataFrame``, ``Series``, and ``Panel``
* SimpleNamespaces (for Python >= 3.3, but see note below)
A recommendation is to always convert your data to using only these types
That way your data will be portable and can be opened through any HDF5
reader. A class that helps you with this is
:class:`deepdish.util.Saveable`.
Lists and tuples are supported and can contain heterogeneous types. This is
mostly useful and plays well with HDF5 for short lists and tuples. If you
have a long list (>256) it will be serialized automatically. However,
in such cases it is common for the elements to have the same type, in which
case we strongly recommend converting to a Numpy array first.
Note that the SimpleNamespace type will be read in as dictionaries for
earlier versions of Python.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
You can change the default compression method to ``blosc`` (much faster,
but less portable) by creating a ``~/.deepdish.conf`` with::
[io]
compression: blosc
This is the recommended compression method if you plan to use your HDF5
files exclusively through deepdish (or PyTables).
Parameters
----------
path : string
Filename to which the data is saved.
data : anything
Data to be saved. This can be anything from a Numpy array, a string, an
object, or a dictionary containing all of them including more
dictionaries.
compression : string or tuple
Set compression method, choosing from `blosc`, `zlib`, `lzo`, `bzip2`
and more (see PyTables documentation). It can also be specified as a
tuple (e.g. ``('blosc', 5)``), with the latter value specifying the
level of compression, choosing from 0 (no compression) to 9 (maximum
compression). Set to `None` to turn off compression. The default is
`zlib`, since it is highly portable; for much greater speed, try for
instance `blosc`.
See also
--------
load
"""
filters = _get_compression_filters(compression)
with tables.open_file(path, mode='w') as h5file:
# If the data is a dictionary, put it flatly in the root
group = h5file.root
group._v_attrs[DEEPDISH_IO_VERSION_STR] = IO_VERSION
idtable = {} # dict to keep track of objects already saved
# Sparse matrices match isinstance(data, dict), so we'll have to be
# more strict with the type checking
if type(data) == type({}) and _dict_native_ok(data):
idtable[id(data)] = '/'
for key, value in data.items():
_save_level(h5file, group, value, name=key,
filters=filters, idtable=idtable)
elif (_sns and isinstance(data, SimpleNamespace) and
_dict_native_ok(data.__dict__)):
idtable[id(data)] = '/'
group._v_attrs[DEEPDISH_IO_ROOT_IS_SNS] = True
for key, value in data.__dict__.items():
_save_level(h5file, group, value, name=key,
filters=filters, idtable=idtable)
else:
_save_level(h5file, group, data, name='data',
filters=filters, idtable=idtable)
# Mark this to automatically unpack when loaded
group._v_attrs[DEEPDISH_IO_UNPACK] = True | Save any Python structure to an HDF5 file. It is particularly suited for
Numpy arrays. This function works similar to ``numpy.save``, except if you
save a Python object at the top level, you do not need to issue
``data.flat[0]`` to retrieve it from inside a Numpy array of type
``object``.
Some types of objects get saved natively in HDF5. The rest get serialized
automatically. For most needs, you should be able to stick to the natively
supported types, which are:
* Dictionaries
* Short lists and tuples (<256 in length)
* Basic data types (including strings and None)
* Numpy arrays
* Scipy sparse matrices
* Pandas ``DataFrame``, ``Series``, and ``Panel``
* SimpleNamespaces (for Python >= 3.3, but see note below)
A recommendation is to always convert your data to using only these types
That way your data will be portable and can be opened through any HDF5
reader. A class that helps you with this is
:class:`deepdish.util.Saveable`.
Lists and tuples are supported and can contain heterogeneous types. This is
mostly useful and plays well with HDF5 for short lists and tuples. If you
have a long list (>256) it will be serialized automatically. However,
in such cases it is common for the elements to have the same type, in which
case we strongly recommend converting to a Numpy array first.
Note that the SimpleNamespace type will be read in as dictionaries for
earlier versions of Python.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
You can change the default compression method to ``blosc`` (much faster,
but less portable) by creating a ``~/.deepdish.conf`` with::
[io]
compression: blosc
This is the recommended compression method if you plan to use your HDF5
files exclusively through deepdish (or PyTables).
Parameters
----------
path : string
Filename to which the data is saved.
data : anything
Data to be saved. This can be anything from a Numpy array, a string, an
object, or a dictionary containing all of them including more
dictionaries.
compression : string or tuple
Set compression method, choosing from `blosc`, `zlib`, `lzo`, `bzip2`
and more (see PyTables documentation). It can also be specified as a
tuple (e.g. ``('blosc', 5)``), with the latter value specifying the
level of compression, choosing from 0 (no compression) to 9 (maximum
compression). Set to `None` to turn off compression. The default is
`zlib`, since it is highly portable; for much greater speed, try for
instance `blosc`.
See also
--------
load | entailment |
def load(path, group=None, sel=None, unpack=False):
"""
Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save
"""
with tables.open_file(path, mode='r') as h5file:
pathtable = {} # dict to keep track of objects already loaded
if group is not None:
if isinstance(group, str):
data = _load_specific_level(h5file, h5file, group, sel=sel,
pathtable=pathtable)
else: # Assume group is a list or tuple
data = []
for g in group:
data_i = _load_specific_level(h5file, h5file, g, sel=sel,
pathtable=pathtable)
data.append(data_i)
data = tuple(data)
else:
grp = h5file.root
auto_unpack = (DEEPDISH_IO_UNPACK in grp._v_attrs and
grp._v_attrs[DEEPDISH_IO_UNPACK])
do_unpack = unpack or auto_unpack
if do_unpack and len(grp._v_children) == 1:
name = next(iter(grp._v_children))
data = _load_specific_level(h5file, grp, name, sel=sel,
pathtable=pathtable)
do_unpack = False
elif sel is not None:
raise ValueError("Must specify group with `sel` unless it "
"automatically unpacks")
else:
data = _load_level(h5file, grp, pathtable)
if DEEPDISH_IO_VERSION_STR in grp._v_attrs:
v = grp._v_attrs[DEEPDISH_IO_VERSION_STR]
else:
v = 0
if v > IO_VERSION:
warnings.warn('This file was saved with a newer version of '
'deepdish. Please upgrade to make sure it loads '
'correctly.')
# Attributes can't be unpacked with the method above, so fall back
# to this
if do_unpack and isinstance(data, dict) and len(data) == 1:
data = next(iter(data.values()))
return data | Loads an HDF5 saved with `save`.
This function requires the `PyTables <http://www.pytables.org/>`_ module to
be installed.
Parameters
----------
path : string
Filename from which to load the data.
group : string or list
Load a specific group in the HDF5 hierarchy. If `group` is a list of
strings, then a tuple will be returned with all the groups that were
specified.
sel : slice or tuple of slices
If you specify `group` and the target is a numpy array, then you can
use this to slice it. This is useful for opening subsets of large HDF5
files. To compose the selection, you can use `deepdish.aslice`.
unpack : bool
If True, a single-entry dictionaries will be unpacked and the value
will be returned directly. That is, if you save ``dict(a=100)``, only
``100`` will be loaded.
Returns
-------
data : anything
Hopefully an identical reconstruction of the data that was saved.
See also
--------
save | entailment |
def sorted_maybe_numeric(x):
"""
Sorts x with numeric semantics if all keys are nonnegative integers.
Otherwise uses standard string sorting.
"""
all_numeric = all(map(str.isdigit, x))
if all_numeric:
return sorted(x, key=int)
else:
return sorted(x) | Sorts x with numeric semantics if all keys are nonnegative integers.
Otherwise uses standard string sorting. | entailment |
def abbreviate(s, maxlength=25):
"""Color-aware abbreviator"""
assert maxlength >= 4
skip = False
abbrv = None
i = 0
for j, c in enumerate(s):
if c == '\033':
skip = True
elif skip:
if c == 'm':
skip = False
else:
i += 1
if i == maxlength - 1:
abbrv = s[:j] + '\033[0m...'
elif i > maxlength:
break
if i <= maxlength:
return s
else:
return abbrv | Color-aware abbreviator | entailment |
def extend_settings(self, data_id, files, secrets):
"""Extend the settings the manager will serialize.
:param data_id: The :class:`~resolwe.flow.models.Data` object id
being prepared for.
:param files: The settings dictionary to be serialized. Keys are
filenames, values are the objects that will be serialized
into those files. Standard filenames are listed in
``resolwe.flow.managers.protocol.ExecutorFiles``.
:param secrets: Secret files dictionary describing additional secret
file content that should be created and made available to
processes with special permissions. Keys are filenames, values
are the raw strings that should be written into those files.
"""
data = Data.objects.select_related('process').get(pk=data_id)
files[ExecutorFiles.DJANGO_SETTINGS].update({
'USE_TZ': settings.USE_TZ,
'FLOW_EXECUTOR_TOOLS_PATHS': self.get_tools_paths(),
})
files[ExecutorFiles.DATA] = model_to_dict(data)
files[ExecutorFiles.DATA_LOCATION] = model_to_dict(data.location)
files[ExecutorFiles.PROCESS] = model_to_dict(data.process)
files[ExecutorFiles.PROCESS]['resource_limits'] = data.process.get_resource_limits()
# Add secrets if the process has permission to read them.
secrets.update(data.resolve_secrets()) | Extend the settings the manager will serialize.
:param data_id: The :class:`~resolwe.flow.models.Data` object id
being prepared for.
:param files: The settings dictionary to be serialized. Keys are
filenames, values are the objects that will be serialized
into those files. Standard filenames are listed in
``resolwe.flow.managers.protocol.ExecutorFiles``.
:param secrets: Secret files dictionary describing additional secret
file content that should be created and made available to
processes with special permissions. Keys are filenames, values
are the raw strings that should be written into those files. | entailment |
def get_tools_paths(self):
"""Get tools' paths."""
if settings.DEBUG or is_testing():
return list(get_apps_tools().values())
else:
tools_root = settings.FLOW_TOOLS_ROOT
subdirs = next(os.walk(tools_root))[1]
return [os.path.join(tools_root, sdir) for sdir in subdirs] | Get tools' paths. | entailment |
def resolve_data_path(self, data=None, filename=None):
"""Resolve data path for use with the executor.
:param data: Data object instance
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given data file in programs executed using this executor
"""
if data is None:
return settings.FLOW_EXECUTOR['DATA_DIR']
return data.location.get_path(filename=filename) | Resolve data path for use with the executor.
:param data: Data object instance
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given data file in programs executed using this executor | entailment |
def resolve_upload_path(self, filename=None):
"""Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor
"""
if filename is None:
return settings.FLOW_EXECUTOR['UPLOAD_DIR']
return os.path.join(settings.FLOW_EXECUTOR['UPLOAD_DIR'], filename) | Resolve upload path for use with the executor.
:param filename: Filename to resolve
:return: Resolved filename, which can be used to access the
given uploaded file in programs executed using this
executor | entailment |
def wrap(access_pyxb, read_only=False):
"""Work with the AccessPolicy in a SystemMetadata PyXB object.
Args:
access_pyxb : AccessPolicy PyXB object
The AccessPolicy to modify.
read_only: bool
Do not update the wrapped AccessPolicy.
When only a single AccessPolicy operation is needed, there's no need to use this
context manager. Instead, use the generated context manager wrappers.
"""
w = AccessPolicyWrapper(access_pyxb)
yield w
if not read_only:
w.get_normalized_pyxb() | Work with the AccessPolicy in a SystemMetadata PyXB object.
Args:
access_pyxb : AccessPolicy PyXB object
The AccessPolicy to modify.
read_only: bool
Do not update the wrapped AccessPolicy.
When only a single AccessPolicy operation is needed, there's no need to use this
context manager. Instead, use the generated context manager wrappers. | entailment |
def wrap_sysmeta_pyxb(sysmeta_pyxb, read_only=False):
"""Work with the AccessPolicy in a SystemMetadata PyXB object.
Args:
sysmeta_pyxb : SystemMetadata PyXB object
SystemMetadata containing the AccessPolicy to modify.
read_only: bool
Do not update the wrapped AccessPolicy.
When only a single AccessPolicy operation is needed, there's no need to use
this context manager. Instead, use the generated context manager wrappers.
There is no clean way in Python to make a context manager that allows client code to
replace the object that is passed out of the manager. The AccessPolicy schema does not
allow the AccessPolicy element to be empty. However, the SystemMetadata schema
specifies the AccessPolicy as optional. By wrapping the SystemMetadata instead of the
AccessPolicy when working with AccessPolicy that is within SystemMetadata, the wrapper
can handle the situation of empty AccessPolicy by instead dropping the AccessPolicy
from the SystemMetadata.
"""
w = AccessPolicyWrapper(sysmeta_pyxb.accessPolicy)
yield w
if not read_only:
sysmeta_pyxb.accessPolicy = w.get_normalized_pyxb() | Work with the AccessPolicy in a SystemMetadata PyXB object.
Args:
sysmeta_pyxb : SystemMetadata PyXB object
SystemMetadata containing the AccessPolicy to modify.
read_only: bool
Do not update the wrapped AccessPolicy.
When only a single AccessPolicy operation is needed, there's no need to use
this context manager. Instead, use the generated context manager wrappers.
There is no clean way in Python to make a context manager that allows client code to
replace the object that is passed out of the manager. The AccessPolicy schema does not
allow the AccessPolicy element to be empty. However, the SystemMetadata schema
specifies the AccessPolicy as optional. By wrapping the SystemMetadata instead of the
AccessPolicy when working with AccessPolicy that is within SystemMetadata, the wrapper
can handle the situation of empty AccessPolicy by instead dropping the AccessPolicy
from the SystemMetadata. | entailment |
def get_highest_perm_str(self, subj_str):
"""
Args:
subj_str : str
Subject for which to retrieve the highest permission.
Return:
The highest permission for subject or None if subject does not have any permissions.
"""
pres_perm_set = self._present_perm_set_for_subj(self._perm_dict, subj_str)
return (
None if not pres_perm_set else self._highest_perm_from_iter(pres_perm_set)
) | Args:
subj_str : str
Subject for which to retrieve the highest permission.
Return:
The highest permission for subject or None if subject does not have any permissions. | entailment |
def get_effective_perm_list(self, subj_str):
"""
Args:
subj_str : str
Subject for which to retrieve the effective permissions.
Returns:
list of str: List of permissions up to and including the highest permission for
subject, ordered lower to higher, or empty list if subject does not have any
permissions.
E.g.: If 'write' is highest permission for subject, return ['read', 'write'].
"""
highest_perm_str = self.get_highest_perm_str(subj_str)
if highest_perm_str is None:
return []
return self._equal_or_lower_perm_list(highest_perm_str) | Args:
subj_str : str
Subject for which to retrieve the effective permissions.
Returns:
list of str: List of permissions up to and including the highest permission for
subject, ordered lower to higher, or empty list if subject does not have any
permissions.
E.g.: If 'write' is highest permission for subject, return ['read', 'write']. | entailment |
def get_subjects_with_equal_or_higher_perm(self, perm_str):
"""
Args:
perm_str : str
Permission, ``read``, ``write`` or ``changePermission``.
Returns:
set of str : Subj that have perm equal or higher than ``perm_str``.
Since the lowest permission a subject can have is ``read``, passing ``read``
will return all subjects.
"""
self._assert_valid_permission(perm_str)
return {
s
for p in self._equal_or_higher_perm(perm_str)
for s in self._perm_dict.get(p, set())
} | Args:
perm_str : str
Permission, ``read``, ``write`` or ``changePermission``.
Returns:
set of str : Subj that have perm equal or higher than ``perm_str``.
Since the lowest permission a subject can have is ``read``, passing ``read``
will return all subjects. | entailment |
def dump(self):
"""Dump the current state to debug level log."""
logging.debug('AccessPolicy:')
map(
logging.debug,
[
' {}'.format(s)
for s in pprint.pformat(self.get_normalized_perm_list()).splitlines()
],
) | Dump the current state to debug level log. | entailment |
def subj_has_perm(self, subj_str, perm_str):
"""Returns:
bool: ``True`` if ``subj_str`` has perm equal to or higher than ``perm_str``.
"""
self._assert_valid_permission(perm_str)
return perm_str in self.get_effective_perm_list(subj_str) | Returns:
bool: ``True`` if ``subj_str`` has perm equal to or higher than ``perm_str``. | entailment |
def add_authenticated_read(self):
"""Add ``read`` perm for all authenticated subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_AUTHENTICATED, 'read') | Add ``read`` perm for all authenticated subj.
Public ``read`` is removed if present. | entailment |
def add_verified_read(self):
"""Add ``read`` perm for all verified subj.
Public ``read`` is removed if present.
"""
self.remove_perm(d1_common.const.SUBJECT_PUBLIC, 'read')
self.add_perm(d1_common.const.SUBJECT_VERIFIED, 'read') | Add ``read`` perm for all verified subj.
Public ``read`` is removed if present. | entailment |
def add_perm(self, subj_str, perm_str):
"""Add a permission for a subject.
Args:
subj_str : str
Subject for which to add permission(s)
perm_str : str
Permission to add. Implicitly adds all lower permissions. E.g., ``write``
will also add ``read``.
"""
self._assert_valid_permission(perm_str)
self._perm_dict.setdefault(perm_str, set()).add(subj_str) | Add a permission for a subject.
Args:
subj_str : str
Subject for which to add permission(s)
perm_str : str
Permission to add. Implicitly adds all lower permissions. E.g., ``write``
will also add ``read``. | entailment |
def remove_perm(self, subj_str, perm_str):
"""Remove permission from a subject.
Args:
subj_str : str
Subject for which to remove permission(s)
perm_str : str
Permission to remove. Implicitly removes all higher permissions. E.g., ``write``
will also remove ``changePermission`` if previously granted.
"""
self._assert_valid_permission(perm_str)
for perm_str in self._equal_or_higher_perm(perm_str):
self._perm_dict.setdefault(perm_str, set()).discard(subj_str) | Remove permission from a subject.
Args:
subj_str : str
Subject for which to remove permission(s)
perm_str : str
Permission to remove. Implicitly removes all higher permissions. E.g., ``write``
will also remove ``changePermission`` if previously granted. | entailment |
def remove_subj(self, subj_str):
"""Remove all permissions for subject.
Args:
subj_str : str
Subject for which to remove all permissions. Since subjects can only be present
in the AccessPolicy when they have one or more permissions, this removes the
subject itself as well.
The subject may still have access to the obj. E.g.:
* The obj has public access.
* The subj has indirect access by being in a group which has access.
* The subj has an equivalent subj that has access.
* The subj is set as the rightsHolder for the object.
"""
for subj_set in list(self._perm_dict.values()):
subj_set -= {subj_str} | Remove all permissions for subject.
Args:
subj_str : str
Subject for which to remove all permissions. Since subjects can only be present
in the AccessPolicy when they have one or more permissions, this removes the
subject itself as well.
The subject may still have access to the obj. E.g.:
* The obj has public access.
* The subj has indirect access by being in a group which has access.
* The subj has an equivalent subj that has access.
* The subj is set as the rightsHolder for the object. | entailment |
def _perm_dict_from_pyxb(self, access_pyxb):
"""Return dict representation of AccessPolicy PyXB obj."""
subj_dict = self._subj_dict_from_pyxb(access_pyxb)
return self._perm_dict_from_subj_dict(subj_dict) | Return dict representation of AccessPolicy PyXB obj. | entailment |
def _perm_dict_from_subj_dict(self, subj_dict):
"""Return dict where keys and values of ``subj_dict`` have been flipped
around."""
perm_dict = {}
for subj_str, perm_set in list(subj_dict.items()):
for perm_str in perm_set:
perm_dict.setdefault(perm_str, set()).add(subj_str)
return perm_dict | Return dict where keys and values of ``subj_dict`` have been flipped
around. | entailment |
def _pyxb_from_perm_dict(self, perm_dict):
"""Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object.
"""
norm_perm_list = self._norm_perm_list_from_perm_dict(perm_dict)
return self._pyxb_from_norm_perm_list(norm_perm_list) | Return an AccessPolicy PyXB representation of ``perm_dict``
- If ``norm_perm_list`` is empty, None is returned. The schema does not allow
AccessPolicy to be empty, but in SystemMetadata, it can be left out
altogether. So returning None instead of an empty AccessPolicy allows the
result to be inserted directly into a SystemMetadata PyXB object. | entailment |
def _pyxb_from_norm_perm_list(self, norm_perm_list):
"""Return an AccessPolicy PyXB representation of ``norm_perm_list``"""
# Using accessPolicy() instead of AccessPolicy() and accessRule() instead of
# AccessRule() gives PyXB the type information required for using this as a
# root element.
access_pyxb = d1_common.types.dataoneTypes.accessPolicy()
for perm_str, subj_list in norm_perm_list:
rule_pyxb = d1_common.types.dataoneTypes.accessRule()
rule_pyxb.permission.append(perm_str)
for subj_str in subj_list:
rule_pyxb.subject.append(subj_str)
access_pyxb.allow.append(rule_pyxb)
if len(access_pyxb.allow):
return access_pyxb | Return an AccessPolicy PyXB representation of ``norm_perm_list`` | entailment |
def _subj_dict_from_pyxb(self, access_pyxb):
"""Return a dict representation of ``access_pyxb``, which is an AccessPolicy
PyXB object.
This also remove any duplicate subjects and permissions in the PyXB object.
"""
subj_dict = {}
for allow_pyxb in access_pyxb.allow:
perm_set = set()
for perm_pyxb in allow_pyxb.permission:
perm_set.add(perm_pyxb)
for subj_pyxb in allow_pyxb.subject:
subj_dict.setdefault(subj_pyxb.value(), set()).update(perm_set)
return subj_dict | Return a dict representation of ``access_pyxb``, which is an AccessPolicy
PyXB object.
This also remove any duplicate subjects and permissions in the PyXB object. | entailment |
def _highest_perm_dict_from_perm_dict(self, perm_dict):
"""Return a perm_dict where only the highest permission for each subject is
included."""
highest_perm_dict = copy.copy(perm_dict)
for ordered_str in reversed(ORDERED_PERM_LIST):
for lower_perm in self._lower_perm_list(ordered_str):
highest_perm_dict.setdefault(lower_perm, set())
highest_perm_dict[lower_perm] -= perm_dict.get(ordered_str, set())
return highest_perm_dict | Return a perm_dict where only the highest permission for each subject is
included. | entailment |
def _norm_perm_list_from_perm_dict(self, perm_dict):
"""Return a minimal, ordered, hashable list of subjects and permissions."""
high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
return [
[k, list(sorted(high_perm_dict[k]))]
for k in ORDERED_PERM_LIST
if high_perm_dict.get(k, False)
] | Return a minimal, ordered, hashable list of subjects and permissions. | entailment |
def _effective_perm_list_from_iter(self, perm_iter):
"""Return list of effective permissions for for highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty."""
highest_perm_str = self._highest_perm_from_iter(perm_iter)
return (
self._equal_or_lower_perm_list(highest_perm_str)
if highest_perm_str is not None
else None
) | Return list of effective permissions for for highest permission in
``perm_iter``, ordered lower to higher, or None if ``perm_iter`` is empty. | entailment |
def _present_perm_set_for_subj(self, perm_dict, subj_str):
"""Return a set containing only the permissions that are present in the
``perm_dict`` for ``subj_str``"""
return {p for p, s in list(perm_dict.items()) if subj_str in s} | Return a set containing only the permissions that are present in the
``perm_dict`` for ``subj_str`` | entailment |
def _highest_perm_from_iter(self, perm_iter):
"""Return the highest perm present in ``perm_iter`` or None if ``perm_iter`` is
empty."""
perm_set = set(perm_iter)
for perm_str in reversed(ORDERED_PERM_LIST):
if perm_str in perm_set:
return perm_str | Return the highest perm present in ``perm_iter`` or None if ``perm_iter`` is
empty. | entailment |
def _ordered_idx_from_perm(self, perm_str):
"""Return the ordered index of ``perm_str`` or None if ``perm_str`` is not a
valid permission."""
for i, ordered_str in enumerate(ORDERED_PERM_LIST):
if perm_str == ordered_str:
return i | Return the ordered index of ``perm_str`` or None if ``perm_str`` is not a
valid permission. | entailment |
def _assert_valid_permission(self, perm_str):
"""Raise D1 exception if ``perm_str`` is not a valid permission."""
if perm_str not in ORDERED_PERM_LIST:
raise d1_common.types.exceptions.InvalidRequest(
0,
'Permission must be one of {}. perm_str="{}"'.format(
', '.join(ORDERED_PERM_LIST), perm_str
),
) | Raise D1 exception if ``perm_str`` is not a valid permission. | entailment |
def handle_unexpected_exception(max_traceback_levels=100):
"""Suppress stack traces for common errors and provide hints for how to resolve
them."""
exc_type, exc_msgs = sys.exc_info()[:2]
if exc_type.__name__ == "SSLError":
d1_cli.impl.util.print_error(
"""HTTPS / TLS / SSL / X.509v3 Certificate Error:
An HTTPS connection could not be established. Verify that a DataONE node
responds at the URL provided in the cn-url or mn-url session variable. If the
URL is valid and if you intended to connect without authentication, make sure
that the session variable, "anonymous", is set to True. If you intended to
connect with authentication, make sure that the parameter, "cert-file", points
to a valid certificate from CILogon. If the certificate has the private
key in a separate file, also set "key-file" to the private key file.
Otherwise, set "key-file" to None. Note that CILogon certificates must be
renewed after 18 hours.
"""
)
elif exc_type.__name__ == "timeout":
d1_cli.impl.util.print_error(
"""Timeout error:
A connection to a DataONE node timed out. Verify that a DataONE node responds
at the URL provided in the cn-url or mn-url session variable.
"""
)
else:
_print_unexpected_exception(max_traceback_levels) | Suppress stack traces for common errors and provide hints for how to resolve
them. | entailment |
def create_sciobj(request, sysmeta_pyxb):
"""Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object.
"""
pid = d1_common.xml.get_req_val(sysmeta_pyxb.identifier)
set_mn_controlled_values(request, sysmeta_pyxb, is_modification=False)
d1_gmn.app.views.assert_db.is_valid_pid_for_create(pid)
d1_gmn.app.views.assert_sysmeta.sanity(request, sysmeta_pyxb)
if _is_proxy_sciobj(request):
sciobj_url = _get_sciobj_proxy_url(request)
_sanity_check_proxy_url(sciobj_url)
else:
sciobj_url = d1_gmn.app.sciobj_store.get_rel_sciobj_file_url_by_pid(pid)
if not _is_proxy_sciobj(request):
if d1_gmn.app.resource_map.is_resource_map_sysmeta_pyxb(sysmeta_pyxb):
_create_resource_map(pid, request, sysmeta_pyxb, sciobj_url)
else:
_save_sciobj_bytes_from_request(request, pid)
d1_gmn.app.scimeta.assert_valid(sysmeta_pyxb, pid)
d1_gmn.app.sysmeta.create_or_update(sysmeta_pyxb, sciobj_url)
d1_gmn.app.event_log.create(
d1_common.xml.get_req_val(sysmeta_pyxb.identifier),
request,
timestamp=d1_common.date_time.normalize_datetime_to_utc(
sysmeta_pyxb.dateUploaded
),
) | Create object file and database entries for a new native locally stored (non-
proxied) science object.
This method takes a request object and is only called from the views that
handle:
- MNStorage.create()
- MNStorage.update()
Various sanity checking is performed. Raises D1 exceptions that are returned
directly to the client. Adds create event to the event log.
Preconditions:
- None. This method should check everything.
Postconditions:
- A new file containing sciobj bytes, and models (database rows) for the newly
added object. | entailment |
def _save_sciobj_bytes_from_request(request, pid):
"""Django stores small uploads in memory and streams large uploads directly to disk.
Uploads stored in memory are represented by UploadedFile and on disk,
TemporaryUploadedFile. To store an UploadedFile on disk, it's iterated and saved in
chunks. To store a TemporaryUploadedFile, it's moved from the temporary to the final
location. Django automatically handles this when using the file related fields in
the models, but GMN is not using those, so has to do it manually here.
"""
sciobj_path = d1_gmn.app.sciobj_store.get_abs_sciobj_file_path_by_pid(pid)
if hasattr(request.FILES['object'], 'temporary_file_path'):
d1_common.utils.filesystem.create_missing_directories_for_file(sciobj_path)
django.core.files.move.file_move_safe(
request.FILES['object'].temporary_file_path(), sciobj_path
)
else:
with d1_gmn.app.sciobj_store.open_sciobj_file_by_path_ctx(
sciobj_path, write=True
) as sciobj_stream:
for chunk in request.FILES['object'].chunks():
sciobj_stream.write(chunk) | Django stores small uploads in memory and streams large uploads directly to disk.
Uploads stored in memory are represented by UploadedFile and on disk,
TemporaryUploadedFile. To store an UploadedFile on disk, it's iterated and saved in
chunks. To store a TemporaryUploadedFile, it's moved from the temporary to the final
location. Django automatically handles this when using the file related fields in
the models, but GMN is not using those, so has to do it manually here. | entailment |
def set_mn_controlled_values(request, sysmeta_pyxb, is_modification):
"""See the description of TRUST_CLIENT_* in settings.py."""
now_datetime = d1_common.date_time.utc_now()
default_value_list = [
('originMemberNode', django.conf.settings.NODE_IDENTIFIER, True),
('authoritativeMemberNode', django.conf.settings.NODE_IDENTIFIER, True),
('serialVersion', 1, False),
('dateUploaded', now_datetime, False),
]
if not is_modification:
# submitter cannot be updated as the CN does not allow it.
default_value_list.append(('submitter', request.primary_subject_str, True))
# dateSysMetadataModified cannot be updated as it is used for optimistic
# locking. If changed, it is assumed that optimistic locking failed, and the
# update is rejected in order to prevent a concurrent update from being lost.
default_value_list.append(('dateSysMetadataModified', now_datetime, False))
else:
sysmeta_pyxb.submitter = None
sysmeta_pyxb.dateSysMetadataModified = now_datetime
for attr_str, default_value, is_simple_content in default_value_list:
is_trusted_from_client = getattr(
django.conf.settings, 'TRUST_CLIENT_{}'.format(attr_str.upper()), False
)
override_value = None
if is_trusted_from_client:
override_value = (
d1_common.xml.get_opt_val(sysmeta_pyxb, attr_str)
if is_simple_content
else getattr(sysmeta_pyxb, attr_str, None)
)
setattr(sysmeta_pyxb, attr_str, override_value or default_value) | See the description of TRUST_CLIENT_* in settings.py. | entailment |
def duplicate(self, contributor=None, inherit_collections=False):
"""Duplicate (make a copy) ``Entity`` objects.
:param contributor: Duplication user
:param inherit_collections: If ``True`` then duplicated
entities will be added to collections the original entity
is part of. Duplicated entities' data objects will also be
added to the collections, but only those which are in the
collection
:return: A list of duplicated entities
"""
return [
entity.duplicate(contributor, inherit_collections)
for entity in self
] | Duplicate (make a copy) ``Entity`` objects.
:param contributor: Duplication user
:param inherit_collections: If ``True`` then duplicated
entities will be added to collections the original entity
is part of. Duplicated entities' data objects will also be
added to the collections, but only those which are in the
collection
:return: A list of duplicated entities | entailment |
def move_to_collection(self, source_collection, destination_collection):
"""Move entities from source to destination collection."""
for entity in self:
entity.move_to_collection(source_collection, destination_collection) | Move entities from source to destination collection. | entailment |
def duplicate(self, contributor=None, inherit_collections=False):
"""Duplicate (make a copy)."""
duplicate = Entity.objects.get(id=self.id)
duplicate.pk = None
duplicate.slug = None
duplicate.name = 'Copy of {}'.format(self.name)
duplicate.duplicated = now()
if contributor:
duplicate.contributor = contributor
duplicate.save(force_insert=True)
assign_contributor_permissions(duplicate)
# Override fields that are automatically set on create.
duplicate.created = self.created
duplicate.save()
# Duplicate entity's data objects.
data = get_objects_for_user(contributor, 'view_data', self.data.all()) # pylint: disable=no-member
duplicated_data = data.duplicate(contributor)
duplicate.data.add(*duplicated_data)
if inherit_collections:
collections = get_objects_for_user(
contributor,
'add_collection',
self.collections.all() # pylint: disable=no-member
)
for collection in collections:
collection.entity_set.add(duplicate)
copy_permissions(collection, duplicate)
collection.data.add(*duplicated_data)
for datum in duplicated_data:
copy_permissions(collection, datum)
return duplicate | Duplicate (make a copy). | entailment |
def move_to_collection(self, source_collection, destination_collection):
"""Move entity from source to destination collection."""
# Remove from collection.
self.collections.remove(source_collection) # pylint: disable=no-member
source_collection.data.remove(*self.data.all()) # pylint: disable=no-member
# Add to collection.
self.collections.add(destination_collection) # pylint: disable=no-member
destination_collection.data.add(*self.data.all()) | Move entity from source to destination collection. | entailment |
def unquote(s):
"""unquote('abc%20def') -> 'abc def'."""
res = s.split('%')
# fastpath
if len(res) == 1:
return s
s = res[0]
for item in res[1:]:
try:
s += _hextochr[item[:2]] + item[2:]
except KeyError:
s += '%' + item
except UnicodeDecodeError:
s += chr(int(item[:2], 16)) + item[2:]
return s | unquote('abc%20def') -> 'abc def'. | entailment |
def quote(s, unsafe='/'):
"""Pass in a dictionary that has unsafe characters as the keys, and the percent
encoded value as the value."""
res = s.replace('%', '%25')
for c in unsafe:
res = res.replace(c, '%' + (hex(ord(c)).upper())[2:])
return res | Pass in a dictionary that has unsafe characters as the keys, and the percent
encoded value as the value. | entailment |
def get_dependencies(self):
"""Return dependencies, which should trigger updates of this model."""
# pylint: disable=no-member
return super().get_dependencies() + [
Data.collection_set,
Data.entity_set,
Data.parents,
] | Return dependencies, which should trigger updates of this model. | entailment |
def alpha_s(scale, f, alphasMZ=0.1185, loop=3):
"""3-loop computation of alpha_s for f flavours
with initial condition alpha_s(MZ) = 0.1185"""
if scale == MZ and f == 5:
return alphasMZ # nothing to do
_sane(scale, f)
crd = rundec.CRunDec()
if f == 5:
return_value = crd.AlphasExact(alphasMZ, MZ, scale, f, loop)
elif f == 6:
crd.nfMmu.Mth = 170
crd.nfMmu.muth = 170
crd.nfMmu.nf = 6
return_value = crd.AlL2AlH(alphasMZ, MZ, crd.nfMmu, scale, loop)
elif f == 4:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return_value = crd.AlH2AlL(alphasMZ, MZ, crd.nfMmu, scale, loop)
elif f == 3:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
mc = 1.3
asmc = crd.AlH2AlL(alphasMZ, MZ, crd.nfMmu, mc, loop)
crd.nfMmu.Mth = mc
crd.nfMmu.muth = mc
crd.nfMmu.nf = 4
return_value = crd.AlH2AlL(asmc, mc, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale))
if return_value == 0:
raise ValueError("Return value is 0, probably `scale={}` is too small.".format(scale))
else:
return return_value | 3-loop computation of alpha_s for f flavours
with initial condition alpha_s(MZ) = 0.1185 | entailment |
def m_b(mbmb, scale, f, alphasMZ=0.1185, loop=3):
r"""Get running b quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_b(m_b)$"""
if scale == mbmb and f == 5:
return mbmb # nothing to do
_sane(scale, f)
alphas_mb = alpha_s(mbmb, 5, alphasMZ=alphasMZ, loop=loop)
crd = rundec.CRunDec()
if f == 5:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(mbmb, alphas_mb, alphas_scale, f, loop)
elif f == 4:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
elif f == 3:
mc = 1.3
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
mbmc = crd.mH2mL(mbmb, alphas_mb, mbmb, crd.nfMmu, mc, loop)
crd.nfMmu.Mth = mc
crd.nfMmu.muth = mc
crd.nfMmu.nf = 4
alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)
return crd.mH2mL(mbmc, alphas_mc, mc, crd.nfMmu, scale, loop)
elif f == 6:
crd.nfMmu.Mth = 170
crd.nfMmu.muth = 170
crd.nfMmu.nf = 6
return crd.mL2mH(mbmb, alphas_mb, mbmb, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | r"""Get running b quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_b(m_b)$ | entailment |
def m_c(mcmc, scale, f, alphasMZ=0.1185, loop=3):
r"""Get running c quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_c(m_c)$"""
if scale == mcmc:
return mcmc # nothing to do
_sane(scale, f)
crd = rundec.CRunDec()
alphas_mc = alpha_s(mcmc, 4, alphasMZ=alphasMZ, loop=loop)
if f == 4:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(mcmc, alphas_mc, alphas_scale, f, loop)
elif f == 3:
crd.nfMmu.Mth = 1.3
crd.nfMmu.muth = 1.3
crd.nfMmu.nf = 4
return crd.mH2mL(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
elif f == 5:
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
return crd.mL2mH(mcmc, alphas_mc, mcmc, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | r"""Get running c quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_c(m_c)$ | entailment |
def m_s(ms2, scale, f, alphasMZ=0.1185, loop=3):
r"""Get running s quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_s(2 \,\text{GeV})$"""
if scale == 2 and f == 3:
return ms2 # nothing to do
_sane(scale, f)
crd = rundec.CRunDec()
alphas_2 = alpha_s(2, 3, alphasMZ=alphasMZ, loop=loop)
if f == 3:
alphas_scale = alpha_s(scale, f, alphasMZ=alphasMZ, loop=loop)
return crd.mMS2mMS(ms2, alphas_2, alphas_scale, f, loop)
elif f == 4:
crd.nfMmu.Mth = 1.3
crd.nfMmu.muth = 1.3
crd.nfMmu.nf = 4
return crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, scale, loop)
elif f == 5:
mc = 1.3
crd.nfMmu.Mth = mc
crd.nfMmu.muth = mc
crd.nfMmu.nf = 4
msmc = crd.mL2mH(ms2, alphas_2, 2, crd.nfMmu, mc, loop)
crd.nfMmu.Mth = 4.8
crd.nfMmu.muth = 4.8
crd.nfMmu.nf = 5
alphas_mc = alpha_s(mc, 4, alphasMZ=alphasMZ, loop=loop)
return crd.mL2mH(msmc, alphas_mc, mc, crd.nfMmu, scale, loop)
else:
raise ValueError("Invalid input: f={}, scale={}".format(f, scale)) | r"""Get running s quark mass in the MSbar scheme at the scale `scale`
in the theory with `f` dynamical quark flavours starting from $m_s(2 \,\text{GeV})$ | entailment |
def get_confirmation(self):
"""Get user confirmation to proceed."""
if self.clear:
action = 'This will DELETE ALL FILES in this location!'
else:
action = 'This will overwrite existing files!'
message = (
"\n"
"You have requested to collect static files at the destination\n"
"location as specified in your settings\n"
"\n"
" {destination}\n"
"\n"
"{action}\n"
"Are you sure you want to do this?\n"
"\n"
"Type 'yes' to continue, or 'no' to cancel: ".format(
destination=self.destination_path,
action=action,
)
)
if input(''.join(message)) != 'yes':
raise CommandError("Collecting tools cancelled.") | Get user confirmation to proceed. | entailment |
def clear_dir(self):
"""Delete contents of the directory on the given path."""
self.stdout.write("Deleting contents of '{}'.".format(self.destination_path))
for filename in os.listdir(self.destination_path):
if os.path.isfile(filename) or os.path.islink(filename):
os.remove(filename)
elif os.path.isdir(filename):
shutil.rmtree(filename) | Delete contents of the directory on the given path. | entailment |
def change_path_prefix(self, path, old_prefix, new_prefix, app_name):
"""Change path prefix and include app name."""
relative_path = os.path.relpath(path, old_prefix)
return os.path.join(new_prefix, app_name, relative_path) | Change path prefix and include app name. | entailment |
def collect(self):
"""Get tools' locations and copy them to a single location."""
for app_name, tools_path in get_apps_tools().items():
self.stdout.write("Copying files from '{}'.".format(tools_path))
app_name = app_name.replace('.', '_')
app_destination_path = os.path.join(self.destination_path, app_name)
if not os.path.isdir(app_destination_path):
os.mkdir(app_destination_path)
for root, dirs, files in os.walk(tools_path):
for dir_name in dirs:
dir_source_path = os.path.join(root, dir_name)
dir_destination_path = self.change_path_prefix(
dir_source_path, tools_path, self.destination_path, app_name
)
if not os.path.isdir(dir_destination_path):
os.mkdir(dir_destination_path)
for file_name in files:
file_source_path = os.path.join(root, file_name)
file_destination_path = self.change_path_prefix(
file_source_path, tools_path, self.destination_path, app_name
)
shutil.copy2(file_source_path, file_destination_path) | Get tools' locations and copy them to a single location. | entailment |
def handle(self, **options):
"""Collect tools."""
self.set_options(**options)
os.makedirs(self.destination_path, exist_ok=True)
if self.interactive and any(os.listdir(self.destination_path)):
self.get_confirmation()
if self.clear:
self.clear_dir()
self.collect() | Collect tools. | entailment |
def get_data_checksum(proc_input, proc_slug, proc_version):
"""Compute checksum of processor inputs, name and version."""
checksum = hashlib.sha256()
checksum.update(json.dumps(proc_input, sort_keys=True).encode('utf-8'))
checksum.update(proc_slug.encode('utf-8'))
checksum.update(str(proc_version).encode('utf-8'))
return checksum.hexdigest() | Compute checksum of processor inputs, name and version. | entailment |
def dict_dot(d, k, val=None, default=None):
"""Get or set value using a dot-notation key in a multilevel dict."""
if val is None and k == '':
return d
def set_default(dict_or_model, key, default_value):
"""Set default field value."""
if isinstance(dict_or_model, models.Model):
if not hasattr(dict_or_model, key):
setattr(dict_or_model, key, default_value)
return getattr(dict_or_model, key)
else:
return dict_or_model.setdefault(key, default_value)
def get_item(dict_or_model, key):
"""Get field value."""
if isinstance(dict_or_model, models.Model):
return getattr(dict_or_model, key)
else:
return dict_or_model[key]
def set_item(dict_or_model, key, value):
"""Set field value."""
if isinstance(dict_or_model, models.Model):
setattr(dict_or_model, key, value)
else:
dict_or_model[key] = value
if val is None and callable(default):
# Get value, default for missing
return functools.reduce(lambda a, b: set_default(a, b, default()), k.split('.'), d)
elif val is None:
# Get value, error on missing
return functools.reduce(get_item, k.split('.'), d)
else:
# Set value
try:
k, k_last = k.rsplit('.', 1)
set_item(dict_dot(d, k, default=dict), k_last, val)
except ValueError:
set_item(d, k, val)
return val | Get or set value using a dot-notation key in a multilevel dict. | entailment |
def get_apps_tools():
"""Get applications' tools and their paths.
Return a dict with application names as keys and paths to tools'
directories as values. Applications without tools are omitted.
"""
tools_paths = {}
for app_config in apps.get_app_configs():
proc_path = os.path.join(app_config.path, 'tools')
if os.path.isdir(proc_path):
tools_paths[app_config.name] = proc_path
custom_tools_paths = getattr(settings, 'RESOLWE_CUSTOM_TOOLS_PATHS', [])
if not isinstance(custom_tools_paths, list):
raise KeyError("`RESOLWE_CUSTOM_TOOLS_PATHS` setting must be a list.")
for seq, custom_path in enumerate(custom_tools_paths):
custom_key = '_custom_{}'.format(seq)
tools_paths[custom_key] = custom_path
return tools_paths | Get applications' tools and their paths.
Return a dict with application names as keys and paths to tools'
directories as values. Applications without tools are omitted. | entailment |
def rewire_inputs(data_list):
"""Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs).
"""
if len(data_list) < 2:
return data_list
mapped_ids = {bundle['original'].id: bundle['copy'].id for bundle in data_list}
for bundle in data_list:
updated = False
copy = bundle['copy']
for field_schema, fields in iterate_fields(copy.input, copy.process.input_schema):
name = field_schema['name']
value = fields[name]
if field_schema['type'].startswith('data:') and value in mapped_ids:
fields[name] = mapped_ids[value]
updated = True
elif field_schema['type'].startswith('list:data:') and any([id_ in mapped_ids for id_ in value]):
fields[name] = [mapped_ids[id_] if id_ in mapped_ids else id_ for id_ in value]
updated = True
if updated:
copy.save()
return data_list | Rewire inputs of provided data objects.
Input parameter is a list of original and copied data object model
instances: ``[{'original': original, 'copy': copy}]``. This
function finds which objects reference other objects (in the list)
on the input and replaces original objects with the copies (mutates
copies' inputs). | entailment |
def CreateFromDocument(xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(
fallback_namespace=default_namespace, location_base=location_base
)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, pyxb.utils.six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance | Parse the given XML and use the document element to create a Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained. | entailment |
def CreateFromDOM(node, default_namespace=None):
"""Create a Python instance from the given DOM node. The node tag must correspond to
an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}.
"""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace) | Create a Python instance from the given DOM node. The node tag must correspond to
an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}. | entailment |
def postloop(self):
"""Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub.
"""
cmd.Cmd.postloop(self) # Clean up command completion
d1_cli.impl.util.print_info("Exiting...") | Take care of any unfinished business.
Despite the claims in the Cmd documentation, Cmd.postloop() is not a stub. | entailment |
def precmd(self, line):
"""This method is called after the line has been input but before it has been
interpreted.
If you want to modify the input line before execution (for example, variable
substitution) do it here.
"""
line = self.prefix + line
self._history += [line.strip()]
return line | This method is called after the line has been input but before it has been
interpreted.
If you want to modify the input line before execution (for example, variable
substitution) do it here. | entailment |
def default(self, line):
"""Called on an input line when the command prefix is not recognized."""
args = self._split_args(line, 0, 99)
d1_cli.impl.util.print_error("Unknown command: {}".format(args[0])) | Called on an input line when the command prefix is not recognized. | entailment |
def do_help(self, line):
"""Get help on commands "help" or "?" with no arguments displays a list_objects
of commands for which help is available "help <command>" or "?
<command>" gives help on <command>
"""
command, = self._split_args(line, 0, 1)
if command is None:
return self._print_help()
cmd.Cmd.do_help(self, line) | Get help on commands "help" or "?" with no arguments displays a list_objects
of commands for which help is available "help <command>" or "?
<command>" gives help on <command> | entailment |
def do_history(self, line):
"""history Display a list of commands that have been entered."""
self._split_args(line, 0, 0)
for idx, item in enumerate(self._history):
d1_cli.impl.util.print_info("{0: 3d} {1}".format(idx, item)) | history Display a list of commands that have been entered. | entailment |
def do_exit(self, line):
"""exit Exit from the CLI."""
n_remaining_operations = len(self._command_processor.get_operation_queue())
if n_remaining_operations:
d1_cli.impl.util.print_warn(
"""There are {} unperformed operations in the write operation queue. These will
be lost if you exit.""".format(
n_remaining_operations
)
)
if not d1_cli.impl.util.confirm("Exit?", default="yes"):
return
sys.exit() | exit Exit from the CLI. | entailment |
def do_eof(self, line):
"""Exit on system EOF character."""
d1_cli.impl.util.print_info("")
self.do_exit(line) | Exit on system EOF character. | entailment |
def do_set(self, line):
"""set [parameter [value]] set (without parameters): Display the value of all
session variables.
set <session variable>: Display the value of a single session variable. set
<session variable> <value>: Set the value of a session variable.
"""
session_parameter, value = self._split_args(line, 0, 2)
if value is None:
self._command_processor.get_session().print_variable(session_parameter)
else:
self._command_processor.get_session().set_with_conversion(
session_parameter, value
)
self._print_info_if_verbose(
'Set session variable {} to "{}"'.format(session_parameter, value)
) | set [parameter [value]] set (without parameters): Display the value of all
session variables.
set <session variable>: Display the value of a single session variable. set
<session variable> <value>: Set the value of a session variable. | entailment |
def do_load(self, line):
"""load [file] Load session variables from file load (without parameters): Load
session from default file ~/.dataone_cli.conf load.
<file>: Load session from specified file.
"""
config_file = self._split_args(line, 0, 1)[0]
self._command_processor.get_session().load(config_file)
if config_file is None:
config_file = (
self._command_processor.get_session().get_default_pickle_file_path()
)
self._print_info_if_verbose("Loaded session from file: {}".format(config_file)) | load [file] Load session variables from file load (without parameters): Load
session from default file ~/.dataone_cli.conf load.
<file>: Load session from specified file. | entailment |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.