_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
q35300 | LinCombOperator._call | train | def _call(self, x, out=None):
"""Linearly combine ``x`` and write to ``out`` if given."""
if out is None:
out = self.range.element()
out.lincomb(self.a, x[0], self.b, x[1])
return out | python | {
"resource": ""
} |
q35301 | MultiplyOperator._call | train | def _call(self, x, out=None):
"""Multiply ``x`` and write to ``out`` if given."""
if out is None:
return x * self.multiplicand
elif not self.__range_is_field:
if self.__domain_is_field:
out.lincomb(x, self.multiplicand)
else:
out.assign(self.multiplicand * x)
else:
raise ValueError('can only use `out` with `LinearSpace` range') | python | {
"resource": ""
} |
q35302 | PowerOperator._call | train | def _call(self, x, out=None):
"""Take the power of ``x`` and write to ``out`` if given."""
if out is None:
return x ** self.exponent
elif self.__domain_is_field:
raise ValueError('cannot use `out` with field')
else:
out.assign(x)
out **= self.exponent | python | {
"resource": ""
} |
q35303 | NormOperator.derivative | train | def derivative(self, point):
r"""Derivative of this operator in ``point``.
``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``
This is only applicable in inner product spaces.
Parameters
----------
point : `domain` `element-like`
Point in which to take the derivative.
Returns
-------
derivative : `InnerProductOperator`
Raises
------
ValueError
If ``point.norm() == 0``, in which case the derivative is not well
defined in the Frechet sense.
Notes
-----
The derivative cannot be written in a general sense except in Hilbert
spaces, in which case it is given by
.. math::
(D \|\cdot\|)(y)(x) = \langle y / \|y\|, x \rangle
Examples
--------
>>> r3 = odl.rn(3)
>>> op = NormOperator(r3)
>>> derivative = op.derivative([1, 0, 0])
>>> derivative([1, 0, 0])
1.0
"""
point = self.domain.element(point)
norm = point.norm()
if norm == 0:
raise ValueError('not differentiable in 0')
return InnerProductOperator(point / norm) | python | {
"resource": ""
} |
q35304 | DistOperator.derivative | train | def derivative(self, point):
r"""The derivative operator.
``DistOperator(y).derivative(z)(x) ==
((y - z) / y.dist(z)).inner(x)``
This is only applicable in inner product spaces.
Parameters
----------
x : `domain` `element-like`
Point in which to take the derivative.
Returns
-------
derivative : `InnerProductOperator`
Raises
------
ValueError
If ``point == self.vector``, in which case the derivative is not
well defined in the Frechet sense.
Notes
-----
The derivative cannot be written in a general sense except in Hilbert
spaces, in which case it is given by
.. math::
(D d(\cdot, y))(z)(x) = \langle (y-z) / d(y, z), x \rangle
Examples
--------
>>> r2 = odl.rn(2)
>>> x = r2.element([1, 1])
>>> op = DistOperator(x)
>>> derivative = op.derivative([2, 1])
>>> derivative([1, 0])
1.0
"""
point = self.domain.element(point)
diff = point - self.vector
dist = self.vector.dist(point)
if dist == 0:
raise ValueError('not differentiable at the reference vector {!r}'
''.format(self.vector))
return InnerProductOperator(diff / dist) | python | {
"resource": ""
} |
q35305 | ConstantOperator._call | train | def _call(self, x, out=None):
"""Return the constant vector or assign it to ``out``."""
if out is None:
return self.range.element(copy(self.constant))
else:
out.assign(self.constant) | python | {
"resource": ""
} |
q35306 | ConstantOperator.derivative | train | def derivative(self, point):
"""Derivative of this operator, always zero.
Returns
-------
derivative : `ZeroOperator`
Examples
--------
>>> r3 = odl.rn(3)
>>> x = r3.element([1, 2, 3])
>>> op = ConstantOperator(x)
>>> deriv = op.derivative([1, 1, 1])
>>> deriv([2, 2, 2])
rn(3).element([ 0., 0., 0.])
"""
return ZeroOperator(domain=self.domain, range=self.range) | python | {
"resource": ""
} |
q35307 | ZeroOperator._call | train | def _call(self, x, out=None):
"""Return the zero vector or assign it to ``out``."""
if self.domain == self.range:
if out is None:
out = 0 * x
else:
out.lincomb(0, x)
else:
result = self.range.zero()
if out is None:
out = result
else:
out.assign(result)
return out | python | {
"resource": ""
} |
q35308 | ImagPart.inverse | train | def inverse(self):
"""Return the pseudoinverse.
Examples
--------
The inverse is the zero operator if the domain is real:
>>> r3 = odl.rn(3)
>>> op = ImagPart(r3)
>>> op.inverse(op([1, 2, 3]))
rn(3).element([ 0., 0., 0.])
This is not a true inverse, only a pseudoinverse, the real part
will by necessity be lost.
>>> c3 = odl.cn(3)
>>> op = ImagPart(c3)
>>> op.inverse(op([1 + 2j, 2, 3 - 1j]))
cn(3).element([ 0.+2.j, 0.+0.j, -0.-1.j])
"""
if self.space_is_real:
return ZeroOperator(self.domain)
else:
return ComplexEmbedding(self.domain, scalar=1j) | python | {
"resource": ""
} |
q35309 | convert | train | def convert(image, shape, gray=False, dtype='float64', normalize='max'):
"""Convert image to standardized format.
Several properties of the input image may be changed including the shape,
data type and maximal value of the image. In addition, this function may
convert the image into an ODL object and/or a gray scale image.
"""
image = image.astype(dtype)
if gray:
image[..., 0] *= 0.2126
image[..., 1] *= 0.7152
image[..., 2] *= 0.0722
image = np.sum(image, axis=2)
if shape is not None:
image = skimage.transform.resize(image, shape, mode='constant')
image = image.astype(dtype)
if normalize == 'max':
image /= image.max()
elif normalize == 'sum':
image /= image.sum()
else:
assert False
return image | python | {
"resource": ""
} |
q35310 | resolution_phantom | train | def resolution_phantom(shape=None):
"""Resolution phantom for tomographic simulations.
Returns
-------
An image with the following properties:
image type: gray scales
shape: [1024, 1024] (if not specified by `size`)
scale: [0, 1]
type: float64
"""
# TODO: Store data in some ODL controlled url
# TODO: This can be also done with ODL's ellipse_phantom
name = 'phantom_resolution.mat'
url = URL_CAM + name
dct = get_data(name, subset=DATA_SUBSET, url=url)
im = np.rot90(dct['im'], k=3)
return convert(im, shape) | python | {
"resource": ""
} |
q35311 | building | train | def building(shape=None, gray=False):
"""Photo of the Centre for Mathematical Sciences in Cambridge.
Returns
-------
An image with the following properties:
image type: color (or gray scales if `gray=True`)
size: [442, 331] (if not specified by `size`)
scale: [0, 1]
type: float64
"""
# TODO: Store data in some ODL controlled url
name = 'cms.mat'
url = URL_CAM + name
dct = get_data(name, subset=DATA_SUBSET, url=url)
im = np.rot90(dct['im'], k=3)
return convert(im, shape, gray=gray) | python | {
"resource": ""
} |
q35312 | blurring_kernel | train | def blurring_kernel(shape=None):
"""Blurring kernel for convolution simulations.
The kernel is scaled to sum to one.
Returns
-------
An image with the following properties:
image type: gray scales
size: [100, 100] (if not specified by `size`)
scale: [0, 1]
type: float64
"""
# TODO: Store data in some ODL controlled url
name = 'motionblur.mat'
url = URL_CAM + name
dct = get_data(name, subset=DATA_SUBSET, url=url)
return convert(255 - dct['im'], shape, normalize='sum') | python | {
"resource": ""
} |
q35313 | TensorSpace.real_space | train | def real_space(self):
"""The space corresponding to this space's `real_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`real_space` not defined for non-numeric `dtype`')
return self.astype(self.real_dtype) | python | {
"resource": ""
} |
q35314 | TensorSpace.complex_space | train | def complex_space(self):
"""The space corresponding to this space's `complex_dtype`.
Raises
------
ValueError
If `dtype` is not a numeric data type.
"""
if not is_numeric_dtype(self.dtype):
raise ValueError(
'`complex_space` not defined for non-numeric `dtype`')
return self.astype(self.complex_dtype) | python | {
"resource": ""
} |
q35315 | TensorSpace.examples | train | def examples(self):
"""Return example random vectors."""
# Always return the same numbers
rand_state = np.random.get_state()
np.random.seed(1337)
if is_numeric_dtype(self.dtype):
yield ('Linearly spaced samples', self.element(
np.linspace(0, 1, self.size).reshape(self.shape)))
yield ('Normally distributed noise',
self.element(np.random.standard_normal(self.shape)))
if self.is_real:
yield ('Uniformly distributed noise',
self.element(np.random.uniform(size=self.shape)))
elif self.is_complex:
yield ('Uniformly distributed noise',
self.element(np.random.uniform(size=self.shape) +
np.random.uniform(size=self.shape) * 1j))
else:
# TODO: return something that always works, like zeros or ones?
raise NotImplementedError('no examples available for non-numeric'
'data type')
np.random.set_state(rand_state) | python | {
"resource": ""
} |
q35316 | astra_supports | train | def astra_supports(feature):
"""Return bool indicating whether current ASTRA supports ``feature``.
Parameters
----------
feature : str
Name of a potential feature of ASTRA. See ``ASTRA_FEATURES`` for
possible values.
Returns
-------
supports : bool
``True`` if the currently imported version of ASTRA supports the
feature in question, ``False`` otherwise.
"""
from odl.util.utility import pkg_supports
return pkg_supports(feature, ASTRA_VERSION, ASTRA_FEATURES) | python | {
"resource": ""
} |
q35317 | astra_volume_geometry | train | def astra_volume_geometry(reco_space):
"""Create an ASTRA volume geometry from the discretized domain.
From the ASTRA documentation:
In all 3D geometries, the coordinate system is defined around the
reconstruction volume. The center of the reconstruction volume is the
origin, and the sides of the voxels in the volume have length 1.
All dimensions in the projection geometries are relative to this unit
length.
Parameters
----------
reco_space : `DiscreteLp`
Discretized space where the reconstruction (volume) lives.
It must be 2- or 3-dimensional and uniformly discretized.
Returns
-------
astra_geom : dict
Raises
------
NotImplementedError
If the cell sizes are not the same in each dimension.
"""
if not isinstance(reco_space, DiscreteLp):
raise TypeError('`reco_space` {!r} is not a DiscreteLp instance'
''.format(reco_space))
if not reco_space.is_uniform:
raise ValueError('`reco_space` {} is not uniformly discretized')
vol_shp = reco_space.partition.shape
vol_min = reco_space.partition.min_pt
vol_max = reco_space.partition.max_pt
if reco_space.ndim == 2:
# ASTRA does in principle support custom minimum and maximum
# values for the volume extent also in earlier versions, but running
# the algorithm fails if voxels are non-isotropic.
if (not reco_space.partition.has_isotropic_cells and
not astra_supports('anisotropic_voxels_2d')):
raise NotImplementedError(
'non-isotropic pixels in 2d volumes not supported by ASTRA '
'v{}'.format(ASTRA_VERSION))
# Given a 2D array of shape (x, y), a volume geometry is created as:
# astra.create_vol_geom(x, y, y_min, y_max, x_min, x_max)
# yielding a dictionary:
# {'GridRowCount': x,
# 'GridColCount': y,
# 'WindowMinX': y_min,
# 'WindowMaxX': y_max,
# 'WindowMinY': x_min,
# 'WindowMaxY': x_max}
#
# NOTE: this setting is flipped with respect to x and y. We do this
# as part of a global rotation of the geometry by -90 degrees, which
# avoids rotating the data.
# NOTE: We need to flip the sign of the (ODL) x component since
# ASTRA seems to move it in the other direction. Not quite clear
# why.
vol_geom = astra.create_vol_geom(vol_shp[0], vol_shp[1],
vol_min[1], vol_max[1],
-vol_max[0], -vol_min[0])
elif reco_space.ndim == 3:
# Not supported in all versions of ASTRA
if (not reco_space.partition.has_isotropic_cells and
not astra_supports('anisotropic_voxels_3d')):
raise NotImplementedError(
'non-isotropic voxels in 3d volumes not supported by ASTRA '
'v{}'.format(ASTRA_VERSION))
# Given a 3D array of shape (x, y, z), a volume geometry is created as:
# astra.create_vol_geom(y, z, x, z_min, z_max, y_min, y_max,
# x_min, x_max),
# yielding a dictionary:
# {'GridColCount': z,
# 'GridRowCount': y,
# 'GridSliceCount': x,
# 'WindowMinX': z_max,
# 'WindowMaxX': z_max,
# 'WindowMinY': y_min,
# 'WindowMaxY': y_min,
# 'WindowMinZ': x_min,
# 'WindowMaxZ': x_min}
vol_geom = astra.create_vol_geom(vol_shp[1], vol_shp[2], vol_shp[0],
vol_min[2], vol_max[2],
vol_min[1], vol_max[1],
vol_min[0], vol_max[0])
else:
raise ValueError('{}-dimensional volume geometries not supported '
'by ASTRA'.format(reco_space.ndim))
return vol_geom | python | {
"resource": ""
} |
q35318 | astra_projection_geometry | train | def astra_projection_geometry(geometry):
"""Create an ASTRA projection geometry from an ODL geometry object.
As of ASTRA version 1.7, the length values are not required any more to be
rescaled for 3D geometries and non-unit (but isotropic) voxel sizes.
Parameters
----------
geometry : `Geometry`
ODL projection geometry from which to create the ASTRA geometry.
Returns
-------
proj_geom : dict
Dictionary defining the ASTRA projection geometry.
"""
if not isinstance(geometry, Geometry):
raise TypeError('`geometry` {!r} is not a `Geometry` instance'
''.format(geometry))
if 'astra' in geometry.implementation_cache:
# Shortcut, reuse already computed value.
return geometry.implementation_cache['astra']
if not geometry.det_partition.is_uniform:
raise ValueError('non-uniform detector sampling is not supported')
if (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
# TODO: change to parallel_vec when available
det_width = geometry.det_partition.cell_sides[0]
det_count = geometry.detector.size
# Instead of rotating the data by 90 degrees counter-clockwise,
# we subtract pi/2 from the geometry angles, thereby rotating the
# geometry by 90 degrees clockwise
angles = geometry.angles - np.pi / 2
proj_geom = astra.create_proj_geom('parallel', det_width, det_count,
angles)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 2):
det_count = geometry.detector.size
vec = astra_conebeam_2d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('fanflat_vec', det_count, vec)
elif (isinstance(geometry, ParallelBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_parallel_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('parallel3d_vec', det_row_count,
det_col_count, vec)
elif (isinstance(geometry, DivergentBeamGeometry) and
isinstance(geometry.detector, (Flat1dDetector, Flat2dDetector)) and
geometry.ndim == 3):
# Swap detector axes (see astra_*_3d_to_vec)
det_row_count = geometry.det_partition.shape[0]
det_col_count = geometry.det_partition.shape[1]
vec = astra_conebeam_3d_geom_to_vec(geometry)
proj_geom = astra.create_proj_geom('cone_vec', det_row_count,
det_col_count, vec)
else:
raise NotImplementedError('unknown ASTRA geometry type {!r}'
''.format(geometry))
if 'astra' not in geometry.implementation_cache:
# Save computed value for later
geometry.implementation_cache['astra'] = proj_geom
return proj_geom | python | {
"resource": ""
} |
q35319 | astra_data | train | def astra_data(astra_geom, datatype, data=None, ndim=2, allow_copy=False):
"""Create an ASTRA data object.
Parameters
----------
astra_geom : dict
ASTRA geometry object for the data creator, must correspond to the
given ``datatype``.
datatype : {'volume', 'projection'}
Type of the data container.
data : `DiscreteLpElement` or `numpy.ndarray`, optional
Data for the initialization of the data object. If ``None``,
an ASTRA data object filled with zeros is created.
ndim : {2, 3}, optional
Dimension of the data. If ``data`` is provided, this parameter
has no effect.
allow_copy : `bool`, optional
If ``True``, allow copying of ``data``. This means that anything
written by ASTRA to the returned object will not be written to
``data``.
Returns
-------
id : int
Handle for the new ASTRA internal data object.
"""
if data is not None:
if isinstance(data, (DiscreteLpElement, np.ndarray)):
ndim = data.ndim
else:
raise TypeError('`data` {!r} is neither DiscreteLpElement '
'instance nor a `numpy.ndarray`'.format(data))
else:
ndim = int(ndim)
if datatype == 'volume':
astra_dtype_str = '-vol'
elif datatype == 'projection':
astra_dtype_str = '-sino'
else:
raise ValueError('`datatype` {!r} not understood'.format(datatype))
# Get the functions from the correct module
if ndim == 2:
link = astra.data2d.link
create = astra.data2d.create
elif ndim == 3:
link = astra.data3d.link
create = astra.data3d.create
else:
raise ValueError('{}-dimensional data not supported'
''.format(ndim))
# ASTRA checks if data is c-contiguous and aligned
if data is not None:
if allow_copy:
data_array = np.asarray(data, dtype='float32', order='C')
return link(astra_dtype_str, astra_geom, data_array)
else:
if isinstance(data, np.ndarray):
return link(astra_dtype_str, astra_geom, data)
elif data.tensor.impl == 'numpy':
return link(astra_dtype_str, astra_geom, data.asarray())
else:
# Something else than NumPy data representation
raise NotImplementedError('ASTRA supports data wrapping only '
'for `numpy.ndarray` instances, got '
'{!r}'.format(data))
else:
return create(astra_dtype_str, astra_geom) | python | {
"resource": ""
} |
q35320 | astra_projector | train | def astra_projector(vol_interp, astra_vol_geom, astra_proj_geom, ndim, impl):
"""Create an ASTRA projector configuration dictionary.
Parameters
----------
vol_interp : {'nearest', 'linear'}
Interpolation type of the volume discretization. This determines
the projection model that is chosen.
astra_vol_geom : dict
ASTRA volume geometry.
astra_proj_geom : dict
ASTRA projection geometry.
ndim : {2, 3}
Number of dimensions of the projector.
impl : {'cpu', 'cuda'}
Implementation of the projector.
Returns
-------
proj_id : int
Handle for the created ASTRA internal projector object.
"""
if vol_interp not in ('nearest', 'linear'):
raise ValueError("`vol_interp` '{}' not understood"
''.format(vol_interp))
impl = str(impl).lower()
if impl not in ('cpu', 'cuda'):
raise ValueError("`impl` '{}' not understood"
''.format(impl))
if 'type' not in astra_proj_geom:
raise ValueError('invalid projection geometry dict {}'
''.format(astra_proj_geom))
if ndim == 3 and impl == 'cpu':
raise ValueError('3D projectors not supported on CPU')
ndim = int(ndim)
proj_type = astra_proj_geom['type']
if proj_type not in ('parallel', 'fanflat', 'fanflat_vec',
'parallel3d', 'parallel3d_vec', 'cone', 'cone_vec'):
raise ValueError('invalid geometry type {!r}'.format(proj_type))
# Mapping from interpolation type and geometry to ASTRA projector type.
# "I" means probably mathematically inconsistent. Some projectors are
# not implemented, e.g. CPU 3d projectors in general.
type_map_cpu = {'parallel': {'nearest': 'line',
'linear': 'linear'}, # I
'fanflat': {'nearest': 'line_fanflat',
'linear': 'line_fanflat'}, # I
'parallel3d': {'nearest': 'linear3d', # I
'linear': 'linear3d'}, # I
'cone': {'nearest': 'linearcone', # I
'linear': 'linearcone'}} # I
type_map_cpu['fanflat_vec'] = type_map_cpu['fanflat']
type_map_cpu['parallel3d_vec'] = type_map_cpu['parallel3d']
type_map_cpu['cone_vec'] = type_map_cpu['cone']
# GPU algorithms not necessarily require a projector, but will in future
# releases making the interface more coherent regarding CPU and GPU
type_map_cuda = {'parallel': 'cuda', # I
'parallel3d': 'cuda3d'} # I
type_map_cuda['fanflat'] = type_map_cuda['parallel']
type_map_cuda['fanflat_vec'] = type_map_cuda['fanflat']
type_map_cuda['cone'] = type_map_cuda['parallel3d']
type_map_cuda['parallel3d_vec'] = type_map_cuda['parallel3d']
type_map_cuda['cone_vec'] = type_map_cuda['cone']
# create config dict
proj_cfg = {}
if impl == 'cpu':
proj_cfg['type'] = type_map_cpu[proj_type][vol_interp]
else: # impl == 'cuda'
proj_cfg['type'] = type_map_cuda[proj_type]
proj_cfg['VolumeGeometry'] = astra_vol_geom
proj_cfg['ProjectionGeometry'] = astra_proj_geom
proj_cfg['options'] = {}
# Add the hacky 1/r^2 weighting exposed in intermediate versions of
# ASTRA
if (proj_type in ('cone', 'cone_vec') and
astra_supports('cone3d_hacky_density_weighting')):
proj_cfg['options']['DensityWeighting'] = True
if ndim == 2:
return astra.projector.create(proj_cfg)
else:
return astra.projector3d.create(proj_cfg) | python | {
"resource": ""
} |
q35321 | astra_algorithm | train | def astra_algorithm(direction, ndim, vol_id, sino_id, proj_id, impl):
"""Create an ASTRA algorithm object to run the projector.
Parameters
----------
direction : {'forward', 'backward'}
For ``'forward'``, apply the forward projection, for ``'backward'``
the backprojection.
ndim : {2, 3}
Number of dimensions of the projector.
vol_id : int
Handle for the ASTRA volume data object.
sino_id : int
Handle for the ASTRA projection data object.
proj_id : int
Handle for the ASTRA projector object.
impl : {'cpu', 'cuda'}
Implementation of the projector.
Returns
-------
id : int
Handle for the created ASTRA internal algorithm object.
"""
if direction not in ('forward', 'backward'):
raise ValueError("`direction` '{}' not understood".format(direction))
if ndim not in (2, 3):
raise ValueError('{}-dimensional projectors not supported'
''.format(ndim))
if impl not in ('cpu', 'cuda'):
raise ValueError("`impl` type '{}' not understood"
''.format(impl))
if ndim == 3 and impl == 'cpu':
raise NotImplementedError(
'3d algorithms for CPU not supported by ASTRA')
if proj_id is None and impl == 'cpu':
raise ValueError("'cpu' implementation requires projector ID")
algo_map = {'forward': {2: {'cpu': 'FP', 'cuda': 'FP_CUDA'},
3: {'cpu': None, 'cuda': 'FP3D_CUDA'}},
'backward': {2: {'cpu': 'BP', 'cuda': 'BP_CUDA'},
3: {'cpu': None, 'cuda': 'BP3D_CUDA'}}}
algo_cfg = {'type': algo_map[direction][ndim][impl],
'ProjectorId': proj_id,
'ProjectionDataId': sino_id}
if direction is 'forward':
algo_cfg['VolumeDataId'] = vol_id
else:
algo_cfg['ReconstructionDataId'] = vol_id
# Create ASTRA algorithm object
return astra.algorithm.create(algo_cfg) | python | {
"resource": ""
} |
q35322 | space_shape | train | def space_shape(space):
"""Return ``space.shape``, including power space base shape.
If ``space`` is a power space, return ``(len(space),) + space[0].shape``,
otherwise return ``space.shape``.
"""
if isinstance(space, odl.ProductSpace) and space.is_power_space:
return (len(space),) + space[0].shape
else:
return space.shape | python | {
"resource": ""
} |
q35323 | Convolution._call | train | def _call(self, x):
"""Implement calling the operator by calling scipy."""
return scipy.signal.fftconvolve(self.kernel, x, mode='same') | python | {
"resource": ""
} |
q35324 | apply_on_boundary | train | def apply_on_boundary(array, func, only_once=True, which_boundaries=None,
axis_order=None, out=None):
"""Apply a function of the boundary of an n-dimensional array.
All other values are preserved as-is.
Parameters
----------
array : `array-like`
Modify the boundary of this array
func : callable or sequence of callables
If a single function is given, assign
``array[slice] = func(array[slice])`` on the boundary slices,
e.g. use ``lamda x: x / 2`` to divide values by 2.
A sequence of functions is applied per axis separately. It
must have length ``array.ndim`` and may consist of one function
or a 2-tuple of functions per axis.
``None`` entries in a sequence cause the axis (side) to be
skipped.
only_once : bool, optional
If ``True``, ensure that each boundary point appears in exactly
one slice. If ``func`` is a list of functions, the
``axis_order`` determines which functions are applied to nodes
which appear in multiple slices, according to the principle
"first-come, first-served".
which_boundaries : sequence, optional
If provided, this sequence determines per axis whether to
apply the function at the boundaries in each axis. The entry
in each axis may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``. ``None`` is interpreted as
"all boundaries".
axis_order : sequence of ints, optional
Permutation of ``range(array.ndim)`` defining the order in which
to process the axes. If combined with ``only_once`` and a
function list, this determines which function is evaluated in
the points that are potentially processed multiple times.
out : `numpy.ndarray`, optional
Location in which to store the result, can be the same as ``array``.
Default: copy of ``array``
Examples
--------
>>> arr = np.ones((3, 3))
>>> apply_on_boundary(arr, lambda x: x / 2)
array([[ 0.5, 0.5, 0.5],
[ 0.5, 1. , 0.5],
[ 0.5, 0.5, 0.5]])
If called with ``only_once=False``, the function is applied repeatedly:
>>> apply_on_boundary(arr, lambda x: x / 2, only_once=False)
array([[ 0.25, 0.5 , 0.25],
[ 0.5 , 1. , 0.5 ],
[ 0.25, 0.5 , 0.25]])
>>> apply_on_boundary(arr, lambda x: x / 2, only_once=True,
... which_boundaries=((True, False), True))
array([[ 0.5, 0.5, 0.5],
[ 0.5, 1. , 0.5],
[ 0.5, 1. , 0.5]])
Use the ``out`` parameter to store the result in an existing array:
>>> out = np.empty_like(arr)
>>> result = apply_on_boundary(arr, lambda x: x / 2, out=out)
>>> result
array([[ 0.5, 0.5, 0.5],
[ 0.5, 1. , 0.5],
[ 0.5, 0.5, 0.5]])
>>> result is out
True
"""
array = np.asarray(array)
if callable(func):
func = [func] * array.ndim
elif len(func) != array.ndim:
raise ValueError('sequence of functions has length {}, expected {}'
''.format(len(func), array.ndim))
if which_boundaries is None:
which_boundaries = ([(True, True)] * array.ndim)
elif len(which_boundaries) != array.ndim:
raise ValueError('`which_boundaries` has length {}, expected {}'
''.format(len(which_boundaries), array.ndim))
if axis_order is None:
axis_order = list(range(array.ndim))
elif len(axis_order) != array.ndim:
raise ValueError('`axis_order` has length {}, expected {}'
''.format(len(axis_order), array.ndim))
if out is None:
out = array.copy()
else:
out[:] = array # Self assignment is free, in case out is array
# The 'only_once' functionality is implemented by storing for each axis
# if the left and right boundaries have been processed. This information
# is stored in a list of slices which is reused for the next axis in the
# list.
slices = [slice(None)] * array.ndim
for ax, function, which in zip(axis_order, func, which_boundaries):
if only_once:
slc_l = list(slices) # Make a copy; copy() exists in Py3 only
slc_r = list(slices)
else:
slc_l = [slice(None)] * array.ndim
slc_r = [slice(None)] * array.ndim
# slc_l and slc_r select left and right boundary, resp, in this axis.
slc_l[ax] = 0
slc_r[ax] = -1
slc_l, slc_r = tuple(slc_l), tuple(slc_r)
try:
# Tuple of functions in this axis
func_l, func_r = function
except TypeError:
# Single function
func_l = func_r = function
try:
# Tuple of bool
mod_left, mod_right = which
except TypeError:
# Single bool
mod_left = mod_right = which
if mod_left and func_l is not None:
out[slc_l] = func_l(out[slc_l])
start = 1
else:
start = None
if mod_right and func_r is not None:
out[slc_r] = func_r(out[slc_r])
end = -1
else:
end = None
# Write the information for the processed axis into the slice list.
# Start and end include the boundary if it was processed.
slices[ax] = slice(start, end)
return out | python | {
"resource": ""
} |
q35325 | fast_1d_tensor_mult | train | def fast_1d_tensor_mult(ndarr, onedim_arrs, axes=None, out=None):
"""Fast multiplication of an n-dim array with an outer product.
This method implements the multiplication of an n-dimensional array
with an outer product of one-dimensional arrays, e.g.::
a = np.ones((10, 10, 10))
x = np.random.rand(10)
a *= x[:, None, None] * x[None, :, None] * x[None, None, :]
Basically, there are two ways to do such an operation:
1. First calculate the factor on the right-hand side and do one
"big" multiplication; or
2. Multiply by one factor at a time.
The procedure of building up the large factor in the first method
is relatively cheap if the number of 1d arrays is smaller than the
number of dimensions. For exactly n vectors, the second method is
faster, although it loops of the array ``a`` n times.
This implementation combines the two ideas into a hybrid scheme:
- If there are less 1d arrays than dimensions, choose 1.
- Otherwise, calculate the factor array for n-1 arrays
and multiply it to the large array. Finally, multiply with the
last 1d array.
The advantage of this approach is that it is memory-friendly and
loops over the big array only twice.
Parameters
----------
ndarr : `array-like`
Array to multiply to
onedim_arrs : sequence of `array-like`'s
One-dimensional arrays to be multiplied with ``ndarr``. The
sequence may not be longer than ``ndarr.ndim``.
axes : sequence of ints, optional
Take the 1d transform along these axes. ``None`` corresponds to
the last ``len(onedim_arrs)`` axes, in ascending order.
out : `numpy.ndarray`, optional
Array in which the result is stored
Returns
-------
out : `numpy.ndarray`
Result of the modification. If ``out`` was given, the returned
object is a reference to it.
"""
if out is None:
out = np.array(ndarr, copy=True)
else:
out[:] = ndarr # Self-assignment is free if out is ndarr
if not onedim_arrs:
raise ValueError('no 1d arrays given')
if axes is None:
axes = list(range(out.ndim - len(onedim_arrs), out.ndim))
axes_in = None
elif len(axes) != len(onedim_arrs):
raise ValueError('there are {} 1d arrays, but {} axes entries'
''.format(len(onedim_arrs), len(axes)))
else:
# Make axes positive
axes, axes_in = np.array(axes, dtype=int), axes
axes[axes < 0] += out.ndim
axes = list(axes)
if not all(0 <= ai < out.ndim for ai in axes):
raise ValueError('`axes` {} out of bounds for {} dimensions'
''.format(axes_in, out.ndim))
# Make scalars 1d arrays and squeezable arrays 1d
alist = [np.atleast_1d(np.asarray(a).squeeze()) for a in onedim_arrs]
if any(a.ndim != 1 for a in alist):
raise ValueError('only 1d arrays allowed')
if len(axes) < out.ndim:
# Make big factor array (start with 0d)
factor = np.array(1.0)
for ax, arr in zip(axes, alist):
# Meshgrid-style slice
slc = [None] * out.ndim
slc[ax] = slice(None)
factor = factor * arr[tuple(slc)]
out *= factor
else:
# Hybrid approach
# Get the axis to spare for the final multiplication, the one
# with the largest stride.
last_ax = np.argmax(out.strides)
last_arr = alist[axes.index(last_ax)]
# Build the semi-big array and multiply
factor = np.array(1.0)
for ax, arr in zip(axes, alist):
if ax == last_ax:
continue
slc = [None] * out.ndim
slc[ax] = slice(None)
factor = factor * arr[tuple(slc)]
out *= factor
# Finally multiply by the remaining 1d array
slc = [None] * out.ndim
slc[last_ax] = slice(None)
out *= last_arr[tuple(slc)]
return out | python | {
"resource": ""
} |
q35326 | _intersection_slice_tuples | train | def _intersection_slice_tuples(lhs_arr, rhs_arr, offset):
"""Return tuples to yield the intersecting part of both given arrays.
The returned slices ``lhs_slc`` and ``rhs_slc`` are such that
``lhs_arr[lhs_slc]`` and ``rhs_arr[rhs_slc]`` have the same shape.
The ``offset`` parameter determines how much is skipped/added on the
"left" side (small indices).
"""
lhs_slc, rhs_slc = [], []
for istart, n_lhs, n_rhs in zip(offset, lhs_arr.shape, rhs_arr.shape):
# Slice for the inner part in the larger array corresponding to the
# small one, offset by the given amount
istop = istart + min(n_lhs, n_rhs)
inner_slc = slice(istart, istop)
if n_lhs > n_rhs:
# Extension
lhs_slc.append(inner_slc)
rhs_slc.append(slice(None))
elif n_lhs < n_rhs:
# Restriction
lhs_slc.append(slice(None))
rhs_slc.append(inner_slc)
else:
# Same size, so full slices for both
lhs_slc.append(slice(None))
rhs_slc.append(slice(None))
return tuple(lhs_slc), tuple(rhs_slc) | python | {
"resource": ""
} |
q35327 | _assign_intersection | train | def _assign_intersection(lhs_arr, rhs_arr, offset):
"""Assign the intersecting region from ``rhs_arr`` to ``lhs_arr``."""
lhs_slc, rhs_slc = _intersection_slice_tuples(lhs_arr, rhs_arr, offset)
lhs_arr[lhs_slc] = rhs_arr[rhs_slc] | python | {
"resource": ""
} |
q35328 | _padding_slices_outer | train | def _padding_slices_outer(lhs_arr, rhs_arr, axis, offset):
"""Return slices into the outer array part where padding is applied.
When padding is performed, these slices yield the outer (excess) part
of the larger array that is to be filled with values. Slices for
both sides of the arrays in a given ``axis`` are returned.
The same slices are used also in the adjoint padding correction,
however in a different way.
See `the online documentation
<https://odlgroup.github.io/odl/math/resizing_ops.html>`_
on resizing operators for details.
"""
istart_inner = offset[axis]
istop_inner = istart_inner + min(lhs_arr.shape[axis], rhs_arr.shape[axis])
return slice(istart_inner), slice(istop_inner, None) | python | {
"resource": ""
} |
q35329 | _padding_slices_inner | train | def _padding_slices_inner(lhs_arr, rhs_arr, axis, offset, pad_mode):
"""Return slices into the inner array part for a given ``pad_mode``.
When performing padding, these slices yield the values from the inner
part of a larger array that are to be assigned to the excess part
of the same array. Slices for both sides ("left", "right") of
the arrays in a given ``axis`` are returned.
"""
# Calculate the start and stop indices for the inner part
istart_inner = offset[axis]
n_large = max(lhs_arr.shape[axis], rhs_arr.shape[axis])
n_small = min(lhs_arr.shape[axis], rhs_arr.shape[axis])
istop_inner = istart_inner + n_small
# Number of values padded to left and right
n_pad_l = istart_inner
n_pad_r = n_large - istop_inner
if pad_mode == 'periodic':
# left: n_pad_l forward, ending at istop_inner - 1
pad_slc_l = slice(istop_inner - n_pad_l, istop_inner)
# right: n_pad_r forward, starting at istart_inner
pad_slc_r = slice(istart_inner, istart_inner + n_pad_r)
elif pad_mode == 'symmetric':
# left: n_pad_l backward, ending at istart_inner + 1
pad_slc_l = slice(istart_inner + n_pad_l, istart_inner, -1)
# right: n_pad_r backward, starting at istop_inner - 2
# For the corner case that the stopping index is -1, we need to
# replace it with None, since -1 as stopping index is equivalent
# to the last index, which is not what we want (0 as last index).
istop_r = istop_inner - 2 - n_pad_r
if istop_r == -1:
istop_r = None
pad_slc_r = slice(istop_inner - 2, istop_r, -1)
elif pad_mode in ('order0', 'order1'):
# left: only the first entry, using a slice to avoid squeezing
pad_slc_l = slice(istart_inner, istart_inner + 1)
# right: only last entry
pad_slc_r = slice(istop_inner - 1, istop_inner)
else:
# Slices are not used, returning trivial ones. The function should not
# be used for other modes anyway.
pad_slc_l, pad_slc_r = slice(0), slice(0)
return pad_slc_l, pad_slc_r | python | {
"resource": ""
} |
q35330 | zscore | train | def zscore(arr):
"""Return arr normalized with mean 0 and unit variance.
If the input has 0 variance, the result will also have 0 variance.
Parameters
----------
arr : array-like
Returns
-------
zscore : array-like
Examples
--------
Compute the z score for a small array:
>>> result = zscore([1, 0])
>>> result
array([ 1., -1.])
>>> np.mean(result)
0.0
>>> np.std(result)
1.0
Does not re-scale in case the input is constant (has 0 variance):
>>> zscore([1, 1])
array([ 0., 0.])
"""
arr = arr - np.mean(arr)
std = np.std(arr)
if std != 0:
arr /= std
return arr | python | {
"resource": ""
} |
q35331 | FunctionSpace.real_out_dtype | train | def real_out_dtype(self):
"""The real dtype corresponding to this space's `out_dtype`."""
if self.__real_out_dtype is None:
raise AttributeError(
'no real variant of output dtype {} defined'
''.format(dtype_repr(self.scalar_out_dtype)))
else:
return self.__real_out_dtype | python | {
"resource": ""
} |
q35332 | FunctionSpace.complex_out_dtype | train | def complex_out_dtype(self):
"""The complex dtype corresponding to this space's `out_dtype`."""
if self.__complex_out_dtype is None:
raise AttributeError(
'no complex variant of output dtype {} defined'
''.format(dtype_repr(self.scalar_out_dtype)))
else:
return self.__complex_out_dtype | python | {
"resource": ""
} |
q35333 | FunctionSpace.zero | train | def zero(self):
"""Function mapping anything to zero."""
# Since `FunctionSpace.lincomb` may be slow, we implement this
# function directly.
# The unused **kwargs are needed to support combination with
# functions that take parameters.
def zero_vec(x, out=None, **kwargs):
"""Zero function, vectorized."""
if is_valid_input_meshgrid(x, self.domain.ndim):
scalar_out_shape = out_shape_from_meshgrid(x)
elif is_valid_input_array(x, self.domain.ndim):
scalar_out_shape = out_shape_from_array(x)
else:
raise TypeError('invalid input type')
# For tensor-valued functions
out_shape = self.out_shape + scalar_out_shape
if out is None:
return np.zeros(out_shape, dtype=self.scalar_out_dtype)
else:
# Need to go through an array to fill with the correct
# zero value for all dtypes
fill_value = np.zeros(1, dtype=self.scalar_out_dtype)[0]
out.fill(fill_value)
return self.element_type(self, zero_vec) | python | {
"resource": ""
} |
q35334 | FunctionSpace.one | train | def one(self):
"""Function mapping anything to one."""
# See zero() for remarks
def one_vec(x, out=None, **kwargs):
"""One function, vectorized."""
if is_valid_input_meshgrid(x, self.domain.ndim):
scalar_out_shape = out_shape_from_meshgrid(x)
elif is_valid_input_array(x, self.domain.ndim):
scalar_out_shape = out_shape_from_array(x)
else:
raise TypeError('invalid input type')
out_shape = self.out_shape + scalar_out_shape
if out is None:
return np.ones(out_shape, dtype=self.scalar_out_dtype)
else:
fill_value = np.ones(1, dtype=self.scalar_out_dtype)[0]
out.fill(fill_value)
return self.element_type(self, one_vec) | python | {
"resource": ""
} |
q35335 | FunctionSpace.astype | train | def astype(self, out_dtype):
"""Return a copy of this space with new ``out_dtype``.
Parameters
----------
out_dtype :
Output data type of the returned space. Can be given in any
way `numpy.dtype` understands, e.g. as string (``'complex64'``)
or built-in type (``complex``). ``None`` is interpreted as
``'float64'``.
Returns
-------
newspace : `FunctionSpace`
The version of this space with given data type
"""
out_dtype = np.dtype(out_dtype)
if out_dtype == self.out_dtype:
return self
# Try to use caching for real and complex versions (exact dtype
# mappings). This may fail for certain dtype, in which case we
# just go to `_astype` directly.
real_dtype = getattr(self, 'real_out_dtype', None)
if real_dtype is None:
return self._astype(out_dtype)
else:
if out_dtype == real_dtype:
if self.__real_space is None:
self.__real_space = self._astype(out_dtype)
return self.__real_space
elif out_dtype == self.complex_out_dtype:
if self.__complex_space is None:
self.__complex_space = self._astype(out_dtype)
return self.__complex_space
else:
return self._astype(out_dtype) | python | {
"resource": ""
} |
q35336 | FunctionSpace._lincomb | train | def _lincomb(self, a, f1, b, f2, out):
"""Linear combination of ``f1`` and ``f2``.
Notes
-----
The additions and multiplications are implemented via simple
Python functions, so non-vectorized versions are slow.
"""
# Avoid infinite recursions by making a copy of the functions
f1_copy = f1.copy()
f2_copy = f2.copy()
def lincomb_oop(x, **kwargs):
"""Linear combination, out-of-place version."""
# Not optimized since that raises issues with alignment
# of input and partial results
out = a * np.asarray(f1_copy(x, **kwargs),
dtype=self.scalar_out_dtype)
tmp = b * np.asarray(f2_copy(x, **kwargs),
dtype=self.scalar_out_dtype)
out += tmp
return out
out._call_out_of_place = lincomb_oop
decorator = preload_first_arg(out, 'in-place')
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | python | {
"resource": ""
} |
q35337 | FunctionSpace._multiply | train | def _multiply(self, f1, f2, out):
"""Pointwise multiplication of ``f1`` and ``f2``.
Notes
-----
The multiplication is implemented with a simple Python
function, so the non-vectorized versions are slow.
"""
# Avoid infinite recursions by making a copy of the functions
f1_copy = f1.copy()
f2_copy = f2.copy()
def product_oop(x, **kwargs):
"""Product out-of-place evaluation function."""
return np.asarray(f1_copy(x, **kwargs) * f2_copy(x, **kwargs),
dtype=self.scalar_out_dtype)
out._call_out_of_place = product_oop
decorator = preload_first_arg(out, 'in-place')
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | python | {
"resource": ""
} |
q35338 | FunctionSpace._scalar_power | train | def _scalar_power(self, f, p, out):
"""Compute ``p``-th power of ``f`` for ``p`` scalar."""
# Avoid infinite recursions by making a copy of the function
f_copy = f.copy()
def pow_posint(x, n):
"""Power function for positive integer ``n``, out-of-place."""
if isinstance(x, np.ndarray):
y = x.copy()
return ipow_posint(y, n)
else:
return x ** n
def ipow_posint(x, n):
"""Power function for positive integer ``n``, in-place."""
if n == 1:
return x
elif n % 2 == 0:
x *= x
return ipow_posint(x, n // 2)
else:
tmp = x.copy()
x *= x
ipow_posint(x, n // 2)
x *= tmp
return x
def power_oop(x, **kwargs):
"""Power out-of-place evaluation function."""
if p == 0:
return self.one()
elif p == int(p) and p >= 1:
return np.asarray(pow_posint(f_copy(x, **kwargs), int(p)),
dtype=self.scalar_out_dtype)
else:
result = np.power(f_copy(x, **kwargs), p)
return result.astype(self.scalar_out_dtype)
out._call_out_of_place = power_oop
decorator = preload_first_arg(out, 'in-place')
out._call_in_place = decorator(_default_in_place)
out._call_has_out = out._call_out_optional = False
return out | python | {
"resource": ""
} |
q35339 | FunctionSpace._realpart | train | def _realpart(self, f):
"""Function returning the real part of the result from ``f``."""
def f_re(x, **kwargs):
result = np.asarray(f(x, **kwargs),
dtype=self.scalar_out_dtype)
return result.real
if is_real_dtype(self.out_dtype):
return f
else:
return self.real_space.element(f_re) | python | {
"resource": ""
} |
q35340 | FunctionSpace._imagpart | train | def _imagpart(self, f):
"""Function returning the imaginary part of the result from ``f``."""
def f_im(x, **kwargs):
result = np.asarray(f(x, **kwargs),
dtype=self.scalar_out_dtype)
return result.imag
if is_real_dtype(self.out_dtype):
return self.zero()
else:
return self.real_space.element(f_im) | python | {
"resource": ""
} |
q35341 | FunctionSpace._conj | train | def _conj(self, f):
"""Function returning the complex conjugate of a result."""
def f_conj(x, **kwargs):
result = np.asarray(f(x, **kwargs),
dtype=self.scalar_out_dtype)
return result.conj()
if is_real_dtype(self.out_dtype):
return f
else:
return self.element(f_conj) | python | {
"resource": ""
} |
q35342 | FunctionSpace.byaxis_out | train | def byaxis_out(self):
"""Object to index along output dimensions.
This is only valid for non-trivial `out_shape`.
Examples
--------
Indexing with integers or slices:
>>> domain = odl.IntervalProd(0, 1)
>>> fspace = odl.FunctionSpace(domain, out_dtype=(float, (2, 3, 4)))
>>> fspace.byaxis_out[0]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (2,)))
>>> fspace.byaxis_out[1]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3,)))
>>> fspace.byaxis_out[1:]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (3, 4)))
Lists can be used to stack spaces arbitrarily:
>>> fspace.byaxis_out[[2, 1, 2]]
FunctionSpace(IntervalProd(0.0, 1.0), out_dtype=('float64', (4, 3, 4)))
"""
space = self
class FspaceByaxisOut(object):
"""Helper class for indexing by output axes."""
def __getitem__(self, indices):
"""Return ``self[indices]``.
Parameters
----------
indices : index expression
Object used to index the output components.
Returns
-------
space : `FunctionSpace`
The resulting space with same domain and scalar output
data type, but indexed output components.
Raises
------
IndexError
If this is a space of scalar-valued functions.
"""
try:
iter(indices)
except TypeError:
newshape = space.out_shape[indices]
else:
newshape = tuple(space.out_shape[int(i)] for i in indices)
dtype = (space.scalar_out_dtype, newshape)
return FunctionSpace(space.domain, out_dtype=dtype)
def __repr__(self):
"""Return ``repr(self)``."""
return repr(space) + '.byaxis_out'
return FspaceByaxisOut() | python | {
"resource": ""
} |
q35343 | FunctionSpace.byaxis_in | train | def byaxis_in(self):
"""Object to index ``self`` along input dimensions.
Examples
--------
Indexing with integers or slices:
>>> domain = odl.IntervalProd([0, 0, 0], [1, 2, 3])
>>> fspace = odl.FunctionSpace(domain)
>>> fspace.byaxis_in[0]
FunctionSpace(IntervalProd(0.0, 1.0))
>>> fspace.byaxis_in[1]
FunctionSpace(IntervalProd(0.0, 2.0))
>>> fspace.byaxis_in[1:]
FunctionSpace(IntervalProd([ 0., 0.], [ 2., 3.]))
Lists can be used to stack spaces arbitrarily:
>>> fspace.byaxis_in[[2, 1, 2]]
FunctionSpace(IntervalProd([ 0., 0., 0.], [ 3., 2., 3.]))
"""
space = self
class FspaceByaxisIn(object):
"""Helper class for indexing by input axes."""
def __getitem__(self, indices):
"""Return ``self[indices]``.
Parameters
----------
indices : index expression
Object used to index the space domain.
Returns
-------
space : `FunctionSpace`
The resulting space with same output data type, but
indexed domain.
"""
domain = space.domain[indices]
return FunctionSpace(domain, out_dtype=space.out_dtype)
def __repr__(self):
"""Return ``repr(self)``."""
return repr(space) + '.byaxis_in'
return FspaceByaxisIn() | python | {
"resource": ""
} |
q35344 | FunctionSpaceElement._call | train | def _call(self, x, out=None, **kwargs):
"""Raw evaluation method."""
if out is None:
return self._call_out_of_place(x, **kwargs)
else:
self._call_in_place(x, out=out, **kwargs) | python | {
"resource": ""
} |
q35345 | FunctionSpaceElement.assign | train | def assign(self, other):
"""Assign ``other`` to ``self``.
This is implemented without `FunctionSpace.lincomb` to ensure that
``self == other`` evaluates to True after ``self.assign(other)``.
"""
if other not in self.space:
raise TypeError('`other` {!r} is not an element of the space '
'{} of this function'
''.format(other, self.space))
self._call_in_place = other._call_in_place
self._call_out_of_place = other._call_out_of_place
self._call_has_out = other._call_has_out
self._call_out_optional = other._call_out_optional | python | {
"resource": ""
} |
q35346 | optimal_parameters | train | def optimal_parameters(reconstruction, fom, phantoms, data,
initial=None, univariate=False):
r"""Find the optimal parameters for a reconstruction method.
Notes
-----
For a forward operator :math:`A : X \to Y`, a reconstruction operator
parametrized by :math:`\theta` is some operator
:math:`R_\theta : Y \to X`
such that
.. math::
R_\theta(A(x)) \approx x.
The optimal choice of :math:`\theta` is given by
.. math::
\theta = \arg\min_\theta fom(R(A(x) + noise), x)
where :math:`fom : X \times X \to \mathbb{R}` is a figure of merit.
Parameters
----------
reconstruction : callable
Function that takes two parameters:
* data : The data to be reconstructed
* parameters : Parameters of the reconstruction method
The function should return the reconstructed image.
fom : callable
Function that takes two parameters:
* reconstructed_image
* true_image
and returns a scalar figure of merit.
phantoms : sequence
True images.
data : sequence
The data to reconstruct from.
initial : array-like or pair
Initial guess for the parameters. It is
- a required array in the multivariate case
- an optional pair in the univariate case.
univariate : bool, optional
Whether to use a univariate solver
Returns
-------
parameters : 'numpy.ndarray'
The optimal parameters for the reconstruction problem.
"""
def func(lam):
# Function to be minimized by scipy
return sum(fom(reconstruction(datai, lam), phantomi)
for phantomi, datai in zip(phantoms, data))
# Pick resolution to fit the one used by the space
tol = np.finfo(phantoms[0].space.dtype).resolution * 10
if univariate:
# We use a faster optimizer for the one parameter case
result = scipy.optimize.minimize_scalar(
func, bracket=initial, tol=tol, bounds=None,
options={'disp': False})
return result.x
else:
# Use a gradient free method to find the best parameters
initial = np.asarray(initial)
parameters = scipy.optimize.fmin_powell(
func, initial, xtol=tol, ftol=tol, disp=False)
return parameters | python | {
"resource": ""
} |
q35347 | OperatorAsAutogradFunction.forward | train | def forward(self, input):
"""Evaluate forward pass on the input.
Parameters
----------
input : `torch.tensor._TensorBase`
Point at which to evaluate the operator.
Returns
-------
result : `torch.autograd.variable.Variable`
Variable holding the result of the evaluation.
Examples
--------
Perform a matrix multiplication:
>>> matrix = np.array([[1, 0, 1],
... [0, 1, 1]], dtype='float32')
>>> odl_op = odl.MatrixOperator(matrix)
>>> torch_op = OperatorAsAutogradFunction(odl_op)
>>> x = torch.Tensor([1, 2, 3])
>>> x_var = torch.autograd.Variable(x)
>>> torch_op(x_var)
Variable containing:
4
5
[torch.FloatTensor of size 2]
Evaluate a functional, i.e., an operator with scalar output:
>>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32'))
>>> torch_func = OperatorAsAutogradFunction(odl_func)
>>> x = torch.Tensor([1, 2, 3])
>>> x_var = torch.autograd.Variable(x)
>>> torch_func(x_var)
Variable containing:
14
[torch.FloatTensor of size 1]
"""
# TODO: batched evaluation
if not self.operator.is_linear:
# Only needed for nonlinear operators
self.save_for_backward(input)
# TODO: use GPU memory directly if possible
input_arr = input.cpu().detach().numpy()
if any(s == 0 for s in input_arr.strides):
# TODO: remove when Numpy issue #9165 is fixed
# https://github.com/numpy/numpy/pull/9177
input_arr = input_arr.copy()
op_result = self.operator(input_arr)
if np.isscalar(op_result):
# For functionals, the result is funnelled through `float`,
# so we wrap it into a Numpy array with the same dtype as
# `operator.domain`
op_result = np.array(op_result, ndmin=1,
dtype=self.operator.domain.dtype)
tensor = torch.from_numpy(np.array(op_result, copy=False, ndmin=1))
tensor = tensor.to(input.device)
return tensor | python | {
"resource": ""
} |
q35348 | OperatorAsAutogradFunction.backward | train | def backward(self, grad_output):
r"""Apply the adjoint of the derivative at ``grad_output``.
This method is usually not called explicitly but as a part of the
``cost.backward()`` pass of a backpropagation step.
Parameters
----------
grad_output : `torch.tensor._TensorBase`
Tensor to which the Jacobian should be applied. See Notes
for details.
Returns
-------
result : `torch.autograd.variable.Variable`
Variable holding the result of applying the Jacobian to
``grad_output``. See Notes for details.
Examples
--------
Compute the Jacobian adjoint of the matrix operator, which is the
operator of the transposed matrix. We compose with the ``sum``
functional to be able to evaluate ``grad``:
>>> matrix = np.array([[1, 0, 1],
... [0, 1, 1]], dtype='float32')
>>> odl_op = odl.MatrixOperator(matrix)
>>> torch_op = OperatorAsAutogradFunction(odl_op)
>>> x = torch.Tensor([1, 2, 3])
>>> x_var = torch.autograd.Variable(x, requires_grad=True)
>>> op_x_var = torch_op(x_var)
>>> cost = op_x_var.sum()
>>> cost.backward()
>>> x_var.grad # should be matrix.T.dot([1, 1])
Variable containing:
1
1
2
[torch.FloatTensor of size 3]
Compute the gradient of a custom functional:
>>> odl_func = odl.solvers.L2NormSquared(odl.rn(3, dtype='float32'))
>>> torch_func = OperatorAsAutogradFunction(odl_func)
>>> x = torch.Tensor([1, 2, 3])
>>> x_var = torch.autograd.Variable(x, requires_grad=True)
>>> func_x_var = torch_func(x_var)
>>> func_x_var
Variable containing:
14
[torch.FloatTensor of size 1]
>>> func_x_var.backward()
>>> x_var.grad # Should be 2 * x
Variable containing:
2
4
6
[torch.FloatTensor of size 3]
Notes
-----
This method applies the contribution of this node, i.e., the
transpose of the Jacobian of its outputs with respect to its inputs,
to the gradients of some cost function with respect to the outputs
of this node.
Example: Assume that this node computes :math:`x \mapsto C(f(x))`,
where :math:`x` is a tensor variable and :math:`C` is a scalar-valued
function. In ODL language, what ``backward`` should compute is
.. math::
\nabla(C \circ f)(x) = f'(x)^*\big(\nabla C (f(x))\big)
according to the chain rule. In ODL code, this corresponds to ::
f.derivative(x).adjoint(C.gradient(f(x))).
Hence, the parameter ``grad_output`` is a tensor variable containing
:math:`y = \nabla C(f(x))`. Then, ``backward`` boils down to
computing ``[f'(x)^*(y)]`` using the input ``x`` stored during
the previous `forward` pass.
"""
# TODO: implement directly for GPU data
if not self.operator.is_linear:
input_arr = self.saved_variables[0].data.cpu().numpy()
if any(s == 0 for s in input_arr.strides):
# TODO: remove when Numpy issue #9165 is fixed
# https://github.com/numpy/numpy/pull/9177
input_arr = input_arr.copy()
grad = None
# ODL weights spaces, pytorch doesn't, so we need to handle this
try:
dom_weight = self.operator.domain.weighting.const
except AttributeError:
dom_weight = 1.0
try:
ran_weight = self.operator.range.weighting.const
except AttributeError:
ran_weight = 1.0
scaling = dom_weight / ran_weight
if self.needs_input_grad[0]:
grad_output_arr = grad_output.cpu().numpy()
if any(s == 0 for s in grad_output_arr.strides):
# TODO: remove when Numpy issue #9165 is fixed
# https://github.com/numpy/numpy/pull/9177
grad_output_arr = grad_output_arr.copy()
if self.operator.is_linear:
adjoint = self.operator.adjoint
else:
adjoint = self.operator.derivative(input_arr).adjoint
grad_odl = adjoint(grad_output_arr)
if scaling != 1.0:
grad_odl *= scaling
grad = torch.from_numpy(np.array(grad_odl, copy=False, ndmin=1))
grad = grad.to(grad_output.device)
return grad | python | {
"resource": ""
} |
q35349 | OperatorAsModule.forward | train | def forward(self, x):
"""Compute forward-pass of this module on ``x``.
Parameters
----------
x : `torch.autograd.variable.Variable`
Input of this layer. The contained tensor must have shape
``extra_shape + operator.domain.shape``, and
``len(extra_shape)`` must be at least 1 (batch axis).
Returns
-------
out : `torch.autograd.variable.Variable`
The computed output. Its tensor will have shape
``extra_shape + operator.range.shape``, where ``extra_shape``
are the extra axes of ``x``.
Examples
--------
Evaluating on a 2D tensor, where the operator expects a 1D input,
i.e., with extra batch axis only:
>>> matrix = np.array([[1, 0, 0],
... [0, 1, 1]], dtype='float32')
>>> odl_op = odl.MatrixOperator(matrix)
>>> odl_op.domain.shape
(3,)
>>> odl_op.range.shape
(2,)
>>> op_mod = OperatorAsModule(odl_op)
>>> t = torch.ones(3)
>>> x = autograd.Variable(t[None, :]) # "fake" batch axis
>>> op_mod(x)
Variable containing:
1 2
[torch.FloatTensor of size 1x2]
>>> t = torch.ones(3)
>>> x_tensor = torch.stack([0 * t, 1 * t])
>>> x = autograd.Variable(x_tensor) # batch of 2 inputs
>>> op_mod(x)
Variable containing:
0 0
1 2
[torch.FloatTensor of size 2x2]
An arbitrary number of axes is supported:
>>> x = autograd.Variable(t[None, None, :]) # "fake" batch and channel
>>> op_mod(x)
Variable containing:
(0 ,.,.) =
1 2
[torch.FloatTensor of size 1x1x2]
>>> x_tensor = torch.stack([torch.stack([0 * t, 1 * t]),
... torch.stack([2 * t, 3 * t]),
... torch.stack([4 * t, 5 * t])])
>>> x = autograd.Variable(x_tensor) # batch of 3x2 inputs
>>> op_mod(x)
Variable containing:
(0 ,.,.) =
0 0
1 2
<BLANKLINE>
(1 ,.,.) =
2 4
3 6
<BLANKLINE>
(2 ,.,.) =
4 8
5 10
[torch.FloatTensor of size 3x2x2]
"""
in_shape = x.data.shape
op_in_shape = self.op_func.operator.domain.shape
op_out_shape = self.op_func.operator.range.shape
extra_shape = in_shape[:-len(op_in_shape)]
if in_shape[-len(op_in_shape):] != op_in_shape or not extra_shape:
shp_str = str(op_in_shape).strip('()')
raise ValueError('expected input of shape (N, *, {}), got input '
'with shape {}'.format(shp_str, in_shape))
# Flatten extra axes, then do one entry at a time
newshape = (int(np.prod(extra_shape)),) + op_in_shape
x_flat_xtra = x.reshape(*newshape)
results = []
for i in range(x_flat_xtra.data.shape[0]):
results.append(self.op_func(x_flat_xtra[i]))
# Reshape the resulting stack to the expected output shape
stack_flat_xtra = torch.stack(results)
return stack_flat_xtra.view(extra_shape + op_out_shape) | python | {
"resource": ""
} |
q35350 | mean_squared_error | train | def mean_squared_error(data, ground_truth, mask=None,
normalized=False, force_lower_is_better=True):
r"""Return mean squared L2 distance between ``data`` and ``ground_truth``.
See also `this Wikipedia article
<https://en.wikipedia.org/wiki/Mean_squared_error>`_.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
If ``True``, the output values are mapped to the interval
:math:`[0, 1]` (see `Notes` for details).
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the mean squared error, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
mse : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{MSE}(f, g) = \frac{\| f - g \|_2^2}{\| 1 \|_2^2},
where :math:`\| 1 \|^2_2` is the volume of the domain of definition
of the functions. For :math:`\mathbb{R}^n` type spaces, this is equal
to the number of elements :math:`n`.
The normalized form is
.. math::
\mathrm{MSE_N} = \frac{\| f - g \|_2^2}{(\| f \|_2 + \| g \|_2)^2}.
The normalized variant takes values in :math:`[0, 1]`.
"""
if not hasattr(data, 'space'):
data = odl.vector(data)
space = data.space
ground_truth = space.element(ground_truth)
l2norm = odl.solvers.L2Norm(space)
if mask is not None:
data = data * mask
ground_truth = ground_truth * mask
diff = data - ground_truth
fom = l2norm(diff) ** 2
if normalized:
fom /= (l2norm(data) + l2norm(ground_truth)) ** 2
else:
fom /= l2norm(space.one()) ** 2
# Ignore `force_lower_is_better` since that's already the case
return fom | python | {
"resource": ""
} |
q35351 | mean_absolute_error | train | def mean_absolute_error(data, ground_truth, mask=None,
normalized=False, force_lower_is_better=True):
r"""Return L1-distance between ``data`` and ``ground_truth``.
See also `this Wikipedia article
<https://en.wikipedia.org/wiki/Mean_absolute_error>`_.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
If ``True``, the output values are mapped to the interval
:math:`[0, 1]` (see `Notes` for details), otherwise return the
original mean absolute error.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the mean absolute error, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
mae : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{MAE}(f, g) = \frac{\| f - g \|_1}{\| 1 \|_1},
where :math:`\| 1 \|_1` is the volume of the domain of definition
of the functions. For :math:`\mathbb{R}^n` type spaces, this is equal
to the number of elements :math:`n`.
The normalized form is
.. math::
\mathrm{MAE_N}(f, g) = \frac{\| f - g \|_1}{\| f \|_1 + \| g \|_1}.
The normalized variant takes values in :math:`[0, 1]`.
"""
if not hasattr(data, 'space'):
data = odl.vector(data)
space = data.space
ground_truth = space.element(ground_truth)
l1_norm = odl.solvers.L1Norm(space)
if mask is not None:
data = data * mask
ground_truth = ground_truth * mask
diff = data - ground_truth
fom = l1_norm(diff)
if normalized:
fom /= (l1_norm(data) + l1_norm(ground_truth))
else:
fom /= l1_norm(space.one())
# Ignore `force_lower_is_better` since that's already the case
return fom | python | {
"resource": ""
} |
q35352 | mean_value_difference | train | def mean_value_difference(data, ground_truth, mask=None, normalized=False,
force_lower_is_better=True):
r"""Return difference in mean value between ``data`` and ``ground_truth``.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
Boolean flag to switch between unormalized and normalized FOM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the mean value difference, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
mvd : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{MVD}(f, g) =
\Big| \overline{f} - \overline{g} \Big|,
or, in normalized form
.. math::
\mathrm{MVD_N}(f, g) =
\frac{\Big| \overline{f} - \overline{g} \Big|}
{|\overline{f}| + |\overline{g}|}
where :math:`\overline{f}` is the mean value of :math:`f`,
.. math::
\overline{f} = \frac{\langle f, 1\rangle}{\|1|_1}.
The normalized variant takes values in :math:`[0, 1]`.
"""
if not hasattr(data, 'space'):
data = odl.vector(data)
space = data.space
ground_truth = space.element(ground_truth)
l1_norm = odl.solvers.L1Norm(space)
if mask is not None:
data = data * mask
ground_truth = ground_truth * mask
# Volume of space
vol = l1_norm(space.one())
data_mean = data.inner(space.one()) / vol
ground_truth_mean = ground_truth.inner(space.one()) / vol
fom = np.abs(data_mean - ground_truth_mean)
if normalized:
fom /= (np.abs(data_mean) + np.abs(ground_truth_mean))
# Ignore `force_lower_is_better` since that's already the case
return fom | python | {
"resource": ""
} |
q35353 | standard_deviation_difference | train | def standard_deviation_difference(data, ground_truth, mask=None,
normalized=False,
force_lower_is_better=True):
r"""Return absolute diff in std between ``data`` and ``ground_truth``.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
If given, ``data * mask`` is compared to ``ground_truth * mask``.
normalized : bool, optional
Boolean flag to switch between unormalized and normalized FOM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the standard deviation difference, this is already the
case, and the flag is only present for compatibility to other figures
of merit.
Returns
-------
sdd : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{SDD}(f, g) =
\Big| \| f - \overline{f} \|_2 - \| g - \overline{g} \|_2 \Big|,
or, in normalized form
.. math::
\mathrm{SDD_N}(f, g) =
\frac{\Big| \| f - \overline{f} \|_2 -
\| g - \overline{g} \|_2 \Big|}
{\| f - \overline{f} \|_2 + \| g - \overline{g} \|_2},
where :math:`\overline{f}` is the mean value of :math:`f`,
.. math::
\overline{f} = \frac{\langle f, 1\rangle}{\|1|_1}.
The normalized variant takes values in :math:`[0, 1]`.
"""
if not hasattr(data, 'space'):
data = odl.vector(data)
space = data.space
ground_truth = space.element(ground_truth)
l1_norm = odl.solvers.L1Norm(space)
l2_norm = odl.solvers.L2Norm(space)
if mask is not None:
data = data * mask
ground_truth = ground_truth * mask
# Volume of space
vol = l1_norm(space.one())
data_mean = data.inner(space.one()) / vol
ground_truth_mean = ground_truth.inner(space.one()) / vol
deviation_data = l2_norm(data - data_mean)
deviation_ground_truth = l2_norm(ground_truth - ground_truth_mean)
fom = np.abs(deviation_data - deviation_ground_truth)
if normalized:
denom = deviation_data + deviation_ground_truth
if denom == 0:
fom = 0.0
else:
fom /= denom
return fom | python | {
"resource": ""
} |
q35354 | range_difference | train | def range_difference(data, ground_truth, mask=None, normalized=False,
force_lower_is_better=True):
r"""Return dynamic range difference between ``data`` and ``ground_truth``.
Evaluates difference in range between input (``data``) and reference
data (``ground_truth``). Allows for normalization (``normalized``) and a
masking of the two spaces (``mask``).
Parameters
----------
data : `array-like`
Input data to compare to the ground truth.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
Binary mask or index array to define ROI in which FOM evaluation
is performed.
normalized : bool, optional
If ``True``, normalize the FOM to lie in [0, 1].
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches. For the range difference, this is already the case, and
the flag is only present for compatibility to other figures of merit.
Returns
-------
rd : float
FOM value, where a lower value means a better match.
Notes
-----
The FOM evaluates
.. math::
\mathrm{RD}(f, g) = \Big|
\big(\max(f) - \min(f) \big) -
\big(\max(g) - \min(g) \big)
\Big|
or, in normalized form
.. math::
\mathrm{RD_N}(f, g) = \frac{
\Big|
\big(\max(f) - \min(f) \big) -
\big(\max(g) - \min(g) \big)
\Big|}{
\big(\max(f) - \min(f) \big) +
\big(\max(g) - \min(g) \big)}
The normalized variant takes values in :math:`[0, 1]`.
"""
data = np.asarray(data)
ground_truth = np.asarray(ground_truth)
if mask is not None:
mask = np.asarray(mask, dtype=bool)
data = data[mask]
ground_truth = ground_truth[mask]
data_range = np.ptp(data)
ground_truth_range = np.ptp(ground_truth)
fom = np.abs(data_range - ground_truth_range)
if normalized:
denom = np.abs(data_range + ground_truth_range)
if denom == 0:
fom = 0.0
else:
fom /= denom
return fom | python | {
"resource": ""
} |
q35355 | blurring | train | def blurring(data, ground_truth, mask=None, normalized=False,
smoothness_factor=None):
r"""Return weighted L2 distance, emphasizing regions defined by ``mask``.
.. note::
If the mask argument is omitted, this FOM is equivalent to the
mean squared error.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
mask : `array-like`, optional
Binary mask to define ROI in which FOM evaluation is performed.
normalized : bool, optional
Boolean flag to switch between unormalized and normalized FOM.
smoothness_factor : float, optional
Positive real number. Higher value gives smoother weighting.
Returns
-------
blur : float
FOM value, where a lower value means a better match.
See Also
--------
false_structures
mean_squared_error
Notes
-----
The FOM evaluates
.. math::
\mathrm{BLUR}(f, g) = \|\alpha (f - g) \|_2^2,
or, in normalized form
.. math::
\mathrm{BLUR_N}(f, g) =
\frac{\|\alpha(f - g)\|^2_2}
{\|\alpha f\|^2_2 + \|\alpha g\|^2_2}.
The weighting function :math:`\alpha` is given as
.. math::
\alpha(x) = e^{-\frac{1}{k} \beta_m(x)},
where :math:`\beta_m(x)` is the Euclidian distance transform of a
given binary mask :math:`m`, and :math:`k` positive real number that
controls the smoothness of the weighting function :math:`\alpha`.
The weighting gives higher values to structures in the region of
interest defined by the mask.
The normalized variant takes values in :math:`[0, 1]`.
"""
from scipy.ndimage.morphology import distance_transform_edt
if not hasattr(data, 'space'):
data = odl.vector(data)
space = data.space
ground_truth = space.element(ground_truth)
if smoothness_factor is None:
smoothness_factor = np.mean(data.shape) / 10
if mask is not None:
mask = distance_transform_edt(1 - mask)
mask = np.exp(-mask / smoothness_factor)
return mean_squared_error(data, ground_truth, mask, normalized) | python | {
"resource": ""
} |
q35356 | false_structures_mask | train | def false_structures_mask(foreground, smoothness_factor=None):
"""Return mask emphasizing areas outside ``foreground``.
Parameters
----------
foreground : `Tensor` or `array-like`
The region that should be de-emphasized. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
foreground : `FnBaseVector`
The region that should be de-emphasized.
smoothness_factor : float, optional
Positive real number. Higher value gives smoother transition
between foreground and its complement.
Returns
-------
result : `Tensor` or `numpy.ndarray`
Euclidean distances of elements in ``foreground``. The return value
is a `Tensor` if ``foreground`` is one, too, otherwise a NumPy array.
Examples
--------
>>> space = odl.uniform_discr(0, 1, 5)
>>> foreground = space.element([0, 0, 1.0, 0, 0])
>>> mask = false_structures_mask(foreground)
>>> np.asarray(mask)
array([ 0.4, 0.2, 0. , 0.2, 0.4])
Raises
------
ValueError
If foreground is all zero or all one, or contains values not in {0, 1}.
Notes
-----
This helper function computes the Euclidean distance transform from each
point in ``foreground.space`` to ``foreground``.
The weighting gives higher values to structures outside the foreground
as defined by the mask.
"""
try:
space = foreground.space
has_space = True
except AttributeError:
has_space = False
foreground = np.asarray(foreground)
space = odl.tensor_space(foreground.shape, foreground.dtype)
foreground = space.element(foreground)
from scipy.ndimage.morphology import distance_transform_edt
unique = np.unique(foreground)
if not np.array_equiv(unique, [0., 1.]):
raise ValueError('`foreground` is not a binary mask or has '
'either only true or only false values {!r}'
''.format(unique))
result = distance_transform_edt(
1.0 - foreground, sampling=getattr(space, 'cell_sides', 1.0)
)
if has_space:
return space.element(result)
else:
return result | python | {
"resource": ""
} |
q35357 | ssim | train | def ssim(data, ground_truth, size=11, sigma=1.5, K1=0.01, K2=0.03,
dynamic_range=None, normalized=False, force_lower_is_better=False):
r"""Structural SIMilarity between ``data`` and ``ground_truth``.
The SSIM takes value -1 for maximum dissimilarity and +1 for maximum
similarity.
See also `this Wikipedia article
<https://en.wikipedia.org/wiki/Structural_similarity>`_.
Parameters
----------
data : `array-like`
Input data to compare to the ground truth.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
size : odd int, optional
Size in elements per axis of the Gaussian window that is used
for all smoothing operations.
sigma : positive float, optional
Width of the Gaussian function used for smoothing.
K1, K2 : positive float, optional
Small constants to stabilize the result. See [Wan+2004] for details.
dynamic_range : nonnegative float, optional
Difference between the maximum and minimum value that the pixels
can attain. Use 255 if pixel range is :math:`[0, 255]` and 1 if
it is :math:`[0, 1]`. Default: `None`, obtain maximum and minimum
from the ground truth.
normalized : bool, optional
If ``True``, the output values are mapped to the interval
:math:`[0, 1]` (see `Notes` for details), otherwise return the
original SSIM.
force_lower_is_better : bool, optional
If ``True``, it is ensured that lower values correspond to better
matches by returning the negative of the SSIM, otherwise the (possibly
normalized) SSIM is returned. If both `normalized` and
`force_lower_is_better` are ``True``, then the order is reversed before
mapping the outputs, so that the latter are still in the interval
:math:`[0, 1]`.
Returns
-------
ssim : float
FOM value, where a higher value means a better match
if `force_lower_is_better` is ``False``.
Notes
-----
The SSIM is computed on small windows and then averaged over the whole
image. The SSIM between two windows :math:`x` and :math:`y` of size
:math:`N \times N`
.. math::
SSIM(x,y) = \frac{(2\mu_x\mu_y + c_1)(2\sigma_{xy} + c_2)}
{(\mu_x^2 + \mu_y^2 + c_1)(\sigma_x^2 + \sigma_y^2 + c_2)}
where:
* :math:`\mu_x`, :math:`\mu_y` is the mean of :math:`x` and :math:`y`,
respectively.
* :math:`\sigma_x`, :math:`\sigma_y` is the standard deviation of
:math:`x` and :math:`y`, respectively.
* :math:`\sigma_{xy}` the covariance of :math:`x` and :math:`y`
* :math:`c_1 = (k_1L)^2`, :math:`c_2 = (k_2L)^2` where :math:`L` is the
dynamic range of the image.
The unnormalized values are contained in the interval :math:`[-1, 1]`,
where 1 corresponds to a perfect match. The normalized values are given by
.. math::
SSIM_{normalized}(x, y) = \frac{SSIM(x, y) + 1}{2}
References
----------
[Wan+2004] Wang, Z, Bovik, AC, Sheikh, HR, and Simoncelli, EP.
*Image Quality Assessment: From Error Visibility to Structural Similarity*.
IEEE Transactions on Image Processing, 13.4 (2004), pp 600--612.
"""
from scipy.signal import fftconvolve
data = np.asarray(data)
ground_truth = np.asarray(ground_truth)
# Compute gaussian on a `size`-sized grid in each axis
coords = np.linspace(-(size - 1) / 2, (size - 1) / 2, size)
grid = sparse_meshgrid(*([coords] * data.ndim))
window = np.exp(-(sum(xi ** 2 for xi in grid) / (2.0 * sigma ** 2)))
window /= np.sum(window)
def smoothen(img):
"""Smoothes an image by convolving with a window function."""
return fftconvolve(window, img, mode='valid')
if dynamic_range is None:
dynamic_range = np.max(ground_truth) - np.min(ground_truth)
C1 = (K1 * dynamic_range) ** 2
C2 = (K2 * dynamic_range) ** 2
mu1 = smoothen(data)
mu2 = smoothen(ground_truth)
mu1_sq = mu1 * mu1
mu2_sq = mu2 * mu2
mu1_mu2 = mu1 * mu2
sigma1_sq = smoothen(data * data) - mu1_sq
sigma2_sq = smoothen(ground_truth * ground_truth) - mu2_sq
sigma12 = smoothen(data * ground_truth) - mu1_mu2
num = (2 * mu1_mu2 + C1) * (2 * sigma12 + C2)
denom = (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2)
pointwise_ssim = num / denom
result = np.mean(pointwise_ssim)
if force_lower_is_better:
result = -result
if normalized:
result = (result + 1.0) / 2.0
return result | python | {
"resource": ""
} |
q35358 | psnr | train | def psnr(data, ground_truth, use_zscore=False, force_lower_is_better=False):
"""Return the Peak Signal-to-Noise Ratio of ``data`` wrt ``ground_truth``.
See also `this Wikipedia article
<https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
Parameters
----------
data : `Tensor` or `array-like`
Input data to compare to the ground truth. If not a `Tensor`, an
unweighted tensor space will be assumed.
ground_truth : `array-like`
Reference to which ``data`` should be compared.
use_zscore : bool
If ``True``, normalize ``data`` and ``ground_truth`` to have zero mean
and unit variance before comparison.
force_lower_is_better : bool
If ``True``, then lower value indicates better fit. In this case the
output is negated.
Returns
-------
psnr : float
FOM value, where a higher value means a better match.
Examples
--------
Compute the PSNR for two vectors:
>>> spc = odl.rn(5)
>>> data = spc.element([1, 1, 1, 1, 1])
>>> ground_truth = spc.element([1, 1, 1, 1, 2])
>>> result = psnr(data, ground_truth)
>>> print('{:.3f}'.format(result))
13.010
If data == ground_truth, the result is positive infinity:
>>> psnr(ground_truth, ground_truth)
inf
With ``use_zscore=True``, scaling differences and constant offsets
are ignored:
>>> (psnr(data, ground_truth, use_zscore=True) ==
... psnr(data, 3 + 4 * ground_truth, use_zscore=True))
True
"""
if use_zscore:
data = odl.util.zscore(data)
ground_truth = odl.util.zscore(ground_truth)
mse = mean_squared_error(data, ground_truth)
max_true = np.max(np.abs(ground_truth))
if mse == 0:
result = np.inf
elif max_true == 0:
result = -np.inf
else:
result = 20 * np.log10(max_true) - 10 * np.log10(mse)
if force_lower_is_better:
return -result
else:
return result | python | {
"resource": ""
} |
q35359 | haarpsi | train | def haarpsi(data, ground_truth, a=4.2, c=None):
r"""Haar-Wavelet based perceptual similarity index FOM.
This function evaluates the structural similarity between two images
based on edge features along the coordinate axes, analyzed with two
wavelet filter levels. See
`[Rei+2016] <https://arxiv.org/abs/1607.06140>`_ and the Notes section
for further details.
Parameters
----------
data : 2D array-like
The image to compare to the ground truth.
ground_truth : 2D array-like
The true image with which to compare ``data``. It must have the
same shape as ``data``.
a : positive float, optional
Parameter in the logistic function. Larger value leads to a
steeper curve, thus lowering the threshold for an input to
be mapped to an output close to 1. See Notes for details.
The default value 4.2 is taken from the referenced paper.
c : positive float, optional
Constant determining the score of maximally dissimilar values.
Smaller constant means higher penalty for dissimilarity.
See `haarpsi_similarity_map` for details.
For ``None``, the value is chosen as
``3 * sqrt(max(abs(ground_truth)))``.
Returns
-------
haarpsi : float between 0 and 1
The similarity score, where a higher score means a better match.
See Notes for details.
See Also
--------
haarpsi_similarity_map
haarpsi_weight_map
Notes
-----
For input images :math:`f_1, f_2`, the HaarPSI score is defined as
.. math::
\mathrm{HaarPSI}_{f_1, f_2} =
l_a^{-1} \left(
\frac{
\sum_x \sum_{k=1}^2 \mathrm{HS}_{f_1, f_2}^{(k)}(x) \cdot
\mathrm{W}_{f_1, f_2}^{(k)}(x)}{
\sum_x \sum_{k=1}^2 \mathrm{W}_{f_1, f_2}^{(k)}(x)}
\right)^2
see `[Rei+2016] <https://arxiv.org/abs/1607.06140>`_ equation (12).
For the definitions of the constituting functions, see
- `haarpsi_similarity_map` for :math:`\mathrm{HS}_{f_1, f_2}^{(k)}`,
- `haarpsi_weight_map` for :math:`\mathrm{W}_{f_1, f_2}^{(k)}`.
References
----------
[Rei+2016] Reisenhofer, R, Bosse, S, Kutyniok, G, and Wiegand, T.
*A Haar Wavelet-Based Perceptual Similarity Index for Image Quality
Assessment*. arXiv:1607.06140 [cs], Jul. 2016.
"""
import scipy.special
from odl.contrib.fom.util import haarpsi_similarity_map, haarpsi_weight_map
if c is None:
c = 3 * np.sqrt(np.max(np.abs(ground_truth)))
lsim_horiz = haarpsi_similarity_map(data, ground_truth, axis=0, c=c, a=a)
lsim_vert = haarpsi_similarity_map(data, ground_truth, axis=1, c=c, a=a)
wmap_horiz = haarpsi_weight_map(data, ground_truth, axis=0)
wmap_vert = haarpsi_weight_map(data, ground_truth, axis=1)
numer = np.sum(lsim_horiz * wmap_horiz + lsim_vert * wmap_vert)
denom = np.sum(wmap_horiz + wmap_vert)
return (scipy.special.logit(numer / denom) / a) ** 2 | python | {
"resource": ""
} |
q35360 | Detector.surface_normal | train | def surface_normal(self, param):
"""Unit vector perpendicular to the detector surface at ``param``.
The orientation is chosen as follows:
- In 2D, the system ``(normal, tangent)`` should be
right-handed.
- In 3D, the system ``(tangent[0], tangent[1], normal)``
should be right-handed.
Here, ``tangent`` is the return value of `surface_deriv` at
``param``.
Parameters
----------
param : `array-like` or sequence
Parameter value(s) at which to evaluate. If ``ndim >= 2``,
a sequence of length `ndim` must be provided.
Returns
-------
normal : `numpy.ndarray`
Unit vector(s) perpendicular to the detector surface at
``param``.
If ``param`` is a single parameter, an array of shape
``(space_ndim,)`` representing a single vector is returned.
Otherwise the shape of the returned array is
- ``param.shape + (space_ndim,)`` if `ndim` is 1,
- ``param.shape[:-1] + (space_ndim,)`` otherwise.
"""
# Checking is done by `surface_deriv`
if self.ndim == 1 and self.space_ndim == 2:
return -perpendicular_vector(self.surface_deriv(param))
elif self.ndim == 2 and self.space_ndim == 3:
deriv = self.surface_deriv(param)
if deriv.ndim > 2:
# Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
deriv = moveaxis(deriv, -2, 0)
normal = np.cross(*deriv, axis=-1)
normal /= np.linalg.norm(normal, axis=-1, keepdims=True)
return normal
else:
raise NotImplementedError(
'no default implementation of `surface_normal` available '
'for `ndim = {}` and `space_ndim = {}`'
''.format(self.ndim, self.space_ndim)) | python | {
"resource": ""
} |
q35361 | Detector.surface_measure | train | def surface_measure(self, param):
"""Density function of the surface measure.
This is the default implementation relying on the `surface_deriv`
method. For a detector with `ndim` equal to 1, the density is given
by the `Arc length`_, for a surface with `ndim` 2 in a 3D space, it
is the length of the cross product of the partial derivatives of the
parametrization, see Wikipedia's `Surface area`_ article.
Parameters
----------
param : `array-like` or sequence
Parameter value(s) at which to evaluate. If ``ndim >= 2``,
a sequence of length `ndim` must be provided.
Returns
-------
measure : float or `numpy.ndarray`
The density value(s) at the given parameter(s). If a single
parameter is provided, a float is returned. Otherwise, an
array is returned with shape
- ``param.shape`` if `ndim` is 1,
- ``broadcast(*param).shape`` otherwise.
References
----------
.. _Arc length:
https://en.wikipedia.org/wiki/Curve#Lengths_of_curves
.. _Surface area:
https://en.wikipedia.org/wiki/Surface_area
"""
# Checking is done by `surface_deriv`
if self.ndim == 1:
scalar_out = (np.shape(param) == ())
measure = np.linalg.norm(self.surface_deriv(param), axis=-1)
if scalar_out:
measure = float(measure)
return measure
elif self.ndim == 2 and self.space_ndim == 3:
scalar_out = (np.shape(param) == (2,))
deriv = self.surface_deriv(param)
if deriv.ndim > 2:
# Vectorized, need to reshape (N, 2, 3) to (2, N, 3)
deriv = moveaxis(deriv, -2, 0)
cross = np.cross(*deriv, axis=-1)
measure = np.linalg.norm(cross, axis=-1)
if scalar_out:
measure = float(measure)
return measure
else:
raise NotImplementedError(
'no default implementation of `surface_measure` available '
'for `ndim={}` and `space_ndim={}`'
''.format(self.ndim, self.space_ndim)) | python | {
"resource": ""
} |
q35362 | CircularDetector.surface_measure | train | def surface_measure(self, param):
"""Return the arc length measure at ``param``.
This is a constant function evaluating to `radius` everywhere.
Parameters
----------
param : float or `array-like`
Parameter value(s) at which to evaluate.
Returns
-------
measure : float or `numpy.ndarray`
Constant value(s) of the arc length measure at ``param``.
If ``param`` is a single parameter, a float is returned,
otherwise an array of shape ``param.shape``.
See Also
--------
surface
surface_deriv
Examples
--------
The method works with a single parameter, resulting in a float:
>>> part = odl.uniform_partition(-np.pi / 2, np.pi / 2, 10)
>>> det = CircularDetector(part, axis=[1, 0], radius=2)
>>> det.surface_measure(0)
2.0
>>> det.surface_measure(np.pi / 2)
2.0
It is also vectorized, i.e., it can be called with multiple
parameters at once (or an n-dimensional array of parameters):
>>> det.surface_measure([0, np.pi / 2])
array([ 2., 2.])
>>> det.surface_measure(np.zeros((4, 5))).shape
(4, 5)
"""
scalar_out = (np.shape(param) == ())
param = np.array(param, dtype=float, copy=False, ndmin=1)
if self.check_bounds and not is_inside_bounds(param, self.params):
raise ValueError('`param` {} not in the valid range '
'{}'.format(param, self.params))
if scalar_out:
return self.radius
else:
return self.radius * np.ones(param.shape) | python | {
"resource": ""
} |
q35363 | adupdates | train | def adupdates(x, g, L, stepsize, inner_stepsizes, niter, random=False,
callback=None, callback_loop='outer'):
r"""Alternating Dual updates method.
The Alternating Dual (AD) updates method of McGaffin and Fessler `[MF2015]
<http://ieeexplore.ieee.org/document/7271047/>`_ is designed to solve an
optimization problem of the form ::
min_x [ sum_i g_i(L_i x) ]
where ``g_i`` are proper, convex and lower semicontinuous functions and
``L_i`` are linear `Operator` s.
Parameters
----------
g : sequence of `Functional` s
All functions need to provide a `Functional.convex_conj` with a
`Functional.proximal` factory.
L : sequence of `Operator` s
Length of ``L`` must equal the length of ``g``.
x : `LinearSpaceElement`
Initial point, updated in-place.
stepsize : positive float
The stepsize for the outer (proximal point) iteration. The theory
guarantees convergence for any positive real number, but the
performance might depend on the choice of a good stepsize.
inner_stepsizes : sequence of stepsizes
Parameters determining the stepsizes for the inner iterations. Must be
matched with the norms of ``L``, and convergence is guaranteed if the
``inner_stepsizes`` are small enough. See the Notes section for
details.
niter : int
Number of (outer) iterations.
random : bool, optional
If `True`, the order of the dual upgdates is chosen randomly,
otherwise the order provided by the lists ``g``, ``L`` and
``inner_stepsizes`` is used.
callback : callable, optional
Function called with the current iterate after each iteration.
callback_loop : {'inner', 'outer'}, optional
If 'inner', the ``callback`` function is called after each inner
iteration, i.e., after each dual update. If 'outer', the ``callback``
function is called after each outer iteration, i.e., after each primal
update.
Notes
-----
The algorithm as implemented here is described in the article [MF2015],
where it is applied to a tomography problem. It solves the problem
.. math::
\min_x \sum_{i=1}^m g_i(L_i x),
where :math:`g_i` are proper, convex and lower semicontinuous functions
and :math:`L_i` are linear, continuous operators for
:math:`i = 1, \ldots, m`. In an outer iteration, the solution is found
iteratively by an iteration
.. math::
x_{n+1} = \mathrm{arg\,min}_x \sum_{i=1}^m g_i(L_i x)
+ \frac{\mu}{2} \|x - x_n\|^2
with some ``stepsize`` parameter :math:`\mu > 0` according to the proximal
point algorithm. In the inner iteration, dual variables are introduced for
each of the components of the sum. The Lagrangian of the problem is given
by
.. math::
S_n(x; v_1, \ldots, v_m) = \sum_{i=1}^m (\langle v_i, L_i x \rangle
- g_i^*(v_i)) + \frac{\mu}{2} \|x - x_n\|^2.
Given the dual variables, the new primal variable :math:`x_{n+1}`
can be calculated by directly minimizing :math:`S_n` with respect to
:math:`x`. This corresponds to the formula
.. math::
x_{n+1} = x_n - \frac{1}{\mu} \sum_{i=1}^m L_i^* v_i.
The dual updates are executed according to the following rule:
.. math::
v_i^+ = \mathrm{Prox}^{\mu M_i^{-1}}_{g_i^*}
(v_i + \mu M_i^{-1} L_i x_{n+1}),
where :math:`x_{n+1}` is given by the formula above and :math:`M_i` is a
diagonal matrix with positive diagonal entries such that
:math:`M_i - L_i L_i^*` is positive semidefinite. The variable
``inner_stepsizes`` is chosen as a stepsize to the `Functional.proximal` to
the `Functional.convex_conj` of each of the ``g`` s after multiplying with
``stepsize``. The ``inner_stepsizes`` contain the elements of
:math:`M_i^{-1}` in one of the following ways:
* Setting ``inner_stepsizes[i]`` a positive float :math:`\gamma`
corresponds to the choice :math:`M_i^{-1} = \gamma \mathrm{Id}`.
* Assume that ``g_i`` is a `SeparableSum`, then setting
``inner_stepsizes[i]`` a list :math:`(\gamma_1, \ldots, \gamma_k)` of
positive floats corresponds to the choice of a block-diagonal matrix
:math:`M_i^{-1}`, where each block corresponds to one of the space
components and equals :math:`\gamma_i \mathrm{Id}`.
* Assume that ``g_i`` is an `L1Norm` or an `L2NormSquared`, then setting
``inner_stepsizes[i]`` a ``g_i.domain.element`` :math:`z` corresponds to
the choice :math:`M_i^{-1} = \mathrm{diag}(z)`.
References
----------
[MF2015] McGaffin, M G, and Fessler, J A. *Alternating dual updates
algorithm for X-ray CT reconstruction on the GPU*. IEEE Transactions
on Computational Imaging, 1.3 (2015), pp 186--199.
"""
# Check the lenghts of the lists (= number of dual variables)
length = len(g)
if len(L) != length:
raise ValueError('`len(L)` should equal `len(g)`, but {} != {}'
''.format(len(L), length))
if len(inner_stepsizes) != length:
raise ValueError('len(`inner_stepsizes`) should equal `len(g)`, '
' but {} != {}'.format(len(inner_stepsizes), length))
# Check if operators have a common domain
# (the space of the primal variable):
domain = L[0].domain
if any(opi.domain != domain for opi in L):
raise ValueError('domains of `L` are not all equal')
# Check if range of the operators equals domain of the functionals
ranges = [opi.range for opi in L]
if any(L[i].range != g[i].domain for i in range(length)):
raise ValueError('L[i].range` should equal `g.domain`')
# Normalize string
callback_loop, callback_loop_in = str(callback_loop).lower(), callback_loop
if callback_loop not in ('inner', 'outer'):
raise ValueError('`callback_loop` {!r} not understood'
''.format(callback_loop_in))
# Initialization of the dual variables
duals = [space.zero() for space in ranges]
# Reusable elements in the ranges, one per type of space
unique_ranges = set(ranges)
tmp_rans = {ran: ran.element() for ran in unique_ranges}
# Prepare the proximal operators. Since the stepsize does not vary over
# the iterations, we always use the same proximal operator.
proxs = [func.convex_conj.proximal(stepsize * inner_ss
if np.isscalar(inner_ss)
else stepsize * np.asarray(inner_ss))
for (func, inner_ss) in zip(g, inner_stepsizes)]
# Iteratively find a solution
for _ in range(niter):
# Update x = x - 1/stepsize * sum([ops[i].adjoint(duals[i])
# for i in range(length)])
for i in range(length):
x -= (1.0 / stepsize) * L[i].adjoint(duals[i])
if random:
rng = np.random.permutation(range(length))
else:
rng = range(length)
for j in rng:
step = (stepsize * inner_stepsizes[j]
if np.isscalar(inner_stepsizes[j])
else stepsize * np.asarray(inner_stepsizes[j]))
arg = duals[j] + step * L[j](x)
tmp_ran = tmp_rans[L[j].range]
proxs[j](arg, out=tmp_ran)
x -= 1.0 / stepsize * L[j].adjoint(tmp_ran - duals[j])
duals[j].assign(tmp_ran)
if callback is not None and callback_loop == 'inner':
callback(x)
if callback is not None and callback_loop == 'outer':
callback(x) | python | {
"resource": ""
} |
q35364 | adupdates_simple | train | def adupdates_simple(x, g, L, stepsize, inner_stepsizes, niter,
random=False):
"""Non-optimized version of ``adupdates``.
This function is intended for debugging. It makes a lot of copies and
performs no error checking.
"""
# Initializations
length = len(g)
ranges = [Li.range for Li in L]
duals = [space.zero() for space in ranges]
# Iteratively find a solution
for _ in range(niter):
# Update x = x - 1/stepsize * sum([ops[i].adjoint(duals[i])
# for i in range(length)])
for i in range(length):
x -= (1.0 / stepsize) * L[i].adjoint(duals[i])
rng = np.random.permutation(range(length)) if random else range(length)
for j in rng:
dual_tmp = ranges[j].element()
dual_tmp = (g[j].convex_conj.proximal
(stepsize * inner_stepsizes[j]
if np.isscalar(inner_stepsizes[j])
else stepsize * np.asarray(inner_stepsizes[j]))
(duals[j] + stepsize * inner_stepsizes[j] * L[j](x)
if np.isscalar(inner_stepsizes[j])
else duals[j] + stepsize *
np.asarray(inner_stepsizes[j]) * L[j](x)))
x -= 1.0 / stepsize * L[j].adjoint(dual_tmp - duals[j])
duals[j].assign(dual_tmp) | python | {
"resource": ""
} |
q35365 | ParallelHoleCollimatorGeometry.frommatrix | train | def frommatrix(cls, apart, dpart, det_radius, init_matrix, **kwargs):
"""Create a `ParallelHoleCollimatorGeometry` using a matrix.
This alternative constructor uses a matrix to rotate and
translate the default configuration. It is most useful when
the transformation to be applied is already given as a matrix.
Parameters
----------
apart : 1-dim. `RectPartition`
Partition of the parameter interval.
dpart : 2-dim. `RectPartition`
Partition of the detector parameter set.
det_radius : positive float
Radius of the circular detector orbit.
init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``, optional
Transformation matrix whose left ``(3, 3)`` block is multiplied
with the default ``det_pos_init`` and ``det_axes_init`` to
determine the new vectors. If present, the fourth column acts
as a translation after the initial transformation.
The resulting ``det_axes_init`` will be normalized.
kwargs :
Further keyword arguments passed to the class constructor.
Returns
-------
geometry : `ParallelHoleCollimatorGeometry`
The resulting geometry.
"""
# Get transformation and translation parts from `init_matrix`
init_matrix = np.asarray(init_matrix, dtype=float)
if init_matrix.shape not in ((3, 3), (3, 4)):
raise ValueError('`matrix` must have shape (3, 3) or (3, 4), '
'got array with shape {}'
''.format(init_matrix.shape))
trafo_matrix = init_matrix[:, :3]
translation = init_matrix[:, 3:].squeeze()
# Transform the default vectors
default_axis = cls._default_config['axis']
# Normalized version, just in case
default_orig_to_det_init = (
np.array(cls._default_config['det_pos_init'], dtype=float) /
np.linalg.norm(cls._default_config['det_pos_init']))
default_det_axes_init = cls._default_config['det_axes_init']
vecs_to_transform = ((default_orig_to_det_init,) +
default_det_axes_init)
transformed_vecs = transform_system(
default_axis, None, vecs_to_transform, matrix=trafo_matrix)
# Use the standard constructor with these vectors
axis, orig_to_det, det_axis_0, det_axis_1 = transformed_vecs
if translation.size != 0:
kwargs['translation'] = translation
return cls(apart, dpart, det_radius, axis,
orig_to_det_init=orig_to_det,
det_axes_init=[det_axis_0, det_axis_1],
**kwargs) | python | {
"resource": ""
} |
q35366 | _compute_nearest_weights_edge | train | def _compute_nearest_weights_edge(idcs, ndist, variant):
"""Helper for nearest interpolation mimicing the linear case."""
# Get out-of-bounds indices from the norm_distances. Negative
# means "too low", larger than or equal to 1 means "too high"
lo = (ndist < 0)
hi = (ndist > 1)
# For "too low" nodes, the lower neighbor gets weight zero;
# "too high" gets 1.
if variant == 'left':
w_lo = np.where(ndist <= 0.5, 1.0, 0.0)
else:
w_lo = np.where(ndist < 0.5, 1.0, 0.0)
w_lo[lo] = 0
w_lo[hi] = 1
# For "too high" nodes, the upper neighbor gets weight zero;
# "too low" gets 1.
if variant == 'left':
w_hi = np.where(ndist <= 0.5, 0.0, 1.0)
else:
w_hi = np.where(ndist < 0.5, 0.0, 1.0)
w_hi[lo] = 1
w_hi[hi] = 0
# For upper/lower out-of-bounds nodes, we need to set the
# lower/upper neighbors to the last/first grid point
edge = [idcs, idcs + 1]
edge[0][hi] = -1
edge[1][lo] = 0
return w_lo, w_hi, edge | python | {
"resource": ""
} |
q35367 | _compute_linear_weights_edge | train | def _compute_linear_weights_edge(idcs, ndist):
"""Helper for linear interpolation."""
# Get out-of-bounds indices from the norm_distances. Negative
# means "too low", larger than or equal to 1 means "too high"
lo = np.where(ndist < 0)
hi = np.where(ndist > 1)
# For "too low" nodes, the lower neighbor gets weight zero;
# "too high" gets 2 - yi (since yi >= 1)
w_lo = (1 - ndist)
w_lo[lo] = 0
w_lo[hi] += 1
# For "too high" nodes, the upper neighbor gets weight zero;
# "too low" gets 1 + yi (since yi < 0)
w_hi = np.copy(ndist)
w_hi[lo] += 1
w_hi[hi] = 0
# For upper/lower out-of-bounds nodes, we need to set the
# lower/upper neighbors to the last/first grid point
edge = [idcs, idcs + 1]
edge[0][hi] = -1
edge[1][lo] = 0
return w_lo, w_hi, edge | python | {
"resource": ""
} |
q35368 | PerAxisInterpolation._call | train | def _call(self, x, out=None):
"""Create an interpolator from grid values ``x``.
Parameters
----------
x : `Tensor`
The array of values to be interpolated
out : `FunctionSpaceElement`, optional
Element in which to store the interpolator
Returns
-------
out : `FunctionSpaceElement`
Per-axis interpolator for the grid of this operator. If
``out`` was provided, the returned object is a reference
to it.
"""
def per_axis_interp(arg, out=None):
"""Interpolating function with vectorization."""
if is_valid_input_meshgrid(arg, self.grid.ndim):
input_type = 'meshgrid'
else:
input_type = 'array'
interpolator = _PerAxisInterpolator(
self.grid.coord_vectors, x,
schemes=self.schemes, nn_variants=self.nn_variants,
input_type=input_type)
return interpolator(arg, out=out)
return self.range.element(per_axis_interp, vectorized=True) | python | {
"resource": ""
} |
q35369 | _Interpolator._find_indices | train | def _find_indices(self, x):
"""Find indices and distances of the given nodes.
Can be overridden by subclasses to improve efficiency.
"""
# find relevant edges between which xi are situated
index_vecs = []
# compute distance to lower edge in unity units
norm_distances = []
# iterate through dimensions
for xi, cvec in zip(x, self.coord_vecs):
idcs = np.searchsorted(cvec, xi) - 1
idcs[idcs < 0] = 0
idcs[idcs > cvec.size - 2] = cvec.size - 2
index_vecs.append(idcs)
norm_distances.append((xi - cvec[idcs]) /
(cvec[idcs + 1] - cvec[idcs]))
return index_vecs, norm_distances | python | {
"resource": ""
} |
q35370 | _NearestInterpolator._evaluate | train | def _evaluate(self, indices, norm_distances, out=None):
"""Evaluate nearest interpolation."""
idx_res = []
for i, yi in zip(indices, norm_distances):
if self.variant == 'left':
idx_res.append(np.where(yi <= .5, i, i + 1))
else:
idx_res.append(np.where(yi < .5, i, i + 1))
idx_res = tuple(idx_res)
if out is not None:
out[:] = self.values[idx_res]
return out
else:
return self.values[idx_res] | python | {
"resource": ""
} |
q35371 | _PerAxisInterpolator._evaluate | train | def _evaluate(self, indices, norm_distances, out=None):
"""Evaluate linear interpolation.
Modified for in-place evaluation and treatment of out-of-bounds
points by implicitly assuming 0 at the next node."""
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,) * (self.values.ndim - len(indices))
if out is None:
out_shape = out_shape_from_meshgrid(norm_distances)
out_dtype = self.values.dtype
out = np.zeros(out_shape, dtype=out_dtype)
else:
out[:] = 0.0
# Weights and indices (per axis)
low_weights, high_weights, edge_indices = _create_weight_edge_lists(
indices, norm_distances, self.schemes, self.nn_variants)
# Iterate over all possible combinations of [i, i+1] for each
# axis, resulting in a loop of length 2**ndim
for lo_hi, edge in zip(product(*([['l', 'h']] * len(indices))),
product(*edge_indices)):
weight = 1.0
# TODO: determine best summation order from array strides
for lh, w_lo, w_hi in zip(lo_hi, low_weights, high_weights):
# We don't multiply in-place to exploit the cheap operations
# in the beginning: sizes grow gradually as following:
# (n, 1, 1, ...) -> (n, m, 1, ...) -> ...
# Hence, it is faster to build up the weight array instead
# of doing full-size operations from the beginning.
if lh == 'l':
weight = weight * w_lo
else:
weight = weight * w_hi
out += np.asarray(self.values[edge]) * weight[vslice]
return np.array(out, copy=False, ndmin=1) | python | {
"resource": ""
} |
q35372 | accelerated_proximal_gradient | train | def accelerated_proximal_gradient(x, f, g, gamma, niter, callback=None,
**kwargs):
r"""Accelerated proximal gradient algorithm for convex optimization.
The method is known as "Fast Iterative Soft-Thresholding Algorithm"
(FISTA). See `[Beck2009]`_ for more information.
Solves the convex optimization problem::
min_{x in X} f(x) + g(x)
where the proximal operator of ``f`` is known and ``g`` is differentiable.
Parameters
----------
x : ``f.domain`` element
Starting point of the iteration, updated in-place.
f : `Functional`
The function ``f`` in the problem definition. Needs to have
``f.proximal``.
g : `Functional`
The function ``g`` in the problem definition. Needs to have
``g.gradient``.
gamma : positive float
Step size parameter.
niter : non-negative int, optional
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Notes
-----
The problem of interest is
.. math::
\min_{x \in X} f(x) + g(x),
where the formal conditions are that
:math:`f : X \to \mathbb{R}` is proper, convex and lower-semicontinuous,
and :math:`g : X \to \mathbb{R}` is differentiable and
:math:`\nabla g` is :math:`1 / \beta`-Lipschitz continuous.
Convergence is only guaranteed if the step length :math:`\gamma` satisfies
.. math::
0 < \gamma < 2 \beta.
References
----------
.. _[Beck2009]: http://epubs.siam.org/doi/abs/10.1137/080716542
"""
# Get and validate input
if x not in f.domain:
raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
''.format(x, f.domain))
if x not in g.domain:
raise TypeError('`x` {!r} is not in the domain of `g` {!r}'
''.format(x, g.domain))
gamma, gamma_in = float(gamma), gamma
if gamma <= 0:
raise ValueError('`gamma` must be positive, got {}'.format(gamma_in))
if int(niter) != niter:
raise ValueError('`niter` {} not understood'.format(niter))
# Get the proximal
f_prox = f.proximal(gamma)
g_grad = g.gradient
# Create temporary
tmp = x.space.element()
y = x.copy()
t = 1
for k in range(niter):
# Update t
t, t_old = (1 + np.sqrt(1 + 4 * t ** 2)) / 2, t
alpha = (t_old - 1) / t
# x - gamma grad_g (y)
tmp.lincomb(1, y, -gamma, g_grad(y))
# Store old x value in y
y.assign(x)
# Update x
f_prox(tmp, out=x)
# Update y
y.lincomb(1 + alpha, x, -alpha, y)
if callback is not None:
callback(x) | python | {
"resource": ""
} |
q35373 | _blas_is_applicable | train | def _blas_is_applicable(*args):
"""Whether BLAS routines can be applied or not.
BLAS routines are available for single and double precision
float or complex data only. If the arrays are non-contiguous,
BLAS methods are usually slower, and array-writing routines do
not work at all. Hence, only contiguous arrays are allowed.
Parameters
----------
x1,...,xN : `NumpyTensor`
The tensors to be tested for BLAS conformity.
Returns
-------
blas_is_applicable : bool
``True`` if all mentioned requirements are met, ``False`` otherwise.
"""
if any(x.dtype != args[0].dtype for x in args[1:]):
return False
elif any(x.dtype not in _BLAS_DTYPES for x in args):
return False
elif not (all(x.flags.f_contiguous for x in args) or
all(x.flags.c_contiguous for x in args)):
return False
elif any(x.size > np.iinfo('int32').max for x in args):
# Temporary fix for 32 bit int overflow in BLAS
# TODO: use chunking instead
return False
else:
return True | python | {
"resource": ""
} |
q35374 | _weighting | train | def _weighting(weights, exponent):
"""Return a weighting whose type is inferred from the arguments."""
if np.isscalar(weights):
weighting = NumpyTensorSpaceConstWeighting(weights, exponent)
elif weights is None:
weighting = NumpyTensorSpaceConstWeighting(1.0, exponent)
else: # last possibility: make an array
arr = np.asarray(weights)
weighting = NumpyTensorSpaceArrayWeighting(arr, exponent)
return weighting | python | {
"resource": ""
} |
q35375 | _norm_default | train | def _norm_default(x):
"""Default Euclidean norm implementation."""
# Lazy import to improve `import odl` time
import scipy.linalg
if _blas_is_applicable(x.data):
nrm2 = scipy.linalg.blas.get_blas_funcs('nrm2', dtype=x.dtype)
norm = partial(nrm2, n=native(x.size))
else:
norm = np.linalg.norm
return norm(x.data.ravel()) | python | {
"resource": ""
} |
q35376 | _pnorm_default | train | def _pnorm_default(x, p):
"""Default p-norm implementation."""
return np.linalg.norm(x.data.ravel(), ord=p) | python | {
"resource": ""
} |
q35377 | _pnorm_diagweight | train | def _pnorm_diagweight(x, p, w):
"""Diagonally weighted p-norm implementation."""
# Ravel both in the same order (w is a numpy array)
order = 'F' if all(a.flags.f_contiguous for a in (x.data, w)) else 'C'
# This is faster than first applying the weights and then summing with
# BLAS dot or nrm2
xp = np.abs(x.data.ravel(order))
if p == float('inf'):
xp *= w.ravel(order)
return np.max(xp)
else:
xp = np.power(xp, p, out=xp)
xp *= w.ravel(order)
return np.sum(xp) ** (1 / p) | python | {
"resource": ""
} |
q35378 | _inner_default | train | def _inner_default(x1, x2):
"""Default Euclidean inner product implementation."""
# Ravel both in the same order
order = 'F' if all(a.data.flags.f_contiguous for a in (x1, x2)) else 'C'
if is_real_dtype(x1.dtype):
if x1.size > THRESHOLD_MEDIUM:
# This is as fast as BLAS dotc
return np.tensordot(x1, x2, [range(x1.ndim)] * 2)
else:
# Several times faster for small arrays
return np.dot(x1.data.ravel(order),
x2.data.ravel(order))
else:
# x2 as first argument because we want linearity in x1
return np.vdot(x2.data.ravel(order),
x1.data.ravel(order)) | python | {
"resource": ""
} |
q35379 | NumpyTensorSpace.zero | train | def zero(self):
"""Return a tensor of all zeros.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.zero()
>>> x
rn(3).element([ 0., 0., 0.])
"""
return self.element(np.zeros(self.shape, dtype=self.dtype,
order=self.default_order)) | python | {
"resource": ""
} |
q35380 | NumpyTensorSpace.one | train | def one(self):
"""Return a tensor of all ones.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.one()
>>> x
rn(3).element([ 1., 1., 1.])
"""
return self.element(np.ones(self.shape, dtype=self.dtype,
order=self.default_order)) | python | {
"resource": ""
} |
q35381 | NumpyTensorSpace.available_dtypes | train | def available_dtypes():
"""Return the set of data types available in this implementation.
Notes
-----
This is all dtypes available in Numpy. See ``numpy.sctypes``
for more information.
The available dtypes may depend on the specific system used.
"""
all_dtypes = []
for lst in np.sctypes.values():
for dtype in lst:
if dtype not in (np.object, np.void):
all_dtypes.append(np.dtype(dtype))
# Need to add these manually since np.sctypes['others'] will only
# contain one of them (depending on Python version)
all_dtypes.extend([np.dtype('S'), np.dtype('U')])
return tuple(sorted(set(all_dtypes))) | python | {
"resource": ""
} |
q35382 | NumpyTensorSpace.default_dtype | train | def default_dtype(field=None):
"""Return the default data type of this class for a given field.
Parameters
----------
field : `Field`, optional
Set of numbers to be represented by a data type.
Currently supported : `RealNumbers`, `ComplexNumbers`
The default ``None`` means `RealNumbers`
Returns
-------
dtype : `numpy.dtype`
Numpy data type specifier. The returned defaults are:
``RealNumbers()`` : ``np.dtype('float64')``
``ComplexNumbers()`` : ``np.dtype('complex128')``
"""
if field is None or field == RealNumbers():
return np.dtype('float64')
elif field == ComplexNumbers():
return np.dtype('complex128')
else:
raise ValueError('no default data type defined for field {}'
''.format(field)) | python | {
"resource": ""
} |
q35383 | NumpyTensorSpace._lincomb | train | def _lincomb(self, a, x1, b, x2, out):
"""Implement the linear combination of ``x1`` and ``x2``.
Compute ``out = a*x1 + b*x2`` using optimized
BLAS routines if possible.
This function is part of the subclassing API. Do not
call it directly.
Parameters
----------
a, b : `TensorSpace.field` element
Scalars to multiply ``x1`` and ``x2`` with.
x1, x2 : `NumpyTensor`
Summands in the linear combination.
out : `NumpyTensor`
Tensor to which the result is written.
Examples
--------
>>> space = odl.rn(3)
>>> x = space.element([0, 1, 1])
>>> y = space.element([0, 0, 1])
>>> out = space.element()
>>> result = space.lincomb(1, x, 2, y, out)
>>> result
rn(3).element([ 0., 1., 3.])
>>> result is out
True
"""
_lincomb_impl(a, x1, b, x2, out) | python | {
"resource": ""
} |
q35384 | NumpyTensorSpace.byaxis | train | def byaxis(self):
"""Return the subspace defined along one or several dimensions.
Examples
--------
Indexing with integers or slices:
>>> space = odl.rn((2, 3, 4))
>>> space.byaxis[0]
rn(2)
>>> space.byaxis[1:]
rn((3, 4))
Lists can be used to stack spaces arbitrarily:
>>> space.byaxis[[2, 1, 2]]
rn((4, 3, 4))
"""
space = self
class NpyTensorSpacebyaxis(object):
"""Helper class for indexing by axis."""
def __getitem__(self, indices):
"""Return ``self[indices]``."""
try:
iter(indices)
except TypeError:
newshape = space.shape[indices]
else:
newshape = tuple(space.shape[i] for i in indices)
if isinstance(space.weighting, ArrayWeighting):
new_array = np.asarray(space.weighting.array[indices])
weighting = NumpyTensorSpaceArrayWeighting(
new_array, space.weighting.exponent)
else:
weighting = space.weighting
return type(space)(newshape, space.dtype, weighting=weighting)
def __repr__(self):
"""Return ``repr(self)``."""
return repr(space) + '.byaxis'
return NpyTensorSpacebyaxis() | python | {
"resource": ""
} |
q35385 | NumpyTensor.asarray | train | def asarray(self, out=None):
"""Extract the data of this array as a ``numpy.ndarray``.
This method is invoked when calling `numpy.asarray` on this
tensor.
Parameters
----------
out : `numpy.ndarray`, optional
Array in which the result should be written in-place.
Has to be contiguous and of the correct dtype.
Returns
-------
asarray : `numpy.ndarray`
Numpy array with the same data type as ``self``. If
``out`` was given, the returned object is a reference
to it.
Examples
--------
>>> space = odl.rn(3, dtype='float32')
>>> x = space.element([1, 2, 3])
>>> x.asarray()
array([ 1., 2., 3.], dtype=float32)
>>> np.asarray(x) is x.asarray()
True
>>> out = np.empty(3, dtype='float32')
>>> result = x.asarray(out=out)
>>> out
array([ 1., 2., 3.], dtype=float32)
>>> result is out
True
>>> space = odl.rn((2, 3))
>>> space.one().asarray()
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if out is None:
return self.data
else:
out[:] = self.data
return out | python | {
"resource": ""
} |
q35386 | NumpyTensor.imag | train | def imag(self):
"""Imaginary part of ``self``.
Returns
-------
imag : `NumpyTensor`
Imaginary part this element as an element of a
`NumpyTensorSpace` with real data type.
Examples
--------
Get the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> x.imag
rn(3).element([ 1., 0., -3.])
Set the imaginary part:
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> zero = odl.rn(3).zero()
>>> x.imag = zero
>>> x
cn(3).element([ 1.+0.j, 2.+0.j, 3.+0.j])
Other array-like types and broadcasting:
>>> x.imag = 1.0
>>> x
cn(3).element([ 1.+1.j, 2.+1.j, 3.+1.j])
>>> x.imag = [2, 3, 4]
>>> x
cn(3).element([ 1.+2.j, 2.+3.j, 3.+4.j])
"""
if self.space.is_real:
return self.space.zero()
elif self.space.is_complex:
real_space = self.space.astype(self.space.real_dtype)
return real_space.element(self.data.imag)
else:
raise NotImplementedError('`imag` not defined for non-numeric '
'dtype {}'.format(self.dtype)) | python | {
"resource": ""
} |
q35387 | NumpyTensor.conj | train | def conj(self, out=None):
"""Return the complex conjugate of ``self``.
Parameters
----------
out : `NumpyTensor`, optional
Element to which the complex conjugate is written.
Must be an element of ``self.space``.
Returns
-------
out : `NumpyTensor`
The complex conjugate element. If ``out`` was provided,
the returned object is a reference to it.
Examples
--------
>>> space = odl.cn(3)
>>> x = space.element([1 + 1j, 2, 3 - 3j])
>>> x.conj()
cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j])
>>> out = space.element()
>>> result = x.conj(out=out)
>>> result
cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j])
>>> result is out
True
In-place conjugation:
>>> result = x.conj(out=x)
>>> x
cn(3).element([ 1.-1.j, 2.-0.j, 3.+3.j])
>>> result is x
True
"""
if self.space.is_real:
if out is None:
return self
else:
out[:] = self
return out
if not is_numeric_dtype(self.space.dtype):
raise NotImplementedError('`conj` not defined for non-numeric '
'dtype {}'.format(self.dtype))
if out is None:
return self.space.element(self.data.conj())
else:
if out not in self.space:
raise LinearSpaceTypeError('`out` {!r} not in space {!r}'
''.format(out, self.space))
self.data.conj(out.data)
return out | python | {
"resource": ""
} |
q35388 | NumpyTensorSpaceConstWeighting.dist | train | def dist(self, x1, x2):
"""Return the weighted distance between ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `NumpyTensor`
Tensors whose mutual distance is calculated.
Returns
-------
dist : float
The distance between the tensors.
"""
if self.exponent == 2.0:
return float(np.sqrt(self.const) * _norm_default(x1 - x2))
elif self.exponent == float('inf'):
return float(self.const * _pnorm_default(x1 - x2, self.exponent))
else:
return float((self.const ** (1 / self.exponent) *
_pnorm_default(x1 - x2, self.exponent))) | python | {
"resource": ""
} |
q35389 | CallbackProgressBar.reset | train | def reset(self):
"""Set `iter` to 0."""
import tqdm
self.iter = 0
self.pbar = tqdm.tqdm(total=self.niter, **self.kwargs) | python | {
"resource": ""
} |
q35390 | warning_free_pause | train | def warning_free_pause():
"""Issue a matplotlib pause without the warning."""
import matplotlib.pyplot as plt
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="Using default event loop until "
"function specific to this GUI is "
"implemented")
plt.pause(0.0001) | python | {
"resource": ""
} |
q35391 | _safe_minmax | train | def _safe_minmax(values):
"""Calculate min and max of array with guards for nan and inf."""
# Nan and inf guarded min and max
isfinite = np.isfinite(values)
if np.any(isfinite):
# Only use finite values
values = values[isfinite]
minval = np.min(values)
maxval = np.max(values)
return minval, maxval | python | {
"resource": ""
} |
q35392 | _colorbar_format | train | def _colorbar_format(minval, maxval):
"""Return the format string for the colorbar."""
if not (np.isfinite(minval) and np.isfinite(maxval)):
return str(maxval)
else:
return '%.{}f'.format(_digits(minval, maxval)) | python | {
"resource": ""
} |
q35393 | import_submodules | train | def import_submodules(package, name=None, recursive=True):
"""Import all submodules of ``package``.
Parameters
----------
package : `module` or string
Package whose submodules to import.
name : string, optional
Override the package name with this value in the full
submodule names. By default, ``package`` is used.
recursive : bool, optional
If ``True``, recursively import all submodules of ``package``.
Otherwise, import only the modules at the top level.
Returns
-------
pkg_dict : dict
Dictionary where keys are the full submodule names and values
are the corresponding module objects.
"""
if isinstance(package, str):
package = importlib.import_module(package)
if name is None:
name = package.__name__
submodules = [m[0] for m in inspect.getmembers(package, inspect.ismodule)
if m[1].__name__.startswith('odl')]
results = {}
for pkgname in submodules:
full_name = name + '.' + pkgname
try:
results[full_name] = importlib.import_module(full_name)
except ImportError:
pass
else:
if recursive:
results.update(import_submodules(full_name, full_name))
return results | python | {
"resource": ""
} |
q35394 | make_interface | train | def make_interface():
"""Generate the RST files for the API doc of ODL."""
modnames = ['odl'] + list(import_submodules(odl).keys())
for modname in modnames:
if not modname.startswith('odl'):
modname = 'odl.' + modname
shortmodname = modname.split('.')[-1]
print('{: <25} : generated {}.rst'.format(shortmodname, modname))
line = '=' * len(shortmodname)
module = importlib.import_module(modname)
docstring = module.__doc__
submodules = [m[0] for m in inspect.getmembers(
module, inspect.ismodule) if m[1].__name__.startswith('odl')]
functions = [m[0] for m in inspect.getmembers(
module, inspect.isfunction) if m[1].__module__ == modname]
classes = [m[0] for m in inspect.getmembers(
module, inspect.isclass) if m[1].__module__ == modname]
docstring = '' if docstring is None else docstring
submodules = [modname + '.' + mod for mod in submodules]
functions = ['~' + modname + '.' + fun
for fun in functions if not fun.startswith('_')]
classes = ['~' + modname + '.' + cls
for cls in classes if not cls.startswith('_')]
if len(submodules) > 0:
this_mod_string = module_string.format('\n '.join(submodules))
else:
this_mod_string = ''
if len(functions) > 0:
this_fun_string = fun_string.format('\n '.join(functions))
else:
this_fun_string = ''
if len(classes) > 0:
this_class_string = class_string.format('\n '.join(classes))
else:
this_class_string = ''
with open(modname + '.rst', 'w') as text_file:
text_file.write(string.format(shortname=shortmodname,
name=modname,
line=line,
docstring=docstring,
module_string=this_mod_string,
fun_string=this_fun_string,
class_string=this_class_string)) | python | {
"resource": ""
} |
q35395 | LpNorm._call | train | def _call(self, x):
"""Return the Lp-norm of ``x``."""
if self.exponent == 0:
return self.domain.one().inner(np.not_equal(x, 0))
elif self.exponent == 1:
return x.ufuncs.absolute().inner(self.domain.one())
elif self.exponent == 2:
return np.sqrt(x.inner(x))
elif np.isfinite(self.exponent):
tmp = x.ufuncs.absolute()
tmp.ufuncs.power(self.exponent, out=tmp)
return np.power(tmp.inner(self.domain.one()), 1 / self.exponent)
elif self.exponent == np.inf:
return x.ufuncs.absolute().ufuncs.max()
elif self.exponent == -np.inf:
return x.ufuncs.absolute().ufuncs.min()
else:
raise RuntimeError('unknown exponent') | python | {
"resource": ""
} |
q35396 | GroupL1Norm._call | train | def _call(self, x):
"""Return the group L1-norm of ``x``."""
# TODO: update when integration operator is in place: issue #440
pointwise_norm = self.pointwise_norm(x)
return pointwise_norm.inner(pointwise_norm.space.one()) | python | {
"resource": ""
} |
q35397 | GroupL1Norm.convex_conj | train | def convex_conj(self):
"""The convex conjugate functional of the group L1-norm."""
conj_exp = conj_exponent(self.pointwise_norm.exponent)
return IndicatorGroupL1UnitBall(self.domain, exponent=conj_exp) | python | {
"resource": ""
} |
q35398 | IndicatorGroupL1UnitBall.convex_conj | train | def convex_conj(self):
"""Convex conjugate functional of IndicatorLpUnitBall.
Returns
-------
convex_conj : GroupL1Norm
The convex conjugate is the the group L1-norm.
"""
conj_exp = conj_exponent(self.pointwise_norm.exponent)
return GroupL1Norm(self.domain, exponent=conj_exp) | python | {
"resource": ""
} |
q35399 | IndicatorLpUnitBall.convex_conj | train | def convex_conj(self):
"""The conjugate functional of IndicatorLpUnitBall.
The convex conjugate functional of an ``Lp`` norm, ``p < infty`` is the
indicator function on the unit ball defined by the corresponding dual
norm ``q``, given by ``1/p + 1/q = 1`` and where ``q = infty`` if
``p = 1`` [Roc1970]. By the Fenchel-Moreau theorem, the convex
conjugate functional of indicator function on the unit ball in ``Lq``
is the corresponding Lp-norm [BC2011].
References
----------
[Roc1970] Rockafellar, R. T. *Convex analysis*. Princeton
University Press, 1970.
[BC2011] Bauschke, H H, and Combettes, P L. *Convex analysis and
monotone operator theory in Hilbert spaces*. Springer, 2011.
"""
if self.exponent == np.inf:
return L1Norm(self.domain)
elif self.exponent == 2:
return L2Norm(self.domain)
else:
return LpNorm(self.domain, exponent=conj_exponent(self.exponent)) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.