_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def rotation_matrix_from_to(from_vec, to_vec):
    r"""Return a matrix that rotates ``from_vec`` to ``to_vec`` in 2d or 3d.

    Since a rotation from one vector to another in 3 dimensions has
    (at least) one degree of freedom, this function makes deliberate but
    still arbitrary choices to fix these free parameters. See Notes for
    details. For the applied formula in 3d, see `this Wikipedia page
    about Rodrigues' rotation formula
    <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_.

    Parameters
    ----------
    from_vec, to_vec : `array-like`, shape ``(2,)`` or ``(3,)``
        Vectors between which the returned matrix rotates. They should not
        be very close to zero or collinear.

    Returns
    -------
    matrix : `numpy.ndarray`, shape ``(2, 2)`` or ``(3, 3)``
        A matrix rotating ``from_vec`` to ``to_vec``. Note that the
        matrix does *not* include scaling, i.e. it is not guaranteed
        that ``matrix.dot(from_vec) == to_vec``.

    Examples
    --------
    In two dimensions, rotation is simple:

    >>> from_vec, to_vec = [1, 0], [1, 1]
    >>> mat = rotation_matrix_from_to(from_vec, to_vec)
    >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec))
    >>> np.allclose(mat.dot([1, 0]), to_vec_normalized)
    True
    >>> from_vec, to_vec = [1, 0], [-1, 1]
    >>> mat = rotation_matrix_from_to(from_vec, to_vec)
    >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec))
    >>> np.allclose(mat.dot([1, 0]), to_vec_normalized)
    True

    Rotation in 3d by less than ``pi``:

    >>> from_vec, to_vec = [1, 0, 0], [-1, 1, 0]
    >>> mat = rotation_matrix_from_to(from_vec, to_vec)
    >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec))
    >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized)
    True

    Rotation by more than ``pi``:

    >>> from_vec, to_vec = [1, 0, 0], [-1, -1, 0]
    >>> mat = rotation_matrix_from_to(from_vec, to_vec)
    >>> to_vec_normalized = np.divide(to_vec, np.linalg.norm(to_vec))
    >>> np.allclose(mat.dot([1, 0, 0]), to_vec_normalized)
    True

    Notes
    -----
    In 3d, the matrix corresponds to a rotation around the normal vector
    :math:`\hat n = \hat u \times \hat v`, where :math:`\hat u` and
    :math:`\hat v` are the normalized versions of :math:`u`, the
    vector from which to rotate, and :math:`v`, the vector to which
    should be rotated.

    The rotation angle is determined as
    :math:`\alpha = \pm \arccos(\langle \hat u, \hat v \rangle)`.
    Its sign corresponds to the sign of
    :math:`\langle \hat b, \hat v\rangle`, where
    :math:`\hat b = \hat n \times \hat u` is the binormal vector.

    In the case that :math:`\hat u` and :math:`\hat v` are collinear,
    a perpendicular vector is chosen as :math:`\hat n = (1, 0, 0)` if
    :math:`v_1 = v_2 = 0`, else :math:`\hat n = (-v_2, v_1, v_3)`.
    The angle in this case is :math:`\alpha = 0` if
    :math:`\langle \hat u, \hat v \rangle > 0`, otherwise
    :math:`\alpha = \pi`.
    """
    # Keep references to the original inputs for error messages; the
    # working copies are normalized in-place below.
    from_vec, from_vec_in = (np.array(from_vec, dtype=float, copy=True),
                             from_vec)
    to_vec, to_vec_in = np.array(to_vec, dtype=float, copy=True), to_vec

    if from_vec.shape not in ((2,), (3,)):
        raise ValueError('`from_vec.shape` must be (2,) or (3,), got {}'
                         ''.format(from_vec.shape))
    if to_vec.shape not in ((2,), (3,)):
        raise ValueError('`to_vec.shape` must be (2,) or (3,), got {}'
                         ''.format(to_vec.shape))
    if from_vec.shape != to_vec.shape:
        raise ValueError('`from_vec.shape` and `to_vec.shape` not equal: '
                         '{} != {}'
                         ''.format(from_vec.shape, to_vec.shape))

    ndim = len(from_vec)

    # Normalize vectors
    from_vec_norm = np.linalg.norm(from_vec)
    if from_vec_norm < 1e-10:
        raise ValueError('`from_vec` {} too close to zero'.format(from_vec_in))
    from_vec /= from_vec_norm
    to_vec_norm = np.linalg.norm(to_vec)
    if to_vec_norm < 1e-10:
        raise ValueError('`to_vec` {} too close to zero'.format(to_vec_in))
    to_vec /= to_vec_norm

    if ndim == 2:
        dot = np.dot(from_vec, to_vec)
        # `from_vec` rotated by +pi/2; its dot product with `to_vec`
        # determines the sign of the rotation angle.
        from_rot = (-from_vec[1], from_vec[0])
        if dot == 0:
            # Orthogonal vectors; pick the quarter turn with matching sign
            angle = np.pi / 2 if np.dot(from_rot, to_vec) > 0 else -np.pi / 2
        elif np.array_equal(to_vec, -from_vec):
            # Antiparallel, arccos sign trick below would be ambiguous
            angle = np.pi
        else:
            angle = (np.sign(np.dot(from_rot, to_vec)) *
                     np.arccos(np.dot(from_vec, to_vec)))
        return np.array([[np.cos(angle), -np.sin(angle)],
                         [np.sin(angle), np.cos(angle)]])
    elif ndim == 3:
        # Determine normal
        normal = np.cross(from_vec, to_vec)
        normal_norm = np.linalg.norm(normal)
        if normal_norm < 1e-10:
            # Collinear vectors, use perpendicular vector and angle = 0 or pi
            normal = perpendicular_vector(from_vec)
            angle = 0 if np.dot(from_vec, to_vec) > 0 else np.pi
            return axis_rotation_matrix(normal, angle)
        else:
            # Usual case, determine binormal and sign of rotation angle
            normal /= normal_norm
            binormal = np.cross(normal, from_vec)
            angle = (np.sign(np.dot(binormal, to_vec)) *
                     np.arccos(np.dot(from_vec, to_vec)))
            return axis_rotation_matrix(normal, angle)
    else:
        raise RuntimeError('bad ndim')
def transform_system(principal_vec, principal_default, other_vecs,
                     matrix=None):
    """Transform vectors with either ``matrix`` or based on ``principal_vec``.

    The logic of this function is as follows:

    - If ``matrix`` is not ``None``, transform ``principal_vec`` and
      all vectors in ``other_vecs`` by ``matrix``, ignoring
      ``principal_default``.
    - If ``matrix`` is ``None``, compute the rotation matrix from
      ``principal_default`` to ``principal_vec``, not including the
      dilation. Apply that rotation to all vectors in ``other_vecs``.

    **Note:** All vectors must have the same shape and match the shape
    of ``matrix`` if given.

    Parameters
    ----------
    principal_vec : `array-like`, shape ``(ndim,)``
        Vector that defines the transformation if ``matrix`` is not
        provided.
    principal_default : `array-like`, shape ``(ndim,)``
        Default value for ``principal_vec``. The deviation from this
        determines the transformation.
        If ``matrix`` is given, this has no effect.
    other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
        The other vectors that should be transformed. ``None`` entries
        are just appended as-is.
    matrix : `array-like`, shape ``(ndim, ndim)``, optional
        Explicit transformation matrix to be applied to the vectors.
        It is allowed to include a constant scaling but shouldn't have
        strongly varying directional scaling (bad condition).

    Returns
    -------
    transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
        The transformed vectors. The first entry is (the transformed)
        ``principal_vec``, followed by the transformed ``other_vecs``.
        Thus the length of the tuple is ``len(other_vecs) + 1``.
    """
    transformed_vecs = []
    principal_vec = np.asarray(principal_vec, dtype=float)
    ndim = principal_vec.shape[0]

    if matrix is None:
        # Separate into dilation and rotation. The dilation is only used
        # for comparison, not in the final matrix.
        principal_default = np.asarray(principal_default, dtype=float)
        pr_norm = np.linalg.norm(principal_vec)
        pr_default_norm = np.linalg.norm(principal_default)
        if pr_default_norm == 0.0 and pr_norm != 0.0:
            # Cannot scale a zero vector to a nonzero one
            raise ValueError('no transformation from {} to {}'
                             ''.format(principal_default, principal_vec))
        elif pr_norm == 0.0 and pr_default_norm != 0.0:
            # The transform would collapse everything to zero
            raise ValueError('transformation from {} to {} is singular'
                             ''.format(principal_default, principal_vec))
        elif pr_norm == 0.0 and pr_default_norm == 0.0:
            dilation = 1.0
        else:
            dilation = (np.linalg.norm(principal_vec) /
                        np.linalg.norm(principal_default))

        # Determine the rotation part
        if np.allclose(principal_vec, dilation * principal_default):
            # Dilation only
            matrix = np.eye(ndim)
        else:
            matrix = rotation_matrix_from_to(principal_default, principal_vec)

        # This one goes straight in (rotation not applied to itself)
        transformed_vecs.append(principal_vec)
    else:
        matrix = np.asarray(matrix, dtype=float)
        if matrix.shape != (ndim, ndim):
            raise ValueError('matrix shape must be {}, got {}'
                             ''.format((ndim, ndim), matrix.shape))

        # Check matrix condition via the ratio of extreme singular values
        svals = np.linalg.svd(matrix, compute_uv=False)
        condition = np.inf if 0.0 in svals else svals[0] / svals[-1]
        if condition > 1e6:
            raise np.linalg.LinAlgError(
                'matrix is badly conditioned: condition number is {}'
                ''.format(condition))

        transformed_vecs.append(matrix.dot(principal_vec))

    for vec in other_vecs:
        if vec is None:
            transformed_vecs.append(None)
        else:
            transformed_vecs.append(matrix.dot(vec))

    return tuple(transformed_vecs)
def perpendicular_vector(vec):
    """Return a unit vector perpendicular to ``vec``.

    Parameters
    ----------
    vec : `array-like`
        Vector(s) of arbitrary length. The axis along the vector components
        must come last.

    Returns
    -------
    perp_vec : `numpy.ndarray`
        Array of same shape as ``vec`` such that ``dot(vec, perp_vec) == 0``
        (along the last axis if there are multiple vectors).

    Examples
    --------
    Works in 2d:

    >>> perpendicular_vector([0, 1])
    array([-1.,  0.])
    >>> np.allclose(perpendicular_vector([1, 0]), [0, 1])
    True

    And in 3d:

    >>> perpendicular_vector([0, 1, 0])
    array([-1.,  0.,  0.])
    >>> perpendicular_vector([0, 0, 1])
    array([ 1.,  0.,  0.])

    The function is vectorized: extra axes may be added to the left
    to process many vectors at once.
    """
    # Remember whether a single vector was passed, to undo the ndmin=2
    # promotion at the end.
    was_single = (np.ndim(vec) == 1)
    arr = np.array(vec, dtype=float, copy=False, ndmin=2)

    if np.any(np.all(arr == 0, axis=-1)):
        raise ValueError('zero vector')

    perp = np.zeros(arr.shape)
    # Vectors with a nonzero component in the first two axes get the
    # 2d quarter-turn (-y, x, 0, ...); the remaining ones (nonzero only
    # beyond axis 1) get the first unit vector.
    in_plane = np.any(arr[..., :2] != 0, axis=-1)
    perp[in_plane, 0] = -arr[in_plane, 1]
    perp[in_plane, 1] = arr[in_plane, 0]
    perp[~in_plane, 0] = 1

    # Normalize each vector along the last axis
    perp /= np.linalg.norm(perp, axis=-1, keepdims=True)

    return perp.squeeze() if was_single else perp
def is_inside_bounds(value, params):
    """Return ``True`` if ``value`` is contained in ``params``.

    This method supports broadcasting in the sense that for
    ``params.ndim >= 2``, if more than one value is given, the inputs
    are broadcast against each other.

    Parameters
    ----------
    value : `array-like`
        Value(s) to be checked. For several inputs, the final bool
        tells whether all inputs pass the check or not.
    params : `IntervalProd`
        Set in which the value is / the values are supposed to lie.

    Returns
    -------
    is_inside_bounds : bool
        ``True`` if all values lie in ``params``, ``False`` otherwise.

    Examples
    --------
    Check a single point:

    >>> params = odl.IntervalProd([0, 0], [1, 2])
    >>> is_inside_bounds([0, 0], params)
    True
    >>> is_inside_bounds([0, -1], params)
    False

    Using broadcasting:

    >>> pts_ax0 = np.array([0, 0, 1, 0, 1])[:, None]
    >>> pts_ax1 = np.array([2, 0, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    True
    >>> pts_ax1 = np.array([-2, 1])[None, :]
    >>> is_inside_bounds([pts_ax0, pts_ax1], params)
    False
    """
    if value in params:
        # Single parameter contained as-is, no broadcasting needed
        return True
    else:
        if params.ndim == 1:
            # 1d: every entry of `value` is a separate parameter
            return params.contains_all(np.ravel(value))
        else:
            # Flesh out (broadcast) and flatten to an (ndim, N) array of
            # points, then check all bounds at once
            bcast_value = np.broadcast_arrays(*value)
            stacked_value = np.vstack(bcast_value)
            flat_value = stacked_value.reshape(params.ndim, -1)
            return params.contains_all(flat_value)
def pyfftw_call(array_in, array_out, direction='forward', axes=None,
                halfcomplex=False, **kwargs):
    """Calculate the DFT with pyfftw.

    The discrete Fourier (forward) transform calcuates the sum::

        f_hat[k] = sum_j( f[j] * exp(-2*pi*1j * j*k/N) )

    where the summation is taken over all indices
    ``j = (j[0], ..., j[d-1])`` in the range ``0 <= j < N``
    (component-wise), with ``N`` being the shape of the input array.

    The output indices ``k`` lie in the same range, except
    for half-complex transforms, where the last axis ``i`` in ``axes``
    is shortened to ``0 <= k[i] < floor(N[i]/2) + 1``.

    In the backward transform, sign of the the exponential argument
    is flipped.

    Parameters
    ----------
    array_in : `numpy.ndarray`
        Array to be transformed
    array_out : `numpy.ndarray`
        Output array storing the transformed values, may be aliased
        with ``array_in``.
    direction : {'forward', 'backward'}, optional
        Direction of the transform
    axes : int or sequence of ints, optional
        Dimensions along which to take the transform. ``None`` means
        using all axes and is equivalent to ``np.arange(ndim)``.
    halfcomplex : bool, optional
        If ``True``, calculate only the negative frequency part along the
        last axis. If ``False``, calculate the full complex FFT.
        This option can only be used with real input data.

    Other Parameters
    ----------------
    fftw_plan : ``pyfftw.FFTW``, optional
        Use this plan instead of calculating a new one. If specified,
        the options ``planning_effort``, ``planning_timelimit`` and
        ``threads`` have no effect.
    planning_effort : str, optional
        Flag for the amount of effort put into finding an optimal
        FFTW plan. See the `FFTW doc on planner flags
        <http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_.
        Available options: {'estimate', 'measure', 'patient', 'exhaustive'}
        Default: 'estimate'
    planning_timelimit : float or ``None``, optional
        Limit planning time to roughly this many seconds.
        Default: ``None`` (no limit)
    threads : int, optional
        Number of threads to use.
        Default: Number of CPUs if the number of data points is larger
        than 4096, else 1.
    normalise_idft : bool, optional
        If ``True``, the result of the backward transform is divided by
        ``1 / N``, where ``N`` is the total number of points in
        ``array_in[axes]``. This ensures that the IDFT is the true
        inverse of the forward DFT.
        Default: ``False``
    import_wisdom : filename or file handle, optional
        File to load FFTW wisdom from. If the file does not exist,
        it is ignored.
    export_wisdom : filename or file handle, optional
        File to append the accumulated FFTW wisdom to

    Returns
    -------
    fftw_plan : ``pyfftw.FFTW``
        The plan object created from the input arguments. It can be
        reused for transforms of the same size with the same data types.
        Note that reuse only gives a speedup if the initial plan
        used a planner flag other than ``'estimate'``.
        If ``fftw_plan`` was specified, the returned object is a
        reference to it.

    Notes
    -----
    * The planning and direction flags can also be specified as
      capitalized and prepended by ``'FFTW_'``, i.e. in the original
      FFTW form.
    * For a ``halfcomplex`` forward transform, the arrays must fulfill
      ``array_out.shape[axes[-1]] == array_in.shape[axes[-1]] // 2 + 1``,
      and vice versa for backward transforms.
    * All planning schemes except ``'estimate'`` require an internal copy
      of the input array but are often several times faster after the
      first call (measuring results are cached). Typically,
      'measure' is a good compromise. If you cannot afford the copy,
      use ``'estimate'``.
    * If a plan is provided via the ``fftw_plan`` parameter, no copy
      is needed internally.
    """
    import pickle

    if not array_in.flags.aligned:
        raise ValueError('input array not aligned')

    if not array_out.flags.aligned:
        raise ValueError('output array not aligned')

    if axes is None:
        axes = tuple(range(array_in.ndim))

    axes = normalized_axes_tuple(axes, array_in.ndim)

    direction = _pyfftw_to_local(direction)
    fftw_plan_in = kwargs.pop('fftw_plan', None)
    planning_effort = _pyfftw_to_local(kwargs.pop('planning_effort',
                                                  'estimate'))
    planning_timelimit = kwargs.pop('planning_timelimit', None)
    threads = kwargs.pop('threads', None)
    normalise_idft = kwargs.pop('normalise_idft', False)
    wimport = kwargs.pop('import_wisdom', '')
    wexport = kwargs.pop('export_wisdom', '')

    # Cast input to complex if necessary
    array_in_copied = False
    if is_real_dtype(array_in.dtype) and not halfcomplex:
        # Need to cast array_in to complex dtype
        array_in = array_in.astype(complex_dtype(array_in.dtype))
        array_in_copied = True

    # Do consistency checks on the arguments
    _pyfftw_check_args(array_in, array_out, axes, halfcomplex, direction)

    # Import wisdom if possible
    if wimport:
        try:
            with open(wimport, 'rb') as wfile:
                wisdom = pickle.load(wfile)
        except IOError:
            # Nonexisting wisdom file is silently ignored (best effort)
            wisdom = []
        except TypeError:  # Got file handle
            wisdom = pickle.load(wimport)

        if wisdom:
            pyfftw.import_wisdom(wisdom)

    # Copy input array if it hasn't been done yet and the planner is likely
    # to destroy it. If we already have a plan, we don't have to worry.
    planner_destroys = _pyfftw_destroys_input(
        [planning_effort], direction, halfcomplex, array_in.ndim)
    must_copy_array_in = fftw_plan_in is None and planner_destroys

    if must_copy_array_in and not array_in_copied:
        # Planning happens on a scratch array so `array_in` stays intact
        plan_arr_in = np.empty_like(array_in)
        flags = [_local_to_pyfftw(planning_effort), 'FFTW_DESTROY_INPUT']
    else:
        plan_arr_in = array_in
        flags = [_local_to_pyfftw(planning_effort)]

    if fftw_plan_in is None:
        if threads is None:
            if plan_arr_in.size <= 4096:  # Trade-off wrt threading overhead
                threads = 1
            else:
                threads = cpu_count()

        fftw_plan = pyfftw.FFTW(
            plan_arr_in, array_out, direction=_local_to_pyfftw(direction),
            flags=flags, planning_timelimit=planning_timelimit,
            threads=threads, axes=axes)
    else:
        fftw_plan = fftw_plan_in

    # Execute the (possibly reused) plan on the actual data
    fftw_plan(array_in, array_out, normalise_idft=normalise_idft)

    if wexport:
        try:
            with open(wexport, 'ab') as wfile:
                pickle.dump(pyfftw.export_wisdom(), wfile)
        except TypeError:  # Got file handle
            pickle.dump(pyfftw.export_wisdom(), wexport)

    return fftw_plan
def _pyfftw_destroys_input(flags, direction, halfcomplex, ndim):
    """Return ``True`` if FFTW destroys an input array, ``False`` otherwise.

    A transform overwrites its input either if one of the destructive
    planner flags is present (in raw FFTW or local spelling), or if it
    is a multi-dimensional half-complex backward transform.
    """
    destructive = ('FFTW_MEASURE', 'FFTW_PATIENT', 'FFTW_EXHAUSTIVE',
                   'FFTW_DESTROY_INPUT')
    for flag in destructive:
        if flag in flags or _pyfftw_to_local(flag) in flags:
            return True

    # Half-complex C2R transforms destroy their input except in 1d
    return (direction in ('backward', 'FFTW_BACKWARD') and
            halfcomplex and
            ndim != 1)
def _pyfftw_check_args(arr_in, arr_out, axes, halfcomplex, direction):
    """Raise an error if anything is not ok with in and out.

    Checks, for the given transform ``direction``, that

    - ``axes`` contains no duplicates,
    - the shapes of ``arr_in`` and ``arr_out`` are consistent (with the
      last transform axis halved for half-complex transforms), and
    - the dtypes match (real-to-complex for half-complex transforms,
      otherwise equal complex dtypes).

    Parameters
    ----------
    arr_in, arr_out : `numpy.ndarray`
        Input and output arrays of the planned transform.
    axes : sequence of int
        Axes along which the transform is to be taken.
    halfcomplex : bool
        Whether a half-complex (R2C / C2R) transform is planned.
    direction : {'forward', 'backward'}
        Direction of the planned transform. Any other value raises
        `RuntimeError` since `direction` is normalized by the caller.
    """
    if len(set(axes)) != len(axes):
        raise ValueError('duplicate axes are not allowed')

    if direction == 'forward':
        out_shape = list(arr_in.shape)
        if halfcomplex:
            try:
                # Half-complex output only stores the non-redundant half
                out_shape[axes[-1]] = arr_in.shape[axes[-1]] // 2 + 1
            except IndexError:
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_in.ndim))

        if arr_out.shape != tuple(out_shape):
            raise ValueError('expected output shape {}, got {}'
                             ''.format(tuple(out_shape), arr_out.shape))

        if is_real_dtype(arr_in.dtype):
            out_dtype = complex_dtype(arr_in.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex forward transform '
                             'with complex input')
        else:
            out_dtype = arr_in.dtype

        if arr_out.dtype != out_dtype:
            raise ValueError('expected output dtype {}, got {}'
                             ''.format(dtype_repr(out_dtype),
                                       dtype_repr(arr_out.dtype)))

    elif direction == 'backward':
        in_shape = list(arr_out.shape)
        if halfcomplex:
            try:
                # Half-complex input only stores the non-redundant half
                in_shape[axes[-1]] = arr_out.shape[axes[-1]] // 2 + 1
            except IndexError:
                # Fixed: no unused exception binding here (was `as err`)
                raise IndexError('axis index {} out of range for array '
                                 'with {} axes'
                                 ''.format(axes[-1], arr_out.ndim))

        if arr_in.shape != tuple(in_shape):
            raise ValueError('expected input shape {}, got {}'
                             ''.format(tuple(in_shape), arr_in.shape))

        if is_real_dtype(arr_out.dtype):
            in_dtype = complex_dtype(arr_out.dtype)
        elif halfcomplex:
            raise ValueError('cannot combine halfcomplex backward transform '
                             'with complex output')
        else:
            in_dtype = arr_out.dtype

        if arr_in.dtype != in_dtype:
            raise ValueError('expected input dtype {}, got {}'
                             ''.format(dtype_repr(in_dtype),
                                       dtype_repr(arr_in.dtype)))

    else:  # Shouldn't happen since `direction` is normalized by the caller
        raise RuntimeError('invalid direction {!r}'.format(direction))
def admm_linearized(x, f, g, L, tau, sigma, niter, **kwargs):
    r"""Generic linearized ADMM method for convex problems.

    ADMM stands for "Alternating Direction Method of Multipliers" and
    is a popular convex optimization method. This variant solves problems
    of the form ::

        min_x [ f(x) + g(Lx) ]

    with convex ``f`` and ``g``, and a linear operator ``L``. See Section
    4.4 of `[PB2014] <http://web.stanford.edu/~boyd/papers/prox_algs.html>`_
    and the Notes for more mathematical details.

    Parameters
    ----------
    x : ``L.domain`` element
        Starting point of the iteration, updated in-place.
    f, g : `Functional`
        The functions ``f`` and ``g`` in the problem definition. They
        need to implement the ``proximal`` method.
    L : linear `Operator`
        The linear operator that is composed with ``g`` in the problem
        definition. It must fulfill ``L.domain == f.domain`` and
        ``L.range == g.domain``.
    tau, sigma : positive float
        Step size parameters for the update of the variables.
    niter : non-negative int
        Number of iterations.

    Other Parameters
    ----------------
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Notes
    -----
    Given :math:`x^{(0)}` (the provided ``x``) and
    :math:`u^{(0)} = z^{(0)} = 0`, linearized ADMM applies the following
    iteration:

    .. math::
        x^{(k+1)} &= \mathrm{prox}_{\tau f} \left[
            x^{(k)} - \sigma^{-1}\tau L^*\big(
                L x^{(k)} - z^{(k)} + u^{(k)}
            \big)
        \right]

        z^{(k+1)} &= \mathrm{prox}_{\sigma g}\left(
            L x^{(k+1)} + u^{(k)}
        \right)

        u^{(k+1)} &= u^{(k)} + L x^{(k+1)} - z^{(k+1)}

    The step size parameters :math:`\tau` and :math:`\sigma` must satisfy

    .. math::
        0 < \tau < \frac{\sigma}{\|L\|^2}

    to guarantee convergence.

    The name "linearized ADMM" comes from the fact that in the
    minimization subproblem for the :math:`x` variable, this variant
    uses a linearization of a quadratic term in the augmented Lagrangian
    of the generic ADMM, in order to make the step expressible with
    the proximal operator of :math:`f`.

    Another name for this algorithm is *split inexact Uzawa method*.

    References
    ----------
    [PB2014] Parikh, N and Boyd, S. *Proximal Algorithms*. Foundations and
    Trends in Optimization, 1(3) (2014), pp 123-231.
    """
    if not isinstance(L, Operator):
        raise TypeError('`op` {!r} is not an `Operator` instance'
                        ''.format(L))

    if x not in L.domain:
        raise OpDomainError('`x` {!r} is not in the domain of `op` {!r}'
                            ''.format(x, L.domain))

    tau, tau_in = float(tau), tau
    if tau <= 0:
        raise ValueError('`tau` must be positive, got {}'.format(tau_in))

    sigma, sigma_in = float(sigma), sigma
    if sigma <= 0:
        raise ValueError('`sigma` must be positive, got {}'.format(sigma_in))

    niter, niter_in = int(niter), niter
    if niter < 0 or niter != niter_in:
        raise ValueError('`niter` must be a non-negative integer, got {}'
                         ''.format(niter_in))

    # Callback object
    callback = kwargs.pop('callback', None)
    if callback is not None and not callable(callback):
        raise TypeError('`callback` {} is not callable'.format(callback))

    # Initialize range variables
    z = L.range.zero()
    u = L.range.zero()

    # Temporary for Lx + u [- z]
    tmp_ran = L(x)
    # Temporary for L^*(Lx + u - z)
    tmp_dom = L.domain.element()

    # Store proximals since their initialization may involve computation
    prox_tau_f = f.proximal(tau)
    prox_sigma_g = g.proximal(sigma)

    for _ in range(niter):
        # tmp_ran has value Lx^k here
        # tmp_dom <- L^*(Lx^k + u^k - z^k)
        tmp_ran += u
        tmp_ran -= z
        L.adjoint(tmp_ran, out=tmp_dom)

        # x <- x^k - (tau/sigma) L^*(Lx^k + u^k - z^k)
        x.lincomb(1, x, -tau / sigma, tmp_dom)
        # x^(k+1) <- prox[tau*f](x)
        prox_tau_f(x, out=x)

        # tmp_ran <- Lx^(k+1)
        L(x, out=tmp_ran)
        # z^(k+1) <- prox[sigma*g](Lx^(k+1) + u^k)
        prox_sigma_g(tmp_ran + u, out=z)  # 1 copy here
        # u^(k+1) = u^k + Lx^(k+1) - z^(k+1)
        u += tmp_ran
        u -= z

        if callback is not None:
            callback(x)
def admm_linearized_simple(x, f, g, L, tau, sigma, niter, **kwargs):
    """Non-optimized version of ``admm_linearized``.

    This function is intended for debugging. It makes a lot of copies and
    performs no error checking.
    """
    callback = kwargs.pop('callback', None)
    z, u = L.range.zero(), L.range.zero()

    for _iter in range(niter):
        # Linearized x-update, then the two standard ADMM updates
        lin_point = x - tau / sigma * L.adjoint(L(x) + u - z)
        x[:] = f.proximal(tau)(lin_point)
        z = g.proximal(sigma)(L(x) + u)
        u = L(x) + u - z

        if callback is not None:
            callback(x)
def derivative(self, point):
    """Return the derivative in ``point``.

    The derivative of the gradient is often called the Hessian.

    Parameters
    ----------
    point : `domain` `element-like`
        The point that the derivative should be taken in.

    Returns
    -------
    derivative : `NumericalDerivative`
        Numerical estimate of the derivative. Uses the same method as this
        operator does, but with half the number of significant digits in
        the step size in order to preserve numerical stability.

    Examples
    --------
    Compute a numerical estimate of the derivative of the squared L2 norm:

    >>> space = odl.rn(3)
    >>> func = odl.solvers.L2NormSquared(space)
    >>> grad = NumericalGradient(func)
    >>> hess = grad.derivative([1, 1, 1])
    >>> hess([1, 0, 0])
    rn(3).element([ 2.,  0.,  0.])

    Find the Hessian matrix:

    >>> hess_matrix = odl.matrix_representation(hess)
    >>> np.allclose(hess_matrix, 2 * np.eye(3))
    True
    """
    # sqrt(step) halves the number of significant digits of the step,
    # which keeps the nested difference quotient numerically stable.
    return NumericalDerivative(self, point,
                               method=self.method, step=np.sqrt(self.step))
def _offset_from_spaces(dom, ran):
    """Return index offset corresponding to given spaces.

    ``dom`` and ``ran`` are presumably uniformly discretized spaces on
    grids with the same cell sides (TODO confirm against callers); the
    returned tuple gives, per axis, the number of cells by which the
    range grid is shifted against the domain grid. Axes with equal
    shapes get offset 0.

    Raises
    ------
    ValueError
        If, in an axis where the shapes differ, the shift between the
        grids is not (close to) an integer multiple of the cell size.
    """
    # Axes in which an actual resizing takes place
    affected = np.not_equal(dom.shape, ran.shape)
    diff_l = np.abs(ran.grid.min() - dom.grid.min())
    # Shift measured in (fractional) numbers of cells
    offset_float = diff_l / dom.cell_sides
    offset = np.around(offset_float).astype(int)
    for i in range(dom.ndim):
        # Only resized axes must have an integer cell offset
        if affected[i] and not np.isclose(offset[i], offset_float[i]):
            raise ValueError('in axis {}: range is shifted relative to domain '
                             'by a non-multiple {} of cell_sides'
                             ''.format(i, offset_float[i] - offset[i]))
    offset[~affected] = 0
    return tuple(offset)
def _call(self, x, out=None):
    """Apply resampling operator.

    The element ``x`` is resampled using the sampling and interpolation
    operators of the underlying spaces.
    """
    interpolated = x.interpolation
    if out is not None:
        # In-place: sample the interpolant into `out`
        out.sampling(interpolated)
        return None
    return interpolated
def axes(self):
    """Dimensions in which an actual resizing is performed."""
    dom_shape = self.domain.shape
    ran_shape = self.range.shape
    resized = []
    for axis in range(self.domain.ndim):
        if dom_shape[axis] != ran_shape[axis]:
            resized.append(axis)
    return tuple(resized)
def derivative(self, point):
    """Derivative of this operator at ``point``.

    For the particular case of constant padding with non-zero
    constant, the derivative is the corresponding zero-padding
    variant. In all other cases, this operator is linear, i.e.
    the derivative is equal to ``self``.
    """
    nonzero_const_pad = (self.pad_mode == 'constant' and
                         self.pad_const != 0)
    if not nonzero_const_pad:
        # Linear operator, it is its own derivative
        return self
    return ResizingOperator(
        domain=self.domain, range=self.range, pad_mode='constant',
        pad_const=0.0)
def contains_all(self, other):
    """Return ``True`` if all strings in ``other`` have size `length`."""
    dtype = getattr(other, 'dtype', None)
    if dtype is None:
        # No dtype attribute: infer a common dtype from the entries
        dtype = np.result_type(*other)
    expected = (np.dtype('S{}'.format(self.length)),
                np.dtype('<U{}'.format(self.length)))
    return dtype in expected
def element(self, inp=None):
    """Return an element from ``inp`` or from scratch."""
    if inp is None:
        # Default element: all blanks
        return ' ' * self.length
    # Truncate to `length`, then pad with trailing spaces
    return str(inp)[:self.length].ljust(self.length)
def contains_all(self, other):
    """Return ``True`` if ``other`` is a sequence of complex numbers."""
    inferred = getattr(other, 'dtype', None)
    return is_numeric_dtype(np.result_type(*other) if inferred is None
                            else inferred)
def element(self, inp=None):
    """Return a complex number from ``inp`` or from scratch."""
    if inp is None:
        return complex(0.0, 0.0)
    # Workaround for missing __complex__ of numpy.ndarray
    # for NumPy versions < 1.12: extract the single entry explicitly.
    # TODO: remove when NumPy >= 1.12 is required
    if isinstance(inp, np.ndarray):
        return complex(inp.reshape([1])[0])
    return complex(inp)
def contains_set(self, other):
    """Return ``True`` if ``other`` is a subset of the real numbers.

    Returns
    -------
    contained : bool
        ``True`` if ``other`` is an instance of `RealNumbers` or
        `Integers`, ``False`` otherwise.

    Examples
    --------
    >>> real_numbers = RealNumbers()
    >>> real_numbers.contains_set(RealNumbers())
    True
    """
    # Identity short-circuit, then the two admissible subset types
    return other is self or isinstance(other, (RealNumbers, Integers))
def contains_all(self, array):
    """Test if `array` is an array of real numbers."""
    inferred = getattr(array, 'dtype', None)
    if inferred is None:
        # No dtype attribute: infer a common dtype from the entries
        inferred = np.result_type(*array)
    return is_real_dtype(inferred)
def contains_all(self, other):
    """Return ``True`` if ``other`` is a sequence of integers."""
    inferred = getattr(other, 'dtype', None)
    return is_int_dtype(np.result_type(*other) if inferred is None
                        else inferred)
def element(self, inp=None):
    """Create a `CartesianProduct` element.

    Parameters
    ----------
    inp : iterable, optional
        Collection of input values for the `LinearSpace.element`
        methods of all sets in the Cartesian product.

    Returns
    -------
    element : tuple
        A tuple of the given input
    """
    if inp is None:
        parts = [set_.element() for set_ in self.sets]
    else:
        parts = [set_.element(value)
                 for value, set_ in zip(inp, self.sets)]
    element = tuple(parts)
    if len(element) != len(self):
        # zip() truncated, i.e. `inp` had too few entries
        raise ValueError('input provides only {} values, needed '
                         'are {}'.format(len(element), len(self)))
    return element
def adjoint(self):
    """Adjoint transform, equal to the inverse.

    Only defined if both domain and range have exponent 2.

    See Also
    --------
    inverse
    """
    exponents = (self.domain.exponent, self.range.exponent)
    if exponents != (2.0, 2.0):
        raise NotImplementedError(
            'no adjoint defined for exponents ({}, {}) != (2, 2)'
            ''.format(self.domain.exponent, self.range.exponent))
    return self.inverse
def create_temporaries(self, r=True, f=True):
    """Allocate and store reusable temporaries.

    Existing temporaries are overridden.

    Parameters
    ----------
    r : bool, optional
        Create temporary for the real space
    f : bool, optional
        Create temporary for the frequency space

    Notes
    -----
    To save memory, clear the temporaries when the transform is
    no longer used.

    See Also
    --------
    clear_temporaries
    clear_fftw_plan : can also hold references to the temporaries
    """
    inverse = isinstance(self, FourierTransformInverse)

    if inverse:
        # For the inverse transform, "real" and "frequency" spaces
        # swap roles between domain and range
        rspace = self.range
        fspace = self.domain
    else:
        rspace = self.domain
        fspace = self.range

    if r:
        self._tmp_r = rspace.element().asarray()
    if f:
        self._tmp_f = fspace.element().asarray()
def _preprocess(self, x, out=None):
    """Return the pre-processed version of ``x``.

    C2C: use ``tmp_r`` or ``tmp_f`` (C2C operation)
    R2C: use ``tmp_f`` (R2C operation)
    HALFC: use ``tmp_r`` (R2R operation)

    The result is stored in ``out`` if given, otherwise in
    a temporary or a new array.
    """
    if out is None:
        if self.domain.field == ComplexNumbers():
            # Complex-to-complex step: either temporary works
            out = self._tmp_r if self._tmp_r is not None else self._tmp_f
        elif self.domain.field == RealNumbers() and not self.halfcomplex:
            # Real-to-complex step: need the (complex) frequency temporary
            out = self._tmp_f
        else:
            # Half-complex: real-to-real step
            out = self._tmp_r
    return dft_preprocess_data(
        x, shift=self.shifts, axes=self.axes, sign=self.sign,
        out=out)
def inverse(self):
    """Inverse of the inverse, the forward FT."""
    # The forward transform uses the opposite sign in the exponent
    forward_sign = '+' if self.sign == '-' else '-'
    return FourierTransform(
        domain=self.range, range=self.domain, sign=forward_sign,
        impl=self.impl, axes=self.axes, halfcomplex=self.halfcomplex,
        shift=self.shifts, tmp_r=self._tmp_r, tmp_f=self._tmp_f)
def moveaxis(a, source, destination):
    """Move axes of an array to new positions.

    Other axes remain in their original order.
    This function is a backport of `numpy.moveaxis` introduced in
    NumPy 1.11.

    See Also
    --------
    numpy.moveaxis
    """
    import numpy
    if hasattr(numpy, 'moveaxis'):
        # Native implementation available, delegate
        return numpy.moveaxis(a, source, destination)

    def _as_axis_list(axes):
        # Accept a single int as well as a sequence of ints
        try:
            return list(axes)
        except TypeError:
            return [axes]

    # Normalize negative axis indices
    src = [ax + a.ndim if ax < 0 else ax for ax in _as_axis_list(source)]
    dst = [ax + a.ndim if ax < 0 else ax
           for ax in _as_axis_list(destination)]

    # Start from the axes that stay put, then insert the moved ones
    perm = [ax for ax in range(a.ndim) if ax not in src]
    for dest_ax, src_ax in sorted(zip(dst, src)):
        perm.insert(dest_ax, src_ax)

    return a.transpose(perm)
q35527 | flip | train | def flip(a, axis):
"""Reverse the order of elements in an array along the given axis.
This function is a backport of `numpy.flip` introduced in NumPy 1.12.
See Also
--------
numpy.flip
"""
if not hasattr(a, 'ndim'):
a = np.asarray(a)
indexer = [slice(None)] * a.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError('axis={} is invalid for the {}-dimensional input '
'array'.format(axis, a.ndim))
return a[tuple(indexer)] | python | {
"resource": ""
} |
q35528 | _read_projections | train | def _read_projections(folder, indices):
"""Read mayo projections from a folder."""
datasets = []
# Get the relevant file names
file_names = sorted([f for f in os.listdir(folder) if f.endswith(".dcm")])
if len(file_names) == 0:
raise ValueError('No DICOM files found in {}'.format(folder))
file_names = file_names[indices]
data_array = None
for i, file_name in enumerate(tqdm.tqdm(file_names,
'Loading projection data')):
# read the file
dataset = dicom.read_file(folder + '/' + file_name)
# Get some required data
rows = dataset.NumberofDetectorRows
cols = dataset.NumberofDetectorColumns
hu_factor = dataset.HUCalibrationFactor
rescale_intercept = dataset.RescaleIntercept
rescale_slope = dataset.RescaleSlope
# Load the array as bytes
proj_array = np.array(np.frombuffer(dataset.PixelData, 'H'),
dtype='float32')
proj_array = proj_array.reshape([rows, cols], order='F').T
# Rescale array
proj_array *= rescale_slope
proj_array += rescale_intercept
proj_array /= hu_factor
# Store results
if data_array is None:
# We need to load the first dataset before we know the shape
data_array = np.empty((len(file_names), cols, rows),
dtype='float32')
data_array[i] = proj_array[:, ::-1]
datasets.append(dataset)
return datasets, data_array | python | {
"resource": ""
} |
q35529 | load_projections | train | def load_projections(folder, indices=None):
"""Load geometry and data stored in Mayo format from folder.
Parameters
----------
folder : str
Path to the folder where the Mayo DICOM files are stored.
indices : optional
Indices of the projections to load.
Accepts advanced indexing such as slice or list of indices.
Returns
-------
geometry : ConeFlatGeometry
Geometry corresponding to the Mayo projector.
proj_data : `numpy.ndarray`
Projection data, given as the line integral of the linear attenuation
coefficient (g/cm^3). Its unit is thus g/cm^2.
"""
datasets, data_array = _read_projections(folder, indices)
# Get the angles
angles = [d.DetectorFocalCenterAngularPosition for d in datasets]
angles = -np.unwrap(angles) - np.pi # different defintion of angles
# Set minimum and maximum corners
shape = np.array([datasets[0].NumberofDetectorColumns,
datasets[0].NumberofDetectorRows])
pixel_size = np.array([datasets[0].DetectorElementTransverseSpacing,
datasets[0].DetectorElementAxialSpacing])
# Correct from center of pixel to corner of pixel
minp = -(np.array(datasets[0].DetectorCentralElement) - 0.5) * pixel_size
maxp = minp + shape * pixel_size
# Select geometry parameters
src_radius = datasets[0].DetectorFocalCenterRadialDistance
det_radius = (datasets[0].ConstantRadialDistance -
datasets[0].DetectorFocalCenterRadialDistance)
# For unknown reasons, mayo does not include the tag
# "TableFeedPerRotation", which is what we want.
# Instead we manually compute the pitch
pitch = ((datasets[-1].DetectorFocalCenterAxialPosition -
datasets[0].DetectorFocalCenterAxialPosition) /
((np.max(angles) - np.min(angles)) / (2 * np.pi)))
# Get flying focal spot data
offset_axial = np.array([d.SourceAxialPositionShift for d in datasets])
offset_angular = np.array([d.SourceAngularPositionShift for d in datasets])
offset_radial = np.array([d.SourceRadialDistanceShift for d in datasets])
# TODO(adler-j): Implement proper handling of flying focal spot.
# Currently we do not fully account for it, merely making some "first
# order corrections" to the detector position and radial offset.
# Update angles with flying focal spot (in plane direction).
# This increases the resolution of the reconstructions.
angles = angles - offset_angular
# We correct for the mean offset due to the rotated angles, we need to
# shift the detector.
offset_detector_by_angles = det_radius * np.mean(offset_angular)
minp[0] -= offset_detector_by_angles
maxp[0] -= offset_detector_by_angles
# We currently apply only the mean of the offsets
src_radius = src_radius + np.mean(offset_radial)
# Partially compensate for a movement of the source by moving the object
# instead. We need to rescale by the magnification to get the correct
# change in the detector. This approximation is only exactly valid on the
# axis of rotation.
mean_offset_along_axis_for_ffz = np.mean(offset_axial) * (
src_radius / (src_radius + det_radius))
# Create partition for detector
detector_partition = odl.uniform_partition(minp, maxp, shape)
# Convert offset to odl defintions
offset_along_axis = (mean_offset_along_axis_for_ffz +
datasets[0].DetectorFocalCenterAxialPosition -
angles[0] / (2 * np.pi) * pitch)
# Assemble geometry
angle_partition = odl.nonuniform_partition(angles)
geometry = odl.tomo.ConeFlatGeometry(angle_partition,
detector_partition,
src_radius=src_radius,
det_radius=det_radius,
pitch=pitch,
offset_along_axis=offset_along_axis)
# Create a *temporary* ray transform (we need its range)
spc = odl.uniform_discr([-1] * 3, [1] * 3, [32] * 3)
ray_trafo = odl.tomo.RayTransform(spc, geometry, interp='linear')
# convert coordinates
theta, up, vp = ray_trafo.range.grid.meshgrid
d = src_radius + det_radius
u = d * np.arctan(up / d)
v = d / np.sqrt(d**2 + up**2) * vp
# Calculate projection data in rectangular coordinates since we have no
# backend that supports cylindrical
proj_data_cylinder = ray_trafo.range.element(data_array)
interpolated_values = proj_data_cylinder.interpolation((theta, u, v))
proj_data = ray_trafo.range.element(interpolated_values)
return geometry, proj_data.asarray() | python | {
"resource": ""
} |
q35530 | load_reconstruction | train | def load_reconstruction(folder, slice_start=0, slice_end=-1):
"""Load a volume from folder, also returns the corresponding partition.
Parameters
----------
folder : str
Path to the folder where the DICOM files are stored.
slice_start : int
Index of the first slice to use. Used for subsampling.
slice_end : int
Index of the final slice to use.
Returns
-------
partition : `odl.RectPartition`
Partition describing the geometric positioning of the voxels.
data : `numpy.ndarray`
Volumetric data. Scaled such that data = 1 for water (0 HU).
Notes
-----
DICOM data is highly non trivial. Typically, each slice has been computed
with a slice tickness (e.g. 3mm) but the slice spacing might be
different from that.
Further, the coordinates in DICOM is typically the *middle* of the pixel,
not the corners as in ODL.
This function should handle all of these peculiarities and give a volume
with the correct coordinate system attached.
"""
file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")])
if len(file_names) == 0:
raise ValueError('No DICOM files found in {}'.format(folder))
volumes = []
datasets = []
file_names = file_names[slice_start:slice_end]
for file_name in tqdm.tqdm(file_names, 'loading volume data'):
# read the file
dataset = dicom.read_file(folder + '/' + file_name)
# Get parameters
pixel_size = np.array(dataset.PixelSpacing)
pixel_thickness = float(dataset.SliceThickness)
rows = dataset.Rows
cols = dataset.Columns
# Get data array and convert to correct coordinates
data_array = np.array(np.frombuffer(dataset.PixelData, 'H'),
dtype='float32')
data_array = data_array.reshape([cols, rows], order='C')
data_array = np.rot90(data_array, -1)
# Convert from storage type to densities
# TODO: Optimize these computations
hu_values = (dataset.RescaleSlope * data_array +
dataset.RescaleIntercept)
densities = (hu_values + 1000) / 1000
# Store results
volumes.append(densities)
datasets.append(dataset)
voxel_size = np.array(list(pixel_size) + [pixel_thickness])
shape = np.array([rows, cols, len(volumes)])
# Compute geometry parameters
mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) -
np.array(dataset.DataCollectionCenterPatient))
reconstruction_size = (voxel_size * shape)
min_pt = mid_pt - reconstruction_size / 2
max_pt = mid_pt + reconstruction_size / 2
# axis 1 has reversed convention
min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1]
if len(datasets) > 1:
slice_distance = np.abs(
float(datasets[1].DataCollectionCenterPatient[2]) -
float(datasets[0].DataCollectionCenterPatient[2]))
else:
# If we only have one slice, we must approximate the distance.
slice_distance = pixel_thickness
# The middle of the minimum/maximum slice can be computed from the
# DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner
# points (e.g. edge of volume) we need to add half a voxel thickness to
# both sides.
min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2]
min_pt[2] -= 0.5 * slice_distance
max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2]
max_pt[2] += 0.5 * slice_distance
partition = odl.uniform_partition(min_pt, max_pt, shape)
volume = np.transpose(np.array(volumes), (1, 2, 0))
return partition, volume | python | {
"resource": ""
} |
q35531 | newtons_method | train | def newtons_method(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
cg_iter=None, callback=None):
r"""Newton's method for minimizing a functional.
Notes
-----
This is a general and optimized implementation of Newton's method
for solving the problem:
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
of finding a root of a function.
The algorithm is well-known and there is a vast literature about it.
Among others, the method is described in [BV2004], Sections 9.5
and 10.2 (`book available online
<http://stanford.edu/~boyd/cvxbook/bv_cvxbook.pdf>`_),
[GNS2009], Section 2.7 for solving nonlinear equations and Section
11.3 for its use in minimization, and wikipedia on `Newton's_method
<https://en.wikipedia.org/wiki/Newton's_method>`_.
The algorithm works by iteratively solving
.. math::
\partial f(x_k)p_k = -f(x_k)
and then updating as
.. math::
x_{k+1} = x_k + \alpha x_k,
where :math:`\alpha` is a suitable step length (see the
references). In this implementation the system of equations are
solved using the conjugate gradient method.
Parameters
----------
f : `Functional`
Goal functional. Needs to have ``f.gradient`` and
``f.gradient.derivative``.
x : ``op.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
cg_iter : int, optional
Number of iterations in the the conjugate gradient solver,
for computing the search direction.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate
References
----------
[BV2004] Boyd, S, and Vandenberghe, L. *Convex optimization*.
Cambridge university press, 2004.
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
"""
# TODO: update doc
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
if cg_iter is None:
# Motivated by that if it is Ax = b, x and b in Rn, it takes at most n
# iterations to solve with cg
cg_iter = grad.domain.size
# TODO: optimize by using lincomb and avoiding to create copies
for _ in range(maxiter):
# Initialize the search direction to 0
search_direction = x.space.zero()
# Compute hessian (as operator) and gradient in the current point
hessian = grad.derivative(x)
deriv_in_point = grad(x)
# Solving A*x = b for x, in this case f''(x)*p = -f'(x)
# TODO: Let the user provide/choose method for how to solve this?
try:
hessian_inverse = hessian.inverse
except NotImplementedError:
conjugate_gradient(hessian, search_direction,
-deriv_in_point, cg_iter)
else:
hessian_inverse(-deriv_in_point, out=search_direction)
# Computing step length
dir_deriv = search_direction.inner(deriv_in_point)
if np.abs(dir_deriv) <= tol:
return
step_length = line_search(x, search_direction, dir_deriv)
# Updating
x += step_length * search_direction
if callback is not None:
callback(x) | python | {
"resource": ""
} |
q35532 | bfgs_method | train | def bfgs_method(f, x, line_search=1.0, maxiter=1000, tol=1e-15, num_store=None,
hessinv_estimate=None, callback=None):
r"""Quasi-Newton BFGS method to minimize a differentiable function.
Can use either the regular BFGS method, or the limited memory BFGS method.
Notes
-----
This is a general and optimized implementation of a quasi-Newton
method with BFGS update for solving a general unconstrained
optimization problem
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}.
The QN method is an approximate Newton method, where the Hessian
is approximated and gradually updated in each step. This
implementation uses the rank-one BFGS update schema where the
inverse of the Hessian is recalculated in each iteration.
The algorithm is described in [GNS2009], Section 12.3 and in the
`BFGS Wikipedia article
<https://en.wikipedia.org/wiki/Broyden%E2%80%93Fletcher%E2%80%93\
Goldfarb%E2%80%93Shanno_algorithm>`_
Parameters
----------
f : `Functional`
Functional with ``f.gradient``.
x : ``f.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
maxiter : int, optional
Maximum number of iterations.
tol : float, optional
Tolerance that should be used for terminating the iteration.
num_store : int, optional
Maximum number of correction factors to store. For ``None``, the method
is the regular BFGS method. For an integer, the method becomes the
Limited Memory BFGS method.
hessinv_estimate : `Operator`, optional
Initial estimate of the inverse of the Hessian operator. Needs to be an
operator from ``f.domain`` to ``f.domain``.
Default: Identity on ``f.domain``
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
References
----------
[GNS2009] Griva, I, Nash, S G, and Sofer, A. *Linear and nonlinear
optimization*. Siam, 2009.
"""
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
ys = []
ss = []
grad_x = grad(x)
for i in range(maxiter):
# Determine a stepsize using line search
search_dir = -_bfgs_direction(ss, ys, grad_x, hessinv_estimate)
dir_deriv = search_dir.inner(grad_x)
if np.abs(dir_deriv) == 0:
return # we found an optimum
step = line_search(x, direction=search_dir, dir_derivative=dir_deriv)
# Update x
x_update = search_dir
x_update *= step
x += x_update
grad_x, grad_diff = grad(x), grad_x
# grad_diff = grad(x) - grad(x_old)
grad_diff.lincomb(-1, grad_diff, 1, grad_x)
y_inner_s = grad_diff.inner(x_update)
# Test for convergence
if np.abs(y_inner_s) < tol:
if grad_x.norm() < tol:
return
else:
# Reset if needed
ys = []
ss = []
continue
# Update Hessian
ys.append(grad_diff)
ss.append(x_update)
if num_store is not None:
# Throw away factors if they are too many.
ss = ss[-num_store:]
ys = ys[-num_store:]
if callback is not None:
callback(x) | python | {
"resource": ""
} |
q35533 | broydens_method | train | def broydens_method(f, x, line_search=1.0, impl='first', maxiter=1000,
tol=1e-15, hessinv_estimate=None,
callback=None):
r"""Broyden's first method, a quasi-Newton scheme.
Notes
-----
This is a general and optimized implementation of Broyden's method,
a quasi-Newton method for solving a general unconstrained optimization
problem
.. math::
\min f(x)
for a differentiable function
:math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
:math:`\mathcal{X}`. It does so by finding a zero of the gradient
.. math::
\nabla f: \mathcal{X} \to \mathcal{X}
using a Newton-type update scheme with approximate Hessian.
The algorithm is described in [Bro1965] and [Kva1991], and in a
`Wikipedia article
<https://en.wikipedia.org/wiki/Broyden's_method>`_.
Parameters
----------
f : `Functional`
Functional with ``f.gradient``
x : ``f.domain`` element
Starting point of the iteration
line_search : float or `LineSearch`, optional
Strategy to choose the step length. If a float is given, uses it as a
fixed step length.
impl : {'first', 'second'}, optional
What version of Broydens method to use. First is also known as Broydens
'good' method, while the second is known as Broydens 'bad' method.
maxiter : int, optional
Maximum number of iterations.
``tol``.
tol : float, optional
Tolerance that should be used for terminating the iteration.
hessinv_estimate : `Operator`, optional
Initial estimate of the inverse of the Hessian operator. Needs to be an
operator from ``f.domain`` to ``f.domain``.
Default: Identity on ``f.domain``
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
References
----------
[Bro1965] Broyden, C G. *A class of methods for solving nonlinear
simultaneous equations*. Mathematics of computation, 33 (1965),
pp 577--593.
[Kva1991] Kvaalen, E. *A faster Broyden method*. BIT Numerical
Mathematics 31 (1991), pp 369--372.
"""
grad = f.gradient
if x not in grad.domain:
raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
''.format(x, grad.domain))
if not callable(line_search):
line_search = ConstantLineSearch(line_search)
impl, impl_in = str(impl).lower(), impl
if impl not in ('first', 'second'):
raise ValueError('`impl` {!r} not understood'
''.format(impl_in))
ss = []
ys = []
grad_x = grad(x)
for i in range(maxiter):
# find step size
search_dir = -_broydens_direction(ss, ys, grad_x,
hessinv_estimate, impl)
dir_deriv = search_dir.inner(grad_x)
if np.abs(dir_deriv) == 0:
return # we found an optimum
step = line_search(x, search_dir, dir_deriv)
# update x
x_update = step * search_dir
x += x_update
# compute new gradient
grad_x, grad_x_old = grad(x), grad_x
delta_grad = grad_x - grad_x_old
# update hessian.
# TODO: reuse from above
v = _broydens_direction(ss, ys, delta_grad, hessinv_estimate,
impl)
if impl == 'first':
divisor = x_update.inner(v)
# Test for convergence
if np.abs(divisor) < tol:
if grad_x.norm() < tol:
return
else:
# Reset if needed
ys = []
ss = []
continue
u = (x_update - v) / divisor
ss.append(u)
ys.append(x_update)
elif impl == 'second':
divisor = delta_grad.inner(delta_grad)
# Test for convergence
if np.abs(divisor) < tol:
if grad_x.norm() < tol:
return
else:
# Reset if needed
ys = []
ss = []
continue
u = (x_update - v) / divisor
ss.append(u)
ys.append(delta_grad)
if callback is not None:
callback(x) | python | {
"resource": ""
} |
q35534 | _axis_in_detector | train | def _axis_in_detector(geometry):
"""A vector in the detector plane that points along the rotation axis."""
du, dv = geometry.det_axes_init
axis = geometry.axis
c = np.array([np.vdot(axis, du), np.vdot(axis, dv)])
cnorm = np.linalg.norm(c)
# Check for numerical errors
assert cnorm != 0
return c / cnorm | python | {
"resource": ""
} |
q35535 | _rotation_direction_in_detector | train | def _rotation_direction_in_detector(geometry):
"""A vector in the detector plane that points in the rotation direction."""
du, dv = geometry.det_axes_init
axis = geometry.axis
det_normal = np.cross(dv, du)
rot_dir = np.cross(axis, det_normal)
c = np.array([np.vdot(rot_dir, du), np.vdot(rot_dir, dv)])
cnorm = np.linalg.norm(c)
# Check for numerical errors
assert cnorm != 0
return c / cnorm | python | {
"resource": ""
} |
q35536 | _fbp_filter | train | def _fbp_filter(norm_freq, filter_type, frequency_scaling):
"""Create a smoothing filter for FBP.
Parameters
----------
norm_freq : `array-like`
Frequencies normalized to lie in the interval [0, 1].
filter_type : {'Ram-Lak', 'Shepp-Logan', 'Cosine', 'Hamming', 'Hann',
callable}
The type of filter to be used.
If a string is given, use one of the standard filters with that name.
A callable should take an array of values in [0, 1] and return the
filter for these frequencies.
frequency_scaling : float
Scaling of the frequencies for the filter. All frequencies are scaled
by this number, any relative frequency above ``frequency_scaling`` is
set to 0.
Returns
-------
smoothing_filter : `numpy.ndarray`
Examples
--------
Create an FBP filter
>>> norm_freq = np.linspace(0, 1, 10)
>>> filt = _fbp_filter(norm_freq,
... filter_type='Hann',
... frequency_scaling=0.8)
"""
filter_type, filter_type_in = str(filter_type).lower(), filter_type
if callable(filter_type):
filt = filter_type(norm_freq)
elif filter_type == 'ram-lak':
filt = np.copy(norm_freq)
elif filter_type == 'shepp-logan':
filt = norm_freq * np.sinc(norm_freq / (2 * frequency_scaling))
elif filter_type == 'cosine':
filt = norm_freq * np.cos(norm_freq * np.pi / (2 * frequency_scaling))
elif filter_type == 'hamming':
filt = norm_freq * (
0.54 + 0.46 * np.cos(norm_freq * np.pi / (frequency_scaling)))
elif filter_type == 'hann':
filt = norm_freq * (
np.cos(norm_freq * np.pi / (2 * frequency_scaling)) ** 2)
else:
raise ValueError('unknown `filter_type` ({})'
''.format(filter_type_in))
indicator = (norm_freq <= frequency_scaling)
filt *= indicator
return filt | python | {
"resource": ""
} |
q35537 | tam_danielson_window | train | def tam_danielson_window(ray_trafo, smoothing_width=0.05, n_pi=1):
"""Create Tam-Danielson window from a `RayTransform`.
The Tam-Danielson window is an indicator function on the minimal set of
data needed to reconstruct a volume from given data. It is useful in
analytic reconstruction methods such as FBP to give a more accurate
reconstruction.
See [TAM1998] for more informationon the window.
See [PKGT2000] for information on the ``n_pi`` parameter.
Parameters
----------
ray_trafo : `RayTransform`
The ray transform for which to compute the window.
smoothing_width : positive float, optional
Width of the smoothing applied to the window's edges given as a
fraction of the width of the full window.
n_pi : odd int, optional
Total number of half rotations to include in the window. Values larger
than 1 should be used if the pitch is much smaller than the detector
height.
Returns
-------
tam_danielson_window : ``ray_trafo.range`` element
See Also
--------
fbp_op : Filtered back-projection operator from `RayTransform`
tam_danielson_window : Weighting for short scan data
odl.tomo.geometry.conebeam.ConeFlatGeometry :
Primary use case for this window function.
References
----------
[TSS1998] Tam, K C, Samarasekera, S and Sauer, F.
*Exact cone beam CT with a spiral scan*.
Physics in Medicine & Biology 4 (1998), p 1015.
https://dx.doi.org/10.1088/0031-9155/43/4/028
[PKGT2000] Proksa R, Köhler T, Grass M, Timmer J.
*The n-PI-method for helical cone-beam CT*
IEEE Trans Med Imaging. 2000 Sep;19(9):848-63.
https://www.ncbi.nlm.nih.gov/pubmed/11127600
"""
# Extract parameters
src_radius = ray_trafo.geometry.src_radius
det_radius = ray_trafo.geometry.det_radius
pitch = ray_trafo.geometry.pitch
if pitch == 0:
raise ValueError('Tam-Danielson window is only defined with '
'`pitch!=0`')
smoothing_width = float(smoothing_width)
if smoothing_width < 0:
raise ValueError('`smoothing_width` should be a positive float')
if n_pi % 2 != 1:
raise ValueError('`n_pi` must be odd, got {}'.format(n_pi))
# Find projection of axis on detector
axis_proj = _axis_in_detector(ray_trafo.geometry)
rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
# Find distance from projection of rotation axis for each pixel
dx = (rot_dir[0] * ray_trafo.range.meshgrid[1]
+ rot_dir[1] * ray_trafo.range.meshgrid[2])
dx_axis = dx * src_radius / (src_radius + det_radius)
def Vn(u):
return (pitch / (2 * np.pi)
* (1 + (u / src_radius) ** 2)
* (n_pi * np.pi / 2.0 - np.arctan(u / src_radius)))
lower_proj_axis = -Vn(dx_axis)
upper_proj_axis = Vn(-dx_axis)
lower_proj = lower_proj_axis * (src_radius + det_radius) / src_radius
upper_proj = upper_proj_axis * (src_radius + det_radius) / src_radius
# Compute a smoothed width
interval = (upper_proj - lower_proj)
width = interval * smoothing_width / np.sqrt(2)
# Create window function
def window_fcn(x):
# Lazy import to improve `import odl` time
import scipy.special
x_along_axis = axis_proj[0] * x[1] + axis_proj[1] * x[2]
if smoothing_width != 0:
lower_wndw = 0.5 * (
1 + scipy.special.erf((x_along_axis - lower_proj) / width))
upper_wndw = 0.5 * (
1 + scipy.special.erf((upper_proj - x_along_axis) / width))
else:
lower_wndw = (x_along_axis >= lower_proj)
upper_wndw = (x_along_axis <= upper_proj)
return lower_wndw * upper_wndw
return ray_trafo.range.element(window_fcn) / n_pi | python | {
"resource": ""
} |
q35538 | parker_weighting | train | def parker_weighting(ray_trafo, q=0.25):
"""Create parker weighting for a `RayTransform`.
Parker weighting is a weighting function that ensures that oversampled
fan/cone beam data are weighted such that each line has unit weight. It is
useful in analytic reconstruction methods such as FBP to give a more
accurate result and can improve convergence rates for iterative methods.
See the article `Parker weights revisited`_ for more information.
Parameters
----------
ray_trafo : `RayTransform`
The ray transform for which to compute the weights.
q : float, optional
Parameter controlling the speed of the roll-off at the edges of the
weighting. 1.0 gives the classical Parker weighting, while smaller
values in general lead to lower noise but stronger discretization
artifacts.
Returns
-------
parker_weighting : ``ray_trafo.range`` element
See Also
--------
fbp_op : Filtered back-projection operator from `RayTransform`
tam_danielson_window : Indicator function for helical data
odl.tomo.geometry.conebeam.FanBeamGeometry : Use case in 2d
odl.tomo.geometry.conebeam.ConeFlatGeometry : Use case in 3d (for pitch 0)
References
----------
.. _Parker weights revisited: https://www.ncbi.nlm.nih.gov/pubmed/11929021
"""
# Note: Parameter names taken from WES2002
# Extract parameters
src_radius = ray_trafo.geometry.src_radius
det_radius = ray_trafo.geometry.det_radius
ndim = ray_trafo.geometry.ndim
angles = ray_trafo.range.meshgrid[0]
min_rot_angle = ray_trafo.geometry.motion_partition.min_pt
alen = ray_trafo.geometry.motion_params.length
# Parker weightings are not defined for helical geometries
if ray_trafo.geometry.ndim != 2:
pitch = ray_trafo.geometry.pitch
if pitch != 0:
raise ValueError('Parker weighting window is only defined with '
'`pitch==0`')
# Find distance from projection of rotation axis for each pixel
if ndim == 2:
dx = ray_trafo.range.meshgrid[1]
elif ndim == 3:
# Find projection of axis on detector
rot_dir = _rotation_direction_in_detector(ray_trafo.geometry)
# If axis is aligned to a coordinate axis, save some memory and time by
# using broadcasting
if rot_dir[0] == 0:
dx = rot_dir[1] * ray_trafo.range.meshgrid[2]
elif rot_dir[1] == 0:
dx = rot_dir[0] * ray_trafo.range.meshgrid[1]
else:
dx = (rot_dir[0] * ray_trafo.range.meshgrid[1]
+ rot_dir[1] * ray_trafo.range.meshgrid[2])
# Compute parameters
dx_abs_max = np.max(np.abs(dx))
max_fan_angle = 2 * np.arctan2(dx_abs_max, src_radius + det_radius)
delta = max_fan_angle / 2
epsilon = alen - np.pi - max_fan_angle
if epsilon < 0:
raise Exception('data not sufficiently sampled for parker weighting')
# Define utility functions
def S(betap):
return (0.5 * (1.0 + np.sin(np.pi * betap)) * (np.abs(betap) < 0.5)
+ (betap >= 0.5))
def b(alpha):
return q * (2 * delta - 2 * alpha + epsilon)
# Create weighting function
beta = np.asarray(angles - min_rot_angle,
dtype=ray_trafo.range.dtype) # rotation angle
alpha = np.asarray(np.arctan2(dx, src_radius + det_radius),
dtype=ray_trafo.range.dtype)
# Compute sum in place to save memory
S_sum = S(beta / b(alpha) - 0.5)
S_sum += S((beta - 2 * delta + 2 * alpha - epsilon) / b(alpha) + 0.5)
S_sum -= S((beta - np.pi + 2 * alpha) / b(-alpha) - 0.5)
S_sum -= S((beta - np.pi - 2 * delta - epsilon) / b(-alpha) + 0.5)
scale = 0.5 * alen / np.pi
return ray_trafo.range.element(
np.broadcast_to(S_sum * scale, ray_trafo.range.shape)) | python | {
"resource": ""
} |
q35539 | fbp_op | train | def fbp_op(ray_trafo, padding=True, filter_type='Ram-Lak',
frequency_scaling=1.0):
"""Create filtered back-projection operator from a `RayTransform`.
The filtered back-projection is an approximate inverse to the ray
transform.
Parameters
----------
ray_trafo : `RayTransform`
The ray transform (forward operator) whose approximate inverse should
be computed. Its geometry has to be any of the following
`Parallel2dGeometry` : Exact reconstruction
`Parallel3dAxisGeometry` : Exact reconstruction
`FanBeamGeometry` : Approximate reconstruction, correct in limit of fan
angle = 0.
Only flat detectors are supported (det_curvature_radius is None).
`ConeFlatGeometry`, pitch = 0 (circular) : Approximate reconstruction,
correct in the limit of fan angle = 0 and cone angle = 0.
`ConeFlatGeometry`, pitch > 0 (helical) : Very approximate unless a
`tam_danielson_window` is used. Accurate with the window.
Other geometries: Not supported
padding : bool, optional
If the data space should be zero padded. Without padding, the data may
be corrupted due to the circular convolution used. Using padding makes
the algorithm slower.
filter_type : optional
The type of filter to be used.
The predefined options are, in approximate order from most noise
senstive to least noise sensitive:
``'Ram-Lak'``, ``'Shepp-Logan'``, ``'Cosine'``, ``'Hamming'`` and
``'Hann'``.
A callable can also be provided. It must take an array of values in
[0, 1] and return the filter for these frequencies.
frequency_scaling : float, optional
Relative cutoff frequency for the filter.
The normalized frequencies are rescaled so that they fit into the range
[0, frequency_scaling]. Any frequency above ``frequency_scaling`` is
set to zero.
Returns
-------
fbp_op : `Operator`
Approximate inverse operator of ``ray_trafo``.
See Also
--------
tam_danielson_window : Windowing for helical data.
parker_weighting : Windowing for overcomplete fan-beam data.
"""
return ray_trafo.adjoint * fbp_filter_op(ray_trafo, padding, filter_type,
frequency_scaling) | python | {
"resource": ""
} |
q35540 | walnut_data | train | def walnut_data():
"""Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1]
data = data.astype('float')
# Very crude gain normalization
data = -np.log(data / np.max(data, axis=1)[:, None])
return data | python | {
"resource": ""
} |
q35541 | lotus_root_data | train | def lotus_root_data():
"""Tomographic X-ray data of a lotus root.
Notes
-----
See the article `Tomographic X-ray data of a lotus root filled with
attenuating objects`_ for further information.
See Also
--------
lotus_root_geometry
References
----------
.. _Tomographic X-ray data of a lotus root filled with attenuating objects:
https://arxiv.org/abs/1609.07299
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_Lotus_v1/sinogram.mat'
dct = get_data('lotus_root.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram'], 0, 1)[:, :]
data = data.astype('float')
return data | python | {
"resource": ""
} |
q35542 | lotus_root_geometry | train | def lotus_root_geometry():
"""Tomographic geometry for the lotus root dataset.
Notes
-----
See the article `Tomographic X-ray data of a lotus root filled with
attenuating objects`_ for further information.
See Also
--------
lotus_root_geometry
References
----------
.. _Tomographic X-ray data of a lotus root filled with attenuating objects:
https://arxiv.org/abs/1609.07299
"""
# To get the same rotation as in the reference article
a_offset = np.pi / 2
apart = uniform_partition(a_offset,
a_offset + 2 * np.pi * 366. / 360.,
366)
# TODO: Find exact value, determined experimentally
d_offset = 0.35
dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240)
geometry = FanBeamGeometry(apart, dpart,
src_radius=540, det_radius=90)
return geometry | python | {
"resource": ""
} |
def poisson_noise(intensity, seed=None):
    r"""Poisson distributed noise with given intensity.

    Parameters
    ----------
    intensity : `TensorSpace` or `ProductSpace` element
        The intensity (usually called lambda) parameter of the noise.
    seed : int, optional
        Random seed to use for generating the noise.
        For ``None``, use the current seed.

    Returns
    -------
    poisson_noise : ``intensity.space`` element
        Poisson distributed random variable.

    Notes
    -----
    For a Poisson distributed random variable :math:`X` with intensity
    :math:`\lambda`, the probability of it taking the value
    :math:`k \in \mathbb{N}_0` is given by

    .. math::
        \frac{\lambda^k e^{-\lambda}}{k!}

    Note that the function only takes integer values.

    See Also
    --------
    white_noise
    salt_pepper_noise
    uniform_noise
    numpy.random.poisson
    """
    from odl.space import ProductSpace

    with NumpyRandomSeed(seed):
        if isinstance(intensity.space, ProductSpace):
            # Recurse into the components. The seed context is already
            # active, so the sub-calls draw from the same stream.
            samples = [poisson_noise(component) for component in intensity]
        else:
            samples = np.random.poisson(intensity.asarray())

        return intensity.space.element(samples)
"resource": ""
} |
def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5,
                      low_val=None, high_val=None, seed=None):
    """Add salt and pepper noise to vector.

    Salt and pepper noise replaces random elements in ``vector`` with
    ``low_val`` or ``high_val``.

    Parameters
    ----------
    vector : element of `TensorSpace` or `ProductSpace`
        The vector that noise should be added to.
    fraction : float, optional
        The proportion of the elements in ``vector`` that should be
        converted to noise.
    salt_vs_pepper : float, optional
        Relative abundance of salt (high) vs pepper (low) noise. A high
        value means more salt than pepper noise.
    low_val : float, optional
        The "pepper" color in the noise.
        Default: minimum value of ``vector``. For product spaces the minimum
        value per subspace is taken.
    high_val : float, optional
        The "salt" value in the noise.
        Default: maximum value of ``vector``. For product spaces the maximum
        value per subspace is taken.
    seed : int, optional
        Random seed to use for generating the noise.
        For ``None``, use the current seed.

    Returns
    -------
    salt_pepper_noise : ``vector.space`` element
        ``vector`` with salt and pepper noise.

    See Also
    --------
    white_noise
    poisson_noise
    uniform_noise
    """
    from odl.space import ProductSpace

    # Validate input parameters
    fraction, fraction_in = float(fraction), fraction
    if not (0 <= fraction <= 1):
        raise ValueError('`fraction` ({}) should be a float in the interval '
                         '[0, 1]'.format(fraction_in))

    salt_vs_pepper, salt_vs_pepper_in = float(salt_vs_pepper), salt_vs_pepper
    if not (0 <= salt_vs_pepper <= 1):
        raise ValueError('`salt_vs_pepper` ({}) should be a float in the '
                         'interval [0, 1]'.format(salt_vs_pepper_in))

    with NumpyRandomSeed(seed):
        if isinstance(vector.space, ProductSpace):
            # Recurse into components; the seed context is already active.
            values = [salt_pepper_noise(subintensity, fraction, salt_vs_pepper,
                                        low_val, high_val)
                      for subintensity in vector]
        else:
            # Extract flat copy of the values
            values = vector.asarray().flatten()

            # Determine fill-in values if not given
            if low_val is None:
                low_val = np.min(values)
            if high_val is None:
                high_val = np.max(values)

            # Randomly select `fraction` of the indices; the first
            # `salt_vs_pepper` share becomes salt, the rest pepper.
            a = np.arange(vector.size)
            np.random.shuffle(a)

            salt_indices = a[:int(fraction * vector.size * salt_vs_pepper)]
            pepper_indices = a[int(fraction * vector.size * salt_vs_pepper):
                               int(fraction * vector.size)]

            values[salt_indices] = high_val
            # BUG FIX: this previously assigned ``-low_val``, negating the
            # pepper value. Pepper pixels must be set to ``low_val`` itself
            # (cf. docstring: '"pepper" color in the noise').
            values[pepper_indices] = low_val

            values = values.reshape(vector.space.shape)

        return vector.space.element(values)
"resource": ""
} |
def uniform_partition_fromintv(intv_prod, shape, nodes_on_bdry=False):
    """Return a partition of an interval product into equally sized cells.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Interval product to be partitioned.
    shape : int or sequence of ints
        Number of nodes per axis. For 1d intervals, a single integer
        can be specified.
    nodes_on_bdry : bool or sequence, optional
        Whether to place the outermost grid points on the boundary
        (``True``) or shift them inward by half a cell size (``False``).
        A sequence gives the choice per axis, where each entry may also
        be a 2-tuple of bool deciding separately for the left and right
        boundary. A single bool is a global choice for all boundaries.

    See Also
    --------
    uniform_partition_fromgrid

    Examples
    --------
    By default, no grid points are placed on the boundary:

    >>> interval = odl.IntervalProd(0, 1)
    >>> part = odl.uniform_partition_fromintv(interval, 4)
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0.125,  0.375,  0.625,  0.875]),)

    This can be changed with the ``nodes_on_bdry`` parameter:

    >>> part = odl.uniform_partition_fromintv(interval, 3,
    ...                                       nodes_on_bdry=True)
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0. ,  0.5,  1. ]),)
    """
    # The grid constructor performs all parameter checking; the partition
    # then simply couples that grid with the original interval product.
    sampling_grid = uniform_grid_fromintv(intv_prod, shape,
                                          nodes_on_bdry=nodes_on_bdry)
    return RectPartition(intv_prod, sampling_grid)
"resource": ""
} |
def uniform_partition_fromgrid(grid, min_pt=None, max_pt=None):
    """Return a partition of an interval product based on a given grid.

    This method is complementary to `uniform_partition_fromintv` in that
    it infers the set to be partitioned from a given grid and optional
    parameters for ``min_pt`` and ``max_pt`` of the set.

    Parameters
    ----------
    grid : `RectGrid`
        Grid on which the partition is based.
    min_pt, max_pt : float, sequence of floats, or dict, optional
        Spatial points defining the lower/upper limits of the intervals
        to be partitioned. The points can be specified in two ways:

        float or sequence: The values are used directly as ``min_pt``
        and/or ``max_pt``.

        dict: Index-value pairs specifying an axis and a spatial
        coordinate to be used in that axis. In axes which are not a key
        in the dictionary, the coordinate for the vector is calculated
        as::

            min_pt = x[0] - (x[1] - x[0]) / 2
            max_pt = x[-1] + (x[-1] - x[-2]) / 2

        See ``Examples`` below.

        In general, ``min_pt`` may not be larger than ``grid.min_pt``,
        and ``max_pt`` not smaller than ``grid.max_pt`` in any component.
        ``None`` is equivalent to an empty dictionary, i.e. the values
        are calculated in each dimension.

    See Also
    --------
    uniform_partition_fromintv

    Examples
    --------
    Have ``min_pt`` and ``max_pt`` of the bounding box automatically
    calculated:

    >>> grid = odl.uniform_grid(0, 1, 3)
    >>> part = odl.uniform_partition_fromgrid(grid)
    >>> part.cell_boundary_vecs
    (array([-0.25,  0.25,  0.75,  1.25]),)

    ``min_pt`` and ``max_pt`` can be given explicitly:

    >>> part = odl.uniform_partition_fromgrid(grid, min_pt=0, max_pt=1)
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.75,  1.  ]),)

    Using dictionaries, selective axes can be explicitly set. The
    keys refer to axes, the values to the coordinates to use:

    >>> grid = odl.uniform_grid([0, 0], [1, 1], (3, 3))
    >>> part = odl.uniform_partition_fromgrid(grid,
    ...                                       min_pt={0: -1}, max_pt={-1: 3})
    >>> part.cell_boundary_vecs[0]
    array([-1.  ,  0.25,  0.75,  1.25])
    >>> part.cell_boundary_vecs[1]
    array([-0.25,  0.25,  0.75,  3.  ])
    """
    def _normalize(param):
        # Turn `param` into a dict {axis: value or None}, filling unset
        # axes with None and honoring negative axis indices in dicts.
        if param is None:
            return {i: None for i in range(grid.ndim)}
        elif not hasattr(param, 'items'):  # array-like
            param = np.atleast_1d(param)
            return {i: float(v) for i, v in enumerate(param)}
        else:
            param.update({i: None for i in range(grid.ndim)
                          if i not in param and i - grid.ndim not in param})
            return param

    min_pt = _normalize(min_pt)
    max_pt = _normalize(max_pt)

    # Fill the coordinate vectors, computing unset (None) entries from
    # the outermost grid points, and taking given values directly.
    min_pt_vec = np.empty(grid.ndim)
    for ax, xmin in min_pt.items():
        if xmin is not None:
            min_pt_vec[ax] = xmin
        else:
            cvec = grid.coord_vectors[ax]
            if len(cvec) == 1:
                raise ValueError('in axis {}: cannot calculate `min_pt` with '
                                 'only 1 grid point'.format(ax))
            min_pt_vec[ax] = cvec[0] - (cvec[1] - cvec[0]) / 2

    max_pt_vec = np.empty(grid.ndim)
    for ax, xmax in max_pt.items():
        if xmax is not None:
            max_pt_vec[ax] = xmax
        else:
            cvec = grid.coord_vectors[ax]
            if len(cvec) == 1:
                raise ValueError('in axis {}: cannot calculate `max_pt` with '
                                 'only 1 grid point'.format(ax))
            max_pt_vec[ax] = cvec[-1] + (cvec[-1] - cvec[-2]) / 2

    return RectPartition(IntervalProd(min_pt_vec, max_pt_vec), grid)
"resource": ""
} |
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None,
                      nodes_on_bdry=False):
    """Return a partition with equally sized cells.

    Parameters
    ----------
    min_pt, max_pt : float or sequence of float, optional
        Vectors defining the lower/upper limits of the intervals in an
        `IntervalProd` (a rectangular box). ``None`` entries mean
        "compute the value".
    shape : int or sequence of ints, optional
        Number of nodes per axis. ``None`` entries mean
        "compute the value".
    cell_sides : float or sequence of floats, optional
        Side length of the partition cells per axis. ``None`` entries mean
        "compute the value".
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Notes
    -----
    In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``,
    ``shape`` and ``cell_sides`` must be given. If all four are
    provided, they are checked for consistency.

    See Also
    --------
    uniform_partition_fromintv : partition an existing set
    uniform_partition_fromgrid : use an existing grid as basis

    Examples
    --------
    Any combination of three of the four parameters can be used for
    creation of a partition:

    >>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)
    >>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5)
    >>> part.cell_boundary_vecs
    (array([ 0. ,  0.5,  1. ,  1.5,  2. ]),)

    In higher dimensions, the parameters can be given differently in
    each axis. Where ``None`` is given, the value will be computed:

    >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
    ...                              shape=[4, 2])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))
    >>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
    ...                              shape=[None, 2], cell_sides=[0.25, None])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))
    >>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2],
    ...                              shape=[4, 2], cell_sides=[0.25, 1])
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]), array([ 0.,  1.,  2.]))

    By default, no grid points are placed on the boundary:

    >>> part = odl.uniform_partition(0, 1, 4)
    >>> part.nodes_on_bdry
    False
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.5 ,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0.125,  0.375,  0.625,  0.875]),)

    This can be changed with the nodes_on_bdry parameter:

    >>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True)
    >>> part.nodes_on_bdry
    True
    >>> part.cell_boundary_vecs
    (array([ 0.  ,  0.25,  0.75,  1.  ]),)
    >>> part.grid.coord_vectors
    (array([ 0. ,  0.5,  1. ]),)

    We can specify this per axis, too. In this case we choose both
    in the first axis and only the rightmost in the second:

    >>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3),
    ...                              nodes_on_bdry=(True, (False, True)))
    ...
    >>> part.cell_boundary_vecs[0]  # first axis, as above
    array([ 0.  ,  0.25,  0.75,  1.  ])
    >>> part.grid.coord_vectors[0]
    array([ 0. ,  0.5,  1. ])
    >>> part.cell_boundary_vecs[1]  # second, asymmetric axis
    array([ 0. ,  0.4,  0.8,  1. ])
    >>> part.grid.coord_vectors[1]
    array([ 0.2,  0.6,  1. ])
    """
    # Normalize partition parameters

    # np.size(None) == 1, so that would screw it for sizes 0 of the rest
    sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides)
             if p is not None]
    ndim = int(np.max(sizes))

    # Normalize all parameters to per-axis lists of length `ndim`,
    # keeping `None` placeholders for values to be computed below.
    min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
                                          keep_none=True)
    max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
                                          keep_none=True)
    shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv,
                                         keep_none=True)
    cell_sides = normalized_scalar_param_list(cell_sides, ndim,
                                              param_conv=float,
                                              keep_none=True)

    nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)

    # Calculate the missing parameters in min_pt, max_pt, shape
    for i, (xmin, xmax, n, dx, on_bdry) in enumerate(
            zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)):
        num_params = sum(p is not None for p in (xmin, xmax, n, dx))
        if num_params < 3:
            raise ValueError('in axis {}: expected at least 3 of the '
                             'parameters `min_pt`, `max_pt`, `shape`, '
                             '`cell_sides`, got {}'
                             ''.format(i, num_params))

        # Unpack the tuple if possible, else use bool globally for this axis
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        # For each node on the boundary, we subtract 1/2 from the number of
        # full cells between min_pt and max_pt.
        if xmin is None:
            min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx
        elif xmax is None:
            max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
        elif n is None:
            # Here we add to n since (e-b)/s gives the reduced number of cells.
            n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0
            n_round = int(round(n_calc))
            if abs(n_calc - n_round) > 1e-5:
                raise ValueError('in axis {}: calculated number of nodes '
                                 '{} = ({} - {}) / {} too far from integer'
                                 ''.format(i, n_calc, xmax, xmin, dx))
            shape[i] = n_round
        elif dx is None:
            # min_pt, max_pt and shape given; the cell size is implied by
            # them and computed in `uniform_partition_fromintv` below.
            pass
        else:
            # All four parameters given; check them for consistency.
            xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
            if not np.isclose(xmax, xmax_calc):
                raise ValueError('in axis {}: calculated endpoint '
                                 '{} = {} + {} * {} too far from given '
                                 'endpoint {}.'
                                 ''.format(i, xmax_calc, xmin, n, dx, xmax))

    return uniform_partition_fromintv(
        IntervalProd(min_pt, max_pt), shape, nodes_on_bdry)
"resource": ""
} |
def nonuniform_partition(*coord_vecs, **kwargs):
    """Return a partition with un-equally sized cells.

    Parameters
    ----------
    coord_vecs1, ... coord_vecsN : `array-like`
        Arrays of coordinates of the mid-points of the partition cells.
    min_pt, max_pt : float or sequence of floats, optional
        Vectors defining the lower/upper limits of the intervals in an
        `IntervalProd` (a rectangular box). ``None`` entries mean
        "compute the value".
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

        Cannot be given with both min_pt and max_pt since they determine the
        same thing.

        Default: ``False``

    See Also
    --------
    uniform_partition : uniformly spaced points
    uniform_partition_fromintv : partition an existing set
    uniform_partition_fromgrid : use an existing grid as basis

    Examples
    --------
    With uniformly spaced points the result is the same as a
    uniform partition:

    >>> odl.nonuniform_partition([0, 1, 2, 3])
    uniform_partition(-0.5, 3.5, 4)
    >>> odl.nonuniform_partition([0, 1, 2, 3], [1, 2])
    uniform_partition([-0.5,  0.5], [ 3.5,  2.5], (4, 2))

    If the points are not uniformly spaced, a nonuniform partition is
    created. Note that the containing interval is calculated by assuming
    that the points are in the middle of the sub-intervals:

    >>> odl.nonuniform_partition([0, 1, 3])
    nonuniform_partition(
        [ 0.,  1.,  3.]
    )

    Higher dimensional partitions are created by specifying the gridpoints
    along each dimension:

    >>> odl.nonuniform_partition([0, 1, 3], [1, 2])
    nonuniform_partition(
        [ 0.,  1.,  3.],
        [ 1.,  2.]
    )

    Partitions with a single element are by default degenerate

    >>> odl.nonuniform_partition(1)
    uniform_partition(1.0, 1.0, 1, nodes_on_bdry=True)

    If the endpoints should be on the boundary, the ``nodes_on_bdry`` parameter
    can be used:

    >>> odl.nonuniform_partition([0, 1, 3], nodes_on_bdry=True)
    nonuniform_partition(
        [ 0.,  1.,  3.],
        nodes_on_bdry=True
    )

    Users can also manually specify the containing intervals dimensions by
    using the ``min_pt`` and ``max_pt`` arguments:

    >>> odl.nonuniform_partition([0, 1, 3], min_pt=-2, max_pt=3)
    nonuniform_partition(
        [ 0.,  1.,  3.],
        min_pt=-2.0, max_pt=3.0
    )
    """
    # Get parameters from kwargs
    min_pt = kwargs.pop('min_pt', None)
    max_pt = kwargs.pop('max_pt', None)
    nodes_on_bdry = kwargs.pop('nodes_on_bdry', False)

    # np.size(None) == 1
    sizes = [len(coord_vecs)] + [np.size(p) for p in (min_pt, max_pt)]
    ndim = int(np.max(sizes))

    min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
                                          keep_none=True)
    max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
                                          keep_none=True)
    nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)

    # Calculate the missing parameters in min_pt, max_pt
    for i, (xmin, xmax, (bdry_l, bdry_r), coords) in enumerate(
            zip(min_pt, max_pt, nodes_on_bdry, coord_vecs)):
        # Check input for redundancy
        if xmin is not None and bdry_l:
            raise ValueError('in axis {}: got both `min_pt` and '
                             '`nodes_on_bdry=True`'.format(i))
        if xmax is not None and bdry_r:
            raise ValueError('in axis {}: got both `max_pt` and '
                             '`nodes_on_bdry=True`'.format(i))

        # Handle length 1 inputs.
        # FIX: was `np.array(coords, copy=False, ndmin=1)`, which is
        # equivalent on NumPy 1.x but raises on NumPy >= 2.0 whenever a
        # copy is required (e.g. for list input); `np.atleast_1d` keeps
        # the old copy-only-if-needed semantics on both versions.
        coords = np.atleast_1d(coords)

        # Compute boundary position if not given by user
        if xmin is None:
            if bdry_l or len(coords) == 1:
                min_pt[i] = coords[0]
            else:
                min_pt[i] = coords[0] - (coords[1] - coords[0]) / 2.0
        if xmax is None:
            if bdry_r or len(coords) == 1:
                max_pt[i] = coords[-1]
            else:
                max_pt[i] = coords[-1] + (coords[-1] - coords[-2]) / 2.0

    interval = IntervalProd(min_pt, max_pt)
    grid = RectGrid(*coord_vecs)
    return RectPartition(interval, grid)
"resource": ""
} |
q35549 | RectPartition.nodes_on_bdry | train | def nodes_on_bdry(self):
"""Encoding of grid points lying on the boundary.
Examples
--------
Using global option (default ``False``):
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3])
>>> part.nodes_on_bdry
False
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
``False`` in axis 0, ``True`` in axis 1:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[False, True])
>>> part.nodes_on_bdry
(False, True)
In axis 0, ``False`` left and ``True`` right, in axis 1 ``False``:
>>> part = odl.nonuniform_partition([0, 2, 3], [1, 3],
... nodes_on_bdry=[[False, True],
... False])
>>> part.nodes_on_bdry
((False, True), False)
"""
if self.size == 0:
return True
nodes_on_bdry = []
for on_bdry in self.nodes_on_bdry_byaxis:
left, right = on_bdry
if left == right:
nodes_on_bdry.append(left)
else:
nodes_on_bdry.append((left, right))
if all(on_bdry == nodes_on_bdry[0] for on_bdry in nodes_on_bdry[1:]):
return nodes_on_bdry[0]
else:
return tuple(nodes_on_bdry) | python | {
"resource": ""
} |
q35550 | RectPartition.has_isotropic_cells | train | def has_isotropic_cells(self):
"""``True`` if `grid` is uniform and `cell_sides` are all equal.
Always ``True`` for 1D partitions.
Examples
--------
>>> part = uniform_partition([0, -1], [1, 1], (5, 10))
>>> part.has_isotropic_cells
True
>>> part = uniform_partition([0, -1], [1, 1], (5, 5))
>>> part.has_isotropic_cells
False
"""
return self.is_uniform and np.allclose(self.cell_sides[:-1],
self.cell_sides[1:]) | python | {
"resource": ""
} |
q35551 | RectPartition.boundary_cell_fractions | train | def boundary_cell_fractions(self):
"""Return a tuple of contained fractions of boundary cells.
Since the outermost grid points can have any distance to the
boundary of the partitioned set, the "natural" outermost cell
around these points can either be cropped or extended. This
property is a tuple of (float, float) tuples, one entry per
dimension, where the fractions of the left- and rightmost
cells inside the set are stored. If a grid point lies exactly
on the boundary, the value is 1/2 since the cell is cut in half.
Otherwise, any value larger than 1/2 is possible.
Returns
-------
on_bdry : tuple of 2-tuples of floats
Each 2-tuple contains the fraction of the leftmost
(first entry) and rightmost (second entry) cell in the
partitioned set in the corresponding dimension.
See Also
--------
cell_boundary_vecs
Examples
--------
We create a partition of the rectangle [0, 1.5] x [-2, 2] with
the grid points [0, 1] x [-1, 0, 2]. The "natural" cells at the
boundary would be:
[-0.5, 0.5] and [0.5, 1.5] in the first axis
[-1.5, -0.5] and [1, 3] in the second axis
Thus, in the first axis, the fractions contained in [0, 1.5]
are 0.5 and 1, and in the second axis, [-2, 2] contains the
fractions 1.5 and 0.5.
>>> rect = odl.IntervalProd([0, -2], [1.5, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.boundary_cell_fractions
((0.5, 1.0), (1.5, 0.5))
"""
frac_list = []
for ax, (cvec, bmin, bmax) in enumerate(zip(
self.grid.coord_vectors, self.set.min_pt, self.set.max_pt)):
# Degenerate axes have a value of 1.0 (this is used as weight
# in integration formulas later)
if len(cvec) == 1:
frac_list.append((1.0, 1.0))
else:
left_frac = 0.5 + (cvec[0] - bmin) / (cvec[1] - cvec[0])
right_frac = 0.5 + (bmax - cvec[-1]) / (cvec[-1] - cvec[-2])
frac_list.append((left_frac, right_frac))
return tuple(frac_list) | python | {
"resource": ""
} |
q35552 | RectPartition.cell_sizes_vecs | train | def cell_sizes_vecs(self):
"""Return the cell sizes as coordinate vectors.
Returns
-------
csizes : tuple of `numpy.ndarray`'s
The cell sizes per axis. The length of the vectors is the
same as the corresponding ``grid.coord_vectors``.
For axes with 1 grid point, cell size is set to 0.0.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
2 x 3 cells with the grid points [0, 1] x [-1, 0, 2]. This
implies that the cell boundaries are given as
[0, 0.5, 1] x [-1, -0.5, 1, 2], hence the cell size vectors
are [0.5, 0.5] x [0.5, 1.5, 1]:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.RectGrid([0, 1], [-1, 0, 2])
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. ]), array([-1. , -0.5, 1. , 2. ]))
>>> part.cell_sizes_vecs
(array([ 0.5, 0.5]), array([ 0.5, 1.5, 1. ]))
"""
csizes = []
for ax, cvec in enumerate(self.grid.coord_vectors):
if len(cvec) == 1:
csizes.append(np.array([0.0]))
else:
csize = np.empty_like(cvec)
csize[1:-1] = (cvec[2:] - cvec[:-2]) / 2.0
csize[0] = (cvec[0] + cvec[1]) / 2 - self.min()[ax]
csize[-1] = self.max()[ax] - (cvec[-2] + cvec[-1]) / 2
csizes.append(csize)
return tuple(csizes) | python | {
"resource": ""
} |
q35553 | RectPartition.cell_sides | train | def cell_sides(self):
"""Side lengths of all 'inner' cells of a uniform partition.
Only defined if ``self.grid`` is uniform.
Examples
--------
We create a partition of the rectangle [0, 1] x [-1, 2] into
3 x 3 cells, where the grid points lie on the boundary. This
means that the grid points are [0, 0.5, 1] x [-1, 0.5, 2],
i.e. the inner cell has side lengths 0.5 x 1.5:
>>> rect = odl.IntervalProd([0, -1], [1, 2])
>>> grid = odl.uniform_grid([0, -1], [1, 2], (3, 3))
>>> part = odl.RectPartition(rect, grid)
>>> part.cell_sides
array([ 0.5, 1.5])
"""
sides = self.grid.stride
sides[sides == 0] = self.extent[sides == 0]
return sides | python | {
"resource": ""
} |
q35554 | RectPartition.approx_equals | train | def approx_equals(self, other, atol):
"""Return ``True`` in case of approximate equality.
Returns
-------
approx_eq : bool
``True`` if ``other`` is a `RectPartition` instance with
``self.set == other.set`` up to ``atol`` and
``self.grid == other.other`` up to ``atol``, ``False`` otherwise.
"""
if other is self:
return True
elif not isinstance(other, RectPartition):
return False
else:
return (self.set.approx_equals(other.set, atol=atol) and
self.grid.approx_equals(other.grid, atol=atol)) | python | {
"resource": ""
} |
q35555 | RectPartition.insert | train | def insert(self, index, *parts):
"""Return a copy with ``parts`` inserted before ``index``.
The given partitions are inserted (as a block) into ``self``,
yielding a new partition whose number of dimensions is the sum of
the numbers of dimensions of all involved partitions.
Note that no changes are made in-place.
Parameters
----------
index : int
Index of the dimension before which ``other`` is to
be inserted. Negative indices count backwards from
``self.ndim``.
part1, ..., partN : `RectPartition`
Partitions to be inserted into ``self``.
Returns
-------
newpart : `RectPartition`
The enlarged partition.
Examples
--------
>>> part1 = odl.uniform_partition([0, -1], [1, 2], (3, 3))
>>> part2 = odl.uniform_partition(0, 1, 5)
>>> part1.insert(1, part2)
uniform_partition([ 0., 0., -1.], [ 1., 1., 2.], (3, 5, 3))
See Also
--------
append
"""
if not all(isinstance(p, RectPartition) for p in parts):
raise TypeError('`parts` must all be `RectPartition` instances, '
'got ({})'
''.format(', '.join(repr(p) for p in parts)))
newgrid = self.grid.insert(index, *(p.grid for p in parts))
newset = self.set.insert(index, *(p.set for p in parts))
return RectPartition(newset, newgrid) | python | {
"resource": ""
} |
    def index(self, value, floating=False):
        """Return the index of a value in the domain.

        Parameters
        ----------
        value : ``self.set`` element
            Point whose index to find.
        floating : bool, optional
            If True, then the index should also give the position inside the
            voxel. This is given by returning the integer valued index of the
            voxel plus the distance from the left cell boundary as a fraction
            of the full cell size.

        Returns
        -------
        index : int, float, tuple of int or tuple of float
            Index of the value, as counted from the left.
            If ``self.ndim > 1`` the result is a tuple, else a scalar.
            If ``floating=True`` the scalar is a float, else an int.

        Examples
        --------
        Get the indices of start and end:

        >>> p = odl.uniform_partition(0, 2, 5)
        >>> p.index(0)
        0
        >>> p.index(2)
        4

        For points inside voxels, the index of the containing cell is returned:

        >>> p.index(0.2)
        0

        By using the ``floating`` argument, partial positions inside the voxels
        can instead be determined:

        >>> p.index(0.2, floating=True)
        0.5

        These indices work with indexing, extracting the voxel in which the
        point lies:

        >>> p[p.index(0.1)]
        uniform_partition(0.0, 0.4, 1)

        The same principle also works in higher dimensions:

        >>> p = uniform_partition([0, -1], [1, 2], (4, 1))
        >>> p.index([0.5, 2])
        (2, 0)
        >>> p[p.index([0.5, 2])]
        uniform_partition([ 0.5, -1. ], [ 0.75,  2. ], (1, 1))
        """
        # Validate the point against the domain and make it iterable,
        # also in the 1d case.
        value = np.atleast_1d(self.set.element(value))
        result = []

        # Handle each axis independently via its cell boundary vector.
        for val, cell_bdry_vec in zip(value, self.cell_boundary_vecs):
            # Index of the first cell boundary >= val
            ind = np.searchsorted(cell_bdry_vec, val)

            if floating:
                if cell_bdry_vec[ind] == val:
                    # Value is on top of edge
                    result.append(float(ind))
                else:
                    # interpolate between
                    csize = float(cell_bdry_vec[ind] - cell_bdry_vec[ind - 1])
                    result.append(ind - (cell_bdry_vec[ind] - val) / csize)
            else:
                if cell_bdry_vec[ind] == val and ind != len(cell_bdry_vec) - 1:
                    # Value is on top of edge, but not last edge
                    result.append(ind)
                else:
                    # Value lies inside cell `ind - 1` (or on the last edge,
                    # which belongs to the last cell).
                    result.append(ind - 1)

        # Scalar result for 1d partitions, tuple otherwise.
        if self.ndim == 1:
            result = result[0]
        else:
            result = tuple(result)

        return result
"resource": ""
} |
    def byaxis(self):
        """Object to index ``self`` along axes.

        Examples
        --------
        Indexing with integers or slices:

        >>> p = odl.uniform_partition([0, 1, 2], [1, 3, 5], (3, 5, 6))
        >>> p.byaxis[0]
        uniform_partition(0.0, 1.0, 3)
        >>> p.byaxis[1]
        uniform_partition(1.0, 3.0, 5)
        >>> p.byaxis[2]
        uniform_partition(2.0, 5.0, 6)
        >>> p.byaxis[:] == p
        True
        >>> p.byaxis[1:]
        uniform_partition([ 1.,  2.], [ 3.,  5.], (5, 6))

        Lists can be used to stack subpartitions arbitrarily:

        >>> p.byaxis[[0, 2, 0]]
        uniform_partition([ 0.,  2.,  0.], [ 1.,  5.,  1.], (3, 6, 3))
        """
        # Capture the partition in a closure for the helper class below.
        partition = self

        class RectPartitionByAxis(object):

            """Helper class for accessing `RectPartition` by axis."""

            def __getitem__(self, indices):
                """Return ``self[indices]``."""
                try:
                    iter(indices)
                except TypeError:
                    # Slice or integer: build a per-axis selector where the
                    # chosen axes get `slice(None)` and the remaining ones
                    # (still 0) are squeezed away afterwards.
                    slc = np.zeros(partition.ndim, dtype=object)
                    slc[indices] = slice(None)
                    squeeze_axes = np.where(slc == 0)[0]
                    newpart = partition[tuple(slc)].squeeze(squeeze_axes)
                else:
                    # Sequence, stack together from single-integer indexing
                    indices = [int(i) for i in indices]
                    byaxis = partition.byaxis
                    parts = [byaxis[i] for i in indices]
                    if not parts:
                        # Empty selection: empty 0-dimensional partition
                        newpart = uniform_partition([], [], ())
                    else:
                        newpart = parts[0].append(*(parts[1:]))

                return newpart

            def __repr__(self):
                """Return ``repr(self)``.

                Examples
                --------
                >>> p = odl.uniform_partition(0, 1, 5)
                >>> p.byaxis
                uniform_partition(0, 1, 5).byaxis
                """
                return '{!r}.byaxis'.format(partition)

        return RectPartitionByAxis()
"resource": ""
} |
def det_to_src(self, angle, dparam, normalized=True):
    """Vector or direction from a detector location to the source.
    The unnormalized version of this vector is computed as follows::
        vec = src_position(angle) - det_point_position(angle, dparam)
    Parameters
    ----------
    angle : `array-like` or sequence
        One or several (Euler) angles in radians at which to
        evaluate. If ``motion_params.ndim >= 2``, a sequence of that
        length must be provided.
    dparam : `array-like` or sequence
        Detector parameter(s) at which to evaluate. If
        ``det_params.ndim >= 2``, a sequence of that length must be
        provided.
    normalized : bool, optional
        If ``True``, return a unit vector; otherwise return the raw
        difference vector including its length.
    Returns
    -------
    det_to_src : `numpy.ndarray`
        Vector(s) pointing from a detector point to the source (at
        infinity).
        The shape of the returned array is obtained from the
        (broadcast) shapes of ``angle`` and ``dparam``, and
        broadcasting is supported within both parameters and between
        them. The precise definition of the shape is
        ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``,
        where ``bcast_angle`` is
        - ``angle`` if `motion_params` is 1D,
        - ``broadcast(*angle)`` otherwise,
        and ``bcast_dparam`` defined analogously.
    Examples
    --------
    The method works with single parameter values, in which case
    a single vector is returned:
    >>> apart = odl.uniform_partition(0, 2 * np.pi, 10)
    >>> dpart = odl.uniform_partition(-1, 1, 20)
    >>> geom = odl.tomo.FanBeamGeometry(apart, dpart, src_radius=2,
    ...                                 det_radius=3)
    >>> geom.det_to_src(0, 0)
    array([ 0., -1.])
    >>> geom.det_to_src(0, 0, normalized=False)
    array([ 0., -5.])
    >>> vec = geom.det_to_src(0, 1, normalized=False)
    >>> np.allclose(geom.det_point_position(0, 1) + vec,
    ...             geom.src_position(0))
    True
    >>> dir = geom.det_to_src(np.pi / 2, 0)
    >>> np.allclose(dir, [1, 0])
    True
    >>> vec = geom.det_to_src(np.pi / 2, 0, normalized=False)
    >>> np.allclose(vec, [5, 0])
    True
    Both variables support vectorized calls, i.e., stacks of
    parameters can be provided. The order of axes in the output (left
    of the ``ndim`` axis for the vector dimension) corresponds to the
    order of arguments:
    >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1])
    >>> dirs[1]
    array([ 0., -1.])
    >>> dirs.shape  # (num_dparams, ndim)
    (4, 2)
    >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0)
    >>> np.allclose(dirs, [[0, -1],
    ...                    [1, 0],
    ...                    [0, 1]])
    True
    >>> dirs.shape  # (num_angles, ndim)
    (3, 2)
    >>> # Providing 3 pairs of parameters, resulting in 3 vectors
    >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [0, -1, 1])
    >>> dirs[0]  # Corresponds to angle = 0, dparam = 0
    array([ 0., -1.])
    >>> dirs.shape
    (3, 2)
    >>> # Pairs of parameters arranged in arrays of same size
    >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape
    (4, 5, 2)
    >>> # "Outer product" type evaluation using broadcasting
    >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape
    (4, 5, 2)
    """
    # Always call the downstream methods with vectorized arguments
    # to be able to reliably manipulate the final axes of the result
    if self.motion_params.ndim == 1:
        # Remember scalar input so the extra axis added by `ndmin=1`
        # can be squeezed away again at the end.
        squeeze_angle = (np.shape(angle) == ())
        angle = np.array(angle, dtype=float, copy=False, ndmin=1)
    else:
        squeeze_angle = (np.broadcast(*angle).shape == ())
        angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1)
                      for a in angle)
    if self.det_params.ndim == 1:
        squeeze_dparam = (np.shape(dparam) == ())
        dparam = np.array(dparam, dtype=float, copy=False, ndmin=1)
    else:
        squeeze_dparam = (np.broadcast(*dparam).shape == ())
        dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                       for p in dparam)
    # Difference vector as documented in the method docstring
    det_to_src = (self.src_position(angle) -
                  self.det_point_position(angle, dparam))
    if normalized:
        # Normalize along the last axis, which holds the vector components
        det_to_src /= np.linalg.norm(det_to_src, axis=-1, keepdims=True)
    if squeeze_angle and squeeze_dparam:
        det_to_src = det_to_src.squeeze()
    return det_to_src
"resource": ""
} |
def rotation_matrix(self, angle):
    """Return the rotation matrix to the system state at ``angle``.
    The matrix is computed according to
    `Rodrigues' rotation formula
    <https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula>`_.
    Parameters
    ----------
    angle : float or `array-like`
        Angle(s) in radians describing the counter-clockwise
        rotation of the system around `axis`.
    Returns
    -------
    rot : `numpy.ndarray`
        Rotation matrix (or stack of matrices) mapping vectors at the
        initial state to the ones in the state defined by ``angle``.
        The rotation is extrinsic, i.e., defined in the "world"
        coordinate system.
        For a single ``angle`` parameter, the returned array has shape
        ``(3, 3)``, otherwise ``angle.shape + (3, 3)``.
    """
    # A scalar input should yield a single matrix rather than a stack.
    scalar_input = (np.shape(angle) == ())
    angle = np.array(angle, dtype=float, copy=False, ndmin=1)
    if self.check_bounds:
        if not is_inside_bounds(angle, self.motion_params):
            raise ValueError('`angle` {} not in the valid range {}'
                             ''.format(angle, self.motion_params))
    mat = axis_rotation_matrix(self.axis, angle)
    return mat.squeeze() if scalar_input else mat
"resource": ""
} |
def elekta_icon_geometry(sad=780.0, sdd=1000.0,
                         piercing_point=(390.0, 0.0),
                         angles=None, num_angles=None,
                         detector_shape=(780, 720)):
    """Tomographic geometry of the Elekta Icon CBCT system.
    See the [whitepaper]_ for specific descriptions of each parameter.
    All measurements are given in millimeters unless otherwise stated.
    Parameters
    ----------
    sad : positive float, optional
        Source to Axis distance.
    sdd : float, optional
        Source to Detector distance, must be larger than ``sad``.
    piercing_point : sequence of float, optional
        Position in the detector (in pixel coordinates) that a beam from the
        source, passing through the axis of rotation perpendicularly, hits.
    angles : array-like, optional
        List of angles given in radians that the projection images were taken
        at. Exclusive with num_angles.
        Default: np.linspace(1.2, 5.0, 332)
    num_angles : int, optional
        Number of angles. Exclusive with angles.
        Default: 332
    detector_shape : sequence of int, optional
        Shape of the detector (in pixels). Useful if a sub-sampled system
        should be studied.
    Returns
    -------
    elekta_icon_geometry : `ConeFlatGeometry`
    Raises
    ------
    ValueError
        If a parameter is outside its valid range, or if both ``angles``
        and ``num_angles`` are given.
    Examples
    --------
    Create default geometry:
    >>> from odl.contrib import tomo
    >>> geometry = tomo.elekta_icon_geometry()
    Use a smaller detector (improves efficiency):
    >>> small_geometry = tomo.elekta_icon_geometry(detector_shape=[100, 100])
    See Also
    --------
    elekta_icon_space : Default reconstruction space for the Elekta Icon CBCT.
    elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT.
    References
    ----------
    .. [whitepaper] *Design and performance characteristics of a Cone Beam
       CT system for Leksell Gamma Knife Icon*
    """
    # Validate with explicit exceptions instead of `assert`, which is
    # silently stripped when Python runs with optimizations (-O).
    sad = float(sad)
    if sad <= 0:
        raise ValueError('`sad` must be positive, got {}'.format(sad))
    sdd = float(sdd)
    if sdd <= sad:
        raise ValueError('`sdd` must be larger than `sad`, got '
                         'sdd={}, sad={}'.format(sdd, sad))
    piercing_point = np.array(piercing_point, dtype=float)
    if piercing_point.shape != (2,):
        raise ValueError('`piercing_point` must have shape (2,), got {}'
                         ''.format(piercing_point.shape))
    if angles is not None and num_angles is not None:
        raise ValueError('cannot provide both `angles` and `num_angles`')
    elif angles is not None:
        angles = odl.nonuniform_partition(angles)
        if angles.ndim != 1:
            raise ValueError('`angles` must be one-dimensional')
    elif num_angles is not None:
        angles = odl.uniform_partition(1.2, 5.0, num_angles)
    else:
        angles = odl.uniform_partition(1.2, 5.0, 332)
    detector_shape = np.array(detector_shape, dtype=int)
    # Constant system parameters: pixel size and physical detector extent
    pixel_size = 0.368
    det_extent_mm = np.array([287.04, 264.96])
    # Place the detector window so that the piercing point sits at the
    # origin of the detector coordinate system.
    piercing_point_mm = pixel_size * piercing_point
    det_min_pt = -piercing_point_mm
    det_max_pt = det_min_pt + det_extent_mm
    detector_partition = odl.uniform_partition(min_pt=det_min_pt,
                                               max_pt=det_max_pt,
                                               shape=detector_shape)
    # Create the geometry
    geometry = odl.tomo.ConeFlatGeometry(
        angles, detector_partition,
        src_radius=sad, det_radius=sdd - sad)
    return geometry
"resource": ""
} |
def elekta_icon_space(shape=(448, 448, 448), **kwargs):
    """Default reconstruction space for the Elekta Icon CBCT.
    See the [whitepaper]_ for further information.
    Parameters
    ----------
    shape : sequence of int, optional
        Shape of the space, in voxels.
    kwargs :
        Keyword arguments to pass to `uniform_discr` to modify the space,
        e.g. use another backend. By default, the dtype is set to float32.
    Returns
    -------
    elekta_icon_space : `DiscreteLp`
    Examples
    --------
    Create default space:
    >>> from odl.contrib import tomo
    >>> space = tomo.elekta_icon_space()
    Create sub-sampled space:
    >>> space = tomo.elekta_icon_space(shape=(100, 100, 100))
    See Also
    --------
    elekta_icon_geometry: Geometry for the Elekta Icon CBCT.
    elekta_icon_fbp: Default reconstruction method for the Elekta Icon CBCT.
    References
    ----------
    .. [whitepaper] *Design and performance characteristics of a Cone Beam
       CT system for Leksell Gamma Knife Icon*
    """
    # Default to single precision unless the caller overrides it.
    kwargs.setdefault('dtype', 'float32')
    return odl.uniform_discr(min_pt=[-112.0, -112.0, 0.0],
                             max_pt=[112.0, 112.0, 224.0],
                             shape=shape,
                             **kwargs)
"resource": ""
} |
def elekta_icon_fbp(ray_transform,
                    padding=False, filter_type='Hann', frequency_scaling=0.6,
                    parker_weighting=True):
    """Approximation of the FDK reconstruction used in the Elekta Icon.
    Parameters
    ----------
    ray_transform : `RayTransform`
        The ray transform to be used, should have an Elekta Icon geometry.
    padding : bool, optional
        Whether the FBP filter should use padding, increases memory use
        significantly.
    filter_type : str, optional
        Type of filter to apply in the FBP filter.
    frequency_scaling : float, optional
        Frequency scaling for FBP filter.
    parker_weighting : bool, optional
        Whether Parker weighting should be applied to compensate for partial
        scan.
    Returns
    -------
    elekta_icon_fbp : `DiscreteLp`
    Examples
    --------
    Create default FBP for default geometry:
    >>> from odl.contrib import tomo
    >>> geometry = tomo.elekta_icon_geometry()
    >>> space = tomo.elekta_icon_space()
    >>> ray_transform = odl.tomo.RayTransform(space, geometry)
    >>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
    """
    fbp_op = odl.tomo.fbp_op(ray_transform,
                             padding=padding,
                             filter_type=filter_type,
                             frequency_scaling=frequency_scaling)
    if parker_weighting:
        # Use a dedicated name for the weighting operator; the original
        # code rebound the boolean `parker_weighting` parameter, which
        # shadowed its meaning.
        parker_weighting_op = odl.tomo.parker_weighting(ray_transform)
        fbp_op = fbp_op * parker_weighting_op
    return fbp_op
"resource": ""
} |
def elekta_xvi_space(shape=(512, 512, 512), **kwargs):
    """Default reconstruction space for the Elekta XVI CBCT.
    Parameters
    ----------
    shape : sequence of int, optional
        Shape of the space, in voxels.
    kwargs :
        Keyword arguments to pass to `uniform_discr` to modify the space,
        e.g. use another backend. By default, the dtype is set to float32.
    Returns
    -------
    elekta_xvi_space : `DiscreteLp`
    Examples
    --------
    Create default space:
    >>> from odl.contrib import tomo
    >>> space = tomo.elekta_xvi_space()
    Create sub-sampled space:
    >>> space = tomo.elekta_xvi_space(shape=(100, 100, 100))
    See Also
    --------
    elekta_xvi_geometry: Geometry for the Elekta XVI CBCT.
    elekta_xvi_fbp: Default reconstruction method for the Elekta XVI CBCT.
    """
    # Default to single precision unless the caller overrides it.
    kwargs.setdefault('dtype', 'float32')
    return odl.uniform_discr(min_pt=[-128.0, -128.0, -128.0],
                             max_pt=[128.0, 128.0, 128.0],
                             shape=shape,
                             **kwargs)
"resource": ""
} |
def elekta_xvi_fbp(ray_transform,
                   padding=False, filter_type='Hann', frequency_scaling=0.6):
    """Approximation of the FDK reconstruction used in the Elekta XVI.
    Parameters
    ----------
    ray_transform : `RayTransform`
        The ray transform to be used, should have an Elekta XVI geometry.
    padding : bool, optional
        Whether the FBP filter should use padding, increases memory use
        significantly.
    filter_type : str, optional
        Type of filter to apply in the FBP filter.
    frequency_scaling : float, optional
        Frequency scaling for FBP filter.
    Returns
    -------
    elekta_xvi_fbp : `DiscreteLp`
    Examples
    --------
    Create default FBP for default geometry:
    >>> from odl.contrib import tomo
    >>> geometry = tomo.elekta_xvi_geometry()
    >>> space = tomo.elekta_xvi_space()
    >>> ray_transform = odl.tomo.RayTransform(space, geometry)
    >>> fbp_op = tomo.elekta_xvi_fbp(ray_transform)
    """
    # Plain filtered back-projection; no Parker weighting is applied here.
    return odl.tomo.fbp_op(ray_transform,
                           padding=padding,
                           filter_type=filter_type,
                           frequency_scaling=frequency_scaling)
"resource": ""
} |
q35565 | _modified_shepp_logan_ellipsoids | train | def _modified_shepp_logan_ellipsoids(ellipsoids):
"""Modify ellipsoids to give the modified Shepp-Logan phantom.
Works for both 2d and 3d.
"""
intensities = [1.0, -0.8, -0.2, -0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]
# Add minimal numbers to ensure that the result is nowhere negative.
# This is needed due to numerical issues.
intensities[2] += 5e-17
intensities[3] += 5e-17
assert len(ellipsoids) == len(intensities)
for ellipsoid, intensity in zip(ellipsoids, intensities):
ellipsoid[0] = intensity | python | {
"resource": ""
} |
def shepp_logan_ellipsoids(ndim, modified=False):
    """Ellipsoids for the standard Shepp-Logan phantom in 2 or 3 dimensions.
    Parameters
    ----------
    ndim : {2, 3}
        Dimension of the space the ellipsoids should be in.
    modified : bool, optional
        True if the modified Shepp-Logan phantom should be given.
        The modified phantom has greatly amplified contrast to aid
        visualization.
    See Also
    --------
    odl.phantom.geometric.ellipsoid_phantom :
        Function for creating arbitrary ellipsoids phantoms
    shepp_logan : Create a phantom with these ellipsoids
    References
    ----------
    .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
    """
    # Dispatch on dimension; only 2d and 3d variants exist.
    factories = {2: _shepp_logan_ellipse_2d, 3: _shepp_logan_ellipsoids_3d}
    try:
        ellipsoids = factories[ndim]()
    except KeyError:
        raise ValueError('dimension not 2 or 3, no phantom available')
    if modified:
        _modified_shepp_logan_ellipsoids(ellipsoids)
    return ellipsoids
"resource": ""
} |
def shepp_logan(space, modified=False, min_pt=None, max_pt=None):
    """Standard Shepp-Logan phantom in 2 or 3 dimensions.
    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom is created, must be 2- or 3-dimensional.
        If ``space.shape`` is 1 in an axis, a corresponding slice of the
        phantom is created.
    modified : `bool`, optional
        True if the modified Shepp-Logan phantom should be given.
        The modified phantom has greatly amplified contrast to aid
        visualization.
    min_pt, max_pt : array-like, optional
        If provided, use these vectors to determine the bounding box of the
        phantom instead of ``space.min_pt`` and ``space.max_pt``.
        It is currently required that ``min_pt >= space.min_pt`` and
        ``max_pt <= space.max_pt``, i.e., shifting or scaling outside the
        original space is not allowed.
        Providing one of them results in a shift, e.g., for ``min_pt``::
            new_min_pt = min_pt
            new_max_pt = space.max_pt + (min_pt - space.min_pt)
        Providing both results in a scaled version of the phantom.
    See Also
    --------
    forbild : Similar phantom but with more complexity. Only supports 2d.
    odl.phantom.geometric.defrise : Geometry test phantom
    shepp_logan_ellipsoids : Get the parameters that define this phantom
    odl.phantom.geometric.ellipsoid_phantom :
        Function for creating arbitrary ellipsoid phantoms
    References
    ----------
    .. _Shepp-Logan phantom: https://en.wikipedia.org/wiki/Shepp-Logan_phantom
    """
    # Delegate: look up the defining ellipsoids, then rasterize them.
    return ellipsoid_phantom(
        space, shepp_logan_ellipsoids(space.ndim, modified), min_pt, max_pt)
"resource": ""
} |
q35568 | _scaling_func_list | train | def _scaling_func_list(bdry_fracs, exponent):
"""Return a list of lists of scaling functions for the boundary."""
def scaling(factor):
def scaling_func(x):
return x * factor
return scaling_func
func_list = []
for frac_l, frac_r in bdry_fracs:
func_list_entry = []
if np.isclose(frac_l, 1.0):
func_list_entry.append(None)
else:
func_list_entry.append(scaling(frac_l ** (1 / exponent)))
if np.isclose(frac_r, 1.0):
func_list_entry.append(None)
else:
func_list_entry.append(scaling(frac_r ** (1 / exponent)))
func_list.append(func_list_entry)
return func_list | python | {
"resource": ""
} |
def interp(self):
    """Interpolation type of this discretization.
    Returns the common scheme as a single value if all axes agree,
    otherwise the per-axis tuple.
    """
    # A 0-dimensional space has nothing to interpolate between.
    if self.ndim == 0:
        return 'nearest'
    schemes = self.interp_byaxis
    first = schemes[0]
    if any(scheme != first for scheme in schemes):
        # Mixed interpolation: report the full per-axis collection.
        return schemes
    return first
"resource": ""
} |
def tangent_bundle(self):
    """The tangent bundle associated with `domain` using `partition`.
    The tangent bundle of a space ``X`` of functions ``R^d --> F`` can be
    interpreted as the space of vector-valued functions ``R^d --> F^d``.
    This space can be identified with the power space ``X^d`` as used
    in this implementation.
    """
    if self.ndim > 0:
        return ProductSpace(self, self.ndim)
    # 0-dimensional case: the bundle degenerates to an empty product space.
    return ProductSpace(field=self.field)
"resource": ""
} |
def is_uniformly_weighted(self):
    """``True`` if the weighting is the same for all space points."""
    # Return the cached value if this has been computed before.
    try:
        return self.__is_uniformly_weighted
    except AttributeError:
        pass
    bdry_fracs = self.partition.boundary_cell_fractions
    uniform = (np.allclose(bdry_fracs, 1.0) or
               self.exponent == float('inf') or
               not getattr(self.tspace, 'is_weighted', False))
    self.__is_uniformly_weighted = uniform
    return uniform
"resource": ""
} |
def imag(self, newimag):
    """Set the imaginary part of this element to ``newimag``.
    This method is invoked by ``x.imag = other``.
    Parameters
    ----------
    newimag : array-like or scalar
        Values to be assigned to the imaginary part of this element.
    Raises
    ------
    ValueError
        If the space is real, i.e., no imaginary part can be set.
    """
    if not self.space.is_real:
        self.tensor.imag = newimag
    else:
        raise ValueError('cannot set imaginary part in real spaces')
"resource": ""
} |
def conj(self, out=None):
    """Complex conjugate of this element.
    Parameters
    ----------
    out : `DiscreteLpElement`, optional
        Element to which the complex conjugate is written.
        Must be an element of this element's space.
    Returns
    -------
    out : `DiscreteLpElement`
        The complex conjugate element. If ``out`` is provided,
        the returned object is a reference to it, which allows both
        avoiding a copy and in-place conjugation (``x.conj(out=x)``).
    """
    if out is not None:
        # Write the conjugate directly into the storage of ``out``.
        self.tensor.conj(out=out.tensor)
        return out
    # No output given: wrap a freshly conjugated tensor in a new element.
    return self.space.element(self.tensor.conj())
"resource": ""
} |
q35574 | _operator_norms | train | def _operator_norms(L):
"""Get operator norms if needed.
Parameters
----------
L : sequence of `Operator` or float
The operators or the norms of the operators that are used in the
`douglas_rachford_pd` method. For `Operator` entries, the norm
is computed with ``Operator.norm(estimate=True)``.
"""
L_norms = []
for Li in L:
if np.isscalar(Li):
L_norms.append(float(Li))
elif isinstance(Li, Operator):
L_norms.append(Li.norm(estimate=True))
else:
raise TypeError('invalid entry {!r} in `L`'.format(Li))
return L_norms | python | {
"resource": ""
} |
def douglas_rachford_pd_stepsize(L, tau=None, sigma=None):
    r"""Default step sizes for `douglas_rachford_pd`.
    Parameters
    ----------
    L : sequence of `Operator` or float
        The operators or the norms of the operators that are used in the
        `douglas_rachford_pd` method. For `Operator` entries, the norm
        is computed with ``Operator.norm(estimate=True)``.
    tau : positive float, optional
        Use this value for ``tau`` instead of computing it from the
        operator norms, see Notes.
    sigma : tuple of float, optional
        The ``sigma`` step size parameters for the dual update.
    Returns
    -------
    tau : float
        The ``tau`` step size parameter for the primal update.
    sigma : tuple of float
        The ``sigma`` step size parameters for the dual update.
    Notes
    -----
    Convergence requires
    .. math::
        \tau \sum_{i=1}^n \sigma_i \|L_i\|^2 < 4.
    Depending on which parameters are given:
    - Neither: :math:`\tau = 1 / \sum_i \|L_i\|` and
      :math:`\sigma_i = 2 / (n \tau \|L_i\|^2)`.
    - Only ``sigma``: :math:`\tau = 2 / \sum_i \sigma_i \|L_i\|^2`.
    - Only ``tau``: :math:`\sigma_i = 2 / (n \tau \|L_i\|^2)`.
    - Both: returned as-is without further validation.
    """
    # Fast path: nothing to compute when both step sizes are provided.
    if tau is not None and sigma is not None:
        return float(tau), tuple(sigma)
    L_norms = _operator_norms(L)
    if sigma is not None:
        # Only sigma given: derive tau from the convergence condition.
        tau = 2 / sum(si * Li_norm ** 2
                      for si, Li_norm in zip(sigma, L_norms))
        return tau, tuple(sigma)
    # sigma must be computed; tau is either derived or taken from input.
    tau = 1 / sum(L_norms) if tau is None else float(tau)
    sigma = [2.0 / (len(L_norms) * tau * Li_norm ** 2)
             for Li_norm in L_norms]
    return tau, tuple(sigma)
"resource": ""
} |
def parallel_beam_geometry(space, num_angles=None, det_shape=None):
    r"""Create default parallel beam geometry from ``space``.
    This is intended for simple test cases where users do not need the full
    flexibility of the geometries, but simply want a geometry that works.
    This default geometry gives a fully sampled sinogram according to the
    Nyquist criterion, which in general results in a very large number of
    samples. In particular, a ``space`` that is not centered at the origin
    can result in very large detectors.
    Parameters
    ----------
    space : `DiscreteLp`
        Reconstruction space, the space of the volumetric data to be projected.
        Needs to be 2d or 3d.
    num_angles : int, optional
        Number of angles.
        Default: Enough to fully sample the data, see Notes.
    det_shape : int or sequence of int, optional
        Number of detector pixels.
        Default: Enough to fully sample the data, see Notes.
    Returns
    -------
    geometry : `ParallelBeamGeometry`
        If ``space`` is 2d, return a `Parallel2dGeometry`.
        If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
    Examples
    --------
    Create a parallel beam geometry from a 2d space:
    >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
    >>> geometry = parallel_beam_geometry(space)
    >>> geometry.angles.size
    45
    >>> geometry.detector.size
    31
    Notes
    -----
    According to [NW2001]_, pages 72--74, a function
    :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
    .. math::
        \| x \| > \rho  \implies f(x) = 0,
    and is essentially bandlimited
    .. math::
       \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
    can be fully reconstructed from a parallel beam ray transform
    if (1) the projection angles are sampled with a spacing of
    :math:`\Delta \psi` such that
    .. math::
        \Delta \psi \leq \frac{\pi}{\rho \Omega},
    and (2) the detector is sampled with an interval :math:`\Delta s`
    that satisfies
    .. math::
        \Delta s \leq \frac{\pi}{\Omega}.
    The geometry returned by this function satisfies these conditions exactly.
    If the domain is 3-dimensional, the geometry is "separable", in that each
    slice along the z-dimension of the data is treated as independed 2d data.
    References
    ----------
    .. [NW2001] Natterer, F and Wuebbeling, F.
       *Mathematical Methods in Image Reconstruction*.
       SIAM, 2001.
       https://dx.doi.org/10.1137/1.9780898718324
    """
    # Find maximum distance from rotation axis
    # (only x and y coordinates matter, since rotation is about the z axis)
    corners = space.domain.corners()[:, :2]
    rho = np.max(np.linalg.norm(corners, axis=1))
    # Find default values according to Nyquist criterion.
    # We assume that the function is bandlimited by a wave along the x or y
    # axis. The highest frequency we can measure is then a standing wave with
    # period of twice the inter-node distance.
    min_side = min(space.partition.cell_sides[:2])
    omega = np.pi / min_side
    # Detector covers [-rho, rho] with spacing pi / omega = min_side;
    # +1 makes the pixel count odd so that a pixel is centered at 0.
    num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1
    if space.ndim == 2:
        det_min_pt = -rho
        det_max_pt = rho
        if det_shape is None:
            det_shape = num_px_horiz
    elif space.ndim == 3:
        # Separable geometry: reuse the z sampling of the volume directly.
        num_px_vert = space.shape[2]
        min_h = space.domain.min_pt[2]
        max_h = space.domain.max_pt[2]
        det_min_pt = [-rho, min_h]
        det_max_pt = [rho, max_h]
        if det_shape is None:
            det_shape = [num_px_horiz, num_px_vert]
    if num_angles is None:
        num_angles = int(np.ceil(omega * rho))
    angle_partition = uniform_partition(0, np.pi, num_angles)
    det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)
    if space.ndim == 2:
        return Parallel2dGeometry(angle_partition, det_partition)
    elif space.ndim == 3:
        return Parallel3dAxisGeometry(angle_partition, det_partition)
    else:
        raise ValueError('``space.ndim`` must be 2 or 3.')
"resource": ""
} |
def angles(self):
    """All angles of this geometry as an array.
    If ``motion_params.ndim == 1``, the array has shape ``(N,)``,
    where ``N`` is the number of angles.
    Otherwise, the array shape is ``(ndim, N)``, where ``N`` is the
    total number of angles, and ``ndim`` is ``motion_partitioin.ndim``.
    The order of axes is chosen such that ``geometry.angles`` can be
    used directly as input to any of the other methods of the
    geometry.
    """
    if self.motion_partition.ndim != 1:
        # Several motion parameters: one row per parameter.
        return self.motion_grid.points().T
    return self.motion_grid.coord_vectors[0]
"resource": ""
} |
def det_to_src(self, angle, dparam):
    """Direction from a detector location to the source.
    The direction vector is computed as follows::
        dir = rotation_matrix(angle).dot(detector.surface_normal(dparam))
    Note that for flat detectors, ``surface_normal`` does not depend
    on the parameter ``dparam``, hence this function is constant in
    that variable.
    Parameters
    ----------
    angle : `array-like` or sequence
        One or several (Euler) angles in radians at which to
        evaluate. If ``motion_params.ndim >= 2``, a sequence of that
        length must be provided.
    dparam : `array-like` or sequence
        Detector parameter(s) at which to evaluate. If
        ``det_params.ndim >= 2``, a sequence of that length must be
        provided.
    Returns
    -------
    det_to_src : `numpy.ndarray`
        Vector(s) pointing from a detector point to the source (at
        infinity).
        The shape of the returned array is obtained from the
        (broadcast) shapes of ``angle`` and ``dparam``, and
        broadcasting is supported within both parameters and between
        them. The precise definition of the shape is
        ``broadcast(bcast_angle, bcast_dparam).shape + (ndim,)``,
        where ``bcast_angle`` is
        - ``angle`` if `motion_params` is 1D,
        - ``broadcast(*angle)`` otherwise,
        and ``bcast_dparam`` defined analogously.
    Examples
    --------
    The method works with single parameter values, in which case
    a single vector is returned:
    >>> apart = odl.uniform_partition(0, np.pi, 10)
    >>> dpart = odl.uniform_partition(-1, 1, 20)
    >>> geom = odl.tomo.Parallel2dGeometry(apart, dpart)
    >>> geom.det_to_src(0, 0)
    array([ 0., -1.])
    >>> geom.det_to_src(0, 1)
    array([ 0., -1.])
    >>> dir = geom.det_to_src(np.pi / 2, 0)
    >>> np.allclose(dir, [1, 0])
    True
    >>> dir = geom.det_to_src(np.pi / 2, 1)
    >>> np.allclose(dir, [1, 0])
    True
    Both variables support vectorized calls, i.e., stacks of
    parameters can be provided. The order of axes in the output (left
    of the ``ndim`` axis for the vector dimension) corresponds to the
    order of arguments:
    >>> dirs = geom.det_to_src(0, [-1, 0, 0.5, 1])
    >>> dirs
    array([[ 0., -1.],
           [ 0., -1.],
           [ 0., -1.],
           [ 0., -1.]])
    >>> dirs.shape  # (num_dparams, ndim)
    (4, 2)
    >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], 0)
    >>> np.allclose(dirs, [[0, -1],
    ...                    [1, 0],
    ...                    [0, 1]])
    True
    >>> dirs.shape  # (num_angles, ndim)
    (3, 2)
    >>> # Providing 3 pairs of parameters, resulting in 3 vectors
    >>> dirs = geom.det_to_src([0, np.pi / 2, np.pi], [-1, 0, 1])
    >>> dirs[0]  # Corresponds to angle = 0, dparam = -1
    array([ 0., -1.])
    >>> dirs.shape
    (3, 2)
    >>> # Pairs of parameters arranged in arrays of same size
    >>> geom.det_to_src(np.zeros((4, 5)), np.zeros((4, 5))).shape
    (4, 5, 2)
    >>> # "Outer product" type evaluation using broadcasting
    >>> geom.det_to_src(np.zeros((4, 1)), np.zeros((1, 5))).shape
    (4, 5, 2)
    """
    # Always call the downstream methods with vectorized arguments
    # to be able to reliably manipulate the final axes of the result
    if self.motion_params.ndim == 1:
        # Remember scalar input so the extra axis added by `ndmin=1`
        # can be squeezed away again at the end.
        squeeze_angle = (np.shape(angle) == ())
        angle = np.array(angle, dtype=float, copy=False, ndmin=1)
        matrix = self.rotation_matrix(angle)  # shape (m, ndim, ndim)
    else:
        squeeze_angle = (np.broadcast(*angle).shape == ())
        angle = tuple(np.array(a, dtype=float, copy=False, ndmin=1)
                      for a in angle)
        matrix = self.rotation_matrix(angle)  # shape (m, ndim, ndim)
    if self.det_params.ndim == 1:
        squeeze_dparam = (np.shape(dparam) == ())
        dparam = np.array(dparam, dtype=float, copy=False, ndmin=1)
    else:
        squeeze_dparam = (np.broadcast(*dparam).shape == ())
        dparam = tuple(np.array(p, dtype=float, copy=False, ndmin=1)
                       for p in dparam)
    normal = self.detector.surface_normal(dparam)  # shape (d, ndim)
    # Perform matrix-vector multiplication along the last axis of both
    # `matrix` and `normal` while "zipping" all axes that do not
    # participate in the matrix-vector product. In other words, the axes
    # are labelled
    # [0, 1, ..., r-1, r, r+1] for `matrix` and
    # [0, 1, ..., r-1, r+1] for `normal`, and the output axes are set to
    # [0, 1, ..., r-1, r]. This automatically supports broadcasting
    # along the axes 0, ..., r-1.
    matrix_axes = list(range(matrix.ndim))
    normal_axes = list(range(matrix.ndim - 2)) + [matrix_axes[-1]]
    out_axes = list(range(matrix.ndim - 1))
    det_to_src = np.einsum(matrix, matrix_axes, normal, normal_axes,
                           out_axes)
    if squeeze_angle and squeeze_dparam:
        det_to_src = det_to_src.squeeze()
    return det_to_src
"resource": ""
} |
def frommatrix(cls, apart, dpart, init_matrix, **kwargs):
    """Create an instance of `Parallel2dGeometry` using a matrix.
    This alternative constructor uses a matrix to rotate and
    translate the default configuration. It is most useful when
    the transformation to be applied is already given as a matrix.
    Parameters
    ----------
    apart : 1-dim. `RectPartition`
        Partition of the angle interval.
    dpart : 1-dim. `RectPartition`
        Partition of the detector parameter interval.
    init_matrix : `array_like`, shape ``(2, 2)`` or ``(2, 3)``, optional
        Transformation matrix whose left ``(2, 2)`` block is multiplied
        with the default ``det_pos_init`` and ``det_axis_init`` to
        determine the new vectors. If present, the third column acts
        as a translation after the initial transformation.
        The resulting ``det_axis_init`` will be normalized.
    kwargs :
        Further keyword arguments passed to the class constructor.
    Returns
    -------
    geometry : `Parallel2dGeometry`
    Examples
    --------
    Mirror the second unit vector, creating a left-handed system:
    >>> apart = odl.uniform_partition(0, np.pi, 10)
    >>> dpart = odl.uniform_partition(-1, 1, 20)
    >>> matrix = np.array([[1, 0],
    ...                    [0, -1]])
    >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix)
    >>> e_x, e_y = np.eye(2)  # standard unit vectors
    >>> np.allclose(geom.det_pos_init, -e_y)
    True
    >>> np.allclose(geom.det_axis_init, e_x)
    True
    >>> np.allclose(geom.translation, (0, 0))
    True
    Adding a translation with a third matrix column:
    >>> matrix = np.array([[1, 0, 1],
    ...                    [0, -1, 1]])
    >>> geom = Parallel2dGeometry.frommatrix(apart, dpart, matrix)
    >>> np.allclose(geom.translation, (1, 1))
    True
    >>> np.allclose(geom.det_pos_init, -e_y + (1, 1))
    True
    """
    # Get transformation and translation parts from `init_matrix`
    init_matrix = np.asarray(init_matrix, dtype=float)
    if init_matrix.shape not in ((2, 2), (2, 3)):
        raise ValueError('`matrix` must have shape (2, 2) or (2, 3), '
                         'got array with shape {}'
                         ''.format(init_matrix.shape))
    trafo_matrix = init_matrix[:, :2]
    # For a (2, 3) matrix this yields the translation column with shape
    # (2,); for a (2, 2) matrix the slice is empty (size 0).
    translation = init_matrix[:, 2:].squeeze()
    # Transform the default vectors
    default_det_pos_init = cls._default_config['det_pos_init']
    default_det_axis_init = cls._default_config['det_axis_init']
    vecs_to_transform = [default_det_axis_init]
    transformed_vecs = transform_system(
        default_det_pos_init, None, vecs_to_transform, matrix=trafo_matrix)
    # Use the standard constructor with these vectors
    det_pos, det_axis = transformed_vecs
    # Only pass a translation on if one was actually given
    if translation.size != 0:
        kwargs['translation'] = translation
    return cls(apart, dpart, det_pos,
               det_axis_init=det_axis, **kwargs)
"resource": ""
} |
def det_axes(self, angles):
    """Return the detector axes tuple at ``angles``.

    Parameters
    ----------
    angles : `array-like` or sequence
        Euler angles in radians describing the rotation of the detector.
        The length of the provided argument (along the first axis in
        case of an array) must be equal to the number of Euler angles
        in this geometry.

    Returns
    -------
    axes : `numpy.ndarray`
        Unit vector(s) along which the detector is aligned.
        If ``angles`` is a single pair (or triplet) of Euler angles,
        the returned array has shape ``(2, 3)``, otherwise
        ``broadcast(*angles).shape + (2, 3)``.

    Notes
    -----
    To get an array that enumerates the detector axes in the first
    dimension, move the second-to-last axis to the first position:

        axes = det_axes(angle)
        axes_enumeration = np.moveaxis(axes, -2, 0)

    Examples
    --------
    Calling the method with a single set of angles produces a
    ``(2, 3)`` array of vertically stacked vectors:

    >>> apart = odl.uniform_partition([0, 0], [np.pi, 2 * np.pi],
    ...                               (10, 20))
    >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
    >>> geom = Parallel3dEulerGeometry(apart, dpart)
    >>> geom.det_axes([0, 0])
    array([[ 1.,  0.,  0.],
           [ 0.,  0.,  1.]])
    >>> np.allclose(geom.det_axes([np.pi / 2, 0]), [[0, 1, 0],
    ...                                             [0, 0, 1]])
    True

    The method is vectorized, i.e., it can be called with multiple
    angle parameters at once. Each of the angle arrays can have
    different shapes and will be broadcast against each other to
    determine the final shape:

    >>> # The first axis enumerates the angles
    >>> np.allclose(geom.det_axes(([0, np.pi / 2], [0, 0])),
    ...             [[[1, 0, 0],
    ...               [0, 0, 1]],
    ...              [[0, 1, 0],
    ...               [0, 0, 1]]])
    True
    >>> # Pairs of Euler angles in a (4, 5) array each
    >>> geom.det_axes((np.zeros((4, 5)), np.zeros((4, 5)))).shape
    (4, 5, 2, 3)
    >>> # Using broadcasting for "outer product" type result
    >>> geom.det_axes((np.zeros((4, 1)), np.zeros((1, 5)))).shape
    (4, 5, 2, 3)
    """
    # Transpose to take dot along axis 1 (the initial axes are stacked
    # as rows in `det_axes_init`)
    axes = self.rotation_matrix(angles).dot(self.det_axes_init.T)
    # `axes` has shape (a, 3, 2), need to roll the last dimensions
    # to the second to last place
    return np.rollaxis(axes, -1, -2)
def rotation_matrix(self, angles):
    """Return the rotation matrix to the system state at ``angles``.

    Parameters
    ----------
    angles : `array-like` or sequence
        Euler angles in radians describing the rotation of the detector.
        The length of the provided argument (along the first axis in
        case of an array) must be equal to the number of Euler angles
        in this geometry.

    Returns
    -------
    rot : `numpy.ndarray`
        Rotation matrix (or matrices) mapping vectors at the
        initial state to the ones in the state defined by ``angles``.
        The rotation is extrinsic, i.e., defined in the "world"
        coordinate system.
        If ``angles`` is a single pair (or triplet) of Euler angles,
        an array of shape ``(3, 3)`` representing a single matrix is
        returned. Otherwise, the shape of the returned array is
        ``broadcast(*angles).shape + (3, 3)``.

    Raises
    ------
    ValueError
        If ``check_bounds`` is enabled and ``angles`` is outside
        ``motion_params``.
    """
    # A single tuple of scalar angles broadcasts to shape (); in that
    # case the singleton stacking axis is squeezed away at the end
    squeeze_out = (np.broadcast(*angles).shape == ())
    angles_in = angles
    angles = tuple(np.array(angle, dtype=float, copy=False, ndmin=1)
                   for angle in angles)
    if (self.check_bounds and
            not is_inside_bounds(angles, self.motion_params)):
        raise ValueError('`angles` {} not in the valid range '
                         '{}'.format(angles_in, self.motion_params))

    matrix = euler_matrix(*angles)
    if squeeze_out:
        matrix = matrix.squeeze()

    return matrix
def frommatrix(cls, apart, dpart, init_matrix, **kwargs):
    """Create an instance of `Parallel3dAxisGeometry` using a matrix.

    This alternative constructor uses a matrix to rotate and
    translate the default configuration. It is most useful when
    the transformation to be applied is already given as a matrix.

    Parameters
    ----------
    apart : 1-dim. `RectPartition`
        Partition of the parameter interval.
    dpart : 2-dim. `RectPartition`
        Partition of the detector parameter set.
    init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``, optional
        Transformation matrix whose left ``(3, 3)`` block is multiplied
        with the default ``det_pos_init`` and ``det_axes_init`` to
        determine the new vectors. If present, the fourth column acts
        as a translation after the initial transformation.
        The resulting ``det_axes_init`` will be normalized.
    kwargs :
        Further keyword arguments passed to the class constructor.

    Returns
    -------
    geometry : `Parallel3dAxisGeometry`

    Examples
    --------
    Map unit vectors ``e_y -> e_z`` and ``e_z -> -e_y``, keeping the
    right-handedness:

    >>> apart = odl.uniform_partition(0, np.pi, 10)
    >>> dpart = odl.uniform_partition([-1, -1], [1, 1], (20, 20))
    >>> matrix = np.array([[1, 0, 0],
    ...                    [0, 0, -1],
    ...                    [0, 1, 0]])
    >>> geom = Parallel3dAxisGeometry.frommatrix(
    ...     apart, dpart, init_matrix=matrix)
    >>> geom.axis
    array([ 0., -1.,  0.])
    >>> geom.det_pos_init
    array([ 0.,  0.,  1.])
    >>> geom.det_axes_init
    array([[ 1.,  0.,  0.],
           [ 0., -1.,  0.]])

    Adding a translation with a fourth matrix column:

    >>> matrix = np.array([[0, 0, -1, 0],
    ...                    [0, 1, 0, 1],
    ...                    [1, 0, 0, 1]])
    >>> geom = Parallel3dAxisGeometry.frommatrix(apart, dpart, matrix)
    >>> geom.translation
    array([ 0.,  1.,  1.])
    >>> geom.det_pos_init  # (0, 1, 0) + (0, 1, 1)
    array([ 0.,  2.,  1.])
    """
    # Get transformation and translation parts from `init_matrix`
    init_matrix = np.asarray(init_matrix, dtype=float)
    if init_matrix.shape not in ((3, 3), (3, 4)):
        raise ValueError('`matrix` must have shape (3, 3) or (3, 4), '
                         'got array with shape {}'
                         ''.format(init_matrix.shape))
    trafo_matrix = init_matrix[:, :3]
    # Empty array (size 0) if no fourth column was given
    translation = init_matrix[:, 3:].squeeze()

    # Transform the default vectors with the (3, 3) block
    default_axis = cls._default_config['axis']
    default_det_pos_init = cls._default_config['det_pos_init']
    default_det_axes_init = cls._default_config['det_axes_init']

    vecs_to_transform = (default_det_pos_init,) + default_det_axes_init
    transformed_vecs = transform_system(
        default_axis, None, vecs_to_transform, matrix=trafo_matrix)

    # Use the standard constructor with these vectors
    axis, det_pos, det_axis_0, det_axis_1 = transformed_vecs
    if translation.size != 0:
        # Only forward `translation` if one was actually given
        kwargs['translation'] = translation

    return cls(apart, dpart, axis,
               det_pos_init=det_pos,
               det_axes_init=[det_axis_0, det_axis_1],
               **kwargs)
def wrap_ufunc_base(name, n_in, n_out, doc):
    """Return ufunc wrapper for implementation-agnostic ufunc classes.

    The returned method dispatches to ``getattr(np, name)`` through the
    ``__array_ufunc__`` protocol of ``self.elem``, forwarding ``out``
    arguments as the tuple that the protocol expects.
    """
    ufunc = getattr(np, name)
    arity = (n_in, n_out)

    if arity == (1, 1):
        def wrapper(self, out=None, **kwargs):
            elem = self.elem
            # A "bare" output (or none at all) is wrapped in a 1-tuple;
            # anything else is assumed to already have tuple form.
            if out is None or isinstance(out, (type(elem),
                                               type(elem.data))):
                out = (out,)
            return elem.__array_ufunc__(
                ufunc, '__call__', elem, out=out, **kwargs)

    elif arity == (1, 2):
        def wrapper(self, out=None, **kwargs):
            outs = (None, None) if out is None else out
            return self.elem.__array_ufunc__(
                ufunc, '__call__', self.elem, out=outs, **kwargs)

    elif arity == (2, 1):
        def wrapper(self, x2, out=None, **kwargs):
            return self.elem.__array_ufunc__(
                ufunc, '__call__', self.elem, x2, out=(out,), **kwargs)

    else:
        # Other arities (e.g. 3 inputs) are not supported here
        raise NotImplementedError

    wrapper.__name__ = wrapper.__qualname__ = name
    wrapper.__doc__ = doc
    return wrapper
def wrap_ufunc_productspace(name, n_in, n_out, doc):
    """Return ufunc wrapper for `ProductSpaceUfuncs`.

    The returned method applies the component-wise ufunc ``name`` to
    each part of ``self.elem`` and collects the results in the product
    space, or writes them into the provided output element(s).
    """
    if n_in == 1:
        if n_out == 1:
            def wrapper(self, out=None, **kwargs):
                # Allocate a new product-space element if no output is
                # given, otherwise write component-wise in-place
                if out is None:
                    result = [getattr(x.ufuncs, name)(**kwargs)
                              for x in self.elem]
                    return self.elem.space.element(result)
                else:
                    for x, out_x in zip(self.elem, out):
                        getattr(x.ufuncs, name)(out=out_x, **kwargs)
                    return out

        elif n_out == 2:
            def wrapper(self, out1=None, out2=None, **kwargs):
                # Two outputs (e.g. `frexp`, `modf`): allocate any
                # missing output element before the component loop
                if out1 is None:
                    out1 = self.elem.space.element()
                if out2 is None:
                    out2 = self.elem.space.element()
                for x, out1_x, out2_x in zip(self.elem, out1, out2):
                    getattr(x.ufuncs, name)(out1=out1_x, out2=out2_x, **kwargs)
                return out1, out2

        else:
            raise NotImplementedError

    elif n_in == 2:
        if n_out == 1:
            def wrapper(self, x2, out=None, **kwargs):
                # The second operand is either an element of the same
                # product space (paired component-wise) or some other
                # object that is applied to every component unchanged
                if x2 in self.elem.space:
                    if out is None:
                        result = [getattr(x.ufuncs, name)(x2p, **kwargs)
                                  for x, x2p in zip(self.elem, x2)]
                        return self.elem.space.element(result)
                    else:
                        for x, x2p, outp in zip(self.elem, x2, out):
                            getattr(x.ufuncs, name)(x2p, out=outp, **kwargs)
                        return out
                else:
                    if out is None:
                        result = [getattr(x.ufuncs, name)(x2, **kwargs)
                                  for x in self.elem]
                        return self.elem.space.element(result)
                    else:
                        for x, outp in zip(self.elem, out):
                            getattr(x.ufuncs, name)(x2, out=outp, **kwargs)
                        return out
        else:
            raise NotImplementedError
    else:
        raise NotImplementedError

    wrapper.__name__ = wrapper.__qualname__ = name
    wrapper.__doc__ = doc
    return wrapper
def landweber(op, x, rhs, niter, omega=None, projection=None, callback=None):
    r"""Optimized implementation of Landweber's method.

    Solves the inverse problem::

        A(x) = rhs

    Parameters
    ----------
    op : `Operator`
        Operator in the inverse problem. ``op.derivative(x).adjoint`` must be
        well-defined for ``x`` in the operator domain.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    omega : positive float, optional
        Relaxation parameter in the iteration.
        Default: ``1 / op.norm(estimate=True) ** 2``
    projection : callable, optional
        Function that can be used to modify the iterates in each iteration,
        for example enforcing positivity. The function should take one
        argument and modify it in-place.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    Notes
    -----
    This method calculates an approximate least-squares solution of
    the inverse problem of the first kind

    .. math::
        \mathcal{A} (x) = y,

    for a given :math:`y\in \mathcal{Y}`, i.e. an approximate
    solution :math:`x^*` to

    .. math::
        \min_{x\in \mathcal{X}} \| \mathcal{A}(x) - y \|_{\mathcal{Y}}^2

    for a (Frechet-) differentiable operator
    :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
    spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
    starts from an initial guess :math:`x_0` and uses the
    iteration

    .. math::
        x_{k+1} = x_k -
        \omega \ \partial \mathcal{A}(x)^* (\mathcal{A}(x_k) - y),

    where :math:`\partial \mathcal{A}(x)` is the Frechet derivative
    of :math:`\mathcal{A}` at :math:`x` and :math:`\omega` is a
    relaxation parameter. For linear problems, a choice
    :math:`0 < \omega < 2/\lVert \mathcal{A}^2\rVert` guarantees
    convergence, where :math:`\lVert\mathcal{A}\rVert` stands for the
    operator norm of :math:`\mathcal{A}`.

    Users may also optionally provide a projection to project each
    iterate onto some subset. For example enforcing positivity.

    This implementation uses a minimum amount of memory copies by
    applying re-usable temporaries and in-place evaluation.

    The method is also described in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Landweber_iteration>`_.
    """
    # TODO: add a book reference

    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))

    if omega is None:
        # Default relaxation guaranteeing convergence for linear `op`
        omega = 1 / op.norm(estimate=True) ** 2

    # Reusable temporaries
    tmp_ran = op.range.element()
    tmp_dom = op.domain.element()
    for _ in range(niter):
        op(x, out=tmp_ran)                              # tmp_ran = A(x)
        tmp_ran -= rhs                                  # tmp_ran = A(x) - rhs
        op.derivative(x).adjoint(tmp_ran, out=tmp_dom)  # tmp_dom = A'(x)^*(..)
        x.lincomb(1, x, -omega, tmp_dom)                # x -= omega * tmp_dom

        if projection is not None:
            projection(x)

        if callback is not None:
            callback(x)
def conjugate_gradient(op, x, rhs, niter, callback=None):
    """Optimized implementation of CG for self-adjoint operators.

    This method solves the inverse problem (of the first kind)::

        A(x) = y

    for a linear and self-adjoint `Operator` ``A``.

    It uses a minimum amount of memory copies by applying re-usable
    temporaries and in-place evaluation.

    The method is described (for linear systems) in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Conjugate_gradient_method>`_.

    Parameters
    ----------
    op : linear `Operator`
        Operator in the inverse problem. It must be linear and
        self-adjoint. This implies in particular that its domain and
        range are equal.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    See Also
    --------
    conjugate_gradient_normal : Solver for nonsymmetric matrices
    """
    # TODO: add a book reference
    # TODO: update doc

    if op.domain != op.range:
        raise ValueError('operator needs to be self-adjoint')

    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))

    r = op(x)
    r.lincomb(1, rhs, -1, r)       # r = rhs - A x
    p = r.copy()
    d = op.domain.element()        # Extra storage for storing A x

    sqnorm_r_old = r.norm() ** 2   # Only recalculate norm after update

    if sqnorm_r_old == 0:          # Return if no step forward
        return

    for _ in range(niter):
        op(p, out=d)               # d = A p

        inner_p_d = p.inner(d)

        if inner_p_d == 0.0:       # Return if step is 0
            return

        alpha = sqnorm_r_old / inner_p_d

        x.lincomb(1, x, alpha, p)   # x = x + alpha*p
        r.lincomb(1, r, -alpha, d)  # r = r - alpha*d

        sqnorm_r_new = r.norm() ** 2

        beta = sqnorm_r_new / sqnorm_r_old
        sqnorm_r_old = sqnorm_r_new

        p.lincomb(1, r, beta, p)   # p = r + beta * p

        if callback is not None:
            callback(x)
def conjugate_gradient_normal(op, x, rhs, niter=1, callback=None):
    """Optimized implementation of CG for the normal equation.

    This method solves the inverse problem (of the first kind) ::

        A(x) == rhs

    with a linear `Operator` ``A`` by looking at the normal equation ::

        A.adjoint(A(x)) == A.adjoint(rhs)

    It uses a minimum amount of memory copies by applying re-usable
    temporaries and in-place evaluation.

    The method is described (for linear systems) in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Conjugate_gradient_method#\
Conjugate_gradient_on_the_normal_equations>`_.

    Parameters
    ----------
    op : `Operator`
        Operator in the inverse problem. If not linear, it must have
        an implementation of `Operator.derivative`, which
        in turn must implement `Operator.adjoint`, i.e.
        the call ``op.derivative(x).adjoint`` must be valid.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem
    niter : int
        Number of iterations.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    See Also
    --------
    conjugate_gradient : Optimized solver for symmetric matrices
    odl.solvers.smooth.nonlinear_cg.conjugate_gradient_nonlinear :
        Equivalent solver for the nonlinear case
    """
    # TODO: add a book reference
    # TODO: update doc

    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))

    d = op(x)
    d.lincomb(1, rhs, -1, d)              # d = rhs - A x
    p = op.derivative(x).adjoint(d)       # p = A^T (rhs - A x)
    s = p.copy()
    q = op.range.element()
    sqnorm_s_old = s.norm() ** 2          # Only recalculate norm after update

    for _ in range(niter):
        op(p, out=q)                      # q = A p
        sqnorm_q = q.norm() ** 2
        if sqnorm_q == 0.0:               # Return if residual is 0
            return

        a = sqnorm_s_old / sqnorm_q
        x.lincomb(1, x, a, p)             # x = x + a*p
        d.lincomb(1, d, -a, q)            # d = d - a*Ap
        op.derivative(p).adjoint(d, out=s)  # s = A^T d

        sqnorm_s_new = s.norm() ** 2
        b = sqnorm_s_new / sqnorm_s_old
        sqnorm_s_old = sqnorm_s_new

        p.lincomb(1, s, b, p)             # p = s + b * p

        if callback is not None:
            callback(x)
def gauss_newton(op, x, rhs, niter, zero_seq=None,
                 callback=None):
    """Optimized implementation of a Gauss-Newton method.

    This method solves the inverse problem (of the first kind)::

        A(x) = y

    for a (Frechet-) differentiable `Operator` ``A`` using a
    Gauss-Newton iteration.

    It uses a minimum amount of memory copies by applying re-usable
    temporaries and in-place evaluation.

    A variant of the method applied to a specific problem is described
    in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm>`_.

    Parameters
    ----------
    op : `Operator`
        Operator in the inverse problem. If not linear, it must have
        an implementation of `Operator.derivative`, which
        in turn must implement `Operator.adjoint`, i.e.
        the call ``op.derivative(x).adjoint`` must be valid.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : ``op.range`` element
        Right-hand side of the equation defining the inverse problem
    niter : int
        Maximum number of iterations.
    zero_seq : iterable, optional
        Zero sequence whose values are used for the regularization of
        the linearized problem in each Newton step.
        Default: ``exp_zero_seq(2.0)``.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.
    """
    if x not in op.domain:
        raise TypeError('`x` {!r} is not in the domain of `op` {!r}'
                        ''.format(x, op.domain))

    if zero_seq is None:
        # BUGFIX: previously `zero_seq=exp_zero_seq(2.0)` was the default
        # parameter value, evaluated ONCE at function definition time.
        # Since it is a generator, repeated calls to `gauss_newton` kept
        # consuming the same shared sequence. Creating it per call fixes
        # that while keeping the same default behavior for a first call.
        zero_seq = exp_zero_seq(2.0)

    x0 = x.copy()
    id_op = IdentityOperator(op.domain)
    dx = op.domain.zero()

    # Reusable temporaries
    tmp_dom = op.domain.element()
    u = op.domain.element()
    tmp_ran = op.range.element()
    v = op.range.element()

    for _ in range(niter):
        tm = next(zero_seq)  # current Tikhonov regularization parameter
        deriv = op.derivative(x)
        deriv_adjoint = deriv.adjoint

        # v = rhs - op(x) - deriv(x0-x)
        # u = deriv.T(v)
        op(x, out=tmp_ran)              # eval op(x)
        v.lincomb(1, rhs, -1, tmp_ran)  # assign v = rhs - op(x)
        tmp_dom.lincomb(1, x0, -1, x)   # assign temp tmp_dom = x0 - x
        deriv(tmp_dom, out=tmp_ran)     # eval deriv(x0-x)
        v -= tmp_ran                    # assign v = rhs-op(x)-deriv(x0-x)
        deriv_adjoint(v, out=u)         # eval/assign u = deriv.T(v)

        # Solve equation Tikhonov regularized system
        # (deriv.T o deriv + tm * id_op)^-1 u = dx
        tikh_op = OperatorSum(OperatorComp(deriv_adjoint, deriv),
                              tm * id_op, tmp_dom)

        # TODO: allow user to select other method
        conjugate_gradient(tikh_op, dx, u, 3)

        # Update x
        x.lincomb(1, x0, 1, dx)  # x = x0 + dx

        if callback is not None:
            callback(x)
def kaczmarz(ops, x, rhs, niter, omega=1, projection=None, random=False,
             callback=None, callback_loop='outer'):
    r"""Optimized implementation of Kaczmarz's method.

    Solves the inverse problem given by the set of equations::

        A_n(x) = rhs_n

    This is also known as the Landweber-Kaczmarz's method, since the method
    coincides with the Landweber method for a single operator.

    Parameters
    ----------
    ops : sequence of `Operator`'s
        Operators in the inverse problem. ``op[i].derivative(x).adjoint`` must
        be well-defined for ``x`` in the operator domain and for all ``i``.
    x : ``op.domain`` element
        Element to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    rhs : sequence of ``ops[i].range`` elements
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    omega : positive float or sequence of positive floats, optional
        Relaxation parameter in the iteration. If a single float is given the
        same step is used for all operators, otherwise separate steps are
        used.
    projection : callable, optional
        Function that can be used to modify the iterates in each iteration,
        for example enforcing positivity. The function should take one
        argument and modify it in-place.
    random : bool, optional
        If `True`, the order of the operators is randomized in each
        iteration.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.
    callback_loop : {'inner', 'outer'}
        Whether the callback should be called in the inner or outer loop.

    Notes
    -----
    This method calculates an approximate least-squares solution of
    the inverse problem of the first kind

    .. math::
        \mathcal{A}_i (x) = y_i \quad 1 \leq i \leq n,

    for a given :math:`y_n \in \mathcal{Y}_n`, i.e. an approximate
    solution :math:`x^*` to

    .. math::
        \min_{x\in \mathcal{X}}
        \sum_{i=1}^n \| \mathcal{A}_i(x) - y_i \|_{\mathcal{Y}_i}^2

    for a (Frechet-) differentiable operator
    :math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
    spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
    starts from an initial guess :math:`x_0` and uses the
    iteration

    .. math::
        x_{k+1} = x_k - \omega_{[k]} \ \partial \mathcal{A}_{[k]}(x_k)^*
        (\mathcal{A}_{[k]}(x_k) - y_{[k]}),

    where :math:`\partial \mathcal{A}_{[k]}(x_k)` is the Frechet derivative
    of :math:`\mathcal{A}_{[k]}` at :math:`x_k`, :math:`\omega_{[k]}` is a
    relaxation parameter and :math:`[k] := k \text{ mod } n`.

    For linear problems, a choice
    :math:`0 < \omega_i < 2/\lVert \mathcal{A}_{i}^2\rVert` guarantees
    convergence, where :math:`\|\mathcal{A}_{i}\|` stands for the
    operator norm of :math:`\mathcal{A}_{i}`.

    This implementation uses a minimum amount of memory copies by
    applying re-usable temporaries and in-place evaluation.

    The method is also described in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Kaczmarz_method>`_. and in Natterer, F.
    Mathematical Methods in Image Reconstruction, section 5.3.2.

    See Also
    --------
    landweber
    """
    domain = ops[0].domain
    if any(domain != opi.domain for opi in ops):
        raise ValueError('domains of `ops` are not all equal')

    if x not in domain:
        raise TypeError('`x` {!r} is not in the domain of `ops` {!r}'
                        ''.format(x, domain))

    if len(ops) != len(rhs):
        raise ValueError('`number of `ops` {} does not match number of '
                         '`rhs` {}'.format(len(ops), len(rhs)))

    # Normalize `omega` to one relaxation parameter per operator
    omega = normalized_scalar_param_list(omega, len(ops), param_conv=float)

    # Reusable elements in the range, one per type of space
    ranges = [opi.range for opi in ops]
    unique_ranges = set(ranges)
    tmp_rans = {ran: ran.element() for ran in unique_ranges}

    # Single reusable element in the domain
    tmp_dom = domain.element()

    # Iteratively find solution
    for _ in range(niter):
        # Operator visiting order for this sweep (possibly randomized)
        if random:
            rng = np.random.permutation(range(len(ops)))
        else:
            rng = range(len(ops))

        for i in rng:
            # Find residual
            tmp_ran = tmp_rans[ops[i].range]
            ops[i](x, out=tmp_ran)
            tmp_ran -= rhs[i]

            # Update x
            ops[i].derivative(x).adjoint(tmp_ran, out=tmp_dom)
            x.lincomb(1, x, -omega[i], tmp_dom)

            if projection is not None:
                projection(x)

            if callback is not None and callback_loop == 'inner':
                callback(x)
        if callback is not None and callback_loop == 'outer':
            callback(x)
def conjugate_gradient_nonlinear(f, x, line_search=1.0, maxiter=1000, nreset=0,
                                 tol=1e-16, beta_method='FR',
                                 callback=None):
    r"""Conjugate gradient for nonlinear problems.

    Parameters
    ----------
    f : `Functional`
        Functional with ``f.gradient``.
    x : ``op.domain`` element
        Vector to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step.
    line_search : float or `LineSearch`, optional
        Strategy to choose the step length. If a float is given, it is used as
        a fixed step length.
    maxiter : int, optional
        Maximum number of iterations to perform.
    nreset : int, optional
        Number of times the solver should be reset. Default: no reset.
    tol : float, optional
        Tolerance that should be used to terminating the iteration.
    beta_method : {'FR', 'PR', 'HS', 'DY'}, optional
        Method to calculate ``beta`` in the iterates.

        - ``'FR'`` : Fletcher-Reeves
        - ``'PR'`` : Polak-Ribiere
        - ``'HS'`` : Hestenes-Stiefel
        - ``'DY'`` : Dai-Yuan
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    Notes
    -----
    This is a general and optimized implementation of the nonlinear conjguate
    gradient method for solving a general unconstrained optimization problem

    .. math::
        \min f(x)

    for a differentiable functional
    :math:`f: \mathcal{X}\to \mathbb{R}` on a Hilbert space
    :math:`\mathcal{X}`. It does so by finding a zero of the gradient

    .. math::
        \nabla f: \mathcal{X} \to \mathcal{X}.

    The method is described in a
    `Wikipedia article
    <https://en.wikipedia.org/wiki/Nonlinear_conjugate_gradient_method>`_.

    See Also
    --------
    odl.solvers.smooth.newton.bfgs_method :
        Quasi-Newton solver for the same problem
    odl.solvers.iterative.iterative.conjugate_gradient :
        Optimized solver for least-squares problem with linear and symmetric
        operator
    odl.solvers.iterative.iterative.conjugate_gradient_normal :
        Equivalent solver but for least-squares problem with linear operator
    """
    if x not in f.domain:
        raise TypeError('`x` {!r} is not in the domain of `f` {!r}'
                        ''.format(x, f.domain))

    if not callable(line_search):
        # A numeric step length means a constant line search
        line_search = ConstantLineSearch(line_search)

    if beta_method not in ['FR', 'PR', 'HS', 'DY']:
        raise ValueError('unknown ``beta_method``')

    for _ in range(nreset + 1):
        # First iteration is done without beta
        dx = -f.gradient(x)
        dir_derivative = -dx.inner(dx)
        if abs(dir_derivative) < tol:
            return
        a = line_search(x, dx, dir_derivative)
        x.lincomb(1, x, a, dx)  # x = x + a * dx

        s = dx  # for 'HS' and 'DY' beta methods

        for _ in range(maxiter // (nreset + 1)):
            # Compute dx as -grad f
            dx, dx_old = -f.gradient(x), dx

            # Calculate "beta"
            if beta_method == 'FR':
                beta = dx.inner(dx) / dx_old.inner(dx_old)
            elif beta_method == 'PR':
                beta = dx.inner(dx - dx_old) / dx_old.inner(dx_old)
            elif beta_method == 'HS':
                beta = - dx.inner(dx - dx_old) / s.inner(dx - dx_old)
            elif beta_method == 'DY':
                beta = - dx.inner(dx) / s.inner(dx - dx_old)
            else:
                raise RuntimeError('unknown ``beta_method``')

            # Reset beta if negative.
            beta = max(0, beta)

            # Update search direction (in-place; `s` may alias `dx_old`
            # here, but `dx_old` is rebound before its next use)
            s.lincomb(1, dx, beta, s)  # s = dx + beta * s

            # Find optimal step along s
            dir_derivative = -dx.inner(s)
            if abs(dir_derivative) <= tol:
                return
            a = line_search(x, s, dir_derivative)

            # Update position
            x.lincomb(1, x, a, s)  # x = x + a * s

            if callback is not None:
                callback(x)
def tspace_type(space, impl, dtype=None):
    """Select the correct corresponding tensor space.

    Parameters
    ----------
    space : `LinearSpace`
        Template space from which to infer an adequate tensor space. If
        it has a ``field`` attribute, ``dtype`` must be consistent with it.
    impl : string
        Implementation backend for the tensor space.
    dtype : optional
        Data type which the space is supposed to use. If ``None`` is
        given, the space type is purely determined from ``space`` and
        ``impl``. Otherwise, it must be compatible with the
        field of ``space``.

    Returns
    -------
    stype : type
        Space type selected after the space's field, the backend and
        the data type.

    Raises
    ------
    TypeError
        If ``dtype`` is incompatible with the field of ``space``.
    NotImplementedError
        If no tensor space is available for ``impl``.
    """
    # If `space` has no `field` attribute, `field_type` is `NoneType`,
    # which compares unequal to both number field types, so no check
    # below fires (consistent with the docstring).
    # BUGFIX: the original guards `field_type is None or ...` were dead
    # code, since `type(...)` never returns `None`; they are removed here
    # with behavior unchanged.
    field_type = type(getattr(space, 'field', None))

    if dtype is None:
        pass
    elif is_real_floating_dtype(dtype):
        if field_type == ComplexNumbers:
            raise TypeError('real floating data type {!r} requires space '
                            'field to be of type RealNumbers, got {}'
                            ''.format(dtype, field_type))
    elif is_complex_floating_dtype(dtype):
        if field_type == RealNumbers:
            raise TypeError('complex floating data type {!r} requires space '
                            'field to be of type ComplexNumbers, got {!r}'
                            ''.format(dtype, field_type))
    elif is_numeric_dtype(dtype):
        if field_type == ComplexNumbers:
            raise TypeError('non-floating data type {!r} requires space field '
                            'to be of type RealNumbers, got {!r}'
                            .format(dtype, field_type))

    try:
        return tensor_space_impl(impl)
    except ValueError:
        raise NotImplementedError('no corresponding tensor space available '
                                  'for space {!r} and implementation {!r}'
                                  ''.format(space, impl))
def _lincomb(self, a, x1, b, x2, out):
    """Raw linear combination ``out = a * x1 + b * x2``.

    Delegates to the backend tensor space, operating on the raw
    tensors of the discretized-space elements.
    """
    backend = self.tspace
    backend._lincomb(a, x1.tensor, b, x2.tensor, out.tensor)
def _dist(self, x1, x2):
    """Raw distance between two elements.

    Delegates to the backend tensor space's distance on raw tensors.
    """
    backend = self.tspace
    return backend._dist(x1.tensor, x2.tensor)
def _inner(self, x1, x2):
    """Raw inner product of two elements.

    Delegates to the backend tensor space's inner product on raw
    tensors.
    """
    backend = self.tspace
    return backend._inner(x1.tensor, x2.tensor)
def sampling(self, ufunc, **kwargs):
    """Sample a continuous function and assign to this element.

    Parameters
    ----------
    ufunc : ``self.space.fspace`` element
        The continuous function that should be sampled.
    kwargs :
        Additional arguments for the sampling operator implementation.

    Examples
    --------
    >>> space = odl.uniform_discr(0, 1, 5)
    >>> x = space.element()

    Assign x according to a continuous function:

    >>> x.sampling(lambda t: t)
    >>> x  # Print values at grid points (which are centered)
    uniform_discr(0.0, 1.0, 5).element([ 0.1,  0.3,  0.5,  0.7,  0.9])

    See Also
    --------
    DiscretizedSpace.sampling : For full description
    """
    # Write the sampled values directly into this element's raw tensor
    self.space.sampling(ufunc, out=self.tensor, **kwargs)
def _normalize_sampling_points(sampling_points, ndim):
    """Normalize points to an ndim-long list of linear index arrays.

    Converts sampling indices for `SamplingOperator` from integers or
    array-like objects into a list of length ``ndim`` whose entries are
    1D `numpy.ndarray`s with ``dtype=int`` (``size=0`` for ``ndim == 0``),
    suitable for NumPy fancy indexing of an ``ndim``-dimensional array
    via ``result = ndim_array[sampling_points]``.
    """
    points_in = sampling_points

    if ndim == 0:
        # Only an empty index set is meaningful for a 0-dim. domain
        normalized = [np.array(sampling_points, dtype=int, copy=False)]
        if normalized[0].size != 0:
            raise ValueError('`sampling_points` must be empty for '
                             '0-dim. `domain`')
        return normalized

    if ndim == 1:
        if isinstance(sampling_points, Integral):
            sampling_points = (sampling_points,)
        arr = np.array(sampling_points, dtype=int, copy=False, ndmin=1)
        # Unwrap a sequence containing exactly one index array
        if arr.ndim == 2 and arr.shape[0] == 1:
            arr = arr[0]
        if arr.ndim > 1:
            raise ValueError('expected 1D index (array), got {}'
                             ''.format(points_in))
        return [arr]

    # ndim > 1: one index array per axis is required
    try:
        iter(sampling_points)
    except TypeError:
        raise TypeError('`sampling_points` must be a sequence '
                        'for domain with ndim > 1')

    if np.ndim(sampling_points) == 1:
        normalized = [np.array(p, dtype=int) for p in sampling_points]
    else:
        normalized = [np.array(pts, dtype=int, copy=False, ndmin=1)
                      for pts in sampling_points]

    if any(pts.ndim != 1 for pts in normalized):
        raise ValueError('index arrays in `sampling_points` must be 1D, '
                         'got {!r}'.format(points_in))

    return normalized
def derivative(self, vf):
    """Derivative of the point-wise norm operator at ``vf``.

    The derivative at ``F`` of the point-wise norm operator ``N``
    with finite exponent ``p`` and weights ``w`` is the pointwise
    inner product with the vector field ::

        x --> N(F)(x)^(1-p) * [ F_j(x) * |F_j(x)|^(p-2) ]_j

    Note that this is not well-defined for ``F = 0``. If ``p < 2``,
    any zero component will result in a singularity.

    Parameters
    ----------
    vf : `domain` `element-like`
        Vector field ``F`` at which to evaluate the derivative.

    Returns
    -------
    deriv : `PointwiseInner`
        Derivative operator at the given point ``vf``.

    Raises
    ------
    NotImplementedError
        * if the vector field space is complex, since the derivative
          is not linear in that case
        * if the exponent is ``inf``
    """
    if self.domain.field == ComplexNumbers():
        raise NotImplementedError('operator not Frechet-differentiable '
                                  'on a complex space')

    if self.exponent == float('inf'):
        raise NotImplementedError('operator not Frechet-differentiable '
                                  'for exponent = inf')

    vf = self.domain.element(vf)
    # Pointwise norm N(F)(x), raised to the power (p-1) below
    vf_pwnorm_fac = self(vf)
    if self.exponent != 2:  # optimize away most common case.
        vf_pwnorm_fac **= (self.exponent - 1)

    inner_vf = vf.copy()

    for gi in inner_vf:
        # gi <- F_j * |F_j|^(p-2), component-wise and in-place
        gi *= gi.ufuncs.absolute().ufuncs.power(self.exponent - 2)
        if self.exponent >= 2:
            # Any component that is zero is not divided with
            nz = (vf_pwnorm_fac.asarray() != 0)
            gi[nz] /= vf_pwnorm_fac[nz]
        else:
            # For exponents < 2 there will be a singularity if any
            # component is zero. This results in inf or nan. See the
            # documentation for further details.
            gi /= vf_pwnorm_fac

    return PointwiseInner(self.domain, inner_vf, weighting=self.weights)
def adjoint(self):
    """Adjoint operator represented by the adjoint matrix.

    Returns
    -------
    adjoint : `MatrixOperator`
    """
    # The adjoint matrix is the conjugate transpose; domain and range
    # swap roles for the adjoint operator.
    conj_transposed = self.matrix.conj().T
    return MatrixOperator(conj_transposed,
                          domain=self.range, range=self.domain,
                          axis=self.axis)
def inverse(self):
    """Inverse operator represented by the inverse matrix.

    Taking the inverse causes sparse matrices to become dense and is
    generally very heavy computationally since the matrix is inverted
    numerically (an O(n^3) operation). It is recommended to instead
    use one of the solvers available in the ``odl.solvers`` package.

    Returns
    -------
    inverse : `MatrixOperator`
    """
    # Lazy import to improve `import odl` time
    import scipy.sparse

    matrix = self.matrix
    if scipy.sparse.isspmatrix(matrix):
        # `np.linalg.inv` needs a dense array; densify sparse input
        matrix = matrix.toarray()

    return MatrixOperator(np.linalg.inv(matrix),
                          domain=self.range, range=self.domain,
                          axis=self.axis)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.