_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q35400
KullbackLeibler.gradient
train
def gradient(self):
    r"""Gradient of the KL functional.

    The gradient of `KullbackLeibler` with ``prior`` :math:`g` is given as

    .. math::
        \nabla F(x) = 1 - \frac{g}{x}.

    The gradient is not defined in points where one or more components
    are non-positive.
    """
    func = self

    class KLGradient(Operator):

        """The gradient operator of this functional."""

        def __init__(self):
            """Initialize a new instance."""
            super(KLGradient, self).__init__(
                func.domain, func.domain, linear=False)

        def _call(self, x):
            """Apply the gradient operator to the given point.

            The gradient is not defined in points where one or more
            components are non-positive.
            """
            # 1 - g/x with the default prior g == 1
            if func.prior is None:
                return 1 + (-1.0) / x
            return 1 + (-func.prior) / x

    return KLGradient()
python
{ "resource": "" }
q35401
KullbackLeiblerCrossEntropyConvexConj._call
train
def _call(self, x):
    """Return the value in the point ``x``."""
    # Integrate (prior * (exp(x) - 1)) over the domain via the inner
    # product with the constant-one element.
    expm1 = np.exp(x) - 1
    one = self.domain.one()
    if self.prior is None:
        return self.domain.element(expm1).inner(one)
    return (self.prior * expm1).inner(one)
python
{ "resource": "" }
q35402
SeparableSum._call
train
def _call(self, x): """Return the separable sum evaluated in ``x``.""" return sum(fi(xi) for xi, fi in zip(x, self.functionals))
python
{ "resource": "" }
q35403
SeparableSum.convex_conj
train
def convex_conj(self):
    """The convex conjugate functional.

    Convex conjugate distributes over separable sums, so the result is
    simply the separable sum of the convex conjugates.
    """
    return SeparableSum(*(func.convex_conj for func in self.functionals))
python
{ "resource": "" }
q35404
QuadraticForm.convex_conj
train
def convex_conj(self):
    r"""The convex conjugate functional of the quadratic form.

    Notes
    -----
    The convex conjugate of the quadratic form
    :math:`<x, Ax> + <b, x> + c` is given by

    .. math::
        (<x, Ax> + <b, x> + c)^* (x) =
        <(x - b), A^-1 (x - b)> - c =
        <x , A^-1 x> - <x, A^-* b> - <x, A^-1 b> + <b, A^-1 b> - c.

    If the quadratic part of the functional is zero it is instead given
    by a translated indicator function on zero, i.e., if

    .. math::
        f(x) = <b, x> + c,

    then

    .. math::
        f^*(x^*) =
        \begin{cases}
            -c & \text{if } x^* = b \\
            \infty & \text{else.}
        \end{cases}

    See Also
    --------
    IndicatorZero
    """
    if self.operator is None:
        # Purely affine case: the conjugate is an indicator at `vector`
        # (or at zero if no vector is given), offset by -constant.
        tmp = IndicatorZero(space=self.domain, constant=-self.constant)
        if self.vector is None:
            return tmp
        else:
            return tmp.translated(self.vector)

    if self.vector is None:
        # Handle trivial case separately
        return QuadraticForm(operator=self.operator.inverse,
                             constant=-self.constant)
    else:
        # Compute the needed variables
        opinv = self.operator.inverse

        # Linear term -A^-* b - A^-1 b and constant <b, A^-1 b> - c,
        # matching the expansion in the docstring above.
        vector = -opinv.adjoint(self.vector) - opinv(self.vector)
        constant = self.vector.inner(opinv(self.vector)) - self.constant

        # Create new quadratic form
        return QuadraticForm(operator=opinv,
                             vector=vector,
                             constant=constant)
python
{ "resource": "" }
q35405
NuclearNorm._asarray
train
def _asarray(self, vec):
    """Convert ``x`` to an array.

    Here the indices are changed such that the "outer" indices come
    last in order to have the access order as `numpy.linalg.svd`
    needs it. This is the inverse of `_asvector`.
    """
    full_shape = self.domain[0, 0].shape + self.pshape
    out = np.empty(full_shape, dtype=self.domain.dtype)
    # Move the two product-space indices to the trailing axes.
    for outer_idx, row in enumerate(vec):
        for inner_idx, component in enumerate(row):
            out[..., outer_idx, inner_idx] = component.asarray()
    return out
python
{ "resource": "" }
q35406
NuclearNorm._asvector
train
def _asvector(self, arr):
    """Convert ``arr`` to a `domain` element.

    This is the inverse of `_asarray`.
    """
    # Move the trailing "outer" axes back to the front before wrapping.
    return self.domain.element(moveaxis(arr, [-2, -1], [0, 1]))
python
{ "resource": "" }
q35407
NuclearNorm.proximal
train
def proximal(self):
    """Return the proximal operator.

    Raises
    ------
    NotImplementedError
        if ``outer_exp`` is not 1 or ``singular_vector_exp`` is not
        1, 2 or infinity
    """
    if self.outernorm.exponent != 1:
        raise NotImplementedError('`proximal` only implemented for '
                                  '`outer_exp==1`')
    if self.pwisenorm.exponent not in [1, 2, np.inf]:
        raise NotImplementedError('`proximal` only implemented for '
                                  '`singular_vector_exp` in [1, 2, inf]')

    def nddot(a, b):
        """Compute pointwise matrix product in the last indices."""
        return np.einsum('...ij,...jk->...ik', a, b)

    func = self

    # Add epsilon to fix rounding errors, i.e. make sure that when we
    # project on the unit ball, we actually end up slightly inside the unit
    # ball. Without, we may end up slightly outside.
    dtype = getattr(self.domain, 'dtype', float)
    eps = np.finfo(dtype).resolution * 10

    class NuclearNormProximal(Operator):

        """Proximal operator of `NuclearNorm`."""

        def __init__(self, sigma):
            # Proximal step size
            self.sigma = float(sigma)
            super(NuclearNormProximal, self).__init__(
                func.domain, func.domain, linear=False)

        def _call(self, x):
            """Return ``self(x)``."""
            arr = func._asarray(x)

            # Compute SVD
            U, s, Vt = np.linalg.svd(arr, full_matrices=False)

            # transpose pointwise
            V = Vt.swapaxes(-1, -2)

            # Take pseudoinverse of s
            sinv = s.copy()
            sinv[sinv != 0] = 1 / sinv[sinv != 0]

            # Take pointwise proximal operator of s w.r.t. the norm
            # on the singular vectors
            if func.pwisenorm.exponent == 1:
                # Soft thresholding of the singular values
                abss = np.abs(s) - (self.sigma - eps)
                sprox = np.sign(s) * np.maximum(abss, 0)
            elif func.pwisenorm.exponent == 2:
                s_reordered = moveaxis(s, -1, 0)
                snorm = func.pwisenorm(s_reordered).asarray()
                snorm = np.maximum(self.sigma, snorm, out=snorm)
                sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
            elif func.pwisenorm.exponent == np.inf:
                snorm = np.sum(np.abs(s), axis=-1)
                snorm = np.maximum(self.sigma, snorm, out=snorm)
                sprox = ((1 - eps) - self.sigma / snorm)[..., None] * s
            else:
                # Unreachable: exponents are validated in `proximal` above
                raise RuntimeError

            # Compute s matrix
            sproxsinv = (sprox * sinv)[..., :, None]

            # Compute the final result
            result = nddot(nddot(arr, V), sproxsinv * Vt)

            # Cast to vector and return. Note array and vector have
            # different shapes.
            return func._asvector(result)

        def __repr__(self):
            """Return ``repr(self)``."""
            return '{!r}.proximal({})'.format(func, self.sigma)

    return NuclearNormProximal
python
{ "resource": "" }
q35408
NuclearNorm.convex_conj
train
def convex_conj(self):
    """Convex conjugate of the nuclear norm.

    The convex conjugate is the indicator function on the unit ball
    of the dual norm where the dual norm is obtained by taking the
    conjugate exponent of both the outer and singular vector exponents.
    """
    dual_outer_exp = conj_exponent(self.outernorm.exponent)
    dual_pwise_exp = conj_exponent(self.pwisenorm.exponent)
    return IndicatorNuclearNormUnitBall(self.domain,
                                        dual_outer_exp,
                                        dual_pwise_exp)
python
{ "resource": "" }
q35409
IndicatorNuclearNormUnitBall.convex_conj
train
def convex_conj(self):
    """Convex conjugate of the unit ball indicator of the nuclear norm.

    The convex conjugate is the dual nuclear norm where the dual norm
    is obtained by taking the conjugate exponent of both the outer and
    singular vector exponents.
    """
    dual_outer_exp = conj_exponent(self.__norm.outernorm.exponent)
    dual_pwise_exp = conj_exponent(self.__norm.pwisenorm.exponent)
    return NuclearNorm(self.domain, dual_outer_exp, dual_pwise_exp)
python
{ "resource": "" }
q35410
Huber.convex_conj
train
def convex_conj(self):
    """The convex conjugate"""
    # On product spaces the Huber norm is built on the group L1 norm,
    # otherwise on the plain L1 norm.
    base = (GroupL1Norm(self.domain, 2)
            if isinstance(self.domain, ProductSpace)
            else L1Norm(self.domain))
    return FunctionalQuadraticPerturb(base.convex_conj,
                                      quadratic_coeff=self.gamma / 2)
python
{ "resource": "" }
q35411
TheanoOperator.make_node
train
def make_node(self, x):
    """Create a node for the computation graph.

    Parameters
    ----------
    x : `theano.tensor.var.TensorVariable`
        Input to the node.

    Returns
    -------
    node : `theano.gof.graph.Apply`
        Node for the Theano expression graph. Its only input is ``x``,
        and the output is of the same type.
    """
    x = theano.tensor.as_tensor_variable(x)
    # Create tensor type with correct dtype.
    # The second argument specifies the number of dimensions of the output.
    # False means that we do not support broadcasting.
    if isinstance(self.operator, Functional):
        # Make scalar out type
        out_type = theano.tensor.TensorVariable(
            theano.tensor.TensorType(self.operator.domain.dtype, ()))
    else:
        # One (non-broadcastable) dimension per axis of the ODL range
        out_type = theano.tensor.TensorVariable(
            theano.tensor.TensorType(
                self.operator.range.dtype,
                [False] * len(self.operator.range.shape)))
    return theano.Apply(self, [x], [out_type.type()])
python
{ "resource": "" }
q35412
TheanoOperator.perform
train
def perform(self, node, inputs, output_storage):
    """Evaluate this node's computation.

    Parameters
    ----------
    node : `theano.gof.graph.Apply`
        The node of this Op in the computation graph.
    inputs : 1-element list of arrays
        Concrete values (usually `numpy.ndarray`) supplied for the
        symbolic input variable ``x``.
    output_storage : 1-element list of 1-element lists
        Storage cell whose single ``None`` entry must be replaced by
        the result of applying the wrapped ODL operator.
    """
    input_value = inputs[0]
    out_cell = output_storage[0]
    # Evaluate the wrapped ODL operator and store the result as an array.
    out_cell[0] = np.asarray(self.operator(input_value))
python
{ "resource": "" }
q35413
TheanoOperator.infer_shape
train
def infer_shape(self, node, input_shapes):
    """Return a list of output shapes based on ``input_shapes``.

    This method is optional. It allows to compute the shape of the
    output without having to evaluate.

    Parameters
    ----------
    node : `theano.gof.graph.Apply`
        The node of this Op in the computation graph.
    input_shapes : 1-element list of `theano.compile.ops.Shape`
        Symbolic shape of the input.

    Returns
    -------
    output_shapes : 1-element list of tuples
        Fixed shape of the output determined by `odl_op`.
    """
    if isinstance(self.operator, Functional):
        # Functionals produce scalars
        return [()]
    # Need to convert to native to avoid error in Theano from future.int
    return [tuple(native(si) for si in self.operator.range.shape)]
python
{ "resource": "" }
q35414
TheanoOperator.R_op
train
def R_op(self, inputs, eval_points):
    """Apply the adjoint of the Jacobian at ``inputs`` to ``eval_points``.

    This is the symbolic counterpart of ODL's ::

        op.derivative(x).adjoint(v)

    See `grad` for its usage.

    Parameters
    ----------
    inputs : 1-element list of `theano.tensor.var.TensorVariable`
        Symbolic input to the gradient, the point at which the Jacobian
        is computed.
    eval_points : 1-element list of `theano.tensor.var.TensorVariable`
        Symbolic input to the adjoint of the Jacobian, i.e., the
        variable to which the Jacobian adjoint should be applied.

    Returns
    -------
    outputs : 1-element list of `theano.tensor.var.TensorVariable`
        Symbolic result of the application of the Jacobian adjoint.
        It uses a wrapper class ``OdlDerivativeAdjointAsTheanoROp`` for
        ``(x, v) --> op.derivative(x).adjoint(v)``.
    """
    # ODL weights spaces, Theano does not. We need to handle this
    try:
        dom_weight = self.operator.domain.weighting.const
    except AttributeError:
        # Unweighted (or non-constant-weighted) space
        dom_weight = 1.0
    try:
        ran_weight = self.operator.range.weighting.const
    except AttributeError:
        ran_weight = 1.0
    scale = dom_weight / ran_weight

    op = self

    class TheanoJacobianAdjoint(theano.Op):

        __props__ = ()

        # NOTE(review): this string is not picked up as the class
        # docstring since `__props__ = ()` precedes it.
        """Wrap ``op.derivative(x).adjoint(v)`` into a Theano Op.

        This Op has two inputs, ``x`` and ``v``, where ``x`` is the
        point at which the Jacobian is taken, and ``v`` the tensor to
        which its adjoint is applied. There is only one output, which
        is of the same type as ``v`` (and ``x``).
        """

        def make_node(self, x, v):
            """Create a node for the computation graph."""
            x = theano.tensor.as_tensor_variable(x)
            v = theano.tensor.as_tensor_variable(v)
            return theano.Apply(self, [x, v], [x.type()])

        def perform(self, node, inputs_storage, output_storage):
            """Evaluate this node's computation.

            This method computes ::

                op.derivative(x).adjoint(v)
            """
            x = inputs_storage[0]
            v = inputs_storage[1]
            out = output_storage[0]
            out[0] = np.asarray(op.operator.derivative(x).adjoint(v))
            # Compensate for the ODL space weightings (see `scale` above)
            if scale != 1.0:
                out[0] *= scale

        def infer_shape(self, node, input_shapes):
            """Return a list of output shapes based on ``input_shapes``."""
            # Need to convert to native to avoid error in theano from
            # future.int
            return [tuple(native(si) for si in op.operator.domain.shape)]

    r_op = TheanoJacobianAdjoint()
    r_op_apply = r_op(inputs[0], eval_points[0])
    return [r_op_apply]
python
{ "resource": "" }
q35415
reciprocal_grid
train
def reciprocal_grid(grid, shift=True, axes=None, halfcomplex=False):
    """Return the reciprocal of the given regular grid.

    This function calculates the reciprocal (Fourier/frequency space)
    grid for a given regular grid defined by the nodes::

        x[k] = x[0] + k * s,

    where ``k = (k[0], ..., k[d-1])`` is a ``d``-dimensional index in
    the range ``0 <= k < N`` (component-wise). The multi-index ``N``
    is the shape of the input grid. This grid's reciprocal is then
    given by the nodes::

        xi[j] = xi[0] + j * sigma,

    with the reciprocal grid stride ``sigma = 2*pi / (s * N)``. The
    minimum frequency ``xi[0]`` can in principle be chosen freely, but
    usually it is chosen in such a way that the reciprocal grid is
    centered around zero. For this, there are two possibilities:

    1. Make the grid point-symmetric around 0.

    2. Make the grid "almost" point-symmetric around zero by shifting
       it to the left by half a reciprocal stride.

    In the first case, the minimum frequency (per axis) is given as::

        xi_1[0] = -pi/s + pi/(s*n) = -pi/s + sigma/2.

    For the second case, it is::

        xi_1[0] = -pi / s.

    Note that the zero frequency is contained in case 1 for an odd
    number of points, while for an even size, the second option
    guarantees that 0 is contained.

    If a real-to-complex (half-complex) transform is to be computed,
    the reciprocal grid has the shape ``M[i] = floor(N[i]/2) + 1`` in
    the last transform axis ``i``.

    Parameters
    ----------
    grid : uniform `RectGrid`
        Original sampling grid.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the
        negative direction. With a sequence, this option is applied
        separately on each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence. ``None`` means all axes in ``grid``.
    halfcomplex : bool, optional
        If ``True``, return the half of the grid with last coordinate
        less than zero. This is related to the fact that for
        real-valued functions, the other half is the mirrored complex
        conjugate of the given half and therefore needs not be stored.

    Returns
    -------
    reciprocal_grid : uniform `RectGrid`
        The reciprocal grid.
    """
    if axes is None:
        axes = list(range(grid.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    # List indicating shift or not per "active" axis, same length as axes
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Full-length vectors
    stride = grid.stride.copy()
    # Degenerate (length-1) axes report stride 0; use 1 to avoid
    # division by zero below.
    stride[stride == 0] = 1
    shape = np.array(grid.shape)
    rmin = grid.min_pt.copy()
    rmax = grid.max_pt.copy()
    rshape = list(shape)

    # Shifted axes (full length to avoid ugly double indexing)
    shifted = np.zeros(grid.ndim, dtype=bool)
    shifted[axes] = shift_list
    rmin[shifted] = -np.pi / stride[shifted]
    # Length min->max increases by double the shift, so we
    # have to compensate by a full stride
    rmax[shifted] = (-rmin[shifted] -
                     2 * np.pi / (stride[shifted] * shape[shifted]))

    # Non-shifted axes
    not_shifted = np.zeros(grid.ndim, dtype=bool)
    not_shifted[axes] = np.logical_not(shift_list)
    rmin[not_shifted] = ((-1.0 + 1.0 / shape[not_shifted]) *
                         np.pi / stride[not_shifted])
    rmax[not_shifted] = -rmin[not_shifted]

    # Change last axis shape and max if halfcomplex
    if halfcomplex:
        rshape[axes[-1]] = shape[axes[-1]] // 2 + 1

        # - Odd and shifted: - stride / 2
        # - Even and not shifted: + stride / 2
        # - Otherwise: 0
        last_odd = shape[axes[-1]] % 2 == 1
        last_shifted = shift_list[-1]
        half_rstride = np.pi / (shape[axes[-1]] * stride[axes[-1]])

        if last_odd and last_shifted:
            rmax[axes[-1]] = -half_rstride
        elif not last_odd and not last_shifted:
            rmax[axes[-1]] = half_rstride
        else:
            rmax[axes[-1]] = 0

    return uniform_grid(rmin, rmax, rshape)
python
{ "resource": "" }
q35416
realspace_grid
train
def realspace_grid(recip_grid, x0, axes=None, halfcomplex=False,
                   halfcx_parity='even'):
    """Return the real space grid from the given reciprocal grid.

    Given a reciprocal grid::

        xi[j] = xi[0] + j * sigma,

    with a multi-index ``j = (j[0], ..., j[d-1])`` in the range
    ``0 <= j < M``, this function calculates the original grid::

        x[k] = x[0] + k * s

    by using a provided ``x[0]`` and calculating the stride ``s``.

    If the reciprocal grid is interpreted as coming from a usual
    complex-to-complex FFT, it is ``N == M``, and the stride is::

        s = 2*pi / (sigma * N)

    For a reciprocal grid from a real-to-complex (half-complex) FFT,
    it is ``M[i] = floor(N[i]/2) + 1`` in the last transform axis
    ``i``. To resolve the ambiguity regarding the parity of ``N[i]``,
    it must be specified if the output shape should be even or odd,
    resulting in::

        odd : N[i] = 2 * M[i] - 1
        even: N[i] = 2 * M[i] - 2

    The output stride is calculated with this ``N`` as above in this
    case.

    Parameters
    ----------
    recip_grid : uniform `RectGrid`
        Sampling grid in reciprocal space.
    x0 : `array-like`
        Desired minimum point of the real space grid.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the real space grid. The
        sequence must have the same length as ``shift`` if the latter
        is given as a sequence. ``None`` means "all axes".
    halfcomplex : bool, optional
        If ``True``, interpret the given grid as the reciprocal as
        used in a half-complex FFT (see above). Otherwise, the grid is
        regarded as being used in a complex-to-complex transform.
    halfcx_parity : {'even', 'odd'}
        Use this parity for the shape of the returned grid in the last
        axis of ``axes`` in the case ``halfcomplex=True``

    Returns
    -------
    irecip : uniform `RectGrid`
        The inverse reciprocal grid.
    """
    if axes is None:
        axes = list(range(recip_grid.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    rstride = recip_grid.stride
    rshape = recip_grid.shape

    # Calculate shape of the output grid by adjusting in axes[-1]
    irshape = list(rshape)
    if halfcomplex:
        if str(halfcx_parity).lower() == 'even':
            irshape[axes[-1]] = 2 * rshape[axes[-1]] - 2
        elif str(halfcx_parity).lower() == 'odd':
            irshape[axes[-1]] = 2 * rshape[axes[-1]] - 1
        else:
            raise ValueError("`halfcomplex` parity '{}' not understood"
                             "".format(halfcx_parity))

    irmin = np.asarray(x0)
    irshape = np.asarray(irshape)
    irstride = np.copy(rstride)
    # Invert the reciprocal relation sigma = 2*pi / (s * N) in the
    # transformed axes; the other axes keep their stride.
    irstride[axes] = 2 * np.pi / (irshape[axes] * rstride[axes])
    irmax = irmin + (irshape - 1) * irstride

    return uniform_grid(irmin, irmax, irshape)
python
{ "resource": "" }
q35417
dft_preprocess_data
train
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coodinates of the
    real-space and reciprocal grids, respectively. The sign of the
    exponent depends on the choice of ``sign``. In discretized form,
    this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0, it
    is ``xi[0] = pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have :math:``xi[0] = -pi/s``, thus the
    array is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the
        negative direction. With a sequence, this option is applied
        separately on each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence. Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed. For real data type, this
        is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the
        returned object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_numeric_dtype(arr.dtype):
        raise ValueError('array has non-numeric data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        arr = arr.astype('float64')

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shape = arr.shape
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr

    # BUGFIX: this guard previously tested `not shift`, which is always
    # False for a non-empty sequence such as `shift=[False, True]`, so
    # real in-place input slipped through and crashed later with an
    # obscure numpy casting error. Testing the normalized `shift_list`
    # (as in the copy branch above) raises the intended ValueError.
    if is_real_dtype(out.dtype) and not all(shift_list):
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        """Return the 1D factor array for one axis."""
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
python
{ "resource": "" }
q35418
_interp_kernel_ft
train
def _interp_kernel_ft(norm_freqs, interp): """Scaled FT of a one-dimensional interpolation kernel. For normalized frequencies ``-1/2 <= xi <= 1/2``, this function returns:: sinc(pi * xi)**k / sqrt(2 * pi) where ``k=1`` for 'nearest' and ``k=2`` for 'linear' interpolation. Parameters ---------- norm_freqs : `numpy.ndarray` Normalized frequencies between -1/2 and 1/2 interp : {'nearest', 'linear'} Type of interpolation kernel Returns ------- ker_ft : `numpy.ndarray` Values of the kernel FT at the given frequencies """ # Numpy's sinc(x) is equal to the 'math' sinc(pi * x) ker_ft = np.sinc(norm_freqs) interp_ = str(interp).lower() if interp_ == 'nearest': pass elif interp_ == 'linear': ker_ft *= ker_ft else: raise ValueError("`interp` '{}' not understood".format(interp)) ker_ft /= np.sqrt(2 * np.pi) return ker_ft
python
{ "resource": "" }
q35419
dft_postprocess_data
train
def dft_postprocess_data(arr, real_grid, recip_grid, shift, axes,
                         interp, sign='-', op='multiply', out=None):
    """Post-process the Fourier-space data after DFT.

    This function multiplies the given data with the separable
    function::

        q(xi) = exp(+- 1j * dot(x[0], xi)) * s * phi_hat(xi_bar)

    where ``x[0]`` and ``s`` are the minimum point and the stride of
    the real-space grid, respectively, and ``phi_hat(xi_bar)`` is the
    FT of the interpolation kernel. The sign of the exponent depends
    on the choice of ``sign``. Note that for ``op='divide'`` the
    multiplication with ``s * phi_hat(xi_bar)`` is replaced by a
    division with the same array.

    In discretized form on the reciprocal grid, the exponential part
    of this function becomes an array::

        q[k] = exp(+- 1j * dot(x[0], xi[k]))

    and the arguments ``xi_bar`` to the interpolation kernel
    are the normalized frequencies::

        for 'shift=True'  : xi_bar[k] = -pi + pi * (2*k) / N
        for 'shift=False' : xi_bar[k] = -pi + pi * (2*k+1) / N

    See [Pre+2007], Section 13.9 "Computing Fourier Integrals Using
    the FFT" for a similar approach.

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. An array with real data type is
        converted to its complex counterpart.
    real_grid : uniform `RectGrid`
        Real space grid in the transform.
    recip_grid : uniform `RectGrid`
        Reciprocal grid in the transform
    shift : bool or sequence of bools
        If ``True``, the grid is shifted by half a stride in the
        negative direction in the corresponding axes. The sequence
        must have the same length as ``axes``.
    axes : int or sequence of ints
        Dimensions along which to take the transform. The sequence
        must have the same length as ``shifts``.
    interp : string or sequence of strings
        Interpolation scheme used in the real-space.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    op : {'multiply', 'divide'}, optional
        Operation to perform with the stride times the interpolation
        kernel FT
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``, an
        in-place modification is performed.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the post-processing. If ``out`` was given, the
        returned object is a reference to it.

    References
    ----------
    [Pre+2007] Press, W H, Teukolsky, S A, Vetterling, W T, and
    Flannery, B P. *Numerical Recipes in C - The Art of Scientific
    Computing* (Volume 3). Cambridge University Press, 2007.
    """
    arr = np.asarray(arr)
    if is_real_floating_dtype(arr.dtype):
        arr = arr.astype(complex_dtype(arr.dtype))
    elif not is_complex_floating_dtype(arr.dtype):
        raise ValueError('array data type {} is not a complex floating point '
                         'data type'.format(dtype_repr(arr.dtype)))

    if out is None:
        out = arr.copy()
    elif out is not arr:
        out[:] = arr

    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)

    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)

    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    op, op_in = str(op).lower(), op
    if op not in ('multiply', 'divide'):
        raise ValueError("kernel `op` '{}' not understood".format(op_in))

    # Make a list from interp if that's not the case already
    if is_string(interp):
        interp = [str(interp).lower()] * arr.ndim

    onedim_arrs = []
    for ax, shift, intp in zip(axes, shift_list, interp):
        x = real_grid.min_pt[ax]
        xi = recip_grid.coord_vectors[ax]

        # First part: exponential array
        onedim_arr = np.exp(imag * x * xi)

        # Second part: interpolation kernel
        len_dft = recip_grid.shape[ax]
        len_orig = real_grid.shape[ax]
        # A shorter reciprocal axis signals a half-complex transform
        halfcomplex = (len_dft < len_orig)
        odd = len_orig % 2

        fmin = -0.5 if shift else -0.5 + 1.0 / (2 * len_orig)
        if halfcomplex:
            # maximum lies around 0, possibly half a cell left or right of it
            if shift and odd:
                fmax = - 1.0 / (2 * len_orig)
            elif not shift and not odd:
                fmax = 1.0 / (2 * len_orig)
            else:
                fmax = 0.0
        else:  # not halfcomplex
            # maximum lies close to 0.5, half or full cell left of it
            if shift:
                # -0.5 + (N-1)/N = 0.5 - 1/N
                fmax = 0.5 - 1.0 / len_orig
            else:
                # -0.5 + 1/(2*N) + (N-1)/N = 0.5 - 1/(2*N)
                fmax = 0.5 - 1.0 / (2 * len_orig)

        freqs = np.linspace(fmin, fmax, num=len_dft)
        stride = real_grid.stride[ax]

        interp_kernel = _interp_kernel_ft(freqs, intp)
        interp_kernel *= stride

        if op == 'multiply':
            onedim_arr *= interp_kernel
        else:
            onedim_arr /= interp_kernel

        onedim_arrs.append(onedim_arr.astype(out.dtype, copy=False))

    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
python
{ "resource": "" }
q35420
reciprocal_space
train
def reciprocal_space(space, axes=None, halfcomplex=False, shift=True,
                     **kwargs):
    """Return the range of the Fourier transform on ``space``.

    Parameters
    ----------
    space : `DiscreteLp`
        Real space whose reciprocal is calculated. It must be
        uniformly discretized.
    axes : sequence of ints, optional
        Dimensions along which the Fourier transform is taken.
        Default: all axes
    halfcomplex : bool, optional
        If ``True``, take only the negative frequency part along the
        last axis for. For ``False``, use the full frequency space.
        This option can only be used if ``space`` is a space of
        real-valued functions.
    shift : bool or sequence of bools, optional
        If ``True``, the reciprocal grid is shifted by half a stride
        in the negative direction. With a boolean sequence, this
        option is applied separately to each axis.
        If a sequence is provided, it must have the same length as
        ``axes`` if supplied. Note that this must be set to ``True``
        in the halved axis in half-complex transforms.
        Default: ``True``
    impl : string, optional
        Implementation back-end for the created space.
        Default: ``'numpy'``
    exponent : float, optional
        Create a space with this exponent. By default, the conjugate
        exponent ``q = p / (p - 1)`` of the exponent of ``space`` is
        used, where ``q = inf`` for ``p = 1`` and vice versa.
    dtype : optional
        Complex data type of the created space. By default, the
        complex counterpart of ``space.dtype`` is used.

    Returns
    -------
    rspace : `DiscreteLp`
        Reciprocal of the input ``space``. If ``halfcomplex=True``,
        the upper end of the domain (where the half space ends) is
        chosen to coincide with the grid node.
    """
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` {!r} is not a `DiscreteLp` instance'
                        ''.format(space))

    if axes is None:
        axes = tuple(range(space.ndim))
    axes = normalized_axes_tuple(axes, space.ndim)

    if not all(space.is_uniform_byaxis[axis] for axis in axes):
        raise ValueError('`space` is not uniformly discretized in the '
                         '`axes` of the transform')

    if halfcomplex and space.field != RealNumbers():
        raise ValueError('`halfcomplex` option can only be used with real '
                         'spaces')

    exponent = kwargs.pop('exponent', None)
    if exponent is None:
        exponent = conj_exponent(space.exponent)

    dtype = kwargs.pop('dtype', None)
    if dtype is None:
        dtype = complex_dtype(space.dtype)
    else:
        if not is_complex_floating_dtype(dtype):
            raise ValueError('{} is not a complex data type'
                             ''.format(dtype_repr(dtype)))

    impl = kwargs.pop('impl', 'numpy')

    # Calculate range
    recip_grid = reciprocal_grid(space.grid, shift=shift,
                                 halfcomplex=halfcomplex, axes=axes)

    # Need to do this for axes of length 1 that are not transformed
    non_axes = [i for i in range(space.ndim) if i not in axes]
    min_pt = {i: space.min_pt[i] for i in non_axes}
    max_pt = {i: space.max_pt[i] for i in non_axes}

    # Make a partition with nodes on the boundary in the last transform axis
    # if `halfcomplex == True`, otherwise a standard partition.
    if halfcomplex:
        max_pt[axes[-1]] = recip_grid.max_pt[axes[-1]]

    part = uniform_partition_fromgrid(recip_grid, min_pt, max_pt)

    # Use convention of adding a hat to represent fourier transform of variable
    axis_labels = list(space.axis_labels)
    for i in axes:
        # Avoid double math
        label = axis_labels[i].replace('$', '')
        axis_labels[i] = '$\\^{{{}}}$'.format(label)

    recip_spc = uniform_discr_frompartition(part, exponent=exponent,
                                            dtype=dtype, impl=impl,
                                            axis_labels=axis_labels)

    return recip_spc
python
{ "resource": "" }
q35421
_initialize_if_needed
train
def _initialize_if_needed():
    """Initialize ``TENSOR_SPACE_IMPLS`` if not already done."""
    global IS_INITIALIZED, TENSOR_SPACE_IMPLS
    if IS_INITIALIZED:
        return

    # Imported lazily since pkg_resources has long import time
    from pkg_resources import iter_entry_points

    for entry_point in iter_entry_points(group='odl.space', name=None):
        try:
            backend = entry_point.load()
        except ImportError:
            # Backend not importable in this environment; skip it
            pass
        else:
            TENSOR_SPACE_IMPLS.update(backend.tensor_space_impls())

    IS_INITIALIZED = True
python
{ "resource": "" }
q35422
tensor_space_impl
train
def tensor_space_impl(impl):
    """Tensor space class corresponding to the given impl name.

    Parameters
    ----------
    impl : str
        Name of the implementation, see `tensor_space_impl_names` for
        the full list.

    Returns
    -------
    tensor_space_impl : type
        Class inheriting from `TensorSpace`.

    Raises
    ------
    ValueError
        If ``impl`` is not a valid name of a tensor space implementation.
    """
    if impl != 'numpy':
        # Shortcut to improve "import odl" times since most users do not use
        # non-numpy backends; entry-point discovery is only triggered here.
        _initialize_if_needed()

    try:
        return TENSOR_SPACE_IMPLS[impl]
    except KeyError:
        # Fixed typo in the user-facing message ("implmentation")
        raise ValueError("`impl` {!r} does not correspond to a valid tensor "
                         "space implementation".format(impl))
python
{ "resource": "" }
q35423
steepest_descent
train
def steepest_descent(f, x, line_search=1.0, maxiter=1000, tol=1e-16,
                     projection=None, callback=None):
    r"""Steepest (gradient) descent method to minimize an objective function.

    Solves :math:`\min f(x)` for a differentiable functional ``f`` by
    repeatedly stepping along the negative gradient, updating ``x`` in
    place.  With an appropriate ``projection`` it can also handle
    constrained problems by projecting each iterate onto the feasible set.

    Parameters
    ----------
    f : `Functional`
        Goal functional. Needs to have ``f.gradient``.
    x : ``f.domain`` element
        Starting point of the iteration, updated in place.
    line_search : float or `LineSearch`, optional
        Strategy to choose the step length. A float is interpreted as a
        fixed step length.
    maxiter : int, optional
        Maximum number of iterations.
    tol : float, optional
        Tolerance for terminating the iteration (on the squared gradient
        norm).
    projection : callable, optional
        Called with the iterate after each update; should modify its
        argument in place (e.g. to enforce positivity).
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    See Also
    --------
    odl.solvers.iterative.iterative.landweber :
        Optimized solver for the case ``f(x) = ||Ax - b||_2^2``
    odl.solvers.iterative.iterative.conjugate_gradient :
        Optimized solver for the case ``f(x) = x^T Ax - 2 x^T b``
    """
    gradient_op = f.gradient
    if x not in gradient_op.domain:
        raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
                        ''.format(x, gradient_op.domain))

    # A plain number means a constant step length.
    if not callable(line_search):
        line_search = ConstantLineSearch(line_search)

    # Reusable buffer for the gradient evaluation.
    grad_buf = gradient_op.range.element()

    for _ in range(maxiter):
        gradient_op(x, out=grad_buf)

        # Directional derivative along the descent direction -grad.
        deriv_along_dir = -grad_buf.norm() ** 2
        if np.abs(deriv_along_dir) < tol:
            # Gradient numerically vanished: stationary point reached.
            return

        step_len = line_search(x, -grad_buf, deriv_along_dir)

        # x <- x - step * grad
        x.lincomb(1, x, -step_len, grad_buf)

        if projection is not None:
            projection(x)

        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35424
adam
train
def adam(f, x, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-8,
         maxiter=1000, tol=1e-16, callback=None):
    r"""ADAM method to minimize an objective function.

    General implementation of ADAM for solving

    .. math::
        \min f(x)

    where :math:`f` is a differentiable functional.

    The algorithm is described in [KB2015]
    (`arxiv <https://arxiv.org/abs/1412.6980>`_). All parameter names and
    default values are taken from the article.

    Parameters
    ----------
    f : `Functional`
        Goal functional. Needs to have ``f.gradient``.
    x : ``f.domain`` element
        Starting point of the iteration, updated in place.
    learning_rate : positive float, optional
        Step length of the method.
    beta1 : float in [0, 1), optional
        Update rate for first order moment estimate.
    beta2 : float in [0, 1), optional
        Update rate for second order moment estimate.
    eps : positive float, optional
        A small constant for numerical stability.
    maxiter : int, optional
        Maximum number of iterations.
    tol : positive float, optional
        Tolerance that should be used for terminating the iteration.
    callback : callable, optional
        Object executing code per iteration, e.g. plotting each iterate.

    See Also
    --------
    odl.solvers.smooth.gradient.steepest_descent : Simple gradient descent.
    odl.solvers.iterative.iterative.landweber :
        Optimized solver for the case ``f(x) = ||Ax - b||_2^2``.
    odl.solvers.iterative.iterative.conjugate_gradient :
        Optimized solver for the case ``f(x) = x^T Ax - 2 x^T b``.

    References
    ----------
    [KB2015] Kingma, D P and Ba, J. *Adam: A Method for Stochastic
    Optimization*, ICLR 2015.
    """
    grad = f.gradient
    if x not in grad.domain:
        raise TypeError('`x` {!r} is not in the domain of `grad` {!r}'
                        ''.format(x, grad.domain))

    # First and second raw moment estimates (m_t and v_t in the paper),
    # initialized to zero.
    m = grad.domain.zero()
    v = grad.domain.zero()

    # Reusable buffer for the gradient evaluation.
    grad_x = grad.range.element()
    for _ in range(maxiter):
        grad(x, out=grad_x)

        # Terminate once the gradient norm drops below the tolerance.
        if grad_x.norm() < tol:
            return

        # Exponential moving averages of gradient and squared gradient.
        m.lincomb(beta1, m, 1 - beta1, grad_x)
        v.lincomb(beta2, v, 1 - beta2, grad_x ** 2)

        # NOTE(review): the paper applies iteration-dependent bias
        # correction sqrt(1 - beta2^t) / (1 - beta1^t); here a constant
        # factor sqrt(1 - beta2) / (1 - beta1) is used -- confirm this
        # simplification is intended.
        step = learning_rate * np.sqrt(1 - beta2) / (1 - beta1)

        # x <- x - step * m / (sqrt(v) + eps)
        x.lincomb(1, x, -step, m / (np.sqrt(v) + eps))

        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35425
_approx_equal
train
def _approx_equal(x, y, eps): """Test if elements ``x`` and ``y`` are approximately equal. ``eps`` is a given absolute tolerance. """ if x.space != y.space: return False if x is y: return True try: return x.dist(y) <= eps except NotImplementedError: try: return x == y except NotImplementedError: return False
python
{ "resource": "" }
q35426
get_data_dir
train
def get_data_dir():
    """Return the local dataset cache directory, creating it if needed.

    The root can be overridden via the ``ODL_HOME`` environment variable;
    by default ``~/.odl`` is used.
    """
    odl_root = os.environ.get('ODL_HOME', expanduser(join('~', '.odl')))
    dataset_dir = join(odl_root, 'datasets')
    if not exists(dataset_dir):
        os.makedirs(dataset_dir)
    return dataset_dir
python
{ "resource": "" }
q35427
get_data
train
def get_data(filename, subset, url):
    """Get a dataset from a url with local caching.

    Parameters
    ----------
    filename : str
        Name of the file, for caching.
    subset : str
        To what subset the file belongs (e.g. 'ray_transform'). Each
        subset is saved in a separate subfolder.
    url : str
        url to the dataset online.

    Returns
    -------
    dataset : dict
        Dictionary containing the dataset.
    """
    # Per-subset cache directory, created on demand.
    cache_dir = join(get_data_dir(), subset)
    if not exists(cache_dir):
        os.makedirs(cache_dir)

    filename = join(cache_dir, filename)

    # Download only on a cache miss.
    if not exists(filename):
        print('data {}/{} not in local storage, downloading from {}'
              ''.format(subset, filename, url))

        # Stream the remote file into the local cache.
        with contextlib.closing(urlopen(url)) as data_url:
            with open(filename, 'w+b') as storage_file:
                copyfileobj(data_url, storage_file)

    # Parse the cached MATLAB file into a dict.
    with open(filename, 'rb') as storage_file:
        data_dict = io.loadmat(storage_file)

    return data_dict
python
{ "resource": "" }
q35428
forward_backward_pd
train
def forward_backward_pd(x, f, g, L, h, tau, sigma, niter,
                        callback=None, **kwargs):
    r"""The forward-backward primal-dual splitting algorithm.

    The algorithm minimizes the sum of several convex functionals composed
    with linear operators::

        min_x f(x) + sum_i g_i(L_i x) + h(x)

    where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
    operators, and ``h`` is a convex and differentiable functional.  If
    the optional strongly convex functionals ``l_i`` are given, the more
    general problem with infimal convolutions ``(g_i @ l_i)(L_i x)`` is
    solved instead; their strong convexity makes the convex conjugates
    ``l_i^*`` differentiable.

    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        The functional ``f``. Needs to have ``f.proximal``.
    g : sequence of `Functional`'s
        The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
    L : sequence of `Operator`'s
        Sequence of linear operators ``L_i``, with as many elements as
        ``g``.
    h : `Functional`
        The functional ``h``. Needs to have ``h.gradient``.
    tau : float
        Step size-like parameter for ``f``.
    sigma : sequence of floats
        Sequence of step size-like parameters for the sequence ``g``.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Other Parameters
    ----------------
    l : sequence of `Functional`'s, optional
        The functionals ``l_i``. Needs to have ``l_i.convex_conj.gradient``.
        If omitted, the simpler problem without ``l_i`` will be considered.

    Notes
    -----
    To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
    and :math:`L_i` need to satisfy

    .. math::
        2 \min \{ \frac{1}{\tau}, \frac{1}{\sigma_1}, \ldots,
        \frac{1}{\sigma_m} \} \cdot \min\{ \eta, \nu_1, \ldots, \nu_m \}
        \cdot \sqrt{1 - \tau \sum_{i=1}^n \sigma_i ||L_i||^2} > 1,

    where :math:`\eta^{-1}` is the Lipschitz constant of :math:`\nabla h`
    and :math:`\nu_i` those of :math:`\nabla l_i^*` (taken as
    :math:`\infty` when the ``l_i`` are omitted).  See [BC2015] for the
    full statement.

    See Also
    --------
    odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
        Solver for similar problems without differentiability in any
        of the terms.
    odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
        Solver for similar problems without differentiability in any
        of the terms.

    References
    ----------
    [BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
    a forward-backward type primal-dual splitting algorithm for convex
    optimization problems*. Optimization, 64.1 (2015), pp 5--23.

    [PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
    Foundations and Trends in Optimization, 1 (2014), pp 127-239.
    """
    # Problem size
    m = len(L)

    # Validate input
    if not all(isinstance(op, Operator) for op in L):
        raise ValueError('`L` not a sequence of operators')
    if not all(op.is_linear for op in L):
        raise ValueError('not all operators in `L` are linear')
    if not all(x in op.domain for op in L):
        raise ValueError('`x` not in the domain of all operators in `L`')
    if len(sigma) != m:
        raise ValueError('len(sigma) != len(L)')
    if len(g) != m:
        # Message now refers to the parameter name, not an internal variable
        raise ValueError('len(g) != len(L)')

    # Extract operators
    prox_cc_g = [gi.convex_conj.proximal for gi in g]
    grad_h = h.gradient
    prox_f = f.proximal

    l = kwargs.pop('l', None)
    if l is not None:
        if len(l) != m:
            # Message now refers to the parameter name `l`
            raise ValueError('`l` not same length as `L`')
        grad_cc_l = [li.convex_conj.gradient for li in l]

    if kwargs:
        raise TypeError('unexpected keyword argument: {}'.format(kwargs))

    # Pre-allocate values
    v = [Li.range.zero() for Li in L]
    y = x.space.zero()

    for k in range(niter):
        # BUGFIX: snapshot the current iterate. The former ``x_old = x``
        # merely aliased ``x``; after the in-place proximal update below,
        # the relaxation ``y = 2 x - x_old`` degenerated to ``y = x``.
        x_old = x.copy()

        # Primal forward-backward step:
        # x <- prox_{tau f}(x - tau * (grad h(x) + sum_i L_i^* v_i))
        tmp_1 = grad_h(x) + sum(Li.adjoint(vi) for Li, vi in zip(L, v))
        prox_f(tau)(x - tau * tmp_1, out=x)

        # Over-relaxation: y = 2 * x_new - x_old
        y.lincomb(2.0, x, -1, x_old)

        # Dual forward-backward steps
        for i in range(m):
            if l is not None:
                # Gradients of l_i^* were given: include the forward step.
                tmp_2 = sigma[i] * (L[i](y) - grad_cc_l[i](v[i]))
            else:
                # No l_i given; the gradient step is omitted (equivalent
                # to grad l_i^* = 0).
                tmp_2 = sigma[i] * L[i](y)
            prox_cc_g[i](sigma[i])(v[i] + tmp_2, out=v[i])

        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35429
samples
train
def samples(*sets):
    """Generate samples from the given sets using their ``examples`` method.

    Parameters
    ----------
    set1, ..., setN : `Set` instance
        Set(s) from which to generate the samples.

    Returns
    -------
    samples : `generator`
        Generator that yields tuples of examples from the sets.

    Examples
    --------
    >>> R, C = odl.RealNumbers(), odl.ComplexNumbers()
    >>> for [name_x, x], [name_y, y] in samples(R, C): pass  # use examples
    """
    if len(sets) == 1:
        # Single set: yield its examples directly, without tuple wrapping.
        for example in sets[0].examples:
            yield example
    else:
        # Several sets: yield the cartesian product of all example streams.
        for combo in product(*(set_.examples for set_ in sets)):
            yield combo
python
{ "resource": "" }
q35430
cuboid
train
def cuboid(space, min_pt=None, max_pt=None):
    """Rectangular cuboid phantom.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be created.
    min_pt : array-like of shape ``(space.ndim,)``, optional
        Lower left corner of the cuboid. If ``None`` is given, a quarter
        of the extent from ``space.min_pt`` towards the inside is chosen.
    max_pt : array-like of shape ``(space.ndim,)``, optional
        Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
        plus half the extent is chosen.

    Returns
    -------
    phantom : `DiscretizedSpaceElement`
        The generated cuboid phantom in ``space``: 1 inside the box
        ``[min_pt, max_pt]``, 0 outside.
    """
    dom_min = np.asarray(space.domain.min())
    dom_max = np.asarray(space.domain.max())

    # Defaults: box in the middle of the domain with half the extent.
    if min_pt is None:
        min_pt = dom_min * 0.75 + dom_max * 0.25
    if max_pt is None:
        max_pt = dom_min * 0.25 + dom_max * 0.75

    min_pt = np.atleast_1d(min_pt)
    max_pt = np.atleast_1d(max_pt)

    if min_pt.shape != (space.ndim,):
        raise ValueError('shape of `min_pt` must be {}, got {}'
                         ''.format((space.ndim,), min_pt.shape))
    if max_pt.shape != (space.ndim,):
        raise ValueError('shape of `max_pt` must be {}, got {}'
                         ''.format((space.ndim,), max_pt.shape))

    def phantom(x):
        # Indicator of the axis-aligned box, accumulated per coordinate.
        inside = True
        for coord, lo, hi in zip(x, min_pt, max_pt):
            inside = (inside
                      & np.less_equal(lo, coord)
                      & np.less_equal(coord, hi))
        return inside

    return space.element(phantom)
python
{ "resource": "" }
q35431
defrise
train
def defrise(space, nellipses=8, alternating=False, min_pt=None, max_pt=None):
    """Phantom with regularly spaced ellipses.

    This phantom is often used to verify cone-beam algorithms.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be created, must be 2- or
        3-dimensional.
    nellipses : int, optional
        Number of ellipses. More ellipses makes each ellipse thinner.
    alternating : bool, optional
        True if the ellipses should have alternating densities (+1, -1),
        otherwise all ellipses have value +1.
    min_pt, max_pt : array-like, optional
        If given, bounding box of the phantom instead of ``space.min_pt``
        and ``space.max_pt`` (see `ellipsoid_phantom` for the exact
        semantics).

    Returns
    -------
    phantom : ``space`` element
        The generated phantom in ``space``.

    See Also
    --------
    odl.phantom.transmission.shepp_logan
    """
    # Delegate: build the parameter rows, then rasterize them.
    rows = defrise_ellipses(space.ndim, nellipses=nellipses,
                            alternating=alternating)
    return ellipsoid_phantom(space, rows, min_pt, max_pt)
python
{ "resource": "" }
q35432
defrise_ellipses
train
def defrise_ellipses(ndim, nellipses=8, alternating=False):
    """Ellipses for the standard Defrise phantom in 2 or 3 dimensions.

    Parameters
    ----------
    ndim : {2, 3}
        Dimension of the space for the ellipses/ellipsoids.
    nellipses : int, optional
        Number of ellipses. More ellipses makes each ellipse thinner.
    alternating : bool, optional
        True if the ellipses should have alternating densities (+1, -1),
        otherwise all ellipses have value +1.

    Returns
    -------
    ellipses : list of lists
        Parameter rows as consumed by `ellipsoid_phantom`; an empty list
        for unsupported ``ndim``.

    See Also
    --------
    odl.phantom.geometric.ellipsoid_phantom :
        Function for creating arbitrary ellipsoids phantoms
    odl.phantom.transmission.shepp_logan_ellipsoids
    """
    rows = []
    for i in range(nellipses):
        # Alternate +1 / -1 densities if requested, else all +1.
        if alternating:
            value = (-1.0 + 2.0 * (i % 2))
        else:
            value = 1.0

        # Thin axis shrinks with the number of ellipses; centers are
        # spread evenly along the last coordinate axis.
        thin_axis = 0.5 / (nellipses + 1)
        offset = -1 + 2.0 / (nellipses + 1.0) * (i + 1)

        if ndim == 2:
            # [value, axis_1, axis_2, center_x, center_y, rotation]
            rows.append([value, 0.5, thin_axis, 0.0, offset, 0])
        elif ndim == 3:
            # [value, axes (3), center (3), Euler angles (3)]
            rows.append([value, 0.5, 0.5, thin_axis,
                         0.0, 0.0, offset, 0, 0, 0])

    return rows
python
{ "resource": "" }
q35433
indicate_proj_axis
train
def indicate_proj_axis(space, scale_structures=0.5):
    """Phantom indicating along which axis it is projected.

    The number (n) of rectangles in a parallel-beam projection along a main
    axis (0, 1, or 2) indicates the projection to be along the (n-1)the
    dimension.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be created, must be 2- or
        3-dimensional.
    scale_structures : positive float in (0, 1], optional
        Scales objects (cube, cuboids)

    Returns
    -------
    phantom : ``space`` element
        Projection helper phantom in ``space``.

    Examples
    --------
    Phantom in 2D space:

    >>> space = odl.uniform_discr([0, 0], [1, 1], shape=(8, 8))
    >>> phantom = indicate_proj_axis(space).asarray()
    >>> print(odl.util.array_str(phantom, nprint=10))
    [[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  1.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  1.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]]

    >>> space = odl.uniform_discr([0] * 3, [1] * 3, [8, 8, 8])
    >>> phantom = odl.phantom.indicate_proj_axis(space).asarray()
    >>> axis_sum_0 = np.sum(phantom, axis=0)
    >>> print(odl.util.array_str(axis_sum_0, nprint=10))
    [[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  3.,  3.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  3.,  3.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]]
    >>> axis_sum_1 = np.sum(phantom, axis=1)
    >>> print(odl.util.array_str(axis_sum_1, nprint=10))
    [[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  2.,  2.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  2.,  2.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  1.,  1.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]]
    >>> axis_sum_2 = np.sum(phantom, axis=2)
    >>> print(odl.util.array_str(axis_sum_2, nprint=10))
    [[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  2.,  2.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  2.,  2.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  2.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  2.,  0.,  0.,  0.,  0.],
     [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]]
    """
    if not 0 < scale_structures <= 1:
        raise ValueError('`scale_structures` ({}) is not in (0, 1]'
                         ''.format(scale_structures))

    # NOTE(review): `assert` is stripped under `python -O`; only 2D and 3D
    # spaces are supported here.
    assert space.ndim in (2, 3)

    shape = space.shape
    phan = np.zeros(shape)
    # From here on `shape` holds the largest valid index per axis,
    # `cen` the (rounded) center index and `dx` the structure half-size
    # in pixels (at least 1).
    shape = np.array(shape) - 1
    cen = np.round(0.5 * shape)
    dx = np.floor(scale_structures * 0.25 * shape)
    dx[dx == 0] = 1

    # cube of size 2 * dx, offset in x axis, symmetric in others
    ix0 = int((cen - 3 * dx)[0])
    if space.ndim == 2:
        ix, iy = (cen - 1 * dx).astype(int)
        phan[ix0:ix, iy:-iy] = 1
    elif space.ndim == 3:
        ix, iy, iz = (cen - 1 * dx).astype(int)
        phan[ix0:ix, iy:-iy, iz:-iz] = 1

    # 1st cuboid of size (dx[0], dx[1], 2 * dx[2]), offset in x and y axes,
    # symmetric in z axis
    ix0 = int((cen + 1 * dx)[1])
    ix1 = int((cen + 2 * dx)[1])
    iy0 = int(cen[1])
    if space.ndim == 2:
        phan[ix0:ix1, iy0:-iy] = 1
    elif space.ndim == 3:
        iz = int((cen - dx)[2])
        phan[ix0:ix1, iy0:-iy, iz:-iz] = 1

    # 2nd cuboid of (dx[0], dx[1], 2 * dx[2]) touching the first diagonally
    # at a long edge; offset in x and y axes, symmetric in z axis
    ix0 = int((cen + 2 * dx)[1])
    ix1 = int((cen + 3 * dx)[1])
    iy1 = int(cen[1])
    if space.ndim == 2:
        phan[ix0:ix1, iy:iy1] = 1
    elif space.ndim == 3:
        iz = int((cen - dx)[2])
        phan[ix0:ix1, iy:iy1, iz:-iz] = 1

    return space.element(phan)
python
{ "resource": "" }
q35434
_getshapes_2d
train
def _getshapes_2d(center, max_radius, shape): """Calculate indices and slices for the bounding box of a disk.""" index_mean = shape * center index_radius = max_radius / 2.0 * np.array(shape) # Avoid negative indices min_idx = np.maximum(np.floor(index_mean - index_radius), 0).astype(int) max_idx = np.ceil(index_mean + index_radius).astype(int) idx = [slice(minx, maxx) for minx, maxx in zip(min_idx, max_idx)] shapes = [(idx[0], slice(None)), (slice(None), idx[1])] return tuple(idx), tuple(shapes)
python
{ "resource": "" }
q35435
_ellipse_phantom_2d
train
def _ellipse_phantom_2d(space, ellipses):
    """Create a phantom of ellipses in 2d space.

    Parameters
    ----------
    space : `DiscreteLp`
        Uniformly discretized space in which the phantom should be
        generated. If ``space.shape`` is 1 in an axis, a corresponding slice
        of the phantom is created (instead of squashing the whole phantom
        into the slice).
    ellipses : list of lists
        Each row should contain the entries ::

            'value',
            'axis_1', 'axis_2',
            'center_x', 'center_y',
            'rotation'

        The provided ellipses need to be specified relative to the reference
        rectangle ``[-1, -1] x [1, 1]``. Angles are to be given in radians.

    Returns
    -------
    phantom : ``space`` element
        2D ellipse phantom in ``space``.

    See Also
    --------
    shepp_logan : The typical use-case for this function.
    """
    # Blank image
    p = np.zeros(space.shape, dtype=space.dtype)

    minp = space.grid.min_pt
    maxp = space.grid.max_pt

    # Create the pixel grid
    grid_in = space.grid.meshgrid

    # move points to [-1, 1]
    grid = []
    for i in range(2):
        mean_i = (minp[i] + maxp[i]) / 2.0
        # Where space.shape = 1, we have minp = maxp, so we set diff_i = 1
        # to avoid division by zero. Effectively, this allows constructing
        # a slice of a 2D phantom.
        diff_i = (maxp[i] - minp[i]) / 2.0 or 1.0
        grid.append((grid_in[i] - mean_i) / diff_i)

    for ellip in ellipses:
        assert len(ellip) == 6

        # Unpack one parameter row (see docstring for the layout).
        intensity = ellip[0]
        a_squared = ellip[1] ** 2
        b_squared = ellip[2] ** 2
        x0 = ellip[3]
        y0 = ellip[4]
        theta = ellip[5]

        # Inverse squared semi-axes; center mapped from [-1, 1] to [0, 1].
        scales = [1 / a_squared, 1 / b_squared]
        center = (np.array([x0, y0]) + 1.0) / 2.0

        # Create the offset x,y and z values for the grid
        if theta != 0:
            # Rotate the points to the expected coordinate system.
            ctheta = np.cos(theta)
            stheta = np.sin(theta)

            mat = np.array([[ctheta, stheta],
                            [-stheta, ctheta]])

            # Calculate the points that could possibly be inside the volume
            # Since the points are rotated, we cannot do anything directional
            # without more logic
            max_radius = np.sqrt(
                np.abs(mat).dot([a_squared, b_squared]))
            idx, shapes = _getshapes_2d(center, max_radius, space.shape)

            subgrid = [g[idi] for g, idi in zip(grid, shapes)]
            # Broadcastable rotated coordinates relative to the center.
            offset_points = [vec * (xi - x0i)[..., None]
                             for xi, vec, x0i in zip(subgrid,
                                                     mat.T,
                                                     [x0, y0])]
            rotated = offset_points[0] + offset_points[1]
            # Square in place to avoid an extra temporary array.
            np.square(rotated, out=rotated)
            radius = np.dot(rotated, scales)
        else:
            # Calculate the points that could possibly be inside the volume
            max_radius = np.sqrt([a_squared, b_squared])
            idx, shapes = _getshapes_2d(center, max_radius, space.shape)

            subgrid = [g[idi] for g, idi in zip(grid, shapes)]
            squared_dist = [ai * (xi - x0i) ** 2
                            for xi, ai, x0i in zip(subgrid,
                                                   scales,
                                                   [x0, y0])]

            # Parentheses to get best order for broadcasting
            radius = squared_dist[0] + squared_dist[1]

        # Find the points within the ellipse
        inside = radius <= 1

        # Add the ellipse intensity to those points.  ``p[idx]`` is a view
        # (basic slicing), so the in-place add writes through to ``p``.
        p[idx][inside] += intensity

    return space.element(p)
python
{ "resource": "" }
q35436
ellipsoid_phantom
train
def ellipsoid_phantom(space, ellipsoids, min_pt=None, max_pt=None):
    """Return a phantom given by ellipsoids.

    Parameters
    ----------
    space : `DiscreteLp`
        Space in which the phantom should be created, must be 2- or
        3-dimensional. If ``space.shape`` is 1 in an axis, a corresponding
        slice of the phantom is created (instead of squashing the whole
        phantom into the slice).
    ellipsoids : sequence of sequences
        If ``space`` is 2-dimensional, each row should contain the entries::

            'value',
            'axis_1', 'axis_2',
            'center_x', 'center_y',
            'rotation'

        If ``space`` is 3-dimensional, each row should contain the entries::

            'value',
            'axis_1', 'axis_2', 'axis_3',
            'center_x', 'center_y', 'center_z',
            'rotation_phi', 'rotation_theta', 'rotation_psi'

        The provided ellipsoids need to be specified relative to the
        reference rectangle ``[-1, -1] x [1, 1]``, or analogously in 3d.
        The angles are to be given in radians.
    min_pt, max_pt : array-like, optional
        If provided, use these vectors to determine the bounding box of the
        phantom instead of ``space.min_pt`` and ``space.max_pt``.
        It is currently required that ``min_pt >= space.min_pt`` and
        ``max_pt <= space.max_pt``, i.e., shifting or scaling outside the
        original space is not allowed.

        Providing one of them results in a shift, e.g., for ``min_pt``::

            new_min_pt = min_pt
            new_max_pt = space.max_pt + (min_pt - space.min_pt)

        Providing both results in a scaled version of the phantom.

    Notes
    -----
    The phantom is created by adding the values of each ellipse. The
    ellipses are defined by a center point
    ``(center_x, center_y, [center_z])``, the lengths of its principial
    axes ``(axis_1, axis_2, [axis_2])``, and a rotation angle ``rotation``
    in 2D or Euler angles ``(rotation_phi, rotation_theta, rotation_psi)``
    in 3D.

    This function is heavily optimized, achieving runtimes about 20 times
    faster than "trivial" implementations. It is therefore recommended to
    use it in all phantoms where applicable.

    The main optimization is that it only considers a subset of all the
    points when updating for each ellipse. It does this by first finding
    a subset of points that could possibly be inside the ellipse. This
    optimization is very good for "spherical" ellipsoids, but not so much
    for elongated or rotated ones.

    It also does calculations wherever possible on the meshgrid instead of
    individual points.

    Examples
    --------
    Create a circle with a smaller circle inside:

    >>> space = odl.uniform_discr([-1, -1], [1, 1], [5, 5])
    >>> ellipses = [[1.0, 1.0, 1.0, 0.0, 0.0, 0.0],
    ...             [1.0, 0.6, 0.6, 0.0, 0.0, 0.0]]
    >>> print(ellipsoid_phantom(space, ellipses))
    [[ 0.,  0.,  1.,  0.,  0.],
     [ 0.,  1.,  2.,  1.,  0.],
     [ 1.,  2.,  2.,  2.,  1.],
     [ 0.,  1.,  2.,  1.,  0.],
     [ 0.,  0.,  1.,  0.,  0.]]

    See Also
    --------
    odl.phantom.transmission.shepp_logan :
        Classical Shepp-Logan phantom, typically used for transmission
        imaging
    odl.phantom.transmission.shepp_logan_ellipsoids :
        Ellipses for the Shepp-Logan phantom
    odl.phantom.geometric.defrise_ellipses :
        Ellipses for the Defrise phantom
    """
    # Dispatch to the dimension-specific rasterizer.
    if space.ndim == 2:
        _phantom = _ellipse_phantom_2d
    elif space.ndim == 3:
        _phantom = _ellipsoid_phantom_3d
    else:
        raise ValueError('dimension not 2 or 3, no phantom available')

    if min_pt is None and max_pt is None:
        return _phantom(space, ellipsoids)

    else:
        # Generate a temporary space with given `min_pt` and `max_pt`
        # (snapped to the cell grid), create the phantom in that space and
        # resize to the target size for `space`.
        # The snapped points are constructed by finding the index of
        # `min/max_pt` in the space partition, indexing the partition with
        # that index, yielding a single-cell partition, and then taking
        # the lower-left/upper-right corner of that cell.
        if min_pt is None:
            snapped_min_pt = space.min_pt
        else:
            min_pt_cell = space.partition[space.partition.index(min_pt)]
            snapped_min_pt = min_pt_cell.min_pt

        if max_pt is None:
            snapped_max_pt = space.max_pt
        else:
            max_pt_cell = space.partition[space.partition.index(max_pt)]
            snapped_max_pt = max_pt_cell.max_pt
            # Avoid snapping to the next cell where max_pt falls exactly on
            # a boundary
            for i in range(space.ndim):
                if max_pt[i] in space.partition.cell_boundary_vecs[i]:
                    snapped_max_pt[i] = max_pt[i]

        tmp_space = uniform_discr_fromdiscr(
            space, min_pt=snapped_min_pt, max_pt=snapped_max_pt,
            cell_sides=space.cell_sides)

        tmp_phantom = _phantom(tmp_space, ellipsoids)

        # Place the sub-phantom at the right offset inside the full array.
        offset = space.partition.index(tmp_space.min_pt)
        return space.element(
            resize_array(tmp_phantom, space.shape, offset))
python
{ "resource": "" }
q35437
smooth_cuboid
train
def smooth_cuboid(space, min_pt=None, max_pt=None, axis=0):
    """Cuboid with smooth variations.

    Parameters
    ----------
    space : `DiscreteLp`
        Discretized space in which the phantom is supposed to be created.
    min_pt : array-like of shape ``(space.ndim,)``, optional
        Lower left corner of the cuboid. If ``None`` is given, a quarter
        of the extent from ``space.min_pt`` towards the inside is chosen.
    max_pt : array-like of shape ``(space.ndim,)``, optional
        Upper right corner of the cuboid. If ``None`` is given, ``min_pt``
        plus half the extent is chosen.
    axis : int or sequence of int
        Dimension(s) along which the smooth variation should happen.

    Returns
    -------
    phantom : ``space``-element
        The generated cuboid phantom in ``space``. Values have range
        [0, 1].
    """
    dom_min_pt = space.domain.min()
    dom_max_pt = space.domain.max()

    # Defaults: box in the middle of the domain with half the extent.
    if min_pt is None:
        min_pt = dom_min_pt * 0.75 + dom_max_pt * 0.25
    if max_pt is None:
        max_pt = dom_min_pt * 0.25 + dom_max_pt * 0.75

    min_pt = np.atleast_1d(min_pt)
    max_pt = np.atleast_1d(max_pt)
    axis = np.array(axis, dtype=int, ndmin=1)

    if min_pt.shape != (space.ndim,):
        raise ValueError('shape of `min_pt` must be {}, got {}'
                         ''.format((space.ndim,), min_pt.shape))
    if max_pt.shape != (space.ndim,):
        raise ValueError('shape of `max_pt` must be {}, got {}'
                         ''.format((space.ndim,), max_pt.shape))

    # Boolean mask: True (1) outside the box, False (0) inside.
    sign = 0
    for i, coord in enumerate(space.meshgrid):
        sign = sign | (coord < min_pt[i]) | (coord > max_pt[i])

    # Linear ramp(s) in [-1, 1] along the requested axes, summed.
    values = 0
    for i in axis:
        coord = space.meshgrid[i]
        extent = (dom_max_pt[i] - dom_min_pt[i])
        values = values + 2 * (coord - dom_min_pt[i]) / extent - 1

    # Properly scale using sign
    # NOTE(review): maps inside -> -2/axis.size and outside -> 1/axis.size,
    # flipping the ramp sign inside the box relative to outside --
    # presumably intentional for contrast; confirm against callers.
    sign = (3 * sign - 2) / axis.size

    # Fit in [0, 1]
    values = values * sign
    values = (values - np.min(values)) / (np.max(values) - np.min(values))

    return space.element(values)
python
{ "resource": "" }
q35438
tgv_phantom
train
def tgv_phantom(space, edge_smoothing=0.2):
    """Piecewise affine phantom.

    This phantom is taken from [Bre+2010] and includes both linearly
    varying regions and sharp discontinuities. It is designed to work
    well with Total Generalized Variation (TGV) type regularization.

    Parameters
    ----------
    space : `DiscreteLp`, 2 dimensional
        Discretized space in which the phantom is supposed to be created.
        Needs to be two-dimensional.
    edge_smoothing : nonnegative float, optional
        Smoothing of the edges of the phantom, given as smoothing width
        in units of minimum pixel size.

    Returns
    -------
    phantom : ``space``-element
        The generated phantom in ``space``. Values have range [0, 1].

    Notes
    -----
    The original phantom is given by a specific image. In this
    implementation, we extracted the underlying parameters and the
    phantom thus works with spaces of any shape. Due to this, small
    variations may occur when compared to the original phantom.

    References
    ----------
    [Bre+2010] K. Bredies, K. Kunisch, and T. Pock.
    *Total Generalized Variation*. SIAM Journal on Imaging Sciences,
    3(3):492-526, Jan. 2010
    """
    if space.ndim != 2:
        raise ValueError('`space.ndim` must be 2, got {}'
                         ''.format(space.ndim))

    y, x = space.meshgrid

    # Use a smooth sigmoid to get some anti-aliasing across edges.
    scale = edge_smoothing / np.min(space.shape)

    def sigmoid(val):
        # With nonzero smoothing, a logistic ramp of width `scale`;
        # otherwise a hard 0/1 step.
        if edge_smoothing != 0:
            val = val / scale
            return 1 / (1 + np.exp(-val))
        else:
            return (val > 0).astype(val.dtype)

    # Normalize to [0, 1]
    x = (x - np.min(x)) / (np.max(x) - np.min(x))
    y = (y - np.min(y)) / (np.max(y) - np.min(y))

    # Background: linear ramp
    values = -(x + y) / 2

    # Square-ish region, built as a product of three half-plane
    # indicators (constants extracted from the original image)
    indicator = np.ones(space.shape)
    indicator *= sigmoid(-(0.015199034981905914 * x - y
                           + 0.13896260554885403))
    indicator *= sigmoid((0.3333333333333323 * y - x + 0.598958333333334))
    indicator *= sigmoid((-2.4193548387096726 * y - x + 2.684979838709672))

    values += indicator * 2 * (x + y - 1)

    # Ellipse part: rotated, scaled ellipse indicator blended with a
    # different affine ramp
    x_c = x - 0.71606842360499456
    y_c = y - 0.18357884949910641
    width = 0.55677657235995637
    height = 0.37279391542283741
    phi = 0.62911754900697558

    x_c_rot = (np.cos(phi) * x_c - np.sin(phi) * y_c) / width
    y_c_rot = (np.sin(phi) * x_c + np.cos(phi) * y_c) / height

    indicator = sigmoid(np.sqrt(x_c_rot ** 2 + y_c_rot ** 2) - 1)
    values = indicator * values + 1.5 * (1 - indicator) * (-x - 2 * y + 0.6)

    # Normalize values to [0, 1]
    values = (values - np.min(values)) / (np.max(values) - np.min(values))

    return space.element(values)
python
{ "resource": "" }
q35439
print_objective
train
def print_objective(x):
    """Calculate the objective value and prints it."""
    # Objective: sum of distances from ``x`` to each rectangle in the
    # module-level ``rectangles`` list (projection onto each box first).
    value = sum(
        (x - np.minimum(np.maximum(x, low), high)).norm()
        for low, high in rectangles
    )
    print('Point = [{:.4f}, {:.4f}], Value = {:.4f}'.format(x[0], x[1],
                                                            value))
python
{ "resource": "" }
q35440
sparse_meshgrid
train
def sparse_meshgrid(*x):
    """Make a sparse `meshgrid` by adding empty dimensions.

    Parameters
    ----------
    x1,...,xN : `array-like`
        Input arrays to turn into sparse meshgrid vectors.

    Returns
    -------
    meshgrid : tuple of `numpy.ndarray`'s
        Sparse coordinate vectors representing an N-dimensional grid.

    See Also
    --------
    numpy.meshgrid : dense or sparse meshgrids

    Examples
    --------
    >>> x, y = [0, 1], [2, 3, 4]
    >>> mesh = sparse_meshgrid(x, y)
    >>> sum(xi for xi in mesh).ravel()  # first axis slowest
    array([2, 3, 4, 3, 4, 5])
    """
    ndim = len(x)
    vectors = []
    for axis, vec in enumerate(x):
        arr = np.asarray(vec)
        # Index with `None` in every position except `axis` to insert
        # length-1 dimensions around the coordinate axis
        index = [None] * ndim
        index[axis] = slice(None)
        vectors.append(np.ascontiguousarray(arr[tuple(index)]))
    return tuple(vectors)
python
{ "resource": "" }
q35441
uniform_grid_fromintv
train
def uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=True):
    """Return a grid from sampling an interval product uniformly.

    The resulting grid will by default include ``intv_prod.min_pt`` and
    ``intv_prod.max_pt`` as grid points. If you want a subdivision into
    equally sized cells with grid points in the middle, use
    `uniform_partition` instead.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Set to be sampled.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``).
        In each axis, an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    sampling : `RectGrid`
        Uniform sampling grid for the interval product.

    Examples
    --------
    >>> rbox = odl.IntervalProd([-1.5, 2], [-0.5, 3])
    >>> grid = uniform_grid_fromintv(rbox, (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. ,  2.5,  3. ]))

    To have the nodes in the "middle", use ``nodes_on_bdry=False``:

    >>> grid = uniform_grid_fromintv(rbox, (2, 2), nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25,  2.75]))

    See Also
    --------
    uniform_grid : Create a uniform grid directly.
    odl.discr.partition.uniform_partition_fromintv :
        divide interval product into equally sized subsets
    """
    if not isinstance(intv_prod, IntervalProd):
        raise TypeError('{!r} is not an `IntervalProd` instance'
                        ''.format(intv_prod))

    if (np.any(np.isinf(intv_prod.min_pt)) or
            np.any(np.isinf(intv_prod.max_pt))):
        # BUG FIX: the message used to format the literal string
        # 'intv_prod' instead of the offending object.
        raise ValueError('`intv_prod` must be finite, got {!r}'
                         ''.format(intv_prod))

    shape = normalized_scalar_param_list(shape, intv_prod.ndim, safe_int_conv)

    # Normalize `nodes_on_bdry` to a per-axis list of (left, right) choices
    if np.shape(nodes_on_bdry) == ():
        nodes_on_bdry = ([(bool(nodes_on_bdry), bool(nodes_on_bdry))] *
                         intv_prod.ndim)
    elif intv_prod.ndim == 1 and len(nodes_on_bdry) == 2:
        nodes_on_bdry = [nodes_on_bdry]
    elif len(nodes_on_bdry) != intv_prod.ndim:
        raise ValueError('`nodes_on_bdry` has length {}, expected {}'
                         ''.format(len(nodes_on_bdry), intv_prod.ndim))
    else:
        shape = tuple(int(n) for n in shape)

    # We need to determine the placement of the grid minimum and maximum
    # points based on the choices in nodes_on_bdry. If in a given axis,
    # and for a given side (left or right), the entry is True, the node lies
    # on the boundary, so this coordinate can simply be taken as-is.
    #
    # Otherwise, the following conditions must be met:
    #
    # 1. The node should be half a stride s away from the boundary
    # 2. Adding or subtracting (n-1)*s should give the other extremal node.
    #
    # If both nodes are to be shifted half a stride inside,
    # the second condition yields
    # a + s/2 + (n-1)*s = b - s/2 => s = (b - a) / n,
    # hence the extremal grid points are
    # gmin = a + s/2 = a + (b - a) / (2 * n),
    # gmax = b - s/2 = b - (b - a) / (2 * n).
    #
    # In the case where one node, say the rightmost, lies on the boundary,
    # the condition 2. reads as
    # a + s/2 + (n-1)*s = b => s = (b - a) / (n - 1/2),
    # thus
    # gmin = a + (b - a) / (2 * n - 1).
    gmin, gmax = [], []
    for n, xmin, xmax, on_bdry in zip(shape, intv_prod.min_pt,
                                      intv_prod.max_pt, nodes_on_bdry):
        # Unpack the tuple if possible, else use bool globally for this axis
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        if bdry_l and bdry_r:
            gmin.append(xmin)
            gmax.append(xmax)
        elif bdry_l and not bdry_r:
            gmin.append(xmin)
            gmax.append(xmax - (xmax - xmin) / (2 * n - 1))
        elif not bdry_l and bdry_r:
            gmin.append(xmin + (xmax - xmin) / (2 * n - 1))
            gmax.append(xmax)
        else:
            gmin.append(xmin + (xmax - xmin) / (2 * n))
            gmax.append(xmax - (xmax - xmin) / (2 * n))

    # Create the grid
    coord_vecs = [np.linspace(mi, ma, num, endpoint=True, dtype=np.float64)
                  for mi, ma, num in zip(gmin, gmax, shape)]
    return RectGrid(*coord_vecs)
python
{ "resource": "" }
q35442
uniform_grid
train
def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True):
    """Return a grid from sampling an implicit interval product uniformly.

    Parameters
    ----------
    min_pt : float or sequence of float
        Vectors of lower ends of the intervals in the product.
    max_pt : float or sequence of float
        Vectors of upper ends of the intervals in the product.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``).
        In each axis, an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``array.ndim``.

        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    uniform_grid : `RectGrid`
        The resulting uniform grid.

    See Also
    --------
    uniform_grid_fromintv :
        sample a given interval product
    odl.discr.partition.uniform_partition :
        divide implicitly defined interval product into equally
        sized subsets

    Examples
    --------
    By default, the min/max points are included in the grid:

    >>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. ,  2.5,  3. ]))

    If ``shape`` is supposed to refer to small subvolumes, and the grid
    should be their centers, use the option ``nodes_on_bdry=False``:

    >>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (2, 2),
    ...                         nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25,  2.75]))

    In 1D, we don't need sequences:

    >>> grid = odl.uniform_grid(0, 1, 3)
    >>> grid.coord_vectors
    (array([ 0. ,  0.5,  1. ]),)
    """
    # Thin wrapper: wrap the bounds in an IntervalProd and delegate
    return uniform_grid_fromintv(IntervalProd(min_pt, max_pt), shape,
                                 nodes_on_bdry=nodes_on_bdry)
python
{ "resource": "" }
q35443
RectGrid.ndim
train
def ndim(self):
    """Number of dimensions of the grid."""
    try:
        # Cached on first access in a (name-mangled) private attribute
        return self.__ndim
    except AttributeError:
        ndim = len(self.coord_vectors)
        self.__ndim = ndim
        return ndim
python
{ "resource": "" }
q35444
RectGrid.shape
train
def shape(self):
    """Number of grid points per axis."""
    try:
        # Cached on first access in a (name-mangled) private attribute
        return self.__shape
    except AttributeError:
        shape = tuple(len(vec) for vec in self.coord_vectors)
        self.__shape = shape
        return shape
python
{ "resource": "" }
q35445
RectGrid.size
train
def size(self):
    """Total number of grid points."""
    shape = self.shape
    if shape == ():
        # `np.prod(())` would give 1.0, but an empty grid has no points
        return 0
    return int(np.prod(shape, dtype='int64'))
python
{ "resource": "" }
q35446
RectGrid.min
train
def min(self, **kwargs):
    """Return `min_pt`.

    Parameters
    ----------
    kwargs
        For duck-typing with `numpy.amin`

    See Also
    --------
    max
    odl.set.domain.IntervalProd.min

    Examples
    --------
    >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
    >>> g.min()
    array([ 1., -2.])

    Also works with Numpy:

    >>> np.min(g)
    array([ 1., -2.])
    """
    out = kwargs.get('out', None)
    if out is None:
        return self.min_pt
    # NumPy duck-typing: write the result into the provided array
    out[:] = self.min_pt
    return out
python
{ "resource": "" }
q35447
RectGrid.max
train
def max(self, **kwargs):
    """Return `max_pt`.

    Parameters
    ----------
    kwargs
        For duck-typing with `numpy.amax`

    See Also
    --------
    min
    odl.set.domain.IntervalProd.max

    Examples
    --------
    >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
    >>> g.max()
    array([ 5.,  2.])

    Also works with Numpy:

    >>> np.max(g)
    array([ 5.,  2.])
    """
    out = kwargs.get('out', None)
    if out is None:
        return self.max_pt
    # NumPy duck-typing: write the result into the provided array
    out[:] = self.max_pt
    return out
python
{ "resource": "" }
q35448
RectGrid.stride
train
def stride(self): """Step per axis between neighboring points of a uniform grid. If the grid contains axes that are not uniform, ``stride`` has a ``NaN`` entry. For degenerate (length 1) axes, ``stride`` has value ``0.0``. Returns ------- stride : numpy.array Array of dtype ``float`` and length `ndim`. Examples -------- >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3)) >>> rg.stride array([ 1., 2.]) NaN returned for non-uniform dimension: >>> g = RectGrid([0, 1, 2], [0, 1, 4]) >>> g.stride array([ 1., nan]) 0.0 returned for degenerate dimension: >>> g = RectGrid([0, 1, 2], [0]) >>> g.stride array([ 1., 0.]) """ # Cache for efficiency instead of re-computing if self.__stride is None: strd = [] for i in range(self.ndim): if not self.is_uniform_byaxis[i]: strd.append(float('nan')) elif self.nondegen_byaxis[i]: strd.append(self.extent[i] / (self.shape[i] - 1.0)) else: strd.append(0.0) self.__stride = np.array(strd) return self.__stride.copy()
python
{ "resource": "" }
q35449
RectGrid.approx_equals
train
def approx_equals(self, other, atol):
    """Test if this grid is equal to another grid.

    Parameters
    ----------
    other :
        Object to be tested
    atol : float
        Allow deviations up to this number in absolute value per
        vector entry.

    Returns
    -------
    equals : bool
        ``True`` if ``other`` is a `RectGrid` instance with all
        coordinate vectors equal (up to the given tolerance), to
        the ones of this grid, ``False`` otherwise.

    Examples
    --------
    >>> g1 = RectGrid([0, 1], [-1, 0, 2])
    >>> g2 = RectGrid([-0.1, 1.1], [-1, 0.1, 2])
    >>> g1.approx_equals(g2, atol=0)
    False
    >>> g1.approx_equals(g2, atol=0.15)
    True
    """
    # Identity shortcut
    if other is self:
        return True

    # Cheap structural checks first
    if type(other) is not type(self):
        return False
    if self.ndim != other.ndim or self.shape != other.shape:
        return False

    # Element-wise tolerance check of all coordinate vectors
    vec_pairs = zip(self.coord_vectors, other.coord_vectors)
    return all(np.allclose(vec_s, vec_o, atol=atol, rtol=0.0)
               for vec_s, vec_o in vec_pairs)
python
{ "resource": "" }
q35450
RectGrid.approx_contains
train
def approx_contains(self, other, atol):
    """Test if ``other`` belongs to this grid up to a tolerance.

    Parameters
    ----------
    other : `array-like` or float
        The object to test for membership in this grid
    atol : float
        Allow deviations up to this number in absolute value per
        vector entry.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.approx_contains([0, 0], atol=0.0)
    True
    >>> [0, 0] in g  # equivalent
    True
    >>> g.approx_contains([0.1, -0.1], atol=0.0)
    False
    >>> g.approx_contains([0.1, -0.1], atol=0.15)
    True
    """
    point = np.atleast_1d(other)
    if point.shape != (self.ndim,):
        return False
    # Each coordinate must be close to some entry of the corresponding
    # coordinate vector
    return all(np.any(np.isclose(vec, coord, atol=atol, rtol=0.0))
               for vec, coord in zip(self.coord_vectors, point))
python
{ "resource": "" }
q35451
RectGrid.is_subgrid
train
def is_subgrid(self, other, atol=0.0):
    """Return ``True`` if this grid is a subgrid of ``other``.

    Parameters
    ----------
    other :  `RectGrid`
        The other grid which is supposed to contain this grid
    atol : float, optional
        Allow deviations up to this number in absolute value per
        coordinate vector entry.

    Returns
    -------
    is_subgrid : bool
        ``True`` if all coordinate vectors of ``self`` are within
        absolute distance ``atol`` of the other grid, else ``False``.

    Examples
    --------
    >>> rg = uniform_grid([-2, -2], [0, 4], (3, 4))
    >>> rg.coord_vectors
    (array([-2., -1.,  0.]), array([-2.,  0.,  2.,  4.]))
    >>> rg_sub = uniform_grid([-1, 2], [0, 4], (2, 2))
    >>> rg_sub.coord_vectors
    (array([-1.,  0.]), array([ 2.,  4.]))
    >>> rg_sub.is_subgrid(rg)
    True

    Fuzzy check is also possible. Note that the tolerance still
    applies to the coordinate vectors.

    >>> rg_sub = uniform_grid([-1.015, 2], [0, 3.99], (2, 2))
    >>> rg_sub.is_subgrid(rg, atol=0.01)
    False
    >>> rg_sub.is_subgrid(rg, atol=0.02)
    True
    """
    # Optimization for some common cases
    if other is self:
        return True
    if not isinstance(other, RectGrid):
        return False
    # Necessary conditions: per axis, not more points than `other`,
    # and bounding box contained in `other`'s (up to `atol`)
    if not all(self.shape[i] <= other.shape[i] and
               self.min_pt[i] >= other.min_pt[i] - atol and
               self.max_pt[i] <= other.max_pt[i] + atol
               for i in range(self.ndim)):
        return False

    # An empty grid is trivially contained
    if self.size == 0:
        return True

    if self.is_uniform and other.is_uniform:
        # For uniform grids, it suffices to show that min_pt, max_pt
        # and g[1,...,1] are contained in the other grid. For axes
        # with less than 2 points, this reduces to min_pt and max_pt,
        # and the corresponding indices in the other check point are
        # set to 0.
        minmax_contained = (
            other.approx_contains(self.min_pt, atol=atol) and
            other.approx_contains(self.max_pt, atol=atol))
        check_idx = np.zeros(self.ndim, dtype=int)
        check_idx[np.array(self.shape) >= 3] = 1
        checkpt_contained = other.approx_contains(self[tuple(check_idx)],
                                                  atol=atol)

        return minmax_contained and checkpt_contained

    else:
        # Array version of the fuzzy subgrid test, about 3 times faster
        # than the loop version.
        for vec_o, vec_s in zip(other.coord_vectors, self.coord_vectors):
            # Create array of differences of all entries in vec_o and
            # vec_s. If there is no almost zero entry in each row,
            # return False.
            vec_o_mg, vec_s_mg = sparse_meshgrid(vec_o, vec_s)
            if not np.all(np.any(np.isclose(vec_s_mg, vec_o_mg, atol=atol),
                                 axis=0)):
                return False

    return True
python
{ "resource": "" }
q35452
RectGrid.insert
train
def insert(self, index, *grids):
    """Return a copy with ``grids`` inserted before ``index``.

    The given grids are inserted (as a block) into ``self``, yielding
    a new grid whose number of dimensions is the sum of the numbers of
    dimensions of all involved grids. Note that no changes are made
    in-place.

    Parameters
    ----------
    index : int
        The index of the dimension before which ``grids`` are to
        be inserted. Negative indices count backwards from
        ``self.ndim``.
    grid1, ..., gridN :  `RectGrid`
        The grids to be inserted into ``self``.

    Returns
    -------
    newgrid : `RectGrid`
        The enlarged grid.

    Examples
    --------
    >>> g1 = RectGrid([0, 1], [-1, 0, 2])
    >>> g2 = RectGrid([1], [-6, 15])
    >>> g1.insert(1, g2)
    RectGrid(
        [ 0.,  1.],
        [ 1.],
        [ -6.,  15.],
        [-1.,  0.,  2.]
    )
    >>> g1.insert(1, g2, g2)
    RectGrid(
        [ 0.,  1.],
        [ 1.],
        [ -6.,  15.],
        [ 1.],
        [ -6.,  15.],
        [-1.,  0.,  2.]
    )

    See Also
    --------
    append
    """
    # Keep the original value for the error message, validate the
    # converted one
    index, index_in = safe_int_conv(index), index
    if not -self.ndim <= index <= self.ndim:
        raise IndexError('index {0} outside the valid range -{1} ... {1}'
                         ''.format(index_in, self.ndim))
    if index < 0:
        # Negative indices count from the end, numpy-style
        index += self.ndim

    if len(grids) == 0:
        # Copy of `self`
        return RectGrid(*self.coord_vectors)
    elif len(grids) == 1:
        # Insert single grid
        grid = grids[0]
        if not isinstance(grid, RectGrid):
            raise TypeError('{!r} is not a `RectGrid` instance'
                            ''.format(grid))
        new_vecs = (self.coord_vectors[:index] + grid.coord_vectors +
                    self.coord_vectors[index:])
        return RectGrid(*new_vecs)
    else:
        # Recursively insert first grid and the remaining into the result
        return self.insert(index, grids[0]).insert(
            index + grids[0].ndim, *(grids[1:]))
python
{ "resource": "" }
q35453
RectGrid.points
train
def points(self, order='C'):
    """All grid points in a single array.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Axis ordering in the resulting point array.

    Returns
    -------
    points : `numpy.ndarray`
        The shape of the array is ``size x ndim``, i.e. the points
        are stored as rows.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.points()
    array([[ 0., -1.],
           [ 0.,  0.],
           [ 0.,  2.],
           [ 1., -1.],
           [ 1.,  0.],
           [ 1.,  2.]])
    >>> g.points(order='F')
    array([[ 0., -1.],
           [ 1., -1.],
           [ 0.,  0.],
           [ 1.,  0.],
           [ 0.,  2.],
           [ 1.,  2.]])
    """
    if str(order).upper() not in ('C', 'F'):
        raise ValueError('order {!r} not recognized'.format(order))
    else:
        order = str(order).upper()

    # For 'F' ordering, process the axes in reversed order so that the
    # first axis varies fastest in the output rows
    axes = range(self.ndim) if order == 'C' else reversed(range(self.ndim))
    shape = self.shape if order == 'C' else tuple(reversed(self.shape))
    point_arr = np.empty((self.size, self.ndim))

    for i, axis in enumerate(axes):
        # NOTE(review): relies on `reshape` of the evenly strided column
        # returning a writable view rather than a copy — confirm
        view = point_arr[:, axis].reshape(shape)
        coord_shape = (1,) * i + (-1,) + (1,) * (self.ndim - i - 1)
        view[:] = self.coord_vectors[axis].reshape(coord_shape)

    return point_arr
python
{ "resource": "" }
q35454
RectGrid.corner_grid
train
def corner_grid(self):
    """Return a grid with only the corner points.

    Returns
    -------
    cgrid : `RectGrid`
        Grid with size 2 in non-degenerate dimensions and 1
        in degenerate ones

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.corner_grid()
    uniform_grid([ 0., -1.], [ 1.,  2.], (2, 2))
    """
    # Degenerate (length-1) axes keep their single coordinate; all
    # other axes are reduced to (first, last).
    corner_vecs = [
        vec[0] if npts == 1 else (vec[0], vec[-1])
        for vec, npts in zip(self.coord_vectors, self.shape)
    ]
    return RectGrid(*corner_vecs)
python
{ "resource": "" }
q35455
PartialDerivative._call
train
def _call(self, x, out=None):
    """Calculate partial derivative of ``x``.

    Parameters
    ----------
    x : `domain` element
        Input function whose partial derivative along ``self.axis``
        is computed.
    out : `range` element, optional
        Output element; a new one is created if not given.

    Returns
    -------
    out : `range` element
        The finite-difference approximation of the partial derivative.
    """
    if out is None:
        out = self.range.element()

    # TODO: this pipes CUDA arrays through NumPy. Write native operator.
    with writable_array(out) as out_arr:
        finite_diff(x.asarray(), axis=self.axis, dx=self.dx,
                    method=self.method, pad_mode=self.pad_mode,
                    pad_const=self.pad_const,
                    out=out_arr)
    return out
python
{ "resource": "" }
q35456
Gradient._call
train
def _call(self, x, out=None):
    """Calculate the spatial gradient of ``x``.

    Parameters
    ----------
    x : `domain` element
        Input function whose gradient is computed.
    out : `range` (product space) element, optional
        Output element; a new one is created if not given.

    Returns
    -------
    out : `range` element
        Component ``out[axis]`` holds the finite-difference
        derivative of ``x`` along ``axis``.
    """
    if out is None:
        out = self.range.element()

    x_arr = x.asarray()
    ndim = self.domain.ndim
    dx = self.domain.cell_sides

    # One partial derivative per axis, written into the corresponding
    # component of the product-space result
    for axis in range(ndim):
        with writable_array(out[axis]) as out_arr:
            finite_diff(x_arr, axis=axis, dx=dx[axis], method=self.method,
                        pad_mode=self.pad_mode,
                        pad_const=self.pad_const,
                        out=out_arr)
    return out
python
{ "resource": "" }
q35457
Divergence._call
train
def _call(self, x, out=None):
    """Calculate the divergence of ``x``.

    Parameters
    ----------
    x : `domain` (product space) element
        Vector field whose divergence is computed.
    out : `range` element, optional
        Output element; a new one is created if not given.

    Returns
    -------
    out : `range` element
        Sum over all axes of the per-axis finite differences of the
        corresponding component of ``x``.
    """
    if out is None:
        out = self.range.element()

    ndim = self.range.ndim
    dx = self.range.cell_sides

    # Scratch buffer for the per-axis derivative, accumulated into `out`
    tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)
    with writable_array(out) as out_arr:
        for axis in range(ndim):
            finite_diff(x[axis], axis=axis, dx=dx[axis],
                        method=self.method, pad_mode=self.pad_mode,
                        pad_const=self.pad_const,
                        out=tmp)
            if axis == 0:
                # First term initializes the accumulator
                out_arr[:] = tmp
            else:
                out_arr += tmp
    return out
python
{ "resource": "" }
q35458
Laplacian._call
train
def _call(self, x, out=None):
    """Calculate the spatial Laplacian of ``x``.

    Parameters
    ----------
    x : `domain` element
        Input function whose Laplacian is computed.
    out : `range` element, optional
        Output element; created (zeroed) if not given, otherwise
        reset to zero before accumulation.

    Returns
    -------
    out : `range` element
        The Laplacian, accumulated per axis as the difference of
        forward and backward finite differences.
    """
    if out is None:
        out = self.range.zero()
    else:
        out.set_zero()

    x_arr = x.asarray()
    # Removed a dead `out_arr = out.asarray()` here: the name was
    # immediately rebound by the `with writable_array(out)` below.

    # Scratch buffer for the per-axis finite differences
    tmp = np.empty(out.shape, out.dtype, order=out.space.default_order)

    ndim = self.domain.ndim
    dx = self.domain.cell_sides

    with writable_array(out) as out_arr:
        for axis in range(ndim):
            # TODO: this can be optimized
            finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
                        method='forward', pad_mode=self.pad_mode,
                        pad_const=self.pad_const, out=tmp)
            out_arr += tmp

            finite_diff(x_arr, axis=axis, dx=dx[axis] ** 2,
                        method='backward', pad_mode=self.pad_mode,
                        pad_const=self.pad_const, out=tmp)
            out_arr -= tmp

    return out
python
{ "resource": "" }
q35459
divide_1Darray_equally
train
def divide_1Darray_equally(ind, nsub):
    """Divide an array into equal chunks to be used for instance in OSEM.

    Parameters
    ----------
    ind : ndarray
        input array
    nsubsets : int
        number of subsets to be divided into

    Returns
    -------
    sub2ind : list
        list of indices for each subset
    ind2sub : list
        list of subsets for each index
    """
    # Forward mapping: subset number -> indices it contains
    sub2ind = partition_equally_1d(ind, nsub, order='interlaced')

    # Invert it: index -> list of subsets containing that index
    ind2sub = [[] for _ in range(len(ind))]
    for subset in range(nsub):
        for index in sub2ind[subset]:
            ind2sub[index].append(subset)

    return (sub2ind, ind2sub)
python
{ "resource": "" }
q35460
total_variation
train
def total_variation(domain, grad=None):
    """Total variation functional.

    Parameters
    ----------
    domain : odlspace
        domain of TV functional
    grad : gradient operator, optional
        Gradient operator of the total variation functional. This may be
        any linear operator and thereby generalizing TV. default=forward
        differences with Neumann boundary conditions

    Examples
    --------
    Check that the total variation of a constant is zero

    >>> import odl.contrib.spdhg as spdhg, odl
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tv = spdhg.total_variation(space)
    >>> x = space.one()
    >>> tv(x) < 1e-10
    """
    if grad is None:
        # Default gradient: forward differences with symmetric padding.
        # An upper bound on the operator norm is attached for use in
        # step-size computations.
        grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
        grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
    # (Removed a redundant `else: grad = grad` no-op branch.)

    f = odl.solvers.GroupL1Norm(grad.range, exponent=2)

    return f * grad
python
{ "resource": "" }
q35461
fgp_dual
train
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs): """Computes a solution to the ROF problem with the fast gradient projection algorithm. Parameters ---------- p : np.array dual initial variable data : np.array noisy data / proximal point alpha : float regularization parameter niter : int number of iterations grad : instance of gradient class class that supports grad(x), grad.adjoint(x), grad.norm proj_C : function projection onto the constraint set of the primal variable, e.g. non-negativity proj_P : function projection onto the constraint set of the dual variable, e.g. norm <= 1 tol : float (optional) nonnegative parameter that gives the tolerance for convergence. If set None, then the algorithm will run for a fixed number of iterations Other Parameters ---------------- callback : callable, optional Function called with the current iterate after each iteration. """ # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable'.format(callback)) factr = 1 / (grad.norm**2 * alpha) q = p.copy() x = data.space.zero() t = 1. if tol is None: def convergence_eval(p1, p2): return False else: def convergence_eval(p1, p2): return (p1 - p2).norm() / p1.norm() < tol pnew = p.copy() if callback is not None: callback(p) for k in range(niter): t0 = t grad.adjoint(q, out=x) proj_C(data - alpha * x, out=x) grad(x, out=pnew) pnew *= factr pnew += q proj_P(pnew, out=pnew) converged = convergence_eval(p, pnew) if not converged: # update step size t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2. # calculate next iterate q[:] = pnew + (t0 - 1) / t * (pnew - p) p[:] = pnew if converged: t = None break if callback is not None: callback(p) # get current image estimate x = proj_C(data - alpha * grad.adjoint(p)) return x
python
{ "resource": "" }
q35462
TotalVariationNonNegative.proximal
train
def proximal(self, sigma):
    """Prox operator of TV. It allows the proximal step length to be a
    vector of positive elements.

    Examples
    --------
    Check that the proximal operator is the identity for sigma=0

    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0)(x)
    >>> (y-x).norm() < 1e-10

    Check that negative functions are mapped to 0

    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0.1)(x)
    >>> y.norm() < 1e-10
    """
    if sigma == 0:
        # Zero step length: the prox is the identity
        return odl.IdentityOperator(self.domain)

    else:
        def tv_prox(z, out=None):

            if out is None:
                out = z.space.zero()

            opts = self.prox_options

            sigma_ = np.copy(sigma)
            z_ = z.copy()

            if self.strong_convexity > 0:
                # Rescale step and input for the strongly convex case
                sigma_ /= (1 + sigma * self.strong_convexity)
                z_ /= (1 + sigma * self.strong_convexity)

            if opts['name'] == 'FGP':
                if opts['warmstart']:
                    # Reuse the dual variable from the previous call;
                    # NOTE: this mutates the shared options dict
                    if opts['p'] is None:
                        opts['p'] = self.grad.range.zero()

                    p = opts['p']
                else:
                    p = self.grad.range.zero()

                # Absorb the step length into data and gradient so that
                # fgp_dual solves the correctly scaled problem
                sigma_sqrt = np.sqrt(sigma_)
                z_ /= sigma_sqrt
                grad = sigma_sqrt * self.grad
                grad.norm = sigma_sqrt * self.grad.norm
                niter = opts['niter']
                alpha = self.alpha
                out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
                                  self.proj_P, tol=opts['tol'])

                out *= sigma_sqrt

                return out

            else:
                raise NotImplementedError('Not yet implemented')

        return tv_prox
python
{ "resource": "" }
q35463
_fields_from_table
train
def _fields_from_table(spec_table, id_key): """Read a specification and return a list of fields. The given specification is assumed to be in `reST grid table format <http://docutils.sourceforge.net/docs/user/rst/quickref.html#tables>`_. Parameters ---------- spec_table : str Specification given as a string containing a definition table. id_key : str Dictionary key (= column header) for the ID labels in ``spec``. Returns ------- fields : tuple of dicts Field list of the specification with combined multi-line entries. Each field corresponds to one (multi-)line of the spec. """ # Reformat the table, throwing away lines not starting with '|' spec_lines = [line[1:-1].rstrip() for line in spec_table.splitlines() if line.startswith('|')] # Guess the CSV dialect and read the table, producing an iterable dialect = csv.Sniffer().sniff(spec_lines[0], delimiters='|') reader = csv.DictReader(spec_lines, dialect=dialect) # Read the fields as dictionaries and transform keys and values to # lowercase. fields = [] for row in reader: new_row = {} if row[id_key].strip(): # Start of a new field, indicated by a nontrivial ID entry for key, val in row.items(): new_row[key.strip()] = val.strip() fields.append(new_row) else: # We have the second row of a multi-line field. We # append all stripped values to the corresponding existing entry # value with an extra space. if not fields: # Just to make sure that this situation did not happen at # the very beginning of the table continue for key, val in row.items(): fields[-1][key.strip()] += (' ' + val).rstrip() return tuple(fields)
python
{ "resource": "" }
q35464
header_fields_from_table
train
def header_fields_from_table(spec_table, keys, dtype_map):
    """Convert the specification table to a standardized format.

    The specification table is assumed to be in `reST grid table format
    <http://docutils.sourceforge.net/docs/user/rst/quickref.html#tables>`_.
    It must have the following 5 columns:

    1. ID : an arbitrary unique identifier, e.g., a number.
    2. Byte range : Bytes in the file covered by this field, given as
       number range (e.g. ``15-24``). The byte values start at 1 (not 0),
       and the upper value of the range is included.
    3. Data type : Field values are stored in this format. For multiple
       entries, a shape can be specified immediately after the type
       specifier, e.g., ``Float32(4)`` or ``Int32(2,2)``. It is also
       possible to give an incomplete shape, e.g., ``Int32(2)`` with a
       24-byte field. In this case, the shape is completed to ``(3, 2)``
       automatically. By default, the one-dimensional shape is determined
       from the data type and the byte range. The data type must map to a
       NumPy data type (``dtype_map``).
    4. Name : The name of the field as used later (in lowercase) for
       identification.
    5. Description : An explanation of the field.

    The converted specification is a tuple of dictionaries, each
    corresponding to one (multi-)row (=field) of the original table.
    Each field has key-value pairs for the following keys:

    - ``'name'`` (string) : Name of the element.
    - ``'offset'`` (int) : Offset of the current element in bytes.
    - ``'size'`` (int) : Size of the current element in bytes.
    - ``'dtype'`` (type) : Data type of the current element as defined
      by Numpy.
    - ``'description'`` (string) : Description of the element (optional).
    - ``'dshape'`` (tuple) : For multi-elements: number of elements per
      dimension. Optional for single elements.

    Parameters
    ----------
    spec_table : str
        Specification given as a string containing a definition table.
    keys : dict
        Dictionary with the following entries for the column headers in
        the specification table:

        - ``'id'``
        - ``'byte_range'``
        - ``'dtype'``
        - ``'name'``
        - ``'description'``

    dtype_map : dict
        Mapping from the data type specifiers in the specification table
        to NumPy data types.

    Returns
    -------
    standardized_fields : tuple of dicts
        The standardized fields according to the above list, one for each
        (multi-)row.

    Raises
    ------
    ValueError
        If a given shape does not fit into the field, cannot be completed
        consistently, or if the field size is not a multiple of the
        data type's item size.
    """
    field_list = _fields_from_table(spec_table, id_key=keys['id'])

    # Parse the fields and represent them in a unified way
    conv_list = []
    for field in field_list:
        new_field = {}

        # Name and description: lowercase name, copy description
        new_field['name'] = field[keys['name']].lower()
        new_field['description'] = field[keys['description']]

        # Get byte range and set start. Table values are 1-based and
        # inclusive; offsets here are 0-based.
        byte_range = field[keys['byte_range']].split('-')
        offset_bytes = int(byte_range[0]) - 1
        end_bytes = int(byte_range[-1]) - 1
        size_bytes = end_bytes - offset_bytes + 1
        new_field['offset'] = offset_bytes
        new_field['size'] = size_bytes

        # Data type: transform to Numpy format and get shape
        dtype_shape = field[keys['dtype']].split('(')
        dtype = dtype_map[dtype_shape[0]]
        new_field['dtype'] = dtype

        if len(dtype_shape) == 2:
            # Shape was given in data type specification.
            # Re-attach left parenthesis that was removed in the split.
            # NOTE: `eval` on the shape spec assumes a trusted
            # specification table.
            dshape = np.atleast_1d(eval('(' + dtype_shape[-1]))
            size_bytes_from_shape = np.prod(dshape) * dtype.itemsize
            # BUG FIX: was `>=`, which also rejected a shape that
            # exactly filled the field; only a strictly larger shape
            # is an error (consistent with the message text below).
            if size_bytes_from_shape > size_bytes:
                raise ValueError(
                    "entry '{}': field size {} from shape {} and "
                    "dtype.itemsize {} larger than field size {} from spec"
                    "".format(field[keys['name']], size_bytes_from_shape,
                              dshape, dtype.itemsize, size_bytes))

            # Try to complete the given shape
            if size_bytes % size_bytes_from_shape:
                raise ValueError(
                    "entry '{}': shape {} cannot be completed consistently "
                    "using field size {} and `dtype.itemsize` {}"
                    "".format(field[keys['name']], dshape, size_bytes,
                              dtype.itemsize))
            dshape = (size_bytes // size_bytes_from_shape,) + tuple(dshape)
        else:
            # No explicit shape; derive a 1d shape from size and dtype.
            if size_bytes % dtype.itemsize:
                # BUG FIX: message had 3 placeholders but 4 format
                # arguments; the data type was silently dropped.
                raise ValueError(
                    "entry '{}': byte range {} not a multiple of "
                    "`dtype.itemsize` {} of data type {}"
                    "".format(field[keys['name']],
                              field[keys['byte_range']],
                              dtype.itemsize, field[keys['dtype']]))
            dshape = (size_bytes // dtype.itemsize,)

        new_field['dshape'] = dshape
        conv_list.append(new_field)

    return tuple(conv_list)
python
{ "resource": "" }
q35465
FileReaderRawBinaryWithHeader.header_size
train
def header_size(self):
    """Size of `file`'s header in bytes.

    The size is computed from `header` as the end position of the entry
    lying farthest into the file. If the header has not been read yet,
    0 is returned.
    """
    if not self.header:
        return 0

    # Find the entry with the largest offset; the header ends where
    # that entry's value ends.
    last_entry = None
    for entry in self.header.values():
        if last_entry is None or entry['offset'] > last_entry['offset']:
            last_entry = entry
    return last_entry['offset'] + last_entry['value'].nbytes
python
{ "resource": "" }
q35466
FileReaderRawBinaryWithHeader.read_header
train
def read_header(self):
    """Read the header from `file`.

    The header is also stored in the `header` attribute.

    Returns
    -------
    header : `OrderedDict`
        Header from `file`, stored in an ordered dictionary, where each
        entry has the following form::

            'name': {'value': value_as_array,
                     'offset': offset_in_bytes,
                     'description': description_string}

        All ``'value'``'s are `numpy.ndarray`'s with at least one
        dimension. If a ``'shape'`` is given in `header_fields`, the
        resulting array is reshaped accordingly.

    See Also
    --------
    read_data
    """
    # Read all fields except data. We use an OrderedDict such that
    # the order is the same as in `header_fields`. This makes it simple
    # to write later on in the correct order, based only on `header`.
    header = OrderedDict()
    for field in self.header_fields:
        # Get all the values from the dictionary
        name = field['name']
        if name == 'data':
            # The data block is handled by `read_data`, not here.
            continue
        entry = {'description': field.get('description', '')}
        offset_bytes = int(field['offset'])
        entry['offset'] = offset_bytes
        size_bytes = int(field['size'])
        dtype = np.dtype(field['dtype'])
        shape = field.get('dshape', -1)  # no-op by default
        if size_bytes is None:
            # Default if 'size' is omitted
            # NOTE(review): this branch is unreachable — `size_bytes`
            # was produced by `int(field['size'])` above, which raises
            # on a missing or `None` value; an omitted 'size' is thus
            # not actually supported here.
            num_elems = 1
        else:
            # True division yields a float; it is cast with `int()`
            # when building the format string below.
            num_elems = size_bytes / dtype.itemsize
            if size_bytes % dtype.itemsize:
                raise RuntimeError(
                    "field '{}': `size` {} and `dtype.itemsize` {} "
                    " result in non-integer number of elements"
                    "".format(name, size_bytes, dtype.itemsize))

        # Create format string for struct module to unpack the binary
        # data
        if np.issubdtype(dtype, np.dtype('S')):
            # Have conversion only for 'S1', so we need to translate
            fmt = (str(int(num_elems) * dtype.itemsize) + 's')
        else:
            # Format character can be obtained as dtype.char
            fmt = str(int(num_elems)) + dtype.char

        # Sanity check: the struct format must cover exactly the
        # declared field size.
        if struct.calcsize(fmt) != size_bytes:
            raise RuntimeError(
                "field '{}': format '{}' results in {} bytes, but "
                "`size` is {}"
                "".format(name, fmt, struct.calcsize(fmt), size_bytes))

        self.file.seek(offset_bytes)
        packed_value = self.file.read(size_bytes)
        if np.issubdtype(dtype, np.dtype('S')):
            # Bytestring type, decode instead of unpacking. Replace
            # \x00 characters with whitespace so the final length is
            # correct
            packed_value = packed_value.replace(b'\x00', b' ')
            value = np.fromiter(packed_value.decode().ljust(size_bytes),
                                dtype=dtype)
            entry['value'] = value.reshape(shape)
        else:
            value = np.array(struct.unpack_from(fmt, packed_value),
                             dtype=dtype)
            entry['value'] = value.reshape(shape)
        header[name] = entry

    # Store information gained from the header
    self.__header = header

    return header
python
{ "resource": "" }
q35467
FileReaderRawBinaryWithHeader.read_data
train
def read_data(self, dstart=None, dend=None):
    """Read data from `file` and return it as Numpy array.

    Parameters
    ----------
    dstart : int, optional
        Offset in bytes of the data field. By default, it is taken to
        be the header size as determined from reading the header.
        Backwards indexing with negative values is also supported.
        Use a value larger than the header size to extract a data
        subset.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
        Backwards indexing with negative values is also supported.
        Use a value different from the file size to extract a data
        subset.

    Returns
    -------
    data : `numpy.ndarray`
        The data read from `file`.

    Raises
    ------
    ValueError
        If the resulting absolute byte range is empty, starts inside
        the header, extends past the end of the file, or does not
        contain a whole number of data elements.

    See Also
    --------
    read_header
    """
    # Determine the total file size by seeking to the end.
    self.file.seek(0, 2)  # 2 means "from the end"
    filesize_bytes = self.file.tell()
    if dstart is None:
        dstart_abs = int(self.header_size)
    elif dstart < 0:
        # Negative values index backwards from the end of the file.
        dstart_abs = filesize_bytes + int(dstart)
    else:
        dstart_abs = int(dstart)

    if dend is None:
        dend_abs = int(filesize_bytes)
    elif dend < 0:
        dend_abs = int(dend) + filesize_bytes
    else:
        dend_abs = int(dend)

    if dstart_abs >= dend_abs:
        raise ValueError('invalid `dstart` and `dend`, resulting in '
                         'absolute `dstart` >= `dend` ({} >= {})'
                         ''.format(dstart_abs, dend_abs))
    if dstart_abs < self.header_size:
        raise ValueError('invalid `dstart`, resulting in absolute '
                         '`dstart` < `header_size` ({} < {})'
                         ''.format(dstart_abs, self.header_size))
    if dend_abs > filesize_bytes:
        # BUG FIX: the message template used '<' although the violated
        # condition is '`dend` > `filesize_bytes`'.
        raise ValueError('invalid `dend`, resulting in absolute '
                         '`dend` > `filesize_bytes` ({} > {})'
                         ''.format(dend_abs, filesize_bytes))

    num_elems = (dend_abs - dstart_abs) / self.data_dtype.itemsize
    if num_elems != int(num_elems):
        raise ValueError(
            'trying to read {} bytes, which is not a multiple of '
            'the itemsize {} of the data type {}'
            ''.format(dend_abs - dstart_abs, self.data_dtype.itemsize,
                      self.data_dtype))
    self.file.seek(dstart_abs)
    # Read directly into a pre-allocated array to avoid a copy.
    array = np.empty(int(num_elems), dtype=self.data_dtype)
    self.file.readinto(array.data)
    return array
python
{ "resource": "" }
q35468
FileWriterRawBinaryWithHeader.write_header
train
def write_header(self):
    """Write `header` to `file`.

    Each header entry's value is written at its recorded byte offset.

    See Also
    --------
    write_data
    """
    for entry in self.header.values():
        # Position the file at the entry's offset, then dump the raw
        # array bytes there.
        self.file.seek(int(entry['offset']))
        entry['value'].tofile(self.file)
python
{ "resource": "" }
q35469
RosenbrockFunctional.gradient
train
def gradient(self):
    """Gradient operator of the Rosenbrock functional."""
    functional = self
    # Scaling constant of the "valley" term of the Rosenbrock function.
    c = self.scale

    class RosenbrockGradient(Operator):

        """The gradient operator of the Rosenbrock functional."""

        def __init__(self):
            """Initialize a new instance."""
            super(RosenbrockGradient, self).__init__(
                functional.domain, functional.domain, linear=False)

        def _call(self, x, out):
            """Apply the gradient operator to the given point.

            Writes the partial derivatives of the Rosenbrock function
            into ``out``.
            """
            # Interior points get contributions from both neighboring
            # coupling terms plus the (1 - x_i)^2 term.
            for i in range(1, self.domain.size - 1):
                out[i] = (2 * c * (x[i] - x[i - 1]**2) -
                          4 * c * (x[i + 1] - x[i]**2) * x[i] -
                          2 * (1 - x[i]))
            # Boundary points only couple to one neighbor each.
            out[0] = (-4 * c * (x[1] - x[0] ** 2) * x[0] +
                      2 * (x[0] - 1))
            out[-1] = 2 * c * (x[-1] - x[-2] ** 2)

        def derivative(self, x):
            """The derivative of the gradient.

            This is also known as the Hessian.
            """
            # TODO: Implement optimized version of this that does not need
            # a matrix.
            shape = (functional.domain.size, functional.domain.size)
            matrix = np.zeros(shape)

            # Straightforward computation: tridiagonal structure with
            # the generic diagonal formula for interior points;
            # the first and last diagonal entries are corrected below.
            for i in range(0, self.domain.size - 1):
                matrix[i, i] = (2 * c + 2 + 12 * c * x[i] ** 2 -
                                4 * c * x[i + 1])
                matrix[i + 1, i] = -4 * c * x[i]
                matrix[i, i + 1] = -4 * c * x[i]
            matrix[-1, -1] = 2 * c
            matrix[0, 0] = 2 + 12 * c * x[0] ** 2 - 4 * c * x[1]
            return MatrixOperator(matrix, self.domain, self.range)

    return RosenbrockGradient()
python
{ "resource": "" }
q35470
normalized_scalar_param_list
train
def normalized_scalar_param_list(param, length, param_conv=None,
                                 keep_none=True, return_nonconv=False):
    """Return a list of given length from a scalar parameter.

    A single value is broadcast to a list of identical entries, while a
    sequence of matching length is used as-is. Optionally, each entry is
    passed through a conversion function.

    Parameters
    ----------
    param :
        Input parameter to turn into a list.
    length : nonnegative int
        Desired length of the output list.
    param_conv : callable, optional
        Conversion applied to each list element. ``None`` means no
        conversion.
    keep_none : bool, optional
        If ``True``, ``None`` entries are not converted.
    return_nonconv : bool, optional
        If ``True``, also return the list before conversion.

    Returns
    -------
    plist : list
        Input parameter turned into a list of length ``length``.
    nonconv : list
        Same as ``plist`` but without conversion; only returned if
        ``return_nonconv == True``.

    Examples
    --------
    >>> normalized_scalar_param_list((1, 2, 3), 3)
    [1, 2, 3]
    >>> normalized_scalar_param_list(1, 3)
    [1, 1, 1]
    >>> normalized_scalar_param_list('10', 3, param_conv=int)
    [10, 10, 10]
    >>> normalized_scalar_param_list((1, None, 3.0), 3, param_conv=int)
    [1, None, 3]
    """
    length, length_in = int(length), length
    if length < 0:
        raise ValueError('`length` must be nonnegative, got {}'
                         ''.format(length_in))

    # Wrap into an object array with at least one dimension, then let
    # NumPy broadcasting handle the scalar-vs-sequence distinction.
    param_arr = np.array(param, dtype=object, copy=True, ndmin=1)
    raw_list = list(np.broadcast_to(param_arr, (length,)))
    if len(raw_list) != length:
        raise ValueError('sequence `param` has length {}, expected {}'
                         ''.format(len(raw_list), length))

    if param_conv is None:
        conv_list = list(raw_list)
    else:
        # Convert each entry, optionally passing `None` through.
        conv_list = [p if (p is None and keep_none) else param_conv(p)
                     for p in raw_list]

    if return_nonconv:
        return conv_list, raw_list
    return conv_list
python
{ "resource": "" }
q35471
normalized_index_expression
train
def normalized_index_expression(indices, shape, int_to_slice=False):
    """Enable indexing with almost Numpy-like capabilities.

    Normalizes an index expression for an array of the given ``shape``:

    - general slices and sequences of slices,
    - `Ellipsis` expanded into the right number of ``slice(None)``,
    - fewer indices than axes filled up with full slices,
    - out-of-bounds checking,
    - optional conversion of integer indices into length-1 slices.

    Parameters
    ----------
    indices : int, `slice`, `Ellipsis` or sequence of these
        Index expression to be normalized.
    shape : sequence of ints
        Target shape for error checking of out-of-bounds indices.
        Also needed to determine the number of axes.
    int_to_slice : bool, optional
        If ``True``, turn integers into corresponding slice objects.

    Returns
    -------
    normalized : tuple of ints or `slice`'s
        Normalized index expression.

    Examples
    --------
    >>> normalized_index_expression([1, 2], shape=(3, 4, 5))
    (1, 2, slice(None, None, None))
    >>> normalized_index_expression([1, 2, 3], shape=(3, 4, 5),
    ...                             int_to_slice=True)
    (slice(1, 2, None), slice(2, 3, None), slice(3, 4, None))
    """
    ndim = len(shape)

    # Bring the input into list form; a lone scalar, slice or Ellipsis
    # indexes along the leading axes.
    if np.isscalar(indices):
        idx_list = [indices, Ellipsis]
    elif isinstance(indices, slice) or indices is Ellipsis:
        idx_list = [indices]
    else:
        idx_list = list(indices)

    if len(idx_list) < ndim and Ellipsis not in idx_list:
        idx_list.append(Ellipsis)

    # Expand the (single allowed) Ellipsis into full slices.
    if Ellipsis in idx_list:
        if idx_list.count(Ellipsis) > 1:
            raise ValueError('cannot use more than one Ellipsis.')

        pos = idx_list.index(Ellipsis)
        n_fill = ndim - len(idx_list) + 1
        idx_list[pos:pos + 1] = [slice(None)] * n_fill

    # Bounds-check integer entries, optionally converting them into
    # length-1 slices (only converted entries are written back).
    for ax, (entry, n) in enumerate(zip(idx_list, shape)):
        if not np.isscalar(entry):
            continue
        norm_entry = entry + n if entry < 0 else entry
        if norm_entry >= n:
            raise IndexError('Index {} is out of bounds for axis '
                             '{} with size {}.'
                             ''.format(norm_entry, ax, n))
        if int_to_slice:
            idx_list[ax] = slice(norm_entry, norm_entry + 1)

    # Catch most common errors
    if any(s.start == s.stop and s.start is not None or
           s.start == n
           for s, n in zip(idx_list, shape) if isinstance(s, slice)):
        raise ValueError('Slices with empty axes not allowed.')
    if None in idx_list:
        raise ValueError('creating new axes is not supported.')
    if len(idx_list) > ndim:
        raise IndexError('too may indices: {} > {}.'
                         ''.format(len(idx_list), ndim))

    return tuple(idx_list)
python
{ "resource": "" }
q35472
normalized_nodes_on_bdry
train
def normalized_nodes_on_bdry(nodes_on_bdry, length):
    """Return a list of 2-tuples of bool from the input parameter.

    Normalizes a ``nodes_on_bdry`` parameter that can be a single
    boolean (global), a per-axis sequence of booleans, or a per-axis
    sequence where each entry is itself a (left, right) pair.

    Parameters
    ----------
    nodes_on_bdry : bool or sequence
        Input parameter to be normalized according to the above scheme.
    length : positive int
        Desired length of the returned list.

    Returns
    -------
    normalized : list of 2-tuples of bool
        Normalized list with ``length`` entries, each a 2-tuple.

    Examples
    --------
    >>> normalized_nodes_on_bdry(True, length=2)
    [(True, True), (True, True)]
    >>> normalized_nodes_on_bdry([True, False], length=2)
    [(True, True), (False, False)]
    >>> normalized_nodes_on_bdry([[True, False], False, True], length=3)
    [(True, False), (False, False), (True, True)]
    """
    shape = np.shape(nodes_on_bdry)
    if shape == ():
        # Single global boolean, replicated for every axis and side.
        flag = bool(nodes_on_bdry)
        return [(flag, flag)] * length

    if length == 1 and shape == (2,):
        # One axis with an explicit (left, right) pair.
        return [(bool(nodes_on_bdry[0]), bool(nodes_on_bdry[1]))]

    if len(nodes_on_bdry) == length:
        # Per-axis specification; each entry may be a scalar or a pair.
        result = []
        for i, on_bdry in enumerate(nodes_on_bdry):
            shape_i = np.shape(on_bdry)
            if shape_i == ():
                flag_i = bool(on_bdry)
                result.append((flag_i, flag_i))
            elif shape_i == (2,):
                result.append((bool(on_bdry[0]), bool(on_bdry[1])))
            else:
                raise ValueError('in axis {}: `nodes_on_bdry` has shape {}, '
                                 'expected (2,)'
                                 .format(i, shape_i))
        return result

    raise ValueError('`nodes_on_bdry` has shape {}, expected ({},)'
                     ''.format(shape, length))
python
{ "resource": "" }
q35473
normalized_axes_tuple
train
def normalized_axes_tuple(axes, ndim):
    """Return a tuple of ``axes`` converted to positive integers.

    Negative entries are wrapped around according to standard Python
    indexing "from the right".

    Parameters
    ----------
    axes : int or sequence of ints
        Single integer or integer sequence of arbitrary length.
        Duplicate entries are not allowed. All entries must fulfill
        ``-ndim <= axis <= ndim - 1``.
    ndim : positive int
        Number of available axes determining the valid axis range.

    Returns
    -------
    axes_list : tuple of ints
        The converted tuple of axes.

    Examples
    --------
    >>> normalized_axes_tuple([0, -1, 2], ndim=3)
    (0, 2, 2)
    >>> normalized_axes_tuple(-3, ndim=3)
    (0,)
    """
    try:
        # Scalar case: a single integer becomes a 1-tuple.
        axes, axes_in = (int(axes),), axes
    except TypeError:
        # Sequence case: convert entry-wise, then verify that the
        # conversion did not change any value (rejects e.g. floats).
        axes, axes_in = tuple(int(axis) for axis in axes), axes
        if any(new != old for new, old in zip(axes, axes_in)):
            raise ValueError('`axes` may only contain integers, got {}'
                             ''.format(axes_in))
    else:
        # Reject scalars whose int conversion changed the value.
        if axes[0] != axes_in:
            raise TypeError('`axes` must be integer or sequence, got {}'
                            ''.format(axes_in))

    if len(set(axes)) != len(axes):
        raise ValueError('`axes` may not contain duplicate entries')

    ndim, ndim_in = int(ndim), ndim
    if ndim <= 0:
        raise ValueError('`ndim` must be positive, got {}'.format(ndim_in))

    # Wrap negative entries and check the resulting range.
    wrapped = np.array(axes)
    wrapped[wrapped < 0] += ndim
    if np.any((wrapped < 0) | (wrapped >= ndim)):
        raise ValueError('all `axes` entries must satisfy -{0} <= axis < {0}, '
                         'got {1}'.format(ndim, axes_in))

    return tuple(wrapped)
python
{ "resource": "" }
q35474
safe_int_conv
train
def safe_int_conv(number):
    """Safely convert a single number to integer."""
    try:
        # 'safe' casting rejects any conversion that could lose
        # information, e.g. float -> int.
        safe = np.array(number).astype(int, casting='safe')
    except TypeError:
        raise ValueError('cannot safely convert {} to integer'.format(number))
    else:
        return int(safe)
python
{ "resource": "" }
q35475
astra_cpu_forward_projector
train
def astra_cpu_forward_projector(vol_data, geometry, proj_space, out=None):
    """Run an ASTRA forward projection on the given data using the CPU.

    Parameters
    ----------
    vol_data : `DiscreteLpElement`
        Volume data to which the forward projector is applied
    geometry : `Geometry`
        Geometry defining the tomographic setup
    proj_space : `DiscreteLp`
        Space to which the calling operator maps
    out : ``proj_space`` element, optional
        Element of the projection space to which the result is written. If
        ``None``, an element in ``proj_space`` is created.

    Returns
    -------
    out : ``proj_space`` element
        Projection data resulting from the application of the projector.
        If ``out`` was provided, the returned object is a reference to it.
    """
    # Validate all inputs up front so ASTRA is never called with
    # inconsistent data.
    if not isinstance(vol_data, DiscreteLpElement):
        raise TypeError('volume data {!r} is not a `DiscreteLpElement` '
                        'instance.'.format(vol_data))
    if vol_data.space.impl != 'numpy':
        raise TypeError("`vol_data.space.impl` must be 'numpy', got {!r}"
                        "".format(vol_data.space.impl))
    if not isinstance(geometry, Geometry):
        raise TypeError('geometry  {!r} is not a Geometry instance'
                        ''.format(geometry))
    if not isinstance(proj_space, DiscreteLp):
        raise TypeError('`proj_space` {!r} is not a DiscreteLp '
                        'instance.'.format(proj_space))
    if proj_space.impl != 'numpy':
        raise TypeError("`proj_space.impl` must be 'numpy', got {!r}"
                        "".format(proj_space.impl))
    if vol_data.ndim != geometry.ndim:
        raise ValueError('dimensions {} of volume data and {} of geometry '
                         'do not match'
                         ''.format(vol_data.ndim, geometry.ndim))
    if out is None:
        out = proj_space.element()
    else:
        if out not in proj_space:
            raise TypeError('`out` {} is neither None nor a '
                            'DiscreteLpElement instance'.format(out))

    ndim = vol_data.ndim

    # Create astra geometries
    vol_geom = astra_volume_geometry(vol_data.space)
    proj_geom = astra_projection_geometry(geometry)

    # Create projector; ASTRA supports only one interpolation scheme per
    # volume, so all axes must agree.
    if not all(s == vol_data.space.interp_byaxis[0]
               for s in vol_data.space.interp_byaxis):
        raise ValueError('volume interpolation must be the same in each '
                         'dimension, got {}'.format(vol_data.space.interp))
    vol_interp = vol_data.space.interp
    proj_id = astra_projector(vol_interp, vol_geom, proj_geom, ndim,
                              impl='cpu')

    # Create ASTRA data structures
    vol_data_arr = np.asarray(vol_data)
    vol_id = astra_data(vol_geom, datatype='volume', data=vol_data_arr,
                        allow_copy=True)

    # Expose `out` as a writable float32 C-ordered array for ASTRA to
    # fill in place.
    with writable_array(out, dtype='float32', order='C') as out_arr:
        sino_id = astra_data(proj_geom, datatype='projection', data=out_arr,
                             ndim=proj_space.ndim)
        # Create algorithm
        algo_id = astra_algorithm('forward', ndim, vol_id, sino_id, proj_id,
                                  impl='cpu')

        # Run algorithm
        astra.algorithm.run(algo_id)

    # Delete ASTRA objects to free their memory.
    # NOTE(review): `astra.data2d` implies this CPU code path handles
    # 2d data only — confirm against the calling operator.
    astra.algorithm.delete(algo_id)
    astra.data2d.delete((vol_id, sino_id))
    astra.projector.delete(proj_id)

    return out
python
{ "resource": "" }
q35476
astra_cpu_back_projector
train
def astra_cpu_back_projector(proj_data, geometry, reco_space, out=None):
    """Run an ASTRA back-projection on the given data using the CPU.

    Parameters
    ----------
    proj_data : `DiscreteLpElement`
        Projection data to which the back-projector is applied
    geometry : `Geometry`
        Geometry defining the tomographic setup
    reco_space : `DiscreteLp`
        Space to which the calling operator maps
    out : ``reco_space`` element, optional
        Element of the reconstruction space to which the result is
        written.
        If ``None``, an element in ``reco_space`` is created.

    Returns
    -------
    out : ``reco_space`` element
        Reconstruction data resulting from the application of the
        backward projector. If ``out`` was provided, the returned object
        is a reference to it.
    """
    # Validate all inputs up front so ASTRA is never called with
    # inconsistent data.
    if not isinstance(proj_data, DiscreteLpElement):
        raise TypeError('projection data {!r} is not a DiscreteLpElement '
                        'instance'.format(proj_data))
    if proj_data.space.impl != 'numpy':
        raise TypeError('`proj_data` must be a `numpy.ndarray` based, '
                        "container got `impl` {!r}"
                        "".format(proj_data.space.impl))
    if not isinstance(geometry, Geometry):
        raise TypeError('geometry  {!r} is not a Geometry instance'
                        ''.format(geometry))
    if not isinstance(reco_space, DiscreteLp):
        raise TypeError('reconstruction space {!r} is not a DiscreteLp '
                        'instance'.format(reco_space))
    if reco_space.impl != 'numpy':
        raise TypeError("`reco_space.impl` must be 'numpy', got {!r}"
                        "".format(reco_space.impl))
    if reco_space.ndim != geometry.ndim:
        raise ValueError('dimensions {} of reconstruction space and {} of '
                         'geometry do not match'.format(
                             reco_space.ndim, geometry.ndim))
    if out is None:
        out = reco_space.element()
    else:
        if out not in reco_space:
            raise TypeError('`out` {} is neither None nor a '
                            'DiscreteLpElement instance'.format(out))

    ndim = proj_data.ndim

    # Create astra geometries
    vol_geom = astra_volume_geometry(reco_space)
    proj_geom = astra_projection_geometry(geometry)

    # Create ASTRA data structure
    sino_id = astra_data(proj_geom, datatype='projection', data=proj_data,
                         allow_copy=True)

    # Create projector
    # TODO: implement with different schemes for angles and detector
    if not all(s == proj_data.space.interp_byaxis[0]
               for s in proj_data.space.interp_byaxis):
        raise ValueError('data interpolation must be the same in each '
                         'dimension, got {}'
                         ''.format(proj_data.space.interp_byaxis))
    proj_interp = proj_data.space.interp
    proj_id = astra_projector(proj_interp, vol_geom, proj_geom, ndim,
                              impl='cpu')

    # Convert out to correct dtype and order if needed.
    with writable_array(out, dtype='float32', order='C') as out_arr:
        vol_id = astra_data(vol_geom, datatype='volume', data=out_arr,
                            ndim=reco_space.ndim)
        # Create algorithm
        algo_id = astra_algorithm('backward', ndim, vol_id, sino_id, proj_id,
                                  impl='cpu')

        # Run algorithm
        astra.algorithm.run(algo_id)

    # Weight the adjoint by appropriate weights: ratio of the weighting
    # constants of projection and reconstruction space makes the
    # back-projection the adjoint of the forward projector.
    scaling_factor = float(proj_data.space.weighting.const)
    scaling_factor /= float(reco_space.weighting.const)

    out *= scaling_factor

    # Delete ASTRA objects to free their memory.
    # NOTE(review): `astra.data2d` implies this CPU code path handles
    # 2d data only — confirm against the calling operator.
    astra.algorithm.delete(algo_id)
    astra.data2d.delete((vol_id, sino_id))
    astra.projector.delete(proj_id)

    return out
python
{ "resource": "" }
q35477
_default_call_out_of_place
train
def _default_call_out_of_place(op, x, **kwargs): """Default out-of-place evaluation. Parameters ---------- op : `Operator` Operator to call x : ``op.domain`` element Point in which to call the operator. kwargs: Optional arguments to the operator. Returns ------- out : `range` element An object in the operator range. The result of an operator evaluation. """ out = op.range.element() result = op._call_in_place(x, out, **kwargs) if result is not None and result is not out: raise ValueError('`op` returned a different value than `out`.' 'With in-place evaluation, the operator can ' 'only return nothing (`None`) or the `out` ' 'parameter.') return out
python
{ "resource": "" }
q35478
_function_signature
train
def _function_signature(func): """Return the signature of a callable as a string. Parameters ---------- func : callable Function whose signature to extract. Returns ------- sig : string Signature of the function. """ if sys.version_info.major > 2: # Python 3 already implements this functionality return func.__name__ + str(inspect.signature(func)) # In Python 2 we have to do it manually, unfortunately spec = inspect.getargspec(func) posargs = spec.args defaults = spec.defaults if spec.defaults is not None else [] varargs = spec.varargs kwargs = spec.keywords deflen = 0 if defaults is None else len(defaults) nodeflen = 0 if posargs is None else len(posargs) - deflen args = ['{}'.format(arg) for arg in posargs[:nodeflen]] args.extend('{}={}'.format(arg, dval) for arg, dval in zip(posargs[nodeflen:], defaults)) if varargs: args.append('*{}'.format(varargs)) if kwargs: args.append('**{}'.format(kwargs)) argstr = ', '.join(args) return '{}({})'.format(func.__name__, argstr)
python
{ "resource": "" }
q35479
Operator.norm
train
def norm(self, estimate=False, **kwargs):
    """Return the operator norm of this operator.

    If this operator is non-linear, this should be the Lipschitz
    constant.

    Parameters
    ----------
    estimate : bool
        If true, estimate the operator norm. By default, it is estimated
        using `power_method_opnorm`, which is only applicable for linear
        operators.
        Subclasses are allowed to ignore this parameter if they can
        provide an exact value.

    Other Parameters
    ----------------
    kwargs :
        If ``estimate`` is True, pass these arguments to the
        `power_method_opnorm` call.

    Returns
    -------
    norm : float

    Examples
    --------
    Some operators know their own operator norm and do not need an
    estimate

    >>> spc = odl.rn(3)
    >>> id = odl.IdentityOperator(spc)
    >>> id.norm(True)
    1.0

    For others, there is no closed form expression and an estimate is
    needed:

    >>> spc = odl.uniform_discr(0, 1, 3)
    >>> grad = odl.Gradient(spc)
    >>> opnorm = grad.norm(estimate=True)
    """
    if not estimate:
        raise NotImplementedError('`Operator.norm()` not implemented, use '
                                  '`Operator.norm(estimate=True)` to '
                                  'obtain an estimate.')

    try:
        # Return the cached estimate if one was already computed.
        # BUG FIX: the cache was read with ``getattr(self, '__norm',
        # None)``, but inside the class body the assignment
        # ``self.__norm = ...`` is name-mangled to ``_Operator__norm``,
        # so the string lookup never found the cached value and the
        # norm was re-estimated on every call. Plain attribute access
        # mangles consistently on both read and write.
        return self.__norm
    except AttributeError:
        from odl.operator.oputils import power_method_opnorm
        self.__norm = power_method_opnorm(self, **kwargs)
        return self.__norm
python
{ "resource": "" }
q35480
OperatorSum.derivative
train
def derivative(self, x):
    """Return the operator derivative at ``x``.

    The derivative of a sum of two operators is equal to the sum of
    the derivatives.

    Parameters
    ----------
    x : `domain` `element-like`
        Evaluation point of the derivative
    """
    if self.is_linear:
        # A linear operator is its own derivative.
        return self
    else:
        # Sum rule; the temporaries of this operator are reused for
        # the derivative sum.
        return OperatorSum(self.left.derivative(x),
                           self.right.derivative(x),
                           self.__tmp_dom, self.__tmp_ran)
python
{ "resource": "" }
q35481
OperatorComp.derivative
train
def derivative(self, x):
    """Return the operator derivative.

    The derivative of the operator composition follows the chain rule:

        ``OperatorComp(left, right).derivative(y) ==
        OperatorComp(left.derivative(right(y)), right.derivative(y))``

    Parameters
    ----------
    x : `domain` `element-like`
        Evaluation point of the derivative. Needs to be usable as
        input for the ``right`` operator.
    """
    # A linear operator is its own derivative at every point.
    if self.is_linear:
        return self

    # Only differentiate the left factor when it is non-linear; a
    # linear operator equals its derivative everywhere, so evaluating
    # ``right(x)`` can be skipped in that case.
    if self.left.is_linear:
        left_deriv = self.left
    else:
        left_deriv = self.left.derivative(self.right(x))

    return OperatorComp(left_deriv, self.right.derivative(x), self.__tmp)
python
{ "resource": "" }
q35482
_indent
train
def _indent(x): """Indent a string by 4 characters.""" lines = x.splitlines() for i, line in enumerate(lines): lines[i] = ' ' + line return '\n'.join(lines)
python
{ "resource": "" }
q35483
ProductSpace.shape
train
def shape(self):
    """Total spaces per axis, computed recursively.

    The recursion ends at the first level that does not have a shape.

    Examples
    --------
    >>> r2, r3 = odl.rn(2), odl.rn(3)
    >>> pspace = odl.ProductSpace(r2, r3)
    >>> pspace.shape
    (2,)
    >>> pspace2 = odl.ProductSpace(pspace, 3)
    >>> pspace2.shape
    (3, 2)

    If the space is a "pure" product space, shape recurses all the way
    into the components:

    >>> r2_2 = odl.ProductSpace(r2, 3)
    >>> r2_2.shape
    (3, 2)
    """
    num_spaces = len(self)
    if num_spaces == 0:
        return ()

    sub_shape = ()
    if self.is_power_space:
        # Recurse into the (identical) component spaces if they expose
        # a `shape`; stop the recursion otherwise.
        sub_shape = getattr(self[0], 'shape', ())

    return (num_spaces,) + sub_shape
python
{ "resource": "" }
q35484
ProductSpace.dtype
train
def dtype(self):
    """The data type of this space.

    This is only well defined if all subspaces have the same dtype.

    Raises
    ------
    AttributeError
        If any of the subspaces does not implement `dtype` or if the
        dtype of the subspaces does not match.
    """
    sub_dtypes = [space.dtype for space in self.spaces]
    # A single common dtype must exist for the notion to make sense.
    if any(dt != sub_dtypes[0] for dt in sub_dtypes):
        raise AttributeError("`dtype`'s of subspaces not equal")
    return sub_dtypes[0]
python
{ "resource": "" }
q35485
ProductSpace.element
train
def element(self, inp=None, cast=True): """Create an element in the product space. Parameters ---------- inp : optional If ``inp`` is ``None``, a new element is created from scratch by allocation in the spaces. If ``inp`` is already an element of this space, it is re-wrapped. Otherwise, a new element is created from the components by calling the ``element()`` methods in the component spaces. cast : bool, optional If ``True``, casting is allowed. Otherwise, a ``TypeError`` is raised for input that is not a sequence of elements of the spaces that make up this product space. Returns ------- element : `ProductSpaceElement` The new element Examples -------- >>> r2, r3 = odl.rn(2), odl.rn(3) >>> vec_2, vec_3 = r2.element(), r3.element() >>> r2x3 = ProductSpace(r2, r3) >>> vec_2x3 = r2x3.element() >>> vec_2.space == vec_2x3[0].space True >>> vec_3.space == vec_2x3[1].space True Create an element of the product space >>> r2, r3 = odl.rn(2), odl.rn(3) >>> prod = ProductSpace(r2, r3) >>> x2 = r2.element([1, 2]) >>> x3 = r3.element([1, 2, 3]) >>> x = prod.element([x2, x3]) >>> x ProductSpace(rn(2), rn(3)).element([ [ 1., 2.], [ 1., 2., 3.] ]) """ # If data is given as keyword arg, prefer it over arg list if inp is None: inp = [space.element() for space in self.spaces] if inp in self: return inp if len(inp) != len(self): raise ValueError('length of `inp` {} does not match length of ' 'space {}'.format(len(inp), len(self))) if (all(isinstance(v, LinearSpaceElement) and v.space == space for v, space in zip(inp, self.spaces))): parts = list(inp) elif cast: # Delegate constructors parts = [space.element(arg) for arg, space in zip(inp, self.spaces)] else: raise TypeError('input {!r} not a sequence of elements of the ' 'component spaces'.format(inp)) return self.element_type(self, parts)
python
{ "resource": "" }
q35486
ProductSpace.examples
train
def examples(self):
    """Yield ``(name, element)`` example pairs.

    One pair is generated for each combination in the Cartesian
    product of the per-subspace examples.
    """
    for combo in product(*(spc.examples for spc in self.spaces)):
        name = ', '.join(nm for nm, _ in combo)
        parts = [el for _, el in combo]
        yield (name, self.element(parts))
python
{ "resource": "" }
q35487
ProductSpaceElement.asarray
train
def asarray(self, out=None):
    """Extract the data of this vector as a numpy array.

    Only available if `is_power_space` is True.

    The ordering is such that it commutes with indexing::

        self[ind].asarray() == self.asarray()[ind]

    Parameters
    ----------
    out : `numpy.ndarray`, optional
        Array in which the result should be written in-place.
        Has to be contiguous and of the correct dtype and shape.

    Raises
    ------
    ValueError
        If `is_power_space` is false.

    Examples
    --------
    >>> spc = odl.ProductSpace(odl.rn(3), 2)
    >>> x = spc.element([[ 1.,  2.,  3.],
    ...                  [ 4.,  5.,  6.]])
    >>> x.asarray()
    array([[ 1.,  2.,  3.],
           [ 4.,  5.,  6.]])
    """
    # Only power spaces have a rectangular structure that maps to an
    # array of homogeneous shape and dtype.
    if not self.space.is_power_space:
        raise ValueError('cannot use `asarray` if `space.is_power_space` '
                         'is `False`')

    if out is None:
        out = np.empty(self.shape, self.dtype)

    for i, part in enumerate(self):
        out[i] = np.asarray(part)
    return out
python
{ "resource": "" }
q35488
ProductSpaceElement.real
train
def real(self):
    """Real part of the element.

    The real part can also be set using ``x.real = other``, where
    ``other`` is array-like or scalar.

    The result lives in the real counterpart of this element's space,
    with one part per part of this element.

    Examples
    --------
    >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2))
    >>> x = space.element([[1 + 1j, 2, 3 - 3j],
    ...                    [-1 + 2j, -2 - 3j]])
    >>> x.real
    ProductSpace(rn(3), rn(2)).element([
        [ 1.,  2.,  3.],
        [-1., -2.]
    ])
    """
    return self.space.real_space.element([p.real for p in self.parts])
python
{ "resource": "" }
q35489
ProductSpaceElement.real
train
def real(self, newreal):
    """Setter for the real part.

    This method is invoked by ``x.real = other``.

    Parameters
    ----------
    newreal : array-like or scalar
        Values to be assigned to the real part of this element.

    Raises
    ------
    ValueError
        If ``newreal`` is a sequence whose length does not match the
        number of parts of this element (non-power-space case).
    """
    try:
        iter(newreal)
    except TypeError:
        # `newreal` is not iterable, assume it can be assigned to
        # all indexed parts
        for part in self.parts:
            part.real = newreal
        return

    if self.space.is_power_space:
        try:
            # Set same value in all parts; for array-like parts the
            # assignment broadcasts `newreal` into each part.
            # NOTE(review): if `newreal` is a one-shot iterator, the
            # fallback below would see it partially consumed — assumes
            # `newreal` is a re-iterable sequence; verify with callers.
            for part in self.parts:
                part.real = newreal
        except (ValueError, TypeError):
            # Broadcasting failed: iterate over all parts and set them
            # separately, one entry of `newreal` per part.
            for part, new_re in zip(self.parts, newreal):
                part.real = new_re
            # NOTE(review): this `pass` is dead code (end of branch).
            pass
    elif len(newreal) == len(self):
        # Non-power space: lengths match, assign part-wise.
        for part, new_re in zip(self.parts, newreal):
            part.real = new_re
    else:
        raise ValueError(
            'dimensions of the new real part does not match the space, '
            'got element {} to set real part of {}'.format(newreal, self))
python
{ "resource": "" }
q35490
ProductSpaceElement.imag
train
def imag(self):
    """Imaginary part of the element.

    The imaginary part can also be set using ``x.imag = other``,
    where ``other`` is array-like or scalar.

    The result lives in the real counterpart of this element's space,
    with one part per part of this element.

    Examples
    --------
    >>> space = odl.ProductSpace(odl.cn(3), odl.cn(2))
    >>> x = space.element([[1 + 1j, 2, 3 - 3j],
    ...                    [-1 + 2j, -2 - 3j]])
    >>> x.imag
    ProductSpace(rn(3), rn(2)).element([
        [ 1.,  0., -3.],
        [ 2., -3.]
    ])
    """
    return self.space.real_space.element([p.imag for p in self.parts])
python
{ "resource": "" }
q35491
ProductSpaceElement.conj
train
def conj(self):
    """Return the complex conjugate of this element.

    The conjugate is taken part-wise and wrapped in a new element of
    the same space.
    """
    return self.space.element([part.conj() for part in self.parts])
python
{ "resource": "" }
q35492
ProductSpaceElement.show
train
def show(self, title=None, indices=None, **kwargs):
    """Display the parts of this product space element graphically.

    Parameters
    ----------
    title : string, optional
        Title of the figures
    indices : int, slice, tuple or list, optional
        Display parts of ``self`` in the way described in the
        following.

        A single list of integers selects the corresponding parts
        of this vector. For other tuples or lists, the first entry
        indexes the parts of this vector, and the remaining entries
        (if any) are used to slice into the parts. Handling those
        remaining indices is up to the ``show`` methods of the parts
        to be displayed.

        The types of the first entry trigger the following behaviors:

        - ``int``: take the part corresponding to this index
        - ``slice``: take a subset of the parts
        - ``None``: equivalent to ``slice(None)``, i.e., everything

        Typical use cases are displaying of selected parts, which can
        be achieved with a list, e.g., ``indices=[0, 2]`` for parts
        0 and 2, and plotting of all parts sliced in a certain way,
        e.g., ``indices=[None, 20, None]`` for showing all parts
        sliced with indices ``[20, None]``.

        A single ``int``, ``slice``, ``list`` or ``None`` object
        indexes the parts only, i.e., is treated roughly as
        ``(indices, Ellipsis)``. In particular, for ``None``, all
        parts are shown with default slicing.
    in_figs : sequence of `matplotlib.figure.Figure`, optional
        Update these figures instead of creating new ones. Typically
        the return value of an earlier call to ``show`` is used for
        this parameter.
    kwargs
        Additional arguments passed on to the ``show`` methods of
        the parts.

    Returns
    -------
    figs : tuple of `matplotlib.figure.Figure`
        The resulting figures. In an interactive shell, they are
        automatically displayed.

    See Also
    --------
    odl.discr.lp_discr.DiscreteLpElement.show :
        Display of a discretized function
    odl.space.base_tensors.Tensor.show :
        Display of sequence type data
    odl.util.graphics.show_discrete_data :
        Underlying implementation
    """
    if title is None:
        title = 'ProductSpaceElement'

    if indices is None:
        # Default selection: all parts if there are few, otherwise 4
        # evenly spread parts.
        if len(self) < 5:
            indices = list(range(len(self)))
        else:
            indices = list(np.linspace(0, len(self) - 1, 4, dtype=int))
    else:
        if (isinstance(indices, tuple) or
                (isinstance(indices, list) and
                 not all(isinstance(idx, Integral) for idx in indices))):
            # Tuples or lists containing non-integers index by axis.
            # We use the first index for the current pspace and pass
            # on the rest.
            indices, kwargs['indices'] = indices[0], indices[1:]

        # Support `indices=[None, 0, None]` like syntax (`indices` is
        # the first entry as of now in that case)
        if indices is None:
            indices = slice(None)

        if isinstance(indices, slice):
            # Normalize a slice to an explicit list of part indices
            indices = list(range(*indices.indices(len(self))))
        elif isinstance(indices, Integral):
            indices = [indices]
        else:
            # Use `indices` as-is
            pass

    # `fig` kwarg holds figures from an earlier `show` call to update
    in_figs = kwargs.pop('fig', None)
    in_figs = [None] * len(indices) if in_figs is None else in_figs

    figs = []
    parts = self[indices]
    if len(parts) == 0:
        return ()
    elif len(parts) == 1:
        # Don't extend the title if there is only one plot
        fig = parts[0].show(title=title, fig=in_figs[0], **kwargs)
        figs.append(fig)
    else:
        # Extend titles by indexed part to make them distinguishable
        for i, part, fig in zip(indices, parts, in_figs):
            fig = part.show(title='{}. Part {}'.format(title, i), fig=fig,
                            **kwargs)
            figs.append(fig)

    return tuple(figs)
python
{ "resource": "" }
q35493
ProductSpaceArrayWeighting.inner
train
def inner(self, x1, x2):
    """Calculate the array-weighted inner product of two elements.

    Parameters
    ----------
    x1, x2 : `ProductSpaceElement`
        Elements whose inner product is calculated.

    Returns
    -------
    inner : float or complex
        The inner product of the two provided elements.
    """
    # A weighted inner product only exists in the Hilbert space case.
    if self.exponent != 2.0:
        raise NotImplementedError('no inner product defined for '
                                  'exponent != 2 (got {})'
                                  ''.format(self.exponent))

    # Part-wise inner products collected into a flat array
    part_inners = np.fromiter(
        (a.inner(b) for a, b in zip(x1, x2)),
        dtype=x1[0].space.dtype, count=len(x1))

    # Weighted sum of the part inner products
    weighted = np.dot(part_inners, self.array)
    if is_real_dtype(x1[0].dtype):
        return float(weighted)
    return complex(weighted)
python
{ "resource": "" }
q35494
ProductSpaceArrayWeighting.norm
train
def norm(self, x):
    """Calculate the array-weighted norm of an element.

    Parameters
    ----------
    x : `ProductSpaceElement`
        Element whose norm is calculated.

    Returns
    -------
    norm : float
        The norm of the provided element.
    """
    if self.exponent == 2.0:
        # Hilbert space case: derive the norm from the inner product.
        # TODO: optimize?!
        return np.sqrt(self.inner(x, x).real)

    part_norms = np.fromiter(
        (xi.norm() for xi in x), dtype=np.float64, count=len(x))
    if self.exponent in (1.0, float('inf')):
        weights = self.array
    else:
        # p-th root of the weights enters the part norms
        weights = self.array ** (1.0 / self.exponent)
    return float(np.linalg.norm(part_norms * weights, ord=self.exponent))
python
{ "resource": "" }
q35495
ProductSpaceConstWeighting.inner
train
def inner(self, x1, x2):
    """Calculate the constant-weighted inner product of two elements.

    Parameters
    ----------
    x1, x2 : `ProductSpaceElement`
        Elements whose inner product is calculated.

    Returns
    -------
    inner : float or complex
        The inner product of the two provided elements.
    """
    # A weighted inner product only exists in the Hilbert space case.
    if self.exponent != 2.0:
        raise NotImplementedError('no inner product defined for '
                                  'exponent != 2 (got {})'
                                  ''.format(self.exponent))

    part_inners = np.fromiter(
        (a.inner(b) for a, b in zip(x1, x2)),
        dtype=x1[0].space.dtype, count=len(x1))

    # Scale the summed part inner products by the constant weight and
    # cast to the scalar field of the space.
    return x1.space.field.element(self.const * np.sum(part_inners))
python
{ "resource": "" }
q35496
ProductSpaceConstWeighting.dist
train
def dist(self, x1, x2):
    """Calculate the constant-weighted distance between two elements.

    Parameters
    ----------
    x1, x2 : `ProductSpaceElement`
        Elements whose mutual distance is calculated.

    Returns
    -------
    dist : float
        The distance between the elements.
    """
    diff_norms = np.fromiter(
        ((a - b).norm() for a, b in zip(x1, x2)),
        dtype=np.float64, count=len(x1))

    p_norm = np.linalg.norm(diff_norms, ord=self.exponent)
    if self.exponent == float('inf'):
        # No root is taken for the supremum norm.
        return self.const * p_norm
    return self.const ** (1 / self.exponent) * p_norm
python
{ "resource": "" }
q35497
euler_matrix
train
def euler_matrix(phi, theta=None, psi=None):
    """Rotation matrix in 2 and 3 dimensions.

    Its rows represent the canonical unit vectors as seen from the
    rotated system while the columns are the rotated unit vectors as
    seen from the canonical system.

    Parameters
    ----------
    phi : float or `array-like`
        Either 2D counter-clockwise rotation angle (in radians) or
        first Euler angle.
    theta, psi : float or `array-like`, optional
        Second and third Euler angles in radians. If both are ``None``,
        a 2D rotation matrix is computed. Otherwise a 3D rotation is
        computed, where the default ``None`` is equivalent to ``0.0``.
        The rotation is performed in "ZXZ" rotation order, see the
        Wikipedia article `Euler angles`_.

    Returns
    -------
    mat : `numpy.ndarray`
        Rotation matrix corresponding to the given angles. The
        returned array has shape ``(ndim, ndim)`` if all angles
        represent single parameters, with ``ndim == 2`` for ``phi``
        only and ``ndim == 3`` for 2 or 3 Euler angles.
        If any of the angle parameters is an array, the shape of
        the returned array is ``broadcast(phi, theta, psi).shape +
        (ndim, ndim)``.

    References
    ----------
    .. _Euler angles:
        https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix
    """
    if theta is None and psi is None:
        # 2d case: single in-plane rotation angle
        squeeze_out = (np.shape(phi) == ())
        ndim = 2
        phi = np.array(phi, dtype=float, copy=False, ndmin=1)
        theta = psi = 0.0
    else:
        # 3d case; `None` broadcasts like a scalar
        squeeze_out = (np.broadcast(phi, theta, psi).shape == ())
        ndim = 3
        phi = np.array(phi, dtype=float, copy=False, ndmin=1)
        if theta is None:
            theta = 0.0
        if psi is None:
            psi = 0.0
        theta = np.array(theta, dtype=float, copy=False, ndmin=1)
        psi = np.array(psi, dtype=float, copy=False, ndmin=1)

    cph = np.cos(phi)
    sph = np.sin(phi)
    cth = np.cos(theta)
    sth = np.sin(theta)
    cps = np.cos(psi)
    sps = np.sin(psi)

    if ndim == 2:
        mat = np.array([[cph, -sph],
                        [sph, cph]])
    else:
        # ZXZ Euler rotation matrix; the `0 * ...` terms only serve to
        # give all entries the same broadcast shape for `np.array`.
        mat = np.array([
            [cph * cps - sph * cth * sps,
             -cph * sps - sph * cth * cps,
             sph * sth],
            [sph * cps + cph * cth * sps,
             -sph * sps + cph * cth * cps,
             -cph * sth],
            [sth * sps + 0 * cph,
             sth * cps + 0 * cph,
             cth + 0 * (cph + cps)]])

    if squeeze_out:
        # All scalar inputs: drop the singleton broadcast axis
        return mat.squeeze()
    else:
        # Move the `(ndim, ndim)` axes to the end
        extra_dims = len(np.broadcast(phi, theta, psi).shape)
        newaxes = list(range(2, 2 + extra_dims)) + [0, 1]
        return np.transpose(mat, newaxes)
python
{ "resource": "" }
q35498
axis_rotation
train
def axis_rotation(axis, angle, vectors, axis_shift=(0, 0, 0)):
    """Rotate a vector or an array of vectors around an axis in 3d.

    The rotation is computed by `Rodrigues' rotation formula`_.

    Parameters
    ----------
    axis : `array-like`, shape ``(3,)``
        Rotation axis, assumed to be a unit vector.
    angle : float
        Angle of the counter-clockwise rotation.
    vectors : `array-like`, shape ``(3,)`` or ``(N, 3)``
        The vector(s) to be rotated.
    axis_shift : `array_like`, shape ``(3,)``, optional
        Shift the rotation center by this vector. Note that only
        shifts perpendicular to ``axis`` matter.

    Returns
    -------
    rot_vec : `numpy.ndarray`
        The rotated vector(s).

    References
    ----------
    .. _Rodrigues' rotation formula:
        https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula

    Examples
    --------
    Rotating around the third coordinate axis by and angle of 90
    degrees:

    >>> axis = (0, 0, 1)
    >>> rot1 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0))
    >>> np.allclose(rot1, (0, 1, 0))
    True
    >>> rot2 = axis_rotation(axis, angle=np.pi / 2, vectors=(0, 1, 0))
    >>> np.allclose(rot2, (-1, 0, 0))
    True

    The rotation can be performed with shifted rotation center. A
    shift along the axis does not matter:

    >>> rot3 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0),
    ...                      axis_shift=(0, 0, 2))
    >>> np.allclose(rot3, (0, 1, 0))
    True

    The distance between the rotation center and the vector to be
    rotated determines the radius of the rotation circle:

    >>> # Rotation center in the point to be rotated, should do nothing
    >>> rot4 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0),
    ...                      axis_shift=(1, 0, 0))
    >>> np.allclose(rot4, (1, 0, 0))
    True
    >>> # Distance 2, thus rotates to (0, 2, 0) in the shifted system,
    >>> # resulting in (-1, 2, 0) from shifting back after rotating
    >>> rot5 = axis_rotation(axis, angle=np.pi / 2, vectors=(1, 0, 0),
    ...                      axis_shift=(-1, 0, 0))
    >>> np.allclose(rot5, (-1, 2, 0))
    True

    Rotation of multiple vectors can be done in bulk:

    >>> vectors = [[1, 0, 0], [0, 1, 0]]
    >>> rot = axis_rotation(axis, angle=np.pi / 2, vectors=vectors)
    >>> np.allclose(rot[0], (0, 1, 0))
    True
    >>> np.allclose(rot[1], (-1, 0, 0))
    True
    """
    rot_matrix = axis_rotation_matrix(axis, angle)

    # Normalize `vectors` to shape (N, 3)
    vectors = np.asarray(vectors, dtype=float)
    if vectors.shape == (3,):
        vectors = vectors[None, :]
    elif vectors.ndim == 2 and vectors.shape[1] == 3:
        pass
    else:
        raise ValueError('`vectors` must have shape (3,) or (N, 3), got array '
                         'with shape {}'.format(vectors.shape))

    # Get `axis_shift` part that is perpendicular to `axis`
    # (projection removal; assumes `axis` is a unit vector)
    axis_shift = np.asarray(axis_shift, dtype=float)
    axis = np.asarray(axis, dtype=float)
    axis_shift = axis_shift - axis.dot(axis_shift) * axis

    # Shift vectors with the negative of the axis shift to move the rotation
    # center to the origin. Then rotate and shift back.
    centered_vecs = vectors - axis_shift[None, :]
    # Need to transpose the vectors to make the axis of length 3 come first
    rot_vecs = rot_matrix.dot(centered_vecs.T).T
    return axis_shift[None, :] + rot_vecs
python
{ "resource": "" }
q35499
axis_rotation_matrix
train
def axis_rotation_matrix(axis, angle):
    """Matrix of the rotation around an axis in 3d.

    The matrix is computed according to `Rodriguez' rotation
    formula`_.

    Parameters
    ----------
    axis : `array-like`, shape ``(3,)``
        Rotation axis, assumed to be a unit vector.
    angle : float or `array-like`
        Angle(s) of counter-clockwise rotation.

    Returns
    -------
    mat : `numpy.ndarray`, shape ``(3, 3)``
        The axis rotation matrix.

    References
    ----------
    .. _Rodriguez' rotation formula:
        https://en.wikipedia.org/wiki/Rodrigues'_rotation_formula
    """
    scalar_out = (np.shape(angle) == ())

    axis = np.asarray(axis)
    if axis.shape != (3,):
        raise ValueError('`axis` shape must be (3,), got {}'
                         ''.format(axis.shape))

    angle = np.array(angle, dtype=float, copy=False, ndmin=1)

    # Rodrigues: R = cos(a) I + (1 - cos(a)) (n n^T) + sin(a) [n]_x
    cross_mat = np.array([[0, -axis[2], axis[1]],
                          [axis[2], 0, -axis[0]],
                          [-axis[1], axis[0], 0]])
    outer_mat = np.outer(axis, axis)
    eye_mat = np.eye(3)

    cos_a = np.cos(angle)
    sin_a = np.sin(angle)

    # Insert singleton axes so matrices broadcast with shape
    # (1, ..., 1, 3, 3) against angles of shape (..., 1, 1).
    n_extra = cos_a.ndim
    mat_slc = (None,) * n_extra + (slice(None), slice(None))
    ang_slc = (slice(None),) * n_extra + (None, None)

    result = (cos_a[ang_slc] * eye_mat[mat_slc] +
              (1. - cos_a[ang_slc]) * outer_mat[mat_slc] +
              sin_a[ang_slc] * cross_mat[mat_slc])

    return result.squeeze() if scalar_out else result
python
{ "resource": "" }