_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q35600
SamplingOperator._call
train
def _call(self, x):
    """Extract the values of ``x`` at the sampling indices.

    For the ``'integrate'`` variant, the values are scaled by the cell
    volume of the domain (if it defines one).
    """
    sampled = x.asarray().ravel()[self._indices_flat]

    variant = self.variant
    if variant == 'point_eval':
        weight = 1.0
    elif variant == 'integrate':
        # Integration scales point values by the cell volume; spaces
        # without a `cell_volume` attribute fall back to no scaling.
        weight = getattr(self.domain, 'cell_volume', 1.0)
    else:
        raise RuntimeError('bad variant {!r}'.format(variant))

    if weight != 1.0:
        sampled *= weight
    return sampled
python
{ "resource": "" }
q35601
SamplingOperator.adjoint
train
def adjoint(self):
    """Adjoint of the sampling operator, a `WeightedSumSamplingOperator`.

    If each sampling point occurs only once, the adjoint consists
    in inserting the given values into the output at the sampling
    points. Duplicate sampling points are weighted with their
    multiplicity.

    Examples
    --------
    >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
    >>> sampling_points = [[0, 1, 1, 0],
    ...                    [0, 1, 2, 0]]
    >>> op = odl.SamplingOperator(space, sampling_points)
    >>> x = space.element([[1, 2, 3],
    ...                    [4, 5, 6]])
    >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
    True

    The ``'integrate'`` variant adjoint puts ones at the indices in
    ``sampling_points``, multiplied by their multiplicity:

    >>> op = odl.SamplingOperator(space, sampling_points,
    ...                           variant='integrate')
    >>> op.adjoint(op.range.one())  # (0, 0) occurs twice
    uniform_discr([-1., -1.], [ 1.,  1.], (2, 3)).element(
        [[ 2.,  0.,  0.],
         [ 0.,  1.,  1.]]
    )
    >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
    True
    """
    # 'point_eval' pairs with 'dirac', 'integrate' with 'char_fun'.
    adj_variant = {'point_eval': 'dirac',
                   'integrate': 'char_fun'}.get(self.variant)
    if adj_variant is None:
        raise RuntimeError('bad variant {!r}'.format(self.variant))

    return WeightedSumSamplingOperator(self.domain, self.sampling_points,
                                       adj_variant)
python
{ "resource": "" }
q35602
WeightedSumSamplingOperator._call
train
def _call(self, x):
    """Accumulate ``x`` at the sampling indices, summing duplicates."""
    summed = np.bincount(self._indices_flat, weights=x,
                         minlength=self.range.size)
    out = summed.reshape(self.range.shape)

    variant = self.variant
    if variant == 'char_fun':
        scale = 1.0
    elif variant == 'dirac':
        # Dirac insertion divides by the cell volume (if defined).
        scale = getattr(self.range, 'cell_volume', 1.0)
    else:
        raise RuntimeError('The variant "{!r}" is not yet supported'
                           ''.format(variant))

    if scale != 1.0:
        out /= scale
    return out
python
{ "resource": "" }
q35603
WeightedSumSamplingOperator.adjoint
train
def adjoint(self):
    """Adjoint of this operator, a `SamplingOperator`.

    The ``'char_fun'`` variant of this operator corresponds to the
    ``'integrate'`` sampling operator, and ``'dirac'`` corresponds to
    ``'point_eval'``.

    Examples
    --------
    >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 3))
    >>> # Point (0, 0) occurs twice
    >>> sampling_points = [[0, 1, 1, 0],
    ...                    [0, 1, 2, 0]]
    >>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
    ...                                      variant='dirac')
    >>> y = op.range.element([[1, 2, 3],
    ...                       [4, 5, 6]])
    >>> op.adjoint(y)
    rn(4).element([ 1.,  5.,  6.,  1.])
    >>> x = op.domain.element([1, 2, 3, 4])
    >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
    True
    >>> op = odl.WeightedSumSamplingOperator(space, sampling_points,
    ...                                      variant='char_fun')
    >>> abs(op.adjoint(op(x)).inner(x) - op(x).inner(op(x))) < 1e-10
    True
    """
    # 'dirac' pairs with 'point_eval', 'char_fun' with 'integrate'.
    adj_variant = {'dirac': 'point_eval',
                   'char_fun': 'integrate'}.get(self.variant)
    if adj_variant is None:
        raise RuntimeError('The variant "{!r}" is not yet supported'
                           ''.format(self.variant))

    return SamplingOperator(self.range, self.sampling_points, adj_variant)
python
{ "resource": "" }
q35604
FlatteningOperator.inverse
train
def inverse(self):
    """Operator that reshapes to original shape.

    Examples
    --------
    >>> space = odl.uniform_discr([-1, -1], [1, 1], shape=(2, 4))
    >>> op = odl.FlatteningOperator(space)
    >>> y = op.range.element([1, 2, 3, 4, 5, 6, 7, 8])
    >>> op.inverse(y)
    uniform_discr([-1., -1.], [ 1.,  1.], (2, 4)).element(
        [[ 1.,  2.,  3.,  4.],
         [ 5.,  6.,  7.,  8.]]
    )
    >>> op = odl.FlatteningOperator(space, order='F')
    >>> op.inverse(y)
    uniform_discr([-1., -1.], [ 1.,  1.], (2, 4)).element(
        [[ 1.,  3.,  5.,  7.],
         [ 2.,  4.,  6.,  8.]]
    )
    >>> op(op.inverse(y)) == y
    True
    """
    # Captured by the nested class below.
    parent = self
    vol_scaling = getattr(self.domain, 'cell_volume', 1.0)

    class FlatteningOperatorInverse(Operator):

        """Inverse of `FlatteningOperator`.

        This operator reshapes a flat vector back to original shape::

            FlatteningOperatorInverse(x) == reshape(x, orig_shape)
        """

        def __init__(self):
            """Initialize a new instance."""
            super(FlatteningOperatorInverse, self).__init__(
                parent.range, parent.domain, linear=True)

        def _call(self, x):
            """Reshape ``x`` back to n-dim. shape."""
            return np.reshape(x.asarray(), self.range.shape,
                              order=parent.order)

        def adjoint(self):
            """Adjoint of this operator, a scaled `FlatteningOperator`."""
            return vol_scaling * parent

        def inverse(self):
            """Inverse of this operator."""
            return parent

        def __repr__(self):
            """Return ``repr(self)``."""
            return '{!r}.inverse'.format(parent)

        def __str__(self):
            """Return ``str(self)``."""
            return repr(self)

    return FlatteningOperatorInverse()
python
{ "resource": "" }
q35605
MRCHeaderProperties.data_shape
train
def data_shape(self):
    """Shape tuple of the whole data block as determined from `header`.

    If no header is available (i.e., before it has been initialized),
    or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1
    is returned, which makes reshaping a no-op. Otherwise, the
    returned shape is ``(nx, ny, nz)``.

    Note: this is the shape of the data as defined by the header. For
    a non-trivial axis ordering, the shape of actual data will be
    different.

    See Also
    --------
    data_storage_shape
    data_axis_order
    """
    header = self.header
    if not header:
        return -1
    try:
        sizes = [header[key]['value'] for key in ('nx', 'ny', 'nz')]
    except KeyError:
        # Incomplete header: signal "unknown shape" with -1.
        return -1
    return tuple(int(n) for n in sizes)
python
{ "resource": "" }
q35606
MRCHeaderProperties.data_storage_shape
train
def data_storage_shape(self):
    """Shape tuple of the data as stored in the file.

    If no header is available (i.e., before it has been initialized),
    or any of the header entries ``'nx', 'ny', 'nz'`` is missing, -1
    is returned, which makes reshaping a no-op.

    Otherwise, the returned shape is a permutation of `data_shape`,
    i.e., ``(nx, ny, nz)``, according to `data_axis_order` in the
    following way::

        data_shape[i] == data_storage_shape[data_axis_order[i]]

    See Also
    --------
    data_shape
    data_axis_order
    """
    shape = self.data_shape
    if shape == -1:
        return -1
    # Inverse permutation of `data_axis_order` maps the stated shape
    # to the on-disk storage shape.
    inv_order = np.argsort(self.data_axis_order)
    return tuple(shape[ax] for ax in inv_order)
python
{ "resource": "" }
q35607
MRCHeaderProperties.data_dtype
train
def data_dtype(self):
    """Data type of the data block as determined from `header`.

    If no header is available (i.e., before it has been initialized),
    or the header entry ``'mode'`` is missing, the data type gained
    from the ``dtype`` argument in the initializer is returned.
    Otherwise, it is determined from ``mode``.
    """
    if not self.header:
        return self._init_data_dtype

    try:
        mode = int(self.header['mode']['value'])
    except KeyError:
        # No (complete) 'mode' entry: fall back to the initializer dtype.
        return self._init_data_dtype

    try:
        return MRC_MODE_TO_NPY_DTYPE[mode]
    except KeyError:
        raise ValueError('data mode {} not supported'.format(mode))
python
{ "resource": "" }
q35608
MRCHeaderProperties.cell_sides_angstrom
train
def cell_sides_angstrom(self):
    """Array of sizes of a unit cell in Angstroms.

    The value is determined from the ``'cella'`` entry in `header`,
    divided elementwise by `data_shape`.
    """
    cella = np.asarray(self.header['cella']['value'], dtype=float)
    return cella / self.data_shape
python
{ "resource": "" }
q35609
MRCHeaderProperties.labels
train
def labels(self):
    """Return the 10-tuple of text labels from `header`.

    The value is determined from the header entries ``'nlabl'`` and
    ``'label'``.
    """
    raw = self.header['label']['value']
    all_labels = tuple(''.join(row.astype(str)) for row in raw)

    try:
        num = int(self.header['nlabl']['value'])
    except KeyError:
        num = len(all_labels)

    # If nontrivial labels exist past the number given in the header,
    # the 'nlabl' entry is unreliable; return all labels in that case.
    extra_nonempty = any(lbl.strip() for lbl in all_labels[num:])
    return all_labels if extra_nonempty else all_labels[:num]
python
{ "resource": "" }
q35610
FileReaderMRC.read_extended_header
train
def read_extended_header(self, groupby='field', force_type=''):
    """Read the extended header according to `extended_header_type`.

    Currently, only the FEI extended header format is supported. The
    extended header usually has one header section per image (slice),
    in case of the FEI header 128 bytes each, with a total of 1024
    sections.

    Parameters
    ----------
    groupby : {'field', 'section'}, optional
        How to group the values in the extended header sections.

        ``'field'`` : make an array per section field, e.g.::

            'defocus': [dval1, dval2, ..., dval1024],
            'exp_time': [tval1, tval2, ..., tval1024],
            ...

        ``'section'`` : make a dictionary for each section, e.g.::

            {'defocus': dval1, 'exp_time': tval1},
            {'defocus': dval2, 'exp_time': tval2},
            ...

        If the number of images is smaller than 1024, the last values
        are all set to zero.
    force_type : string, optional
        If given, this value overrides the `extended_header_type`
        from `header`. Currently supported: ``'FEI1'``

    Returns
    -------
    ext_header : `OrderedDict` or tuple
        For ``groupby == 'field'``, a dictionary with the field names
        as keys. For ``groupby == 'section'``, a tuple of
        dictionaries as shown above. The returned data structures
        store no offsets, in contrast to the regular header.

    References
    ----------
    .. _this homepage: http://www.2dx.unibas.ch/documentation/\
mrc-software/fei-extended-mrc-format-not-used-by-2dx
    """
    ext_header_type = str(force_type).upper() or self.extended_header_type
    if ext_header_type != 'FEI1':
        # BUGFIX: report the type that was actually checked. The old
        # code printed `self.extended_header_type` even when a given
        # `force_type` caused the mismatch, which was misleading.
        raise ValueError("extended header type '{}' not supported"
                         "".format(ext_header_type))

    groupby, groupby_in = str(groupby).lower(), groupby

    # Sanity-check the extended header length against the section size.
    ext_header_len = int(self.header['nsymbt']['value'])
    if ext_header_len % MRC_FEI_SECTION_SIZE:
        raise ValueError('extended header length {} from header is '
                         'not divisible by extended header section size '
                         '{}'.format(ext_header_len, MRC_FEI_SECTION_SIZE))
    num_sections = ext_header_len // MRC_FEI_SECTION_SIZE
    if num_sections != MRC_FEI_NUM_SECTIONS:
        raise ValueError('calculated number of sections ({}) not equal to '
                         'expected number of sections ({})'
                         ''.format(num_sections, MRC_FEI_NUM_SECTIONS))

    section_fields = header_fields_from_table(
        MRC_FEI_EXT_HEADER_SECTION, keys=MRC_SPEC_KEYS,
        dtype_map=MRC_DTYPE_TO_NPY_DTYPE)

    # Make a list for each field and append the values for that
    # field. Then create an array from that list and store it
    # under the field name.
    ext_header = OrderedDict()
    for field in section_fields:
        value_list = []
        field_offset = field['offset']
        field_dtype = field['dtype']
        field_dshape = field['dshape']

        # Byte size and `struct` format string of one field entry.
        num_items = int(np.prod(field_dshape))
        size_bytes = num_items * field_dtype.itemsize
        fmt = '{}{}'.format(num_items, field_dtype.char)

        for section in range(num_sections):
            # Get the bytestring from the right position in the file,
            # unpack it and append the value to the list.
            # NOTE(review): `start` is relative to the file start; this
            # assumes the field offsets already account for the regular
            # header -- confirm against the FEI1 spec table.
            start = section * MRC_FEI_SECTION_SIZE + field_offset
            self.file.seek(start)
            packed_value = self.file.read(size_bytes)
            value_list.append(struct.unpack(fmt, packed_value))

        ext_header[field['name']] = np.array(value_list, dtype=field_dtype)

    if groupby == 'field':
        return ext_header
    elif groupby == 'section':
        # Transpose the data and return as tuple: one dict per section.
        return tuple({key: ext_header[key][i] for key in ext_header}
                     for i in range(num_sections))
    else:
        raise ValueError("`groupby` '{}' not understood"
                         "".format(groupby_in))
python
{ "resource": "" }
q35611
FileReaderMRC.read_data
train
def read_data(self, dstart=None, dend=None, swap_axes=True):
    """Read the data from `file` and return it as Numpy array.

    Parameters
    ----------
    dstart : int, optional
        Offset in bytes of the data field. By default, it is equal
        to ``header_size``. Backwards indexing with negative values
        is also supported. Use a value larger than the header size
        to extract a data subset.
    dend : int, optional
        End position in bytes until which data is read (exclusive).
        Backwards indexing with negative values is also supported.
        Use a value different from the file size to extract a data
        subset.
    swap_axes : bool, optional
        If ``True``, use `data_axis_order` to swap the axes in the
        returned array. In that case, the shape of the array may no
        longer agree with `data_storage_shape`.

    Returns
    -------
    data : `numpy.ndarray`
        The data read from `file`.
    """
    data = super(FileReaderMRC, self).read_data(dstart, dend)
    # BUGFIX: the raw data is laid out in the file with the *storage*
    # shape, i.e., `data_shape` permuted by `data_axis_order` (see
    # `data_storage_shape`). Reshaping to `data_shape` directly would
    # make the transposition below produce the wrong shape for any
    # non-identity axis order.
    data = data.reshape(self.data_storage_shape, order='F')
    if swap_axes:
        # Permute axes so that the result has shape `data_shape`.
        data = np.transpose(data, axes=self.data_axis_order)
        assert data.shape == self.data_shape
    return data
python
{ "resource": "" }
q35612
dedent
train
def dedent(string, indent_str='    ', max_levels=None):
    """Revert the effect of indentation.

    Parameters
    ----------
    string : str
        Text to dedent.
    indent_str : str, optional
        String that constitutes one indentation level.
    max_levels : int, optional
        Maximum number of indentation levels to remove. By default,
        the full common indentation is removed.

    Examples
    --------
    Remove a simple one-level indentation:

    >>> text = '''<->This is line 1.
    ... <->Next line.
    ... <->And another one.'''
    >>> print(dedent(text, '<->'))
    This is line 1.
    Next line.
    And another one.

    Multiple levels of indentation:

    >>> text = '''<->Level 1.
    ... <-><->Level 2.
    ... <-><-><->Level 3.'''
    >>> print(dedent(text, '<->'))
    Level 1.
    <->Level 2.
    <-><->Level 3.
    >>> text = '''<-><->Level 2.
    ... <-><-><->Level 3.'''
    >>> print(dedent(text, '<->'))
    Level 2.
    <->Level 3.
    >>> print(dedent(text, '<->', max_levels=1))
    <->Level 2.
    <-><->Level 3.
    """
    if len(indent_str) == 0:
        return string

    lines = string.splitlines()

    def num_indents(line):
        """Number of leading repetitions of ``indent_str`` in ``line``."""
        # BUGFIX: the old ceil/range/break implementation raised
        # UnboundLocalError for empty lines and undercounted lines
        # consisting entirely of indentation by one level.
        count = 0
        while line.startswith(indent_str):
            count += 1
            line = line[len(indent_str):]
        return count

    # Common (minimum) number of indentation levels, capped at
    # `max_levels` if given. Empty lines carry no indentation
    # information and are ignored here.
    indent_levels = [num_indents(line) for line in lines if line]
    num_levels = min(indent_levels) if indent_levels else 0
    if max_levels is not None:
        num_levels = min(num_levels, max_levels)

    # Dedent
    dedent_len = num_levels * len(indent_str)
    return '\n'.join(line[dedent_len:] for line in lines)
python
{ "resource": "" }
q35613
array_str
train
def array_str(a, nprint=6):
    """Stringification of an array.

    Parameters
    ----------
    a : `array-like`
        The array to print.
    nprint : int, optional
        Maximum number of elements to print per axis in ``a``. For
        larger arrays, a summary is printed, with ``nprint // 2``
        elements on each side and ``...`` in the middle (per axis).

    Examples
    --------
    >>> print(array_str(np.arange(4)))
    [0, 1, 2, 3]
    >>> print(array_str(np.arange(10)))
    [0, 1, 2, ..., 7, 8, 9]
    >>> print(array_str(np.arange(10), nprint=10))
    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    >>> print(array_str(np.array([])))  # 1D, size=0
    []
    >>> print(array_str(np.array(1.0)))  # 0D, size=1
    1.0

    Small deviations from round numbers will be suppressed:

    >>> # 2.0000000000000004 in double precision
    >>> print(array_str((np.array([2.0]) ** 0.5) ** 2))
    [ 2.]
    """
    a = np.asarray(a)
    # Cap each axis at `nprint` to decide when summarization kicks in.
    capped_shape = tuple(min(n, nprint) for n in a.shape)
    with npy_printoptions(threshold=int(np.prod(capped_shape)),
                          edgeitems=nprint // 2,
                          suppress=True):
        return np.array2string(a, separator=', ')
python
{ "resource": "" }
q35614
dtype_repr
train
def dtype_repr(dtype):
    """Stringify ``dtype`` for ``repr`` with default for int and float."""
    dtype = np.dtype(dtype)
    # Platform-default scalar dtypes get their generic Python names.
    for py_type, generic_name in ((int, "'int'"),
                                  (float, "'float'"),
                                  (complex, "'complex'")):
        if dtype == np.dtype(py_type):
            return generic_name
    if dtype.shape:
        # Subarray dtype: show base dtype and shape.
        return "('{}', {})".format(dtype.base, dtype.shape)
    return "'{}'".format(dtype)
python
{ "resource": "" }
q35615
is_numeric_dtype
train
def is_numeric_dtype(dtype):
    """Return ``True`` if ``dtype`` is a numeric type."""
    dtype = np.dtype(dtype)
    # `np.issubsctype` was deprecated in NumPy 1.26 and removed in 2.0;
    # `np.issubdtype` on the scalar base dtype is the supported
    # equivalent (`base` unwraps subarray dtypes).
    return np.issubdtype(dtype.base, np.number)
python
{ "resource": "" }
q35616
is_int_dtype
train
def is_int_dtype(dtype):
    """Return ``True`` if ``dtype`` is an integer type."""
    dtype = np.dtype(dtype)
    # `np.issubsctype` was deprecated in NumPy 1.26 and removed in 2.0;
    # `np.issubdtype` on the scalar base dtype is the supported
    # equivalent (`base` unwraps subarray dtypes).
    return np.issubdtype(dtype.base, np.integer)
python
{ "resource": "" }
q35617
is_real_floating_dtype
train
def is_real_floating_dtype(dtype):
    """Return ``True`` if ``dtype`` is a real floating point type."""
    dtype = np.dtype(dtype)
    # `np.issubsctype` was deprecated in NumPy 1.26 and removed in 2.0;
    # `np.issubdtype` on the scalar base dtype is the supported
    # equivalent (`base` unwraps subarray dtypes).
    return np.issubdtype(dtype.base, np.floating)
python
{ "resource": "" }
q35618
is_complex_floating_dtype
train
def is_complex_floating_dtype(dtype):
    """Return ``True`` if ``dtype`` is a complex floating point type."""
    dtype = np.dtype(dtype)
    # `np.issubsctype` was deprecated in NumPy 1.26 and removed in 2.0;
    # `np.issubdtype` on the scalar base dtype is the supported
    # equivalent (`base` unwraps subarray dtypes).
    return np.issubdtype(dtype.base, np.complexfloating)
python
{ "resource": "" }
q35619
real_dtype
train
def real_dtype(dtype, default=None):
    """Return the real counterpart of ``dtype`` if existing.

    Parameters
    ----------
    dtype :
        Real or complex floating point data type. It can be given in any
        way the `numpy.dtype` constructor understands.
    default :
        Object to be returned if no real counterpart is found for
        ``dtype``, except for ``None``, in which case an error is raised.

    Returns
    -------
    real_dtype : `numpy.dtype`
        The real counterpart of ``dtype``.

    Raises
    ------
    ValueError
        if there is no real counterpart to the given data type and
        ``default == None``.

    See Also
    --------
    complex_dtype

    Examples
    --------
    >>> real_dtype(complex)
    dtype('float64')
    >>> real_dtype('complex64')
    dtype('float32')
    >>> real_dtype(float)
    dtype('float64')
    >>> real_dtype(np.dtype((complex, (3,))))
    dtype(('<f8', (3,)))
    """
    dtype, dtype_in = np.dtype(dtype), dtype

    # Already real: nothing to do.
    if is_real_floating_dtype(dtype):
        return dtype

    real_base = TYPE_MAP_C2R.get(dtype.base)
    if real_base is not None:
        # Preserve a possible subarray shape.
        return np.dtype((real_base, dtype.shape))
    if default is not None:
        return default
    raise ValueError('no real counterpart exists for `dtype` {}'
                     ''.format(dtype_repr(dtype_in)))
python
{ "resource": "" }
q35620
complex_dtype
train
def complex_dtype(dtype, default=None):
    """Return complex counterpart of ``dtype`` if existing, else ``default``.

    Parameters
    ----------
    dtype :
        Real or complex floating point data type. It can be given in any
        way the `numpy.dtype` constructor understands.
    default :
        Object to be returned if no complex counterpart is found for
        ``dtype``, except for ``None``, in which case an error is raised.

    Returns
    -------
    complex_dtype : `numpy.dtype`
        The complex counterpart of ``dtype``.

    Raises
    ------
    ValueError
        if there is no complex counterpart to the given data type and
        ``default == None``.

    Examples
    --------
    >>> complex_dtype(float)
    dtype('complex128')
    >>> complex_dtype('float32')
    dtype('complex64')
    >>> complex_dtype(complex)
    dtype('complex128')
    >>> complex_dtype(np.dtype((float, (3,))))
    dtype(('<c16', (3,)))
    """
    dtype, dtype_in = np.dtype(dtype), dtype

    # Already complex: nothing to do.
    if is_complex_floating_dtype(dtype):
        return dtype

    complex_base = TYPE_MAP_R2C.get(dtype.base)
    if complex_base is not None:
        # Preserve a possible subarray shape.
        return np.dtype((complex_base, dtype.shape))
    if default is not None:
        return default
    raise ValueError('no complex counterpart exists for `dtype` {}'
                     ''.format(dtype_repr(dtype_in)))
python
{ "resource": "" }
q35621
preload_first_arg
train
def preload_first_arg(instance, mode):
    """Decorator to preload the first argument of a call method.

    Parameters
    ----------
    instance :
        Class instance to preload the call with
    mode : {'out-of-place', 'in-place'}
        'out-of-place': call is out-of-place -- ``f(x, **kwargs)``
        'in-place': call is in-place -- ``f(x, out, **kwargs)``

    Notes
    -----
    The decorated function has the signature according to ``mode``.

    Examples
    --------
    >>> class A(object):
    ...     '''My name is A.'''
    >>> a = A()
    >>> def f_oop(inst, x):
    ...     print(inst.__doc__)
    >>> f_oop_new = preload_first_arg(a, 'out-of-place')(f_oop)
    >>> f_oop_new(0)
    My name is A.

    The function's name and docstring are preserved by ``wraps``.
    """
    def decorator(call):
        # Build only the wrapper matching `mode`; the check happens at
        # decoration time, as before.
        if mode == 'out-of-place':
            @wraps(call)
            def wrapper(x, **kwargs):
                return call(instance, x, **kwargs)
        elif mode == 'in-place':
            @wraps(call)
            def wrapper(x, out, **kwargs):
                return call(instance, x, out, **kwargs)
        else:
            raise ValueError('bad mode {!r}'.format(mode))
        return wrapper

    return decorator
python
{ "resource": "" }
q35622
signature_string
train
def signature_string(posargs, optargs, sep=', ', mod='!r'):
    """Return a stringified signature from given arguments.

    Parameters
    ----------
    posargs : sequence
        Positional argument values, always included in the returned
        string.
    optargs : sequence of 3-tuples
        Optional arguments given as ``(name, value, default)`` tuples.
        Only entries whose ``value`` differs from ``default`` (compared
        with ``==``, which is not valid for, e.g., NumPy arrays) appear
        in the result, as ``name=value`` pairs.
    sep : string or sequence of strings, optional
        Separator(s) for the argument strings. A single string is used
        for all joining operations; a 3-sequence
        ``(pos_sep, opt_sep, part_sep)`` gives the separators for the
        positional part, the optional part, and between the two.
    mod : string or callable or sequence, optional
        Format modifier(s) for the argument strings; see
        `signature_string_parts` for the accepted forms. The default
        applies the ``'!r'`` (``repr``) conversion. For floating point
        scalars, the number of printed digits follows NumPy's
        ``precision`` print option, temporarily adjustable with
        `npy_printoptions`.

    Returns
    -------
    signature : string
        Stringification of a signature, typically used in the form::

            '{}({})'.format(self.__class__.__name__, signature)

    Examples
    --------
    >>> posargs = [1, 'hello', None]
    >>> optargs = [('dtype', 'float32', 'float64')]
    >>> signature_string(posargs, optargs)
    "1, 'hello', None, dtype='float32'"
    >>> posargs = []
    >>> optargs = [('size', 1, 1)]
    >>> signature_string(posargs, optargs)
    ''
    """
    # Resolve the three separators from `sep`.
    if is_string(sep):
        pos_sep = opt_sep = part_sep = sep
    else:
        pos_sep, opt_sep, part_sep = sep

    # Stringify both argument groups.
    pos_strs, opt_strs = signature_string_parts(posargs, optargs, mod)

    # Join non-empty groups, then join the groups themselves.
    joined_parts = []
    if pos_strs:
        joined_parts.append(pos_sep.join(pos_strs))
    if opt_strs:
        joined_parts.append(opt_sep.join(opt_strs))
    return part_sep.join(joined_parts)
python
{ "resource": "" }
q35623
_separators
train
def _separators(strings, linewidth):
    """Return separators that keep joined strings within the line width.

    Returns a tuple of ``len(strings) - 1`` separators, each either
    ``', '`` (continue on the same line) or ``',\\n'`` (break to a new
    line), chosen greedily while tracking the current line length.
    Strings that themselves contain a newline are always surrounded by
    line breaks. An empty tuple is returned for 0 or 1 strings.
    """
    if len(strings) <= 1:
        return ()

    # Continuation lines are assumed to be indented by 4 spaces.
    indent_len = 4
    separators = []

    # Account for the first string plus the comma that follows it.
    cur_line_len = indent_len + len(strings[0]) + 1
    if cur_line_len + 2 <= linewidth and '\n' not in strings[0]:
        # Next string might fit on same line
        separators.append(', ')
        cur_line_len += 1  # for the extra space
    else:
        # Use linebreak if string contains newline or doesn't fit
        separators.append(',\n')
        cur_line_len = indent_len

    for i, s in enumerate(strings[1:-1]):
        cur_line_len += len(s) + 1
        if '\n' in s:
            # Use linebreak before and after if string contains newline
            separators[i] = ',\n'
            cur_line_len = indent_len
            separators.append(',\n')
        elif cur_line_len + 2 <= linewidth:
            # This string fits, next one might also fit on same line
            separators.append(', ')
            cur_line_len += 1  # for the extra space
        elif cur_line_len <= linewidth:
            # This string fits, but next one won't
            separators.append(',\n')
            cur_line_len = indent_len
        else:
            # This string doesn't fit but has no newlines in it
            separators[i] = ',\n'
            cur_line_len = indent_len + len(s) + 1
            # Need to determine again what should come next
            if cur_line_len + 2 <= linewidth:
                # Next string might fit on same line
                separators.append(', ')
            else:
                separators.append(',\n')

    cur_line_len += len(strings[-1])
    if cur_line_len + 1 > linewidth or '\n' in strings[-1]:
        # This string and a comma don't fit on this line
        separators[-1] = ',\n'
    return tuple(separators)
python
{ "resource": "" }
q35624
repr_string
train
def repr_string(outer_string, inner_strings, allow_mixed_seps=True):
    r"""Assemble a line-width-aware string for a class' ``__repr__``.

    Produces ``outer_string(arg1, arg2, opt1=..., ...)``, breaking onto
    multiple (indented) lines when the one-line form would exceed the line
    width from NumPy's print options (see `numpy.get_printoptions`).

    Parameters
    ----------
    outer_string : str
        Name printed outside the parentheses.
    inner_strings : sequence of sequence of str
        Stringified positional and optional arguments, usually as produced
        by `signature_string_parts`.
    allow_mixed_seps : bool, optional
        If ``False``, a multi-line result separates *every* argument with
        ``',\n'``; otherwise ``', '`` and ``',\n'`` are mixed to pack as
        much as possible on each line.

    Returns
    -------
    repr_string : str
        Full string usable as the return value of ``__repr__``.
    """
    linewidth = np.get_printoptions()['linewidth']
    pos_strings, opt_strings = inner_strings

    def joined(strs, seps):
        # Fold `strs` left-to-right using one separator per gap.
        if not strs:
            return ''
        acc = strs[0]
        for nxt, sep in zip(strs[1:], seps):
            acc = acc + sep + nxt
        return acc

    # Signature lengths including the 2-char ', ' separators
    pos_sig_len = (sum(len(s) for s in pos_strings)
                   + 2 * max(len(pos_strings) - 1, 0))
    opt_sig_len = (sum(len(s) for s in opt_strings)
                   + 2 * max(len(opt_strings) - 1, 0))

    # One-line length: name + '()' (2) + parts + joining ', ' (2)
    oneline_len = len(outer_string) + 2 + pos_sig_len + 2 + opt_sig_len

    if (oneline_len <= linewidth
            and all('\n' not in s for s in pos_strings + opt_strings)):
        # Everything fits on a single line
        fmt = '{}({})'
        pos_str = ', '.join(pos_strings)
        opt_str = ', '.join(opt_strings)
        parts_sep = ', '
    else:
        # Multi-line variant with indented body
        fmt = '{}(\n{}\n)'

        if allow_mixed_seps:
            pos_seps = _separators(pos_strings, linewidth)
        else:
            pos_seps = [',\n'] * (len(pos_strings) - 1)
        pos_str = joined(pos_strings, pos_seps)

        if allow_mixed_seps:
            opt_seps = _separators(opt_strings, linewidth)
        else:
            opt_seps = [',\n'] * (len(opt_strings) - 1)
        opt_str = joined(opt_strings, opt_seps)

        # Can both parts share one line? Needs 4 (indent) + 2 (', ')
        # headroom and no embedded newlines.
        if pos_str and opt_str:
            inner_len = 4 + len(pos_str) + 2 + len(opt_str)
        elif pos_str or opt_str:
            inner_len = 4 + len(pos_str) + len(opt_str)
        else:
            inner_len = 0

        if (not allow_mixed_seps
                or any('\n' in s for s in [pos_str, opt_str])
                or inner_len > linewidth):
            parts_sep = ',\n'
            pos_str = indent(pos_str)
            opt_str = indent(opt_str)
        else:
            parts_sep = ', '
            pos_str = indent(pos_str)
            # `opt_str` continues on the same line, hence no indent

    parts = [s for s in (pos_str, opt_str) if s.strip()]  # drop empty parts
    return fmt.format(outer_string, parts_sep.join(parts))
python
{ "resource": "" }
q35625
attribute_repr_string
train
def attribute_repr_string(inst_str, attr_str):
    """Return a repr string for attribute access that respects line width.

    Parameters
    ----------
    inst_str : str
        Stringification of a class instance.
    attr_str : str
        Name of the attribute (without the leading ``'.'``).

    Returns
    -------
    attr_repr_str : str
        ``inst_str + '.' + attr_str``, with the instance part broken onto
        multiple lines if the combination would exceed the line width from
        NumPy's print options.
    """
    linewidth = np.get_printoptions()['linewidth']
    short_enough = len(inst_str) + 1 + len(attr_str) <= linewidth
    if short_enough or '(' not in inst_str:
        # Fits on one line, or there are no parentheses to break at
        return '.'.join([inst_str, attr_str])

    # Break at the first '(' and the last ')' of the instance string
    head, rest = inst_str.split('(', 1)
    body, tail = rest.rsplit(')', 1)

    if body.startswith('\n') and body.endswith('\n'):
        # Already spans multiple lines -> keep as-is
        multiline = inst_str
    else:
        pieces = [head]
        if body:
            pieces.append(indent(body))
        multiline = '(\n'.join(pieces) + '\n)' + tail

    return '.'.join([multiline, attr_str])
python
{ "resource": "" }
q35626
method_repr_string
train
def method_repr_string(inst_str, meth_str, arg_strs=None,
                       allow_mixed_seps=True):
    r"""Return a repr string for a method that respects line width.

    This function is useful to generate a ``repr`` string for a derived
    class that is created through a method, for instance ::

        functional.translated(x)

    as a better way of representing ::

        FunctionalTranslation(functional, x)

    Parameters
    ----------
    inst_str : str
        Stringification of a class instance.
    meth_str : str
        Name of the method (not including the ``'.'``).
    arg_strs : sequence of str, optional
        Stringification of the arguments to the method. ``None`` is
        treated as "no arguments".
    allow_mixed_seps : bool, optional
        If ``False`` and the argument strings do not fit on one line, use
        ``',\n'`` to separate all strings. By default, a mixture of
        ``', '`` and ``',\n'`` is used to fit as much on one line as
        possible.

    Returns
    -------
    meth_repr_str : str
        Concatenation of all strings in a way that the line width
        (from ``numpy.get_printoptions()['linewidth']``) is respected.

    Examples
    --------
    >>> print(method_repr_string('MyClass', 'empty'))
    MyClass.empty()
    >>> print(method_repr_string('MyClass', 'fromfile', ["'tmpfile.txt'"]))
    MyClass.fromfile('tmpfile.txt')
    """
    linewidth = np.get_printoptions()['linewidth']

    # BUG FIX: the documented default `arg_strs=None` previously crashed in
    # `', '.join(arg_strs)`; normalize it to an empty argument list.
    if arg_strs is None:
        arg_strs = []

    # Part up to the method name
    if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth
            or '(' not in inst_str):
        init_parts = [inst_str, meth_str]
        # Length of the line to the end of the method name
        meth_line_start_len = len(inst_str) + 1 + len(meth_str)
    else:
        # Break the instance string at its outermost parentheses
        left, rest = inst_str.split('(', 1)
        middle, right = rest.rsplit(')', 1)
        if middle.startswith('\n') and middle.endswith('\n'):
            # Already on multiple lines
            new_inst_str = inst_str
        else:
            new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right

        # Length of the line to the end of the method name, consisting of
        # ')' + '.' + <method name>
        meth_line_start_len = 1 + 1 + len(meth_str)
        init_parts = [new_inst_str, meth_str]

    # Method call part; 1 for each parenthesis around the arguments
    arg_str_oneline = ', '.join(arg_strs)
    if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth:
        meth_call_str = '(' + arg_str_oneline + ')'
    elif not arg_str_oneline:
        meth_call_str = '(\n)'
    else:
        if allow_mixed_seps:
            arg_seps = _separators(arg_strs, linewidth - 4)  # indented
        else:
            arg_seps = [',\n'] * (len(arg_strs) - 1)

        full_arg_str = ''
        for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''):
            full_arg_str += arg_str + sep

        meth_call_str = '(\n' + indent(full_arg_str) + '\n)'

    return '.'.join(init_parts) + meth_call_str
python
{ "resource": "" }
q35627
pkg_supports
train
def pkg_supports(feature, pkg_version, pkg_feat_dict):
    """Return whether version ``pkg_version`` of a package has ``feature``.

    Parameters
    ----------
    feature : str
        Name of a potential feature of a package.
    pkg_version : str
        Version of the package to check for the feature.
    pkg_feat_dict : dict
        Maps feature names to version specifications. A specification is
        a ``setuptools``-style requirement string without the package name
        (e.g. ``'>0.6, <=0.9'``), a sequence of such strings (OR-ed
        together), or ``None`` meaning "never supported".

    Returns
    -------
    supports : bool
        ``True`` if ``pkg_version`` matches any of the given version
        specifications for ``feature``, ``False`` otherwise.

    Examples
    --------
    >>> feat_dict = {'feat1': '==0.5.1', 'feat2': '>0.6, <=0.9'}
    >>> pkg_supports('feat1', '0.5.1', feat_dict)
    True
    >>> pkg_supports('feat2', '1.0', feat_dict)
    False
    """
    feature = str(feature)
    pkg_version = str(pkg_version)

    spec = pkg_feat_dict.get(feature, None)
    if spec is None:
        # Unknown feature or explicit "no support in any version"
        return False

    # A single spec string means one requirement, not a sequence
    if isinstance(spec, str):
        spec = [spec]

    from pkg_resources import parse_requirements

    # Prefix with a dummy package name to obtain valid requirements; each
    # `parse_requirements` result holds exactly one entry.
    requirements = [list(parse_requirements('pkg' + ver))[0] for ver in spec]

    # OR over all alternative version specifications
    return any(req.specifier.contains(pkg_version, prereleases=True)
               for req in requirements)
python
{ "resource": "" }
q35628
unique
train
def unique(seq):
    """Return the unique values in ``seq``, preserving first-seen order.

    Parameters
    ----------
    seq : sequence
        Sequence with (possibly duplicate) elements.

    Returns
    -------
    unique : list
        Unique elements of ``seq`` in their original order.

    Examples
    --------
    >>> unique([1, 2, 3, 3])
    [1, 2, 3]
    >>> unique((1, [1], [1]))  # unhashable elements also work
    [1, [1]]
    """
    try:
        # Hashable elements: O(n) de-duplication via ordered dict keys
        return list(OrderedDict.fromkeys(seq))
    except TypeError:
        # Unhashable elements: fall back to O(n^2) membership scan
        seen = []
        for item in seq:
            if item not in seen:
                seen.append(item)
        return seen
python
{ "resource": "" }
q35629
vector
train
def vector(array, dtype=None, order=None, impl='numpy'):
    """Create a vector from an array-like object.

    Parameters
    ----------
    array : `array-like`
        Array from which to create the vector. Scalars become
        one-dimensional vectors.
    dtype : optional
        Set the data type of the vector manually with this option.
        By default, the space type is inferred from the input data.
    order : {None, 'C', 'F'}, optional
        Axis ordering of the data storage. For the default ``None``,
        no contiguousness is enforced, avoiding a copy if possible.
    impl : str, optional
        Implementation back-end for the space. See
        `odl.space.entry_points.tensor_space_impl_names` for available
        options.

    Returns
    -------
    vector : `Tensor`
        Vector created from the input array. Its concrete type depends
        on the provided arguments.

    Raises
    ------
    ValueError
        If the input data cannot be converted to a numeric/structured
        NumPy array (i.e., results in an object-dtype array).

    Notes
    -----
    This is a convenience function and not intended for use in
    speed-critical algorithms.

    Examples
    --------
    >>> odl.vector([1, 2, 3], dtype=float)
    rn(3).element([ 1.,  2.,  3.])
    >>> odl.vector([[1, 2, 3],
    ...             [4, 5, 6]])
    tensor_space((2, 3), dtype=int).element(
        [[1, 2, 3],
         [4, 5, 6]]
    )
    """
    # Sanitize input. `asarray` avoids a copy where possible (and, unlike
    # `np.array(..., copy=False)`, keeps working under NumPy >= 2.0 where
    # `copy=False` raises if a copy is unavoidable); `atleast_1d` replaces
    # the former `ndmin=1`.
    arr = np.atleast_1d(np.asarray(array, order=order))

    # BUG FIX: the previous check `arr.dtype is object` was always False,
    # since `np.dtype(object)` is never the same object as the builtin
    # `object`; dtype *equality* is the correct test.
    if arr.dtype == object:
        raise ValueError('invalid input data resulting in `dtype==object`')

    # Use the explicitly requested dtype, otherwise infer from the data
    space_dtype = arr.dtype if dtype is None else dtype

    space = tensor_space(arr.shape, dtype=space_dtype, impl=impl)
    return space.element(arr)
python
{ "resource": "" }
q35630
tensor_space
train
def tensor_space(shape, dtype=None, impl='numpy', **kwargs):
    """Return a tensor space with arbitrary scalar data type.

    Parameters
    ----------
    shape : positive int or sequence of positive ints
        Number of entries per axis for elements in this space. A single
        integer results in a space with 1 axis.
    dtype : optional
        Data type of each element, in any form understood by
        `numpy.dtype`. For ``None``, the `TensorSpace.default_dtype` of
        the created space is used.
    impl : str, optional
        Implementation back-end for the space. See
        `odl.space.entry_points.tensor_space_impl_names` for available
        options.
    kwargs :
        Extra keyword arguments passed to the space constructor.

    Returns
    -------
    space : `TensorSpace`

    Examples
    --------
    >>> odl.tensor_space((2, 3), dtype='uint64')
    tensor_space((2, 3), dtype='uint64')

    See Also
    --------
    rn, cn : Constructors for real and complex spaces
    """
    space_cls = tensor_space_impl(impl)

    if dtype is None:
        # Let the back-end pick its default scalar type
        dtype = space_cls.default_dtype()

    # Pass by keyword since the constructor may take other positional args
    return space_cls(shape=shape, dtype=dtype, **kwargs)
python
{ "resource": "" }
q35631
cn
train
def cn(shape, dtype=None, impl='numpy', **kwargs):
    """Return a space of complex tensors.

    Parameters
    ----------
    shape : positive int or sequence of positive ints
        Number of entries per axis for elements in this space. A single
        integer results in a space with 1 axis.
    dtype : optional
        Data type of each element, in any form understood by
        `numpy.dtype`. Only complex floating-point data types are
        allowed. For ``None``, ``default_dtype(ComplexNumbers())`` of the
        created space is used.
    impl : str, optional
        Implementation back-end for the space. See
        `odl.space.entry_points.tensor_space_impl_names` for available
        options.
    kwargs :
        Extra keyword arguments passed to the space constructor.

    Returns
    -------
    cn : `TensorSpace`

    Examples
    --------
    >>> odl.cn((2, 3), dtype='complex64')
    cn((2, 3), dtype='complex64')

    See Also
    --------
    tensor_space : Space of tensors with arbitrary scalar data type.
    rn : Real tensor space.
    """
    space_cls = tensor_space_impl(impl)

    if dtype is None:
        dtype = space_cls.default_dtype(ComplexNumbers())

    # Pass by keyword since the constructor may take other positional args
    space = space_cls(shape=shape, dtype=dtype, **kwargs)

    # Reject non-complex dtypes explicitly
    if not space.is_complex:
        raise ValueError('data type {!r} not a complex floating-point type.'
                         ''.format(dtype))
    return space
python
{ "resource": "" }
q35632
rn
train
def rn(shape, dtype=None, impl='numpy', **kwargs):
    """Return a space of real tensors.

    Parameters
    ----------
    shape : positive int or sequence of positive ints
        Number of entries per axis for elements in this space. A single
        integer results in a space with 1 axis.
    dtype : optional
        Data type of each element, in any form understood by
        `numpy.dtype`. Only real floating-point data types are allowed.
        For ``None``, ``default_dtype(RealNumbers())`` of the created
        space is used.
    impl : str, optional
        Implementation back-end for the space. See
        `odl.space.entry_points.tensor_space_impl_names` for available
        options.
    kwargs :
        Extra keyword arguments passed to the space constructor.

    Returns
    -------
    real_space : `TensorSpace`

    Examples
    --------
    >>> odl.rn((2, 3), dtype='float32')
    rn((2, 3), dtype='float32')

    See Also
    --------
    tensor_space : Space of tensors with arbitrary scalar data type.
    cn : Complex tensor space.
    """
    space_cls = tensor_space_impl(impl)

    if dtype is None:
        dtype = space_cls.default_dtype(RealNumbers())

    # Pass by keyword since the constructor may take other positional args
    space = space_cls(shape=shape, dtype=dtype, **kwargs)

    # Reject non-real dtypes explicitly
    if not space.is_real:
        raise ValueError('data type {!r} not a real floating-point type.'
                         ''.format(dtype))
    return space
python
{ "resource": "" }
q35633
WaveletTransformBase.scales
train
def scales(self):
    """Get the scales of each coefficient.

    Returns
    -------
    scales : ``range`` element
        The scale of each coefficient, given by an integer. 0 for the
        lowest resolution and self.nlevels for the highest.
    """
    if self.impl == 'pywt':
        # The discretized image space and the flat coefficient space swap
        # roles between the forward and inverse transform variants.
        if self.__variant == 'forward':
            discr_space = self.domain
            wavelet_space = self.range
        else:
            discr_space = self.range
            wavelet_space = self.domain
        # Per-level coefficient shapes as pywt would produce them
        shapes = pywt.wavedecn_shapes(discr_space.shape,
                                      self.pywt_wavelet,
                                      mode=self.pywt_pad_mode,
                                      level=self.nlevels,
                                      axes=self.axes)
        # Approximation coefficients (coarsest level) are tagged 0 ...
        coeff_list = [np.full(shapes[0], 0)]
        # ... and the detail-coefficient dicts of level i are tagged i
        for i in range(1, 1 + len(shapes[1:])):
            coeff_list.append({k: np.full(shapes[i][k], i)
                               for k in shapes[i].keys()})
        # Flatten in the same order as the transform's coefficients
        coeffs = pywt.ravel_coeffs(coeff_list, axes=self.axes)[0]
        return wavelet_space.element(coeffs)
    else:
        raise RuntimeError("bad `impl` '{}'".format(self.impl))
python
{ "resource": "" }
q35634
WaveletTransform._call
train
def _call(self, x):
    """Return the flat wavelet coefficient vector of ``x``.

    Decomposes ``x`` with ``pywt.wavedecn`` using this operator's wavelet,
    number of levels, padding mode and axes, then flattens the resulting
    coefficient structure with ``pywt.ravel_coeffs``.
    """
    if self.impl != 'pywt':
        raise RuntimeError("bad `impl` '{}'".format(self.impl))

    coeff_list = pywt.wavedecn(
        x, wavelet=self.pywt_wavelet, level=self.nlevels,
        mode=self.pywt_pad_mode, axes=self.axes)
    # ravel_coeffs returns (flat_array, slices, shapes); only the flat
    # array is the operator's output
    flat_coeffs, _, _ = pywt.ravel_coeffs(coeff_list, axes=self.axes)
    return flat_coeffs
python
{ "resource": "" }
q35635
WaveletTransform.adjoint
train
def adjoint(self):
    """Adjoint wavelet transform.

    Returns
    -------
    adjoint : `WaveletTransformInverse`
        For an orthogonal wavelet, the adjoint is the inverse scaled by
        the reciprocal cell volume of the domain.

    Raises
    ------
    OpNotImplementedError
        if `is_orthogonal` is ``False``
    """
    if not self.is_orthogonal:
        # TODO: put adjoint here
        # Fall back to the default (raises OpNotImplementedError)
        return super(WaveletTransform, self).adjoint

    scaling = 1 / self.domain.partition.cell_volume
    return scaling * self.inverse
python
{ "resource": "" }
q35636
WaveletTransform.inverse
train
def inverse(self):
    """Inverse wavelet transform.

    Returns
    -------
    inverse : `WaveletTransformInverse`
        Operator mapping flat wavelet coefficients back to images,
        configured identically to this forward transform.

    See Also
    --------
    adjoint
    """
    # The inverse reuses this transform's configuration; its range is
    # this operator's domain (image space).
    return WaveletTransformInverse(
        range=self.domain, wavelet=self.pywt_wavelet, nlevels=self.nlevels,
        pad_mode=self.pad_mode, pad_const=self.pad_const, impl=self.impl,
        axes=self.axes)
python
{ "resource": "" }
q35637
WaveletTransformInverse._call
train
def _call(self, coeffs):
    """Return the inverse wavelet transform of ``coeffs``.

    Unflattens ``coeffs`` with the stored slice/shape metadata, runs
    ``pywt.waverecn`` and trims the result to the expected range shape
    if reconstruction produced one extra sample along some axes.
    """
    if self.impl == 'pywt':
        # Rebuild the nested wavedecn coefficient structure from the
        # flat vector using metadata stored at construction time
        coeffs = pywt.unravel_coeffs(coeffs,
                                     coeff_slices=self._coeff_slices,
                                     coeff_shapes=self._coeff_shapes,
                                     output_format='wavedecn')
        recon = pywt.waverecn(
            coeffs, wavelet=self.pywt_wavelet, mode=self.pywt_pad_mode,
            axes=self.axes)

        recon_shape = self.range.shape
        if recon.shape != recon_shape:
            # If the original shape was odd along any transformed axes it
            # will have been rounded up to the next even size after the
            # reconstruction. The extra sample should be discarded.
            # The underlying reason is decimation by two in reconstruction
            # must keep ceil(N/2) samples in each band for perfect
            # reconstruction. Reconstruction then upsamples by two.
            # When N is odd, (2 * np.ceil(N/2)) != N.
            recon_slc = []
            for i, (n_recon, n_intended) in enumerate(zip(recon.shape,
                                                          recon_shape)):
                if n_recon == n_intended + 1:
                    # Upsampling added one entry too much in this axis,
                    # drop last one
                    recon_slc.append(slice(-1))
                elif n_recon == n_intended:
                    recon_slc.append(slice(None))
                else:
                    # Any other mismatch indicates inconsistent metadata
                    raise ValueError(
                        'in axis {}: expected size {} or {} in '
                        '`recon_shape`, got {}'
                        ''.format(i, n_recon - 1, n_recon, n_intended))

            recon = recon[tuple(recon_slc)]
        return recon
    else:
        raise RuntimeError("bad `impl` '{}'".format(self.impl))
python
{ "resource": "" }
q35638
pdhg
train
def pdhg(x, f, g, A, tau, sigma, niter, **kwargs):
    """Computes a saddle point with PDHG.

    This is "algorithm 1" of [CP2011a] with extrapolation on the dual
    variable, implemented as the single-operator special case of the
    generic stochastic PDHG (``spdhg_generic``).

    Parameters
    ----------
    x : primal variable
        This variable is both input and output of the method.
    f : function
        Functional Y -> IR_infty whose convex conjugate has a proximal
        operator, i.e. ``f.convex_conj.proximal(sigma) : Y -> Y``.
    g : function
        Functional X -> IR_infty with a proximal operator, i.e.
        ``g.proximal(tau) : X -> X``.
    A : function
        Operator A : X -> Y that possesses an adjoint: ``A.adjoint``.
    tau : scalar / vector / matrix
        Step size for the primal variable.
    sigma : scalar
        Step size for the dual variable.
    niter : int
        Number of iterations.

    Other Parameters
    ----------------
    y : dual variable, part of a product space
    z : variable, adjoint of the dual variable, ``z = A^* y``
    theta : scalar, extrapolation factor
    callback : callable, called with the current iterate each iteration

    References
    ----------
    [CP2011a] Chambolle, A and Pock, T. *A First-Order Primal-Dual
    Algorithm for Convex Problems with Applications to Imaging*. Journal
    of Mathematical Imaging and Vision, 40 (2011), pp 120-145.
    """
    # With a single block, "sampling" always selects block 0
    def fun_select(k):
        return [0]

    # Lift f and A into 1-component product-space form expected by
    # the generic SPDHG implementation
    f_sum = odl.solvers.SeparableSum(f)
    A_vec = odl.BroadcastOperator(A, 1)

    # Wrap a given dual variable into the product space, if provided
    y = kwargs.pop('y', None)
    y_vec = None if y is None else A_vec.range.element([y])

    spdhg_generic(x, f_sum, g, A_vec, tau, [sigma], niter, fun_select,
                  y=y_vec, **kwargs)

    # Copy the updated dual component back into the caller's variable
    if y is not None:
        y.assign(y_vec[0])
python
{ "resource": "" }
q35639
da_spdhg
train
def da_spdhg(x, f, g, A, tau, sigma_tilde, niter, mu, **kwargs): r"""Computes a saddle point with a PDHG and dual acceleration. It therefore requires the functionals f*_i to be mu[i] strongly convex. Parameters ---------- x : primal variable This variable is both input and output of the method. f : functions Functionals Y[i] -> IR_infty that all have a convex conjugate with a proximal operator, i.e. f[i].convex_conj.proximal(sigma[i]) : Y[i] -> Y[i]. g : function Functional X -> IR_infty that has a proximal operator, i.e. g.proximal(tau) : X -> X. A : functions Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint tau : scalar Initial step size for primal variable. sigma_tilde : scalar Related to initial step size for dual variable. niter : int Number of iterations mu: list List of strong convexity constants of f*, i.e. mu[i] is the strong convexity constant of f*[i]. Other Parameters ---------------- y: dual variable Dual variable is part of a product space z: variable Adjoint of dual variable, z = A^* y. prob: list List of probabilities that an index i is selected each iteration. By default this is uniform serial sampling, p_i = 1/n. fun_select : function Function that selects blocks at every iteration IN -> {1,...,n}. By default this is serial sampling, fun_select(k) selects an index i \in {1,...,n} with probability p_i. extra: list List of local extrapolation paramters for every index i. By default extra_i = 1 / p_i. callback : callable, optional Function called with the current iterate after each iteration. References ---------- [CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb, *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017). 
""" # Callback object callback = kwargs.pop('callback', None) if callback is not None and not callable(callback): raise TypeError('`callback` {} is not callable' ''.format(callback)) # Probabilities prob = kwargs.pop('prob', None) if prob is None: prob = [1 / len(A)] * len(A) # Selection function fun_select = kwargs.pop('fun_select', None) if fun_select is None: def fun_select(x): return [int(np.random.choice(len(A), 1, p=prob))] # Dual variable y = kwargs.pop('y', None) if y is None: y = A.range.zero() # Adjoint of dual variable z = kwargs.pop('z', None) if z is None and y.norm() == 0: z = A.domain.zero() # Extrapolation extra = kwargs.pop('extra', None) if extra is None: extra = [1 / p for p in prob] # Initialize variables z_relax = z.copy() dz = A.domain.element() y_old = A.range.element() # Save proximal operators prox_dual = [fi.convex_conj.proximal for fi in f] prox_primal = g.proximal # run the iterations for k in range(niter): # select block selected = fun_select(k) # update extrapolation parameter theta theta = float(1 / np.sqrt(1 + 2 * sigma_tilde)) # update primal variable # tmp = x - tau * z_relax; z_relax used as tmp variable z_relax.lincomb(1, x, -tau, z_relax) # x = prox(tmp) prox_primal(tau)(z_relax, out=x) # update dual variable and z, z_relax z_relax.assign(z) for i in selected: # compute the step sizes sigma_i based on sigma_tilde sigma_i = sigma_tilde / ( mu[i] * (prob[i] - 2 * (1 - prob[i]) * sigma_tilde)) # save old yi y_old[i].assign(y[i]) # tmp = Ai(x) A[i](x, out=y[i]) # tmp = y_old + sigma_i * Ai(x) y[i].lincomb(1, y_old[i], sigma_i, y[i]) # yi++ = fi*.prox_sigmai(yi) prox_dual[i](sigma_i)(y[i], out=y[i]) # update adjoint of dual variable y_old[i].lincomb(-1, y_old[i], 1, y[i]) A[i].adjoint(y_old[i], out=dz) z += dz # compute extrapolation z_relax.lincomb(1, z_relax, 1 + theta * extra[i], dz) # update the step sizes tau and sigma_tilde for acceleration sigma_tilde *= theta tau /= theta if callback is not None: callback([x, y])
python
{ "resource": "" }
q35640
LinearSpace.dist
train
def dist(self, x1, x2):
    """Return the distance between ``x1`` and ``x2`` as a ``float``.

    Parameters
    ----------
    x1, x2 : `LinearSpaceElement`
        Elements whose distance to compute. Both must belong to this
        space.

    Returns
    -------
    dist : float
        Distance between ``x1`` and ``x2``.

    Raises
    ------
    LinearSpaceTypeError
        If ``x1`` or ``x2`` is not an element of this space.
    """
    # Validate membership before delegating to the implementation hook
    if x1 not in self:
        raise LinearSpaceTypeError('`x1` {!r} is not an element of '
                                   '{!r}'.format(x1, self))
    if x2 not in self:
        raise LinearSpaceTypeError('`x2` {!r} is not an element of '
                                   '{!r}'.format(x2, self))
    # `_dist` is the subclass hook; coerce its result to a builtin float
    return float(self._dist(x1, x2))
python
{ "resource": "" }
q35641
LinearSpace.inner
train
def inner(self, x1, x2):
    """Return the inner product of ``x1`` and ``x2``.

    Parameters
    ----------
    x1, x2 : `LinearSpaceElement`
        Elements whose inner product to compute. Both must belong to
        this space.

    Returns
    -------
    inner : `LinearSpace.field` element
        Inner product of ``x1`` and ``x2``. If the space has no field
        (``self.field is None``), the raw result of the implementation
        hook is returned unchanged.

    Raises
    ------
    LinearSpaceTypeError
        If ``x1`` or ``x2`` is not an element of this space.
    """
    if x1 not in self:
        raise LinearSpaceTypeError('`x1` {!r} is not an element of '
                                   '{!r}'.format(x1, self))
    if x2 not in self:
        raise LinearSpaceTypeError('`x2` {!r} is not an element of '
                                   '{!r}'.format(x2, self))

    inner = self._inner(x1, x2)
    if self.field is None:
        return inner
    else:
        # BUG FIX: previously `self._inner(x1, x2)` was evaluated a second
        # time here instead of reusing the value computed above, doubling
        # the cost of every inner product; reuse the cached result.
        return self.field.element(inner)
python
{ "resource": "" }
q35642
LinearSpace.multiply
train
def multiply(self, x1, x2, out=None):
    """Return the pointwise product of ``x1`` and ``x2``.

    Parameters
    ----------
    x1, x2 : `LinearSpaceElement`
        Multiplicands in the product.
    out : `LinearSpaceElement`, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpaceElement`
        Product of the elements. If ``out`` was provided, the returned
        object is a reference to it.
    """
    if out is None:
        out = self.element()
    # All three elements must belong to this space.
    for name, elem in (('out', out), ('x1', x1), ('x2', x2)):
        if elem not in self:
            raise LinearSpaceTypeError('`{}` {!r} is not an element of '
                                       '{!r}'.format(name, elem, self))
    self._multiply(x1, x2, out)
    return out
python
{ "resource": "" }
q35643
LinearSpace.divide
train
def divide(self, x1, x2, out=None):
    """Return the pointwise quotient of ``x1`` and ``x2``

    Parameters
    ----------
    x1 : `LinearSpaceElement`
        Dividend in the quotient.
    x2 : `LinearSpaceElement`
        Divisor in the quotient.
    out : `LinearSpaceElement`, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpaceElement`
        Quotient of the elements. If ``out`` was provided, the returned
        object is a reference to it.
    """
    if out is None:
        out = self.element()
    # All three elements must belong to this space.
    for name, elem in (('out', out), ('x1', x1), ('x2', x2)):
        if elem not in self:
            raise LinearSpaceTypeError('`{}` {!r} is not an element of '
                                       '{!r}'.format(name, elem, self))
    self._divide(x1, x2, out)
    return out
python
{ "resource": "" }
q35644
pywt_wavelet
train
def pywt_wavelet(wavelet):
    """Convert ``wavelet`` to a `pywt.Wavelet` instance."""
    # Pass instances through unchanged, construct from a name otherwise.
    return (wavelet if isinstance(wavelet, pywt.Wavelet)
            else pywt.Wavelet(wavelet))
python
{ "resource": "" }
q35645
pywt_pad_mode
train
def pywt_pad_mode(pad_mode, pad_const=0):
    """Convert ODL-style padding mode to pywt-style padding mode.

    Parameters
    ----------
    pad_mode : str
        The ODL padding mode to use at the boundaries.
    pad_const : float, optional
        Value to use outside the signal boundaries when ``pad_mode`` is
        'constant'. Only a value of 0. is supported by PyWavelets

    Returns
    -------
    pad_mode_pywt : str
        The corresponding name of the requested padding mode in PyWavelets.
        See `signal extension modes`_.

    References
    ----------
    .. _signal extension modes:
       https://pywavelets.readthedocs.io/en/latest/ref/\
signal-extension-modes.html
    """
    mode = str(pad_mode).lower()
    # PyWavelets only implements zero-valued constant padding.
    if mode == 'constant' and pad_const != 0.0:
        raise ValueError('constant padding with constant != 0 not supported '
                         'for `pywt` back-end')
    try:
        return PAD_MODES_ODL2PYWT[mode]
    except KeyError:
        raise ValueError("`pad_mode` '{}' not understood".format(mode))
python
{ "resource": "" }
q35646
precompute_raveled_slices
train
def precompute_raveled_slices(coeff_shapes, axes=None):
    """Return slices for raveled multilevel wavelet coefficients.

    The output is equivalent to the ``coeff_slices`` output of
    `pywt.ravel_coeffs`, but this function does not require computing a
    wavelet transform first.

    Parameters
    ----------
    coeff_shapes : array-like
        A list of multilevel wavelet coefficient shapes as returned by
        `pywt.wavedecn_shapes`: the approximation shape first, followed by
        one dict of detail-coefficient shapes per level.
    axes : sequence of ints, optional
        Axes over which the DWT that created the coefficients was
        performed. The default value of None corresponds to all axes.

    Returns
    -------
    coeff_slices : list
        List of slices corresponding to each coefficient. The first entry
        is the slice of the approximation coefficients; each further entry
        is a dict mapping the detail keys (e.g. ``'ad'``, ``'da'``,
        ``'dd'`` in 2d) of one level to their slices.
    """
    # Approximation coefficients always come first in the raveled array.
    approx_size = int(np.prod(coeff_shapes[0]))
    if len(coeff_shapes) == 1:
        # Only a single approximation coefficient array was found.
        return [slice(approx_size), ]

    slices = [slice(approx_size)]
    offset = approx_size
    # Append detail slices level by level, keys in sorted order so the
    # layout matches `pywt.ravel_coeffs`.
    for level_shapes in coeff_shapes[1:]:
        level_slices = {}
        for key in sorted(level_shapes):
            size = int(np.prod(level_shapes[key]))
            level_slices[key] = slice(offset, offset + size)
            offset += size
        slices.append(level_slices)

    return slices
python
{ "resource": "" }
q35647
combine_proximals
train
def combine_proximals(*factory_list):
    r"""Combine proximal operators into a diagonal product space operator.

    This assumes the functional to be separable across variables in order
    to make use of the separable sum property of proximal operators: if
    :math:`F((x, y)) = F(x)` and :math:`G((x, y)) = G(y)`, then

    .. math::
        \mathrm{prox}_{\sigma (F(x) + G(y))}(x, y) =
        (\mathrm{prox}_{\sigma F}(x), \mathrm{prox}_{\sigma G}(y)).

    Parameters
    ----------
    factory_list : sequence of callables
        Proximal operator factories to be combined.

    Returns
    -------
    diag_op : function
        Diagonal product space operator factory, to be initialized with
        the same step size parameter as its components.
    """
    num_factories = len(factory_list)

    def diag_op_factory(sigma):
        """Diagonal matrix of operators.

        Parameters
        ----------
        sigma : positive float or sequence of positive floats
            Step size parameter(s), if a sequence, the length must match
            the length of the ``factory_list``.

        Returns
        -------
        diag_op : `DiagonalOperator`
        """
        # Broadcast a scalar step size to all factories.
        if np.isscalar(sigma):
            sigmas = [sigma] * num_factories
        else:
            sigmas = sigma
        return DiagonalOperator(
            *[factory(s) for factory, s in zip(factory_list, sigmas)])

    return diag_op_factory
python
{ "resource": "" }
q35648
proximal_convex_conj
train
def proximal_convex_conj(prox_factory):
    r"""Calculate the proximal of the dual using Moreau decomposition.

    Parameters
    ----------
    prox_factory : callable
        A factory function that, when called with a step size, returns
        the proximal operator of ``F``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    Notes
    -----
    The Moreau identity states that for any convex function :math:`F`
    with convex conjugate :math:`F^*`, the proximals satisfy

    .. math::
        \mathrm{prox}_{\sigma F^*}(x) +
        \sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma) = x

    where :math:`\sigma` is a scalar step size. Using this, the proximal
    of the convex conjugate is given by

    .. math::
        \mathrm{prox}_{\sigma F^*}(x) =
        x - \sigma \, \mathrm{prox}_{F / \sigma}(x / \sigma).

    Since :math:`(F^*)^* = F`, this can also be used to recover the
    proximal of the original function from the proximal of the convex
    conjugate. For reference on the Moreau identity, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """

    def convex_conj_prox_factory(sigma):
        """Create proximal for the dual with a given sigma.

        Parameters
        ----------
        sigma : positive float or array-like
            Step size parameter. Can be a pointwise positive space element
            or a sequence of positive floats if ``prox_factory`` supports
            that.

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``s * F^*`` where ``s`` is the step
            size.
        """
        # Calling the factory both yields the underlying space and checks
        # that it accepts step size objects of the type of `sigma`.
        space = prox_factory(sigma).domain
        scale_in = MultiplyOperator(1.0 / sigma, domain=space, range=space)
        scale_out = MultiplyOperator(sigma, domain=space, range=space)
        # Moreau identity: id - sigma * prox_{F/sigma}( . / sigma)
        return (IdentityOperator(space) -
                scale_out * prox_factory(1.0 / sigma) * scale_in)

    return convex_conj_prox_factory
python
{ "resource": "" }
q35649
proximal_composition
train
def proximal_composition(proximal, operator, mu):
    r"""Proximal operator factory of functional composed with unitary operator.

    For a functional ``F`` and a linear unitary `Operator` ``L`` this is
    the factory for the proximal operator of ``F * L``.

    Parameters
    ----------
    proximal : callable
        A factory function that, when called with a step size, returns
        the proximal operator of ``F``.
    operator : `Operator`
        The operator to compose the functional with.
    mu : ``operator.field`` element
        Scalar such that ``(operator.adjoint * operator)(x) = mu * x``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    Notes
    -----
    Given a linear operator :math:`L` with the property that for a scalar
    :math:`\mu`

    .. math::
        L^*(L(x)) = \mu * x

    and a convex function :math:`F`, the following identity holds

    .. math::
        \mathrm{prox}_{\sigma F \circ L}(x) = x + \frac{1}{\mu}
        L^* \left( \mathrm{prox}_{\mu \sigma F}(Lx) - Lx \right).

    There is no simple formula for more general operators. The function
    cannot verify that the operator is unitary; the user needs to verify
    this. For reference on the identity used, see [CP2011c].

    References
    ----------
    [CP2011c] Combettes, P L, and Pesquet, J-C. *Proximal splitting
    methods in signal processing.* In: Bauschke, H H, Burachik, R S,
    Combettes, P L, Elser, V, Luke, D R, and Wolkowicz, H. Fixed-point
    algorithms for inverse problems in science and engineering, Springer,
    2011.
    """

    def proximal_composition_factory(sigma):
        """Create proximal for the dual with a given sigma.

        Parameters
        ----------
        sigma : positive float
            Step size parameter.

        Returns
        -------
        proximal : `Operator`
            The proximal operator of ``prox[sigma * F * L](x)``.
        """
        id_domain = IdentityOperator(operator.domain)
        id_range = IdentityOperator(operator.range)
        prox_mu_sigma = proximal(mu * sigma)
        # x + (1/mu) * L^*(prox_{mu*sigma*F}(Lx) - Lx)
        return (id_domain +
                (1.0 / mu) * operator.adjoint *
                ((prox_mu_sigma - id_range) * operator))

    return proximal_composition_factory
python
{ "resource": "" }
q35650
proximal_convex_conj_l2_squared
train
def proximal_convex_conj_l2_squared(space, lam=1, g=None):
    r"""Proximal operator factory of the convex conj of the squared l2-dist

    Function for the proximal operator of the convex conjugate of the
    functional F where F is the l2-norm (or distance to g, if given)::

        F(x) = lam ||x - g||_2^2

    with x and g elements in ``space``, scaling factor lam, and given data g.

    Parameters
    ----------
    space : `LinearSpace`
        Domain of F(x). Needs to be a Hilbert space.
        That is, have an inner product (`LinearSpace.inner`).
    lam : positive float, optional
        Scaling factor or regularization parameter.
    g : ``space`` element, optional
        An element in ``space``. Default: ``space.zero``.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized

    Notes
    -----
    The squared :math:`L_2`-norm/distance :math:`F` is given by

    .. math::
        F(x) = \lambda \|x - g\|_2^2.

    The convex conjugate :math:`F^*` of :math:`F` is given by

    .. math::
        F^*(y) = \frac{1}{4\lambda} \left( \| y\|_2^2 +
        \langle y, g \rangle \right)

    For a step size :math:`\sigma`, the proximal operator of
    :math:`\sigma F^*` is given by

    .. math::
        \mathrm{prox}_{\sigma F^*}(y) = \frac{y - \sigma g}{1 +
        \sigma/(2 \lambda)}

    See Also
    --------
    proximal_convex_conj_l2 : proximal without square
    proximal_l2_squared : proximal without convex conjugate
    """
    lam = float(lam)

    if g is not None and g not in space:
        raise TypeError('{!r} is not an element of {!r}'.format(g, space))

    class ProximalConvexConjL2Squared(Operator):

        """Proximal operator of the convex conj of the squared l2-norm/dist."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float or pointwise positive space.element
                Step size parameter. If scalar, it contains a global
                stepsize, otherwise the space.element defines a stepsize
                for each point.
            """
            # The operator is linear only when no data term `g` is given
            # (pure scaling in that case).
            super(ProximalConvexConjL2Squared, self).__init__(
                domain=space, range=space, linear=g is None)
            # Normalize `sigma` once so `_call` can branch on its type.
            if np.isscalar(sigma):
                self.sigma = float(sigma)
            else:
                self.sigma = space.element(sigma)

        def _call(self, x, out):
            """Apply the operator to ``x`` and store the result in ``out``"""
            # (x - sig*g) / (1 + sig/(2 lam))
            sig = self.sigma
            if np.isscalar(sig):
                # Scalar step size: one or two lincombs suffice, and they
                # are safe under aliasing of `x` and `out`.
                if g is None:
                    out.lincomb(1 / (1 + 0.5 * sig / lam), x)
                else:
                    out.lincomb(1 / (1 + 0.5 * sig / lam), x,
                                -sig / (1 + 0.5 * sig / lam), g)
            elif sig in space:
                # Pointwise step size: numerator and denominator are
                # computed elementwise.
                if g is None:
                    x.divide(1 + 0.5 / lam * sig, out=out)
                else:
                    if x is out:
                        # Can't write to `out` since old `x` is still needed
                        tmp = sig.multiply(g)
                        out.lincomb(1, x, -1, tmp)
                    else:
                        sig.multiply(g, out=out)
                        out.lincomb(1, x, -1, out)
                    out.divide(1 + 0.5 / lam * sig, out=out)
            else:
                raise RuntimeError(
                    '`sigma` is neither a scalar nor a space element.'
                )

    return ProximalConvexConjL2Squared
python
{ "resource": "" }
q35651
proximal_linfty
train
def proximal_linfty(space):
    r"""Proximal operator factory of the ``l_\infty``-norm.

    Function for the proximal operator of the functional ``F`` where ``F``
    is the ``l_\infty``-norm::

        ``F(x) = \sup_i |x_i|``

    Parameters
    ----------
    space : `LinearSpace`
        Domain of ``F``.

    Returns
    -------
    prox_factory : callable
        Factory for the proximal operator to be initialized.

    Notes
    -----
    The proximal is computed by the Moreau identity and a projection onto
    an l1-ball [PB2014]. For a step size :math:`\sigma` the identity reads

    .. math::
        \mathrm{prox}_{\sigma \|\cdot\|_\infty}(x) =
        x - P_{\sigma B_{\ell_1}}(x),

    i.e. the projection is onto the l1-ball of radius :math:`\sigma`.

    See Also
    --------
    proj_l1 : projection onto l1-ball
    """

    class ProximalLInfty(Operator):

        """Proximal operator of the linf-norm."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
                Step size parameter
            """
            super(ProximalLInfty, self).__init__(
                domain=space, range=space, linear=False)
            self.sigma = float(sigma)

        def _call(self, x, out):
            """Return ``self(x)``."""
            # BUGFIX: by the Moreau identity,
            # prox_{sigma*||.||_inf}(x) = x - P_{sigma*B_l1}(x),
            # so the l1-ball radius must be `sigma`, not 1. The previous
            # hard-coded radius 1 was only correct for sigma == 1.
            radius = self.sigma

            if x is out:
                # `x` is still needed after the projection overwrites `out`.
                x = x.copy()
            proj_l1(x, radius, out)
            out.lincomb(-1, out, 1, x)

    return ProximalLInfty
python
{ "resource": "" }
q35652
proj_l1
train
def proj_l1(x, radius=1, out=None):
    r"""Projection onto l1-ball.

    Projection onto::

        ``{ x \in X | ||x||_1 \leq r}``

    with ``r`` being the radius.

    Parameters
    ----------
    x : `LinearSpace` element
        Point to be projected.
    radius : positive float, optional
        Radius ``r`` of the ball.
    out : `LinearSpace` element, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpace` element
        The projection of ``x`` onto the l1-ball.

    Notes
    -----
    The projection onto an l1-ball can be computed by projection onto a
    simplex, see [D+2008] for details. Points that already lie inside the
    ball are their own projection.

    References
    ----------
    [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T.
    *Efficient Projections onto the L1-ball for Learning in High
    dimensions*. ICML 2008, pp. 272-279.
    http://doi.org/10.1145/1390156.1390191

    See Also
    --------
    proximal_linfty : proximal for l-infinity norm
    proj_simplex : projection onto simplex
    """
    if out is None:
        out = x.space.element()

    # BUGFIX: a point inside the ball is its own projection. Without this
    # shortcut (part of the algorithm in [D+2008]), the simplex projection
    # below would push interior points onto the sphere ||x||_1 == radius.
    if np.abs(x.asarray()).sum() <= radius:
        out.assign(x)
        return out

    # Project the magnitudes onto the simplex, then restore the signs.
    u = x.ufuncs.absolute()
    v = x.ufuncs.sign()

    proj_simplex(u, radius, out)
    out *= v

    return out
python
{ "resource": "" }
q35653
proj_simplex
train
def proj_simplex(x, diameter=1, out=None):
    r"""Projection onto simplex.

    Projection onto::

        ``{ x \in X | x_i \geq 0, \sum_i x_i = r}``

    with :math:`r` being the diameter. It is computed by the formula
    proposed in [D+2008].

    Parameters
    ----------
    x : `LinearSpace` element
        Point to be projected.
    diameter : positive float, optional
        Diameter of the simplex.
    out : `LinearSpace` element, optional
        Element to which the result is written.

    Returns
    -------
    out : `LinearSpace` element
        The projection of ``x`` onto the simplex.

    Notes
    -----
    The projection onto a simplex is not of closed-form but can be solved
    by a non-iterative algorithm, see [D+2008] for details.

    References
    ----------
    [D+2008] Duchi, J., Shalev-Shwartz, S., Singer, Y., and Chandra, T.
    *Efficient Projections onto the L1-ball for Learning in High
    dimensions*. ICML 2008, pp. 272-279.
    http://doi.org/10.1145/1390156.1390191

    See Also
    --------
    proj_l1 : projection onto l1-norm ball
    """
    if out is None:
        out = x.space.element()

    # sort values in descending order
    x_sor = x.asarray().flatten()
    x_sor.sort()
    x_sor = x_sor[::-1]

    # find critical index: largest i with x_sor[i] >= running average.
    # For diameter > 0, index 0 always satisfies the criterion
    # (crit[0] == diameter), so `argwhere` is never empty.
    j = np.arange(1, x.size + 1)
    x_avrg = (1 / j) * (np.cumsum(x_sor) - diameter)
    crit = x_sor - x_avrg
    i = np.argwhere(crit >= 0).flatten().max()

    # output is a shifted and thresholded version of the input
    out[:] = np.maximum(x - x_avrg[i], 0)

    return out
python
{ "resource": "" }
q35654
proximal_convex_conj_kl
train
def proximal_convex_conj_kl(space, lam=1, g=None):
    r"""Proximal operator factory of the convex conjugate of the KL divergence.

    Function returning the proximal operator of the convex conjugate of the
    functional F where F is the entropy-type Kullback-Leibler (KL)
    divergence::

        F(x) = sum_i (x_i - g_i + g_i ln(g_i) - g_i ln(pos(x_i))) + ind_P(x)

    with ``x`` and ``g`` elements in the linear space ``X``, and ``g``
    non-negative. Here, ``pos`` denotes the nonnegative part, and ``ind_P``
    is the indicator function for nonnegativity.

    Parameters
    ----------
    space : `TensorSpace`
        Space X which is the domain of the functional F
    lam : positive float, optional
        Scaling factor.
    g : ``space`` element, optional
        Data term, positive. If None it is taken as the one-element.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    proximal_convex_conj_kl_cross_entropy : proximal for related functional

    Notes
    -----
    The functional is given by the expression

    .. math::
        F(x) = \sum_i (x_i - g_i + g_i \ln(g_i) - g_i \ln(pos(x_i))) +
        I_{x \geq 0}(x)

    The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the
    domain of :math:`F` such that :math:`F` is defined over whole space
    :math:`X`. The non-negativity thresholding :math:`pos` is used to define
    :math:`F` in the real numbers.

    Note that the functional is not well-defined without a prior g. Hence,
    if g is omitted this will be interpreted as if g is equal to the
    one-element.

    The convex conjugate :math:`F^*` of :math:`F` is

    .. math::
        F^*(p) = \sum_i (-g_i \ln(\text{pos}({1_X}_i - p_i))) +
        I_{1_X - p \geq 0}(p)

    where :math:`p` is the variable dual to :math:`x`, and :math:`1_X` is
    an element of the space :math:`X` with all components set to 1.

    The proximal operator of the convex conjugate of F is

    .. math::
        \mathrm{prox}_{\sigma (\lambda F)^*}(x) =
        \frac{\lambda 1_X + x - \sqrt{(x - \lambda 1_X)^2 +
        4 \lambda \sigma g}}{2}

    where :math:`\sigma` is the step size-like parameter, and
    :math:`\lambda` is the weighting in front of the function :math:`F`.

    KL based objectives are common in MLEM optimization problems and are
    often used when data noise governed by a multivariate Poisson
    probability distribution is significant.

    The intermediate image estimates can have negative values even though
    the converged solution will be non-negative. Non-negative intermediate
    image estimates can be enforced by adding an indicator function ind_P
    the primal objective.

    This functional :math:`F`, described above, is related to the
    Kullback-Leibler cross entropy functional. The KL cross entropy is the
    one described in `this Wikipedia article
    <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_,
    and the functional :math:`F` is obtained by switching place of the
    prior and the variable in the KL cross entropy functional. See the
    See Also section.
    """
    lam = float(lam)

    if g is not None and g not in space:
        raise TypeError('{} is not an element of {}'.format(g, space))

    class ProximalConvexConjKL(Operator):

        """Proximal operator of the convex conjugate of the KL divergence."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
            """
            super(ProximalConvexConjKL, self).__init__(
                domain=space, range=space, linear=False)
            self.sigma = float(sigma)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # The computation below builds, entirely in place,
            # (x + lam - sqrt((x - lam)^2 + 4*lam*sig*g)) / 2

            # out = (x - lam)^2
            if x is out:
                # Handle aliased `x` and `out` (need original `x` later on)
                x = x.copy()
            else:
                out.assign(x)
            out -= lam
            out.ufuncs.square(out=out)

            # out = ... + 4*lam*sigma*g
            # If g is None, it is taken as the one element
            if g is None:
                out += 4.0 * lam * self.sigma
            else:
                out.lincomb(1, out, 4.0 * lam * self.sigma, g)

            # out = x - sqrt(...) + lam
            out.ufuncs.sqrt(out=out)
            out.lincomb(1, x, -1, out)
            out += lam

            # out = 1/2 * ...
            out /= 2

    return ProximalConvexConjKL
python
{ "resource": "" }
q35655
proximal_convex_conj_kl_cross_entropy
train
def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None):
    r"""Proximal factory of the convex conj of cross entropy KL divergence.

    Function returning the proximal factory of the convex conjugate of the
    functional F, where F is the cross entropy Kullback-Leibler (KL)
    divergence given by::

        F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x)

    with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative.
    Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the
    indicator function for nonnegativity.

    Parameters
    ----------
    space : `TensorSpace`
        Space X which is the domain of the functional F
    lam : positive float, optional
        Scaling factor.
    g : ``space`` element, optional
        Data term, positive. If None it is taken as the one-element.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    proximal_convex_conj_kl : proximal for related functional

    Notes
    -----
    The functional is given by the expression

    .. math::
        F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) +
        I_{x \geq 0}(x)

    The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the
    domain of :math:`F` such that :math:`F` is defined over whole space
    :math:`X`. The non-negativity thresholding :math:`pos` is used to
    define :math:`F` in the real numbers.

    Note that the functional is not well-defined without a prior g. Hence,
    if g is omitted this will be interpreted as if g is equal to the
    one-element.

    The convex conjugate :math:`F^*` of :math:`F` is

    .. math::
        F^*(p) = \sum_i g_i (exp(p_i) - 1)

    where :math:`p` is the variable dual to :math:`x`.

    The proximal operator of the convex conjugate of :math:`F` is

    .. math::
        \mathrm{prox}_{\sigma (\lambda F)^*}(x) =
        x - \lambda W(\frac{\sigma}{\lambda} g e^{x/\lambda})

    where :math:`\sigma` is the step size-like parameter, :math:`\lambda`
    is the weighting in front of the function :math:`F`, and :math:`W` is
    the Lambert W function (see, for example, the `Wikipedia article
    <https://en.wikipedia.org/wiki/Lambert_W_function>`_).

    For real-valued input x, the Lambert :math:`W` function is defined
    only for :math:`x \geq -1/e`, and it has two branches for values
    :math:`-1/e \leq x < 0`. However, for intended use-cases, where
    :math:`\lambda` and :math:`g` are positive, the argument of :math:`W`
    will always be positive.

    `Wikipedia article on Kullback Leibler divergence
    <https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_.
    For further information about the functional, see for example `this
    article <http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_.

    The KL cross entropy functional :math:`F`, described above, is related
    to another functional also known as KL divergence. This functional is
    often used as data discrepancy term in inverse problems, when data is
    corrupted with Poisson noise. This functional is obtained by changing
    place of the prior and the variable. See the See Also section.
    """
    lam = float(lam)

    if g is not None and g not in space:
        raise TypeError('{} is not an element of {}'.format(g, space))

    class ProximalConvexConjKLCrossEntropy(Operator):

        """Proximal operator of conjugate of cross entropy KL divergence."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
            """
            self.sigma = float(sigma)
            super(ProximalConvexConjKLCrossEntropy, self).__init__(
                domain=space, range=space, linear=False)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # Lazy import to improve `import odl` time
            import scipy.special

            if g is None:
                # If g is None, it is taken as the one element
                # Different branches of lambertw is not an issue, see Notes
                lambw = scipy.special.lambertw(
                    (self.sigma / lam) * np.exp(x / lam))
            else:
                # Different branches of lambertw is not an issue, see Notes
                lambw = scipy.special.lambertw(
                    (self.sigma / lam) * g * np.exp(x / lam))

            # Real-valued spaces only need the real part of the result.
            # BUGFIX: `np.issubsctype` was removed in NumPy 2.0; use the
            # supported `np.issubdtype` instead.
            if not np.issubdtype(self.domain.dtype, np.complexfloating):
                lambw = lambw.real

            lambw = x.space.element(lambw)

            out.lincomb(1, x, -lam, lambw)

    return ProximalConvexConjKLCrossEntropy
python
{ "resource": "" }
q35656
proximal_huber
train
def proximal_huber(space, gamma):
    """Proximal factory of the Huber norm.

    Parameters
    ----------
    space : `TensorSpace`
        The domain of the functional
    gamma : float
        The smoothing parameter of the Huber norm functional.

    Returns
    -------
    prox_factory : function
        Factory for the proximal operator to be initialized.

    See Also
    --------
    odl.solvers.default_functionals.Huber : the Huber norm functional

    Notes
    -----
    The proximal operator is given by the proximal operator of
    ``1/(2*gamma) * L2 norm`` in points that are ``<= gamma``, and by the
    proximal operator of the l1 norm in points that are ``> gamma``.
    """
    gamma = float(gamma)

    class ProximalHuber(Operator):

        """Proximal operator of Huber norm."""

        def __init__(self, sigma):
            """Initialize a new instance.

            Parameters
            ----------
            sigma : positive float
            """
            self.sigma = float(sigma)
            super(ProximalHuber, self).__init__(domain=space, range=space,
                                                linear=False)

        def _call(self, x, out):
            """Return ``self(x, out=out)``."""
            # Pointwise magnitude: Euclidean norm over components for
            # product spaces, absolute value otherwise.
            if isinstance(self.domain, ProductSpace):
                norm = PointwiseNorm(self.domain, 2)(x)
            else:
                norm = x.ufuncs.absolute()

            # Quadratic region |x| <= gamma + sigma: scale toward zero.
            mask = norm.ufuncs.less_equal(gamma + self.sigma)
            out[mask] = gamma / (gamma + self.sigma) * x[mask]

            # Linear (l1) region: soft-shrink by sigma. Both branches
            # agree at |x| == gamma + sigma, so the operator is continuous.
            mask.ufuncs.logical_not(out=mask)
            sign_x = x.ufuncs.sign()
            out[mask] = x[mask] - self.sigma * sign_x[mask]

            return out

    return ProximalHuber
python
{ "resource": "" }
q35657
mri_head_reco_op_32_channel
train
def mri_head_reco_op_32_channel():
    """Reconstruction operator for 32 channel MRI of a head.

    This is a T2 weighted TSE scan of a healthy volunteer.

    The reconstruction operator is the sum of the modulus of each channel.

    See the data source with DOI `10.5281/zenodo.800527`_ or the
    `project webpage`_ for further information.

    See Also
    --------
    mri_head_data_32_channel

    References
    ----------
    .. _10.5281/zenodo.800529: https://zenodo.org/record/800527
    .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\
platform_aktuell.html
    """
    # To get the same rotation as in the reference article
    space = odl.uniform_discr(min_pt=[-115.2, -115.2],
                              max_pt=[115.2, 115.2],
                              shape=[256, 256], dtype=complex)
    inv_ft = odl.trafos.FourierTransform(space).inverse
    modulus = odl.ComplexModulus(space)
    # Sum of channel moduli over all 32 coils
    return odl.ReductionOperator(modulus * inv_ft, 32)
python
{ "resource": "" }
q35658
mri_knee_data_8_channel
train
def mri_knee_data_8_channel(): """Raw data for 8 channel MRI of a knee. This is an SE measurement of the knee of a healthy volunteer. The data has been rescaled so that the reconstruction fits approximately in [0, 1]. See the data source with DOI `10.5281/zenodo.800529`_ or the `project webpage`_ for further information. See Also -------- mri_knee_inverse_8_channel References ---------- .. _10.5281/zenodo.800529: https://zenodo.org/record/800529 .. _project webpage: http://imsc.uni-graz.at/mobis/internal/\ platform_aktuell.html """ # TODO: Store data in some ODL controlled url url = 'https://zenodo.org/record/800529/files/3_rawdata_knee_8ch.mat' dct = get_data('3_rawdata_knee_8ch.mat', subset=DATA_SUBSET, url=url) # Change axes to match ODL definitions data = flip(np.swapaxes(dct['rawdata'], 0, -1) * 9e3, 2) return data
python
{ "resource": "" }
q35659
convert_to_odl
train
def convert_to_odl(image):
    """Convert image to ODL object.

    Parameters
    ----------
    image : `numpy.ndarray`
        2d image array, or 3d array whose last axis enumerates channels
        (channels-last layout).

    Returns
    -------
    image : ODL space element
        Element of a ``uniform_discr`` space (2d input) or of a power of
        such a space with one component per channel (3d input).

    Raises
    ------
    ValueError
        If ``image`` is neither 2- nor 3-dimensional.
    """
    shape = image.shape
    if len(shape) == 2:
        space = odl.uniform_discr([0, 0], shape, shape)
    elif len(shape) == 3:
        d = shape[2]
        shape = shape[:2]
        # Move channels first so they match the power space components
        image = np.transpose(image, (2, 0, 1))
        space = odl.uniform_discr([0, 0], shape, shape) ** d
    else:
        # Previously, any other dimensionality fell through to an
        # `UnboundLocalError` on `space`; fail explicitly instead.
        raise ValueError('expected a 2d or 3d `image`, got array with shape '
                         '{}'.format(image.shape))
    return space.element(image)
python
{ "resource": "" }
q35660
IntervalProd.mid_pt
train
def mid_pt(self):
    """Midpoint of this interval product.

    Degenerate axes (where ``min_pt == max_pt``) keep their single
    coordinate value.
    """
    midpoint = 0.5 * (self.min_pt + self.max_pt)
    degen = ~self.nondegen_byaxis
    # Avoid rounding artifacts on degenerate axes by copying the exact
    # endpoint value there.
    midpoint[degen] = self.min_pt[degen]
    return midpoint
python
{ "resource": "" }
q35661
IntervalProd.element
train
def element(self, inp=None):
    """Return an element of this interval product.

    Parameters
    ----------
    inp : float or `array-like`, optional
        Point to be cast to an element.

    Returns
    -------
    element : `numpy.ndarray` or float
        Array (`ndim` > 1) or float version of ``inp`` if provided,
        otherwise ``self.mid_pt``.

    Examples
    --------
    >>> interv = IntervalProd(0, 1)
    >>> interv.element(0.5)
    0.5
    """
    # Default: midpoint of the interval product.
    if inp is None:
        return self.mid_pt
    if inp not in self:
        raise TypeError('`inp` {!r} is not a valid element of {!r}'
                        ''.format(inp, self))
    # Scalars in 1d, arrays otherwise.
    return float(inp) if self.ndim == 1 else np.asarray(inp)
python
{ "resource": "" }
q35662
IntervalProd.approx_equals
train
def approx_equals(self, other, atol):
    """Return ``True`` if ``other`` is equal to this set up to ``atol``.

    Parameters
    ----------
    other :
        Object to be tested.
    atol : float
        Maximum allowed difference in maximum norm between the interval
        endpoints.

    Examples
    --------
    >>> rbox1 = IntervalProd(0, 0.5)
    >>> rbox2 = IntervalProd(0, np.sqrt(0.5)**2)
    >>> rbox1.approx_equals(rbox2, atol=0)  # Numerical error
    False
    >>> rbox1.approx_equals(rbox2, atol=1e-15)
    True
    """
    if other is self:
        return True
    if not isinstance(other, IntervalProd):
        return False
    # Compare both endpoint vectors with absolute tolerance only.
    min_close = np.allclose(self.min_pt, other.min_pt, atol=atol, rtol=0.0)
    max_close = np.allclose(self.max_pt, other.max_pt, atol=atol, rtol=0.0)
    return min_close and max_close
python
{ "resource": "" }
q35663
IntervalProd.approx_contains
train
def approx_contains(self, point, atol):
    """Return ``True`` if ``point`` is "almost" contained in this set.

    Parameters
    ----------
    point : `array-like` or float
        Point to be tested. Its length must be equal to `ndim`.
        In the 1d case, ``point`` can be given as a float.
    atol : float
        Maximum allowed distance in maximum norm from ``point``
        to ``self``.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> # Numerical error
    >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=0)
    False
    >>> rbox.approx_contains([-1 + np.sqrt(0.5)**2, 0., 2.9], atol=1e-9)
    True
    """
    try:
        # Duck-typed check of type.
        # BUGFIX: `np.float` was removed in NumPy 1.24 (use the builtin),
        # and `np.array(..., copy=False)` raises in NumPy 2.0 when a copy
        # is required; `asarray` + `atleast_1d` is the equivalent, safe
        # formulation.
        point = np.atleast_1d(np.asarray(point, dtype=float))
    except (ValueError, TypeError):
        return False

    if point.size == 0:
        return True
    elif point.shape != (self.ndim,):
        return False
    return self.dist(point, exponent=np.inf) <= atol
python
{ "resource": "" }
q35664
IntervalProd.contains_all
train
def contains_all(self, other, atol=0.0):
    """Return ``True`` if all points defined by ``other`` are contained.

    Parameters
    ----------
    other :
        Collection of points to be tested. Can be given as a single
        point, a ``(d, N)`` array-like where ``d`` is the number of
        dimensions, or a length-``d`` `meshgrid` tuple.
    atol : float, optional
        The maximum allowed distance in 'inf'-norm between the
        other set and this interval product.

    Returns
    -------
    contains : bool
        ``True`` if all points are contained, ``False`` otherwise.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)

    Arrays are expected in ``(ndim, npoints)`` shape:

    >>> arr = np.array([[-1, 0, 2],   # defining one point at a time
    ...                 [-0.5, 0, 2]])
    >>> rbox.contains_all(arr.T)
    True

    Implicit meshgrids defined by coordinate vectors:

    >>> from odl.discr.grid import sparse_meshgrid
    >>> vec1 = (-1, -0.9, -0.7)
    >>> vec2 = (0, 0, 0)
    >>> vec3 = (2.5, 2.75, 3)
    >>> mg = sparse_meshgrid(vec1, vec2, vec3)
    >>> rbox.contains_all(mg)
    True

    Works also with an arbitrary iterable:

    >>> rbox.contains_all([[-1, -0.5],  # define points by axis
    ...                    [0, 0],
    ...                    [2, 2]])
    True

    Grids are also accepted as input:

    >>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3])
    >>> rbox.contains_all(agrid)
    True
    """
    atol = float(atol)

    # First try optimized methods
    if other in self:
        return True
    if hasattr(other, 'meshgrid'):
        return self.contains_all(other.meshgrid, atol=atol)
    elif is_valid_input_meshgrid(other, self.ndim):
        # Min/max of each coordinate vector bound the whole grid
        vecs = tuple(vec.squeeze() for vec in other)
        mins = np.fromiter((np.min(vec) for vec in vecs), dtype=float)
        maxs = np.fromiter((np.max(vec) for vec in vecs), dtype=float)
        return (np.all(mins >= self.min_pt - atol) and
                np.all(maxs <= self.max_pt + atol))

    # Convert to array and check each element
    other = np.asarray(other)
    if is_valid_input_array(other, self.ndim):
        if self.ndim == 1:
            mins = np.min(other)
            maxs = np.max(other)
        else:
            mins = np.min(other, axis=1)
            maxs = np.max(other, axis=1)
        # Bug fix: honor `atol` for array input as well; the old code
        # only applied the tolerance in the meshgrid branch, so the
        # documented `atol` semantics silently did not hold for arrays.
        return (np.all(mins >= self.min_pt - atol) and
                np.all(maxs <= self.max_pt + atol))
    else:
        return False
python
{ "resource": "" }
q35665
IntervalProd.measure
train
def measure(self, ndim=None):
    """Return the Lebesgue measure of this interval product.

    Parameters
    ----------
    ndim : int, optional
        Dimension of the measure to apply.
        ``None`` is interpreted as `true_ndim`, which always results
        in a finite and positive result (unless the set is a single
        point).

    Examples
    --------
    >>> min_pt, max_pt = [-1, 2.5, 0], [-0.5, 10, 0]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.measure()
    3.75
    >>> rbox.measure(ndim=3)
    0.0
    >>> rbox.measure(ndim=1)
    inf
    """
    # A single point has measure zero in every dimension
    if self.true_ndim == 0:
        return 0.0

    if ndim is None:
        ndim = self.true_ndim

    if ndim < self.true_ndim:
        # Lower-dimensional measure of a higher-dimensional set
        return float('inf')
    if ndim > self.true_ndim:
        # Higher-dimensional measure of a lower-dimensional set
        return 0.0
    # Exactly matching dimension: product of the nonzero extents
    return np.prod(self.extent[self.nondegen_byaxis])
python
{ "resource": "" }
q35666
IntervalProd.dist
train
def dist(self, point, exponent=2.0):
    """Return the distance of ``point`` to this set.

    Parameters
    ----------
    point : `array-like` or float
        Point whose distance to calculate. Its length must be equal
        to the set's dimension. Can be a float in the 1d case.
    exponent : non-zero float or ``float('inf')``, optional
        Exponent of the norm used in the distance calculation.

    Returns
    -------
    dist : float
        Distance to the interior of the IntervalProd.
        Points strictly inside have distance ``0.0``, points with
        ``NaN`` have distance ``float('inf')``.

    See Also
    --------
    numpy.linalg.norm : norm used to compute the distance

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.dist([-5, 3, 2])
    5.0
    >>> rbox.dist([-5, 3, 2], exponent=float('inf'))
    4.0
    """
    point = np.atleast_1d(point)
    if len(point) != self.ndim:
        raise ValueError('`point` must have length {}, got {}'
                         ''.format(self.ndim, len(point)))

    # NaN coordinates cannot be located, treat as infinitely far away
    if np.any(np.isnan(point)):
        return float('inf')

    above = point > self.max_pt
    below = point < self.min_pt
    if not np.any(above) and not np.any(below):
        # Inside (or on the boundary) in every axis
        return 0.0

    # Distance is the norm of the per-axis overshoot beyond the
    # nearest boundary
    outside = np.concatenate((point[above], point[below]))
    nearest = np.concatenate((self.max_pt[above], self.min_pt[below]))
    return np.linalg.norm(outside - nearest, ord=exponent)
python
{ "resource": "" }
q35667
IntervalProd.collapse
train
def collapse(self, indices, values):
    """Partly collapse the interval product to single values.

    Note that no changes are made in-place.

    Parameters
    ----------
    indices : int or sequence of ints
        The indices of the dimensions along which to collapse.
    values : `array-like` or float
        The values to which to collapse. Must have the same length as
        ``indices``. Values must lie within the interval boundaries.

    Returns
    -------
    collapsed : `IntervalProd`
        The collapsed set.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.collapse(1, 0)
    IntervalProd([-1.,  0.,  2.], [-0.5,  0. ,  3. ])
    >>> rbox.collapse([1, 2], [0, 2.5])
    IntervalProd([-1. ,  0. ,  2.5], [-0.5,  0. ,  2.5])
    """
    indices = np.atleast_1d(indices).astype('int64', casting='safe')
    values = np.atleast_1d(values)
    if len(indices) != len(values):
        raise ValueError('lengths of indices {} and values {} do not '
                         'match ({} != {})'
                         ''.format(indices, values,
                                   len(indices), len(values)))

    # Bug fix: the upper bound must be exclusive -- valid axis indices
    # are 0 ... ndim - 1, as the error message already stated. The old
    # check used `<= self.ndim`, letting `index == ndim` through and
    # crashing later with an obscure NumPy indexing error.
    for pos, index in enumerate(indices):
        if not 0 <= index < self.ndim:
            raise IndexError('in axis {}: index {} out of range 0 --> {}'
                             ''.format(pos, index, self.ndim - 1))

    if np.any(values < self.min_pt[indices]):
        raise ValueError('values {} not above the lower interval '
                         'boundaries {}'
                         ''.format(values, self.min_pt[indices]))

    if np.any(values > self.max_pt[indices]):
        raise ValueError('values {} not below the upper interval '
                         'boundaries {}'
                         ''.format(values, self.max_pt[indices]))

    # Collapsed axes get identical lower and upper bounds
    b_new = self.min_pt.copy()
    b_new[indices] = values
    e_new = self.max_pt.copy()
    e_new[indices] = values

    return IntervalProd(b_new, e_new)
python
{ "resource": "" }
q35668
IntervalProd.squeeze
train
def squeeze(self):
    """Return a copy with all degenerate axes removed.

    An axis is degenerate if its lower and upper boundary coincide.
    Note that no changes are made in-place.

    Returns
    -------
    squeezed : `IntervalProd`
        Squeezed set.

    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    >>> rbox.collapse(1, 0).squeeze()
    IntervalProd([-1.,  2.], [-0.5,  3. ])
    >>> rbox.collapse([1, 2], [0, 2.5]).squeeze()
    IntervalProd(-1.0, -0.5)
    """
    # Keep only the axes with nonzero extent
    keep = self.nondegen_byaxis
    return IntervalProd(self.min_pt[keep], self.max_pt[keep])
python
{ "resource": "" }
q35669
IntervalProd.insert
train
def insert(self, index, *intvs):
    """Return a copy with ``intvs`` inserted before ``index``.

    The given interval products are inserted (as a block) into
    ``self``, yielding a new interval product whose number of
    dimensions is the sum of the numbers of dimensions of all
    involved interval products. Note that no changes are made
    in-place.

    Parameters
    ----------
    index : int
        Index of the dimension before which ``intvs`` are to be
        inserted. Must fulfill ``-ndim <= index <= ndim``. Negative
        indices count backwards from ``self.ndim``.
    intv1, ..., intvN : `IntervalProd`
        Interval products to be inserted into ``self``.

    Returns
    -------
    newintvp : `IntervalProd`
        The enlarged interval product.

    Examples
    --------
    >>> intv = IntervalProd([-1, 2], [-0.5, 3])
    >>> intv2 = IntervalProd(0, 1)
    >>> intv.insert(0, intv2)
    IntervalProd([ 0., -1.,  2.], [ 1. , -0.5,  3. ])
    >>> intv.insert(-1, intv2)
    IntervalProd([-1.,  0.,  2.], [-0.5,  1. ,  3. ])
    >>> intv.insert(1, intv2, intv2)
    IntervalProd([-1.,  0.,  0.,  2.], [-0.5,  1. ,  1. ,  3. ])
    """
    # Keep the raw input around so error messages show what the
    # caller actually passed, not the converted value.
    index, index_in = safe_int_conv(index), index
    if not -self.ndim <= index <= self.ndim:
        raise IndexError('index {0} outside the valid range -{1} ... {1}'
                         ''.format(index_in, self.ndim))
    # Normalize negative indices to their positive equivalent
    if index < 0:
        index += self.ndim

    if len(intvs) == 0:
        # Copy of `self`
        return IntervalProd(self.min_pt, self.max_pt)
    elif len(intvs) == 1:
        # Insert single interval product: splice its min/max vectors
        # into new arrays at position `index`.
        intv = intvs[0]
        if not isinstance(intv, IntervalProd):
            raise TypeError('{!r} is not a `IntervalProd` instance'
                            ''.format(intv))
        new_min_pt = np.empty(self.ndim + intv.ndim)
        new_max_pt = np.empty(self.ndim + intv.ndim)
        new_min_pt[: index] = self.min_pt[: index]
        new_max_pt[: index] = self.max_pt[: index]
        new_min_pt[index: index + intv.ndim] = intv.min_pt
        new_max_pt[index: index + intv.ndim] = intv.max_pt
        if index < self.ndim:  # Avoid IndexError
            new_min_pt[index + intv.ndim:] = self.min_pt[index:]
            new_max_pt[index + intv.ndim:] = self.max_pt[index:]

        return IntervalProd(new_min_pt, new_max_pt)
    else:
        # Recursively insert one, then rest into the result. The
        # insertion point is shifted by the number of dimensions
        # already inserted.
        return self.insert(index, intvs[0]).insert(
            index + intvs[0].ndim, *(intvs[1:]))
python
{ "resource": "" }
q35670
IntervalProd.corners
train
def corners(self, order='C'):
    """Return the corner points as a single array.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Ordering of the axes in which the corners appear in
        the output. ``'C'`` means that the first axis varies slowest
        and the last one fastest, vice versa in ``'F'`` ordering.

    Returns
    -------
    corners : `numpy.ndarray`
        Array containing the corner coordinates. The size of the
        array is ``2^m x ndim``, where ``m`` is the number of
        non-degenerate axes, i.e. the corners are stored as rows.
    """
    from odl.discr.grid import RectGrid

    # Per axis: a (min, max) pair for axes with nonzero extent, the
    # single boundary value for degenerate ones. The tensor grid of
    # these vectors enumerates exactly the corner points.
    vecs = []
    for axis in range(self.ndim):
        if self.nondegen_byaxis[axis]:
            vecs.append((self.min_pt[axis], self.max_pt[axis]))
        else:
            vecs.append(self.min_pt[axis])

    return RectGrid(*vecs).points(order=order)
python
{ "resource": "" }
q35671
RayTransform._call_real
train
def _call_real(self, x_real, out_real):
    """Real-space forward projection for the current set-up.

    This method also sets ``self._astra_wrapper`` for
    ``impl='astra_cuda'`` and enabled cache.
    """
    if self.impl.startswith('astra'):
        # `impl` has the form 'astra_<data_impl>'; only the data
        # implementation part is needed for dispatch (the unused
        # 'astra' prefix was previously bound to a dead local).
        data_impl = self.impl.split('_')[1]
        if data_impl == 'cpu':
            return astra_cpu_forward_projector(
                x_real, self.geometry, self.range.real_space, out_real)
        elif data_impl == 'cuda':
            if self._astra_wrapper is None:
                astra_wrapper = AstraCudaProjectorImpl(
                    self.geometry, self.domain.real_space,
                    self.range.real_space)
                if self.use_cache:
                    # Keep the wrapper (and its GPU data structures)
                    # alive for subsequent evaluations.
                    self._astra_wrapper = astra_wrapper
            else:
                astra_wrapper = self._astra_wrapper

            return astra_wrapper.call_forward(x_real, out_real)
        else:
            # Should never happen
            raise RuntimeError('bad `impl` {!r}'.format(self.impl))
    elif self.impl == 'skimage':
        return skimage_radon_forward(x_real, self.geometry,
                                     self.range.real_space, out_real)
    else:
        # Should never happen
        raise RuntimeError('bad `impl` {!r}'.format(self.impl))
python
{ "resource": "" }
q35672
RayBackProjection._call_real
train
def _call_real(self, x_real, out_real):
    """Real-space back-projection for the current set-up.

    This method also sets ``self._astra_wrapper`` for
    ``impl='astra_cuda'`` and enabled cache.
    """
    if self.impl.startswith('astra'):
        # `impl` has the form 'astra_<data_impl>'; only the data
        # implementation part is needed for dispatch (the unused
        # 'astra' prefix was previously bound to a dead local).
        data_impl = self.impl.split('_')[1]
        if data_impl == 'cpu':
            return astra_cpu_back_projector(x_real, self.geometry,
                                            self.range.real_space,
                                            out_real)
        elif data_impl == 'cuda':
            if self._astra_wrapper is None:
                astra_wrapper = AstraCudaBackProjectorImpl(
                    self.geometry, self.range.real_space,
                    self.domain.real_space)
                if self.use_cache:
                    # Keep the wrapper (and its GPU data structures)
                    # alive for subsequent evaluations.
                    self._astra_wrapper = astra_wrapper
            else:
                astra_wrapper = self._astra_wrapper

            return astra_wrapper.call_backward(x_real, out_real)
        else:
            # Should never happen
            raise RuntimeError('bad `impl` {!r}'.format(self.impl))
    elif self.impl == 'skimage':
        return skimage_radon_back_projector(x_real, self.geometry,
                                            self.range.real_space,
                                            out_real)
    else:
        # Should never happen
        raise RuntimeError('bad `impl` {!r}'.format(self.impl))
python
{ "resource": "" }
q35673
mlem
train
def mlem(op, x, data, niter, callback=None, **kwargs):
    r"""Maximum Likelihood Expectation Maximization algorithm.

    Attempts to solve::

        max_x L(x | data)

    where ``L(x | data)`` is the Poisson likelihood of ``x`` given
    ``data``. The likelihood depends on the forward operator ``op``
    such that (approximately)::

        op(x) = data

    Parameters
    ----------
    op : `Operator`
        Forward operator in the inverse problem.
    x : ``op.domain`` element
        Vector to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step. The initial value of ``x``
        should be non-negative.
    data : ``op.range`` `element-like`
        Right-hand side of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Other Parameters
    ----------------
    sensitivities : float or ``op.domain`` `element-like`, optional
        The algorithm contains a ``A^T 1`` term, if this parameter is
        given, it is replaced by it.
        Default: ``op.adjoint(op.range.one())``

    Notes
    -----
    Given a forward model :math:`A` and data :math:`g`, the algorithm
    attempts to find an :math:`x` that maximizes:

    .. math::
        P(g | g \text{ is } Poisson(A(x)) \text{ distributed}).

    The algorithm is explicitly given by:

    .. math::
       x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n))

    See Also
    --------
    osmlem : Ordered subsets MLEM
    loglikelihood : Function for calculating the logarithm of the
        likelihood
    """
    # Plain MLEM is OS-MLEM with a single subset
    osmlem(op=[op], x=x, data=[data], niter=niter, callback=callback,
           **kwargs)
python
{ "resource": "" }
q35674
osmlem
train
def osmlem(op, x, data, niter, callback=None, **kwargs):
    r"""Ordered Subsets Maximum Likelihood Expectation Maximization.

    This solver attempts to solve::

        max_x L(x | data)

    where ``L(x | data)`` is the likelihood of ``x`` given ``data``.
    The likelihood depends on the forward operators
    ``op[0], ..., op[n-1]`` such that (approximately)::

        op[i](x) = data[i]

    Parameters
    ----------
    op : sequence of `Operator`
        Forward operators in the inverse problem.
    x : ``op.domain`` element
        Vector to which the result is written. Its initial value is
        used as starting point of the iteration, and its values are
        updated in each iteration step. The initial value of ``x``
        should be non-negative.
    data : sequence of ``op.range`` `element-like`
        Right-hand sides of the equation defining the inverse problem.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.

    Other Parameters
    ----------------
    sensitivities : float or ``op.domain`` `element-like`, optional
        The algorithm contains an ``A^T 1`` term, if this parameter is
        given, it is replaced by it.
        Default: ``op[i].adjoint(op[i].range.one())``

    Notes
    -----
    Given forward models :math:`A_i`, and data :math:`g_i`,
    :math:`i = 1, ..., M`, the algorithm attempts to find an
    :math:`x` that maximizes:

    .. math::
        \prod_{i=1}^M P(g_i | g_i \text{ is }
        Poisson(A_i(x)) \text{ distributed}).

    The algorithm is explicitly given by partial updates:

    .. math::
       x_{n + m/M} =
       \frac{x_{n + (m - 1)/M}}{A_i^* 1}
       A_i^* (g_i / A_i(x_{n + (m - 1)/M}))

    for :math:`m = 1, ..., M` and :math:`x_{n+1} = x_{n + M/M}`.

    The algorithm is not guaranteed to converge, but works for many
    practical problems.

    References
    ----------
    Natterer, F. Mathematical Methods in Image Reconstruction, section
    5.3.2.

    See Also
    --------
    mlem : Ordinary MLEM algorithm without subsets.
    loglikelihood : Function for calculating the logarithm of the
        likelihood
    """
    n_ops = len(op)
    if len(data) != n_ops:
        raise ValueError('number of data ({}) does not match number of '
                         'operators ({})'.format(len(data), n_ops))
    if not all(x in opi.domain for opi in op):
        raise ValueError('`x` not an element in the domains of all '
                         'operators')

    # Convert data to range elements
    data = [op[i].range.element(data[i]) for i in range(len(op))]

    # Parameter used to enforce positivity.
    # TODO: let users give this.
    eps = 1e-8

    if np.any(np.less(x, 0)):
        raise ValueError('`x` must be non-negative')

    # Extract the sensitivites parameter
    sensitivities = kwargs.pop('sensitivities', None)
    if sensitivities is None:
        # Default A_i^* 1, clipped away from zero to allow division
        sensitivities = [np.maximum(opi.adjoint(opi.range.one()), eps)
                         for opi in op]
    else:
        # Make sure the sensitivities is a list of the correct size.
        try:
            list(sensitivities)
        except TypeError:
            # Scalar given: replicate for all operators
            sensitivities = [sensitivities] * n_ops

    # Reusable scratch elements to avoid per-iteration allocations
    tmp_dom = op[0].domain.element()
    tmp_ran = [opi.range.element() for opi in op]

    for _ in range(niter):
        for i in range(n_ops):
            # tmp_ran[i] <- g_i / max(A_i(x), eps)
            op[i](x, out=tmp_ran[i])
            tmp_ran[i].ufuncs.maximum(eps, out=tmp_ran[i])
            data[i].divide(tmp_ran[i], out=tmp_ran[i])

            # tmp_dom <- A_i^*(...) / sensitivities[i]
            op[i].adjoint(tmp_ran[i], out=tmp_dom)
            tmp_dom /= sensitivities[i]

            # Multiplicative in-place update of the iterate
            x *= tmp_dom

        if callback is not None:
            callback(x)
python
{ "resource": "" }
q35675
poisson_log_likelihood
train
def poisson_log_likelihood(x, data):
    """Poisson log-likelihood of ``data`` given noise parametrized by ``x``.

    Parameters
    ----------
    x : ``op.domain`` element
        Value to condition the log-likelihood on. Must be
        non-negative.
    data : ``op.range`` element
        Data whose log-likelihood given ``x`` shall be calculated.
    """
    if np.any(np.less(x, 0)):
        raise ValueError('`x` must be non-negative')

    # Small offset avoids log(0) for zero-valued entries of `x`
    log_term = data * np.log(x + 1e-8)
    return np.sum(log_term - x)
python
{ "resource": "" }
q35676
fom
train
def fom(reco, true_image):
    """Sobolev type FoM enforcing both gradient and absolute similarity."""
    # Gradient-norm term of the reconstruction error ...
    residual = reco - true_image
    grad_term = odl.Gradient(reco.space)(residual).norm()
    # ... plus the plain distance in the image space
    dist_term = reco.space.dist(reco, true_image)
    return grad_term + dist_term
python
{ "resource": "" }
q35677
astra_cuda_bp_scaling_factor
train
def astra_cuda_bp_scaling_factor(proj_space, reco_space, geometry):
    """Volume scaling accounting for differing adjoint definitions.

    ASTRA defines the adjoint operator in terms of a fully discrete
    setting (transposed "projection matrix") without any relation to
    physical dimensions, which makes a re-scaling necessary to
    translate it to spaces with physical dimensions.

    Behavior of ASTRA changes slightly between versions, so we keep
    track of it and adapt the scaling accordingly.
    """
    # Angular integration weighting factor
    # angle interval weight by approximate cell volume
    angle_extent = geometry.motion_partition.extent
    num_angles = geometry.motion_partition.shape
    # TODO: this gives the wrong factor for Parallel3dEulerGeometry with
    # 2 angles
    scaling_factor = (angle_extent / num_angles).prod()

    # Correct in case of non-weighted spaces
    proj_extent = float(proj_space.partition.extent.prod())
    proj_size = float(proj_space.partition.size)
    proj_weighting = proj_extent / proj_size

    scaling_factor *= (proj_space.weighting.const /
                       proj_weighting)
    scaling_factor /= (reco_space.weighting.const /
                       reco_space.cell_volume)

    # Bug fix: the version branches below must be mutually exclusive.
    # Previously the '1.9.0dev' check was a separate `if`, so for
    # ASTRA < 1.8rc1 both the pre-1.8 corrections AND the fallback
    # `else` corrections were applied, double-scaling the result.
    if parse_version(ASTRA_VERSION) < parse_version('1.8rc1'):
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry) and
              geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Additional magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with voxel stride
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with 1 / cell_volume
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2
    elif parse_version(ASTRA_VERSION) == parse_version('1.9.0dev'):
        # Development version of astra
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry) and
              geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with cell volume
            # currently only square voxels are supported
            scaling_factor /= reco_space.cell_volume
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with cell volume
            scaling_factor /= reco_space.cell_volume
            # Magnification correction (scaling = 1 / magnification ** 2)
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2

            # Correction for scaled 1/r^2 factor in ASTRA's density weighting.
            # This compensates for scaled voxels and pixels, as well as a
            # missing factor src_radius ** 2 in the ASTRA BP with
            # density weighting.
            det_px_area = geometry.det_partition.cell_volume
            scaling_factor *= (src_radius ** 2 * det_px_area ** 2)
    else:
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry) and
              geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with cell volume
            # currently only square voxels are supported
            scaling_factor /= reco_space.cell_volume
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with cell volume
            scaling_factor /= reco_space.cell_volume
            # Magnification correction (scaling = 1 / magnification ** 2)
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2

            # Correction for scaled 1/r^2 factor in ASTRA's density weighting.
            # This compensates for scaled voxels and pixels, as well as a
            # missing factor src_radius ** 2 in the ASTRA BP with
            # density weighting.
            det_px_area = geometry.det_partition.cell_volume
            scaling_factor *= (src_radius ** 2 * det_px_area ** 2 /
                               reco_space.cell_volume ** 2)

    # TODO: add case with new ASTRA release

    return scaling_factor
python
{ "resource": "" }
q35678
AstraCudaProjectorImpl.call_forward
train
def call_forward(self, vol_data, out=None):
    """Run an ASTRA forward projection on the given data using the GPU.

    Parameters
    ----------
    vol_data : ``reco_space`` element
        Volume data to which the projector is applied.
    out : ``proj_space`` element, optional
        Element of the projection space to which the result is written. If
        ``None``, an element in `proj_space` is created.

    Returns
    -------
    out : ``proj_space`` element
        Projection data resulting from the application of the projector.
        If ``out`` was provided, the returned object is a reference to it.
    """
    # Serialize evaluations; presumably the shared ASTRA data/algorithm
    # ids are not safe for concurrent use -- TODO confirm
    with self._mutex:
        assert vol_data in self.reco_space
        if out is not None:
            assert out in self.proj_space
        else:
            out = self.proj_space.element()

        # Copy data to GPU memory
        if self.geometry.ndim == 2:
            astra.data2d.store(self.vol_id, vol_data.asarray())
        elif self.geometry.ndim == 3:
            astra.data3d.store(self.vol_id, vol_data.asarray())
        else:
            raise RuntimeError('unknown ndim')

        # Run algorithm
        astra.algorithm.run(self.algo_id)

        # Copy result to host
        if self.geometry.ndim == 2:
            out[:] = self.out_array
        elif self.geometry.ndim == 3:
            # Undo the u/v axis swap used in the ASTRA 3d layout
            # (see `create_ids`)
            out[:] = np.swapaxes(self.out_array, 0, 1).reshape(
                self.proj_space.shape)

        # Fix scaling to weight by pixel size
        if isinstance(self.geometry, Parallel2dGeometry):
            # parallel2d scales with pixel stride
            out *= 1 / float(self.geometry.det_partition.cell_volume)

        return out
python
{ "resource": "" }
q35679
AstraCudaProjectorImpl.create_ids
train
def create_ids(self): """Create ASTRA objects.""" # Create input and output arrays if self.geometry.motion_partition.ndim == 1: motion_shape = self.geometry.motion_partition.shape else: # Need to flatten 2- or 3-dimensional angles into one axis motion_shape = (np.prod(self.geometry.motion_partition.shape),) proj_shape = motion_shape + self.geometry.det_partition.shape proj_ndim = len(proj_shape) if proj_ndim == 2: astra_proj_shape = proj_shape astra_vol_shape = self.reco_space.shape elif proj_ndim == 3: # The `u` and `v` axes of the projection data are swapped, # see explanation in `astra_*_3d_geom_to_vec`. astra_proj_shape = (proj_shape[1], proj_shape[0], proj_shape[2]) astra_vol_shape = self.reco_space.shape self.in_array = np.empty(astra_vol_shape, dtype='float32', order='C') self.out_array = np.empty(astra_proj_shape, dtype='float32', order='C') # Create ASTRA data structures vol_geom = astra_volume_geometry(self.reco_space) proj_geom = astra_projection_geometry(self.geometry) self.vol_id = astra_data(vol_geom, datatype='volume', ndim=self.reco_space.ndim, data=self.in_array, allow_copy=False) self.proj_id = astra_projector('nearest', vol_geom, proj_geom, ndim=proj_ndim, impl='cuda') self.sino_id = astra_data(proj_geom, datatype='projection', ndim=proj_ndim, data=self.out_array, allow_copy=False) # Create algorithm self.algo_id = astra_algorithm( 'forward', proj_ndim, self.vol_id, self.sino_id, proj_id=self.proj_id, impl='cuda')
python
{ "resource": "" }
q35680
AstraCudaBackProjectorImpl.call_backward
train
def call_backward(self, proj_data, out=None):
    """Run an ASTRA back-projection on the given data using the GPU.

    Parameters
    ----------
    proj_data : ``proj_space`` element
        Projection data to which the back-projector is applied.
    out : ``reco_space`` element, optional
        Element of the reconstruction space to which the result is
        written.
        If ``None``, an element in ``reco_space`` is created.

    Returns
    -------
    out : ``reco_space`` element
        Reconstruction data resulting from the application of the
        back-projector.
        If ``out`` was provided, the returned object is a reference to it.
    """
    # Serialize evaluations; presumably the shared ASTRA data/algorithm
    # ids are not safe for concurrent use -- TODO confirm
    with self._mutex:
        assert proj_data in self.proj_space
        if out is not None:
            assert out in self.reco_space
        else:
            out = self.reco_space.element()

        # Copy data to GPU memory
        if self.geometry.ndim == 2:
            astra.data2d.store(self.sino_id, proj_data.asarray())
        elif self.geometry.ndim == 3:
            # Flatten the angle axes, then swap the u/v axes to match
            # the ASTRA 3d data layout
            shape = (-1,) + self.geometry.det_partition.shape
            reshaped_proj_data = proj_data.asarray().reshape(shape)
            swapped_proj_data = np.ascontiguousarray(
                np.swapaxes(reshaped_proj_data, 0, 1))
            astra.data3d.store(self.sino_id, swapped_proj_data)

        # Run algorithm
        astra.algorithm.run(self.algo_id)

        # Copy result to CPU memory
        out[:] = self.out_array

        # Fix scaling to weight by pixel/voxel size
        out *= astra_cuda_bp_scaling_factor(
            self.proj_space, self.reco_space, self.geometry)

        return out
python
{ "resource": "" }
q35681
find_min_signature
train
def find_min_signature(ufunc, dtypes_in):
    """Determine the minimum matching ufunc signature for given dtypes.

    Parameters
    ----------
    ufunc : str or numpy.ufunc
        Ufunc whose signatures are to be considered.
    dtypes_in :
        Sequence of objects specifying input dtypes. Its length must
        match the number of inputs of ``ufunc``, and its entries must
        be understood by `numpy.dtype`.

    Returns
    -------
    signature : str
        Minimum matching ufunc signature, see, e.g., ``np.add.types``
        for examples.

    Raises
    ------
    TypeError
        If no valid signature is found.
    """
    if not isinstance(ufunc, np.ufunc):
        ufunc = getattr(np, str(ufunc))

    dtypes_in = [np.dtype(dt) for dt in dtypes_in]
    tcs_in = [dt.base.char for dt in dtypes_in]

    if len(tcs_in) != ufunc.nin:
        raise ValueError('expected {} input dtype(s) for {}, got {}'
                         ''.format(ufunc.nin, ufunc, len(tcs_in)))

    def is_match(sig):
        """Whether all inputs safely cast into the signature's inputs."""
        sig_tcs_in = sig.split('->')[0]
        return all(np.dtype(tc) <= np.dtype(sig_tc) and
                   sig_tc in SUPP_TYPECODES
                   for tc, sig_tc in zip(tcs_in, sig_tcs_in))

    valid_sigs = [sig for sig in ufunc.types if is_match(sig)]
    if not valid_sigs:
        raise TypeError('no valid signature found for {} and input dtypes {}'
                        ''.format(ufunc, tuple(dt.name for dt in dtypes_in)))

    # "Minimum" signature = the one with the smallest input dtypes
    return min(valid_sigs,
               key=lambda sig: tuple(np.dtype(tc)
                                     for tc in sig.split('->')[0]))
python
{ "resource": "" }
q35682
gradient_factory
train
def gradient_factory(name):
    """Create gradient `Functional` for some ufuncs.

    The returned callable takes the functional instance (``self``) and
    produces its gradient; ufuncs without a known closed-form gradient
    fall back to the default `Functional.gradient`.
    """
    def gradient_sin(self):
        """Return the gradient operator."""
        return cos(self.domain)

    def gradient_cos(self):
        """Return the gradient operator."""
        return -sin(self.domain)

    def gradient_tan(self):
        """Return the gradient operator."""
        return 1 + square(self.domain) * self

    def gradient_sqrt(self):
        """Return the gradient operator."""
        return FunctionalQuotient(ConstantFunctional(self.domain, 0.5),
                                  self)

    def gradient_square(self):
        """Return the gradient operator."""
        return ScalingFunctional(self.domain, 2.0)

    def gradient_log(self):
        """Return the gradient operator."""
        return reciprocal(self.domain)

    def gradient_exp(self):
        """Return the gradient operator."""
        return self

    def gradient_reciprocal(self):
        """Return the gradient operator."""
        return FunctionalQuotient(ConstantFunctional(self.domain, -1.0),
                                  square(self.domain))

    def gradient_sinh(self):
        """Return the gradient operator."""
        return cosh(self.domain)

    def gradient_cosh(self):
        """Return the gradient operator."""
        return sinh(self.domain)

    dispatch = {'sin': gradient_sin,
                'cos': gradient_cos,
                'tan': gradient_tan,
                'sqrt': gradient_sqrt,
                'square': gradient_square,
                'log': gradient_log,
                'exp': gradient_exp,
                'reciprocal': gradient_reciprocal,
                'sinh': gradient_sinh,
                'cosh': gradient_cosh}

    try:
        return dispatch[name]
    except KeyError:
        # Fallback to default
        return Functional.gradient
python
{ "resource": "" }
q35683
derivative_factory
train
def derivative_factory(name):
    """Create derivative function for some ufuncs.

    The returned callable takes the operator instance (``self``) and an
    evaluation point, and produces the derivative at that point; ufuncs
    without a known closed-form derivative fall back to the default
    `Operator.derivative`.
    """
    def derivative_sin(self, point):
        """Return the derivative operator."""
        return MultiplyOperator(cos(self.domain)(point))

    def derivative_cos(self, point):
        """Return the derivative operator."""
        point = self.domain.element(point)
        return MultiplyOperator(-sin(self.domain)(point))

    def derivative_tan(self, point):
        """Return the derivative operator."""
        return MultiplyOperator(1 + self(point) ** 2)

    def derivative_sqrt(self, point):
        """Return the derivative operator."""
        return MultiplyOperator(0.5 / self(point))

    def derivative_square(self, point):
        """Return the derivative operator."""
        point = self.domain.element(point)
        return MultiplyOperator(2.0 * point)

    def derivative_log(self, point):
        """Return the derivative operator."""
        point = self.domain.element(point)
        return MultiplyOperator(1.0 / point)

    def derivative_exp(self, point):
        """Return the derivative operator."""
        return MultiplyOperator(self(point))

    def derivative_reciprocal(self, point):
        """Return the derivative operator."""
        point = self.domain.element(point)
        return MultiplyOperator(-self(point) ** 2)

    def derivative_sinh(self, point):
        """Return the derivative operator."""
        point = self.domain.element(point)
        return MultiplyOperator(cosh(self.domain)(point))

    def derivative_cosh(self, point):
        """Return the derivative operator."""
        return MultiplyOperator(sinh(self.domain)(point))

    dispatch = {'sin': derivative_sin,
                'cos': derivative_cos,
                'tan': derivative_tan,
                'sqrt': derivative_sqrt,
                'square': derivative_square,
                'log': derivative_log,
                'exp': derivative_exp,
                'reciprocal': derivative_reciprocal,
                'sinh': derivative_sinh,
                'cosh': derivative_cosh}

    try:
        return dispatch[name]
    except KeyError:
        # Fallback to default
        return Operator.derivative
python
{ "resource": "" }
q35684
ufunc_functional_factory
train
def ufunc_functional_factory(name, nargin, nargout, docstring):
    """Create a ufunc `Functional` from a given specification.

    Parameters
    ----------
    name : str
        Name of the numpy ufunc to wrap.
    nargin : int
        Number of input arguments of the ufunc (currently only 1 is
        supported).
    nargout : int
        Number of outputs of the ufunc (currently only 1 is supported).
    docstring : str
        Docstring for the created class; usage examples are appended.

    Returns
    -------
    type
        A new `Functional` subclass named ``name + '_op'``.
    """
    assert 0 <= nargin <= 2

    def __init__(self, field):
        """Initialize an instance.

        Parameters
        ----------
        field : `Field`
            The domain of the functional.
        """
        if not isinstance(field, Field):
            # BUGFIX: format the offending argument ``field``; previously
            # the unrelated closure variable ``space`` was reported.
            raise TypeError('`field` {!r} not a `Field`'.format(field))

        if _is_integer_only_ufunc(name):
            raise ValueError("ufunc '{}' only defined with integral dtype"
                             "".format(name))

        linear = name in LINEAR_UFUNCS
        Functional.__init__(self, space=field, linear=linear)

    def _call(self, x):
        """Return ``self(x)``."""
        if nargin == 1:
            return getattr(np, name)(x)
        else:
            return getattr(np, name)(*x)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(name, self.domain)

    # Create example (also functions as doctest)
    if nargin != 1:
        raise NotImplementedError('Currently not supported')
    if nargout != 1:
        raise NotImplementedError('Currently not supported')

    space = RealNumbers()
    val = 1.0
    arg = '{}'.format(val)
    with np.errstate(all='ignore'):
        # Silence warnings from ufuncs undefined at 1.0 (e.g. arccosh')
        result = np.float64(getattr(np, name)(val))

    examples_docstring = RAW_EXAMPLES_DOCSTRING.format(space=space, name=name,
                                                       arg=arg, result=result)
    full_docstring = docstring + examples_docstring

    attributes = {"__init__": __init__,
                  "_call": _call,
                  "gradient": property(gradient_factory(name)),
                  "__repr__": __repr__,
                  "__doc__": full_docstring}

    full_name = name + '_op'

    return type(full_name, (Functional,), attributes)
python
{ "resource": "" }
q35685
pdhg_stepsize
train
def pdhg_stepsize(L, tau=None, sigma=None):
    r"""Default step sizes for `pdhg`.

    Parameters
    ----------
    L : `Operator` or float
        Operator or norm of the operator that are used in the `pdhg`
        method. If it is an `Operator`, the norm is computed with
        ``Operator.norm(estimate=True)``.
    tau : positive float, optional
        Use this value for ``tau`` instead of computing it from the
        operator norms, see Notes.
    sigma : positive float, optional
        Use this value for ``sigma`` instead of computing it from the
        operator norms, see Notes.

    Returns
    -------
    tau : float
        The ``tau`` step size parameter for the primal update.
    sigma : float
        The ``sigma`` step size parameter for the dual update.

    Notes
    -----
    To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma`
    and :math:`L` need to satisfy

    .. math::
       \tau \sigma \|L\|^2 < 1

    This function has 4 options, :math:`\tau`/:math:`\sigma` given or not
    given.

    - Neither :math:`\tau` nor :math:`\sigma` are given, they are chosen as

      .. math::
        \tau = \sigma = \frac{\sqrt{0.9}}{\|L\|}

    - If only :math:`\sigma` is given, :math:`\tau` is set to

      .. math::
          \tau = \frac{0.9}{\sigma \|L\|^2}

    - If only :math:`\tau` is given, :math:`\sigma` is set to

      .. math::
          \sigma = \frac{0.9}{\tau \|L\|^2}

    - If both are given, they are returned as-is without further
      validation.
    """
    if tau is not None and sigma is not None:
        # Both given: pass through unchanged (no validation).
        return float(tau), float(sigma)

    # Norm of L; estimated if an operator was passed instead of a number.
    L_norm = L.norm(estimate=True) if isinstance(L, Operator) else float(L)

    if tau is None and sigma is None:
        # tau * sigma * ||L||^2 = 0.9 < 1 guarantees convergence.
        tau = sigma = float(np.sqrt(0.9) / L_norm)
        return tau, sigma
    elif tau is None:
        tau = 0.9 / (sigma * L_norm ** 2)
        return float(tau), float(sigma)
    else:  # sigma is None
        sigma = 0.9 / (tau * L_norm ** 2)
        return float(tau), float(sigma)
python
{ "resource": "" }
q35686
haarpsi_similarity_map
train
def haarpsi_similarity_map(img1, img2, axis, c, a):
    r"""Local similarity map for directional features along an axis.

    Parameters
    ----------
    img1, img2 : array-like
        The images to compare. They must have equal shape.
    axis : {0, 1}
        Direction in which to look for edge similarities.
    c : positive float
        Constant determining the score of maximally dissimilar values.
        Smaller constant means higher penalty for dissimilarity.
        See Notes for details.
    a : positive float
        Parameter in the logistic function. Larger value leads to a
        steeper curve, thus lowering the threshold for an input to be
        mapped to an output close to 1. See Notes for details.

    Returns
    -------
    local_sim : `numpy.ndarray`
        Pointwise similarity of directional edge features of ``img1`` and
        ``img2``, measured using two Haar wavelet detail levels.

    Notes
    -----
    For input images :math:`f_1, f_2` this function is defined as

    .. math::
        \mathrm{HS}_{f_1, f_2}^{(k)}(x) =
        l_a \left(
        \frac{1}{2} \sum_{j=1}^2
        S\left(\left|g_j^{(k)} \ast f_1 \right|(x),
        \left|g_j^{(k)} \ast f_2 \right|(x), c\right)
        \right),

    see `[Rei+2016] <https://arxiv.org/abs/1607.06140>`_ equation (10).
    Here, the superscript :math:`(k)` refers to the axis (0 or 1) in which
    edge features are compared, :math:`l_a` is the logistic function
    :math:`l_a(x) = (1 + \mathrm{e}^{-a x})^{-1}`, and :math:`S` is the
    pointwise similarity score

    .. math::
        S(x, y, c) = \frac{2xy + c^2}{x^2 + y^2 + c^2}.

    Hence, :math:`c` is the :math:`y`-value at which the score drops to
    :math:`1 / 2` for :math:`x = 0`; the smaller :math:`c` is chosen, the
    more dissimilarity is penalized.

    The filters :math:`g_j^{(k)}` are high-pass Haar wavelet filters in
    the axis :math:`k` and low-pass Haar wavelet filters in the other
    axes, where :math:`j` is the wavelet scaling level. The logistic
    function maps :math:`[0, \infty)` to :math:`[1/2, 1)`; the larger
    :math:`a`, the more forgiving the similarity measure.

    References
    ----------
    [Rei+2016] Reisenhofer, R, Bosse, S, Kutyniok, G, and Wiegand, T.
    *A Haar Wavelet-Based Perceptual Similarity Index for Image Quality
    Assessment*. arXiv:1607.06140 [cs], Jul. 2016.
    """
    # TODO: generalize for nD
    import scipy.special
    impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy'

    # Haar wavelet low-/high-pass filters for scaling levels 1 and 2
    lo_lvl1 = np.array([np.sqrt(2), np.sqrt(2)])
    hi_lvl1 = np.array([-np.sqrt(2), np.sqrt(2)])
    lo_lvl2 = np.repeat(lo_lvl1, 2)
    hi_lvl2 = np.repeat(hi_lvl1, 2)

    # Pair (horizontal, vertical) filters per level: high-pass along
    # ``axis``, low-pass along the other axis.
    if axis == 0:
        filter_pairs = [(hi_lvl1, lo_lvl1), (hi_lvl2, lo_lvl2)]
    elif axis == 1:
        filter_pairs = [(lo_lvl1, hi_lvl1), (lo_lvl2, hi_lvl2)]
    else:
        raise ValueError('`axis` out of the valid range 0 -> 1')

    c = float(c)

    def level_score(fh, fv):
        """Pointwise similarity S of the two filtered images, one level."""
        mag1 = np.abs(filter_image_sep2d(img1, fh, fv, impl=impl))
        mag2 = np.abs(filter_image_sep2d(img2, fh, fv, impl=impl))
        return (2 * mag1 * mag2 + c ** 2) / (mag1 ** 2 + mag2 ** 2 + c ** 2)

    # Average the per-level scores and map through the logistic l_a
    mean_score = sum(level_score(fh, fv) for fh, fv in filter_pairs) / 2
    return scipy.special.expit(a * mean_score)
python
{ "resource": "" }
q35687
haarpsi_weight_map
train
def haarpsi_weight_map(img1, img2, axis):
    r"""Weighting map for directional features along an axis.

    Parameters
    ----------
    img1, img2 : array-like
        The images to compare. They must have equal shape.
    axis : {0, 1}
        Direction in which to look for edge similarities.

    Returns
    -------
    weight_map : `numpy.ndarray`
        The pointwise weight map. See Notes for details.

    Notes
    -----
    The pointwise weight map associated with input images
    :math:`f_1, f_2` and axis :math:`k` is defined as

    .. math::
        \mathrm{W}_{f_1, f_2}^{(k)}(x) =
        \max \left\{
        \left|g_3^{(k)} \ast f_1 \right|(x),
        \left|g_3^{(k)} \ast f_2 \right|(x)
        \right\},

    see `[Rei+2016] <https://arxiv.org/abs/1607.06140>`_ equations (11)
    and (13). Here, :math:`g_3^{(k)}` is a Haar wavelet filter for
    scaling level 3 that performs high-pass filtering in axis :math:`k`
    and low-pass filtering in the other axes.

    References
    ----------
    [Rei+2016] Reisenhofer, R, Bosse, S, Kutyniok, G, and Wiegand, T.
    *A Haar Wavelet-Based Perceptual Similarity Index for Image Quality
    Assessment*. arXiv:1607.06140 [cs], Jul. 2016.
    """
    # TODO: generalize for nD
    impl = 'pyfftw' if PYFFTW_AVAILABLE else 'numpy'

    # Level-3 Haar wavelet low- and high-pass filters
    lo_lvl3 = np.repeat([np.sqrt(2), np.sqrt(2)], 4)
    hi_lvl3 = np.repeat([-np.sqrt(2), np.sqrt(2)], 4)

    # High-pass along ``axis``, low-pass along the other axis
    if axis == 0:
        filt_h, filt_v = hi_lvl3, lo_lvl3
    elif axis == 1:
        filt_h, filt_v = lo_lvl3, hi_lvl3
    else:
        raise ValueError('`axis` out of the valid range 0 -> 1')

    # Pointwise maximum of the absolute filtered images
    mag1 = np.abs(filter_image_sep2d(img1, filt_h, filt_v, impl=impl))
    mag2 = np.abs(filter_image_sep2d(img2, filt_h, filt_v, impl=impl))
    return np.maximum(mag1, mag2)
python
{ "resource": "" }
q35688
spherical_sum
train
def spherical_sum(image, binning_factor=1.0): """Sum image values over concentric annuli. Parameters ---------- image : `DiscreteLp` element Input data whose radial sum should be computed. binning_factor : positive float, optional Reduce the number of output bins by this factor. Increasing this number can help reducing fluctuations due to the variance of points that fall in a particular annulus. A binning factor of ``1`` corresponds to a bin size equal to image pixel size for images with square pixels, otherwise :: max(norm2(c)) / norm2(shape) where the maximum is taken over all corners of the image domain. Returns ------- spherical_sum : 1D `DiscreteLp` element The spherical sum of ``image``. Its space is one-dimensional with domain ``[0, rmax]``, where ``rmax`` is the radius of the smallest ball containing ``image.space.domain``. Its shape is ``(N,)`` with :: N = int(sqrt(sum(n ** 2 for n in image.shape)) / binning_factor) """ r = np.sqrt(sum(xi ** 2 for xi in image.space.meshgrid)) rmax = max(np.linalg.norm(c) for c in image.space.domain.corners()) n_bins = int(np.sqrt(sum(n ** 2 for n in image.shape)) / binning_factor) rad_sum, _ = np.histogram(r, weights=image, bins=n_bins, range=(0, rmax)) out_spc = uniform_discr(min_pt=0, max_pt=rmax, shape=n_bins, impl=image.space.impl, dtype=image.space.dtype, interp="linear", axis_labels=["$r$"]) return out_spc.element(rad_sum)
python
{ "resource": "" }
q35689
simple_functional
train
def simple_functional(space, fcall=None, grad=None, prox=None, grad_lip=np.nan,
                      convex_conj_fcall=None, convex_conj_grad=None,
                      convex_conj_prox=None, convex_conj_grad_lip=np.nan,
                      linear=False):
    """Simplified interface to create a functional with specific properties.

    Users may specify as many properties as-is needed by the application.
    Properties that were not supplied raise ``NotImplementedError`` when
    accessed on the resulting functional.

    Parameters
    ----------
    space : `LinearSpace`
        Space that the functional should act on.
    fcall : callable, optional
        Function to evaluate when calling the functional.
    grad : callable or `Operator`, optional
        Gradient operator of the functional. A plain callable is wrapped
        into an `Operator` on ``space``.
    prox : `proximal factory`, optional
        Proximal factory for the functional.
    grad_lip : float, optional
        Lipschitz constant of the functional.
    convex_conj_fcall : callable, optional
        Function to evaluate when calling the convex conjugate functional.
    convex_conj_grad : callable or `Operator`, optional
        Gradient operator of the convex conjugate functional.
    convex_conj_prox : `proximal factory`, optional
        Proximal factory for the convex conjugate functional.
    convex_conj_grad_lip : float, optional
        Lipschitz constant of the convex conjugate functional.
    linear : bool, optional
        True if the operator is linear.

    Examples
    --------
    Create squared sum functional on rn:

    >>> def f(x):
    ...     return sum(xi**2 for xi in x)
    >>> def dfdx(x):
    ...     return 2 * x
    >>> space = odl.rn(3)
    >>> func = simple_functional(space, f, grad=dfdx)
    >>> func.domain
    rn(3)
    >>> func.range
    RealNumbers()
    >>> func([1, 2, 3])
    14.0
    >>> func.gradient([1, 2, 3])
    rn(3).element([ 2., 4., 6.])
    """
    # Wrap a plain callable gradient into an Operator so it composes with
    # the rest of the functional machinery.
    if grad is not None and not isinstance(grad, Operator):
        grad_in = grad

        class SimpleFunctionalGradient(Operator):

            """Gradient of a `SimpleFunctional`."""

            def _call(self, x):
                """Return ``self(x)``."""
                return grad_in(x)

        grad = SimpleFunctionalGradient(space, space, linear=False)

    # Same wrapping for the gradient of the convex conjugate.
    if (convex_conj_grad is not None and
            not isinstance(convex_conj_grad, Operator)):
        convex_conj_grad_in = convex_conj_grad

        class SimpleFunctionalConvexConjGradient(Operator):

            """Gradient of the convex conj of a `SimpleFunctional`."""

            def _call(self, x):
                """Return ``self(x)``."""
                return convex_conj_grad_in(x)

        convex_conj_grad = SimpleFunctionalConvexConjGradient(
            space, space, linear=False)

    class SimpleFunctional(Functional):

        """A simplified functional for examples."""

        def __init__(self):
            """Initialize an instance."""
            super(SimpleFunctional, self).__init__(space, linear=linear,
                                                   grad_lipschitz=grad_lip)

        def _call(self, x):
            """Return ``self(x)``."""
            if fcall is None:
                raise NotImplementedError('call not implemented')
            else:
                return fcall(x)

        @property
        def proximal(self):
            """Return the proximal of the operator."""
            if prox is None:
                raise NotImplementedError('proximal not implemented')
            else:
                return prox

        @property
        def gradient(self):
            """Return the gradient of the operator."""
            if grad is None:
                raise NotImplementedError('gradient not implemented')
            else:
                return grad

        @property
        def convex_conj(self):
            """Return the convex conjugate functional.

            Built by calling the factory again with the roles of the
            primal and convex-conjugate arguments swapped, so that
            ``func.convex_conj.convex_conj`` recovers the original
            specification.
            """
            return simple_functional(space,
                                     fcall=convex_conj_fcall,
                                     grad=convex_conj_grad,
                                     prox=convex_conj_prox,
                                     grad_lip=convex_conj_grad_lip,
                                     convex_conj_fcall=fcall,
                                     convex_conj_grad=grad,
                                     convex_conj_prox=prox,
                                     convex_conj_grad_lip=grad_lip,
                                     linear=linear)

    return SimpleFunctional()
python
{ "resource": "" }
q35690
FunctionalLeftScalarMult.convex_conj
train
def convex_conj(self):
    """Convex conjugate functional of the scaled functional.

    ``Functional.__rmul__`` takes care of the case scalar = 0.
    """
    scalar = self.scalar
    if scalar <= 0:
        raise ValueError('scaling with nonpositive values have no convex '
                         'conjugate. Current value: {}.'
                         ''.format(scalar))
    # Identity: (s * f)^*(x) = s * f^*(x / s) for s > 0
    return scalar * self.functional.convex_conj * (1.0 / scalar)
python
{ "resource": "" }
q35691
FunctionalLeftScalarMult.proximal
train
def proximal(self):
    """Proximal factory of the scaled functional.

    ``Functional.__rmul__`` takes care of the case scalar = 0

    See Also
    --------
    odl.solvers.nonsmooth.proximal_operators.proximal_const_func
    """
    if self.scalar < 0:
        raise ValueError('proximal operator of functional scaled with a '
                         'negative value {} is not well-defined'
                         ''.format(self.scalar))

    if self.scalar == 0:
        # Normally unreachable: `Functional.__rmul__` already handles
        # scalar = 0, but return the constant-functional proximal anyway.
        return proximal_const_func(self.domain)

    def proximal_left_scalar_mult(sigma=1.0):
        """Proximal operator for left scalar multiplication.

        Parameters
        ----------
        sigma : positive float, optional
            Step size parameter. Default: 1.0
        """
        # prox_{sigma * (s * f)} equals prox_{(sigma * s) * f}
        return self.functional.proximal(sigma * self.scalar)

    return proximal_left_scalar_mult
python
{ "resource": "" }
q35692
FunctionalComp.gradient
train
def gradient(self):
    """Gradient of the composition according to the chain rule."""
    func = self.left
    op = self.right

    class FunctionalCompositionGradient(Operator):

        """Gradient of the composition according to the chain rule."""

        def __init__(self):
            """Initialize a new instance."""
            super(FunctionalCompositionGradient, self).__init__(
                op.domain, op.domain, linear=False)

        def _call(self, x):
            """Apply the gradient operator to the given point."""
            # Chain rule: grad(f o A)(x) = A'(x)^* grad(f)(A(x))
            return op.derivative(x).adjoint(func.gradient(op(x)))

        def derivative(self, x):
            """The derivative in point ``x``.

            This is only defined for linear operators ``op``, in which
            case the gradient of the composition is
            ``op.adjoint * func.gradient * op``.
            """
            if not op.is_linear:
                raise NotImplementedError('derivative only implemented '
                                          'for linear operators.')
            else:
                return (op.adjoint * func.gradient * op).derivative(x)

    return FunctionalCompositionGradient()
python
{ "resource": "" }
q35693
FunctionalQuadraticPerturb.proximal
train
def proximal(self):
    """Proximal factory of the quadratically perturbed functional."""
    coeff = self.quadratic_coeff
    if coeff < 0:
        raise TypeError('`quadratic_coeff` {} must be non-negative'
                        ''.format(coeff))

    # Delegate to the generic quadratic-perturbation proximal with the
    # quadratic coefficient and the linear term of this functional.
    return proximal_quadratic_perturbation(self.functional.proximal,
                                           a=coeff, u=self.linear_term)
python
{ "resource": "" }
q35694
FunctionalQuadraticPerturb.convex_conj
train
def convex_conj(self):
    r"""Convex conjugate functional of the functional.

    Notes
    -----
    Given a functional :math:`f`, the convex conjugate of a linearly
    perturbed version :math:`f(x) + <y, x>` is given by a translation of
    the convex conjugate of :math:`f`:

    .. math::
        (f + \langle y, \cdot \rangle)^* (x^*) = f^*(x^* - y).

    For reference on the identity used, see `[KP2015]`_. Moreover, the
    convex conjugate of :math:`f + c` is by definition

    .. math::
        (f + c)^* (x^*) = f^*(x^*) - c.

    These closed forms apply only without a quadratic term; otherwise
    the generic implementation of the parent class is used.

    References
    ----------
    [KP2015] Komodakis, N, and Pesquet, J-C. *Playing with Duality: An
    overview of recent primal-dual approaches for solving large-scale
    optimization problems*. IEEE Signal Processing Magazine, 32.6 (2015),
    pp 31--54.

    .. _[KP2015]: https://arxiv.org/abs/1406.5429
    """
    if self.quadratic_coeff != 0:
        # No closed-form expression with a quadratic term; fall back.
        return super(FunctionalQuadraticPerturb, self).convex_conj

    # Translate by the linear term, then subtract the constant.
    conj = self.functional.convex_conj.translated(self.linear_term)
    if self.constant != 0:
        conj = conj - self.constant
    return conj
python
{ "resource": "" }
q35695
estimate_noise_std
train
def estimate_noise_std(img, average=True):
    """Estimate standard deviation of noise in ``img``.

    The algorithm, given in [Immerkaer1996], estimates the noise in an
    image.

    Parameters
    ----------
    img : array-like
        Array to estimate noise in.
    average : bool
        If ``True``, return the mean noise in the image, otherwise give
        a pointwise estimate.

    Returns
    -------
    noise : float

    Examples
    --------
    Create image with noise 1.0, verify result

    >>> img = np.random.randn(10, 10)
    >>> result = estimate_noise_std(img)  # should be about 1

    Also works with higher dimensional arrays

    >>> img = np.random.randn(3, 3, 3)
    >>> result = estimate_noise_std(img)

    The method can also estimate the noise pointwise (but with high
    uncertainity):

    >>> img = np.random.randn(3, 3, 3)
    >>> result = estimate_noise_std(img, average=False)

    References
    ----------
    [Immerkaer1996] Immerkaer, J. *Fast Noise Variance Estimation*.
    Computer Vision and Image Understanding, 1996.
    """
    import scipy.signal
    import functools

    img = np.asarray(img, dtype='float')

    # Separable high-pass kernel: sum of the 1D second-difference filter
    # [-1, 2, -1] applied along every axis. It annihilates (separable)
    # linear trends, so its response is dominated by noise.
    kernel = functools.reduce(np.add.outer, [[-1, 2, -1]] * img.ndim)
    response = scipy.signal.fftconvolve(img, kernel, mode='valid')

    if average:
        mean_sq = np.mean(response ** 2)
    else:
        mean_sq = response ** 2
        # Pad in order to retain shape
        mean_sq = np.pad(mean_sq, pad_width=1, mode='edge')

    # For i.i.d. noise of std sigma, E[response^2] = sigma^2 * sum(k^2),
    # so dividing by the kernel energy makes the estimator consistent.
    kernel_energy = np.sum(kernel ** 2)
    return np.sqrt(mean_sq / kernel_energy)
python
{ "resource": "" }
q35696
cone_beam_geometry
train
def cone_beam_geometry(space, src_radius, det_radius, num_angles=None,
                       short_scan=False, det_shape=None):
    r"""Create a default fan or cone beam geometry from ``space``.

    This function is intended for simple test cases where users do not
    need the full flexibility of the geometries, but simply wants a
    geometry that works.

    The geometry returned by this function has equidistant angles
    that lie (strictly) between 0 and either ``2 * pi`` (full scan)
    or ``pi + fan_angle`` (short scan).
    The detector is centered around 0, and its size is chosen such that
    the whole ``space`` is covered with lines.

    The number of angles and detector elements is chosen such that
    the resulting sinogram is fully sampled according to the
    Nyquist criterion, which in general results in a very large number of
    samples. In particular, a ``space`` that is not centered at the origin
    can result in very large detectors since the latter is always
    origin-centered.

    Parameters
    ----------
    space : `DiscreteLp`
        Reconstruction space, the space of the volumetric data to be
        projected. Must be 2- or 3-dimensional.
    src_radius : nonnegative float
        Radius of the source circle. Must be larger than the radius of
        the smallest vertical cylinder containing ``space.domain``,
        i.e., the source must be outside the volume for all rotations.
    det_radius : nonnegative float
        Radius of the detector circle.
    short_scan : bool, optional
        Use the minimum required angular range ``[0, pi + fan_angle]``.
        For ``True``, the `parker_weighting` should be used in FBP.
        By default, the range ``[0, 2 * pi]`` is used.
    num_angles : int, optional
        Number of angles.
        Default: Enough to fully sample the data, see Notes.
    det_shape : int or sequence of ints, optional
        Number of detector pixels.
        Default: Enough to fully sample the data, see Notes.

    Returns
    -------
    geometry : `DivergentBeamGeometry`
        Projection geometry with equidistant angles and zero-centered
        detector as determined by sampling criteria.

        - If ``space`` is 2D, the result is a `FanBeamGeometry`.
        - If ``space`` is 3D, the result is a `ConeFlatGeometry`.

    Examples
    --------
    Create a fan beam geometry from a 2d space:

    >>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
    >>> geometry = cone_beam_geometry(space, src_radius=5, det_radius=5)
    >>> geometry.angles.size
    78
    >>> geometry.detector.size
    57

    For a short scan geometry (from 0 to ``pi + fan_angle``), the
    ``short_scan`` flag can be set, resulting in a smaller number of
    angles:

    >>> geometry = cone_beam_geometry(space, src_radius=5, det_radius=5,
    ...                               short_scan=True)
    >>> geometry.angles.size
    46

    If the source is close to the object, the detector becomes larger due
    to more magnification:

    >>> geometry = cone_beam_geometry(space, src_radius=3, det_radius=9)
    >>> geometry.angles.size
    80
    >>> geometry.detector.size
    105

    Notes
    -----
    According to [NW2001]_, pages 75--76, a function
    :math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support

    .. math::
        \| x \| > \rho  \implies f(x) = 0,

    and is essentially bandlimited

    .. math::
       \| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,

    can be fully reconstructed from a fan beam ray transform with
    source-detector distance :math:`r` (assuming all detector
    points have the same distance to the source) if (1) the projection
    angles are sampled with a spacing of :math:`\Delta \psi` such that

    .. math::
        \Delta \psi \leq \frac{r + \rho}{r}\, \frac{\pi}{\rho \Omega},

    and (2) the detector is sampled with an angular interval
    :math:`\Delta \alpha` that satisfies

    .. math::
        \Delta \alpha \leq \frac{\pi}{r \Omega}.

    For a flat detector, the angular interval is smallest in the center
    of the fan and largest at the boundaries. The worst-case relation
    between the linear and angular sampling intervals are

    .. math::
        \Delta s = R \Delta \alpha, \quad R^2 = r^2 + (w / 2)^2,

    where :math:`w` is the width of the detector.
    Thus, to satisfy the angular detector condition one can choose

    .. math::
        \Delta s \leq \frac{\pi \sqrt{r^2 + (w / 2)^2}}{r \Omega}.

    The geometry returned by this function satisfies these conditions
    exactly.

    If the domain is 3-dimensional, a circular cone beam geometry is
    created with the third coordinate axis as rotation axis. This does,
    of course, not yield complete data, but is equivalent to the
    2D fan beam case in the :math:`z = 0` slice. The vertical size of
    the detector is chosen such that it covers the object vertically
    with rays, using a containing cuboid
    :math:`[-\rho, \rho]^2 \times [z_{\mathrm{min}}, z_{\mathrm{min}}]`
    to compute the cone angle.

    References
    ----------
    .. [NW2001] Natterer, F and Wuebbeling, F.
       *Mathematical Methods in Image Reconstruction*.
       SIAM, 2001.
       https://dx.doi.org/10.1137/1.9780898718324
    """
    # Find maximum distance from rotation axis
    corners = space.domain.corners()[:, :2]
    rho = np.max(np.linalg.norm(corners, axis=1))

    # Find default values according to Nyquist criterion.

    # We assume that the function is bandlimited by a wave along the x or y
    # axis. The highest frequency we can measure is then a standing wave with
    # period of twice the inter-node distance.
    min_side = min(space.partition.cell_sides[:2])
    omega = np.pi / min_side

    # Compute minimum width of the detector to cover the object. The relation
    # used here is (w/2)/(rs+rd) = rho/rs since both are equal to tan(alpha),
    # where alpha is the half fan angle.
    rs = float(src_radius)
    if (rs <= rho):
        raise ValueError('source too close to the object, resulting in '
                         'infinite detector for full coverage')
    rd = float(det_radius)
    # Total source-detector distance (kept as the original arguments, so it
    # may be an int if both radii are ints)
    r = src_radius + det_radius
    w = 2 * rho * (rs + rd) / rs

    # Compute minimum number of pixels given the constraint on the
    # sampling interval and the computed width
    rb = np.hypot(r, w / 2)  # length of the boundary ray to the flat detector
    # Odd number of pixels so that one pixel is centered on the axis
    num_px_horiz = 2 * int(np.ceil(w * omega * r / (2 * np.pi * rb))) + 1

    if space.ndim == 2:
        det_min_pt = -w / 2
        det_max_pt = w / 2
        if det_shape is None:
            det_shape = num_px_horiz
    elif space.ndim == 3:
        # Compute number of vertical pixels required to cover the object,
        # using the same sampling interval vertically as horizontally.
        # The reasoning is the same as for the computation of w.

        # Minimum distance of the containing cuboid edges to the source
        dist = rs - rho

        # Take angle of the rays going through the top and bottom corners
        # in that edge
        half_cone_angle = max(np.arctan(abs(space.partition.min_pt[2]) / dist),
                              np.arctan(abs(space.partition.max_pt[2]) / dist))
        h = 2 * np.sin(half_cone_angle) * (rs + rd)

        # Use the vertical spacing from the reco space, corrected for
        # magnification at the "back" of the object, i.e., where it is
        # minimal
        min_mag = (rs + rd) / (rs + rho)
        delta_h = min_mag * space.cell_sides[2]

        num_px_vert = int(np.ceil(h / delta_h))
        h = num_px_vert * delta_h  # make multiple of spacing

        det_min_pt = [-w / 2, -h / 2]
        det_max_pt = [w / 2, h / 2]
        if det_shape is None:
            det_shape = [num_px_horiz, num_px_vert]

    fan_angle = 2 * np.arctan(rho / rs)
    if short_scan:
        max_angle = min(np.pi + fan_angle, 2 * np.pi)
    else:
        max_angle = 2 * np.pi

    if num_angles is None:
        # Angular Nyquist criterion (see Notes)
        num_angles = int(np.ceil(max_angle * omega * rho / np.pi
                                 * r / (r + rho)))

    angle_partition = uniform_partition(0, max_angle, num_angles)
    det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)

    if space.ndim == 2:
        return FanBeamGeometry(angle_partition, det_partition,
                               src_radius, det_radius)
    elif space.ndim == 3:
        return ConeFlatGeometry(angle_partition, det_partition,
                                src_radius, det_radius)
    else:
        raise ValueError('``space.ndim`` must be 2 or 3.')
python
{ "resource": "" }
q35697
helical_geometry
train
def helical_geometry(space, src_radius, det_radius, num_turns,
                     n_pi=1, num_angles=None, det_shape=None):
    """Create a default helical geometry from ``space``.

    This function is intended for simple test cases where users do not
    need the full flexibility of the geometries, but simply want a
    geometry that works.

    The geometry returned by this function has equidistant angles
    that lie (strictly) between 0 and ``2 * pi * num_turns``.
    The detector is centered around 0, and its size is chosen such that
    the whole ``space`` is covered with lines.

    The number of angles and detector elements is chosen such that
    the resulting sinogram is fully sampled according to the
    Nyquist criterion, which in general results in a very large number of
    samples. In particular, a ``space`` that is not centered at the
    origin can result in very large detectors since the latter is always
    origin-centered.

    Parameters
    ----------
    space : `DiscreteLp`
        Reconstruction space, the space of the volumetric data to be
        projected. Must be 3-dimensional.
    src_radius : nonnegative float
        Radius of the source circle. Must be larger than the radius of
        the smallest vertical cylinder containing ``space.domain``,
        i.e., the source must be outside the volume for all rotations.
    det_radius : nonnegative float
        Radius of the detector circle.
    num_turns : positive float
        Total number of helical turns.
    n_pi : odd int, optional
        Total number of half rotations to include in the window. Values
        larger than 1 should be used if the pitch is much smaller than
        the detector height.
    num_angles : int, optional
        Number of angles.
        Default: Enough to fully sample the data, see Notes.
    det_shape : int or sequence of ints, optional
        Number of detector pixels.
        Default: Enough to fully sample the data, see Notes.

    Returns
    -------
    geometry : `ConeFlatGeometry`
        Projection geometry with equidistant angles and zero-centered
        detector as determined by sampling criteria.

    Examples
    --------
    Create a helical beam geometry from space:

    >>> space = odl.uniform_discr([-1, -1, -1], [1, 1, 1], (20, 20, 20))
    >>> geometry = helical_geometry(space, src_radius=5, det_radius=5,
    ...                             num_turns=3)
    >>> geometry.angles.size
    234
    >>> geometry.detector.shape
    (57, 9)

    Notes
    -----
    In the "fan beam direction", the sampling exactly follows the
    two-dimensional case; see `cone_beam_geometry` for a description.

    In the "axial direction", e.g. along the [0, 0, 1] axis, the
    geometry is sampled according to two criteria. First, the bounds of
    the detector are chosen to satisfy the Tuy condition. See
    `[TSS1998]`_ for a full description. Second, the sampling rate is
    selected according to the Nyquist criterion to give a full sampling.
    This is done by sampling such that the pixel size is half of the
    size of the projection of the smallest voxel onto the detector.

    References
    ----------
    [TSS1998] Tam, K C, Samarasekera, S and Sauer, F.
    *Exact cone beam CT with a spiral scan*.
    Physics in Medicine & Biology 43 (1998), p 1015.

    .. _[TSS1998]: https://dx.doi.org/10.1088/0031-9155/43/4/028
    """
    # Find maximum distance from rotation axis, i.e. the radius of the
    # smallest vertical cylinder containing ``space.domain``.
    corners = space.domain.corners()[:, :2]
    rho = np.max(np.linalg.norm(corners, axis=1))

    # Axial placement and pitch derived from the volume extent: the full
    # axial extent is covered in ``num_turns`` turns.
    offset_along_axis = space.partition.min_pt[2]
    pitch = space.partition.extent[2] / num_turns

    # Find default values according to Nyquist criterion.

    # We assume that the function is bandlimited by a wave along the x or y
    # axis. The highest frequency we can measure is then a standing wave with
    # period of twice the inter-node distance.
    min_side = min(space.partition.cell_sides[:2])
    omega = np.pi / min_side

    # Compute minimum width of the detector to cover the object. The relation
    # used here is (w/2)/(rs+rd) = rho/rs since both are equal to tan(alpha),
    # where alpha is the half fan angle.
    rs = float(src_radius)
    if (rs <= rho):
        raise ValueError('source too close to the object, resulting in '
                         'infinite detector for full coverage')
    rd = float(det_radius)
    r = rs + rd
    w = 2 * rho * (rs + rd) / rs

    # Compute minimum number of pixels given the constraint on the
    # sampling interval and the computed width
    rb = np.hypot(r, w / 2)  # length of the boundary ray to the flat detector
    num_px_horiz = 2 * int(np.ceil(w * omega * r / (2 * np.pi * rb))) + 1

    # Compute lower and upper bound needed to fully sample the object.
    # In particular, since in a helical geometry several turns are used,
    # this is selected so that the field of view of two opposing projections,
    # separated by theta = 180 deg, overlap, but as little as possible.
    # See `tam_danielson_window` for more information.
    h_axis = (pitch / (2 * np.pi) * (1 + (-rho / src_radius) ** 2)
              * (n_pi * np.pi / 2.0 - np.arctan(-rho / src_radius)))
    h = 2 * h_axis * (rs + rd) / rs

    # Compute number of pixels: half the projected size of the smallest
    # voxel at minimal magnification gives the vertical sampling interval.
    min_mag = r / rs
    dh = 0.5 * space.partition.cell_sides[2] * min_mag
    num_px_vert = int(np.ceil(h / dh))

    det_min_pt = [-w / 2, -h / 2]
    det_max_pt = [w / 2, h / 2]
    if det_shape is None:
        det_shape = [num_px_horiz, num_px_vert]

    max_angle = 2 * np.pi * num_turns
    if num_angles is None:
        num_angles = int(np.ceil(max_angle * omega * rho / np.pi
                                 * r / (r + rho)))

    angle_partition = uniform_partition(0, max_angle, num_angles)
    det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)

    return ConeFlatGeometry(angle_partition, det_partition,
                            src_radius, det_radius,
                            offset_along_axis=offset_along_axis,
                            pitch=pitch)
python
{ "resource": "" }
q35698
FanBeamGeometry.frommatrix
train
def frommatrix(cls, apart, dpart, src_radius, det_radius, init_matrix,
               det_curvature_radius=None, **kwargs):
    """Create an instance of `FanBeamGeometry` using a matrix.

    This alternative constructor rotates and translates the default
    configuration via a single matrix, which is convenient when the
    desired transformation is already available in matrix form.

    Parameters
    ----------
    apart : 1-dim. `RectPartition`
        Partition of the angle interval.
    dpart : 1-dim. `RectPartition`
        Partition of the detector parameter interval.
    src_radius : nonnegative float
        Radius of the source circle.
    det_radius : nonnegative float
        Radius of the detector circle. Must be nonzero if ``src_radius``
        is zero.
    init_matrix : `array_like`, shape ``(2, 2)`` or ``(2, 3)``
        Transformation matrix whose left ``(2, 2)`` block is multiplied
        with the default ``det_pos_init`` and ``det_axis_init`` to
        determine the new vectors. If present, the third column acts as
        a translation after the initial transformation. The resulting
        ``det_axis_init`` will be normalized.
    det_curvature_radius : nonnegative float, optional
        Radius of the detector curvature.
        If ``None``, flat detector is used, otherwise must be positive.
    kwargs :
        Further keyword arguments passed to the class constructor.

    Returns
    -------
    geometry : `FanBeamGeometry`

    Examples
    --------
    Mirror the second unit vector, creating a left-handed system:

    >>> apart = odl.uniform_partition(0, np.pi, 10)
    >>> dpart = odl.uniform_partition(-1, 1, 20)
    >>> matrix = np.array([[1, 0],
    ...                    [0, -1]])
    >>> geom = FanBeamGeometry.frommatrix(
    ...     apart, dpart, src_radius=1, det_radius=5, init_matrix=matrix)
    >>> geom.det_refpoint(0)
    array([ 0., -5.])
    >>> geom.det_axis_init
    array([ 1.,  0.])
    >>> geom.translation
    array([ 0.,  0.])

    Adding a translation with a third matrix column:

    >>> matrix = np.array([[1, 0, 1],
    ...                    [0, -1, 1]])
    >>> geom = FanBeamGeometry.frommatrix(
    ...     apart, dpart, src_radius=1, det_radius=5, init_matrix=matrix)
    >>> geom.translation
    array([ 1.,  1.])
    >>> geom.det_refpoint(0)  # (0, -5) + (1, 1)
    array([ 1., -4.])
    """
    # Validate shape of the provided matrix.
    matrix = np.asarray(init_matrix, dtype=float)
    if matrix.shape not in ((2, 2), (2, 3)):
        raise ValueError('`matrix` must have shape (2, 2) or (2, 3), '
                         'got array with shape {}'
                         ''.format(matrix.shape))

    # Split into the linear ``(2, 2)`` part and an optional translation
    # column (empty array if no third column was given).
    linear_part = matrix[:, :2]
    shift = matrix[:, 2:].squeeze()

    # Apply the linear part to the default configuration vectors.
    src_to_det, det_axis = transform_system(
        cls._default_config['src_to_det_init'],
        None,
        [cls._default_config['det_axis_init']],
        matrix=linear_part)

    # Hand the translation (if any) over to the standard constructor.
    if shift.size != 0:
        kwargs['translation'] = shift

    return cls(apart, dpart, src_radius, det_radius,
               det_curvature_radius, src_to_det,
               det_axis_init=det_axis, **kwargs)
python
{ "resource": "" }
q35699
FanBeamGeometry.src_position
train
def src_position(self, angle): """Return the source position at ``angle``. For an angle ``phi``, the source position is given by :: src(phi) = translation + rot_matrix(phi) * (-src_rad * src_to_det_init) where ``src_to_det_init`` is the initial unit vector pointing from source to detector. Parameters ---------- angle : float or `array-like` Angle(s) in radians describing the counter-clockwise rotation of source and detector. Returns ------- pos : `numpy.ndarray` Vector(s) pointing from the origin to the source. If ``angle`` is a single parameter, the returned array has shape ``(2,)``, otherwise ``angle.shape + (2,)``. See Also -------- det_refpoint Examples -------- With default arguments, the source starts at ``src_rad * (-e_y)`` and rotates to ``src_rad * e_x`` at 90 degrees: >>> apart = odl.uniform_partition(0, 2 * np.pi, 10) >>> dpart = odl.uniform_partition(-1, 1, 20) >>> geom = FanBeamGeometry(apart, dpart, src_radius=2, det_radius=5) >>> geom.src_position(0) array([ 0., -2.]) >>> np.allclose(geom.src_position(np.pi / 2), [2, 0]) True The method is vectorized, i.e., it can be called with multiple angles at once: >>> points = geom.src_position([0, np.pi / 2]) >>> np.allclose(points[0], [0, -2]) True >>> np.allclose(points[1], [2, 0]) True """ squeeze_out = (np.shape(angle) == ()) angle = np.array(angle, dtype=float, copy=False, ndmin=1) # Initial vector from the rotation center to the source. It can be # computed this way since source and detector are at maximum distance, # i.e. the connecting line passes the origin. center_to_src_init = -self.src_radius * self.src_to_det_init pos_vec = (self.translation[None, :] + self.rotation_matrix(angle).dot(center_to_src_init)) if squeeze_out: pos_vec = pos_vec.squeeze() return pos_vec
python
{ "resource": "" }