repo
stringlengths
2
99
file
stringlengths
13
225
code
stringlengths
0
18.3M
file_length
int64
0
18.3M
avg_line_length
float64
0
1.36M
max_line_length
int64
0
4.26M
extension_type
stringclasses
1 value
pylops
pylops-master/pytests/test_utils.py
from pylops import utils

# Optional dependency: scooby may be absent, in which case Report should warn.
try:
    import scooby
except ImportError:
    scooby = False


def test_report(capsys):
    """Check utils.Report against scooby.Report (or its warning without scooby).

    Reporting is delegated to the external package scooby; here we only
    ensure that the set of shown packages (core and optional) is unchanged.
    """
    capsys.readouterr()  # drain anything already captured

    if not scooby:
        # Soft dependency: Report should print an installation hint instead.
        _ = utils.Report()
        captured = capsys.readouterr()
        assert "NOTE: `pylops.Report` requires `scooby`. Install it via" in captured.out
        return

    # scooby is available: both reports must show the same package lists.
    pylops_report = utils.Report()
    scooby_report = scooby.Report(
        core=["numpy", "scipy", "pylops"],
        optional=["IPython", "matplotlib", "numba"],
    )
    # Compare from character 115 onward to exclude the timestamp header.
    assert repr(pylops_report)[115:] == repr(scooby_report)[115:]
821
27.344828
79
py
pylops
pylops-master/pylops/_torchoperator.py
import logging

from pylops.utils import deps

# torch and cupy are soft dependencies: only import them when the runtime
# environment flags (resolved in pylops.utils.deps) say they are available.
if deps.torch_enabled:
    import torch
    from torch.utils.dlpack import from_dlpack, to_dlpack
if deps.cupy_enabled:
    import cupy as cp


class _TorchOperator(torch.autograd.Function):
    """Wrapper class for PyLops operators into Torch functions"""

    @staticmethod
    def forward(ctx, x, forw, adj, device, devicetorch):
        """Apply the PyLops forward operator ``forw`` to torch tensor ``x``.

        ``forw``/``adj`` are the operator's forward and adjoint callables,
        ``device`` is where the PyLops operator runs ("cpu" or not), and
        ``devicetorch`` is the device of the incoming torch tensor. The
        callables and devices are stashed on ``ctx`` for use in ``backward``.
        """
        ctx.forw = forw
        ctx.adj = adj
        ctx.device = device
        ctx.devicetorch = devicetorch

        # check if data is moved to cpu and warn user
        if ctx.device == "cpu" and ctx.devicetorch != "cpu":
            logging.warning(
                "pylops operator will be applied on the cpu "
                "whilst the input torch vector is on "
                "%s, this may lead to poor performance" % ctx.devicetorch
            )

        # prepare input
        if ctx.device == "cpu":
            # bring x to cpu and numpy
            x = x.cpu().detach().numpy()
        else:
            # pass x to cupy using DLPack (zero-copy handoff torch -> cupy)
            x = cp.fromDlpack(to_dlpack(x))

        # apply forward operator
        y = ctx.forw(x)

        # prepare output
        if ctx.device == "cpu":
            # move y to torch and device
            y = torch.from_numpy(y).to(ctx.devicetorch)
        else:
            # move y to torch and device (zero-copy handoff cupy -> torch)
            y = from_dlpack(y.toDlpack())
        return y

    @staticmethod
    def backward(ctx, y):
        """Backpropagate by applying the adjoint operator stored on ``ctx``.

        Returns one gradient per ``forward`` input; only ``x`` receives a
        gradient, the remaining five inputs (forw, adj, device, devicetorch)
        are non-differentiable, hence the trailing ``None`` entries.
        """
        # prepare input
        if ctx.device == "cpu":
            y = y.cpu().detach().numpy()
        else:
            # pass x to cupy using DLPack
            y = cp.fromDlpack(to_dlpack(y))

        # apply adjoint operator
        x = ctx.adj(y)

        # prepare output
        if ctx.device == "cpu":
            x = torch.from_numpy(x).to(ctx.devicetorch)
        else:
            x = from_dlpack(x.toDlpack())
        return x, None, None, None, None, None
1,923
26.884058
73
py
pylops
pylops-master/pylops/linearoperator.py
from __future__ import annotations, division __all__ = [ "LinearOperator", "aslinearoperator", ] import logging from abc import ABC, abstractmethod import numpy as np import scipy as sp from numpy.linalg import solve as np_solve from scipy.linalg import eigvals, lstsq from scipy.linalg import solve as sp_solve from scipy.sparse import csr_matrix from scipy.sparse.linalg import LinearOperator as spLinearOperator from scipy.sparse.linalg import eigs as sp_eigs from scipy.sparse.linalg import eigsh as sp_eigsh from scipy.sparse.linalg import lobpcg as sp_lobpcg from scipy.sparse.linalg import lsqr, spsolve # need to check scipy version since the interface submodule changed into # _interface from scipy>=1.8.0 sp_version = sp.__version__.split(".") if int(sp_version[0]) <= 1 and int(sp_version[1]) < 8: from scipy.sparse.sputils import isintlike, isshape else: from scipy.sparse._sputils import isintlike, isshape from typing import Callable, List, Optional, Sequence, Union from pylops import get_ndarray_multiplication from pylops.optimization.basic import cgls from pylops.utils.backend import get_array_module, get_module, get_sparse_eye from pylops.utils.decorators import count from pylops.utils.estimators import trace_hutchinson, trace_hutchpp, trace_nahutchpp from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray, ShapeLike logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class _LinearOperator(ABC): """Meta-class for Linear operator""" @abstractmethod def _matvec(self, x: NDArray) -> NDArray: """Matrix-vector multiplication handler.""" pass @abstractmethod def _rmatvec(self, x: NDArray) -> NDArray: """Matrix-vector adjoint multiplication handler.""" pass class LinearOperator(_LinearOperator): """Common interface for performing matrix-vector products. 
This class acts as an abstract interface between matrix-like objects and iterative solvers, providing methods to perform matrix-vector and adjoint matrix-vector products as as well as convenience methods such as ``eigs``, ``cond``, and ``conj``. .. note:: End users of PyLops should not use this class directly but simply use operators that are already implemented. This class is meant for developers and it has to be used as the parent class of any new operator developed within PyLops. Find more details regarding implementation of new operators at :ref:`addingoperator`. Parameters ---------- Op : :obj:`scipy.sparse.linalg.LinearOperator` or :obj:`pylops.linearoperator.LinearOperator` Operator. If other arguments are provided, they will overwrite those obtained from ``Op``. dtype : :obj:`str`, optional Type of elements in input array. shape : :obj:`tuple(int, int)`, optional Shape of operator. If not provided, obtained from ``dims`` and ``dimsd``. dims : :obj:`tuple(int, ..., int)`, optional .. versionadded:: 2.0.0 Dimensions of model. If not provided, ``(self.shape[1],)`` is used. dimsd : :obj:`tuple(int, ..., int)`, optional .. versionadded:: 2.0.0 Dimensions of data. If not provided, ``(self.shape[0],)`` is used. explicit : :obj:`bool`, optional Operator contains a matrix that can be solved explicitly (``True``) or not (``False``). Defaults to ``False``. clinear : :obj:`bool`, optional .. versionadded:: 1.17.0 Operator is complex-linear. Defaults to ``True``. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) """ def __init__( self, Op: Optional[Union[spLinearOperator, LinearOperator]] = None, dtype: Optional[DTypeLike] = None, shape: Optional[ShapeLike] = None, dims: Optional[ShapeLike] = None, dimsd: Optional[ShapeLike] = None, clinear: Optional[bool] = None, explicit: Optional[bool] = None, name: Optional[str] = None, ) -> None: if Op is not None: self.Op = Op # All Operators must have shape and dtype dtype = self.Op.dtype if dtype is None else dtype shape = self.Op.shape if shape is None else shape # Optionally, some operators have other attributes dims = getattr(Op, "dims", (Op.shape[1],)) if dims is None else dims dimsd = getattr(Op, "dimsd", (Op.shape[0],)) if dimsd is None else dimsd clinear = getattr(Op, "clinear", True) if clinear is None else clinear explicit = ( getattr(self.Op, "explicit", False) if explicit is None else explicit ) if explicit and hasattr(Op, "A"): self.A = Op.A name = getattr(Op, "name", None) if name is None else name if dtype is not None: self.dtype = dtype if shape is not None: self.shape = shape if dims is not None: self.dims = dims if dimsd is not None: self.dimsd = dimsd if clinear is not None: self.clinear = clinear if explicit is not None: self.explicit = explicit self.name = name # counters self.matvec_count = 0 self.rmatvec_count = 0 self.matmat_count = 0 self.rmatmat_count = 0 @property def shape(self): _shape = getattr(self, "_shape", None) if _shape is None: # Cannot find shape, falling back on dims and dimsd dims = getattr(self, "_dims", None) dimsd = getattr(self, "_dimsd", None) if dims is None or dimsd is None: # Cannot find both dims and dimsd, error raise AttributeError( ( f"'{self.__class__.__name__}' object has no attribute 'shape' " "nor both fallback attributes ('dims', 'dimsd')" ) ) _shape = (int(np.prod(dimsd)), int(np.prod(dims))) self._shape = _shape # Update to not redo everything above on next call return _shape 
@shape.setter def shape(self, new_shape: ShapeLike) -> None: new_shape = tuple(new_shape) if not isshape(new_shape): raise ValueError(f"invalid shape %{new_shape:r} (must be 2-d)") dims = getattr(self, "_dims", None) dimsd = getattr(self, "_dimsd", None) if dims is not None and dimsd is not None: # Found dims and dimsd if np.prod(dimsd) != new_shape[0] and np.prod(dims) != new_shape[1]: raise ValueError("New shape incompatible with dims and dimsd") elif np.prod(dimsd) != new_shape[0]: raise ValueError("New shape incompatible with dimsd") elif np.prod(dims) != new_shape[1]: raise ValueError("New shape incompatible with dims") self._shape = new_shape @shape.deleter def shape(self): del self._shape @property def dims(self): _dims = getattr(self, "_dims", None) if _dims is None: shape = getattr(self, "_shape", None) if shape is None: raise AttributeError( f"'{self.__class__.__name__}' object has no attributes 'dims' or 'shape'" ) _dims = (shape[1],) return _dims @dims.setter def dims(self, new_dims: ShapeLike) -> None: new_dims = tuple(new_dims) shape = getattr(self, "_shape", None) if shape is None: # shape not set yet self._dims = new_dims else: if np.prod(new_dims) == self.shape[1]: self._dims = new_dims else: raise ValueError("dims incompatible with shape[1]") @dims.deleter def dims(self): del self._dims @property def dimsd(self): _dimsd = getattr(self, "_dimsd", None) if _dimsd is None: shape = getattr(self, "_shape", None) if shape is None: raise AttributeError( f"'{self.__class__.__name__}' object has no attributes 'dimsd' or 'shape'" ) _dimsd = (shape[0],) return _dimsd @dimsd.setter def dimsd(self, new_dimsd: ShapeLike) -> None: new_dimsd = tuple(new_dimsd) shape = getattr(self, "_shape", None) if shape is None: # shape not set yet self._dimsd = new_dimsd else: if np.prod(new_dimsd) == self.shape[0]: self._dimsd = new_dimsd else: raise ValueError("dimsd incompatible with shape[0]") @dimsd.deleter def dimsd(self): del self._dimsd @property def clinear(self): 
return getattr(self, "_clinear", True) @clinear.setter def clinear(self, new_clinear: bool) -> None: self._clinear = bool(new_clinear) @clinear.deleter def clinear(self): del self._clinear @property def explicit(self): return getattr(self, "_explicit", False) @explicit.setter def explicit(self, new_explicit: bool) -> None: self._explicit = bool(new_explicit) @explicit.deleter def explicit(self): del self._explicit @property def name(self): return getattr(self, "_name", None) @name.setter def name(self, new_name: str) -> None: self._name = new_name @name.deleter def name(self): del self._name def __mul__(self, x: Union[float, LinearOperator]) -> LinearOperator: return self.dot(x) def __matmul__(self, other): if np.isscalar(other): raise ValueError("Scalar not allowed, use * instead") return self.__mul__(other) def __rmul__(self, x: float) -> LinearOperator: if np.isscalar(x): Op = _ScaledLinearOperator(self, x) self._copy_attributes( Op, exclude=[ "explicit", "name", ], ) Op.explicit = False return Op else: return NotImplemented def __rmatmul__(self, other): if np.isscalar(other): raise ValueError("Scalar not allowed, use * instead") return self.__rmul__(other) def __pow__(self, p: int) -> LinearOperator: if np.isscalar(p): Op = _PowerLinearOperator(self, p) self._copy_attributes( Op, exclude=[ "explicit", "name", ], ) Op.explicit = False return Op else: return NotImplemented def __add__(self, x: LinearOperator) -> LinearOperator: if isinstance(x, (LinearOperator, spLinearOperator)): # cast x to pylops linear operator if not already (this is done # to allow mixing pylops and scipy operators) Opx = aslinearoperator(x) Op = _SumLinearOperator(self, Opx) self._copy_attributes( Op, exclude=[ "explicit", "name", ], ) Op.clinear = Op.clinear and Opx.clinear Op.explicit = False # Replace if shape-like if len(self.dims) == 1: Op.dims = Opx.dims if len(self.dimsd) == 1: Op.dimsd = Opx.dimsd return Op else: return NotImplemented def __neg__(self) -> LinearOperator: Op = 
_ScaledLinearOperator(self, -1) self._copy_attributes( Op, exclude=[ "explicit", "name", ], ) Op.explicit = False return Op def __sub__(self, x): return self.__add__(-x) def __repr__(self): M, N = self.shape if self.dtype is None: dt = "unspecified dtype" else: dt = "dtype=" + str(self.dtype) return "<%dx%d %s with %s>" % (M, N, self.__class__.__name__, dt) def _copy_attributes( self, dest: LinearOperator, exclude: Optional[List[str]] = None, ) -> None: """Copy attributes from one LinearOperator to another""" if exclude is None: exclude = ["name"] attrs = ["dims", "dimsd", "clinear", "explicit", "name"] if exclude is not None: for item in exclude: attrs.remove(item) for attr in attrs: if hasattr(self, attr): setattr(dest, attr, getattr(self, attr)) def _matvec(self, x: NDArray) -> NDArray: """Matrix-vector multiplication handler.""" if self.Op is not None: return self.Op._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: """Matrix-vector adjoint multiplication handler.""" if self.Op is not None: return self.Op._rmatvec(x) def _matmat(self, X: NDArray) -> NDArray: """Matrix-matrix multiplication handler. Modified version of scipy _matmat to avoid having trailing dimension in col when provided to matvec """ if sp.sparse.issparse(X): y = np.vstack([self.matvec(col.toarray().reshape(-1)) for col in X.T]).T else: y = np.vstack([self.matvec(col.reshape(-1)) for col in X.T]).T return y def _rmatmat(self, X: NDArray) -> NDArray: """Matrix-matrix adjoint multiplication handler. 
Modified version of scipy _rmatmat to avoid having trailing dimension in col when provided to rmatvec """ if sp.sparse.issparse(X): y = np.vstack([self.rmatvec(col.toarray().reshape(-1)) for col in X.T]).T else: y = np.vstack([self.rmatvec(col.reshape(-1)) for col in X.T]).T return y def _adjoint(self) -> LinearOperator: Op = _AdjointLinearOperator(self) self._copy_attributes(Op, exclude=["dims", "dimsd", "explicit", "name"]) Op.explicit = False Op.dims = self.dimsd Op.dimsd = self.dims return Op def _transpose(self) -> LinearOperator: Op = _TransposedLinearOperator(self) self._copy_attributes(Op, exclude=["dims", "dimsd", "explicit", "name"]) Op.explicit = False Op.dims = self.dimsd Op.dimsd = self.dims return Op def adjoint(self): return self._adjoint() H: Callable[[LinearOperator], LinearOperator] = property(adjoint) def transpose(self): return self._transpose() T: Callable[[LinearOperator], LinearOperator] = property(transpose) @count(forward=True) def matvec(self, x: NDArray) -> NDArray: """Matrix-vector multiplication. Modified version of scipy matvec which does not consider the case where the input vector is ``np.matrix`` (the use ``np.matrix`` is now discouraged in numpy's documentation). Parameters ---------- x : :obj:`numpy.ndarray` Input array of shape (N,) or (N,1) Returns ------- y : :obj:`numpy.ndarray` Output array of shape (M,) or (M,1) """ M, N = self.shape if x.shape != (N,) and x.shape != (N, 1): raise ValueError("dimension mismatch") y = self._matvec(x) if x.ndim == 1: y = y.reshape(M) elif x.ndim == 2: y = y.reshape(M, 1) else: raise ValueError("invalid shape returned by user-defined matvec()") return y @count(forward=False) def rmatvec(self, x: NDArray) -> NDArray: """Adjoint matrix-vector multiplication. Modified version of scipy rmatvec which does not consider the case where the input vector is ``np.matrix`` (the use ``np.matrix`` is now discouraged in numpy's documentation). 
Parameters ---------- y : :obj:`numpy.ndarray` Input array of shape (M,) or (M,1) Returns ------- x : :obj:`numpy.ndarray` Output array of shape (N,) or (N,1) """ M, N = self.shape if x.shape != (M,) and x.shape != (M, 1): raise ValueError("dimension mismatch") y = self._rmatvec(x) if x.ndim == 1: y = y.reshape(N) elif x.ndim == 2: y = y.reshape(N, 1) else: raise ValueError("invalid shape returned by user-defined rmatvec()") return y @count(forward=True, matmat=True) def matmat(self, X: NDArray) -> NDArray: """Matrix-matrix multiplication. Modified version of scipy matmat which does not consider the case where the input vector is ``np.matrix`` (the use ``np.matrix`` is now discouraged in numpy's documentation). Parameters ---------- x : :obj:`numpy.ndarray` Input array of shape (N,K) Returns ------- y : :obj:`numpy.ndarray` Output array of shape (M,K) """ if X.ndim != 2: raise ValueError("expected 2-d ndarray or matrix, " "not %d-d" % X.ndim) if X.shape[0] != self.shape[1]: raise ValueError("dimension mismatch: %r, %r" % (self.shape, X.shape)) Y = self._matmat(X) return Y @count(forward=False, matmat=True) def rmatmat(self, X: NDArray) -> NDArray: """Matrix-matrix multiplication. Modified version of scipy rmatmat which does not consider the case where the input vector is ``np.matrix`` (the use ``np.matrix`` is now discouraged in numpy's documentation). Parameters ---------- x : :obj:`numpy.ndarray` Input array of shape (M,K) Returns ------- y : :obj:`numpy.ndarray` Output array of shape (N,K) """ if X.ndim != 2: raise ValueError("expected 2-d ndarray or matrix, " "not %d-d" % X.ndim) if X.shape[0] != self.shape[0]: raise ValueError("dimension mismatch: %r, %r" % (self.shape, X.shape)) Y = self._rmatmat(X) return Y def dot(self, x: NDArray) -> NDArray: """Matrix-matrix or matrix-vector multiplication. 
Parameters ---------- x : :obj:`numpy.ndarray` Input array (or matrix) Returns ------- y : :obj:`numpy.ndarray` Output array (or matrix) that represents the result of applying the linear operator on x. """ if isinstance(x, (LinearOperator, spLinearOperator)): # cast x to pylops linear operator if not already (this is done # to allow mixing pylops and scipy operators) Opx = aslinearoperator(x) Op = _ProductLinearOperator(self, Opx) self._copy_attributes(Op, exclude=["dims", "explicit", "name"]) Op.clinear = Op.clinear and Opx.clinear Op.explicit = False Op.dims = Opx.dims return Op elif np.isscalar(x): Op = _ScaledLinearOperator(self, x) self._copy_attributes( Op, exclude=["explicit", "name"], ) Op.explicit = False return Op else: if not get_ndarray_multiplication() and ( x.ndim > 2 or (x.ndim == 2 and x.shape[0] != self.shape[1]) ): raise ValueError( "Operator can only be applied 1D vectors or 2D matrices. " "Enable ndarray multiplication with pylops.set_ndarray_multiplication(True)." ) is_dims_shaped = x.shape == self.dims is_dims_shaped_matrix = len(x.shape) > 1 and x.shape[:-1] == self.dims if is_dims_shaped: # (dims1, ..., dimsK) => (dims1 * ... * dimsK,) == self.shape x = x.ravel() if is_dims_shaped_matrix: # (dims1, ..., dimsK, P) => (dims1 * ... * dimsK, P) x = x.reshape((-1, x.shape[-1])) if x.ndim == 1: y = self.matvec(x) if is_dims_shaped and get_ndarray_multiplication(): y = y.reshape(self.dimsd) return y elif x.ndim == 2: y = self.matmat(x) if is_dims_shaped_matrix and get_ndarray_multiplication(): y = y.reshape((*self.dimsd, -1)) return y else: raise ValueError( ( "Wrong shape.\nFor vector multiplication, expects either a 1d " "array or, an ndarray of size `dims` when `dims` and `dimsd` " "both are available.\n" "For matrix multiplication, expects a 2d array with its first " f"dimension is equal to {self.shape[1]}.\n" f"Instead, received an array of shape {x.shape}." 
) ) def div( self, y: NDArray, niter: int = 100, densesolver: str = "scipy", ) -> NDArray: r"""Solve the linear problem :math:`\mathbf{y}=\mathbf{A}\mathbf{x}`. Overloading of operator ``/`` to improve expressivity of `Pylops` when solving inverse problems. Parameters ---------- y : :obj:`np.ndarray` Data niter : :obj:`int`, optional Number of iterations (to be used only when ``explicit=False``) densesolver : :obj:`str`, optional Use scipy (``scipy``) or numpy (``numpy``) dense solver Returns ------- xest : :obj:`np.ndarray` Estimated model """ xest = self.__truediv__(y, niter=niter, densesolver=densesolver) return xest def __truediv__( self, y: NDArray, niter: int = 100, densesolver: str = "scipy", ) -> NDArray: if self.explicit is True: if sp.sparse.issparse(self.A): # use scipy solver for sparse matrices xest = spsolve(self.A, y) elif isinstance(self.A, np.ndarray): # use scipy solvers for dense matrices (used for backward # compatibility, could be switched to numpy equivalents) if self.A.shape[0] == self.A.shape[1]: if densesolver == "scipy": xest = sp_solve(self.A, y) else: xest = np_solve(self.A, y) else: xest = lstsq(self.A, y)[0] else: # use numpy/cupy solvers for dense matrices ncp = get_array_module(y) if self.A.shape[0] == self.A.shape[1]: xest = ncp.linalg.solve(self.A, y) else: xest = ncp.linalg.lstsq(self.A, y)[0] else: if isinstance(y, np.ndarray): # numpy backend xest = lsqr(self, y, iter_lim=niter, atol=1e-8, btol=1e-8)[0] else: # cupy backend ncp = get_array_module(y) xest = cgls( self, y, x0=ncp.zeros(int(self.shape[1]), dtype=self.dtype), niter=niter, )[0] return xest def todense( self, backend: str = "numpy", ) -> NDArray: r"""Return dense matrix. The operator is converted into its dense matrix equivalent. In order to do so, square or tall operators are applied to an identity matrix whose number of rows and columns is equivalent to the number of columns of the operator. 
Conversely, for skinny operators, the transpose operator is applied to an identity matrix whose number of rows and columns is equivalent to the number of rows of the operator and the resulting matrix is transposed (and complex conjugated). Note that this operation may be costly for operators with large number of rows and columns and it should be used mostly as a way to inspect the structure of the matricial equivalent of the operator. Parameters ---------- backend : :obj:`str`, optional Backend used to densify matrix (``numpy`` or ``cupy``). Note that this must be consistent with how the operator has been created. Returns ------- matrix : :obj:`numpy.ndarray` or :obj:`cupy.ndarray` Dense matrix. """ ncp = get_module(backend) Op = self # Create identity matrix shapemin = min(Op.shape) if shapemin <= 1e3: # use numpy for small matrices (faster but heavier on memory) identity = ncp.eye(shapemin, dtype=self.dtype) else: # use scipy for large matrices (slower but lighter on memory) identity = get_sparse_eye(ncp.ones(1))(shapemin, dtype=self.dtype).tocsc() # Apply operator if Op.shape[1] == shapemin: matrix = Op.matmat(identity) else: matrix = np.conj(Op.rmatmat(identity)).T return matrix def tosparse(self) -> NDArray: r"""Return sparse matrix. The operator in converted into its sparse (CSR) matrix equivalent. In order to do so, the operator is applied to series of unit vectors with length equal to the number of coloumns in the original operator. Returns ------- matrix : :obj:`scipy.sparse.csr_matrix` Sparse matrix. 
""" Op = self (_, n) = self.shape # stores non-zero data for the sparse matrix creation entries = [] indices = [] # loop through columns of self for i in range(n): # make a unit vector for the ith column unit_i = np.zeros(n) unit_i[i] = 1 # multiply unit vector to self and find the non-zeros res_i: NDArray = Op * unit_i rows_nz = np.where(res_i != 0)[0] # append the non-zero values and indices to the lists for j in rows_nz: indices.append([i, j]) entries_i = res_i[rows_nz] for e in entries_i: entries.append(e) # post process the entries / indices for scipy.sparse.csr_matrix entries = np.array(entries) indices = np.array(indices) i, j = indices[:, 0], indices[:, 1] # construct a sparse, CSR matrix from the entries / indices data. matrix = csr_matrix((entries, (j, i)), shape=self.shape, dtype=self.dtype) return matrix def eigs( self, neigs: Optional[int] = None, symmetric: bool = False, niter: Optional[int] = None, uselobpcg: bool = False, **kwargs_eig: Union[int, float, str], ) -> NDArray: r"""Most significant eigenvalues of linear operator. Return an estimate of the most significant eigenvalues of the linear operator. If the operator has rectangular shape (``shape[0]!=shape[1]``), eigenvalues are first computed for the square operator :math:`\mathbf{A^H}\mathbf{A}` and the square-root values are returned. Parameters ---------- neigs : :obj:`int` Number of eigenvalues to compute (if ``None``, return all). Note that for ``explicit=False``, only :math:`N-1` eigenvalues can be computed where :math:`N` is the size of the operator in the model space symmetric : :obj:`bool`, optional Operator is symmetric (``True``) or not (``False``). 
User should set this parameter to ``True`` only when it is guaranteed that the operator is real-symmetric or complex-hermitian matrices niter : :obj:`int`, optional Number of iterations for eigenvalue estimation uselobpcg : :obj:`bool`, optional Use :func:`scipy.sparse.linalg.lobpcg` **kwargs_eig Arbitrary keyword arguments for :func:`scipy.sparse.linalg.eigs`, :func:`scipy.sparse.linalg.eigsh`, or :func:`scipy.sparse.linalg.lobpcg` Returns ------- eigenvalues : :obj:`numpy.ndarray` Operator eigenvalues. Raises ------ ValueError If ``uselobpcg=True`` for a non-symmetric square matrix with complex type Notes ----- Depending on the size of the operator, whether it is explicit or not and the number of eigenvalues requested, different algorithms are used by this routine. More precisely, when only a limited number of eigenvalues is requested the :func:`scipy.sparse.linalg.eigsh` method is used in case of ``symmetric=True`` and the :func:`scipy.sparse.linalg.eigs` method is used ``symmetric=False``. However, when the matrix is represented explicitly within the linear operator (``explicit=True``) and all the eigenvalues are requested the :func:`scipy.linalg.eigvals` is used instead. Finally, when only a limited number of eigenvalues is required, it is also possible to explicitly choose to use the ``scipy.sparse.linalg.lobpcg`` method via the ``uselobpcg`` input parameter flag. Most of these algorithms are a port of ARPACK [1]_, a Fortran package which provides routines for quickly finding eigenvalues/eigenvectors of a matrix. As ARPACK requires only left-multiplication by the matrix in question, eigenvalues/eigenvectors can also be estimated for linear operators when the dense matrix is not available. .. 
[1] http://www.caam.rice.edu/software/ARPACK/ """ if self.explicit and isinstance(self.A, np.ndarray): if self.shape[0] == self.shape[1]: if neigs is None or neigs == self.shape[1]: eigenvalues = eigvals(self.A) else: if not symmetric and np.iscomplexobj(self) and uselobpcg: raise ValueError( "cannot use scipy.sparse.linalg.lobpcg " "for non-symmetric square matrices of " "complex type..." ) if symmetric and uselobpcg: X = np.random.rand(self.shape[0], neigs).astype(self.dtype) eigenvalues = sp_lobpcg( self.A, X=X, maxiter=niter, **kwargs_eig )[0] elif symmetric: eigenvalues = sp_eigsh( self.A, k=neigs, maxiter=niter, **kwargs_eig )[0] else: eigenvalues = sp_eigs( self.A, k=neigs, maxiter=niter, **kwargs_eig )[0] else: if neigs is None or neigs == self.shape[1]: eigenvalues = np.sqrt(eigvals(np.dot(np.conj(self.A.T), self.A))) else: if uselobpcg: X = np.random.rand(self.shape[1], neigs).astype(self.dtype) eigenvalues = np.sqrt( sp_lobpcg( np.dot(np.conj(self.A.T), self.A), X=X, maxiter=niter, **kwargs_eig, )[0] ) else: eigenvalues = np.sqrt( sp_eigsh( np.dot(np.conj(self.A.T), self.A), k=neigs, maxiter=niter, **kwargs_eig, )[0] ) else: if neigs is None or neigs >= self.shape[1]: neigs = self.shape[1] - 2 if self.shape[0] == self.shape[1]: if not symmetric and np.iscomplexobj(self) and uselobpcg: raise ValueError( "cannot use scipy.sparse.linalg.lobpcg for " "non symmetric square matrices of " "complex type..." 
) if symmetric and uselobpcg: X = np.random.rand(self.shape[0], neigs).astype(self.dtype) eigenvalues = sp_lobpcg(self, X=X, maxiter=niter, **kwargs_eig)[0] elif symmetric: eigenvalues = sp_eigsh(self, k=neigs, maxiter=niter, **kwargs_eig)[ 0 ] else: eigenvalues = sp_eigs(self, k=neigs, maxiter=niter, **kwargs_eig)[0] else: if uselobpcg: X = np.random.rand(self.shape[1], neigs).astype(self.dtype) eigenvalues = np.sqrt( sp_lobpcg(self.H * self, X=X, maxiter=niter, **kwargs_eig)[0] ) else: eigenvalues = np.sqrt( sp_eigs(self.H * self, k=neigs, maxiter=niter, **kwargs_eig)[0] ) return -np.sort(-eigenvalues) def cond( self, uselobpcg: bool = False, **kwargs_eig: Union[int, float, str], ) -> NDArray: r"""Condition number of linear operator. Return an estimate of the condition number of the linear operator as the ratio of the largest and lowest estimated eigenvalues. Parameters ---------- uselobpcg : :obj:`bool`, optional Use :func:`scipy.sparse.linalg.lobpcg` to compute eigenvalues **kwargs_eig Arbitrary keyword arguments for :func:`scipy.sparse.linalg.eigs`, :func:`scipy.sparse.linalg.eigsh`, or :func:`scipy.sparse.linalg.lobpcg` Returns ------- eigenvalues : :obj:`numpy.ndarray` Operator eigenvalues. Notes ----- The condition number of a matrix (or linear operator) can be estimated as the ratio of the largest and lowest estimated eigenvalues: .. math:: k= \frac{\lambda_{max}}{\lambda_{min}} The condition number provides an indication of the rate at which the solution of the inversion of the linear operator :math:`A` will change with respect to a change in the data :math:`y`. Thus, if the condition number is large, even a small error in :math:`y` may cause a large error in :math:`x`. On the other hand, if the condition number is small then the error in :math:`x` is not much bigger than the error in :math:`y`. A problem with a low condition number is said to be *well-conditioned*, while a problem with a high condition number is said to be *ill-conditioned*. 
""" if not uselobpcg: cond = ( self.eigs(neigs=1, which="LM", **kwargs_eig).item() / self.eigs(neigs=1, which="SM", **kwargs_eig).item() ) else: cond = ( self.eigs(neigs=1, uselobpcg=True, largest=True, **kwargs_eig).item() / self.eigs(neigs=1, uselobpcg=True, largest=False, **kwargs_eig).item() ) return cond def conj(self) -> LinearOperator: """Complex conjugate operator Returns ------- conjop : :obj:`pylops.LinearOperator` Complex conjugate operator """ conjop = _ConjLinearOperator(self) return conjop def apply_columns(self, cols: InputDimsLike) -> LinearOperator: """Apply subset of columns of operator This method can be used to wrap a LinearOperator and mimic the action of a subset of columns of the operator on a reduced model in forward mode, and retrieve only the result of a subset of rows in adjoint mode. Note that unless the operator has ``explicit=True``, this is not optimal as the entire forward and adjoint passes of the original operator will have to be perfomed. It can however be useful for the implementation of solvers such as Orthogonal Matching Pursuit (OMP) that iteratively build a solution by evaluate only a subset of the columns of the operator. 
Parameters ---------- cols : :obj:`list` Columns to be selected Returns ------- colop : :obj:`pylops.LinearOperator` Apply column operator """ colop = _ColumnLinearOperator(self, cols) return colop def toreal(self, forw: bool = True, adj: bool = True) -> LinearOperator: """Real operator Parameters ---------- forw : :obj:`bool`, optional Apply real to output of forward pass adj : :obj:`bool`, optional Apply real to output of adjoint pass Returns ------- realop : :obj:`pylops.LinearOperator` Real operator """ realop = _RealImagLinearOperator(self, forw, adj, True) return realop def toimag(self, forw: bool = True, adj: bool = True) -> LinearOperator: """Imag operator Parameters ---------- forw : :obj:`bool`, optional Apply imag to output of forward pass adj : :obj:`bool`, optional Apply imag to output of adjoint pass Returns ------- imagop : :obj:`pylops.LinearOperator` Imag operator """ imagop = _RealImagLinearOperator(self, forw, adj, False) return imagop def trace( self, neval: Optional[int] = None, method: Optional[str] = None, backend: str = "numpy", **kwargs_trace, ) -> float: r"""Trace of linear operator. Returns the trace (or its estimate) of the linear operator. Parameters ---------- neval : :obj:`int`, optional Maximum number of matrix-vector products compute. Default depends ``method``. method : :obj:`str`, optional Should be one of the following: - **explicit**: If the operator is not explicit, will convert to dense first. - **hutchinson**: see :obj:`pylops.utils.trace_hutchinson` - **hutch++**: see :obj:`pylops.utils.trace_hutchpp` - **na-hutch++**: see :obj:`pylops.utils.trace_nahutchpp` Defaults to 'explicit' for explicit operators, and 'Hutch++' for the rest. backend : :obj:`str`, optional Backend used to densify matrix (``numpy`` or ``cupy``). Note that this must be consistent with how the operator has been created. 
**kwargs_trace Arbitrary keyword arguments passed to :obj:`pylops.utils.trace_hutchinson`, :obj:`pylops.utils.trace_hutchpp`, or :obj:`pylops.utils.trace_nahutchpp` Returns ------- trace : :obj:`float` Operator trace. Raises ------ ValueError If the operator has rectangular shape (``shape[0] != shape[1]``) NotImplementedError If the ``method`` is not one of the available methods. """ if self.shape[0] != self.shape[1]: raise ValueError("operator is not square.") ncp = get_module(backend) if method is None: method = "explicit" if self.explicit else "hutch++" method_l = method.lower() if method_l == "explicit": A = self.A if self.explicit else self.todense(backend=backend) return ncp.trace(A) elif method_l == "hutchinson": return trace_hutchinson(self, neval=neval, backend=backend, **kwargs_trace) elif method_l == "hutch++": return trace_hutchpp(self, neval=neval, backend=backend, **kwargs_trace) elif method_l == "na-hutch++": return trace_nahutchpp(self, neval=neval, backend=backend, **kwargs_trace) else: raise NotImplementedError(f"method {method} not available.") def reset_count(self) -> None: """Reset counters When invoked all counters are set back to 0. """ self.matvec_count = 0 self.rmatvec_count = 0 self.matmat_count = 0 self.rmatmat_count = 0 def _get_dtype( operators: Sequence[LinearOperator], dtypes: Optional[Sequence[DTypeLike]] = None, ) -> DTypeLike: if dtypes is None: dtypes = [] opdtypes = [] for obj in operators: if obj is not None and hasattr(obj, "dtype"): opdtypes.append(obj.dtype) return np.find_common_type(opdtypes, dtypes) class _ScaledLinearOperator(LinearOperator): """ Sum Linear Operator Modified version of scipy _ScaledLinearOperator which uses a modified _get_dtype where the scalar and operator types are passed separately to np.find_common_type. 
Passing them together does lead to problems when using np.float32 operators which are cast to np.float64 """ def __init__( self, A: LinearOperator, alpha: float, ) -> None: if not isinstance(A, LinearOperator): raise ValueError("LinearOperator expected as A") if not np.isscalar(alpha): raise ValueError("scalar expected as alpha") dtype = _get_dtype([A], [type(alpha)]) super(_ScaledLinearOperator, self).__init__(dtype=dtype, shape=A.shape) self.args = (A, alpha) def _matvec(self, x: NDArray) -> NDArray: return self.args[1] * self.args[0].matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return np.conj(self.args[1]) * self.args[0].rmatvec(x) def _rmatmat(self, x: NDArray) -> NDArray: return np.conj(self.args[1]) * self.args[0].rmatmat(x) def _matmat(self, x: NDArray) -> NDArray: return self.args[1] * self.args[0].matmat(x) def _adjoint(self) -> LinearOperator: A, alpha = self.args return A.H * np.conj(alpha) class _ConjLinearOperator(LinearOperator): """Complex conjugate linear operator""" def __init__(self, Op: LinearOperator) -> None: if not isinstance(Op, LinearOperator): raise TypeError("Op must be a LinearOperator") super(_ConjLinearOperator, self).__init__(Op, shape=Op.shape) self.Op = Op def _matvec(self, x: NDArray) -> NDArray: return (self.Op._matvec(x.conj())).conj() def _rmatvec(self, x: NDArray) -> NDArray: return (self.Op._rmatvec(x.conj())).conj() def _adjoint(self) -> LinearOperator: return _ConjLinearOperator(self.Op.H) class _ColumnLinearOperator(LinearOperator): """Column selector linear operator Produces the forward and adjoint passes with a subset of columns of the original operator """ def __init__( self, Op: LinearOperator, cols: InputDimsLike, ) -> None: if not isinstance(Op, LinearOperator): raise TypeError("Op must be a LinearOperator") super(_ColumnLinearOperator, self).__init__(Op, explicit=Op.explicit) self.Op = Op self.cols = cols self._shape = (Op.shape[0], len(cols)) self._dims = len(cols) if self.explicit: self.Opcol = Op.A[:, cols] 
def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if self.explicit: y = self.Opcol @ x else: y = ncp.zeros(int(self.Op.shape[1]), dtype=self.dtype) y[self.cols] = x y = self.Op._matvec(y) return y def _rmatvec(self, x: NDArray) -> NDArray: if self.explicit: y = self.Opcol.T.conj() @ x else: y = self.Op._rmatvec(x) y = y[self.cols] return y class _AdjointLinearOperator(LinearOperator): """Adjoint of Linear Operator""" def __init__(self, A: LinearOperator): shape = (A.shape[1], A.shape[0]) super(_AdjointLinearOperator, self).__init__(shape=shape, dtype=A.dtype) self.A = A self.args = (A,) def _matvec(self, x: NDArray) -> NDArray: return self.A._rmatvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return self.A._matvec(x) def _matmat(self, X: NDArray) -> NDArray: return self.A._rmatmat(X) def _rmatmat(self, X: NDArray) -> NDArray: return self.A._matmat(X) class _TransposedLinearOperator(LinearOperator): """Transposition of Linear Operator""" def __init__(self, A: LinearOperator): shape = (A.shape[1], A.shape[0]) super(_TransposedLinearOperator, self).__init__(shape=shape, dtype=A.dtype) self.A = A self.args = (A,) def _matvec(self, x: NDArray) -> NDArray: return np.conj(self.A._rmatvec(np.conj(x))) def _rmatvec(self, x: NDArray) -> NDArray: return np.conj(self.A._matvec(np.conj(x))) def _matmat(self, X: NDArray) -> NDArray: return np.conj(self.A._rmatmat(np.conj(X))) def _rmatmat(self, X: NDArray) -> NDArray: return np.conj(self.A._matmat(np.conj(X))) class _ProductLinearOperator(LinearOperator): """Product of Linear Operators""" def __init__(self, A: LinearOperator, B: LinearOperator): if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator): raise ValueError( f"both operands have to be a LinearOperator{type(A)} {type(B)}" ) if A.shape[1] != B.shape[0]: raise ValueError("cannot add %r and %r: shape mismatch" % (A, B)) super().__init__(dtype=_get_dtype([A, B]), shape=(A.shape[0], B.shape[1])) self.args = (A, B) def _matvec(self, x: 
NDArray) -> NDArray: return self.args[0].matvec(self.args[1].matvec(x)) def _rmatvec(self, x: NDArray) -> NDArray: return self.args[1].rmatvec(self.args[0].rmatvec(x)) def _rmatmat(self, X: NDArray) -> NDArray: return self.args[1].rmatmat(self.args[0].rmatmat(X)) def _matmat(self, X: NDArray) -> NDArray: return self.args[0].matmat(self.args[1].matmat(X)) def _adjoint(self): A, B = self.args return B.H * A.H class _SumLinearOperator(LinearOperator): def __init__( self, A: LinearOperator, B: LinearOperator, ) -> None: if not isinstance(A, LinearOperator) or not isinstance(B, LinearOperator): raise ValueError("both operands have to be a LinearOperator") if A.shape != B.shape: raise ValueError("cannot add %r and %r: shape mismatch" % (A, B)) self.args = (A, B) super(_SumLinearOperator, self).__init__( dtype=_get_dtype([A, B]), shape=A.shape ) def _matvec(self, x: NDArray) -> NDArray: return self.args[0].matvec(x) + self.args[1].matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return self.args[0].rmatvec(x) + self.args[1].rmatvec(x) def _rmatmat(self, x: NDArray) -> NDArray: return self.args[0].rmatmat(x) + self.args[1].rmatmat(x) def _matmat(self, x: NDArray) -> NDArray: return self.args[0].matmat(x) + self.args[1].matmat(x) def _adjoint(self) -> LinearOperator: A, B = self.args return A.H + B.H class _PowerLinearOperator(LinearOperator): def __init__(self, A: LinearOperator, p: int) -> None: if not isinstance(A, LinearOperator): raise ValueError("LinearOperator expected as A") if A.shape[0] != A.shape[1]: raise ValueError("square LinearOperator expected, got %r" % A) if not isintlike(p) or p < 0: raise ValueError("non-negative integer expected as p") super(_PowerLinearOperator, self).__init__(dtype=_get_dtype([A]), shape=A.shape) self.args = (A, p) def _power(self, fun: Callable, x: NDArray) -> NDArray: res = np.array(x, copy=True) for _ in range(self.args[1]): res = fun(res) return res def _matvec(self, x: NDArray) -> NDArray: return 
self._power(self.args[0].matvec, x) def _rmatvec(self, x: NDArray) -> NDArray: return self._power(self.args[0].rmatvec, x) def _rmatmat(self, x: NDArray) -> NDArray: return self._power(self.args[0].rmatmat, x) def _matmat(self, x: NDArray) -> NDArray: return self._power(self.args[0].matmat, x) def _adjoint(self) -> LinearOperator: A, p = self.args return A.H**p class _RealImagLinearOperator(LinearOperator): """Real-Imag linear operator Computes forward and adjoint passes of an operator Op and returns only its real (or imaginary) component. Note that for the adjoint step the output must be complex conjugated (i.e. opposite of the imaginary part is returned) """ def __init__( self, Op: LinearOperator, forw: bool = True, adj: bool = True, real: bool = True, ) -> None: if not isinstance(Op, LinearOperator): raise TypeError("Op must be a LinearOperator") super(_RealImagLinearOperator, self).__init__(Op, shape=Op.shape) self.Op = Op self.real = real self.forw = forw self.adj = adj self.dtype = np.array(0, dtype=self.Op.dtype).real.dtype def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = self.Op._matvec(x) if self.forw: if self.real: y = ncp.real(y) else: y = ncp.imag(y) return y def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = self.Op._rmatvec(x) if self.adj: if self.real: y = ncp.real(y) else: y = -ncp.imag(y) return y def aslinearoperator(Op: Union[spLinearOperator, LinearOperator]) -> LinearOperator: """Return Op as a LinearOperator. Converts any operator compatible with pylops definition of LinearOperator into a pylops LinearOperator. This can be used for example when `Op` is a scipy operator to ensure that the returned operator has all properties and methods of the pylops class. 
Parameters ---------- Op : :obj:`pylops.LinearOperator` or any other Operator Operator of any type Returns ------- Op : :obj:`pylops.LinearOperator` Operator of type :obj:`pylops.LinearOperator` """ if isinstance(Op, LinearOperator): return Op else: return LinearOperator(Op)
51,799
33.579439
98
py
pylops
pylops-master/pylops/config.py
""" Configuration ============= The configuration module controls module-level behavior in PyLops. You can either set behavior globally with getter/setter: get_ndarray_multiplication Check the status of ndarray multiplication (True/False). set_ndarray_multiplication Enable/disable ndarray multiplication. or use context managers (with blocks): enabled_ndarray_multiplication Enable ndarray multiplication within context. disabled_ndarray_multiplication Disable ndarray multiplication within context. """ from contextlib import contextmanager from dataclasses import dataclass from typing import Generator __all__ = [ "get_ndarray_multiplication", "set_ndarray_multiplication", "enabled_ndarray_multiplication", "disabled_ndarray_multiplication", ] @dataclass class Config: ndarray_multiplication: bool = True _config = Config() def get_ndarray_multiplication() -> bool: return _config.ndarray_multiplication def set_ndarray_multiplication(val: bool) -> None: _config.ndarray_multiplication = val @contextmanager def enabled_ndarray_multiplication() -> Generator: enabled = get_ndarray_multiplication() set_ndarray_multiplication(True) try: yield enabled finally: set_ndarray_multiplication(enabled) @contextmanager def disabled_ndarray_multiplication() -> Generator: enabled = get_ndarray_multiplication() set_ndarray_multiplication(False) try: yield enabled finally: set_ndarray_multiplication(enabled)
1,575
23.625
100
py
pylops
pylops-master/pylops/__init__.py
""" PyLops ====== Linear operators and inverse problems are at the core of many of the most used algorithms in signal processing, image processing, and remote sensing. When dealing with small-scale problems, the Python numerical scientific libraries `numpy <http://www.numpy.org>`_ and `scipy <http://www.scipy.org/scipylib/index.html>`_ allow to perform most of the underlying matrix operations (e.g., computation of matrix-vector products and manipulation of matrices) in a simple and expressive way. Many useful operators, however, do not lend themselves to an explicit matrix representation when used to solve large-scale problems. PyLops operators, on the other hand, still represent a matrix and can be treated in a similar way, but do not rely on the explicit creation of a dense (or sparse) matrix itself. Conversely, the forward and adjoint operators are represented by small pieces of codes that mimic the effect of the matrix on a vector or another matrix. Luckily, many iterative methods (e.g. cg, lsqr) do not need to know the individual entries of a matrix to solve a linear system. Such solvers only require the computation of forward and adjoint matrix-vector products as done for any of the PyLops operators. PyLops provides 1. A general construct for creating Linear Operators 2. An extensive set of commonly used linear operators 3. A set of least-squares and sparse solvers for linear operators. Available subpackages --------------------- basicoperators Basic Linear Operators signalprocessing Linear Operators for Signal Processing operations avo Linear Operators for Seismic Reservoir Characterization waveeqprocessing Linear Operators for Wave Equation oriented processing optimization Solvers utils Utility routines """ from .config import * from .linearoperator import * from .torchoperator import * from .basicoperators import * from . 
import ( avo, basicoperators, optimization, signalprocessing, utils, waveeqprocessing, ) from .avo.poststack import * from .avo.prestack import * from .optimization.basic import * from .optimization.leastsquares import * from .optimization.sparsity import * from .utils.seismicevents import * from .utils.tapers import * from .utils.utils import * from .utils.wavelets import * try: from .version import version as __version__ except ImportError: # If it was not installed, then we don't know the version. We could throw a # warning here, but this case *should* be rare. pylops should be installed # properly! from datetime import datetime __version__ = "unknown-" + datetime.today().strftime("%Y%m%d")
2,654
32.607595
79
py
pylops
pylops-master/pylops/torchoperator.py
__all__ = [
    "TorchOperator",
]

from typing import Optional

import numpy as np

from pylops import LinearOperator
from pylops.utils import deps

# torch is an optional dependency: import the autograd wrapper only when it
# is available, otherwise prepare an error message raised at construction time
if deps.torch_enabled:
    from pylops._torchoperator import _TorchOperator
else:
    torch_message = (
        "Torch package not installed. In order to be able to use"
        'the twoway module run "pip install torch" or'
        '"conda install -c pytorch torch".'
    )
from pylops.utils.typing import TensorTypeLike


class TorchOperator:
    """Wrap a PyLops operator into a Torch function.

    This class can be used to wrap a pylops operator into a
    torch function. Doing so, users can mix native torch functions (e.g.
    basic linear algebra operations, neural networks, etc.) and pylops
    operators.

    Since all operators in PyLops are linear operators, a Torch function is
    simply implemented by using the forward operator for its forward pass
    and the adjoint operator for its backward (gradient) pass.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        PyLops operator
    batch : :obj:`bool`, optional
        Input has single sample (``False``) or batch of samples (``True``).
        If ``batch==False`` the input must be a 1-d Torch tensor or a tensor
        of size equal to ``Op.dims``; if ``batch==True`` the input must
        be a 2-d Torch tensor with batches along the first dimension or a
        tensor of size equal to ``[nbatch, *Op.dims]`` where ``nbatch`` is
        the size of the batch
    flatten : :obj:`bool`, optional
        Input is flattened along ``Op.dims`` (``True``) or not (``False``).
        Note that a ``None`` value is treated the same as ``False``.
    device : :obj:`str`, optional
        Device to be used when applying operator (``cpu`` or ``gpu``)
    devicetorch : :obj:`str`, optional
        Device to be assigned the output of the operator to (any Torch-compatible device)

    """

    def __init__(
        self,
        Op: LinearOperator,
        batch: bool = False,
        flatten: Optional[bool] = True,
        device: str = "cpu",
        devicetorch: str = "cpu",
    ) -> None:
        # fail early with installation instructions if torch is missing
        if not deps.torch_enabled:
            raise NotImplementedError(torch_message)
        self.device = device
        self.devicetorch = devicetorch
        self.dtype = np.dtype(Op.dtype)
        # mirror the wrapped operator's model/data dimensions and name
        self.dims, self.dimsd = Op.dims, Op.dimsd
        self.name = Op.name
        # define transpose indices to bring batch to last dimension before applying
        # pylops forward and adjoint (this will call matmat and rmatmat)
        self.transpf = np.roll(np.arange(2 if flatten else len(self.dims) + 1), -1)
        self.transpb = np.roll(np.arange(2 if flatten else len(self.dims) + 1), 1)
        if not batch:
            # single sample: apply the operator (and its adjoint) directly
            self.matvec = lambda x: Op @ x
            self.rmatvec = lambda x: Op.H @ x
        else:
            # batched input: move the batch axis to the back so that
            # Op @ x dispatches to matmat/rmatmat, then move it to the front
            self.matvec = lambda x: (Op @ x.transpose(self.transpf)).transpose(
                self.transpb
            )
            self.rmatvec = lambda x: (Op.H @ x.transpose(self.transpf)).transpose(
                self.transpb
            )
        # torch.autograd.Function entry point: forward uses matvec,
        # backward uses rmatvec
        self.Top = _TorchOperator.apply

    def __call__(self, x):
        # allow the wrapper to be used as a callable, like a torch module
        return self.apply(x)

    def apply(self, x: TensorTypeLike) -> TensorTypeLike:
        """Apply forward pass to input vector

        Parameters
        ----------
        x : :obj:`torch.Tensor`
            Input array

        Returns
        -------
        y : :obj:`torch.Tensor`
            Output array resulting from the application of the operator to ``x``.

        """
        return self.Top(x, self.matvec, self.rmatvec, self.device, self.devicetorch)
3,580
33.76699
89
py
pylops
pylops-master/pylops/waveeqprocessing/lsm.py
__all__ = ["LSM"] import logging from typing import Callable, Optional from scipy.sparse.linalg import lsqr from pylops.utils import dottest as Dottest from pylops.utils.typing import NDArray from pylops.waveeqprocessing.kirchhoff import Kirchhoff from pylops.waveeqprocessing.twoway import AcousticWave2D logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class LSM: r"""Least-squares Migration (LSM). Solve seismic migration as inverse problem given smooth velocity model ``vel`` and an acquisition setup identified by sources (``src``) and receivers (``recs``). Parameters ---------- z : :obj:`numpy.ndarray` Depth axis x : :obj:`numpy.ndarray` Spatial axis t : :obj:`numpy.ndarray` Time axis for data srcs : :obj:`numpy.ndarray` Sources in array of size :math:`\lbrack 2(3) \times n_s \rbrack` recs : :obj:`numpy.ndarray` Receivers in array of size :math:`\lbrack 2(3) \times n_r \rbrack` vel : :obj:`numpy.ndarray` or :obj:`float` Velocity model of size :math:`\lbrack (n_y \times)\, n_x \times n_z \rbrack` (or constant) wav : :obj:`numpy.ndarray` Wavelet wavcenter : :obj:`int` Index of wavelet center y : :obj:`numpy.ndarray` Additional spatial axis (for 3-dimensional problems) kind : :str`, optional Kind of modelling operator (``kirchhoff``, ``twoway``) dottest : :obj:`bool`, optional Apply dot-test **kwargs_mod : :obj:`int`, optional Additional arguments to pass to modelling operators Attributes ---------- Demop : :class:`pylops.LinearOperator` Demigration operator operator See Also -------- pylops.waveeqprocessing.Kirchhoff : Kirchhoff operator pylops.waveeqprocessing.AcousticWave2D : AcousticWave2D operator Notes ----- Inverting a demigration operator is generally referred in the literature as least-squares migration (LSM) as historically a least-squares cost function has been used for this purpose. 
In practice any other cost function could be used, for examples if ``solver='pylops.optimization.sparsity.FISTA'`` a sparse representation of reflectivity is produced as result of the inversion. This routines provides users with a easy-to-use, out-of-the-box least-squares migration application that currently implements: - Kirchhoff LSM: this problem is parametrized in terms of reflectivity (i.e., vertical derivative of the acoustic impedance - or velocity in case of constant density). Currently, a ray-based modelling engine is used for this case (see :class:`pylops.waveeqprocessing.Kirchhoff`). - Born LSM: this problem is parametrized in terms of squared slowness perturbation (in the constant density case) and it is solved using an acoustic two-way eave equation modelling engine (see :class:`pylops.waveeqprocessing.AcousticWave2D`). The following table shows the current status of the LSM application: +------------------+----------------------+-----------+------------+ | | Kirchhoff integral | WKBJ | Wave eq | +==================+======================+===========+============+ | Reflectivity | V | X | X | +------------------+----------------------+-----------+------------+ | Slowness-squared | X | X | V | +------------------+----------------------+-----------+------------+ Finally, it is worth noting that for both cases the first iteration of an iterative scheme aimed at inverting the demigration operator is a simple a projection of the recorded data into the model domain. An approximate (band-limited) image of the subsurface is therefore created. This process is referred to in the literature as *migration*. 
""" def __init__( self, z: NDArray, x: NDArray, t: NDArray, srcs: NDArray, recs: NDArray, vel: NDArray, wav: NDArray, wavcenter: int, y: Optional[NDArray] = None, kind: str = "kirchhoff", dottest: bool = False, **kwargs_mod, ) -> None: self.y, self.x, self.z = y, x, z if kind == "kirchhoff": self.Demop = Kirchhoff( z, x, t, srcs, recs, vel, wav, wavcenter, y=y, **kwargs_mod ) elif kind == "twowayac": shape = (len(x), len(z)) origin = (x[0], z[0]) spacing = (x[1] - x[0], z[1] - z[0]) self.Demop = AcousticWave2D( shape, origin, spacing, vel, srcs[0], srcs[1], recs[0], recs[1], t[0], len(t), **kwargs_mod, ) else: raise NotImplementedError("kind must be kirchhoff or twowayac") if dottest: Dottest( self.Demop, self.Demop.shape[0], self.Demop.shape[1], raiseerror=True, verb=True, ) def solve(self, d: NDArray, solver: Callable = lsqr, **kwargs_solver): r"""Solve least-squares migration equations with chosen ``solver`` Parameters ---------- d : :obj:`numpy.ndarray` Input data of size :math:`\lbrack n_s \times n_r \times n_t \rbrack` solver : :obj:`func`, optional Solver to be used for inversion **kwargs_solver Arbitrary keyword arguments for chosen ``solver`` Returns ------- minv : :obj:`np.ndarray` Inverted reflectivity model of size :math:`\lbrack (n_y \times) n_x \times n_z \rbrack` """ minv = solver(self.Demop, d.ravel(), **kwargs_solver)[0] if self.y is None: minv = minv.reshape(len(self.x), len(self.z)) else: minv = minv.reshape(len(self.y), len(self.x), len(self.z)) return minv
6,222
33.960674
95
py
pylops
pylops-master/pylops/waveeqprocessing/kirchhoff.py
__all__ = ["Kirchhoff"] import logging import os import warnings from typing import Optional, Tuple, Union import numpy as np from pylops import LinearOperator from pylops.signalprocessing import Convolve1D from pylops.utils import deps from pylops.utils._internal import _value_or_sized_to_array from pylops.utils.decorators import reshaped from pylops.utils.tapers import taper from pylops.utils.typing import DTypeLike, NDArray skfmm_message = deps.skfmm_import("the kirchhoff module") jit_message = deps.numba_import("the kirchhoff module") if skfmm_message is None: import skfmm if jit_message is None: from numba import jit, prange # detect whether to use parallel or not numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1")) parallel = True if numba_threads != 1 else False else: prange = range logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class Kirchhoff(LinearOperator): r"""Kirchhoff Demigration operator. Kirchhoff-based demigration/migration operator. Uses a high-frequency approximation of Green's function propagators based on ``trav``. Parameters ---------- z : :obj:`numpy.ndarray` Depth axis x : :obj:`numpy.ndarray` Spatial axis t : :obj:`numpy.ndarray` Time axis for data srcs : :obj:`numpy.ndarray` Sources in array of size :math:`\lbrack 2 (3) \times n_s \rbrack` The first axis should be ordered as (``y``,) ``x``, ``z``. recs : :obj:`numpy.ndarray` Receivers in array of size :math:`\lbrack 2 (3) \times n_r \rbrack` The first axis should be ordered as (``y``,) ``x``, ``z``. vel : :obj:`numpy.ndarray` or :obj:`float` Velocity model of size :math:`\lbrack (n_y\,\times)\; n_x \times n_z \rbrack` (or constant) wav : :obj:`numpy.ndarray` Wavelet. wavcenter : :obj:`int` Index of wavelet center y : :obj:`numpy.ndarray` Additional spatial axis (for 3-dimensional problems) mode : :obj:`str`, optional Computation mode (``analytic``, ``eikonal`` or ``byot``, see Notes for more details) wavfilter : :obj:`bool`, optional .. 
versionadded:: 2.0.0 Apply wavelet filter (``True``) or not (``False``) dynamic : :obj:`bool`, optional .. versionadded:: 2.0.0 Include dynamic weights in computations (``True``) or not (``False``). Note that when ``mode=byot``, the user is required to provide such weights in ``amp``. trav : :obj:`numpy.ndarray` or :obj:`tuple`, optional Traveltime table of size :math:`\lbrack (n_y) n_x n_z \times n_s n_r \rbrack` or pair of traveltime tables of size :math:`\lbrack (n_y) n_x n_z \times n_s \rbrack` and :math:`\lbrack (n_y) n_x n_z \times n_r \rbrack` (to be provided if ``mode='byot'``). Note that the latter approach is recommended as less memory demanding than the former. amp : :obj:`numpy.ndarray`, optional .. versionadded:: 2.0.0 Amplitude table of size :math:`\lbrack (n_y) n_x n_z \times n_s n_r \rbrack` or pair of amplitude tables of size :math:`\lbrack (n_y) n_x n_z \times n_s \rbrack` and :math:`\lbrack (n_y) n_x n_z \times n_r \rbrack` (to be provided if ``mode='byot'``). Note that the latter approach is recommended as less memory demanding than the former. aperture : :obj:`float` or :obj:`tuple`, optional .. versionadded:: 2.0.0 Maximum allowed aperture expressed as the ratio of offset over depth. If ``None``, no aperture limitations are introduced. If scalar, a taper from 80% to 100% of aperture is applied. If tuple, apertures below the first value are accepted and those after the second value are rejected. A tapering is implemented for those between such values. angleaperture : :obj:`float` or :obj:`tuple`, optional .. versionadded:: 2.0.0 Maximum allowed angle (either source or receiver side) in degrees. If ``None``, angle aperture limitations are not introduced. See ``aperture`` for implementation details regarding scalar and tuple cases. anglerefl : :obj:`np.ndarray`, optional .. versionadded:: 2.0.0 Angle between the normal of the reflectors and the vertical axis in degrees snell : :obj:`float` or :obj:`tuple`, optional .. 
versionadded:: 2.0.0 Threshold on Snell's law evaluation. If larger, the source-receiver-image point is discarded. If ``None``, no check on the validity of the Snell's law is performed. See ``aperture`` for implementation details regarding scalar and tuple cases. engine : :obj:`str`, optional Engine used for computations (``numpy`` or ``numba``). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ NotImplementedError If ``mode`` is neither ``analytic``, ``eikonal``, or ``byot`` Notes ----- The Kirchhoff demigration operator synthesizes seismic data given a propagation velocity model :math:`v` and a reflectivity model :math:`m`. In forward mode [1]_, [2]_: .. math:: d(\mathbf{x_r}, \mathbf{x_s}, t) = \widetilde{w}(t) * \int_V G(\mathbf{x_r}, \mathbf{x}, t) m(\mathbf{x}) G(\mathbf{x}, \mathbf{x_s}, t)\,\mathrm{d}\mathbf{x} where :math:`m(\mathbf{x})` represents the reflectivity at every location in the subsurface, :math:`G(\mathbf{x}, \mathbf{x_s}, t)` and :math:`G(\mathbf{x_r}, \mathbf{x}, t)` are the Green's functions from source-to-subsurface-to-receiver and finally :math:`\widetilde{w}(t)` is a filtered version of the wavelet :math:`w(t)` [3]_ (or the wavelet itself when ``wavfilter=False``). In our implementation, the following high-frequency approximation of the Green's functions is adopted: .. math:: G(\mathbf{x_r}, \mathbf{x}, \omega) = a(\mathbf{x_r}, \mathbf{x}) e^{j \omega t(\mathbf{x_r}, \mathbf{x})} where :math:`a(\mathbf{x_r}, \mathbf{x})` is the amplitude and :math:`t(\mathbf{x_r}, \mathbf{x})` is the traveltime. When ``dynamic=False`` the amplitude is disregarded leading to a kinematic-only Kirchhoff operator. .. 
math:: d(\mathbf{x_r}, \mathbf{x_s}, t) = \tilde{w}(t) * \int_V e^{j \omega (t(\mathbf{x_r}, \mathbf{x}) + t(\mathbf{x}, \mathbf{x_s}))} m(\mathbf{x}) \,\mathrm{d}\mathbf{x} On the other hand, when ``dynamic=True``, the amplitude scaling is defined as :math:`a(\mathbf{x}, \mathbf{y})=\frac{1}{\|\mathbf{x} - \mathbf{y}\|}`, that is, the reciprocal of the distance between the two points, approximating the geometrical spreading of the wavefront. Moreover an angle scaling is included in the modelling operator added as follows: .. math:: d(\mathbf{x_r}, \mathbf{x_s}, t) = \tilde{w}(t) * \int_V a(\mathbf{x}, \mathbf{x_s}) a(\mathbf{x}, \mathbf{x_r}) \frac{|cos \theta_s + cos \theta_r|} {v(\mathbf{x})} e^{j \omega (t(\mathbf{x_r}, \mathbf{x}) + t(\mathbf{x}, \mathbf{x_s}))} m(\mathbf{x}) \,\mathrm{d}\mathbf{x} where :math:`\theta_s` and :math:`\theta_r` are the angles between the source-side and receiver-side rays and the normal to the reflector at the image point (or the vertical axis at the image point when ``reflslope=None``), respectively. Depending on the choice of ``mode`` the traveltime and amplitude of the Green's function will be also computed differently: * ``mode=analytic`` or ``mode=eikonal``: traveltimes, geometrical spreading, and angles are computed for every source-image point-receiver triplets and the Green's functions are implemented from traveltime look-up tables, placing scaled reflectivity values at corresponding source-to-receiver time in the data. * ``byot``: bring your own tables. Traveltime table are provided directly by user using ``trav`` input parameter. Similarly, in this case one can provide their own amplitude scaling ``amp`` (which should include the angle scaling too). Three aperture limitations have been also implemented as defined by: * ``aperture``: the maximum allowed aperture is expressed as the ratio of offset over depth. This aperture limitation avoid including grazing angles whose contributions can introduce aliasing effects. 
A taper is added at the edges of the aperture; * ``angleaperture``: the maximum allowed angle aperture is expressed as the difference between the incident or emerging angle at every image point and the vertical axis (or the normal to the reflector if ``anglerefl`` is provided. This aperture limitation also avoid including grazing angles whose contributions can introduce aliasing effects. Note that for a homogenous medium and slowly varying heterogenous medium the offset and angle aperture limits may work in the same way; * ``snell``: the maximum allowed snell's angle is expressed as the absolute value of the sum between incident and emerging angles defined as in the ``angleaperture`` case. This aperture limitation is introduced to turn a scattering-based Kirchhoff engine into a reflection-based Kirchhoff engine where each image point is not considered as scatter but as a local horizontal (or straight) reflector. Finally, the adjoint of the demigration operator is a *migration* operator which projects data in the model domain creating an image of the subsurface reflectivity. .. [1] Bleistein, N., Cohen, J.K., and Stockwell, J.W.. "Mathematics of Multidimensional Seismic Imaging, Migration and Inversion", 2000. .. [2] Santos, L.T., Schleicher, J., Tygel, M., and Hubral, P. "Seismic modeling by demigration", Geophysics, 65(4), pp. 1281-1289, 2000. .. [3] Safron, L. "Multicomponent least-squares Kirchhoff depth migration", MSc Thesis, 2018. 
""" def __init__( self, z: NDArray, x: NDArray, t: NDArray, srcs: NDArray, recs: NDArray, vel: NDArray, wav: NDArray, wavcenter: int, y: Optional[NDArray] = None, mode: str = "eikonal", wavfilter: bool = False, dynamic: bool = False, trav: Optional[NDArray] = None, amp: Optional[NDArray] = None, aperture: Optional[Tuple[float, float]] = None, angleaperture: Union[float, Tuple[float, float]] = 90.0, anglerefl: Optional[NDArray] = None, snell: Optional[Tuple[float, float]] = None, engine: str = "numpy", dtype: DTypeLike = "float64", name: str = "K", ) -> None: warnings.warn( "A new implementation of Kirchhoff is provided in v2.1.0. " "This currently affects only the inner working of the " "operator, end-users can continue using the operator in " "the same way. Nevertheless, it is now recommended to provide" "the variables trav (and amp) as a tuples containing the " "traveltime (and amplitude) tables for sources and receivers " "separately. This behaviour will eventually become default in " "version v3.0.0.", FutureWarning, ) # identify geometry ( self.ndims, _, dims, self.ny, self.nx, self.nz, ns, nr, _, _, _, _, _, ) = Kirchhoff._identify_geometry(z, x, srcs, recs, y=y) self.dt = t[1] - t[0] self.nt = len(t) # store ix-iy locations of sources and receivers dx = x[1] - x[0] if self.ndims == 2: self.six = np.tile((srcs[0] - x[0]) // dx, (nr, 1)).T.astype(int).ravel() self.rix = np.tile((recs[0] - x[0]) // dx, (ns, 1)).astype(int).ravel() elif self.ndims == 3: # TODO: 3D normalized distances pass # compute traveltime self.dynamic = dynamic self.travsrcrec = True # use separate tables for src and rec traveltimes if mode in ["analytic", "eikonal", "byot"]: if mode in ["analytic", "eikonal"]: # compute traveltime table ( self.trav_srcs, self.trav_recs, self.dist_srcs, self.dist_recs, trav_srcs_grad, trav_recs_grad, ) = Kirchhoff._traveltime_table(z, x, srcs, recs, vel, y=y, mode=mode) if self.dynamic: # need to add a scalar in the denominator of amplitude term to avoid # 
division by 0, currently set to 1/100 of max distance to avoid having # to large scaling around the source. This number may change in future # or left to the user to define epsdist = 1e-2 self.maxdist = epsdist * ( np.max(self.dist_srcs) + np.max(self.dist_recs) ) # compute angles if self.ndims == 2: # 2d with vertical if anglerefl is None: self.angle_srcs = np.arctan2( trav_srcs_grad[0], trav_srcs_grad[1] ).reshape(np.prod(dims), ns) self.angle_recs = np.arctan2( trav_recs_grad[0], trav_recs_grad[1] ).reshape(np.prod(dims), nr) self.cosangle_srcs = np.cos(self.angle_srcs) self.cosangle_recs = np.cos(self.angle_recs) else: # TODO: 2D with normal raise NotImplementedError( "angle scaling with anglerefl currently not available" ) else: # TODO: 3D raise NotImplementedError( "dynamic=True currently not available in 3D" ) else: if isinstance(trav, tuple): self.trav_srcs, self.trav_recs = trav else: self.travsrcrec = False self.trav = trav if self.dynamic: if isinstance(amp, tuple): self.amp_srcs, self.amp_recs = amp else: self.amp = amp # in byot mode, angleaperture and snell checks are not performed self.angle_srcs = np.ones( (self.ny * self.nx * self.nz, ns), dtype=dtype ) self.angle_recs = np.ones( (self.ny * self.nx * self.nz, nr), dtype=dtype ) else: raise NotImplementedError("method must be analytic, eikonal or byot") # pre-compute traveltime indices if total traveltime is used if not self.travsrcrec: self.itrav = (self.trav / self.dt).astype("int32") self.travd = self.trav / self.dt - self.itrav # create wavelet operator if wavfilter: self.wav = self._wavelet_reshaping( wav, self.dt, srcs.shape[0], recs.shape[0], self.ndims ) else: self.wav = wav self.cop = Convolve1D( (ns * nr, self.nt), h=self.wav, offset=wavcenter, axis=1, dtype=dtype ) # create fixed-size aperture taper for all apertures self.aperturetap = taper(41, 20, "hanning")[20:] # define aperture if aperture is not None: warnings.warn( "Aperture is currently defined as ratio of offset over depth, " 
"and may be not ideal for highly heterogenous media" ) self.aperture = ( (2 * self.nx / self.nz,) if aperture is None else _value_or_sized_to_array(aperture) ) if len(self.aperture) == 1: self.aperture = np.array([0.8 * self.aperture[0], self.aperture[0]]) # define angle aperture and snell law angleaperture = [0.0, 1000.0] if angleaperture is None else angleaperture self.angleaperture = np.deg2rad(_value_or_sized_to_array(angleaperture)) if len(self.angleaperture) == 1: self.angleaperture = np.array( [0.8 * self.angleaperture[0], self.angleaperture[0]] ) self.snell = ( (np.pi,) if snell is None else np.deg2rad(_value_or_sized_to_array(snell)) ) if len(self.snell) == 1: self.snell = np.array([0.8 * self.snell[0], self.snell[0]]) # dimensions self.ns, self.nr = ns, nr self.nsnr = ns * nr self.ni = np.prod(dims) dims = tuple(dims) if self.ndims == 2 else (dims[0] * dims[1], dims[2]) dimsd = (ns, nr, self.nt) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) # save velocity if using dynamic to compute amplitudes if self.dynamic: self.vel = ( vel.flatten() if not isinstance(vel, (float, int)) else vel * np.ones(np.prod(dims)) ) self._register_multiplications(engine) @staticmethod def _identify_geometry( z: NDArray, x: NDArray, srcs: NDArray, recs: NDArray, y: Optional[NDArray] = None, ) -> Tuple[ int, int, NDArray, int, int, int, int, int, float, float, float, NDArray, NDArray, ]: """Identify geometry and acquisition size and sampling""" ns, nr = srcs.shape[1], recs.shape[1] nz, nx = len(z), len(x) dz = np.abs(z[1] - z[0]) dx = np.abs(x[1] - x[0]) if y is None: ndims = 2 shiftdim = 0 ny = 1 dy = None dims = np.array([nx, nz]) dsamp = np.array([dx, dz]) origin = np.array([x[0], z[0]]) else: ndims = 3 shiftdim = 1 ny = len(y) dy = np.abs(y[1] - y[0]) dims = np.array([ny, nx, nz]) dsamp = np.array([dy, dx, dz]) origin = np.array([y[0], x[0], z[0]]) return ndims, shiftdim, dims, ny, nx, nz, ns, nr, dy, dx, dz, dsamp, origin @staticmethod def 
_traveltime_table( z: NDArray, x: NDArray, srcs: NDArray, recs: NDArray, vel: Union[float, NDArray], y: Optional[NDArray] = None, mode: str = "eikonal", ) -> Tuple[NDArray, NDArray, NDArray, NDArray, NDArray, NDArray]: r"""Traveltime table Compute traveltimes along the source-subsurface-receivers triplet in 2- or 3-dimensional media given a constant or depth- and space variable velocity. Parameters ---------- z : :obj:`numpy.ndarray` Depth axis x : :obj:`numpy.ndarray` Spatial axis srcs : :obj:`numpy.ndarray` Sources in array of size :math:`\lbrack 2 (3) \times n_s \rbrack` recs : :obj:`numpy.ndarray` Receivers in array of size :math:`\lbrack 2 (3) \times n_r \rbrack` vel : :obj:`numpy.ndarray` or :obj:`float` Velocity model of size :math:`\lbrack (n_y \times)\, n_x \times n_z \rbrack` (or constant) y : :obj:`numpy.ndarray` Additional spatial axis (for 3-dimensional problems) mode : :obj:`numpy.ndarray`, optional Computation mode (``eikonal``, ``analytic`` - only for constant velocity) Returns ------- trav_srcs : :obj:`numpy.ndarray` Source-to-subsurface traveltime table of size :math:`\lbrack (n_y*) n_x n_z \times n_s \rbrack` trav_recs : :obj:`numpy.ndarray` Receiver-to-subsurface traveltime table of size :math:`\lbrack (n_y) n_x n_z \times n_r \rbrack` dist_srcs : :obj:`numpy.ndarray` Source-to-subsurface distance table of size :math:`\lbrack (n_y*) n_x n_z \times n_s \rbrack` dist_recs : :obj:`numpy.ndarray` Receiver-to-subsurface distance table of size :math:`\lbrack (n_y) n_x n_z \times n_r \rbrack` trav_srcs_gradient : :obj:`numpy.ndarray` Source-to-subsurface traveltime gradient table of size :math:`\lbrack (n_y*) n_x n_z \times n_s \rbrack` trav_recs_gradient : :obj:`numpy.ndarray` Receiver-to-subsurface traveltime gradient table of size :math:`\lbrack (n_y) n_x n_z \times n_r \rbrack` """ # define geometry ( ndims, shiftdim, dims, ny, nx, nz, ns, nr, _, _, _, dsamp, origin, ) = Kirchhoff._identify_geometry(z, x, srcs, recs, y=y) # compute traveltimes if 
mode == "analytic": if not isinstance(vel, (float, int)): raise ValueError("vel must be scalar for mode=analytical") # compute grid if ndims == 2: X, Z = np.meshgrid(x, z, indexing="ij") X, Z = X.ravel(), Z.ravel() else: Y, X, Z = np.meshgrid(y, x, z, indexing="ij") Y, X, Z = Y.ravel(), X.ravel(), Z.ravel() dist_srcs2 = np.zeros((ny * nx * nz, ns)) dist_recs2 = np.zeros((ny * nx * nz, nr)) for isrc, src in enumerate(srcs.T): dist_srcs2[:, isrc] = (X - src[0 + shiftdim]) ** 2 + ( Z - src[1 + shiftdim] ) ** 2 if ndims == 3: dist_srcs2[:, isrc] += (Y - src[0]) ** 2 for irec, rec in enumerate(recs.T): dist_recs2[:, irec] = (X - rec[0 + shiftdim]) ** 2 + ( Z - rec[1 + shiftdim] ) ** 2 if ndims == 3: dist_recs2[:, irec] += (Y - rec[0]) ** 2 trav_srcs = np.sqrt(dist_srcs2) / vel trav_recs = np.sqrt(dist_recs2) / vel dist_srcs = trav_srcs * vel dist_recs = trav_recs * vel elif mode == "eikonal": if skfmm is not None: dist_srcs = np.zeros((ny * nx * nz, ns)) dist_recs = np.zeros((ny * nx * nz, nr)) trav_srcs = np.zeros((ny * nx * nz, ns)) trav_recs = np.zeros((ny * nx * nz, nr)) for isrc, src in enumerate(srcs.T): src = np.round((src - origin) / dsamp).astype(np.int32) phi = np.ones_like(vel) if ndims == 2: phi[src[0], src[1]] = -1 else: phi[src[0], src[1], src[2]] = -1 dist_srcs[:, isrc] = (skfmm.distance(phi=phi, dx=dsamp)).ravel() trav_srcs[:, isrc] = ( skfmm.travel_time(phi=phi, speed=vel, dx=dsamp) ).ravel() for irec, rec in enumerate(recs.T): rec = np.round((rec - origin) / dsamp).astype(np.int32) phi = np.ones_like(vel) if ndims == 2: phi[rec[0], rec[1]] = -1 else: phi[rec[0], rec[1], rec[2]] = -1 dist_recs[:, irec] = (skfmm.distance(phi=phi, dx=dsamp)).ravel() trav_recs[:, irec] = ( skfmm.travel_time(phi=phi, speed=vel, dx=dsamp) ).ravel() else: raise NotImplementedError(skfmm_message) else: raise NotImplementedError("method must be analytic or eikonal") # compute traveltime gradients at image points trav_srcs_grad = np.gradient( trav_srcs.reshape(*dims, ns), 
axis=np.arange(ndims) ) trav_recs_grad = np.gradient( trav_recs.reshape(*dims, nr), axis=np.arange(ndims) ) return ( trav_srcs, trav_recs, dist_srcs, dist_recs, trav_srcs_grad, trav_recs_grad, ) def _wavelet_reshaping( self, wav: NDArray, dt: float, dimsrc: int, dimrec: int, dimv: int, ) -> NDArray: """Apply wavelet reshaping as from theory in [1]_""" f = np.fft.rfftfreq(len(wav), dt) W = np.fft.rfft(wav, n=len(wav)) if dimsrc == 2 and dimv == 2: # 2D Wfilt = W * (2 * np.pi * f) elif (dimsrc == 2 or dimrec == 2) and dimv == 3: # 2.5D raise NotImplementedError("2.D wavelet currently not available") elif dimsrc == 3 and dimrec == 3 and dimv == 3: # 3D Wfilt = W * (-1j * 2 * np.pi * f) wavfilt = np.fft.irfft(Wfilt, n=len(wav)) return wavfilt @staticmethod def _trav_kirch_matvec( x: NDArray, y: NDArray, nsnr: int, nt: int, ni: int, itrav: NDArray, travd: NDArray, ) -> NDArray: for isrcrec in prange(nsnr): itravisrcrec = itrav[:, isrcrec] travdisrcrec = travd[:, isrcrec] for ii in range(ni): itravii = itravisrcrec[ii] travdii = travdisrcrec[ii] if 0 <= itravii < nt - 1: y[isrcrec, itravii] += x[ii] * (1 - travdii) y[isrcrec, itravii + 1] += x[ii] * travdii return y @staticmethod def _trav_kirch_rmatvec( x: NDArray, y: NDArray, nsnr: int, nt: int, ni: int, itrav: NDArray, travd: NDArray, ) -> NDArray: for ii in prange(ni): itravii = itrav[ii] travdii = travd[ii] for isrcrec in range(nsnr): itravisrcrecii = itravii[isrcrec] travdisrcrecii = travdii[isrcrec] if 0 <= itravisrcrecii < nt - 1: y[ii] += ( x[isrcrec, itravisrcrecii] * (1 - travdisrcrecii) + x[isrcrec, itravisrcrecii + 1] * travdisrcrecii ) return y @staticmethod def _travsrcrec_kirch_matvec( x: NDArray, y: NDArray, ns: int, nr: int, nt: int, ni: int, dt: float, trav_srcs: NDArray, trav_recs: NDArray, ) -> NDArray: for isrc in prange(ns): travisrc = trav_srcs[:, isrc] for irec in range(nr): travirec = trav_recs[:, irec] trav = travisrc + travirec itrav = (trav / dt).astype("int32") travd = trav / dt - itrav for 
ii in range(ni): itravii = itrav[ii] travdii = travd[ii] if 0 <= itravii < nt - 1: y[isrc * nr + irec, itravii] += x[ii] * (1 - travdii) y[isrc * nr + irec, itravii + 1] += x[ii] * travdii return y @staticmethod def _travsrcrec_kirch_rmatvec( x: NDArray, y: NDArray, ns: int, nr: int, nt: int, ni: int, dt: float, trav_srcs: NDArray, trav_recs: NDArray, ) -> NDArray: for ii in prange(ni): trav_srcsii = trav_srcs[ii] trav_recsii = trav_recs[ii] for isrc in prange(ns): trav_srcii = trav_srcsii[isrc] for irec in range(nr): trav_recii = trav_recsii[irec] travii = trav_srcii + trav_recii itravii = int(travii / dt) travdii = travii / dt - itravii if 0 <= itravii < nt - 1: y[ii] += ( x[isrc * nr + irec, itravii] * (1 - travdii) + x[isrc * nr + irec, itravii + 1] * travdii ) return y @staticmethod def _amp_kirch_matvec( x: NDArray, y: NDArray, nsnr: int, nt: int, ni: int, itrav: NDArray, travd: NDArray, amp: NDArray, aperturemin: float, aperturemax: float, aperturetap: NDArray, nz: int, six: NDArray, rix: NDArray, angleaperturemin: float, angleaperturemax: float, angles_srcs: NDArray, angles_recs: NDArray, snellmin: float, snellmax: float, ) -> NDArray: nr = angles_recs.shape[-1] daperture = aperturemax - aperturemin dangleaperture = angleaperturemax - angleaperturemin dsnell = snellmax - snellmin for isrcrec in prange(nsnr): # extract traveltime, amplitude, src/rec coordinates at given src/pair itravisrcrec = itrav[:, isrcrec] travdisrcrec = travd[:, isrcrec] ampisrcrec = amp[:, isrcrec] sixisrcrec = six[isrcrec] rixisrcrec = rix[isrcrec] # extract source and receiver angles angles_src = angles_srcs[:, isrcrec // nr] angles_rec = angles_recs[:, isrcrec % nr] for ii in range(ni): # extract traveltime, amplitude at given image point itravii = itravisrcrec[ii] travdii = travdisrcrec[ii] damp = ampisrcrec[ii] # extract source and receiver angle angle_src = angles_src[ii] angle_rec = angles_rec[ii] abs_angle_src = abs(angle_src) abs_angle_rec = abs(angle_rec) abs_angle_src_rec = 
abs(angle_src + angle_rec) aptap = 1.0 # angle apertures checks if ( abs_angle_src < angleaperturemax and abs_angle_rec < angleaperturemax and abs_angle_src_rec < snellmax ): if abs_angle_src >= angleaperturemin: # extract source angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_src - angleaperturemin) // dangleaperture ) ] ) if abs_angle_rec >= angleaperturemin: # extract receiver angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_rec - angleaperturemin) // dangleaperture ) ] ) if abs_angle_src_rec >= snellmin: # extract snell taper value aptap = ( aptap * aperturetap[ int(20 * (abs_angle_src_rec - snellmin) // dsnell) ] ) # identify x-index of image point iz = ii % nz # aperture check aperture = abs(sixisrcrec - rixisrcrec) / iz if aperture < aperturemax: if aperture >= aperturemin: # extract aperture taper value aptap = ( aptap * aperturetap[ int(20 * ((aperture - aperturemin) // daperture)) ] ) # time limit check if 0 <= itravii < nt - 1: # assign values y[isrcrec, itravii] += x[ii] * (1 - travdii) * damp * aptap y[isrcrec, itravii + 1] += x[ii] * travdii * damp * aptap return y @staticmethod def _amp_kirch_rmatvec( x: NDArray, y: NDArray, nsnr: int, nt: int, ni: int, itrav: NDArray, travd: NDArray, amp: NDArray, aperturemin: float, aperturemax: float, aperturetap: NDArray, nz: int, six: NDArray, rix: NDArray, angleaperturemin: float, angleaperturemax: float, angles_srcs: NDArray, angles_recs: NDArray, snellmin: float, snellmax: float, ) -> NDArray: nr = angles_recs.shape[-1] daperture = aperturemax - aperturemin dangleaperture = angleaperturemax - angleaperturemin dsnell = snellmax - snellmin for ii in prange(ni): itravii = itrav[ii] travdii = travd[ii] ampii = amp[ii] # extract source and receiver angles angle_srcs = angles_srcs[ii] angle_recs = angles_recs[ii] # identify x-index of image point iz = ii % nz for isrcrec in range(nsnr): itravisrcrecii = itravii[isrcrec] travdisrcrecii = travdii[isrcrec] 
sixisrcrec = six[isrcrec] rixisrcrec = rix[isrcrec] # extract source and receiver angle angle_src = angle_srcs[isrcrec // nr] angle_rec = angle_recs[isrcrec % nr] abs_angle_src = abs(angle_src) abs_angle_rec = abs(angle_rec) abs_angle_src_rec = abs(angle_src + angle_rec) aptap = 1.0 # angle apertures checks if ( abs_angle_src < angleaperturemax and abs_angle_rec < angleaperturemax and abs_angle_src_rec < snellmax ): if abs_angle_src >= angleaperturemin: # extract source angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_src - angleaperturemin) // dangleaperture ) ] ) if abs_angle_rec >= angleaperturemin: # extract receiver angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_rec - angleaperturemin) // dangleaperture ) ] ) if abs_angle_src_rec >= snellmin: # extract snell taper value aptap = ( aptap * aperturetap[ int(20 * (abs_angle_src_rec - snellmin) // dsnell) ] ) # aperture check aperture = abs(sixisrcrec - rixisrcrec) / iz if aperture < aperturemax: if aperture >= aperturemin: # extract aperture taper value aptap = ( aptap * aperturetap[ int(20 * ((aperture - aperturemin) // daperture)) ] ) # time limit check if 0 <= itravisrcrecii < nt - 1: # assign values y[ii] += ( ( x[isrcrec, itravisrcrecii] * (1 - travdisrcrecii) + x[isrcrec, itravisrcrecii + 1] * travdisrcrecii ) * ampii[isrcrec] * aptap ) return y @staticmethod def _ampsrcrec_kirch_matvec( x: NDArray, y: NDArray, ns: int, nr: int, nt: int, ni: int, dt: float, vel: NDArray, trav_srcs: NDArray, trav_recs: NDArray, dist_srcs: NDArray, dist_recs: NDArray, aperturemin: float, aperturemax: float, aperturetap: NDArray, nz: int, six: NDArray, rix: NDArray, angleaperturemin: float, angleaperturemax: float, angles_srcs: NDArray, angles_recs: NDArray, snellmin: float, snellmax: float, maxdist: float, ) -> NDArray: daperture = aperturemax - aperturemin dangleaperture = angleaperturemax - angleaperturemin dsnell = snellmax - snellmin for isrc in prange(ns): 
travisrc = trav_srcs[:, isrc] distisrc = dist_srcs[:, isrc] angleisrc = angles_srcs[:, isrc] for irec in range(nr): travirec = trav_recs[:, irec] trav = travisrc + travirec itrav = (trav / dt).astype("int32") travd = trav / dt - itrav distirec = dist_recs[:, irec] angleirec = angles_recs[:, irec] dist = distisrc + distirec amp = np.abs(np.cos(angleisrc) + np.cos(angleirec)) / (dist + maxdist) sixisrcrec = six[isrc * nr + irec] rixisrcrec = rix[isrc * nr + irec] for ii in range(ni): itravii = itrav[ii] travdii = travd[ii] damp = amp[ii] / vel[ii] # extract source and receiver angle at given image point angle_src = angleisrc[ii] angle_rec = angleirec[ii] abs_angle_src = abs(angle_src) abs_angle_rec = abs(angle_rec) abs_angle_src_rec = abs(angle_src + angle_rec) aptap = 1.0 # angle apertures checks if ( abs_angle_src < angleaperturemax and abs_angle_rec < angleaperturemax and abs_angle_src_rec < snellmax ): if abs_angle_src >= angleaperturemin: # extract source angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_src - angleaperturemin) // dangleaperture ) ] ) if abs_angle_rec >= angleaperturemin: # extract receiver angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_rec - angleaperturemin) // dangleaperture ) ] ) if abs_angle_src_rec >= snellmin: # extract snell taper value aptap = ( aptap * aperturetap[ int(20 * (abs_angle_src_rec - snellmin) // dsnell) ] ) # identify x-index of image point iz = ii % nz # aperture check aperture = abs(sixisrcrec - rixisrcrec) / iz if aperture < aperturemax: if aperture >= aperturemin: # extract aperture taper value aptap = ( aptap * aperturetap[ int( 20 * ((aperture - aperturemin) // daperture) ) ] ) # time limit check if 0 <= itravii < nt - 1: y[isrc * nr + irec, itravii] += ( x[ii] * (1 - travdii) * damp * aptap ) y[isrc * nr + irec, itravii + 1] += ( x[ii] * travdii * damp * aptap ) return y @staticmethod def _ampsrcrec_kirch_rmatvec( x: NDArray, y: NDArray, ns: int, nr: int, 
nt: int, ni: int, dt: float, vel: NDArray, trav_srcs: NDArray, trav_recs: NDArray, dist_srcs: NDArray, dist_recs: NDArray, aperturemin: float, aperturemax: float, aperturetap: NDArray, nz: int, six: NDArray, rix: NDArray, angleaperturemin: float, angleaperturemax: float, angles_srcs: NDArray, angles_recs: NDArray, snellmin: float, snellmax: float, maxdist: float, ) -> NDArray: daperture = aperturemax - aperturemin dangleaperture = angleaperturemax - angleaperturemin dsnell = snellmax - snellmin for ii in prange(ni): trav_srcsii = trav_srcs[ii] trav_recsii = trav_recs[ii] dist_srcsii = dist_srcs[ii] dist_recsii = dist_recs[ii] velii = vel[ii] angle_srcsii = angles_srcs[ii] angle_recsii = angles_recs[ii] # identify x-index of image point iz = ii % nz for isrc in range(ns): trav_srcii = trav_srcsii[isrc] dist_srcii = dist_srcsii[isrc] angle_src = angle_srcsii[isrc] for irec in range(nr): trav_recii = trav_recsii[irec] travii = trav_srcii + trav_recii itravii = int(travii / dt) travdii = travii / dt - itravii dist_recii = dist_recsii[irec] angle_rec = angle_recsii[irec] dist = dist_srcii + dist_recii ampii = np.abs(np.cos(angle_src) + np.cos(angle_rec)) / ( dist + maxdist ) damp = ampii / velii sixisrcrec = six[isrc * nr + irec] rixisrcrec = rix[isrc * nr + irec] abs_angle_src = abs(angle_src) abs_angle_rec = abs(angle_rec) abs_angle_src_rec = abs(angle_src + angle_rec) aptap = 1.0 # angle apertures checks if ( abs_angle_src < angleaperturemax and abs_angle_rec < angleaperturemax and abs_angle_src_rec < snellmax ): if abs_angle_src >= angleaperturemin: # extract source angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_src - angleaperturemin) // dangleaperture ) ] ) if abs_angle_rec >= angleaperturemin: # extract receiver angle aperture taper value aptap = ( aptap * aperturetap[ int( 20 * (abs_angle_rec - angleaperturemin) // dangleaperture ) ] ) if abs_angle_src_rec >= snellmin: # extract snell taper value aptap = ( aptap * aperturetap[ 
int(20 * (abs_angle_src_rec - snellmin) // dsnell) ] ) # aperture check aperture = abs(sixisrcrec - rixisrcrec) / iz if aperture < aperturemax: if aperture >= aperturemin: # extract aperture taper value aptap = ( aptap * aperturetap[ int( 20 * ((aperture - aperturemin) // daperture) ) ] ) # time limit check if 0 <= itravii < nt - 1: # assign values y[ii] += ( ( x[isrc * nr + irec, itravii] * (1 - travdii) + x[isrc * nr + irec, itravii + 1] * travdii ) * damp * aptap ) return y def _register_multiplications(self, engine: str) -> None: if engine not in ["numpy", "numba"]: raise KeyError("engine must be numpy or numba") if engine == "numba" and jit is not None: # numba numba_opts = dict( nopython=True, nogil=True, parallel=parallel ) # fastmath=True, if self.dynamic and self.travsrcrec: self._kirch_matvec = jit(**numba_opts)(self._ampsrcrec_kirch_matvec) self._kirch_rmatvec = jit(**numba_opts)(self._ampsrcrec_kirch_rmatvec) elif self.dynamic and not self.travsrcrec: self._kirch_matvec = jit(**numba_opts)(self._amp_kirch_matvec) self._kirch_rmatvec = jit(**numba_opts)(self._amp_kirch_rmatvec) elif self.travsrcrec: self._kirch_matvec = jit(**numba_opts)(self._travsrcrec_kirch_matvec) self._kirch_rmatvec = jit(**numba_opts)(self._travsrcrec_kirch_rmatvec) elif not self.travsrcrec: self._kirch_matvec = jit(**numba_opts)(self._trav_kirch_matvec) self._kirch_rmatvec = jit(**numba_opts)(self._trav_kirch_rmatvec) else: if engine == "numba" and jit is None: logging.warning(jit_message) if self.dynamic and self.travsrcrec: self._kirch_matvec = self._ampsrcrec_kirch_matvec self._kirch_rmatvec = self._ampsrcrec_kirch_rmatvec elif self.dynamic and not self.travsrcrec: self._kirch_matvec = self._amp_kirch_matvec self._kirch_rmatvec = self._amp_kirch_rmatvec elif self.travsrcrec: self._kirch_matvec = self._travsrcrec_kirch_matvec self._kirch_rmatvec = self._travsrcrec_kirch_rmatvec elif not self.travsrcrec: self._kirch_matvec = self._trav_kirch_matvec self._kirch_rmatvec = 
self._trav_kirch_rmatvec @reshaped def _matvec(self, x: NDArray) -> NDArray: y = np.zeros((self.nsnr, self.nt), dtype=self.dtype) if self.dynamic and self.travsrcrec: inputs = ( x.ravel(), y, self.ns, self.nr, self.nt, self.ni, self.dt, self.vel, self.trav_srcs, self.trav_recs, self.dist_srcs, self.dist_recs, self.aperture[0], self.aperture[1], self.aperturetap, self.nz, self.six, self.rix, self.angleaperture[0], self.angleaperture[1], self.angle_srcs, self.angle_recs, self.snell[0], self.snell[1], self.maxdist, ) elif self.dynamic and not self.travsrcrec: inputs = ( x.ravel(), y, self.nsnr, self.nt, self.ni, self.itrav, self.travd, self.amp, self.aperture[0], self.aperture[1], self.aperturetap, self.nz, self.six, self.rix, self.angleaperture[0], self.angleaperture[1], self.angle_srcs, self.angle_recs, self.snell[0], self.snell[1], ) elif not self.dynamic and self.travsrcrec: inputs = ( x.ravel(), y, self.ns, self.nr, self.nt, self.ni, self.dt, self.trav_srcs, self.trav_recs, ) elif not self.dynamic and not self.travsrcrec: inputs = (x.ravel(), y, self.nsnr, self.nt, self.ni, self.itrav, self.travd) y = self._kirch_matvec(*inputs) y = self.cop._matvec(y.ravel()) return y @reshaped def _rmatvec(self, x: NDArray) -> NDArray: x = self.cop._rmatvec(x.ravel()) x = x.reshape(self.nsnr, self.nt) y = np.zeros(self.ni, dtype=self.dtype) if self.dynamic and self.travsrcrec: inputs = ( x, y, self.ns, self.nr, self.nt, self.ni, self.dt, self.vel, self.trav_srcs, self.trav_recs, self.dist_srcs, self.dist_recs, self.aperture[0], self.aperture[1], self.aperturetap, self.nz, self.six, self.rix, self.angleaperture[0], self.angleaperture[1], self.angle_srcs, self.angle_recs, self.snell[0], self.snell[1], self.maxdist, ) elif self.dynamic and not self.travsrcrec: inputs = ( x, y, self.nsnr, self.nt, self.ni, self.itrav, self.travd, self.amp, self.aperture[0], self.aperture[1], self.aperturetap, self.nz, self.six, self.rix, self.angleaperture[0], self.angleaperture[1], 
self.angle_srcs, self.angle_recs, self.snell[0], self.snell[1], ) elif not self.dynamic and self.travsrcrec: inputs = ( x, y, self.ns, self.nr, self.nt, self.ni, self.dt, self.trav_srcs, self.trav_recs, ) elif not self.dynamic and not self.travsrcrec: inputs = (x, y, self.nsnr, self.nt, self.ni, self.itrav, self.travd) y = self._kirch_rmatvec(*inputs) return y
54,838
38.059117
117
py
pylops
pylops-master/pylops/waveeqprocessing/mdd.py
__all__ = [ "MDC", "MDD", ] import logging from typing import Optional, Tuple, Union import numpy as np from scipy.signal import filtfilt from scipy.sparse.linalg import lsqr from pylops import Diagonal, Identity, LinearOperator, Transpose from pylops.optimization.basic import cgls from pylops.optimization.leastsquares import preconditioned_inversion from pylops.signalprocessing import FFT, Fredholm1 from pylops.utils import dottest as Dottest from pylops.utils.backend import ( get_array_module, get_fftconvolve, get_module_name, to_cupy_conditional, ) from pylops.utils.typing import NDArray def _MDC( G: NDArray, nt: int, nv: int, dt: float = 1.0, dr: float = 1.0, twosided: bool = True, saveGt: bool = True, conj: bool = False, prescaled: bool = False, _Identity=Identity, _Transpose=Transpose, _FFT=FFT, _Fredholm1=Fredholm1, args_Identity: Optional[dict] = None, args_FFT: Optional[dict] = None, args_Identity1: Optional[dict] = None, args_FFT1: Optional[dict] = None, args_Fredholm1: Optional[dict] = None, ) -> LinearOperator: r"""Multi-dimensional convolution. Used to be able to provide operators from different libraries (e.g., pylops-distributed) to MDC. It operates in the same way as public method (MDC) but has additional input parameters allowing passing a different operator and additional arguments to be passed to such operator. 
""" if args_Identity is None: args_Identity = {} if args_FFT is None: args_FFT = {} if args_Identity1 is None: args_Identity1 = {} if args_FFT1 is None: args_FFT1 = {} if args_Fredholm1 is None: args_Fredholm1 = {} if twosided and nt % 2 == 0: raise ValueError("nt must be odd number") # find out dtype of G dtype = G[0, 0, 0].dtype rdtype = np.real(np.ones(1, dtype=dtype)).dtype # create Fredholm operator if prescaled: Frop = _Fredholm1(G, nv, saveGt=saveGt, dtype=dtype, **args_Fredholm1) else: Frop = _Fredholm1( dr * dt * np.sqrt(nt) * G, nv, saveGt=saveGt, dtype=dtype, **args_Fredholm1 ) if conj: Frop = Frop.conj() # create FFT operators nfmax, ns, nr = G.shape # ensure that nfmax is not bigger than allowed nfft = int(np.ceil((nt + 1) / 2)) if nfmax > nfft: nfmax = nfft logging.warning("nfmax set equal to ceil[(nt+1)/2=%d]", nfmax) Fop = _FFT( dims=(nt, nr, nv), axis=0, real=True, ifftshift_before=twosided, dtype=rdtype, **args_FFT ) F1op = _FFT( dims=(nt, ns, nv), axis=0, real=True, ifftshift_before=False, dtype=rdtype, **args_FFT1 ) # create Identity operator to extract only relevant frequencies Iop = _Identity( N=nfmax * nr * nv, M=nfft * nr * nv, inplace=True, dtype=dtype, **args_Identity ) I1op = _Identity( N=nfmax * ns * nv, M=nfft * ns * nv, inplace=True, dtype=dtype, **args_Identity1 ) F1opH = F1op.H I1opH = I1op.H # create MDC operator MDCop = F1opH * I1opH * Frop * Iop * Fop # force dtype to be real (as FFT operators assume real inputs and outputs) MDCop.dtype = rdtype return MDCop def MDC( G: NDArray, nt: int, nv: int, dt: float = 1.0, dr: float = 1.0, twosided: bool = True, fftengine: str = "numpy", saveGt: bool = True, conj: bool = False, usematmul: bool = False, prescaled: bool = False, name: str = "M", ) -> LinearOperator: r"""Multi-dimensional convolution. Apply multi-dimensional convolution between two datasets. 
Model and data should be provided after flattening 2- or 3-dimensional arrays of size :math:`[n_t \times n_r \;(\times n_{vs})]` and :math:`[n_t \times n_s \;(\times n_{vs})]` (or :math:`2n_t-1` for ``twosided=True``), respectively. Parameters ---------- G : :obj:`numpy.ndarray` Multi-dimensional convolution kernel in frequency domain of size :math:`[n_{f_\text{max}} \times n_s \times n_r]` nt : :obj:`int` Number of samples along time axis for model and data (note that this must be equal to :math:`2n_t-1` when working with ``twosided=True``. nv : :obj:`int` Number of samples along virtual source axis dt : :obj:`float`, optional Sampling of time integration axis :math:`\Delta t` dr : :obj:`float`, optional Sampling of receiver integration axis :math:`\Delta r` twosided : :obj:`bool`, optional MDC operator has both negative and positive time (``True``) or only positive (``False``) fftengine : :obj:`str`, optional Engine used for fft computation (``numpy``, ``scipy`` or ``fftw``) saveGt : :obj:`bool`, optional Save ``G`` and ``G.H`` to speed up the computation of adjoint of :class:`pylops.signalprocessing.Fredholm1` (``True``) or create ``G.H`` on-the-fly (``False``) Note that ``saveGt=True`` will be faster but double the amount of required memory conj : :obj:`str`, optional Perform Fredholm integral computation with complex conjugate of ``G`` usematmul : :obj:`bool`, optional Use :func:`numpy.matmul` (``True``) or for-loop with :func:`numpy.dot` (``False``) in :py:class:`pylops.signalprocessing.Fredholm1` operator. Refer to Fredholm1 documentation for details. prescaled : :obj:`bool`, optional Apply scaling to kernel (``False``) or not (``False``) when performing spatial and temporal summations. In case ``prescaled=True``, the kernel is assumed to have been pre-scaled when passed to the MDC routine. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Raises ------ ValueError If ``nt`` is even and ``twosided=True`` See Also -------- MDD : Multi-dimensional deconvolution Notes ----- The so-called multi-dimensional convolution (MDC) is a chained operator [1]_. It is composed of a forward Fourier transform, a multi-dimensional integration, and an inverse Fourier transform: .. math:: y(t, s, v) = \mathscr{F}^{-1} \Big( \int_S G(f, s, r) \mathscr{F}(x(t, r, v))\,\mathrm{d}r \Big) which is discretized as follows: .. math:: y(t, s, v) = \sqrt{n_t} \Delta t \Delta r\mathscr{F}^{-1} \Big( \sum_{i_r=0}^{n_r} G(f, s, i_r) \mathscr{F}(x(t, i_r, v)) \Big) where :math:`\sqrt{n_t} \Delta t \Delta r` is not applied if ``prescaled=True``. This operation can be discretized and performed by means of a linear operator .. math:: \mathbf{D}= \mathbf{F}^H \mathbf{G} \mathbf{F} where :math:`\mathbf{F}` is the Fourier transform applied along the time axis and :math:`\mathbf{G}` is the multi-dimensional convolution kernel. .. [1] Wapenaar, K., van der Neut, J., Ruigrok, E., Draganov, D., Hunziker, J., Slob, E., Thorbecke, J., and Snieder, R., "Seismic interferometry by crosscorrelation and by multi-dimensional deconvolution: a systematic comparison", Geophysical Journal International, vol. 185, pp. 1335-1364. 2011. 
""" MOp = _MDC( G, nt, nv, dt=dt, dr=dr, twosided=twosided, saveGt=saveGt, conj=conj, prescaled=prescaled, args_FFT={"engine": fftengine}, args_Fredholm1={"usematmul": usematmul}, ) MOp.name = name return MOp def MDD( G: NDArray, d: NDArray, dt: float = 0.004, dr: float = 1.0, nfmax: Optional[int] = None, wav: Optional[NDArray] = None, twosided: bool = True, causality_precond: bool = False, adjoint: bool = False, psf: bool = False, dottest: bool = False, saveGt: bool = True, add_negative: bool = True, smooth_precond: int = 0, fftengine: str = "numpy", **kwargs_solver ) -> Union[ Tuple[NDArray, NDArray], Tuple[NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray, NDArray], ]: r"""Multi-dimensional deconvolution. Solve multi-dimensional deconvolution problem using :py:func:`scipy.sparse.linalg.lsqr` iterative solver. Parameters ---------- G : :obj:`numpy.ndarray` Multi-dimensional convolution kernel in time domain of size :math:`[n_s \times n_r \times n_t]` for ``twosided=False`` or ``twosided=True`` and ``add_negative=True`` (with only positive times) or size :math:`[n_s \times n_r \times 2n_t-1]` for ``twosided=True`` and ``add_negative=False`` (with both positive and negative times) d : :obj:`numpy.ndarray` Data in time domain :math:`[n_s \,(\times n_{vs}) \times n_t]` if ``twosided=False`` or ``twosided=True`` and ``add_negative=True`` (with only positive times) or size :math:`[n_s \,(\times n_{vs}) \times 2n_t-1]` if ``twosided=True`` dt : :obj:`float`, optional Sampling of time integration axis dr : :obj:`float`, optional Sampling of receiver integration axis nfmax : :obj:`int`, optional Index of max frequency to include in deconvolution process wav : :obj:`numpy.ndarray`, optional Wavelet to convolve to the inverted model and psf (must be centered around its index in the middle of the array). If ``None``, the outputs of the inversion are returned directly. 
twosided : :obj:`bool`, optional MDC operator and data both negative and positive time (``True``) or only positive (``False``) add_negative : :obj:`bool`, optional Add negative side to MDC operator and data (``True``) or not (``False``)- operator and data are already provided with both positive and negative sides. To be used only with ``twosided=True``. causality_precond : :obj:`bool`, optional Apply causality mask (``True``) or not (``False``) smooth_precond : :obj:`int`, optional Lenght of smoothing to apply to causality preconditioner adjoint : :obj:`bool`, optional Compute and return adjoint(s) psf : :obj:`bool`, optional Compute and return Point Spread Function (PSF) and its inverse dottest : :obj:`bool`, optional Apply dot-test saveGt : :obj:`bool`, optional Save ``G`` and ``G.H`` to speed up the computation of adjoint of :class:`pylops.signalprocessing.Fredholm1` (``True``) or create ``G.H`` on-the-fly (``False``) Note that ``saveGt=True`` will be faster but double the amount of required memory fftengine : :obj:`str`, optional Engine used for fft computation (``numpy``, ``scipy`` or ``fftw``) **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.cg` and :py:func:`pylops.optimization.solver.cg` are used as default for numpy and cupy `data`, respectively) Returns ------- minv : :obj:`numpy.ndarray` Inverted model of size :math:`[n_r \,(\times n_{vs}) \times n_t]` for ``twosided=False`` or :math:`[n_r \,(\times n_vs) \times 2n_t-1]` for ``twosided=True`` madj : :obj:`numpy.ndarray` Adjoint model of size :math:`[n_r \,(\times n_{vs}) \times n_t]` for ``twosided=False`` or :math:`[n_r \,(\times n_r) \times 2n_t-1]` for ``twosided=True`` psfinv : :obj:`numpy.ndarray` Inverted psf of size :math:`[n_r \times n_r \times n_t]` for ``twosided=False`` or :math:`[n_r \times n_r \times 2n_t-1]` for ``twosided=True`` psfadj : :obj:`numpy.ndarray` Adjoint psf of size :math:`[n_r \times n_r \times n_t]` for ``twosided=False`` or 
:math:`[n_r \times n_r \times 2n_t-1]` for ``twosided=True`` See Also -------- MDC : Multi-dimensional convolution Notes ----- Multi-dimensional deconvolution (MDD) is a mathematical ill-solved problem, well-known in the image processing and geophysical community [1]_. MDD aims at removing the effects of a Multi-dimensional Convolution (MDC) kernel or the so-called blurring operator or point-spread function (PSF) from a given data. It can be written as .. math:: \mathbf{d}= \mathbf{D} \mathbf{m} or, equivalently, by means of its normal equation .. math:: \mathbf{m}= (\mathbf{D}^H\mathbf{D})^{-1} \mathbf{D}^H\mathbf{d} where :math:`\mathbf{D}^H\mathbf{D}` is the PSF. .. [1] Wapenaar, K., van der Neut, J., Ruigrok, E., Draganov, D., Hunziker, J., Slob, E., Thorbecke, J., and Snieder, R., "Seismic interferometry by crosscorrelation and by multi-dimensional deconvolution: a systematic comparison", Geophyscial Journal International, vol. 185, pp. 1335-1364. 2011. """ ncp = get_array_module(d) ns, nr, nt = G.shape if len(d.shape) == 2: nv = 1 else: nv = d.shape[1] if twosided: if add_negative: nt2 = 2 * nt - 1 else: nt2 = nt nt = (nt2 + 1) // 2 nfmax_allowed = int(np.ceil((nt2 + 1) / 2)) else: nt2 = nt nfmax_allowed = nt # Fix nfmax to be at maximum equal to half of the size of fft samples if nfmax is None or nfmax > nfmax_allowed: nfmax = nfmax_allowed logging.warning("nfmax set equal to ceil[(nt+1)/2=%d]", nfmax) # Add negative part to data and model if twosided and add_negative: G = np.concatenate((ncp.zeros((ns, nr, nt - 1)), G), axis=-1) d = np.concatenate((np.squeeze(np.zeros((ns, nv, nt - 1))), d), axis=-1) # Bring kernel to frequency domain Gfft = np.fft.rfft(G, nt2, axis=-1) Gfft = Gfft[..., :nfmax] # Bring frequency/time to first dimension Gfft = np.moveaxis(Gfft, -1, 0) d = np.moveaxis(d, -1, 0) if psf: G = np.moveaxis(G, -1, 0) # Define MDC linear operator MDCop = MDC( Gfft, nt2, nv=nv, dt=dt, dr=dr, twosided=twosided, saveGt=saveGt, fftengine=fftengine, ) if 
psf: PSFop = MDC( Gfft, nt2, nv=nr, dt=dt, dr=dr, twosided=twosided, saveGt=saveGt, fftengine=fftengine, ) if dottest: Dottest( MDCop, nt2 * ns * nv, nt2 * nr * nv, verb=True, backend=get_module_name(ncp) ) if psf: Dottest( PSFop, nt2 * ns * nr, nt2 * nr * nr, verb=True, backend=get_module_name(ncp), ) # Adjoint if adjoint: madj = MDCop.H * d.ravel() madj = np.squeeze(madj.reshape(nt2, nr, nv)) madj = np.moveaxis(madj, 0, -1) if psf: psfadj = PSFop.H * G.ravel() psfadj = np.squeeze(psfadj.reshape(nt2, nr, nr)) psfadj = np.moveaxis(psfadj, 0, -1) # Inverse if twosided and causality_precond: P = np.ones((nt2, nr, nv)) P[: nt - 1] = 0 if smooth_precond > 0: P = filtfilt(np.ones(smooth_precond) / smooth_precond, 1, P, axis=0) P = to_cupy_conditional(d, P) Pop = Diagonal(P) minv = preconditioned_inversion(MDCop, d.ravel(), Pop, **kwargs_solver)[0] else: if ncp == np and "callback" not in kwargs_solver: minv = lsqr(MDCop, d.ravel(), **kwargs_solver)[0] else: minv = cgls( MDCop, d.ravel(), ncp.zeros(int(MDCop.shape[1]), dtype=MDCop.dtype), **kwargs_solver )[0] minv = np.squeeze(minv.reshape(nt2, nr, nv)) minv = np.moveaxis(minv, 0, -1) if wav is not None: wav1 = wav.copy() for _ in range(minv.ndim - 1): wav1 = wav1[ncp.newaxis] minv = get_fftconvolve(d)(minv, wav1, mode="same") if psf: if ncp == np: psfinv = lsqr(PSFop, G.ravel(), **kwargs_solver)[0] else: psfinv = cgls( PSFop, G.ravel(), ncp.zeros(int(PSFop.shape[1]), dtype=PSFop.dtype), **kwargs_solver )[0] psfinv = np.squeeze(psfinv.reshape(nt2, nr, nr)) psfinv = np.moveaxis(psfinv, 0, -1) if wav is not None: wav1 = wav.copy() for _ in range(psfinv.ndim - 1): wav1 = wav1[np.newaxis] psfinv = get_fftconvolve(d)(psfinv, wav1, mode="same") if adjoint and psf: return minv, madj, psfinv, psfadj elif adjoint: return minv, madj elif psf: return minv, psfinv else: return minv
17,066
32.011605
95
py
pylops
pylops-master/pylops/waveeqprocessing/seismicinterpolation.py
__all__ = ["SeismicInterpolation"]

import logging
from typing import List, Optional, Sequence, Tuple, Union

import numpy as np

from pylops import Laplacian, Restriction, SecondDerivative
from pylops.optimization.leastsquares import regularized_inversion
from pylops.optimization.sparsity import fista
from pylops.signalprocessing import (
    FFT2D,
    FFTND,
    ChirpRadon2D,
    ChirpRadon3D,
    Interp,
    Radon2D,
    Radon3D,
    Sliding2D,
    Sliding3D,
)
from pylops.utils.dottest import dottest as Dottest
from pylops.utils.typing import InputDimsLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def SeismicInterpolation(
    data: NDArray,
    nrec: Union[int, Tuple[int, int]],
    iava: Union[List[Union[int, float]], NDArray],
    iava1: Optional[Union[List[Union[int, float]], NDArray]] = None,
    kind: str = "fk",
    nffts: Optional[Union[int, InputDimsLike]] = None,
    sampling: Optional[Sequence[float]] = None,
    spataxis: Optional[NDArray] = None,
    spat1axis: Optional[NDArray] = None,
    taxis: Optional[NDArray] = None,
    paxis: Optional[NDArray] = None,
    p1axis: Optional[NDArray] = None,
    centeredh: bool = True,
    nwins: InputDimsLike = None,
    nwin: InputDimsLike = None,
    nover: InputDimsLike = None,
    engine: str = "numba",
    dottest: bool = False,
    **kwargs_solver,
) -> Tuple[NDArray, NDArray, NDArray]:
    r"""Seismic interpolation (or regularization).

    Interpolate seismic data from irregular to regular spatial grid.
    Depending on the size of the input ``data``, interpolation is either
    2- or 3-dimensional. In case of 3-dimensional interpolation,
    data can be irregularly sampled in either one or both spatial directions.

    Parameters
    ----------
    data : :obj:`np.ndarray`
        Irregularly sampled seismic data of size
        :math:`[n_{r_y} \,(\times n_{r_x} \times n_t)]`
    nrec : :obj:`int` or :obj:`tuple`
        Number of elements in the regularly sampled (reconstructed) spatial
        array, :math:`n_{R_y}` for 2-dimensional data and
        :math:`(n_{R_y}, n_{R_x})` for 3-dimensional data
    iava : :obj:`list` or :obj:`numpy.ndarray`
        Integer (or floating) indices of locations of available samples in
        first dimension of regularly sampled spatial grid of interpolated
        signal. The :class:`pylops.basicoperators.Restriction` operator is
        used in case of integer indices, while the
        :class:`pylops.signalprocessing.Interp` operator is used in
        case of floating indices.
    iava1 : :obj:`list` or :obj:`numpy.ndarray`, optional
        Integer (or floating) indices of locations of available samples in
        second dimension of regularly sampled spatial grid of interpolated
        signal. Can be used only in case of 3-dimensional data.
    kind : :obj:`str`, optional
        Type of inversion: ``fk`` (default), ``spatial``, ``radon-linear``,
        ``chirpradon-linear``, ``radon-parabolic`` , ``radon-hyperbolic``,
        ``sliding``, or ``chirp-sliding``
    nffts : :obj:`int` or :obj:`tuple`, optional
        Number of samples in Fourier Transform for each direction.
        Required if ``kind='fk'``
    sampling : :obj:`tuple`, optional
        Sampling steps ``dy`` (, ``dx``) and ``dt``. Required if
        ``kind='fk'`` or ``kind='radon-linear'``
    spataxis : :obj:`np.ndarray`, optional
        First spatial axis. Required for ``kind='radon-linear'``,
        ``kind='chirpradon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'``, can also be provided instead of
        ``sampling`` for ``kind='fk'``
    spat1axis : :obj:`np.ndarray`, optional
        Second spatial axis. Required for ``kind='radon-linear'``,
        ``kind='chirpradon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'``, can also be provided instead of
        ``sampling`` for ``kind='fk'``
    taxis : :obj:`np.ndarray`, optional
        Time axis. Required for ``kind='radon-linear'``,
        ``kind='chirpradon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'``, can also be provided instead of
        ``sampling`` for ``kind='fk'``
    paxis : :obj:`np.ndarray`, optional
        First Radon axis. Required for ``kind='radon-linear'``,
        ``kind='chirpradon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'``, ``kind='sliding'``, and
        ``kind='chirp-sliding'``
    p1axis : :obj:`np.ndarray`, optional
        Second Radon axis. Required for ``kind='radon-linear'``,
        ``kind='chirpradon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'``, ``kind='sliding'``, and
        ``kind='chirp-sliding'``
    centeredh : :obj:`bool`, optional
        Assume centered spatial axis (``True``) or not (``False``).
        Required for ``kind='radon-linear'``, ``kind='radon-parabolic'``
        and ``kind='radon-hyperbolic'``
    nwins : :obj:`int` or :obj:`tuple`, optional
        Number of windows. Required for ``kind='sliding'`` and
        ``kind='chirp-sliding'``
    nwin : :obj:`int` or :obj:`tuple`, optional
        Number of samples of window. Required for ``kind='sliding'`` and
        ``kind='chirp-sliding'``
    nover : :obj:`int` or :obj:`tuple`, optional
        Number of samples of overlapping part of window. Required for
        ``kind='sliding'`` and ``kind='chirp-sliding'``
    engine : :obj:`str`, optional
        Engine used for Radon computations (``numpy/numba`` for ``Radon2D``
        and ``Radon3D`` or ``numpy/fftw`` for ``ChirpRadon2D`` and
        ``ChirpRadon3D``)
    dottest : :obj:`bool`, optional
        Apply dot-test
    **kwargs_solver
        Arbitrary keyword arguments for
        :py:func:`pylops.optimization.leastsquares.regularized_inversion`
        solver if ``kind='spatial'`` or
        :py:func:`pylops.optimization.sparsity.FISTA` solver otherwise

    Returns
    -------
    recdata : :obj:`np.ndarray`
        Reconstructed data of size :math:`[n_{R_y}\,(\times n_{R_x}
        \times n_t)]`
    recprec : :obj:`np.ndarray`
        Reconstructed data in the sparse or preconditioned domain in case of
        ``kind='fk'``, ``kind='radon-linear'``, ``kind='radon-parabolic'``,
        ``kind='radon-hyperbolic'`` and ``kind='sliding'``
    cost : :obj:`np.ndarray`
        Cost function norm

    Raises
    ------
    KeyError
        If ``kind`` is neither ``spatial``, ``fk``, ``radon-linear``,
        ``radon-parabolic``, ``radon-hyperbolic`` nor ``sliding``

    Notes
    -----
    The problem of seismic data interpolation (or regularization) can be
    formally written as

    .. math:: \mathbf{y} = \mathbf{R} \mathbf{x}

    where a restriction or interpolation operator is applied along the
    spatial direction(s). Here :math:`\mathbf{y} = [\mathbf{y}_{R_1}^T,
    \mathbf{y}_{R_2}^T,\ldots, \mathbf{y}_{R_N}^T]^T` where each vector
    :math:`\mathbf{y}_{R_i}` contains all time samples recorded in the
    seismic data at the specific receiver :math:`R_i`. Similarly,
    :math:`\mathbf{x} = [\mathbf{x}_{r_1}^T, \mathbf{x}_{r_2}^T,\ldots,
    \mathbf{x}_{r_M}^T]^T`, contains all traces at the regularly and finely
    sampled receiver locations :math:`r_i`.

    Several alternative approaches can be taken to solve such a problem. They
    mostly differ in the choice of the regularization (or preconditioning)
    used to mitigate the ill-posedness of the problem:

    * ``spatial``: least-squares inversion in the original time-space domain
      with an additional spatial smoothing regularization term, corresponding
      to the cost function
      :math:`J = ||\mathbf{y} - \mathbf{R} \mathbf{x}||_2 +
      \epsilon_\nabla \nabla ||\mathbf{x}||_2` where :math:`\nabla` is
      a second order space derivative implemented via
      :class:`pylops.basicoperators.SecondDerivative` in 2-dimensional case
      and :class:`pylops.basicoperators.Laplacian` in 3-dimensional case
    * ``fk``: L1 inversion in frequency-wavenumber preconditioned domain
      corresponding to the cost function
      :math:`J = ||\mathbf{y} - \mathbf{R} \mathbf{F} \mathbf{x}||_2` where
      :math:`\mathbf{F}` is frequency-wavenumber transform implemented via
      :class:`pylops.signalprocessing.FFT2D` in 2-dimensional case and
      :class:`pylops.signalprocessing.FFTND` in 3-dimensional case
    * ``radon-linear``: L1 inversion in linear Radon preconditioned domain
      using the same cost function as ``fk`` but with :math:`\mathbf{F}`
      being a Radon transform implemented via
      :class:`pylops.signalprocessing.Radon2D` in 2-dimensional case and
      :class:`pylops.signalprocessing.Radon3D` in 3-dimensional case
    * ``radon-parabolic``: L1 inversion in parabolic Radon
      preconditioned domain
    * ``radon-hyperbolic``: L1 inversion in hyperbolic Radon
      preconditioned domain
    * ``sliding``: L1 inversion in sliding-linear Radon
      preconditioned domain using the same cost function as ``fk``
      but with :math:`\mathbf{F}` being a sliding Radon transform implemented
      via :class:`pylops.signalprocessing.Sliding2D` in 2-dimensional case
      and :class:`pylops.signalprocessing.Sliding3D` in 3-dimensional case

    """
    dtype = data.dtype
    ndims = data.ndim
    if ndims == 1 or ndims > 3:
        raise ValueError("data must have 2 or 3 dimensions")
    if ndims == 2:
        dimsd = data.shape
        dims = (nrec, dimsd[1])
    else:
        dimsd = data.shape
        dims = (nrec[0], nrec[1], dimsd[2])

    # sampling steps from the provided axes (first-difference of each axis)
    if taxis is not None:
        dt = taxis[1] - taxis[0]
    if spataxis is not None:
        dspat = np.abs(spataxis[1] - spataxis[0])
    if spat1axis is not None:
        dspat1 = np.abs(spat1axis[1] - spat1axis[0])

    # create restriction/interpolation operator: floating indices require
    # linear interpolation, integer indices a pure restriction
    if iava.dtype == float:
        Rop = Interp(dims, iava, axis=0, kind="linear", dtype=dtype)
        if ndims == 3 and iava1 is not None:
            dims1 = (len(iava), nrec[1], dimsd[2])
            Rop1 = Interp(dims1, iava1, axis=1, kind="linear", dtype=dtype)
            Rop = Rop1 * Rop
    else:
        Rop = Restriction(dims, iava, axis=0, dtype=dtype)
        if ndims == 3 and iava1 is not None:
            dims1 = (len(iava), nrec[1], dimsd[2])
            Rop1 = Restriction(dims1, iava1, axis=1, dtype=dtype)
            Rop = Rop1 * Rop

    # create other operators for inversion
    if kind == "spatial":
        prec = False
        dotcflag = 0
        if ndims == 3 and iava1 is not None:
            Regop = Laplacian(dims=dims, axes=(0, 1), dtype=dtype)
        else:
            Regop = SecondDerivative(dims, axis=0, dtype=dtype)
        SIop = Rop
    elif kind == "fk":
        prec = True
        dimsp = nffts
        dotcflag = 1
        if ndims == 3:
            if sampling is None:
                if spataxis is None or spat1axis is None or taxis is None:
                    raise ValueError(
                        "Provide either sampling or spataxis, "
                        f"spat1axis and taxis for kind={kind}"
                    )
                else:
                    # fall back to sampling steps computed from the axes
                    # (first-difference, NOT identical indices)
                    sampling = (
                        np.abs(spataxis[1] - spataxis[0]),
                        np.abs(spat1axis[1] - spat1axis[0]),
                        np.abs(taxis[1] - taxis[0]),
                    )
            Pop = FFTND(dims=dims, nffts=nffts, sampling=sampling)
            Pop = Pop.H
        else:
            if sampling is None:
                if spataxis is None or taxis is None:
                    raise ValueError(
                        "Provide either sampling or spataxis, "
                        f"and taxis for kind={kind}"
                    )
                else:
                    sampling = (
                        np.abs(spataxis[1] - spataxis[0]),
                        np.abs(taxis[1] - taxis[0]),
                    )
            Pop = FFT2D(dims=dims, nffts=nffts, sampling=sampling)
            Pop = Pop.H
        SIop = Rop * Pop
    elif "chirpradon" in kind:
        prec = True
        dotcflag = 0
        if ndims == 3:
            Pop = ChirpRadon3D(
                taxis,
                spataxis,
                spat1axis,
                (np.max(paxis) * dspat / dt, np.max(p1axis) * dspat1 / dt),
            ).H
            dimsp = (spataxis.size, spat1axis.size, taxis.size)
        else:
            Pop = ChirpRadon2D(taxis, spataxis, np.max(paxis) * dspat / dt).H
            dimsp = (spataxis.size, taxis.size)
        SIop = Rop * Pop
    elif "radon" in kind:
        prec = True
        dotcflag = 0
        kindradon = kind.split("-")[-1]
        if ndims == 3:
            Pop = Radon3D(
                taxis,
                spataxis,
                spat1axis,
                paxis,
                p1axis,
                centeredh=centeredh,
                kind=kindradon,
                engine=engine,
            )
            dimsp = (paxis.size, p1axis.size, taxis.size)
        else:
            Pop = Radon2D(
                taxis,
                spataxis,
                paxis,
                centeredh=centeredh,
                kind=kindradon,
                engine=engine,
            )
            dimsp = (paxis.size, taxis.size)
        SIop = Rop * Pop
    elif kind in ("sliding", "chirp-sliding"):
        prec = True
        dotcflag = 0
        if ndims == 3:
            nspat, nspat1 = spataxis.size, spat1axis.size
            # local (per-window) spatial axes centered around zero
            spataxis_local = np.linspace(
                -dspat * nwin[0] // 2, dspat * nwin[0] // 2, nwin[0]
            )
            spat1axis_local = np.linspace(
                -dspat1 * nwin[1] // 2, dspat1 * nwin[1] // 2, nwin[1]
            )
            dimsslid = (nspat, nspat1, taxis.size)
            if kind == "sliding":
                npaxis, np1axis = paxis.size, p1axis.size
                Op = Radon3D(
                    taxis,
                    spataxis_local,
                    spat1axis_local,
                    paxis,
                    p1axis,
                    centeredh=True,
                    kind="linear",
                    engine=engine,
                )
            else:
                npaxis, np1axis = nwin[0], nwin[1]
                Op = ChirpRadon3D(
                    taxis,
                    spataxis_local,
                    spat1axis_local,
                    (np.max(paxis) * dspat / dt, np.max(p1axis) * dspat1 / dt),
                ).H
            dimsp = (nwins[0] * npaxis, nwins[1] * np1axis, dimsslid[2])
            Pop = Sliding3D(
                Op, dimsp, dimsslid, nwin, nover, (npaxis, np1axis), tapertype="cosine"
            )
            # to be able to reshape correctly the preconditioned model
            dimsp = (nwins[0], nwins[1], npaxis, np1axis, dimsslid[2])
        else:
            nspat = spataxis.size
            spataxis_local = np.linspace(-dspat * nwin // 2, dspat * nwin // 2, nwin)
            dimsslid = (nspat, taxis.size)
            if kind == "sliding":
                npaxis = paxis.size
                Op = Radon2D(
                    taxis,
                    spataxis_local,
                    paxis,
                    centeredh=True,
                    kind="linear",
                    engine=engine,
                )
            else:
                npaxis = nwin
                Op = ChirpRadon2D(taxis, spataxis_local, np.max(paxis) * dspat / dt).H
            dimsp = (nwins * npaxis, dimsslid[1])
            Pop = Sliding2D(Op, dimsp, dimsslid, nwin, nover, tapertype="cosine")
        SIop = Rop * Pop
    else:
        raise KeyError(
            "kind must be spatial, fk, radon-linear, "
            "radon-parabolic, radon-hyperbolic, sliding or chirp-sliding"
        )

    # dot-test
    if dottest:
        Dottest(
            SIop,
            np.prod(dimsd),
            np.prod(dimsp) if prec else np.prod(dims),
            complexflag=dotcflag,
            raiseerror=True,
            verb=True,
        )

    # inversion
    if kind == "spatial":
        recdata = regularized_inversion(SIop, data.ravel(), [Regop], **kwargs_solver)
        if isinstance(recdata, tuple):
            recdata = recdata[0]
        recdata = recdata.reshape(dims)
        recprec = None
        cost = None
    else:
        recprec = fista(SIop, data.ravel(), **kwargs_solver)
        if len(recprec) == 3:
            cost = recprec[2]
        else:
            cost = None
        recprec = recprec[0]
        # map sparse estimate back to data space (real part only)
        recdata = np.real(Pop * recprec)
        recprec = recprec.reshape(dimsp)
        recdata = recdata.reshape(dims)

    return recdata, recprec, cost
16,856
39.135714
88
py
pylops
pylops-master/pylops/waveeqprocessing/oneway.py
__all__ = [
    "PhaseShift",
    "Deghosting",
]

import logging
from typing import Callable, Optional, Sequence, Tuple, Union

import numpy as np
from scipy.sparse.linalg import lsqr

from pylops import Diagonal, Identity, LinearOperator, Pad, aslinearoperator
from pylops.signalprocessing import FFT
from pylops.utils import dottest as Dottest
from pylops.utils.backend import to_cupy_conditional
from pylops.utils.tapers import taper2d, taper3d
from pylops.utils.typing import DTypeLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


class _PhaseShift(LinearOperator):
    """Phase shift operator in frequency-wavenumber domain

    Apply positive phase shift directly in frequency-wavenumber domain.

    See :class:`pylops.waveeqprocessing.PhaseShift` for more details on the
    input parameters.

    """

    def __init__(
        self,
        vel: float,
        dz: float,
        freq: NDArray,
        kx: NDArray,
        ky: Optional[Union[int, NDArray]] = None,
        dtype: str = "complex64",
    ) -> None:
        self.vel = vel
        self.dz = dz
        # define frequency and horizontal wavenumber axes
        if ky is None:
            ky = 0
            [freq, kx] = np.meshgrid(freq, kx, indexing="ij")
        else:
            [freq, kx, ky] = np.meshgrid(freq, kx, ky, indexing="ij")
        # define vertical wavenumber axis
        kz = (freq / vel) ** 2 - kx**2 - ky**2
        kz = np.sqrt(kz.astype(dtype))
        # ensure evanescent region is complex positive
        kz = np.real(kz) - 1j * np.sign(dz) * np.abs(np.imag(kz))
        # create propagator
        self.gazx = np.exp(-1j * 2 * np.pi * dz * kz)
        super().__init__(
            dtype=np.dtype(dtype),
            dims=freq.shape,
            dimsd=freq.shape,
            explicit=False,
            name="P",
        )

    def _matvec(self, x: NDArray) -> NDArray:
        # move propagator to the same backend (numpy/cupy) as the input
        if not isinstance(self.gazx, type(x)):
            self.gazx = to_cupy_conditional(x, self.gazx)
        y = x.reshape(self.dims) * self.gazx
        return y.ravel()

    def _rmatvec(self, x: NDArray) -> NDArray:
        if not isinstance(self.gazx, type(x)):
            self.gazx = to_cupy_conditional(x, self.gazx)
        y = x.reshape(self.dims) * np.conj(self.gazx)
        return y.ravel()


def PhaseShift(
    vel: float,
    dz: float,
    nt: int,
    freq: NDArray,
    kx: NDArray,
    ky: Optional[NDArray] = None,
    dtype: DTypeLike = "float64",
    name: str = "P",
) -> LinearOperator:
    r"""Phase shift operator

    Apply positive (forward) phase shift with constant velocity in
    forward mode, and negative (backward) phase shift with constant velocity
    in adjoint mode. Input model and data should be 2- or 3-dimensional
    arrays in time-space domain of size :math:`[n_t \times n_x \;(\times
    n_y)]`.

    Parameters
    ----------
    vel : :obj:`float`, optional
        Constant propagation velocity
    dz : :obj:`float`, optional
        Depth step
    nt : :obj:`int`, optional
        Number of time samples of model and data
    freq : :obj:`numpy.ndarray`
        Positive frequency axis
    kx : :obj:`int`, optional
        Horizontal wavenumber axis (centered around 0) of size
        :math:`[n_x \times 1]`.
    ky : :obj:`int`, optional
        Second horizontal wavenumber axis for 3d phase shift
        (centered around 0) of size :math:`[n_y \times 1]`.
    dtype : :obj:`str`, optional
        Type of elements in input array
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    Pop : :obj:`pylops.LinearOperator`
        Phase shift operator

    Notes
    -----
    The phase shift operator implements a one-way wave equation forward
    propagation in frequency-wavenumber domain by applying the following
    transformation to the input model:

    .. math::
        d(f, k_x, k_y) = m(f, k_x, k_y)
        e^{-j \Delta z \sqrt{\omega^2/v^2 - k_x^2 - k_y^2}}

    where :math:`v` is the constant propagation velocity and
    :math:`\Delta z` is the propagation depth. In adjoint mode, the data is
    propagated backward using the following transformation:

    .. math::
        m(f, k_x, k_y) = d(f, k_x, k_y)
        e^{j \Delta z \sqrt{\omega^2/v^2 - k_x^2 - k_y^2}}

    Effectively, the input model and data are assumed to be in time-space
    domain and forward Fourier transform is applied to both dimensions, leading
    to the following operator:

    .. math::
        \mathbf{d} = \mathbf{F}^H_t \mathbf{F}^H_x  \mathbf{P}
            \mathbf{F}_x \mathbf{F}_t \mathbf{m}

    where :math:`\mathbf{P}` perfoms the phase-shift as discussed above.

    """
    dtypefft = (np.ones(1, dtype=dtype) + 1j * np.ones(1, dtype=dtype)).dtype
    dims: Union[Tuple[int, int], Tuple[int, int, int]]
    dimsfft: Union[Tuple[int, int], Tuple[int, int, int]]
    if ky is None:
        dims = (nt, kx.size)
        dimsfft = (freq.size, kx.size)
    else:
        dims = (nt, kx.size, ky.size)
        dimsfft = (freq.size, kx.size, ky.size)
    Fop = FFT(dims, axis=0, nfft=nt, real=True, dtype=dtype)
    Kxop = FFT(
        dimsfft, axis=1, nfft=kx.size, real=False, ifftshift_before=True, dtype=dtypefft
    )
    if ky is not None:
        Kyop = FFT(
            dimsfft,
            axis=2,
            nfft=ky.size,
            real=False,
            ifftshift_before=True,
            dtype=dtypefft,
        )
    Pop = _PhaseShift(vel, dz, freq, kx, ky, dtypefft)
    if ky is None:
        Pop = Fop.H * Kxop * Pop * Kxop.H * Fop
    else:
        Pop = Fop.H * Kxop * Kyop * Pop * Kyop.H * Kxop.H * Fop
    # Recasting of type is required to avoid FFT operators to cast to complex.
    # We know this is correct because forward and inverse FFTs are applied at
    # the beginning and end of this combined operator
    Pop.dtype = dtype
    Pop = aslinearoperator(Pop)
    Pop.name = name
    return Pop


def Deghosting(
    p: NDArray,
    nt: int,
    nr: Union[int, Tuple[int, int]],
    dt: float,
    dr: Sequence[float],
    vel: float,
    zrec: float,
    pd: Optional[NDArray] = None,
    win: Optional[NDArray] = None,
    npad: Union[Tuple[int], Tuple[int, int]] = (11, 11),
    ntaper: Tuple[int, int] = (11, 11),
    restriction: Optional[LinearOperator] = None,
    sptransf: Optional[LinearOperator] = None,
    solver: Callable = lsqr,
    dottest: bool = False,
    dtype: DTypeLike = "complex128",
    **kwargs_solver
) -> Tuple[NDArray, NDArray]:
    r"""Wavefield deghosting.

    Apply seismic wavefield decomposition from single-component (pressure)
    data. This process is also generally referred to as model-based deghosting.

    Parameters
    ----------
    p : :obj:`np.ndarray`
        Pressure data of of size :math:`\lbrack n_{r_x}\,(\times n_{r_y})
        \times n_t \rbrack` (or :math:`\lbrack n_{r_{x,\text{sub}}}\,
        (\times n_{r_{y,\text{sub}}}) \times n_t \rbrack`
        in case a ``restriction`` operator is provided. Note that
        :math:`n_{r_{x,\text{sub}}}` (and :math:`n_{r_{y,\text{sub}}}`)
        must agree with the size of the output of this operator)
    nt : :obj:`int`
        Number of samples along the time axis
    nr : :obj:`int` or :obj:`tuple`
        Number of samples along the receiver axis (or axes)
    dt : :obj:`float`
        Sampling along the time axis
    dr : :obj:`float` or :obj:`tuple`
        Sampling along the receiver array of the separated
        pressure consituents
    vel : :obj:`float`
        Velocity along the receiver array (must be constant)
    zrec : :obj:`float`
        Depth of receiver array
    pd : :obj:`np.ndarray`, optional
        Direct arrival to be subtracted from ``p``
    win : :obj:`np.ndarray`, optional
        Time window to be applied to ``p`` to remove the direct arrival
        (if ``pd=None``)
    ntaper : :obj:`float` or :obj:`tuple`, optional
        Number of samples of taper applied to propagator to avoid edge
        effects
    npad : :obj:`float` or :obj:`tuple`, optional
        Number of samples of padding applied to propagator to avoid edge
        effects angle
    restriction : :obj:`pylops.LinearOperator`, optional
        Restriction operator
    sptransf : :obj:`pylops.LinearOperator`, optional
        Sparsifying operator
    solver : :obj:`callable`, optional
        Function handle of solver to be used if ``kind='inverse'``
    dottest : :obj:`bool`, optional
        Apply dot-test
    dtype : :obj:`str`, optional
        Type of elements in input array. If ``None``, directly inferred
        from ``p``
    **kwargs_solver
        Arbitrary keyword arguments for chosen ``solver``

    Returns
    -------
    pup : :obj:`np.ndarray`
        Up-going wavefield
    pdown : :obj:`np.ndarray`
        Down-going wavefield

    Notes
    -----
    Up- and down-going components of seismic data :math:`p^-(x, t)`
    and :math:`p^+(x, t)` can be estimated from single-component data
    :math:`p(x, t)` using a ghost model.

    The basic idea [1]_ is that of using a one-way propagator in the f-k
    domain (also referred to as ghost model) to predict the down-going field
    from the up-going one (excluded the direct arrival and its source
    ghost referred here to as :math:`p_d(x, t)`):

    .. math::
        p^+ - p_d = e^{-j k_z 2 z_\text{rec}} p^-

    where :math:`k_z` is the vertical wavenumber and :math:`z_\text{rec}` is
    the depth of the array of receivers

    In a matrix form we can thus write the total wavefield as:

    .. math::
        \mathbf{p} - \mathbf{p_d} = (\mathbf{I} + \Phi) \mathbf{p}^-

    where :math:`\Phi` is one-way propagator implemented via the
    :class:`pylops.waveeqprocessing.PhaseShift` operator.

    .. [1] Amundsen, L., 1993, Wavenumber-based filtering of marine
        point-source data: GEOPHYSICS, 58, 1335–1348.

    """
    ndims = p.ndim
    if ndims == 2:
        dims = (nt, nr)
        nrs = nr
        nkx = nr + 2 * npad
        kx = np.fft.ifftshift(np.fft.fftfreq(nkx, dr))
        ky = None
    else:
        dims = (nt, nr[0], nr[1])
        nrs = nr[0] * nr[1]
        nkx = nr[0] + 2 * npad[0]
        kx = np.fft.ifftshift(np.fft.fftfreq(nkx, dr[0]))
        nky = nr[1] + 2 * npad[1]
        # use the sampling of the second spatial axis (dr is a tuple in 3D)
        ky = np.fft.ifftshift(np.fft.fftfreq(nky, dr[1]))
    nf = nt
    freq = np.fft.rfftfreq(nf, dt)

    # Phase shift operator
    zprop = 2 * zrec
    if ndims == 2:
        taper = taper2d(nt, nr, ntaper).T
        Padop = Pad(dims, ((0, 0), (npad, npad)))
    else:
        taper = taper3d(nt, nr, ntaper).transpose(2, 0, 1)
        Padop = Pad(dims, ((0, 0), (npad[0], npad[0]), (npad[1], npad[1])))

    Pop = (
        -Padop.H
        * PhaseShift(vel, zprop, nt, freq, kx, ky)
        * Padop
        * Diagonal(taper.ravel(), dtype=dtype)
    )

    # Decomposition operator
    Dupop = Identity(nt * nrs, dtype=p.dtype) + Pop

    if dottest:
        Dottest(Dupop, nt * nrs, nt * nrs, verb=True)

    # Add restriction (keep the unrestricted operator around so the full
    # wavefield can be reconstructed after inversion)
    if restriction is not None:
        Dupop_norestr = Dupop
        Dupop = restriction * Dupop
    else:
        # without this branch Dupop_norestr would be undefined (NameError)
        # when only a sparsifying transform is provided
        Dupop_norestr = Dupop

    # Add sparsify transform
    if sptransf is not None:
        Dupop_norestr = Dupop_norestr * sptransf
        Dupop = Dupop * sptransf

    # Define data
    if pd is not None:
        d = p - pd
    else:
        d = win * p

    # Inversion
    pup = solver(Dupop, d.ravel(), **kwargs_solver)[0]

    # Apply sparse transform
    if sptransf is not None:
        p = Dupop_norestr * pup  # reconstruct p at finely sampled spatial axes
        pup = sptransf * pup
        p = np.real(p).reshape(dims)

    # Finalize estimates
    pup = np.real(pup).reshape(dims)
    pdown = p - pup

    return pup, pdown
11,789
31.30137
88
py
pylops
pylops-master/pylops/waveeqprocessing/wavedecomposition.py
__all__ = [
    "PressureToVelocity",
    "UpDownComposition2D",
    "UpDownComposition3D",
    "WavefieldDecomposition",
]

import logging
from typing import Callable, Optional, Sequence, Tuple, Union

import numpy as np
from scipy.signal import filtfilt
from scipy.sparse.linalg import lsqr

from pylops import Block, BlockDiag, Diagonal, Identity, LinearOperator
from pylops.signalprocessing import FFT2D, FFTND
from pylops.utils import dottest as Dottest
from pylops.utils.backend import get_array_module, get_module, get_module_name
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def _filter_obliquity(
    OBL: NDArray,
    F: NDArray,
    Kx: NDArray,
    vel: float,
    critical: float,
    ntaper: int,
    Ky: NDArray = 0,
) -> NDArray:
    """Apply masking of ``OBL`` based on critical angle and tapering at edges

    Parameters
    ----------
    OBL : :obj:`np.ndarray`
        Obliquity factor
    F : :obj:`np.ndarray`
        Frequency grid
    Kx : :obj:`np.ndarray`
        Horizonal wavenumber grid
    vel : :obj:`float`
        Velocity along the receiver array (must be constant)
    critical : :obj:`float`, optional
        Percentage of angles to retain in obliquity factor
    ntaper : :obj:`float`, optional
        Number of samples of taper applied to obliquity factor around critical
        angle
    Ky : :obj:`np.ndarray`, optional
        Second horizonal wavenumber grid

    Returns
    -------
    OBL : :obj:`np.ndarray`
        Filtered obliquity factor

    """
    # convert percentage to a fraction of the critical angle
    critical /= 100.0
    # zero out the evanescent/post-critical region |k| >= critical * f / vel
    mask = np.sqrt(Kx**2 + Ky**2) < critical * np.abs(F) / vel
    OBL *= mask
    # smooth the sharp mask edges with a zero-phase moving-average filter,
    # applied along each grid axis in turn
    OBL = filtfilt(np.ones(ntaper) / float(ntaper), 1, OBL, axis=0)
    OBL = filtfilt(np.ones(ntaper) / float(ntaper), 1, OBL, axis=1)
    # third axis only exists in the 3D case (Ky passed as an array)
    if isinstance(Ky, np.ndarray):
        OBL = filtfilt(np.ones(ntaper) / float(ntaper), 1, OBL, axis=2)
    return OBL


def _obliquity2D(
    nt: int,
    nr: int,
    dt: float,
    dr: float,
    rho: float,
    vel: float,
    nffts: InputDimsLike,
    critical: float = 100.0,
    ntaper: int = 10,
    composition: bool = True,
    backend: str = "numpy",
    dtype: DTypeLike = "complex128",
) -> Tuple[LinearOperator, LinearOperator]:
    r"""2D Obliquity operator and FFT operator

    Parameters
    ----------
    nt : :obj:`int`
        Number of samples along the time axis
    nr : :obj:`int`
        Number of samples along the receiver axis
    dt : :obj:`float`
        Sampling along the time axis
    dr : :obj:`float`
        Sampling along the receiver array
    rho : :obj:`float`
        Density along the receiver array (must be constant)
    vel : :obj:`float`
        Velocity along the receiver array (must be constant)
    nffts : :obj:`tuple`, optional
        Number of samples along the wavenumber and frequency axes
    critical : :obj:`float`, optional
        Percentage of angles to retain in obliquity factor. For example, if
        ``critical=100`` only angles below the critical angle
        :math:`|k_x| < \frac{f(k_x)}{vel}` will be retained
    ntaper : :obj:`float`, optional
        Number of samples of taper applied to obliquity factor around critical
        angle
    composition : :obj:`bool`, optional
        Create obliquity factor for composition (``True``) or
        decomposition (``False``)
    backend : :obj:`str`, optional
        Backend used for creation of obliquity factor operator
        (``numpy`` or ``cupy``)
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    FFTop : :obj:`pylops.LinearOperator`
        FFT operator
    OBLop : :obj:`pylops.LinearOperator`
        Obliquity factor operator

    """
    # create Fourier operator
    FFTop = FFT2D(dims=[nr, nt], nffts=nffts, sampling=[dr, dt], dtype=dtype)

    # create obliquity operator
    [Kx, F] = np.meshgrid(FFTop.f1, FFTop.f2, indexing="ij")
    k = F / vel
    # vertical wavenumber; complex sqrt makes the evanescent region imaginary
    Kz = np.sqrt((k**2 - Kx**2).astype(dtype))
    Kz[np.isnan(Kz)] = 0

    # composition scales by kz/(rho*|f|); decomposition by its reciprocal,
    # guarding the respective division-by-zero entries in each case
    if composition:
        OBL = Kz / (rho * np.abs(F))
        OBL[F == 0] = 0
    else:
        OBL = rho * (np.abs(F) / Kz)
        OBL[Kz == 0] = 0

    # cut off and taper
    OBL = _filter_obliquity(OBL, F, Kx, vel, critical, ntaper)
    OBL = get_module(backend).asarray(OBL)
    OBLop = Diagonal(OBL.ravel(), dtype=dtype)
    return FFTop, OBLop


def _obliquity3D(
    nt: int,
    nr: Union[int, Sequence[int]],
    dt: float,
    dr: Union[float, Sequence[float]],
    rho: float,
    vel: float,
    nffts: InputDimsLike,
    critical: float = 100.0,
    ntaper: int = 10,
    composition: bool = True,
    backend: str = "numpy",
    dtype: DTypeLike = "complex128",
) -> Tuple[LinearOperator, LinearOperator]:
    r"""3D Obliquity operator and FFT operator

    Parameters
    ----------
    nt : :obj:`int`
        Number of samples along the time axis
    nr : :obj:`tuple`
        Number of samples along the receiver axes
    dt : :obj:`float`
        Sampling along the time axis
    dr : :obj:`tuple`
        Samplings along the receiver array
    rho : :obj:`float`
        Density along the receiver array (must be constant)
    vel : :obj:`float`
        Velocity along the receiver array (must be constant)
    nffts : :obj:`tuple`, optional
        Number of samples along the wavenumber and frequency axes
    critical : :obj:`float`, optional
        Percentage of angles to retain in obliquity factor. For example, if
        ``critical=100`` only angles below the critical angle
        :math:`\sqrt{k_y^2 + k_x^2} < \frac{\omega}{vel}` will be retained
    ntaper : :obj:`float`, optional
        Number of samples of taper applied to obliquity factor around critical
        angle
    composition : :obj:`bool`, optional
        Create obliquity factor for composition (``True``) or
        decomposition (``False``)
    backend : :obj:`str`, optional
        Backend used for creation of obliquity factor operator
        (``numpy`` or ``cupy``)
    dtype : :obj:`str`, optional
        Type of elements in input array.

    Returns
    -------
    FFTop : :obj:`pylops.LinearOperator`
        FFT operator
    OBLop : :obj:`pylops.LinearOperator`
        Obliquity factor operator

    """
    # create Fourier operator
    FFTop = FFTND(
        dims=[nr[0], nr[1], nt], nffts=nffts, sampling=[dr[0], dr[1], dt], dtype=dtype
    )

    # create obliquity operator
    [Ky, Kx, F] = np.meshgrid(FFTop.fs[0], FFTop.fs[1], FFTop.fs[2], indexing="ij")
    k = F / vel
    # vertical wavenumber from the 3D dispersion relation
    Kz = np.sqrt((k**2 - Ky**2 - Kx**2).astype(dtype))
    Kz[np.isnan(Kz)] = 0

    if composition:
        OBL = Kz / (rho * np.abs(F))
        OBL[F == 0] = 0
    else:
        OBL = rho * (np.abs(F) / Kz)
        OBL[Kz == 0] = 0

    # cut off and taper
    OBL = _filter_obliquity(OBL, F, Kx, vel, critical, ntaper, Ky=Ky)
    OBL = get_module(backend).asarray(OBL)
    OBLop = Diagonal(OBL.ravel(), dtype=dtype)
    return FFTop, OBLop


def PressureToVelocity(
    nt: int,
    nr: int,
    dt: float,
    dr: float,
    rho: float,
    vel: float,
    nffts: Union[InputDimsLike, Tuple[None, None, None]] = (None, None, None),
    critical: float = 100.0,
    ntaper: int = 10,
    topressure: bool = False,
    backend: str = "numpy",
    dtype: DTypeLike = "complex128",
    name: str = "P",
) -> LinearOperator:
    r"""Pressure to Vertical velocity conversion.

    Apply conversion from pressure to vertical velocity seismic wavefield
    (or vertical velocity to pressure). The input model and data required by
    the operator should be created by flattening the a wavefield of
    size :math:`(\lbrack n_{r_y} \times n_{r_x} \times n_t \rbrack`.
Parameters ---------- nt : :obj:`int` Number of samples along the time axis nr : :obj:`int` or :obj:`tuple` Number of samples along the receiver axis (or axes) dt : :obj:`float` Sampling along the time axis dr : :obj:`float` or :obj:`tuple` Sampling(s) along the receiver array rho : :obj:`float` Density :math:`\rho` along the receiver array (must be constant) vel : :obj:`float` Velocity :math:`c` along the receiver array (must be constant) nffts : :obj:`tuple`, optional Number of samples along the wavenumber and frequency axes critical : :obj:`float`, optional Percentage of angles to retain in obliquity factor. For example, if ``critical=100`` only angles below the critical angle :math:`\sqrt{k_y^2 + k_x^2} < \frac{\omega}{c}` will be retained ntaper : :obj:`float`, optional Number of samples of taper applied to obliquity factor around critical angle topressure : :obj:`bool`, optional Perform conversion from particle velocity to pressure (``True``) or from pressure to particle velocity (``False``) backend : :obj:`str`, optional Backend used for creation of obliquity factor operator (``numpy`` or ``cupy``) dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Cop : :obj:`pylops.LinearOperator` Pressure to particle velocity (or particle velocity to pressure) conversion operator See Also -------- UpDownComposition2D: 2D Wavefield composition UpDownComposition3D: 3D Wavefield composition WavefieldDecomposition: Wavefield decomposition Notes ----- A pressure wavefield :math:`p(x, t)` can be converted into an equivalent vertical particle velocity wavefield :math:`v_z(x, t)` by applying the following frequency-wavenumber dependant scaling [1]_: .. math:: \hat{v}_z(k_x, \omega) = \frac{k_z}{\omega \rho} \hat{p}(k_x, \omega) where the vertical wavenumber :math:`k_z` is defined as :math:`k_z=\sqrt{\frac{\omega^2}{c^2} - k_x^2}`. 
Similarly a vertical particle velocity can be converted into an equivalent pressure wavefield by applying the following frequency-wavenumber dependant scaling [1]_: .. math:: \hat{p}(k_x, \omega) = \frac{\omega \rho}{k_z} \hat{v}_z(k_x, \omega) For 3-dimensional applications the only difference is represented by the vertical wavenumber :math:`k_z`, which is defined as :math:`k_z=\sqrt{\frac{\omega^2}{c^2} - k_x^2 - k_y^2}`. In both cases, this operator is implemented as a concatanation of a 2 or 3-dimensional forward FFT (:class:`pylops.signalprocessing.FFT2` or :class:`pylops.signalprocessing.FFTN`), a weighting matrix implemented via :class:`pylops.basicprocessing.Diagonal`, and 2 or 3-dimensional inverse FFT. .. [1] Wapenaar, K. "Reciprocity properties of one-way propagators", Geophysics, vol. 63, pp. 1795-1798. 1998. """ if isinstance(nr, int): obl = _obliquity2D nffts = ( int(nffts[0]) if nffts[0] is not None else nr, int(nffts[1]) if nffts[1] is not None else nt, ) else: obl = _obliquity3D nffts = ( int(nffts[0]) if nffts[0] is not None else nr[0], int(nffts[1]) if nffts[1] is not None else nr[1], int(nffts[2]) if nffts[2] is not None else nt, ) # create obliquity operator FFTop, OBLop = obl( nt, nr, dt, dr, rho, vel, nffts=nffts, critical=critical, ntaper=ntaper, composition=not topressure, backend=backend, dtype=dtype, ) # create conversion operator Cop = FFTop.H * OBLop * FFTop Cop.name = name return Cop def UpDownComposition2D( nt: int, nr: int, dt: float, dr: float, rho: float, vel: float, nffts: Union[InputDimsLike, Tuple[None, None]] = (None, None), critical: float = 100.0, ntaper: int = 10, scaling: float = 1.0, backend: str = "numpy", dtype: DTypeLike = "complex128", name: str = "U", ) -> LinearOperator: r"""2D Up-down wavefield composition. Apply multi-component seismic wavefield composition from its up- and down-going constituents. 
The input model required by the operator should be created by flattening the separated wavefields of size :math:`\lbrack n_r \times n_t \rbrack` concatenated along the spatial axis. Similarly, the data is also a flattened concatenation of pressure and vertical particle velocity wavefields. Parameters ---------- nt : :obj:`int` Number of samples along the time axis nr : :obj:`int` Number of samples along the receiver axis dt : :obj:`float` Sampling along the time axis dr : :obj:`float` Sampling along the receiver array rho : :obj:`float` Density :math:`\rho` along the receiver array (must be constant) vel : :obj:`float` Velocity :math:`c` along the receiver array (must be constant) nffts : :obj:`tuple`, optional Number of samples along the wavenumber and frequency axes critical : :obj:`float`, optional Percentage of angles to retain in obliquity factor. For example, if ``critical=100`` only angles below the critical angle :math:`|k_x| < \frac{f(k_x)}{c}` will be retained will be retained ntaper : :obj:`float`, optional Number of samples of taper applied to obliquity factor around critical angle scaling : :obj:`float`, optional Scaling to apply to the operator (see Notes for more details) backend : :obj:`str`, optional Backend used for creation of obliquity factor operator (``numpy`` or ``cupy``) dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- UDop : :obj:`pylops.LinearOperator` Up-down wavefield composition operator See Also -------- UpDownComposition3D: 3D Wavefield composition WavefieldDecomposition: Wavefield decomposition Notes ----- Multi-component seismic data :math:`p(x, t)` and :math:`v_z(x, t)` can be synthesized in the frequency-wavenumber domain as the superposition of the up- and downgoing constituents of the pressure wavefield (:math:`p^-(x, t)` and :math:`p^+(x, t)`) as follows [1]_: .. 
math:: \begin{bmatrix} \hat{p} \\ \hat{v_z} \end{bmatrix}(k_x, \omega) = \begin{bmatrix} 1 & 1 \\ \frac{k_z}{\omega \rho} & - \frac{k_z}{\omega \rho} \\ \end{bmatrix} \begin{bmatrix} \hat{p^+} \\ \hat{p^-} \end{bmatrix}(k_x, \omega) where the vertical wavenumber :math:`k_z` is defined as :math:`k_z=\sqrt{\frac{\omega^2}{c^2} - k_x^2}`. We can write the entire composition process in a compact matrix-vector notation as follows: .. math:: \begin{bmatrix} \mathbf{p} \\ s\mathbf{v_z} \end{bmatrix} = \begin{bmatrix} \mathbf{F} & 0 \\ 0 & s\mathbf{F} \end{bmatrix} \begin{bmatrix} \mathbf{I} & \mathbf{I} \\ \mathbf{W}^+ & \mathbf{W}^- \end{bmatrix} \begin{bmatrix} \mathbf{F}^H & 0 \\ 0 & \mathbf{F}^H \end{bmatrix} \mathbf{p^{\pm}} where :math:`\mathbf{F}` is the 2-dimensional FFT (:class:`pylops.signalprocessing.FFT2`), :math:`\mathbf{W}^\pm` are weighting matrices which contain the scalings :math:`\pm \frac{k_z}{\omega \rho}` implemented via :class:`pylops.basicprocessing.Diagonal`, and :math:`s` is a scaling factor that is applied to both the particle velocity data and to the operator has shown above. Such a scaling is required to balance out the different dynamic range of pressure and particle velocity when solving the wavefield separation problem as an inverse problem. As the operator is effectively obtained by chaining basic PyLops operators the adjoint is automatically implemented for this operator. .. [1] Wapenaar, K. "Reciprocity properties of one-way propagators", Geophysics, vol. 63, pp. 1795-1798. 1998. 
""" nffts = ( int(nffts[0]) if nffts[0] is not None else nr, int(nffts[1]) if nffts[1] is not None else nt, ) # create obliquity operator FFTop, OBLop, = _obliquity2D( nt, nr, dt, dr, rho, vel, nffts=nffts, critical=critical, ntaper=ntaper, composition=True, backend=backend, dtype=dtype, ) # create up-down modelling operator UDop = ( BlockDiag([FFTop.H, scaling * FFTop.H]) * Block( [ [ Identity(nffts[0] * nffts[1], dtype=dtype), Identity(nffts[0] * nffts[1], dtype=dtype), ], [OBLop, -OBLop], ] ) * BlockDiag([FFTop, FFTop]) ) UDop.name = name return UDop def UpDownComposition3D( nt: int, nr: int, dt: float, dr: float, rho: float, vel: float, nffts: Union[InputDimsLike, Tuple[None, None, None]] = (None, None, None), critical: float = 100.0, ntaper: int = 10, scaling: float = 1.0, backend: str = "numpy", dtype: DTypeLike = "complex128", name: str = "U", ) -> LinearOperator: r"""3D Up-down wavefield composition. Apply multi-component seismic wavefield composition from its up- and down-going constituents. The input model required by the operator should be created by flattening the separated wavefields of size :math:`\lbrack n_{r_y} \times n_{r_x} \times n_t \rbrack` concatenated along the first spatial axis. Similarly, the data is also a flattened concatenation of pressure and vertical particle velocity wavefields. Parameters ---------- nt : :obj:`int` Number of samples along the time axis nr : :obj:`tuple` Number of samples along the receiver axes dt : :obj:`float` Sampling along the time axis dr : :obj:`tuple` Samplings along the receiver array rho : :obj:`float` Density :math:`\rho` along the receiver array (must be constant) vel : :obj:`float` Velocity :math:`c` along the receiver array (must be constant) nffts : :obj:`tuple`, optional Number of samples along the wavenumbers and frequency axes (for the wavenumbers axes the same order as ``nr`` and ``dr`` must be followed) critical : :obj:`float`, optional Percentage of angles to retain in obliquity factor. 
For example, if ``critical=100`` only angles below the critical angle :math:`\sqrt{k_y^2 + k_x^2} < \frac{\omega}{c}` will be retained ntaper : :obj:`float`, optional Number of samples of taper applied to obliquity factor around critical angle scaling : :obj:`float`, optional Scaling to apply to the operator (see Notes for more details) backend : :obj:`str`, optional Backend used for creation of obliquity factor operator (``numpy`` or ``cupy``) dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- UDop : :obj:`pylops.LinearOperator` Up-down wavefield composition operator See Also -------- UpDownComposition2D: 2D Wavefield composition WavefieldDecomposition: Wavefield decomposition Notes ----- Multi-component seismic data :math:`p(y, x, t)` and :math:`v_z(y, x, t)` can be synthesized in the frequency-wavenumber domain as the superposition of the up- and downgoing constituents of the pressure wavefield (:math:`p^-(y, x, t)` and :math:`p^+(y, x, t)`) as described :class:`pylops.waveeqprocessing.UpDownComposition2D`. Here the vertical wavenumber :math:`k_z` is defined as :math:`k_z=\sqrt{\frac{\omega^2}{c^2} - k_y^2 - k_x^2}`. 
""" nffts = ( int(nffts[0]) if nffts[0] is not None else nr[0], int(nffts[1]) if nffts[1] is not None else nr[1], int(nffts[2]) if nffts[2] is not None else nt, ) # create obliquity operator FFTop, OBLop = _obliquity3D( nt, nr, dt, dr, rho, vel, nffts=nffts, critical=critical, ntaper=ntaper, composition=True, backend=backend, dtype=dtype, ) # create up-down modelling operator UDop = ( BlockDiag([FFTop.H, scaling * FFTop.H]) * Block( [ [ Identity(nffts[0] * nffts[1] * nffts[2], dtype=dtype), Identity(nffts[0] * nffts[1] * nffts[2], dtype=dtype), ], [OBLop, -OBLop], ] ) * BlockDiag([FFTop, FFTop]) ) UDop.name = name return UDop def WavefieldDecomposition( p: NDArray, vz: NDArray, nt: int, nr: Union[int, InputDimsLike], dt: float, dr: float, rho: float, vel: float, nffts: Union[InputDimsLike, Tuple[None, None, None]] = (None, None, None), critical: float = 100.0, ntaper: int = 10, scaling: float = 1.0, kind: str = "inverse", restriction: Optional[LinearOperator] = None, sptransf: Optional[LinearOperator] = None, solver: Callable = lsqr, dottest: bool = False, dtype: DTypeLike = "complex128", **kwargs_solver ) -> Tuple[NDArray, NDArray]: r"""Up-down wavefield decomposition. Apply seismic wavefield decomposition from multi-component (pressure and vertical particle velocity) data. This process is also generally referred to as data-based deghosting. Parameters ---------- p : :obj:`np.ndarray` Pressure data of size :math:`\lbrack n_{r_x} \,(\times n_{r_y}) \times n_t \rbrack` (or :math:`\lbrack n_{r_{x,\text{sub}}} \,(\times n_{r_{y,\text{sub}}}) \times n_t \rbrack` in case a ``restriction`` operator is provided. Note that :math:`n_{r_{x,\text{sub}}}` (and :math:`n_{r_{y,\text{sub}}}`) must agree with the size of the output of this operator.) 
vz : :obj:`np.ndarray` Vertical particle velocity data of same size as pressure data nt : :obj:`int` Number of samples along the time axis nr : :obj:`int` or :obj:`tuple` Number of samples along the receiver axis (or axes) dt : :obj:`float` Sampling along the time axis dr : :obj:`float` or :obj:`tuple` Sampling along the receiver array (or axes) rho : :obj:`float` Density :math:`\rho` along the receiver array (must be constant) vel : :obj:`float` Velocity :math:`c` along the receiver array (must be constant) nffts : :obj:`tuple`, optional Number of samples along the wavenumber and frequency axes critical : :obj:`float`, optional Percentage of angles to retain in obliquity factor. For example, if ``critical=100`` only angles below the critical angle :math:`\frac{f(k_x)}{c}` will be retained ntaper : :obj:`float`, optional Number of samples of taper applied to obliquity factor around critical angle kind : :obj:`str`, optional Type of separation: ``inverse`` (default) or ``analytical`` scaling : :obj:`float`, optional Scaling to apply to the operator (see Notes of :func:`pylops.waveeqprocessing.wavedecomposition.UpDownComposition2D` for more details) restriction : :obj:`pylops.LinearOperator`, optional Restriction operator sptransf : :obj:`pylops.LinearOperator`, optional Sparsifying operator solver : :obj:`float`, optional Function handle of solver to be used if ``kind='inverse'`` dottest : :obj:`bool`, optional Apply dot-test dtype : :obj:`str`, optional Type of elements in input array. **kwargs_solver Arbitrary keyword arguments for chosen ``solver`` Returns ------- pup : :obj:`np.ndarray` Up-going wavefield pdown : :obj:`np.ndarray` Down-going wavefield Raises ------ KeyError If ``kind`` is neither ``analytical`` nor ``inverse`` Notes ----- Up- and down-going components of seismic data :math:`p^-(x, t)` and :math:`p^+(x, t)` can be estimated from multi-component data :math:`p(x, t)` and :math:`v_z(x, t)` by computing the following expression [1]_: .. 
math:: \begin{bmatrix} \hat{p}^+ \\ \hat{p}^- \end{bmatrix}(k_x, \omega) = \frac{1}{2} \begin{bmatrix} 1 & \frac{\omega \rho}{k_z} \\ 1 & - \frac{\omega \rho}{k_z} \\ \end{bmatrix} \begin{bmatrix} \hat{p} \\ \hat{v}_z \end{bmatrix}(k_x, \omega) if ``kind='analytical'`` or alternatively by solving the equation in :func:`ptcpy.waveeqprocessing.UpDownComposition2D` as an inverse problem, if ``kind='inverse'``. The latter approach has several advantages as data regularization can be included as part of the separation process allowing the input data to be aliased. This is obtained by solving the following problem: .. math:: \begin{bmatrix} \mathbf{p} \\ s\mathbf{v_z} \end{bmatrix} = \begin{bmatrix} \mathbf{R}\mathbf{F} & 0 \\ 0 & s\mathbf{R}\mathbf{F} \end{bmatrix} \mathbf{W} \begin{bmatrix} \mathbf{F}^H \mathbf{S} & 0 \\ 0 & \mathbf{F}^H \mathbf{S} \end{bmatrix} \mathbf{p^{\pm}} where :math:`\mathbf{R}` is a :class:`ptcpy.basicoperators.Restriction` operator and :math:`\mathbf{S}` is sparsyfing transform operator (e.g., :class:`ptcpy.signalprocessing.Radon2D`). .. [1] Wapenaar, K. "Reciprocity properties of one-way propagators", Geophysics, vol. 63, pp. 1795-1798. 1998. 
""" ncp = get_array_module(p) backend = get_module_name(ncp) ndims = p.ndim if ndims == 2: dims = (nr, nt) dims2 = (2 * nr, nt) nr2 = nr decomposition = _obliquity2D composition = UpDownComposition2D else: dims = (nr[0], nr[1], nt) dims2 = (2 * nr[0], nr[1], nt) nr2 = nr[0] decomposition = _obliquity3D composition = UpDownComposition3D if kind == "analytical": FFTop, OBLop = decomposition( nt, nr, dt, dr, rho, vel, nffts=nffts, critical=critical, ntaper=ntaper, composition=False, backend=backend, dtype=dtype, ) VZ: NDArray = FFTop * vz.ravel() # scaled Vz VZ_obl: NDArray = OBLop * VZ vz_obl = FFTop.H * VZ_obl vz_obl = ncp.real(vz_obl.reshape(dims)) #  separation pup = (p - vz_obl) / 2 pdown = (p + vz_obl) / 2 elif kind == "inverse": d = ncp.concatenate((p.ravel(), scaling * vz.ravel())) UDop = composition( nt, nr, dt, dr, rho, vel, nffts=nffts, critical=critical, ntaper=ntaper, scaling=scaling, backend=backend, dtype=dtype, ) if restriction is not None: UDop = restriction * UDop if sptransf is not None: UDop = UDop * BlockDiag([sptransf, sptransf]) UDop.dtype = ncp.real(ncp.ones(1, UDop.dtype)).dtype if dottest: Dottest( UDop, UDop.shape[0], UDop.shape[1], complexflag=2, backend=backend, verb=True, ) # separation by inversion dud = solver(UDop, d.ravel(), **kwargs_solver)[0] if sptransf is None: dud = ncp.real(dud) else: dud = BlockDiag([sptransf, sptransf]) * ncp.real(dud) dud = dud.reshape(dims2) pdown, pup = dud[:nr2], dud[nr2:] else: raise KeyError("kind must be analytical or inverse") return pup, pdown
28,782
31.413288
86
py
pylops
pylops-master/pylops/waveeqprocessing/marchenko.py
__all__ = ["Marchenko"] import logging from typing import Optional, Tuple, Union import numpy as np from scipy.signal import filtfilt from scipy.sparse.linalg import lsqr from scipy.special import hankel2 from pylops import Block, BlockDiag, Diagonal, Identity, Roll from pylops.optimization.basic import cgls from pylops.utils import dottest as Dottest from pylops.utils.backend import get_array_module, get_module_name, to_cupy_conditional from pylops.utils.typing import DTypeLike, NDArray from pylops.waveeqprocessing.mdd import MDC logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def directwave( wav: NDArray, trav: NDArray, nt: int, dt: float, nfft: Optional[int] = None, dist: Optional[NDArray] = None, kind: str = "2d", derivative: bool = True, ) -> NDArray: r"""Analytical direct wave in acoustic media Compute the analytical acoustic 2d or 3d Green's function in frequency domain given a wavelet ``wav``, traveltime curve ``trav`` and distance ``dist`` (for 3d case only). Parameters ---------- wav : :obj:`numpy.ndarray` Wavelet in time domain to apply to direct arrival when created using ``trav``. 
Phase will be discarded resulting in a zero-phase wavelet with same amplitude spectrum as provided by ``wav`` trav : :obj:`numpy.ndarray` Traveltime of first arrival from subsurface point to surface receivers of size :math:`\lbrack n_r \times 1 \rbrack` nt : :obj:`float`, optional Number of samples in time dt : :obj:`float`, optional Sampling in time nfft : :obj:`int`, optional Number of samples in fft time (if ``None``, ``nfft=nt``) dist: :obj:`numpy.ndarray` Distance between subsurface point to surface receivers of size :math:`\lbrack n_r \times 1 \rbrack` kind : :obj:`str`, optional 2-dimensional (``2d``) or 3-dimensional (``3d``) derivative : :obj:`bool`, optional Apply time derivative (``True``) or not (``False``) Returns ------- direct : :obj:`numpy.ndarray` Direct arrival in time domain of size :math:`\lbrack n_t \times n_r \rbrack` Notes ----- The analytical Green's function in 2D [1]_ is : .. math:: G^{2D}(\mathbf{r}) = -\frac{i}{4}H_0^{(1)}(k|\mathbf{r}|) and in 3D [1]_ is: .. math:: G^{3D}(\mathbf{r}) = \frac{e^{-jk\mathbf{r}}}{4 \pi \mathbf{r}} Note that these Green's functions represent the acoustic response to a point source of volume injection. In case the response to a point source of volume injection rate is desired, a :math:`j\omega` scaling (which is equivalent to applying a first derivative in time domain) must be applied. Here this is accomplished by setting ``derivative=True``. .. [1] Snieder, R. "A Guided Tour of Mathematical Methods for the Physical Sciences", Cambridge University Press, pp. 302, 2004. 
""" ncp = get_array_module(wav) nr = len(trav) nfft = nt if nfft is None or nfft < nt else nfft W = np.abs(np.fft.rfft(wav, nfft)) * dt F: NDArray = 2 * np.pi * ncp.arange(nfft) / (dt * nfft) direct = ncp.zeros((nfft // 2 + 1, nr), dtype=np.complex128) for iw, (w, f) in enumerate(zip(W, F)): if kind == "2d": # direct[iw] = ( # w # * np.exp(-1j * (f * trav + np.sign(f) * np.pi / 4)) # / np.sqrt(8 * np.pi * np.abs(f) * trav + 1e-10) # ) direct[iw] = -w * 1j * hankel2(0, f * trav + 1e-10) / 4.0 elif dist is not None: direct[iw] = w * np.exp(-1j * f * trav) / (4 * np.pi * dist) if derivative: direct[iw] *= 1j * f direct = np.fft.irfft(direct, nfft, axis=0) / dt direct = np.real(direct[:nt]) return direct class Marchenko: r"""Marchenko redatuming Solve multi-dimensional Marchenko redatuming problem using :py:func:`scipy.sparse.linalg.lsqr` iterative solver. Parameters ---------- R : :obj:`numpy.ndarray` Multi-dimensional reflection response in time or frequency domain of size :math:`[n_s \times n_r \times n_t (n_{f_\text{max}})]`. If provided in time, ``R`` should not be of complex type. Note that the reflection response should have already been multiplied by 2. 
dt : :obj:`float`, optional Sampling of time integration axis nt : :obj:`float`, optional Number of samples in time (not required if ``R`` is in time) dr : :obj:`float`, optional Sampling of receiver integration axis nfmax : :obj:`int`, optional Index of max frequency to include in deconvolution process wav : :obj:`numpy.ndarray`, optional Wavelet to apply to direct arrival when created using ``trav`` toff : :obj:`float`, optional Time-offset to apply to traveltime nsmooth : :obj:`int`, optional Number of samples of smoothing operator to apply to window saveRt : :obj:`bool`, optional Save ``R`` and ``R.H`` to speed up the computation of adjoint of :class:`pylops.signalprocessing.Fredholm1` (``True``) or create ``R.H`` on-the-fly (``False``) Note that ``saveRt=True`` will be faster but double the amount of required memory prescaled : :obj:`bool`, optional Apply scaling to ``R`` (``False``) or not (``False``) when performing spatial and temporal summations within the :class:`pylops.waveeqprocessing.MDC` operator. In case ``prescaled=True``, the ``R`` is assumed to have been pre-scaled by the user. fftengine : :obj:`str`, optional .. versionadded:: 1.17.0 Engine used for fft computation (``numpy``, ``scipy`` or ``fftw``) dtype : :obj:`bool`, optional Type of elements in input array. Attributes ---------- ns : :obj:`int` Number of samples along source axis nr : :obj:`int` Number of samples along receiver axis shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ TypeError If ``t`` is not :obj:`numpy.ndarray`. See Also -------- MDC : Multi-dimensional convolution MDD : Multi-dimensional deconvolution Notes ----- Marchenko redatuming is a method that allows to produce correct subsurface-to-surface responses given the availability of a reflection data and a macro-velocity model [1]_. 
The Marchenko equations can be written in a compact matrix form [2]_ and solved by means of iterative solvers such as LSQR: .. math:: \begin{bmatrix} \Theta \mathbf{R} \mathbf{f_d^+} \\ \mathbf{0} \end{bmatrix} = \mathbf{I} - \begin{bmatrix} \mathbf{0} & \Theta \mathbf{R} \\ \Theta \mathbf{R^*} & \mathbf{0} \end{bmatrix} \begin{bmatrix} \mathbf{f^-} \\ \mathbf{f_m^+} \end{bmatrix} Finally the subsurface Green's functions can be obtained applying the following operator to the retrieved focusing functions .. math:: \begin{bmatrix} -\mathbf{g^-} \\ \mathbf{g^{+ *}} \end{bmatrix} = \mathbf{I} - \begin{bmatrix} \mathbf{0} & \mathbf{R} \\ \mathbf{R^*} & \mathbf{0} \end{bmatrix} \begin{bmatrix} \mathbf{f^-} \\ \mathbf{f^+} \end{bmatrix} Here :math:`\mathbf{R}` is the monopole-to-particle velocity seismic response (already multiplied by 2). .. [1] Wapenaar, K., Thorbecke, J., Van der Neut, J., Broggini, F., Slob, E., and Snieder, R., "Marchenko imaging", Geophysics, vol. 79, pp. WA39-WA57. 2014. .. [2] van der Neut, J., Vasconcelos, I., and Wapenaar, K. "On Green's function retrieval by iterative substitution of the coupled Marchenko equations", Geophysical Journal International, vol. 203, pp. 792-813. 2015. 
""" def __init__( self, R: NDArray, dt: float = 0.004, nt: Optional[int] = None, dr: float = 1.0, nfmax: Optional[int] = None, wav: Optional[NDArray] = None, toff: float = 0.0, nsmooth: int = 10, saveRt: bool = True, prescaled: bool = False, fftengine: str = "numpy", dtype: DTypeLike = "float64", ) -> None: # Save inputs into class self.dt = dt self.dr = dr self.wav = wav self.toff = toff self.nsmooth = nsmooth self.saveRt = saveRt self.prescaled = prescaled self.fftengine = fftengine self.dtype = dtype self.explicit = False self.ncp = get_array_module(R) # Infer dimensions of R if not np.iscomplexobj(R): self.ns, self.nr, self.nt = R.shape self.nfmax = nfmax else: self.ns, self.nr, self.nfmax = R.shape self.nt = nt if nt is None: logging.error("nt must be provided as R is in frequency") self.nt2 = int(2 * self.nt - 1) self.t = np.arange(self.nt) * self.dt # Fix nfmax to be at maximum equal to half of the size of fft samples if self.nfmax is None or self.nfmax > np.ceil((self.nt2 + 1) / 2): self.nfmax = int(np.ceil((self.nt2 + 1) / 2)) logging.warning("nfmax set equal to (nt+1)/2=%d", self.nfmax) # Add negative time to reflection data and convert to frequency if not np.iscomplexobj(R): Rtwosided = np.concatenate( (self.ncp.zeros((self.ns, self.nr, self.nt - 1), dtype=R.dtype), R), axis=-1, ) Rtwosided_fft = np.fft.rfft(Rtwosided, self.nt2, axis=-1) / np.sqrt( self.nt2 ) self.Rtwosided_fft = Rtwosided_fft[..., :nfmax] else: self.Rtwosided_fft = R # bring frequency to first dimension self.Rtwosided_fft = self.Rtwosided_fft.transpose(2, 0, 1) def apply_onepoint( self, trav: NDArray, G0: Optional[NDArray] = None, nfft: Optional[int] = None, rtm: bool = False, greens: bool = False, dottest: bool = False, usematmul: bool = False, **kwargs_solver ) -> Union[ Tuple[NDArray, NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray], Tuple[NDArray, NDArray], ]: r"""Marchenko redatuming for one point Solve the Marchenko 
redatuming inverse problem for a single point given its direct arrival traveltime curve (``trav``) and waveform (``G0``). Parameters ---------- trav : :obj:`numpy.ndarray` Traveltime of first arrival from subsurface point to surface receivers of size :math:`[n_r \times 1]` G0 : :obj:`numpy.ndarray`, optional Direct arrival in time domain of size :math:`[n_r \times n_t]` (if None, create arrival using ``trav``) nfft : :obj:`int`, optional Number of samples in fft when creating the analytical direct wave rtm : :obj:`bool`, optional Compute and return rtm redatuming greens : :obj:`bool`, optional Compute and return Green's functions dottest : :obj:`bool`, optional Apply dot-test usematmul : :obj:`bool`, optional Use :func:`numpy.matmul` (``True``) or for-loop with :func:`numpy.dot` (``False``) in :py:class:`pylops.signalprocessing.Fredholm1` operator. Refer to Fredholm1 documentation for details. **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used as default for numpy and cupy `data`, respectively) Returns ---------- f1_inv_minus : :obj:`numpy.ndarray` Inverted upgoing focusing function of size :math:`[n_r \times n_t]` f1_inv_plus : :obj:`numpy.ndarray` Inverted downgoing focusing function of size :math:`[n_r \times n_t]` p0_minus : :obj:`numpy.ndarray` Single-scattering standard redatuming upgoing Green's function of size :math:`[n_r \times n_t]` g_inv_minus : :obj:`numpy.ndarray` Inverted upgoing Green's function of size :math:`[n_r \times n_t]` g_inv_plus : :obj:`numpy.ndarray` Inverted downgoing Green's function of size :math:`[n_r \times n_t]` """ # Create window trav_off = trav - self.toff trav_off = np.round(trav_off / self.dt).astype(int) w = np.zeros((self.nr, self.nt), dtype=self.dtype) for ir in range(self.nr): w[ir, : trav_off[ir]] = 1 w = np.hstack((np.fliplr(w), w[:, 1:])) if self.nsmooth > 0: smooth = np.ones(self.nsmooth, dtype=self.dtype) / 
self.nsmooth w = filtfilt(smooth, 1, w) w = to_cupy_conditional(self.Rtwosided_fft, w) # Create operators Rop = MDC( self.Rtwosided_fft, self.nt2, nv=1, dt=self.dt, dr=self.dr, twosided=True, conj=False, fftengine=self.fftengine, saveGt=self.saveRt, prescaled=self.prescaled, usematmul=usematmul, ) R1op = MDC( self.Rtwosided_fft, self.nt2, nv=1, dt=self.dt, dr=self.dr, twosided=True, conj=True, fftengine=self.fftengine, saveGt=self.saveRt, prescaled=self.prescaled, usematmul=usematmul, ) Rollop = Roll( (self.nt2, self.ns), axis=0, shift=-1, dtype=self.dtype, ) Wop = Diagonal(w.T.ravel()) Iop = Identity(self.nr * self.nt2) Mop = Block( [[Iop, -1 * Wop * Rop], [-1 * Wop * Rollop * R1op, Iop]] ) * BlockDiag([Wop, Wop]) Gop = Block([[Iop, -1 * Rop], [-1 * Rollop * R1op, Iop]]) if dottest: Dottest( Gop, 2 * self.ns * self.nt2, 2 * self.nr * self.nt2, raiseerror=True, verb=True, backend=get_module_name(self.ncp), ) if dottest: Dottest( Mop, 2 * self.ns * self.nt2, 2 * self.nr * self.nt2, raiseerror=True, verb=True, backend=get_module_name(self.ncp), ) # Create input focusing function if G0 is None: if self.wav is not None and nfft is not None: G0 = ( directwave( self.wav, trav, self.nt, self.dt, nfft=nfft, derivative=True ) ).T G0 = to_cupy_conditional(self.Rtwosided_fft, G0) else: logging.error( "wav and/or nfft are not provided. " "Provide either G0 or wav and nfft..." ) raise ValueError( "wav and/or nfft are not provided. " "Provide either G0 or wav and nfft..." 
) fd_plus = np.concatenate( (np.fliplr(G0).T, self.ncp.zeros((self.nt - 1, self.nr), dtype=self.dtype)) ) # Run standard redatuming as benchmark if rtm: p0_minus = Rop * fd_plus.ravel() p0_minus = p0_minus.reshape(self.nt2, self.ns).T # Create data and inverse focusing functions d = Wop * Rop * fd_plus.ravel() d = np.concatenate( ( d.reshape(self.nt2, self.ns), self.ncp.zeros((self.nt2, self.ns), self.dtype), ) ) # Invert for focusing functions if self.ncp == np: f1_inv = lsqr(Mop, d.ravel(), **kwargs_solver)[0] else: f1_inv = cgls( Mop, d.ravel(), x0=self.ncp.zeros(2 * (2 * self.nt - 1) * self.nr, dtype=self.dtype), **kwargs_solver )[0] f1_inv = f1_inv.reshape(2 * self.nt2, self.nr) f1_inv_tot = f1_inv + np.concatenate( (self.ncp.zeros((self.nt2, self.nr), dtype=self.dtype), fd_plus) ) f1_inv_minus = f1_inv_tot[: self.nt2].T f1_inv_plus = f1_inv_tot[self.nt2 :].T if greens: # Create Green's functions g_inv = Gop * f1_inv_tot.ravel() g_inv = g_inv.reshape(2 * self.nt2, self.ns) g_inv_minus, g_inv_plus = -g_inv[: self.nt2].T, np.fliplr( g_inv[self.nt2 :].T ) if rtm and greens: return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus elif rtm: return f1_inv_minus, f1_inv_plus, p0_minus elif greens: return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus else: return f1_inv_minus, f1_inv_plus def apply_multiplepoints( self, trav: NDArray, G0: Optional[NDArray] = None, nfft: Optional[int] = None, rtm: bool = False, greens: bool = False, dottest: bool = False, usematmul: bool = False, **kwargs_solver ) -> Union[ Tuple[NDArray, NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray, NDArray], Tuple[NDArray, NDArray, NDArray], Tuple[NDArray, NDArray], ]: r"""Marchenko redatuming for multiple points Solve the Marchenko redatuming inverse problem for multiple points given their direct arrival traveltime curves (``trav``) and waveforms (``G0``). 
Parameters ---------- trav : :obj:`numpy.ndarray` Traveltime of first arrival from subsurface points to surface receivers of size :math:`[n_r \times n_{vs}]` G0 : :obj:`numpy.ndarray`, optional Direct arrival in time domain of size :math:`[n_r \times n_{vs} \times n_t]` (if None, create arrival using ``trav``) nfft : :obj:`int`, optional Number of samples in fft when creating the analytical direct wave rtm : :obj:`bool`, optional Compute and return rtm redatuming greens : :obj:`bool`, optional Compute and return Green's functions dottest : :obj:`bool`, optional Apply dot-test usematmul : :obj:`bool`, optional Use :func:`numpy.matmul` (``True``) or for-loop with :func:`numpy.dot` (``False``) in :py:class:`pylops.signalprocessing.Fredholm1` operator. Refer to Fredholm1 documentation for details. **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used as default for numpy and cupy `data`, respectively) Returns ---------- f1_inv_minus : :obj:`numpy.ndarray` Inverted upgoing focusing function of size :math:`[n_r \times n_{vs} \times n_t]` f1_inv_plus : :obj:`numpy.ndarray` Inverted downgoing focusing functionof size :math:`[n_r \times n_{vs} \times n_t]` p0_minus : :obj:`numpy.ndarray` Single-scattering standard redatuming upgoing Green's function of size :math:`[n_r \times n_{vs} \times n_t]` g_inv_minus : :obj:`numpy.ndarray` Inverted upgoing Green's function of size :math:`[n_r \times n_{vs} \times n_t]` g_inv_plus : :obj:`numpy.ndarray` Inverted downgoing Green's function of size :math:`[n_r \times n_{vs} \times n_t]` """ nvs = trav.shape[1] # Create window trav_off = trav - self.toff trav_off = np.round(trav_off / self.dt).astype(int) w = np.zeros((self.nr, nvs, self.nt), dtype=self.dtype) for ir in range(self.nr): for ivs in range(nvs): w[ir, ivs, : trav_off[ir, ivs]] = 1 w = np.concatenate((np.flip(w, axis=-1), w[:, :, 1:]), axis=-1) if self.nsmooth > 0: smooth = 
np.ones(self.nsmooth, dtype=self.dtype) / self.nsmooth w = filtfilt(smooth, 1, w) w = to_cupy_conditional(self.Rtwosided_fft, w) # Create operators Rop = MDC( self.Rtwosided_fft, self.nt2, nv=nvs, dt=self.dt, dr=self.dr, twosided=True, conj=False, fftengine=self.fftengine, prescaled=self.prescaled, usematmul=usematmul, ) R1op = MDC( self.Rtwosided_fft, self.nt2, nv=nvs, dt=self.dt, dr=self.dr, twosided=True, conj=True, fftengine=self.fftengine, prescaled=self.prescaled, usematmul=usematmul, ) Rollop = Roll( (self.nt2, self.ns, nvs), axis=0, shift=-1, dtype=self.dtype, ) Wop = Diagonal(w.transpose(2, 0, 1).ravel()) Iop = Identity(self.nr * nvs * self.nt2) Mop = Block( [[Iop, -1 * Wop * Rop], [-1 * Wop * Rollop * R1op, Iop]] ) * BlockDiag([Wop, Wop]) Gop = Block([[Iop, -1 * Rop], [-1 * Rollop * R1op, Iop]]) if dottest: Dottest( Gop, 2 * self.nr * nvs * self.nt2, 2 * self.nr * nvs * self.nt2, raiseerror=True, verb=True, backend=get_module_name(self.ncp), ) if dottest: Dottest( Mop, 2 * self.ns * nvs * self.nt2, 2 * self.nr * nvs * self.nt2, raiseerror=True, verb=True, backend=get_module_name(self.ncp), ) # Create input focusing function if G0 is None: if self.wav is not None and nfft is not None: G0 = np.zeros((self.nr, nvs, self.nt), dtype=self.dtype) for ivs in range(nvs): G0[:, ivs] = ( directwave( self.wav, trav[:, ivs], self.nt, self.dt, nfft=nfft, derivative=True, ) ).T G0 = to_cupy_conditional(self.Rtwosided_fft, G0) else: logging.error( "wav and/or nfft are not provided. " "Provide either G0 or wav and nfft..." ) raise ValueError( "wav and/or nfft are not provided. " "Provide either G0 or wav and nfft..." 
) fd_plus = np.concatenate( ( np.flip(G0, axis=-1).transpose(2, 0, 1), self.ncp.zeros((self.nt - 1, self.nr, nvs), dtype=self.dtype), ) ) # Run standard redatuming as benchmark if rtm: p0_minus = Rop * fd_plus.ravel() p0_minus = p0_minus.reshape(self.nt2, self.ns, nvs).transpose(1, 2, 0) # Create data and inverse focusing functions d = Wop * Rop * fd_plus.ravel() d = np.concatenate( ( d.reshape(self.nt2, self.ns, nvs), self.ncp.zeros((self.nt2, self.ns, nvs), dtype=self.dtype), ) ) # Invert for focusing functions if self.ncp == np: f1_inv = lsqr(Mop, d.ravel(), **kwargs_solver)[0] else: f1_inv = cgls( Mop, d.ravel(), x0=self.ncp.zeros( 2 * (2 * self.nt - 1) * self.nr * nvs, dtype=self.dtype ), **kwargs_solver )[0] f1_inv = f1_inv.reshape(2 * self.nt2, self.nr, nvs) f1_inv_tot = f1_inv + np.concatenate( (self.ncp.zeros((self.nt2, self.nr, nvs), dtype=self.dtype), fd_plus) ) f1_inv_minus = f1_inv_tot[: self.nt2].transpose(1, 2, 0) f1_inv_plus = f1_inv_tot[self.nt2 :].transpose(1, 2, 0) if greens: # Create Green's functions g_inv = Gop * f1_inv_tot.ravel() g_inv = g_inv.reshape(2 * self.nt2, self.ns, nvs) g_inv_minus = -g_inv[: self.nt2].transpose(1, 2, 0) g_inv_plus = np.flip(g_inv[self.nt2 :], axis=0).transpose(1, 2, 0) if rtm and greens: return f1_inv_minus, f1_inv_plus, p0_minus, g_inv_minus, g_inv_plus elif rtm: return f1_inv_minus, f1_inv_plus, p0_minus elif greens: return f1_inv_minus, f1_inv_plus, g_inv_minus, g_inv_plus else: return f1_inv_minus, f1_inv_plus
25,821
34.715076
87
py
pylops
pylops-master/pylops/waveeqprocessing/__init__.py
""" Wave Equation processing ======================== The subpackage waveeqprocessing provides linear operators and applications aimed at solving various inverse problems in the area of Seismic Wave Equation Processing. A list of operators present in pylops.waveeqprocessing: PressureToVelocity Pressure to Vertical velocity conversion. UpDownComposition2D 2D Up-down wavefield composition. UpDownComposition3D 3D Up-down wavefield composition. MDC Multi-dimensional convolution. PhaseShift Phase shift operator. BlendingContinuous Continuous Blending operator. BlendingGroup Group Blending operator. BlendingHalf Half Blending operator. Kirchhoff Kirchoff demigration operator. AcousticWave2D Two-way wave equation demigration operator. and a list of applications: SeismicInterpolation Seismic interpolation (or regularization). Deghosting Single-component wavefield decomposition. WavefieldDecomposition Multi-component wavefield decomposition. MDD Multi-dimensional deconvolution. Marchenko Marchenko redatuming. LSM Least-squares Migration (LSM). """ from .blending import * from .kirchhoff import * from .lsm import * from .marchenko import * from .mdd import * from .oneway import * from .seismicinterpolation import * from .twoway import * from .wavedecomposition import * __all__ = [ "MDC", "MDD", "Marchenko", "SeismicInterpolation", "PressureToVelocity", "UpDownComposition2D", "UpDownComposition3D", "WavefieldDecomposition", "PhaseShift", "Deghosting", "BlendingContinuous", "BlendingGroup", "BlendingHalf", "Kirchhoff", "AcousticWave2D", "LSM", ]
1,990
31.639344
79
py
pylops
pylops-master/pylops/waveeqprocessing/blending.py
__all__ = [ "BlendingContinuous", "BlendingGroup", "BlendingHalf", ] import numpy as np from pylops import LinearOperator from pylops.basicoperators import BlockDiag, HStack, Pad from pylops.signalprocessing import Shift from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, NDArray class BlendingContinuous(LinearOperator): r"""Continuous blending operator Blend seismic shot gathers in continuous mode based on pre-defined sequence of firing times. The size of input model vector must be :math:`n_s \times n_r \times n_t`, whilst the size of the data vector is :math:`n_r \times n_{t,tot}`. Parameters ---------- nt : :obj:`int` Number of time samples nr : :obj:`int` Number of receivers ns : :obj:`int` Number of sources dt : :obj:`float` Time sampling in seconds times : :obj:`np.ndarray` Absolute ignition times for each source nproc : :obj:`int`, optional Number of processors used when applying operator dtype : :obj:`str`, optional Operator dtype name : :obj:`str`, optional Name of operator (to be used by :func:`pylops.utils.describe.describe`) Notes ----- Simultaneous shooting or blending is the process of acquiring seismic data firing consecutive sources at short time intervals (shorter than the time requires for all significant waves to come back from the Earth interior). Continuous blending refers to an acquisition scenario where a source towed behind a single vessel is fired at irregular time intervals (``times``) to create a continuous recording whose modelling operator is .. math:: \Phi = [\Phi_1, \Phi_2, ..., \Phi_N] where each :math:`\Phi_i` operator applies a time-shift equal to the absolute ignition time provided in the variable ``times``. 
""" def __init__( self, nt: int, nr: int, ns: int, dt: float, times: NDArray, dtype: DTypeLike = "float64", name: str = "B", ) -> None: self.dtype = np.dtype(dtype) self.nt = nt self.nr = nr self.ns = ns self.dt = dt self.times = times self.nttot = int(np.max(self.times) / self.dt + self.nt + 1) self.PadOp = Pad((self.nr, self.nt), ((0, 0), (0, 1)), dtype=self.dtype) # Define shift operators self.shifts = [] self.ShiftOps = [] for i in range(self.ns): shift = self.times[i] # This is the part that fits on the grid shift_int = int(shift // self.dt) self.shifts.append(shift_int) # This is the fractional part diff = (shift / self.dt - shift_int) * self.dt if diff == 0: self.ShiftOps.append(None) else: self.ShiftOps.append( Shift( (self.nr, self.nt + 1), diff, axis=1, sampling=self.dt, real=False, dtype=self.dtype, ) ) super().__init__( dtype=np.dtype(dtype), dims=(self.ns, self.nr, self.nt), dimsd=(self.nr, self.nttot), name=name, ) @reshaped def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) blended_data = ncp.zeros((self.nr, self.nttot), dtype=self.dtype) for i, shift_int in enumerate(self.shifts): if self.ShiftOps[i] is None: blended_data[:, shift_int : shift_int + self.nt] += x[i, :, :] else: shifted_data = self.ShiftOps[i] * self.PadOp * x[i, :, :] blended_data[:, shift_int : shift_int + self.nt + 1] += shifted_data return blended_data @reshaped def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) deblended_data = ncp.zeros((self.ns, self.nr, self.nt), dtype=self.dtype) for i, shift_int in enumerate(self.shifts): if self.ShiftOps[i] is None: deblended_data[i, :, :] = x[:, shift_int : shift_int + self.nt] else: shifted_data = ( self.PadOp.H * self.ShiftOps[i].H * x[:, shift_int : shift_int + self.nt + 1] ) deblended_data[i, :, :] = shifted_data return deblended_data def BlendingGroup( nt: int, nr: int, ns: int, dt: float, times: NDArray, group_size: int, n_groups: int, nproc: int = 1, dtype: DTypeLike = "float64", name: str = "B", ) 
-> LinearOperator: r"""Group blending operator Blend seismic shot gathers in group blending mode based on pre-defined sequence of firing times. In group blending a number of spatially closed sources are fired in a short interval. These sources belong to one group. The next group of sources is fired after all the data of the previous group has been recorded. The size of input model vector must be :math:`n_s \times n_r \times n_t`, whilst the size of the data vector is :math:`n_{groups} \times n_r \times n_{t,tot}`. Parameters ---------- nt : :obj:`int` Number of time samples nr : :obj:`int` Number of receivers ns : :obj:`int` Number of sources. Equal to group_size x n_groups dt : :obj:`float` Time sampling in seconds times : :obj:`np.ndarray` Absolute ignition times for each source. This should have dimensions :math:`n_{groups} \times group_{size}`, where each row contains the firing times for every group. group_size : :obj:`int` The number of sources per group n_groups : :obj:`int` The number of groups nproc : :obj:`int`, optional Number of processors used when applying operator dtype : :obj:`str`, optional Operator dtype name : :obj:`str`, optional Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Bop : :obj:`pylops.LinearOperator` Blending operator Notes ----- Simultaneous shooting or blending is the process of acquiring seismic data firing consecutive sources at short time intervals (shorter than the time requires for all significant waves to come back from the Earth interior). Group blending refers to an acquisition scenario where two or more sources are towed behind a single vessel and fired at short time differences. The same experiment is repeated :math:`n_{groups}` times to create :math:`n_{groups}` blended recordings. For the case of 2 sources and an overall number of :math:`N=n_{groups}*group_{size}` shots, the modelling operator is .. math:: \Phi = \begin{bmatrix} \Phi_1 & \Phi_2 & \mathbf{0} & \mathbf{0} & ... 
& \mathbf{0} & \mathbf{0} \\ \mathbf{0} & \mathbf{0} & \Phi_3 & \Phi_4 & ... & \mathbf{0} & \mathbf{0} \\ ... & ... & ... & ... & ... & ... & ... \\ \mathbf{0} & \mathbf{0} & \mathbf{0} & \mathbf{0} & ... & \Phi_{N-1} & \Phi_{N} \end{bmatrix} where each :math:`\Phi_i` operator applies a time-shift equal to the absolute ignition time provided in the variable ``times``. """ if times.shape[0] != group_size: raise ValueError("The first dimension of times must equal group_size") Bop = [] for i in range(n_groups): Hop = [] for j in range(group_size): ShiftOp = Shift( (nr, nt), times[j, i], axis=1, sampling=dt, real=False, dtype=dtype ) Hop.append(ShiftOp) Bop.append(HStack(Hop)) Bop = BlockDiag(Bop, nproc=nproc) Bop.dims = (ns, nr, nt) Bop.dimsd = (n_groups, nr, nt) Bop.name = name return Bop def BlendingHalf( nt: int, nr: int, ns: int, dt: float, times: NDArray, group_size: int, n_groups: int, nproc: int = 1, dtype: DTypeLike = "float64", name: str = "B", ) -> LinearOperator: r"""Half blending operator Blend seismic shot gathers in half blending mode based on pre-defined sequence of firing times. This type of blending assumes that there are multiple sources at different spatial locations firing at the same time. This means that the blended data only partially overlaps in space. The size of input model vector must be :math:`n_s \times n_r \times n_t`, whilst the size of the data vector is :math:`n_{groups} \times n_r \times n_{t,tot}`. Parameters ---------- nt : :obj:`int` Number of time samples nr : :obj:`int` Number of receivers ns : :obj:`int` Number of sources. Equal to group_size x n_groups dt : :obj:`float` Time sampling in seconds times : :obj:`np.ndarray` Absolute ignition times for each source. This should have dimensions :math`n_{groups} \times group_{size}`, where each row contains the firing times for every group. 
group_size : :obj:`int` The number of sources per group n_groups : :obj:`int` The number of groups nproc : :obj:`int`, optional Number of processors used when applying operator dtype : :obj:`str`, optional Operator dtype name : :obj:`str`, optional Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Bop : :obj:`pylops.LinearOperator` Blending operator Notes ----- Simultaneous shooting or blending is the process of acquiring seismic data firing consecutive sources at short time intervals (shorter than the time requires for all significant waves to come back from the Earth interior). Half blending refers to an acquisition scenario where two or more vessels, each with a source are fired at short time differences. The same experiment is repeated :math:`n_{groups}` times to create :math:`n_{groups}` blended recordings. For the case of 2 sources and an overall number of :math:`N=n_{groups}*group_{size}` shots .. math:: \Phi = \begin{bmatrix} \Phi_1 & \mathbf{0} & \mathbf{0} & ... & \Phi_{N/2} & \mathbf{0} & \mathbf{0} & \\ \mathbf{0} & \Phi_2 & \mathbf{0} & & \mathbf{0} & \Phi_{N/2+1} & \mathbf{0} \\ ... & ... & ... & ... & ... & ... & ... \\ \mathbf{0} & \mathbf{0} & \mathbf{0} & \Phi_{N/2-1} & \mathbf{0} & \mathbf{0} & \Phi_{N} \\ \end{bmatrix} where each :math:`\Phi_i` operator applies a time-shift equal to the absolute ignition time provided in the variable ``times``. """ if times.shape[0] != group_size: raise ValueError("The first dimension of times must equal group_size") Bop = [] for j in range(group_size): OpShift = [] for i in range(n_groups): ShiftOp = Shift( (nr, nt), times[j, i], axis=1, sampling=dt, real=False, dtype=dtype ) OpShift.append(ShiftOp) Dop = BlockDiag(OpShift, nproc=nproc) Bop.append(Dop) Bop = HStack(Bop) Bop.dims = (ns, nr, nt) Bop.dimsd = (n_groups, nr, nt) Bop.name = name return Bop
11,626
35.334375
115
py
pylops
pylops-master/pylops/waveeqprocessing/twoway.py
__all__ = ["AcousticWave2D"] from typing import Tuple import numpy as np from pylops import LinearOperator from pylops.utils import deps from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray, SamplingLike devito_message = deps.devito_import("the twoway module") if devito_message is None: from examples.seismic import AcquisitionGeometry, Model from examples.seismic.acoustic import AcousticWaveSolver class AcousticWave2D(LinearOperator): """Devito Acoustic propagator. Parameters ---------- shape : :obj:`tuple` or :obj:`numpy.ndarray` Model shape ``(nx, nz)`` origin : :obj:`tuple` or :obj:`numpy.ndarray` Model origin ``(ox, oz)`` spacing : :obj:`tuple` or :obj:`numpy.ndarray` Model spacing ``(dx, dz)`` vp : :obj:`numpy.ndarray` Velocity model in m/s src_x : :obj:`numpy.ndarray` Source x-coordinates in m src_z : :obj:`numpy.ndarray` or :obj:`float` Source z-coordinates in m rec_x : :obj:`numpy.ndarray` Receiver x-coordinates in m rec_z : :obj:`numpy.ndarray` or :obj:`float` Receiver z-coordinates in m t0 : :obj:`float` Initial time tn : :obj:`int` Number of time samples src_type : :obj:`str` Source type space_order : :obj:`int`, optional Spatial ordering of FD stencil nbl : :obj:`int`, optional Number ordering of samples in absorbing boundaries f0 : :obj:`float`, optional Source peak frequency (Hz) checkpointing : :obj:`bool`, optional Use checkpointing (``True``) or not (``False``). Note that using checkpointing is needed when dealing with large models but it will slow down computations dtype : :obj:`str`, optional Type of elements in input array. 
name : :obj:`str`, optional Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) """ def __init__( self, shape: InputDimsLike, origin: SamplingLike, spacing: SamplingLike, vp: NDArray, src_x: NDArray, src_z: NDArray, rec_x: NDArray, rec_z: NDArray, t0: float, tn: int, src_type: str = "Ricker", space_order: int = 6, nbl: int = 20, f0: float = 20.0, checkpointing: bool = False, dtype: DTypeLike = "float32", name: str = "A", ) -> None: if devito_message is not None: raise NotImplementedError(devito_message) # create model self._create_model(shape, origin, spacing, vp, space_order, nbl) self._create_geometry(src_x, src_z, rec_x, rec_z, t0, tn, src_type, f0=f0) self.checkpointing = checkpointing super().__init__( dtype=np.dtype(dtype), dims=vp.shape, dimsd=(len(src_x), len(rec_x), self.geometry.nt), explicit=False, name=name, ) @staticmethod def _crop_model(m: NDArray, nbl: int) -> NDArray: """Remove absorbing boundaries from model""" return m[nbl:-nbl, nbl:-nbl] def _create_model( self, shape: InputDimsLike, origin: SamplingLike, spacing: SamplingLike, vp: NDArray, space_order: int = 6, nbl: int = 20, ) -> None: """Create model Parameters ---------- shape : :obj:`numpy.ndarray` Model shape ``(nx, nz)`` origin : :obj:`numpy.ndarray` Model origin ``(ox, oz)`` spacing : :obj:`numpy.ndarray` Model spacing ``(dx, dz)`` vp : :obj:`numpy.ndarray` Velocity model in m/s space_order : :obj:`int`, optional Spatial ordering of FD stencil nbl : :obj:`int`, optional Number ordering of samples in absorbing boundaries """ self.space_order = space_order self.model = Model( space_order=space_order, vp=vp * 1e-3, origin=origin, shape=shape, dtype=np.float32, spacing=spacing, nbl=nbl, bcs="damp", ) def _create_geometry( self, src_x: NDArray, src_z: NDArray, rec_x: NDArray, rec_z: NDArray, t0: float, tn: 
int, src_type: str, f0: float = 20.0, ) -> None: """Create geometry and time axis Parameters ---------- src_x : :obj:`numpy.ndarray` Source x-coordinates in m src_z : :obj:`numpy.ndarray` or :obj:`float` Source z-coordinates in m rec_x : :obj:`numpy.ndarray` Receiver x-coordinates in m rec_z : :obj:`numpy.ndarray` or :obj:`float` Receiver z-coordinates in m t0 : :obj:`float` Initial time tn : :obj:`int` Number of time samples src_type : :obj:`str` Source type f0 : :obj:`float`, optional Source peak frequency (Hz) """ nsrc, nrec = len(src_x), len(rec_x) src_coordinates = np.empty((nsrc, 2)) src_coordinates[:, 0] = src_x src_coordinates[:, 1] = src_z rec_coordinates = np.empty((nrec, 2)) rec_coordinates[:, 0] = rec_x rec_coordinates[:, 1] = rec_z self.geometry = AcquisitionGeometry( self.model, rec_coordinates, src_coordinates, t0, tn, src_type=src_type, f0=None if f0 is None else f0 * 1e-3, ) def _srcillumination_oneshot(self, isrc: int) -> Tuple[NDArray, NDArray]: """Source wavefield and illumination for one shot Parameters ---------- isrc : :obj:`int` Index of source to model Returns ------- u0 : :obj:`np.ndarray` Source wavefield src_ill : :obj:`np.ndarray` Source illumination """ # create geometry for single source geometry = AcquisitionGeometry( self.model, self.geometry.rec_positions, self.geometry.src_positions[isrc, :], self.geometry.t0, self.geometry.tn, f0=self.geometry.f0, src_type=self.geometry.src_type, ) solver = AcousticWaveSolver(self.model, geometry, space_order=self.space_order) # source wavefield u0 = solver.forward(save=True)[1] # source illumination src_ill = self._crop_model((u0.data**2).sum(axis=0), self.model.nbl) return u0, src_ill def srcillumination_allshots(self, savewav: bool = False) -> None: """Source wavefield and illumination for all shots Parameters ---------- savewav : :obj:`bool`, optional Save source wavefield (``True``) or not (``False``) """ nsrc = self.geometry.src_positions.shape[0] if savewav: self.src_wavefield = [] 
self.src_illumination = np.zeros(self.model.shape) for isrc in range(nsrc): src_wav, src_ill = self._srcillumination_oneshot(isrc) if savewav: self.src_wavefield.append(src_wav) self.src_illumination += src_ill def _born_oneshot(self, isrc: int, dm: NDArray) -> NDArray: """Born modelling for one shot Parameters ---------- isrc : :obj:`int` Index of source to model dm : :obj:`np.ndarray` Model perturbation Returns ------- d : :obj:`np.ndarray` Data """ # create geometry for single source geometry = AcquisitionGeometry( self.model, self.geometry.rec_positions, self.geometry.src_positions[isrc, :], self.geometry.t0, self.geometry.tn, f0=self.geometry.f0, src_type=self.geometry.src_type, ) # set perturbation dmext = np.zeros(self.model.grid.shape, dtype=np.float32) dmext[self.model.nbl : -self.model.nbl, self.model.nbl : -self.model.nbl] = dm # solve solver = AcousticWaveSolver(self.model, geometry, space_order=self.space_order) d = solver.jacobian(dmext)[0] d = d.resample(geometry.dt).data[:][: geometry.nt].T return d def _born_allshots(self, dm: NDArray) -> NDArray: """Born modelling for all shots Parameters ----------- dm : :obj:`np.ndarray` Model perturbation Returns ------- dtot : :obj:`np.ndarray` Data for all shots """ nsrc = self.geometry.src_positions.shape[0] dtot = [] for isrc in range(nsrc): d = self._born_oneshot(isrc, dm) dtot.append(d) dtot = np.array(dtot).reshape(nsrc, d.shape[0], d.shape[1]) return dtot def _bornadj_oneshot(self, isrc, dobs): """Adjoint born modelling for one shot Parameters ---------- isrc : :obj:`float` Index of source to model dobs : :obj:`np.ndarray` Observed data to inject Returns ------- model : :obj:`np.ndarray` Model """ # create geometry for single source geometry = AcquisitionGeometry( self.model, self.geometry.rec_positions, self.geometry.src_positions[isrc, :], self.geometry.t0, self.geometry.tn, f0=self.geometry.f0, src_type=self.geometry.src_type, ) # create boundary data recs = self.geometry.rec.copy() recs.data[:] = 
dobs.T[:] solver = AcousticWaveSolver(self.model, geometry, space_order=self.space_order) # source wavefield if hasattr(self, "src_wavefield"): u0 = self.src_wavefield[isrc] else: u0 = solver.forward(save=True)[1] # adjoint modelling (reverse wavefield plus imaging condition) model = solver.jacobian_adjoint( rec=recs, u=u0, checkpointing=self.checkpointing )[0] return model def _bornadj_allshots(self, dobs: NDArray) -> NDArray: """Adjoint Born modelling for all shots Parameters ---------- dobs : :obj:`np.ndarray` Observed data to inject Returns ------- model : :obj:`np.ndarray` Model """ nsrc = self.geometry.src_positions.shape[0] mtot = np.zeros(self.model.shape, dtype=np.float32) for isrc in range(nsrc): m = self._bornadj_oneshot(isrc, dobs[isrc]) mtot += self._crop_model(m.data, self.model.nbl) return mtot @reshaped def _matvec(self, x: NDArray) -> NDArray: y = self._born_allshots(x) return y @reshaped def _rmatvec(self, x: NDArray) -> NDArray: y = self._bornadj_allshots(x) return y
11,425
28.147959
87
py
pylops
pylops-master/pylops/avo/avo.py
__all__ = [ "zoeppritz_scattering", "zoeppritz_element", "zoeppritz_pp", "approx_zoeppritz_pp", "akirichards", "fatti", "ps", "AVOLinearModelling", ] import logging from typing import List, Optional, Tuple, Union import numpy as np import numpy.typing as npt from numpy import cos, sin, tan from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def zoeppritz_scattering( vp1: float, vs1: float, rho1: float, vp0: float, vs0: float, rho0: float, theta1: Union[float, npt.ArrayLike], ) -> NDArray: r"""Zoeppritz solution. Calculates the angle dependent p-wave reflectivity of an interface between two media for a set of incident angles. Parameters ---------- vp1 : :obj:`float` P-wave velocity of the upper medium vs1 : :obj:`float` S-wave velocity of the upper medium rho1 : :obj:`float` Density of the upper medium vp0 : :obj:`float` P-wave velocity of the lower medium vs0 : :obj:`float` S-wave velocity of the lower medium rho0 : :obj:`float` Density of the lower medium theta1 : :obj:`np.ndarray` or :obj:`float` Incident angles in degrees Returns ------- zoep : :obj:`np.ndarray` :math:`4 \times 4` matrix representing the scattering matrix for the incident angle ``theta1`` See also -------- zoeppritz_element : Single reflectivity element of Zoeppritz solution zoeppritz_pp : PP reflectivity element of Zoeppritz solution """ ncp = get_array_module(theta1) # Create theta1 array of angles in radiants if isinstance(theta1, (int, float)): theta1 = ncp.array( [ float(theta1), ] ) elif isinstance(theta1, (list, tuple)): theta1 = ncp.array(theta1) theta1 = ncp.radians(theta1) # Set the ray parameter p p = sin(theta1) / vp1 # Calculate reflection & transmission angles for Zoeppritz theta2 = ncp.arcsin(p * vp0) # Trans. 
angle of P-wave phi1 = ncp.arcsin(p * vs1) # Refl. angle of converted S-wave phi2 = ncp.arcsin(p * vs0) # Trans. angle of converted S-wave # Matrix form of Zoeppritz equation M = ncp.array( [ [-sin(theta1), -cos(phi1), sin(theta2), cos(phi2)], [cos(theta1), -sin(phi1), cos(theta2), -sin(phi2)], [ 2 * rho1 * vs1 * sin(phi1) * cos(theta1), rho1 * vs1 * (1 - 2 * sin(phi1) ** 2), 2 * rho0 * vs0 * sin(phi2) * cos(theta2), rho0 * vs0 * (1 - 2 * sin(phi2) ** 2), ], [ -rho1 * vp1 * (1 - 2 * sin(phi1) ** 2), rho1 * vs1 * sin(2 * phi1), rho0 * vp0 * (1 - 2 * sin(phi2) ** 2), -rho0 * vs0 * sin(2 * phi2), ], ], dtype="float", ) N = ncp.array( [ [sin(theta1), cos(phi1), -sin(theta2), -cos(phi2)], [cos(theta1), -sin(phi1), cos(theta2), -sin(phi2)], [ 2 * rho1 * vs1 * sin(phi1) * cos(theta1), rho1 * vs1 * (1 - 2 * sin(phi1) ** 2), 2 * rho0 * vs0 * sin(phi2) * cos(theta2), rho0 * vs0 * (1 - 2 * sin(phi2) ** 2), ], [ rho1 * vp1 * (1 - 2 * sin(phi1) ** 2), -rho1 * vs1 * sin(2 * phi1), -rho0 * vp0 * (1 - 2 * sin(phi2) ** 2), rho0 * vs0 * sin(2 * phi2), ], ], dtype="float", ) # Create Zoeppritz coefficient for all angles zoep = ncp.zeros((4, 4, M.shape[-1])) for i in range(M.shape[-1]): Mi = M[..., i] Ni = N[..., i] dt = ncp.dot(ncp.linalg.inv(Mi), Ni) zoep[..., i] = dt return zoep def zoeppritz_element( vp1: float, vs1: float, rho1: float, vp0: float, vs0: float, rho0: float, theta1: Union[float, NDArray], element: str = "PdPu", ) -> NDArray: """Single element of Zoeppritz solution. 
Simple wrapper to :py:class:`pylops.avo.avo.scattering_matrix`, returning any mode reflection coefficient from the Zoeppritz scattering matrix for specific combination of incident and reflected wave and a set of incident angles Parameters ---------- vp1 : :obj:`float` P-wave velocity of the upper medium vs1 : :obj:`float` S-wave velocity of the upper medium rho1 : :obj:`float` Density of the upper medium vp0 : :obj:`float` P-wave velocity of the lower medium vs0 : :obj:`float` S-wave velocity of the lower medium rho0 : :obj:`float` Density of the lower medium theta1 : :obj:`np.ndarray` or :obj:`float` Incident angles in degrees element : :obj:`str`, optional Specific choice of incident and reflected wave combining any two of the following strings: ``Pd`` P-wave downgoing, ``Sd`` S-wave downgoing, ``Pu`` P-wave upgoing, ``Su`` S-wave upgoing (e.g., ``PdPu``) Returns ------- refl : :obj:`np.ndarray` reflectivity values for all input angles for specific combination of incident and reflected wave. See also -------- zoeppritz_scattering : Zoeppritz solution zoeppritz_pp : PP reflectivity element of Zoeppritz solution """ elements = np.array( [ ["PdPu", "SdPu", "PuPu", "SuPu"], ["PdSu", "SdSu", "PuSu", "SuSu"], ["PdPd", "SdPd", "PuPd", "SuPd"], ["PdSd", "SdSd", "PuSd", "SuSd"], ] ) refl = zoeppritz_scattering(vp1, vs1, rho1, vp0, vs0, rho0, theta1) element = np.where(elements == element) return np.squeeze(refl[element]) def zoeppritz_pp( vp1: float, vs1: float, rho1: float, vp0: float, vs0: float, rho0: float, theta1: Union[float, NDArray], ) -> NDArray: """PP reflection coefficient from the Zoeppritz scattering matrix. 
Simple wrapper to :py:class:`pylops.avo.avo.scattering_matrix`, returning the PP reflection coefficient from the Zoeppritz scattering matrix for a set of incident angles Parameters ---------- vp1 : :obj:`float` P-wave velocity of the upper medium vs1 : :obj:`float` S-wave velocity of the upper medium rho1 : :obj:`float` Density of the upper medium vp0 : :obj:`float` P-wave velocity of the lower medium vs0 : :obj:`float` S-wave velocity of the lower medium rho0 : :obj:`float` Density of the lower medium theta1 : :obj:`np.ndarray` or :obj:`float` Incident angles in degrees Returns ------- PPrefl : :obj:`np.ndarray` PP reflectivity values for all input angles. See also -------- zoeppritz_scattering : Zoeppritz solution zoeppritz_element : Single reflectivity element of Zoeppritz solution """ PPrefl = zoeppritz_element(vp1, vs1, rho1, vp0, vs0, rho0, theta1, "PdPu") return PPrefl def approx_zoeppritz_pp( vp1: Union[List, Tuple, npt.ArrayLike], vs1: Union[List, Tuple, npt.ArrayLike], rho1: Union[List, Tuple, npt.ArrayLike], vp0: Union[List, Tuple, npt.ArrayLike], vs0: Union[List, Tuple, npt.ArrayLike], rho0: Union[List, Tuple, npt.ArrayLike], theta1: Union[float, NDArray], ) -> NDArray: """PP reflection coefficient from the approximate Zoeppritz equation. Approximate calculation of PP reflection from the Zoeppritz scattering matrix for a set of incident angles [1]_. .. [1] Dvorkin et al. Seismic Reflections of Rock Properties. Cambridge. 2014. 
Parameters ---------- vp1 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` P-wave velocity of the upper medium vs1 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` S-wave velocity of the upper medium rho1 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` Density of the upper medium vp0 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` P-wave velocity of the lower medium vs0 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` S-wave velocity of the lower medium rho0 : :obj:`np.ndarray` or :obj:`list` or :obj:`tuple` Density of the lower medium theta1 : :obj:`np.ndarray` or :obj:`float` Incident angles in degrees Returns ------- PPrefl : :obj:`np.ndarray` PP reflectivity values for all input angles. See also -------- zoeppritz_scattering : Zoeppritz solution zoeppritz_element : Single reflectivity element of Zoeppritz solution zoeppritz_pp : PP reflectivity element of Zoeppritz solution """ ncp = get_array_module(theta1) vp1, vs1, rho1 = ncp.array(vp1), ncp.array(vs1), ncp.array(rho1) vp0, vs0, rho0 = ncp.array(vp0), ncp.array(vs0), ncp.array(rho0) # Incident P theta1 = theta1[:, np.newaxis] if vp1.size > 1 else theta1 theta1 = ncp.deg2rad(theta1) # Ray parameter and reflected P p = ncp.sin(theta1) / vp1 theta0 = ncp.arcsin(p * vp0) # Reflected S phi1 = ncp.arcsin(p * vs1) # Transmitted S phi0 = ncp.arcsin(p * vs0) # Coefficients a = rho0 * (1 - 2 * np.sin(phi0) ** 2.0) - rho1 * (1 - 2 * np.sin(phi1) ** 2.0) b = rho0 * (1 - 2 * np.sin(phi0) ** 2.0) + 2 * rho1 * np.sin(phi1) ** 2.0 c = rho1 * (1 - 2 * np.sin(phi1) ** 2.0) + 2 * rho0 * np.sin(phi0) ** 2.0 d = 2 * (rho0 * vs0**2 - rho1 * vs1**2) E = (b * np.cos(theta1) / vp1) + (c * np.cos(theta0) / vp0) F = (b * np.cos(phi1) / vs1) + (c * np.cos(phi0) / vs0) G = a - d * np.cos(theta1) / vp1 * np.cos(phi0) / vs0 H = a - d * np.cos(theta0) / vp0 * np.cos(phi1) / vs1 D = E * F + G * H * p**2 rpp = (1 / D) * ( F * (b * (ncp.cos(theta1) / vp1) - c * (ncp.cos(theta0) / vp0)) - H * p**2 * (a + d * (ncp.cos(theta1) / vp1) 
* (ncp.cos(phi0) / vs0)) ) return rpp def akirichards( theta: npt.ArrayLike, vsvp: Union[float, NDArray], n: int = 1, ) -> Tuple[NDArray, NDArray, NDArray]: r"""Three terms Aki-Richards approximation. Computes the coefficients of the of three terms Aki-Richards approximation for a set of angles and a constant or variable VS/VP ratio. Parameters ---------- theta : :obj:`np.ndarray` Incident angles in degrees vsvp : :obj:`np.ndarray` or :obj:`float` :math:`V_S/V_P` ratio n : :obj:`int`, optional Number of samples (if ``vsvp`` is a scalar) Returns ------- G1 : :obj:`np.ndarray` First coefficient of three terms Aki-Richards approximation :math:`[n_\theta \times n_\text{vsvp}]` G2 : :obj:`np.ndarray` Second coefficient of three terms Aki-Richards approximation :math:`[n_\theta \times n_\text{vsvp}]` G3 : :obj:`np.ndarray` Third coefficient of three terms Aki-Richards approximation :math:`[n_\theta \times n_\text{vsvp}]` Notes ----- The three terms Aki-Richards approximation [1]_, [2]_, is used to compute the reflection coefficient as linear combination of contrasts in :math:`V_P`, :math:`V_S`, and :math:`\rho.` More specifically: .. math:: R(\theta) = G_1(\theta) \frac{\Delta V_P}{\overline{V}_P} + G_2(\theta) \frac{\Delta V_S}{\overline{V}_S} + G_3(\theta) \frac{\Delta \rho}{\overline{\rho}} where .. math:: \begin{align} G_1(\theta) &= \frac{1}{2 \cos^2 \theta},\\ G_2(\theta) &= -4 (V_S/V_P)^2 \sin^2 \theta,\\ G_3(\theta) &= 0.5 - 2 (V_S/V_P)^2 \sin^2 \theta,\\ \frac{\Delta V_P}{\overline{V}_P} &= 2 \frac{V_{P,2}-V_{P,1}}{V_{P,2}+V_{P,1}},\\ \frac{\Delta V_S}{\overline{V}_S} &= 2 \frac{V_{S,2}-V_{S,1}}{V_{S,2}+V_{S,1}}, \\ \frac{\Delta \rho}{\overline{\rho}} &= 2 \frac{\rho_2-\rho_1}{\rho_2+\rho_1}. \end{align} .. [1] https://wiki.seg.org/wiki/AVO_equations .. [2] Aki, K., and Richards, P. G. (2002). Quantitative Seismology (2nd ed.). University Science Books. 
""" ncp = get_array_module(theta) theta = ncp.deg2rad(theta) vsvp = vsvp * ncp.ones(n) if not isinstance(vsvp, ncp.ndarray) else vsvp theta = theta[:, np.newaxis] if vsvp.size > 1 else theta vsvp = vsvp[:, np.newaxis].T if vsvp.size > 1 else vsvp G1 = 1.0 / (2.0 * cos(theta) ** 2) + 0 * vsvp G2 = -4.0 * vsvp**2 * np.sin(theta) ** 2 G3 = 0.5 - 2.0 * vsvp**2 * sin(theta) ** 2 return G1, G2, G3 def fatti( theta: npt.ArrayLike, vsvp: Union[float, NDArray], n: int = 1, ) -> Tuple[NDArray, NDArray, NDArray]: r"""Three terms Fatti approximation. Computes the coefficients of the three terms Fatti approximation for a set of angles and a constant or variable VS/VP ratio. Parameters ---------- theta : :obj:`np.ndarray` Incident angles in degrees vsvp : :obj:`np.ndarray` or :obj:`float` :math:`V_S/V_P` ratio n : :obj:`int`, optional Number of samples (if ``vsvp`` is a scalar) Returns ------- G1 : :obj:`np.ndarray` First coefficient of three terms Smith-Gidlow approximation :math:`[n_{\theta} \times n_\text{vsvp}]` G2 : :obj:`np.ndarray` Second coefficient of three terms Smith-Gidlow approximation :math:`[n_{\theta} \times n_\text{vsvp}]` G3 : :obj:`np.ndarray` Third coefficient of three terms Smith-Gidlow approximation :math:`[n_{\theta} \times n_\text{vsvp}]` Notes ----- The three terms Fatti approximation [1]_, [2]_, is used to compute the reflection coefficient as linear combination of contrasts in :math:`\text{AI},` :math:`\text{SI}`, and :math:`\rho.` More specifically: .. math:: R(\theta) = G_1(\theta) \frac{\Delta \text{AI}}{\bar{\text{AI}}} + G_2(\theta) \frac{\Delta \text{SI}}{\overline{\text{SI}}} + G_3(\theta) \frac{\Delta \rho}{\overline{\rho}} where .. 
math:: \begin{align} G_1(\theta) &= 0.5 (1 + \tan^2 \theta),\\ G_2(\theta) &= -4 (V_S/V_P)^2 \sin^2 \theta,\\ G_3(\theta) &= 0.5 \left(4 (V_S/V_P)^2 \sin^2 \theta - \tan^2 \theta\right),\\ \frac{\Delta \text{AI}}{\overline{\text{AI}}} &= 2 \frac{\text{AI}_2-\text{AI}_1}{\text{AI}_2+\text{AI}_1},\\ \frac{\Delta \text{SI}}{\overline{\text{SI}}} &= 2 \frac{\text{SI}_2-\text{SI}_1}{\text{SI}_2+\text{SI}_1},\\ \frac{\Delta \rho}{\overline{\rho}} &= 2 \frac{\rho_2-\rho_1}{\rho_2+\rho_1}. \end{align} .. [1] https://www.subsurfwiki.org/wiki/Fatti_equation .. [2] Jan L. Fatti, George C. Smith, Peter J. Vail, Peter J. Strauss, and Philip R. Levitt, (1994), "Detection of gas in sandstone reservoirs using AVO analysis: A 3-D seismic case history using the Geostack technique," Geophysics 59: 1362-1376. """ ncp = get_array_module(theta) theta = ncp.deg2rad(theta) vsvp = vsvp * ncp.ones(n) if not isinstance(vsvp, ncp.ndarray) else vsvp theta = theta[:, np.newaxis] if vsvp.size > 1 else theta vsvp = vsvp[:, np.newaxis].T if vsvp.size > 1 else vsvp G1 = 0.5 * (1 + np.tan(theta) ** 2) + 0 * vsvp G2 = -4 * vsvp**2 * np.sin(theta) ** 2 G3 = 0.5 * (4 * vsvp**2 * np.sin(theta) ** 2 - tan(theta) ** 2) return G1, G2, G3 def ps( theta: npt.ArrayLike, vsvp: Union[float, NDArray], n: int = 1, ) -> Tuple[NDArray, NDArray, NDArray]: r"""PS reflection coefficient Computes the coefficients for the PS approximation for a set of angles and a constant or variable VS/VP ratio. Parameters ---------- theta : :obj:`np.ndarray` Incident angles in degrees vsvp : :obj:`np.ndarray` or :obj:`float` :math:`V_S/V_P` ratio n : :obj:`int`, optional Number of samples (if ``vsvp`` is a scalar) Returns ------- G1 : :obj:`np.ndarray` First coefficient for VP :math:`[n_{\theta} \times n_\text{vsvp}]`. Since the PS reflection at zero angle is zero, this value is not used and is only available to ensure function signature compatibility with other linearization routines. 
G2 : :obj:`np.ndarray` Second coefficient for VS :math:`[n_{\theta} \times n_\text{vsvp}]` G3 : :obj:`np.ndarray` Third coefficient for density :math:`[n_{\theta} \times n_\text{vsvp}]` Notes ----- The approximation in [1]_ is used to compute the PS reflection coefficient as linear combination of contrasts in :math:`V_P`, :math:`V_S`, and :math:`\rho.` More specifically: .. math:: R(\theta) = G_2(\theta) \frac{\Delta V_S}{\bar{V_S}} + G_3(\theta) \frac{\Delta \rho}{\overline{\rho}} where .. math:: \begin{align} G_2(\theta) &= \tan \frac{\theta}{2} \left\{4 (V_S/V_P)^2 \sin^2 \theta - 4(V_S/V_P) \cos \theta \cos \phi \right\},\\ G_3(\theta) &= -\tan \frac{\theta}{2} \left\{1 - 2 (V_S/V_P)^2 \sin^2 \theta + 2(V_S/V_P) \cos \theta \cos \phi\right\},\\ \frac{\Delta V_S}{\overline{V_S}} &= 2 \frac{V_{S,2}-V_{S,1}}{V_{S,2}+V_{S,1}},\\ \frac{\Delta \rho}{\overline{\rho}} &= 2 \frac{\rho_2-\rho_1}{\rho_2+\rho_1}. \end{align} Note that :math:`\theta` is the P-incidence angle whilst :math:`\phi` is the S-reflected angle which is computed using Snell's law and the average :math:`V_S/V_P` ratio. .. [1] Xu, Y., and Bancroft, J.C., "Joint AVO analysis of PP and PS seismic data", CREWES Report, vol. 9. 1997. 
""" ncp = get_array_module(theta) theta = ncp.deg2rad(theta) vsvp = vsvp * np.ones(n) if not isinstance(vsvp, np.ndarray) else vsvp theta = theta[:, np.newaxis] if vsvp.size > 1 else theta vsvp = vsvp[:, np.newaxis].T if vsvp.size > 1 else vsvp phi = np.arcsin(vsvp * np.sin(theta)) # G1 = 0.0 * np.sin(theta) + 0 * vsvp # G2 = (np.tan(phi) / vsvp) * (4 * np.sin(phi) ** 2 - 4 * vsvp * np.cos(theta) * np.cos(phi)) + 0 * vsvp # G3 = -((np.tan(phi)) / (2 * vsvp)) * (1 + 2 * np.sin(phi) - 2 * vsvp * np.cos(theta) * np.cos(phi)) + 0 * vsvp G1 = 0.0 * np.sin(theta) + 0 * vsvp G2 = (np.tan(phi) / 2) * ( 4 * (vsvp * np.sin(phi)) ** 2 - 4 * vsvp * np.cos(theta) * np.cos(phi) ) + 0 * vsvp G3 = ( -(np.tan(phi) / 2) * (1 - 2 * (vsvp * np.sin(phi)) ** 2 + 2 * vsvp * np.cos(theta) * np.cos(phi)) + 0 * vsvp ) return G1, G2, G3 class AVOLinearModelling(LinearOperator): r"""AVO Linearized modelling. Create operator to be applied to a combination of elastic parameters for generation of seismic pre-stack reflectivity. Parameters ---------- theta : :obj:`np.ndarray` Incident angles in degrees vsvp : :obj:`np.ndarray` or :obj:`float` :math:`V_S/V_P` ratio nt0 : :obj:`int`, optional Number of samples (if ``vsvp`` is a scalar) spatdims : :obj:`int` or :obj:`tuple`, optional Number of samples along spatial axis (or axes) (``None`` if only one dimension is available) linearization : `{"akirich", "fatti", "PS"}`, optional * "akirich": Aki-Richards. See :py:func:`pylops.avo.avo.akirichards`. * "fatti": Fatti. See :py:func:`pylops.avo.avo.fatti`. * "PS": PS. See :py:func:`pylops.avo.avo.ps`. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ NotImplementedError If ``linearization`` is not an implemented linearization Notes ----- The AVO linearized operator performs a linear combination of three (or two) elastic parameters arranged in input vector :math:`\mathbf{m}` of size :math:`n_{t_0} \times N` to create the so-called seismic reflectivity: .. math:: r(t, \theta, x, y) = \sum_{i=1}^N G_i(t, \theta) m_i(t, x, y) \qquad \forall \,t,\theta where :math:`N=2,\, 3`. Note that the reflectivity can be in 1d, 2d or 3d and ``spatdims`` contains the dimensions of the spatial axis (or axes) :math:`x` and :math:`y`. """ def __init__( self, theta: NDArray, vsvp: Union[float, NDArray] = 0.5, nt0: int = 1, spatdims: Optional[Union[int, Tuple[int]]] = None, linearization: str = "akirich", dtype: DTypeLike = "float64", name: str = "A", ) -> None: self.ncp = get_array_module(theta) self.nt0 = nt0 if not isinstance(vsvp, self.ncp.ndarray) else len(vsvp) self.ntheta = len(theta) self.spatdims = () if spatdims is None else _value_or_sized_to_tuple(spatdims) # Compute AVO coefficients if linearization == "akirich": Gs = akirichards(theta, vsvp, n=self.nt0) elif linearization == "fatti": Gs = fatti(theta, vsvp, n=self.nt0) elif linearization == "ps": Gs = ps(theta, vsvp, n=self.nt0) else: logging.error("%s not an available " "linearization...", linearization) raise NotImplementedError( "%s not an available linearization..." % linearization ) self.npars = len(Gs) dims: Tuple[int, ...] = (self.nt0, self.npars) dimsd: Tuple[int, ...] 
= (self.nt0, self.ntheta) if spatdims is not None: dims += self.spatdims dimsd += self.spatdims super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) self.G = self.ncp.concatenate([gs.T[:, self.ncp.newaxis] for gs in Gs], axis=1) # add dimensions to G to account for horizonal axes for _ in range(len(self.spatdims)): self.G = self.G[..., np.newaxis] @reshaped def _matvec(self, x: NDArray) -> NDArray: return self.ncp.sum(self.G * x[:, :, self.ncp.newaxis], axis=1) @reshaped def _rmatvec(self, x: NDArray) -> NDArray: return self.ncp.sum(self.G * x[:, self.ncp.newaxis], axis=2)
22,761
32.180758
250
py
pylops
pylops-master/pylops/avo/poststack.py
__all__ = [ "PoststackLinearModelling", "PoststackInversion", ] import logging from typing import Optional, Tuple, Union import numpy as np import numpy.typing as npt from scipy.sparse.linalg import lsqr from pylops import ( FirstDerivative, Laplacian, LinearOperator, MatrixMult, SecondDerivative, ) from pylops.optimization.basic import cgls from pylops.optimization.leastsquares import regularized_inversion from pylops.optimization.sparsity import splitbregman from pylops.signalprocessing import Convolve1D from pylops.utils import dottest as Dottest from pylops.utils.backend import ( get_array_module, get_csc_matrix, get_lstsq, get_module_name, ) from pylops.utils.signalprocessing import convmtx, nonstationary_convmtx from pylops.utils.typing import NDArray, ShapeLike logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def _PoststackLinearModelling( wav, nt0, spatdims=None, explicit=False, sparse=False, kind="centered", _MatrixMult=MatrixMult, _Convolve1D=Convolve1D, _FirstDerivative=FirstDerivative, args_MatrixMult: Optional[dict] = None, args_Convolve1D: Optional[dict] = None, args_FirstDerivative: Optional[dict] = None, ): """Post-stack linearized seismic modelling operator. Used to be able to provide operators from different libraries to PoststackLinearModelling. It operates in the same way as public method (PoststackLinearModelling) but has additional input parameters allowing passing a different operator and additional arguments to be passed to such operator. """ if args_MatrixMult is None: args_MatrixMult = {} if args_Convolve1D is None: args_Convolve1D = {} if args_FirstDerivative is None: args_FirstDerivative = {} ncp = get_array_module(wav) # check kind is correctly selected if kind not in ["forward", "centered"]: raise NotImplementedError("%s not an available derivative kind..." 
% kind) # define dtype to be used dtype = wav.dtype # ensure wav.dtype rules that of operator if len(wav.shape) == 2 and wav.shape[0] != nt0: raise ValueError("Provide 1d wavelet or 2d wavelet composed of nt0 " "wavelets") spatdims: Union[int, ShapeLike] # organize dimensions if spatdims is None: dims = (nt0,) spatdims = None elif isinstance(spatdims, int): dims = (nt0, spatdims) spatdims = (spatdims,) else: dims = (nt0,) + spatdims if explicit: # Create derivative operator if kind == "centered": D = ncp.diag(0.5 * ncp.ones(nt0 - 1, dtype=dtype), k=1) - ncp.diag( 0.5 * ncp.ones(nt0 - 1, dtype=dtype), -1 ) D[0] = D[-1] = 0 else: D = ncp.diag(ncp.ones(nt0 - 1, dtype=dtype), k=1) - ncp.diag( ncp.ones(nt0, dtype=dtype), k=0 ) D[-1] = 0 # Create wavelet operator if len(wav.shape) == 1: C = convmtx(wav, nt0)[:, len(wav) // 2 : -len(wav) // 2 + 1] else: C = nonstationary_convmtx(wav, nt0, hc=wav.shape[1] // 2, pad=(nt0, nt0)) # Combine operators M = ncp.dot(C, D) if sparse: M = get_csc_matrix(wav)(M) Pop = _MatrixMult(M, otherdims=spatdims, dtype=dtype, **args_MatrixMult) else: # Create wavelet operator if len(wav.shape) == 1: Cop = _Convolve1D( dims, h=wav, offset=len(wav) // 2, axis=0, dtype=dtype, **args_Convolve1D ) else: Cop = _MatrixMult( nonstationary_convmtx(wav, nt0, hc=wav.shape[1] // 2, pad=(nt0, nt0)), otherdims=spatdims, dtype=dtype, **args_MatrixMult ) # Create derivative operator Dop = _FirstDerivative( dims, axis=0, sampling=1.0, kind=kind, dtype=dtype, **args_FirstDerivative ) Pop = Cop * Dop return Pop def PoststackLinearModelling( wav: npt.ArrayLike, nt0: int, spatdims: Optional[Union[int, ShapeLike]] = None, explicit: bool = False, sparse: bool = False, kind: str = "centered", name: Optional[str] = None, ) -> LinearOperator: r"""Post-stack linearized seismic modelling operator. Create operator to be applied to an elastic parameter trace (or stack of traces) for generation of band-limited seismic post-stack data. 
The input model and data have shape :math:`[n_{t_0} \,(\times n_x \times n_y)]`. Parameters ---------- wav : :obj:`np.ndarray` Wavelet in time domain (must have odd number of elements and centered to zero). If 1d, assume stationary wavelet for the entire time axis. If 2d, use as non-stationary wavelet (user must provide one wavelet per time sample in an array of size :math:`[n_{t_0} \times n_\text{wav}]` where :math:`n_\text{wav}` is the length of each wavelet). Note that the ``dtype`` of this variable will define that of the operator nt0 : :obj:`int` Number of samples along time axis spatdims : :obj:`int` or :obj:`tuple`, optional Number of samples along spatial axis (or axes) (``None`` if only one dimension is available) explicit : :obj:`bool`, optional Create a chained linear operator (``False``, preferred for large data) or a ``MatrixMult`` linear operator with dense matrix (``True``, preferred for small data) sparse : :obj:`bool`, optional Create a sparse matrix (``True``) or dense (``False``) when ``explicit=True`` kind : :obj:`str`, optional Derivative kind (``forward`` or ``centered``). name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Pop : :obj:`LinearOperator` post-stack modelling operator. Raises ------ ValueError If ``wav`` is two dimensional but does not contain ``nt0`` wavelets Notes ----- Post-stack seismic modelling is the process of constructing seismic post-stack data from a profile of an elastic parameter of choice in time (or depth) domain. This can be easily achieved using the following forward model: .. math:: d(t, \theta=0) = w(t) * \frac{\mathrm{d}\ln m(t)}{\mathrm{d}t} where :math:`m(t)` is the elastic parameter profile and :math:`w(t)` is the time domain seismic wavelet. In compact form: .. 
math:: \mathbf{d}= \mathbf{W} \mathbf{D} \mathbf{m} In the special case of acoustic impedance (:math:`m(t)=AI(t)`), the modelling operator can be used to create zero-offset data: .. math:: d(t, \theta=0) = \frac{1}{2} w(t) * \frac{\mathrm{d}\ln m(t)}{\mathrm{d}t} where the scaling factor :math:`\frac{1}{2}` can be easily included in the wavelet. """ Pop = _PoststackLinearModelling( wav, nt0, spatdims=spatdims, explicit=explicit, sparse=sparse, kind=kind ) Pop.name = name return Pop def PoststackInversion( data: NDArray, wav: npt.ArrayLike, m0: Optional[NDArray] = None, explicit: bool = False, simultaneous: bool = False, epsI: Optional[float] = None, epsR: Optional[float] = None, dottest: bool = False, epsRL1: Optional[float] = None, **kwargs_solver ) -> Tuple[NDArray, NDArray]: r"""Post-stack linearized seismic inversion. Invert post-stack seismic operator to retrieve an elastic parameter of choice from band-limited seismic post-stack data. Depending on the choice of input parameters, inversion can be trace-by-trace with explicit operator or global with either explicit or linear operator. Parameters ---------- data : :obj:`np.ndarray` Band-limited seismic post-stack data of size :math:`[n_{t_0}\,(\times n_x \times n_y)]` wav : :obj:`np.ndarray` Wavelet in time domain (must have odd number of elements and centered to zero). If 1d, assume stationary wavelet for the entire time axis. 
If 2d of size :math:`[n_{t_0} \times n_h]` use as non-stationary wavelet m0 : :obj:`np.ndarray`, optional Background model of size :math:`[n_{t_0}\,(\times n_x \times n_y)]` explicit : :obj:`bool`, optional Create a chained linear operator (``False``, preferred for large data) or a ``MatrixMult`` linear operator with dense matrix (``True``, preferred for small data) simultaneous : :obj:`bool`, optional Simultaneously invert entire data (``True``) or invert trace-by-trace (``False``) when using ``explicit`` operator (note that the entire data is always inverted when working with linear operator) epsI : :obj:`float`, optional Damping factor for Tikhonov regularization term epsR : :obj:`float`, optional Damping factor for additional Laplacian regularization term dottest : :obj:`bool`, optional Apply dot-test epsRL1 : :obj:`float`, optional Damping factor for additional blockiness regularization term **kwargs_solver Arbitrary keyword arguments for :py:func:`scipy.linalg.lstsq` solver (if ``explicit=True`` and ``epsR=None``) or :py:func:`scipy.sparse.linalg.lsqr` solver (if ``explicit=False`` and/or ``epsR`` is not ``None``) Returns ------- minv : :obj:`np.ndarray` Inverted model of size :math:`[n_{t_0}\,(\times n_x \times n_y)]` datar : :obj:`np.ndarray` Residual data (i.e., data - background data) of size :math:`[n_{t_0}\,(\times n_x \times n_y)]` Notes ----- The cost function and solver used in the seismic post-stack inversion module depends on the choice of ``explicit``, ``simultaneous``, ``epsI``, and ``epsR`` parameters: * ``explicit=True``, ``epsI=None`` and ``epsR=None``: the explicit solver :py:func:`scipy.linalg.lstsq` is used if ``simultaneous=False`` (or the iterative solver :py:func:`scipy.sparse.linalg.lsqr` is used if ``simultaneous=True``) * ``explicit=True`` with ``epsI`` and ``epsR=None``: the regularized normal equations :math:`\mathbf{W}^T\mathbf{d} = (\mathbf{W}^T \mathbf{W} + \epsilon_\mathbf{I}^2 \mathbf{I}) \mathbf{AI}` are instead fed into the 
:py:func:`scipy.linalg.lstsq` solver if ``simultaneous=False`` (or the iterative solver :py:func:`scipy.sparse.linalg.lsqr` if ``simultaneous=True``) * ``explicit=False`` and ``epsR=None``: the iterative solver :py:func:`scipy.sparse.linalg.lsqr` is used * ``explicit=False`` with ``epsR`` and ``epsRL1=None``: the iterative solver :py:func:`pylops.optimization.leastsquares.regularized_inversion` is used to solve the spatially regularized problem. * ``explicit=False`` with ``epsR`` and ``epsRL1``: the iterative solver :py:func:`pylops.optimization.sparsity.SplitBregman` is used to solve the blockiness-promoting (in vertical direction) and spatially regularized (in additional horizontal directions) problem. Note that the convergence of iterative solvers such as :py:func:`scipy.sparse.linalg.lsqr` can be very slow for this type of operator. It is suggested to take a two steps approach with first a trace-by-trace inversion using the explicit operator, followed by a regularized global inversion using the outcome of the previous inversion as initial guess. 
""" ncp = get_array_module(wav) # check if background model and data have same shape if m0 is not None and data.shape != m0.shape: raise ValueError("data and m0 must have same shape") nspat: Optional[Union[int, ShapeLike]] # find out dimensions if data.ndim == 1: dims = 1 nt0 = data.size nspat = None nspatprod = nx = 1 elif data.ndim == 2: dims = 2 nt0, nx = data.shape nspat = (nx,) nspatprod = nx else: dims = 3 nt0, nx, ny = data.shape nspat = (nx, ny) nspatprod = nx * ny data = data.reshape(nt0, nspatprod) # create operator PPop = PoststackLinearModelling(wav, nt0=nt0, spatdims=nspat, explicit=explicit) if dottest: Dottest( PPop, nt0 * nspatprod, nt0 * nspatprod, raiseerror=True, backend=get_module_name(ncp), verb=True, ) # create and remove background data from original data datar = data.ravel() if m0 is None else data.ravel() - PPop * m0.ravel() # invert model if epsR is None: # inversion without spatial regularization if explicit: if epsI is None and not simultaneous: # solve unregularized equations indipendently trace-by-trace minv = get_lstsq(data)( PPop.A, datar.reshape(nt0, nspatprod).squeeze(), **kwargs_solver )[0] elif epsI is None and simultaneous: # solve unregularized equations simultaneously if ncp == np: minv = lsqr(PPop, datar, **kwargs_solver)[0] else: minv = cgls( PPop, datar, x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype), **kwargs_solver )[0] elif epsI is not None: # create regularized normal equations PP = ncp.dot(PPop.A.T, PPop.A) + epsI * ncp.eye(nt0, dtype=PPop.A.dtype) datarn = ncp.dot(PPop.A.T, datar.reshape(nt0, nspatprod)) if not simultaneous: # solve regularized normal eqs. 
trace-by-trace minv = get_lstsq(data)(PP, datarn, **kwargs_solver)[0] else: # solve regularized normal equations simultaneously PPop_reg = MatrixMult(PP, otherdims=nspatprod) if ncp == np: minv = lsqr(PPop_reg, datar.ravel(), **kwargs_solver)[0] else: minv = cgls( PPop_reg, datar.ravel(), x0=ncp.zeros(int(PPop_reg.shape[1]), PPop_reg.dtype), **kwargs_solver )[0] else: # create regularized normal eqs. and solve them simultaneously PP = ncp.dot(PPop.A.T, PPop.A) + epsI * ncp.eye(nt0, dtype=PPop.A.dtype) datarn = PPop.A.T * datar.reshape(nt0, nspatprod) PPop_reg = MatrixMult(PP, otherdims=nspatprod) minv = get_lstsq(data)(PPop_reg.A, datarn.ravel(), **kwargs_solver)[0] else: # solve unregularized normal equations simultaneously with lop if ncp == np: minv = lsqr(PPop, datar, **kwargs_solver)[0] else: minv = cgls( PPop, datar, x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype), **kwargs_solver )[0] else: if epsRL1 is None: # L2 inversion with spatial regularization if dims == 1: Regop = SecondDerivative(nt0, dtype=PPop.dtype) elif dims == 2: Regop = Laplacian((nt0, nx), dtype=PPop.dtype) else: Regop = Laplacian((nt0, nx, ny), axes=(1, 2), dtype=PPop.dtype) minv = regularized_inversion( PPop, data.ravel(), [Regop], x0=None if m0 is None else m0.ravel(), epsRs=[epsR], **kwargs_solver )[0] else: # Blockiness-promoting inversion with spatial regularization if dims == 1: RegL1op = FirstDerivative(nt0, kind="forward", dtype=PPop.dtype) RegL2op = None elif dims == 2: RegL1op = FirstDerivative( (nt0, nx), axis=0, kind="forward", dtype=PPop.dtype ) RegL2op = SecondDerivative((nt0, nx), axis=1, dtype=PPop.dtype) else: RegL1op = FirstDerivative( (nt0, nx, ny), axis=0, kind="forward", dtype=PPop.dtype ) RegL2op = Laplacian((nt0, nx, ny), axes=(1, 2), dtype=PPop.dtype) if "mu" in kwargs_solver.keys(): mu = kwargs_solver["mu"] kwargs_solver.pop("mu") else: mu = 1.0 if "niter_outer" in kwargs_solver.keys(): niter_outer = kwargs_solver["niter_outer"] kwargs_solver.pop("niter_outer") else: 
niter_outer = 3 if "niter_inner" in kwargs_solver.keys(): niter_inner = kwargs_solver["niter_inner"] kwargs_solver.pop("niter_inner") else: niter_inner = 5 if not isinstance(epsRL1, (list, tuple)): epsRL1 = list([epsRL1]) if not isinstance(epsR, (list, tuple)): epsR = list([epsR]) minv = splitbregman( PPop, data.ravel(), [RegL1op], RegsL2=[RegL2op], epsRL1s=epsRL1, epsRL2s=epsR, mu=mu, niter_outer=niter_outer, niter_inner=niter_inner, x0=None if m0 is None else m0.ravel(), **kwargs_solver )[0] # compute residual if epsR is None: datar -= PPop * minv.ravel() else: datar = data.ravel() - PPop * minv.ravel() # reshape inverted model and residual data if dims == 1: minv = minv.squeeze() datar = datar.squeeze() elif dims == 2: minv = minv.reshape(nt0, nx) datar = datar.reshape(nt0, nx) else: minv = minv.reshape(nt0, nx, ny) datar = datar.reshape(nt0, nx, ny) if m0 is not None and epsR is None: minv = minv + m0 return minv, datar
18,614
35.861386
88
py
pylops
pylops-master/pylops/avo/prestack.py
__all__ = [ "PrestackLinearModelling", "PrestackWaveletModelling", "PrestackInversion", ] import logging from typing import Optional, Tuple, Union import numpy as np from scipy.sparse.linalg import lsqr from pylops import ( Diagonal, FirstDerivative, Identity, Laplacian, LinearOperator, MatrixMult, SecondDerivative, VStack, ) from pylops.avo.avo import AVOLinearModelling, akirichards, fatti, ps from pylops.optimization.basic import cgls from pylops.optimization.leastsquares import regularized_inversion from pylops.optimization.sparsity import splitbregman from pylops.signalprocessing import Convolve1D from pylops.utils import dottest as Dottest from pylops.utils.backend import ( get_array_module, get_block_diag, get_lstsq, get_module_name, ) from pylops.utils.signalprocessing import convmtx from pylops.utils.typing import NDArray, ShapeLike logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) _linearizations = {"akirich": 3, "fatti": 3, "ps": 3} def PrestackLinearModelling( wav: NDArray, theta: NDArray, vsvp: Union[float, NDArray] = 0.5, nt0: int = 1, spatdims: Optional[Union[int, ShapeLike]] = None, linearization: str = "akirich", explicit: bool = False, kind: str = "centered", name: Optional[str] = None, ) -> LinearOperator: r"""Pre-stack linearized seismic modelling operator. Create operator to be applied to elastic property profiles for generation of band-limited seismic angle gathers from a linearized version of the Zoeppritz equation. The input model must be arranged in a vector of size :math:`n_m \times n_{t_0}\,(\times n_x \times n_y)` for ``explicit=True`` and :math:`n_{t_0} \times n_m \,(\times n_x \times n_y)` for ``explicit=False``. Similarly the output data is arranged in a vector of size :math:`n_{\theta} \times n_{t_0} \,(\times n_x \times n_y)` for ``explicit=True`` and :math:`n_{t_0} \times n_{\theta} \,(\times n_x \times n_y)` for ``explicit=False``. 
Parameters ---------- wav : :obj:`np.ndarray` Wavelet in time domain (must had odd number of elements and centered to zero). Note that the ``dtype`` of this variable will define that of the operator theta : :obj:`np.ndarray` Incident angles in degrees. Must have same ``dtype`` of ``wav`` (or it will be automatically casted to it) vsvp : :obj:`float` or :obj:`np.ndarray` :math:`V_S/V_P` ratio (constant or time/depth variant) nt0 : :obj:`int`, optional number of samples (if ``vsvp`` is a scalar) spatdims : :obj:`int` or :obj:`tuple`, optional Number of samples along spatial axis (or axes) (``None`` if only one dimension is available) linearization : `{"akirich", "fatti", "PS"}` or :obj:`callable`, optional * "akirich": Aki-Richards. See :py:func:`pylops.avo.avo.akirichards`. * "fatti": Fatti. See :py:func:`pylops.avo.avo.fatti`. * "PS": PS. See :py:func:`pylops.avo.avo.ps`. * Function with the same signature as :py:func:`pylops.avo.avo.akirichards` explicit : :obj:`bool`, optional Create a chained linear operator (``False``, preferred for large data) or a ``MatrixMult`` linear operator with dense matrix (``True``, preferred for small data) kind : :obj:`str`, optional Derivative kind (``forward`` or ``centered``). name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Preop : :obj:`LinearOperator` pre-stack modelling operator. Raises ------ NotImplementedError If ``linearization`` is not an implemented linearization NotImplementedError If ``kind`` is not ``forward`` nor ``centered`` Notes ----- Pre-stack seismic modelling is the process of constructing seismic pre-stack data from three (or two) profiles of elastic parameters in time (or depth) domain. This can be easily achieved using the following forward model: .. math:: d(t, \theta) = w(t) * \sum_{i=1}^{n_m} G_i(t, \theta) m_i(t) where :math:`w(t)` is the time domain seismic wavelet. In compact form: .. 
math:: \mathbf{d}= \mathbf{G} \mathbf{m} On the other hand, pre-stack inversion aims at recovering the different profiles of elastic properties from the band-limited seismic pre-stack data. """ ncp = get_array_module(wav) # check kind is correctly selected if kind not in ["forward", "centered"]: raise NotImplementedError("%s not an available derivative kind..." % kind) # define dtype to be used dtype = theta.dtype # ensure theta.dtype rules that of operator theta = theta.astype(dtype) # create vsvp profile vsvp = vsvp if isinstance(vsvp, ncp.ndarray) else vsvp * ncp.ones(nt0, dtype=dtype) nt0 = len(vsvp) ntheta = len(theta) # organize dimensions dims: Optional[ShapeLike] if spatdims is None: dims = (nt0, ntheta) spatdims = None elif isinstance(spatdims, int): dims = (nt0, ntheta, spatdims) spatdims = (spatdims,) else: dims = (nt0, ntheta) + spatdims if explicit: # Create AVO operator if linearization == "akirich": G = akirichards(theta, vsvp, n=nt0) elif linearization == "fatti": G = fatti(theta, vsvp, n=nt0) elif linearization == "ps": G = ps(theta, vsvp, n=nt0) elif callable(linearization): G = linearization(theta, vsvp, n=nt0) else: logging.error("%s not an available linearization...", linearization) raise NotImplementedError( "%s not an available linearization..." 
% linearization ) nG = len(G) G = [ ncp.hstack([ncp.diag(G_[itheta] * ncp.ones(nt0, dtype=dtype)) for G_ in G]) for itheta in range(ntheta) ] G = ncp.vstack(G).reshape(ntheta * nt0, nG * nt0) # Create derivative operator if kind == "centered": D = ncp.diag(0.5 * ncp.ones(nt0 - 1, dtype=dtype), k=1) - ncp.diag( 0.5 * ncp.ones(nt0 - 1, dtype=dtype), k=-1 ) D[0] = D[-1] = 0 else: D = ncp.diag(ncp.ones(nt0 - 1, dtype=dtype), k=1) - ncp.diag( ncp.ones(nt0, dtype=dtype), k=0 ) D[-1] = 0 D = get_block_diag(theta)(*([D] * nG)) # Create wavelet operator C = ncp.asarray(convmtx(wav, nt0))[:, len(wav) // 2 : -len(wav) // 2 + 1] C = [C] * ntheta C = get_block_diag(theta)(*C) # Combine operators M = ncp.dot(C, ncp.dot(G, D)) Preop = MatrixMult(M, otherdims=spatdims, dtype=dtype) else: # Create wavelet operator Cop = Convolve1D( dims, h=wav, offset=len(wav) // 2, axis=0, dtype=dtype, ) # create AVO operator AVOop = AVOLinearModelling( theta, vsvp, spatdims=spatdims, linearization=linearization, dtype=dtype ) # Create derivative operator dimsm = list(dims) dimsm[1] = AVOop.npars Dop = FirstDerivative(dimsm, axis=0, sampling=1.0, kind=kind, dtype=dtype) Preop = Cop * AVOop * Dop Preop.name = name return Preop def PrestackWaveletModelling( m: NDArray, theta: NDArray, nwav: int, wavc: Optional[int] = None, vsvp: Union[float, NDArray] = 0.5, linearization: str = "akirich", name: Optional[str] = None, ) -> LinearOperator: r"""Pre-stack linearized seismic modelling operator for wavelet. Create operator to be applied to a wavelet for generation of band-limited seismic angle gathers using a linearized version of the Zoeppritz equation. Parameters ---------- m : :obj:`np.ndarray` elastic parameter profles of size :math:`[n_{t_0} \times N]` where :math:`N=3,\,2`. Note that the ``dtype`` of this variable will define that of the operator theta : :obj:`int` Incident angles in degrees. 
Must have same ``dtype`` of ``m`` (or it will be automatically cast to it) nwav : :obj:`np.ndarray` Number of samples of wavelet to be applied/estimated wavc : :obj:`int`, optional Index of the center of the wavelet vsvp : :obj:`np.ndarray` or :obj:`float`, optional :math:`V_S/V_P` ratio linearization : `{"akirich", "fatti", "PS"}` or :obj:`callable`, optional * "akirich": Aki-Richards. See :py:func:`pylops.avo.avo.akirichards`. * "fatti": Fatti. See :py:func:`pylops.avo.avo.fatti`. * "PS": PS. See :py:func:`pylops.avo.avo.ps`. * Function with the same signature as :py:func:`pylops.avo.avo.akirichards` name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Mconv : :obj:`LinearOperator` pre-stack modelling operator for wavelet estimation. Raises ------ NotImplementedError If ``linearization`` is not an implemented linearization Notes ----- Pre-stack seismic modelling for wavelet estimate is the process of constructing seismic reflectivities using three (or two) profiles of elastic parameters in time (or depth) domain arranged in an input vector :math:`\mathbf{m}` of size :math:`n_{t_0} \times N`: .. math:: d(t, \theta) = \sum_{i=1}^N G_i(t, \theta) m_i(t) * w(t) where :math:`w(t)` is the time domain seismic wavelet. In compact form: .. math:: \mathbf{d}= \mathbf{G} \mathbf{w} On the other hand, pre-stack wavelet estimation aims at recovering the wavelet given knowledge of the band-limited seismic pre-stack data and the elastic parameter profiles. 
""" ncp = get_array_module(theta) # define dtype to be used dtype = m.dtype # ensure m.dtype rules that of operator theta = theta.astype(dtype) # Create vsvp profile vsvp = ( vsvp if isinstance(vsvp, ncp.ndarray) else vsvp * ncp.ones(m.shape[0], dtype=dtype) ) wavc = nwav // 2 if wavc is None else wavc nt0 = len(vsvp) ntheta = len(theta) # Create AVO operator if linearization == "akirich": G = akirichards(theta, vsvp, n=nt0) elif linearization == "fatti": G = fatti(theta, vsvp, n=nt0) elif linearization == "ps": G = ps(theta, vsvp, n=nt0) elif callable(linearization): G = linearization(theta, vsvp, n=nt0) else: logging.error("%s not an available linearization...", linearization) raise NotImplementedError( "%s not an available linearization..." % linearization ) nG = len(G) G = [ ncp.hstack([ncp.diag(G_[itheta] * ncp.ones(nt0, dtype=dtype)) for G_ in G]) for itheta in range(ntheta) ] G = ncp.vstack(G).reshape(ntheta * nt0, nG * nt0) # Create derivative operator D = ncp.diag(0.5 * np.ones(nt0 - 1, dtype=dtype), k=1) - ncp.diag( 0.5 * np.ones(nt0 - 1, dtype=dtype), k=-1 ) D[0] = D[-1] = 0 D = get_block_diag(theta)(*([D] * nG)) # Create infinite-reflectivity data M = ncp.dot(G, ncp.dot(D, m.T.ravel())).reshape(ntheta, nt0) Mconv = VStack( [ MatrixMult(convmtx(M[itheta], nwav)[wavc : -nwav + wavc + 1], dtype=dtype) for itheta in range(ntheta) ] ) Mconv.name = name return Mconv def PrestackInversion( data: NDArray, theta: NDArray, wav: NDArray, m0: Optional[NDArray] = None, linearization: str = "akirich", explicit: bool = False, simultaneous: bool = False, epsI: Optional[float] = None, epsR: Optional[float] = None, dottest: bool = False, returnres: bool = False, epsRL1: Optional[float] = None, kind: str = "centered", vsvp: Union[float, NDArray] = 0.5, **kwargs_solver ) -> Union[NDArray, Tuple[NDArray, NDArray]]: r"""Pre-stack linearized seismic inversion. 
Invert pre-stack seismic operator to retrieve a set of elastic property profiles from band-limited seismic pre-stack data (i.e., angle gathers). Depending on the choice of input parameters, inversion can be trace-by-trace with explicit operator or global with either explicit or linear operator. Parameters ---------- data : :obj:`np.ndarray` Band-limited seismic post-stack data of size :math:`[(n_\text{lins} \times) \, n_{t_0} \times n_{\theta} \, (\times n_x \times n_y)]` theta : :obj:`np.ndarray` Incident angles in degrees wav : :obj:`np.ndarray` Wavelet in time domain (must had odd number of elements and centered to zero) m0 : :obj:`np.ndarray`, optional Background model of size :math:`[n_{t_0} \times n_{m} \,(\times n_x \times n_y)]` linearization : `{"akirich", "fatti", "PS"}` or :obj:`list`, optional * "akirich": Aki-Richards. See :py:func:`pylops.avo.avo.akirichards`. * "fatti": Fatti. See :py:func:`pylops.avo.avo.fatti`. * "PS": PS. See :py:func:`pylops.avo.avo.ps`. * List which is a combination of previous options (required only when ``m0 is None``). explicit : :obj:`bool`, optional Create a chained linear operator (``False``, preferred for large data) or a ``MatrixMult`` linear operator with dense matrix (``True``, preferred for small data) simultaneous : :obj:`bool`, optional Simultaneously invert entire data (``True``) or invert trace-by-trace (``False``) when using ``explicit`` operator (note that the entire data is always inverted when working with linear operator) epsI : :obj:`float` or :obj:`list`, optional Damping factor(s) for Tikhonov regularization term. 
If a list of :math:`n_{m}` elements is provided, the regularization term will have different strenght for each elastic property epsR : :obj:`float`, optional Damping factor for additional Laplacian regularization term dottest : :obj:`bool`, optional Apply dot-test returnres : :obj:`bool`, optional Return residuals epsRL1 : :obj:`float`, optional Damping factor for additional blockiness regularization term kind : :obj:`str`, optional Derivative kind (``forward`` or ``centered``). vsvp : :obj:`float` or :obj:`np.ndarray` :math:`V_S/V_P` ratio (constant or time/depth variant) **kwargs_solver Arbitrary keyword arguments for :py:func:`scipy.linalg.lstsq` solver (if ``explicit=True`` and ``epsR=None``) or :py:func:`scipy.sparse.linalg.lsqr` solver (if ``explicit=False`` and/or ``epsR`` is not ``None``)) Returns ------- minv : :obj:`np.ndarray` Inverted model of size :math:`[n_{t_0} \times n_{m} \,(\times n_x \times n_y)]` datar : :obj:`np.ndarray` Residual data (i.e., data - background data) of size :math:`[n_{t_0} \times n_{\theta} \,(\times n_x \times n_y)]` Notes ----- The different choices of cost functions and solvers used in the seismic pre-stack inversion module follow the same convention of the seismic post-stack inversion module. Refer to :py:func:`pylops.avo.poststack.PoststackInversion` for more details. 
""" ncp = get_array_module(data) # find out dimensions if m0 is None and linearization is None: raise NotImplementedError("either m0 or linearization " "must be provided") elif m0 is None: if isinstance(linearization, str): nm = _linearizations[linearization] else: nm = _linearizations[linearization[0]] else: nm = m0.shape[1] data_shape = data.shape data_ndim = data.ndim n_lins = 1 multi = 0 if not isinstance(linearization, str): n_lins = data_shape[0] data_shape = data_shape[1:] data_ndim -= 1 multi = 1 if data_ndim == 2: dims = 1 nt0, ntheta = data_shape nspat = None nspatprod = nx = 1 elif data_ndim == 3: dims = 2 nt0, ntheta, nx = data_shape nspat = (nx,) nspatprod = nx else: dims = 3 nt0, ntheta, nx, ny = data_shape nspat = (nx, ny) nspatprod = nx * ny data = data.reshape(nt0, ntheta, nspatprod) # check if background model and data have same shape if m0 is not None: if ( nt0 != m0.shape[0] or (dims >= 2 and nx != m0.shape[2]) or (dims == 3 and ny != m0.shape[3]) ): raise ValueError("data and m0 must have same time and space axes") # create operator if isinstance(linearization, str): # single operator PPop = PrestackLinearModelling( wav, theta, vsvp=vsvp, nt0=nt0, spatdims=nspat, linearization=linearization, explicit=explicit, kind=kind, ) else: # multiple operators if not isinstance(wav, (list, tuple)): wav = [ wav, ] * n_lins PPop = [ PrestackLinearModelling( w, theta, vsvp=vsvp, nt0=nt0, spatdims=nspat, linearization=lin, explicit=explicit, ) for w, lin in zip(wav, linearization) ] if explicit: PPop = MatrixMult( np.vstack([Op.A for Op in PPop]), otherdims=nspat, dtype=PPop[0].A.dtype ) else: PPop = VStack(PPop) if dottest: Dottest( PPop, n_lins * nt0 * ntheta * nspatprod, nt0 * nm * nspatprod, raiseerror=True, verb=True, backend=get_module_name(ncp), ) # swap axes for explicit operator if explicit: data = data.swapaxes(0 + multi, 1 + multi) if m0 is not None: m0 = m0.swapaxes(0, 1) # invert model if epsR is None: # create and remove background data from 
original data datar = data.ravel() if m0 is None else data.ravel() - PPop * m0.ravel() # inversion without spatial regularization if explicit: if epsI is None and not simultaneous: # solve unregularized equations indipendently trace-by-trace minv = get_lstsq(data)( PPop.A, datar.reshape(n_lins * nt0 * ntheta, nspatprod).squeeze(), **kwargs_solver )[0] elif epsI is None and simultaneous: # solve unregularized equations simultaneously if ncp == np: minv = lsqr(PPop, datar, **kwargs_solver)[0] else: minv = cgls( PPop, datar, x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype), **kwargs_solver )[0] elif epsI is not None: # create regularized normal equations PP = ncp.dot(PPop.A.T, PPop.A) + epsI * ncp.eye( nt0 * nm, dtype=PPop.A.dtype ) datarn = np.dot(PPop.A.T, datar.reshape(nt0 * ntheta, nspatprod)) if not simultaneous: # solve regularized normal eqs. trace-by-trace minv = get_lstsq(data)(PP, datarn, **kwargs_solver)[0] else: # solve regularized normal equations simultaneously PPop_reg = MatrixMult(PP, otherdims=nspatprod) if ncp == np: minv = lsqr(PPop_reg, datarn.ravel(), **kwargs_solver)[0] else: minv = cgls( PPop_reg, datarn.ravel(), x0=ncp.zeros(int(PPop_reg.shape[1]), PPop_reg.dtype), **kwargs_solver )[0] # else: # # create regularized normal eqs. 
and solve them simultaneously # PP = np.dot(PPop.A.T, PPop.A) + epsI * np.eye(nt0*nm) # datarn = PPop.A.T * datar.reshape(nt0*ntheta, nspatprod) # PPop_reg = MatrixMult(PP, otherdims=ntheta*nspatprod) # minv = lstsq(PPop_reg, datarn.ravel(), **kwargs_solver)[0] else: # solve unregularized normal equations simultaneously with lop if ncp == np: minv = lsqr(PPop, datar, **kwargs_solver)[0] else: minv = cgls( PPop, datar, x0=ncp.zeros(int(PPop.shape[1]), PPop.dtype), **kwargs_solver )[0] else: # Create Thicknov regularization if epsI is not None: if isinstance(epsI, (list, tuple)): if len(epsI) != nm: raise ValueError("epsI must be a scalar or a list of" "size nm") RegI = Diagonal(np.array(epsI), dims=(nt0, nm, nspatprod), axis=1) else: RegI = epsI * Identity(nt0 * nm * nspatprod) if epsRL1 is None: # L2 inversion with spatial regularization if dims == 1: Regop = SecondDerivative((nt0, nm), axis=0, dtype=PPop.dtype) elif dims == 2: Regop = Laplacian((nt0, nm, nx), axes=(0, 2), dtype=PPop.dtype) else: Regop = Laplacian((nt0, nm, nx, ny), axes=(2, 3), dtype=PPop.dtype) if epsI is None: Regop = (Regop,) epsR = (epsR,) else: Regop = (Regop, RegI) epsR = (epsR, 1) minv = regularized_inversion( PPop, data.ravel(), Regop, x0=m0.ravel() if m0 is not None else None, epsRs=epsR, **kwargs_solver )[0] else: # Blockiness-promoting inversion with spatial regularization if dims == 1: RegL1op = FirstDerivative(nt0 * nm, dtype=PPop.dtype) RegL2op = None elif dims == 2: RegL1op = FirstDerivative((nt0, nm, nx), axis=0, dtype=PPop.dtype) RegL2op = SecondDerivative((nt0, nm, nx), axis=2, dtype=PPop.dtype) else: RegL1op = FirstDerivative((nt0, nm, nx, ny), axis=0, dtype=PPop.dtype) RegL2op = Laplacian((nt0, nm, nx, ny), axes=(2, 3), dtype=PPop.dtype) if dims == 1: if epsI is not None: RegL2op = (RegI,) epsR = (1,) else: if epsI is None: RegL2op = (RegL2op,) epsR = (epsR,) else: RegL2op = (RegL2op, RegI) epsR = (epsR, 1) epsRL1 = (epsRL1,) if "mu" in kwargs_solver.keys(): mu = 
kwargs_solver["mu"] kwargs_solver.pop("mu") else: mu = 1.0 if "niter_outer" in kwargs_solver.keys(): niter_outer = kwargs_solver["niter_outer"] kwargs_solver.pop("niter_outer") else: niter_outer = 3 if "niter_inner" in kwargs_solver.keys(): niter_inner = kwargs_solver["niter_inner"] kwargs_solver.pop("niter_inner") else: niter_inner = 5 minv = splitbregman( PPop, data.ravel(), (RegL1op,), RegsL2=RegL2op, epsRL1s=epsRL1, epsRL2s=epsR, mu=mu, niter_outer=niter_outer, niter_inner=niter_inner, x0=None if m0 is None else m0.ravel(), **kwargs_solver )[0] # compute residual if returnres: if epsR is None: datar -= PPop * minv.ravel() else: datar = data.ravel() - PPop * minv.ravel() # re-swap axes for explicit operator if explicit: if m0 is not None: m0 = m0.swapaxes(0, 1) # reshape inverted model and residual data if dims == 1: if explicit: minv = minv.reshape(nm, nt0).swapaxes(0, 1) if returnres: datar = ( datar.reshape(n_lins, ntheta, nt0) .squeeze() .swapaxes(0 + multi, 1 + multi) ) else: minv = minv.reshape(nt0, nm) if returnres: datar = datar.reshape(n_lins, nt0, ntheta).squeeze() elif dims == 2: if explicit: minv = minv.reshape(nm, nt0, nx).swapaxes(0, 1) if returnres: datar = ( datar.reshape(n_lins, ntheta, nt0, nx) .squeeze() .swapaxes(0 + multi, 1 + multi) ) else: minv = minv.reshape(nt0, nm, nx) if returnres: datar = datar.reshape(n_lins, nt0, ntheta, nx).squeeze() else: if explicit: minv = minv.reshape(nm, nt0, nx, ny).swapaxes(0, 1) if returnres: datar = ( datar.reshape(n_lins, ntheta, nt0, nx, ny) .squeeze() .swapaxes(0 + multi, 1 + multi) ) else: minv = minv.reshape(nt0, nm, nx, ny) if returnres: datar = datar.reshape(n_lins, nt0, ntheta, nx, ny).squeeze() if m0 is not None and epsR is None: minv = minv + m0 if returnres: return minv, datar else: return minv
26,536
33.825459
96
py
pylops
pylops-master/pylops/avo/__init__.py
""" AVO Operators ============= The subpackage avo provides linear operators and applications aimed at solving various inverse problems in the area of Seismic Reservoir Characterization. A list of available operators present in pylops.avo: AVOLinearModelling AVO modelling. PoststackLinearModelling Post-stack seismic modelling. PrestackLinearModelling Pre-stack seismic modelling. PrestackWaveletModelling Pre-stack modelling operator for wavelet. and a list of applications: PoststackInversion Post-stack seismic inversion. PrestackInversion Pre-stack seismic inversion. """ from .poststack import * from .prestack import * __all__ = [ "AVOLinearModelling", "PoststackLinearModelling", "PrestackWaveletModelling", "PrestackLinearModelling", "PoststackInversion", "PrestackInversion", ]
954
27.088235
85
py
pylops
pylops-master/pylops/optimization/cls_basic.py
__all__ = [ "CG", "CGLS", "LSQR", ] import time from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from pylops.optimization.basesolver import Solver from pylops.utils.backend import get_array_module, to_numpy from pylops.utils.typing import NDArray if TYPE_CHECKING: from pylops.linearoperator import LinearOperator class CG(Solver): r"""Conjugate gradient Solve a square system of equations given an operator ``Op`` and data ``y`` using conjugate gradient iterations. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times N]` Notes ----- Solve the :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` problem using conjugate gradient iterations [1]_. .. [1] Hestenes, M R., Stiefel, E., “Methods of Conjugate Gradients for Solving Linear Systems”, Journal of Research of the National Bureau of Standards. vol. 49. 1952. """ def _print_setup(self, xcomplex: bool = False) -> None: self._print_solver(nbar=55) if self.niter is not None: strpar = f"tol = {self.tol:10e}\tniter = {self.niter}" else: strpar = f"tol = {self.tol:10e}" print(strpar) print("-" * 55 + "\n") if not xcomplex: head1 = " Itn x[0] r2norm" else: head1 = " Itn x[0] r2norm" print(head1) def _print_step(self, x: NDArray) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e} " msg = f"{self.iiter:6g} " + strx + f"{self.cost[self.iiter]:11.4e}" print(msg) def setup( self, y: NDArray, x0: Optional[NDArray] = None, niter: Optional[int] = None, tol: float = 1e-4, show: bool = False, ) -> NDArray: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[N \times 1]`. 
If ``None``, initialize internally as zero vector niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) tol : :obj:`float`, optional Tolerance on residual norm show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Initial guess of size :math:`[N \times 1]` """ self.y = y self.niter = niter self.tol = tol self.ncp = get_array_module(y) # initialize solver if x0 is None: x = self.ncp.zeros(self.Op.shape[1], dtype=self.y.dtype) self.r = self.y.copy() else: x = x0.copy() self.r = self.y - self.Op.matvec(x) self.c = self.r.copy() self.kold = self.ncp.abs(self.r.dot(self.r.conj())) # create variables to track the residual norm and iterations self.cost: List = [] self.cost.append(float(np.sqrt(self.kold))) self.iiter = 0 # print setup if show: self._print_setup(np.iscomplexobj(x)) return x def step(self, x: NDArray, show: bool = False) -> NDArray: r"""Run one step of solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by a step of CG show : :obj:`bool`, optional Display iteration log Returns ------- x : :obj:`np.ndarray` Updated model vector """ Opc = self.Op.matvec(self.c) cOpc = self.ncp.abs(self.c.dot(Opc.conj())) a = self.kold / cOpc x += a * self.c self.r -= a * Opc k = self.ncp.abs(self.r.dot(self.r.conj())) b = k / self.kold self.c = self.r + b * self.c self.kold = k self.iiter += 1 self.cost.append(float(np.sqrt(self.kold))) if show: self._print_step(x) return x def run( self, x: NDArray, niter: Optional[int] = None, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of CG niter : :obj:`int`, optional Number of iterations. 
Can be set to ``None`` if already provided in the setup call show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ niter = self.niter if niter is None else niter if niter is None: raise ValueError("niter must not be None") while self.iiter < niter and self.kold > self.tol: showstep = ( True if show and ( self.iiter < itershow[0] or niter - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 ) else False ) x = self.step(x, showstep) self.callback(x) return x def finalize(self, show: bool = False) -> None: r"""Finalize solver Parameters ---------- show : :obj:`bool`, optional Display finalize log """ self.tend = time.time() self.telapsed = self.tend - self.tstart self.cost = np.array(self.cost) if show: self._print_finalize(nbar=55) def solve( self, y: NDArray, x0: Optional[NDArray] = None, niter: int = 10, tol: float = 1e-4, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Tuple[NDArray, int, NDArray]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[N \times 1]`. If ``None``, initialize internally as zero vector niter : :obj:`int`, optional Number of iterations tol : :obj:`float`, optional Tolerance on residual norm show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. 
Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[N \times 1]` iit : :obj:`int` Number of executed iterations cost : :obj:`numpy.ndarray` History of the L2 norm of the residual """ x = self.setup(y=y, x0=x0, niter=niter, tol=tol, show=show) x = self.run(x, niter, show=show, itershow=itershow) self.finalize(show) return x, self.iiter, self.cost class CGLS(Solver): r"""Conjugate gradient least squares Solve an overdetermined system of equations given an operator ``Op`` and data ``y`` using conjugate gradient iterations. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]` Notes ----- Minimize the following functional using conjugate gradient iterations: .. math:: J = || \mathbf{y} - \mathbf{Op}\,\mathbf{x} ||_2^2 + \epsilon^2 || \mathbf{x} ||_2^2 where :math:`\epsilon` is the damping coefficient. """ def _print_setup(self, xcomplex: bool = False) -> None: self._print_solver(nbar=65) if self.niter is not None: strpar = ( f"damp = {self.damp:10e}\ttol = {self.tol:10e}\tniter = {self.niter}" ) else: strpar = f"damp = {self.damp:10e}\ttol = {self.tol:10e}\t" print(strpar) print("-" * 65 + "\n") if not xcomplex: head1 = " Itn x[0] r1norm r2norm" else: head1 = " Itn x[0] r1norm r2norm" print(head1) def _print_step(self, x: NDArray) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e} " msg = ( f"{self.iiter:6g} " + strx + f"{self.cost[self.iiter]:11.4e} {self.cost1[self.iiter]:11.4e}" ) print(msg) def setup( self, y: NDArray, x0: Optional[NDArray] = None, niter: Optional[int] = None, damp: float = 0.0, tol: float = 1e-4, show: bool = False, ) -> NDArray: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. 
If ``None``, initialize internally as zero vector niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) damp : :obj:`float`, optional Damping coefficient tol : :obj:`float`, optional Tolerance on residual norm show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Initial guess of size :math:`[N \times 1]` """ self.y = y self.damp = damp**2 self.tol = tol self.niter = niter self.ncp = get_array_module(y) # initialize solver if x0 is None: x = self.ncp.zeros(self.Op.shape[1], dtype=y.dtype) self.s = self.y.copy() r = self.Op.rmatvec(self.s) else: x = x0.copy() self.s = self.y - self.Op.matvec(x) r = self.Op.rmatvec(self.s) - damp * x self.c = r.copy() self.q = self.Op.matvec(self.c) self.kold = self.ncp.abs(r.dot(r.conj())) # create variables to track the residual norm and iterations self.cost = [] self.cost1 = [] self.cost.append(float(self.ncp.linalg.norm(self.s))) self.cost1.append( float( self.ncp.sqrt(self.cost[0] ** 2 + damp * self.ncp.abs(x.dot(x.conj()))) ) ) self.iiter = 0 # print setup if show: self._print_setup(np.iscomplexobj(x)) return x def step(self, x: NDArray, show: bool = False) -> NDArray: r"""Run one step of solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by a step of CG show : :obj:`bool`, optional Display iteration log """ a = self.kold / ( self.q.dot(self.q.conj()) + self.damp * self.c.dot(self.c.conj()) ) x = x + a * self.c self.s = self.s - a * self.q r = self.Op.rmatvec(self.s) - self.damp * x k = self.ncp.abs(r.dot(r.conj())) b = k / self.kold self.c = r + b * self.c self.q = self.Op.matvec(self.c) self.kold = k self.iiter += 1 self.cost.append(float(self.ncp.linalg.norm(self.s))) self.cost1.append( self.ncp.sqrt( float( self.cost[self.iiter] ** 2 + self.damp * self.ncp.abs(x.dot(x.conj())) ) ) ) if show: self._print_step(x) return x def run( self, x: NDArray, niter: Optional[int] = None, show: bool = 
False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of CGLS niter : :obj:`int`, optional Number of iterations. Can be set to ``None`` if already provided in the setup call show : :obj:`bool`, optional Display iterations log itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ niter = self.niter if niter is None else niter if niter is None: raise ValueError("niter must not be None") while self.iiter < niter and self.kold > self.tol: showstep = ( True if show and ( self.iiter < itershow[0] or niter - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 ) else False ) x = self.step(x, showstep) self.callback(x) return x def finalize(self, show: bool = False) -> None: r"""Finalize solver Parameters ---------- show : :obj:`bool`, optional Display finalize log """ self.tend = time.time() self.telapsed = self.tend - self.tstart # reason for termination self.istop = 1 if self.kold < self.tol else 2 self.r1norm = self.kold self.r2norm = self.cost1[self.iiter] if show: self._print_finalize(nbar=65) self.cost = np.array(self.cost) def solve( self, y: NDArray, x0: Optional[NDArray] = None, niter: int = 10, damp: float = 0.0, tol: float = 1e-4, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Tuple[NDArray, int, int, float, float, NDArray]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`np.ndarray` Initial guess of size :math:`[M \times 1]`. 
If ``None``, initialize internally as zero vector niter : :obj:`int`, optional Number of iterations (default to ``None`` in case a user wants to manually step over the solver) damp : :obj:`float`, optional Damping coefficient tol : :obj:`float`, optional Tolerance on residual norm show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem iit : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` cost : :obj:`numpy.ndarray`, optional History of r1norm through iterations """ x = self.setup(y=y, x0=x0, niter=niter, damp=damp, tol=tol, show=show) x = self.run(x, niter, show=show, itershow=itershow) self.finalize(show) return x, self.istop, self.iiter, self.r1norm, self.r2norm, self.cost class LSQR(Solver): r"""LSQR Solve an overdetermined system of equations given an operator ``Op`` and data ``y`` using LSQR iterations. .. math:: \DeclareMathOperator{\cond}{cond} Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]` Notes ----- Minimize the following functional using LSQR iterations [1]_: .. math:: J = || \mathbf{y} - \mathbf{Op}\,\mathbf{x} ||_2^2 + \epsilon^2 || \mathbf{x} ||_2^2 where :math:`\epsilon` is the damping coefficient. .. [1] Paige, C. C., and Saunders, M. A. 
"LSQR: An algorithm for sparse linear equations and sparse least squares", ACM TOMS, vol. 8, pp. 43-71, 1982. """ def __init__(self, Op: "LinearOperator"): super().__init__(Op) self.msg = ( "The exact solution is x = 0 ", "Op x - b is small enough, given atol, btol ", "The least-squares solution is good enough, given atol ", "The estimate of cond(Opbar) has exceeded conlim ", "Op x - b is small enough for this machine ", "The least-squares solution is good enough for this machine", "Cond(Opbar) seems to be too large for this machine ", "The iteration limit has been reached ", ) def _print_setup(self, x: NDArray, xcomplex: bool = False) -> None: self._print_solver(nbar=90) print(f"damp = {self.damp:20.14e} calc_var = {self.calc_var:6g}") print(f"atol = {self.atol:8.2e} conlim = {self.conlim:8.2e}") if self.niter is not None: strpar = f"btol = {self.btol:8.2e} niter = {self.niter:8g}" else: strpar = f"btol = {self.btol:8.2e}" print(strpar) print("-" * 90) head2 = " Compatible LS Norm A Cond A" if not xcomplex: head1 = " Itn x[0] r1norm r2norm " else: head1 = " Itn x[0] r1norm r2norm " print(head1 + head2) test1: int = 1 test2: float = self.alfa / self.beta strx: str = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e}" str1: str = f"{0:6g} " + strx str2: str = f" {self.r1norm:10.3e} {self.r2norm:10.3e}" str3: str = f" {test1:8.1e} {test2:8.1e}" print(str1 + str2 + str3) def _print_step(self, x: NDArray) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e}" str1 = f"{self.iiter:6g} " + strx str2 = f" {self.r1norm:10.3e} {self.r2norm:10.3e}" str3 = f" {self.test1:8.1e} {self.test2:8.1e}" str4 = f" {self.anorm:8.1e} {self.acond:8.1e}" print(str1 + str2 + str3 + str4) def _print_finalize(self) -> None: print(" ") print(f"LSQR finished, {self.msg[self.istop]}") print(" ") str1 = f"istop ={self.istop:8g} r1norm ={self.r1norm:8.1e}" str2 = f"anorm ={self.anorm:8.1e} arnorm ={self.arnorm:8.1e}" str3 = f"itn ={self.iiter:8g} r2norm 
={self.r2norm:8.1e}" str4 = f"acond ={self.acond:8.1e} xnorm ={self.xnorm:8.1e}" str5 = f"Total time (s) = {self.telapsed:.2f}" print(str1 + " " + str2) print(str3 + " " + str4) print(str5) print("-" * 90 + "\n") def setup( self, y: NDArray, x0: Optional[NDArray] = None, damp: float = 0.0, atol: float = 1e-08, btol: float = 1e-08, conlim: float = 100000000.0, niter: int = 10, calc_var: bool = True, show: bool = False, ) -> NDArray: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. If ``None``, initialize internally as zero vector damp : :obj:`float`, optional Damping coefficient atol, btol : :obj:`float`, optional Stopping tolerances. If both are 1.0e-9, the final residual norm should be accurate to about 9 digits. (The solution will usually have fewer correct digits, depending on :math:`\cond(\mathbf{Op})` and the size of ``damp``.) conlim : :obj:`float`, optional Stopping tolerance on :math:`\cond(\mathbf{Op})` exceeds ``conlim``. For square, ``conlim`` could be as large as 1.0e+12. For least-squares problems, ``conlim`` should be less than 1.0e+8. Maximum precision can be obtained by setting ``atol = btol = conlim = 0``, but the number of iterations may then be excessive. niter : :obj:`int`, optional Number of iterations calc_var : :obj:`bool`, optional Estimate diagonals of :math:`(\mathbf{Op}^H\mathbf{Op} + \epsilon^2\mathbf{I})^{-1}`. 
show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Initial guess of size :math:`[N \times 1]` """ self.y = y self.damp = damp self.atol = atol self.btol = btol self.conlim = conlim self.niter = niter self.calc_var = calc_var self.ncp = get_array_module(y) m, n = self.Op.shape # initialize solver self.var = None if self.calc_var: self.var = self.ncp.zeros(n) self.iiter = 0 self.istop = 0 self.ctol = 0 if conlim > 0: self.ctol = 1.0 / conlim self.anorm = 0 self.acond = 0 self.dampsq = damp**2 self.ddnorm = 0 self.res2 = 0 self.xnorm = 0 self.xxnorm = 0 self.z = 0 self.cs2 = -1 self.sn2 = 0 # initialize x0 and set up the first vectors u and v for the # bidiagonalization. These satisfy beta*u=b-Op(x0), alfa*v=Op'u if x0 is None: x = self.ncp.zeros(self.Op.shape[1], dtype=y.dtype) self.u = y.copy() else: x = x0.copy() self.u = self.y - self.Op.matvec(x0) self.alfa = 0.0 self.beta = self.ncp.linalg.norm(self.u) if self.beta > 0.0: self.u = self.u / self.beta self.v = self.Op.rmatvec(self.u) self.alfa = self.ncp.linalg.norm(self.v) if self.alfa > 0: self.v = self.v / self.alfa else: self.v = x.copy() self.alfa = 0 self.w = self.v.copy() # check if solution is already found self.arnorm: float = self.alfa * self.beta # finalize setup self.arnorm0: float = self.arnorm self.rhobar: float = self.alfa self.phibar: float = self.beta self.bnorm: float = self.beta self.rnorm: float = self.beta self.r1norm: float = self.rnorm self.r2norm: float = self.rnorm # create variables to track the residual norm and iterations self.cost = [] self.cost.append(float(self.rnorm)) # print setup if show: self._print_setup(x, np.iscomplexobj(x)) if self.arnorm == 0: print(" ") print("LSQR finished") print(self.msg[self.istop]) return x def step(self, x: NDArray, show: bool = False) -> NDArray: r"""Run one step of solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by a step of CG show : :obj:`bool`, optional Display iteration log 
Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ # perform the next step of the bidiagonalization to obtain the # next beta, u, alfa, v. These satisfy the relations # beta*u = Op*v - alfa*u, # alfa*v = Op'*u - beta*v' self.u = self.Op.matvec(self.v) - self.alfa * self.u self.beta = self.ncp.linalg.norm(self.u) if self.beta > 0: self.u = self.u / self.beta self.anorm = np.linalg.norm( [self.anorm, to_numpy(self.alfa), to_numpy(self.beta), self.damp] ) self.v = self.Op.rmatvec(self.u) - self.beta * self.v self.alfa = self.ncp.linalg.norm(self.v) if self.alfa > 0: self.v = self.v / self.alfa # use a plane rotation to eliminate the damping parameter. # This alters the diagonal (rhobar) of the lower-bidiagonal matrix. self.rhobar1 = np.linalg.norm([to_numpy(self.rhobar), self.damp]) self.cs1 = self.rhobar / self.rhobar1 self.sn1 = self.damp / self.rhobar1 self.psi = self.sn1 * self.phibar self.phibar = self.cs1 * self.phibar # use a plane rotation to eliminate the subdiagonal element (beta) # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix. self.rho = np.linalg.norm([self.rhobar1, to_numpy(self.beta)]) self.cs = self.rhobar1 / self.rho self.sn = self.beta / self.rho self.theta = self.sn * self.alfa self.rhobar = -self.cs * self.alfa self.phi = self.cs * self.phibar self.phibar = self.sn * self.phibar self.tau = self.sn * self.phi # update x and w. self.t1 = self.phi / self.rho self.t2 = -self.theta / self.rho self.dk = self.w / self.rho x = x + self.t1 * self.w self.w = self.v + self.t2 * self.w self.ddnorm = self.ddnorm + self.ncp.linalg.norm(self.dk) ** 2 if self.calc_var: self.var = self.var + self.ncp.dot(self.dk, self.dk) # use a plane rotation on the right to eliminate the # super-diagonal element (theta) of the upper-bidiagonal matrix. # Then use the result to estimate norm(x). 
self.delta = self.sn2 * self.rho self.gambar = -self.cs2 * self.rho self.rhs = self.phi - self.delta * self.z self.zbar = self.rhs / self.gambar self.xnorm = self.ncp.sqrt(self.xxnorm + self.zbar**2) self.gamma = np.linalg.norm([self.gambar, to_numpy(self.theta)]) self.cs2 = self.gambar / self.gamma self.sn2 = self.theta / self.gamma self.z = self.rhs / self.gamma self.xxnorm = self.xxnorm + self.z**2.0 # test for convergence. First, estimate the condition of the matrix # Opbar, and the norms of rbar and Opbar'rbar self.acond = self.anorm * self.ncp.sqrt(self.ddnorm) self.res1 = self.phibar**2 self.res2 = self.res2 + self.psi**2 self.rnorm = self.ncp.sqrt(self.res1 + self.res2) self.arnorm = self.alfa * abs(self.tau) # distinguish between r1norm = ||b - Ax|| and # r2norm = sqrt(r1norm^2 + damp^2*||x||^2). # Estimate r1norm = sqrt(r2norm^2 - damp^2*||x||^2). # Although there is cancellation, it might be accurate enough. self.r1sq = self.rnorm**2 - self.dampsq * self.xxnorm self.r1norm = self.ncp.sqrt(self.ncp.abs(self.r1sq)) self.cost.append(float(self.r1norm)) if self.r1sq < 0: self.r1norm = -self.r1norm self.r2norm = self.rnorm # use these norms to estimate certain other quantities, # some of which will be small near a solution. self.test1 = self.rnorm / self.bnorm self.test2 = self.arnorm / self.arnorm0 self.test3 = 1.0 / self.acond t1 = self.test1 / (1.0 + self.anorm * self.xnorm / self.bnorm) self.rtol = self.btol + self.atol * self.anorm * self.xnorm / self.bnorm # set reason for termination. # The following tests guard against extremely small values of # atol, btol or ctol. The effect is equivalent to the normal tests # using atol = eps, btol = eps, conlim = 1/eps. if self.iiter >= self.niter: self.istop = 7 if 1 + self.test3 <= 1: self.istop = 6 if 1 + self.test2 <= 1: self.istop = 5 if 1 + t1 <= 1: self.istop = 4 # allow for tolerances set by the user. 
if self.test3 <= self.ctol: self.istop = 3 if self.test2 <= self.atol: self.istop = 2 if self.test1 <= self.rtol: self.istop = 1 self.iiter += 1 if show: self._print_step(x) return x def run( self, x: NDArray, niter: Optional[int] = None, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of LSQR niter : :obj:`int`, optional Number of iterations. Can be set to ``None`` if already provided in the setup call show : :obj:`bool`, optional Display iterations log itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ niter = self.niter if niter is None else niter while self.iiter < niter and self.istop == 0: showstep = ( True if show and ( self.niter <= 40 or self.iiter < itershow[0] or niter - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 or self.test3 <= 2 * self.ctol or self.test2 <= 10 * self.atol or self.test1 <= 10 * self.rtol or self.istop != 0 ) else False ) x = self.step(x, showstep) self.callback(x) return x def finalize(self, show: bool = False) -> None: r"""Finalize solver Parameters ---------- show : :obj:`bool`, optional Display finalize log """ self.tend = time.time() self.telapsed = self.tend - self.tstart self.cost = np.array(self.cost) if show: self._print_finalize() def solve( self, y: NDArray, x0: Optional[NDArray] = None, damp: float = 0.0, atol: float = 1e-08, btol: float = 1e-08, conlim: float = 100000000.0, niter: int = 10, calc_var: bool = True, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Tuple[ NDArray, int, int, float, float, float, float, float, float, Union[None, NDArray], NDArray, ]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size 
:math:`[N \times 1]` x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. If ``None``, initialize internally as zero vector damp : :obj:`float`, optional Damping coefficient atol, btol : :obj:`float`, optional Stopping tolerances. If both are 1.0e-9, the final residual norm should be accurate to about 9 digits. (The solution will usually have fewer correct digits, depending on :math:`\cond(\mathbf{Op})` and the size of ``damp``.) conlim : :obj:`float`, optional Stopping tolerance on :math:`\cond(\mathbf{Op})` exceeds ``conlim``. For square, ``conlim`` could be as large as 1.0e+12. For least-squares problems, ``conlim`` should be less than 1.0e+8. Maximum precision can be obtained by setting ``atol = btol = conlim = 0``, but the number of iterations may then be excessive. niter : :obj:`int`, optional Number of iterations calc_var : :obj:`bool`, optional Estimate diagonals of :math:`(\mathbf{Op}^H\mathbf{Op} + \epsilon^2\mathbf{I})^{-1}`. show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. 
Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` istop : :obj:`int` Gives the reason for termination ``0`` means the exact solution is :math:`\mathbf{x}=0` ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem ``3`` means the estimate of :math:`\cond(\overline{\mathbf{Op}})` has exceeded ``conlim`` ``4`` means :math:`\mathbf{y} - \mathbf{Op}\,\mathbf{x}` is small enough for this machine ``5`` means the least-squares solution is good enough for this machine ``6`` means :math:`\cond(\overline{\mathbf{Op}})` seems to be too large for this machine ``7`` means the iteration limit has been reached r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` anorm : :obj:`float` Estimate of Frobenius norm of :math:`\overline{\mathbf{Op}} = [\mathbf{Op} \; \epsilon \mathbf{I}]` acond : :obj:`float` Estimate of :math:`\cond(\overline{\mathbf{Op}})` arnorm : :obj:`float` Estimate of norm of :math:`\cond(\mathbf{Op}^H\mathbf{r}- \epsilon^2\mathbf{x})` var : :obj:`float` Diagonals of :math:`(\mathbf{Op}^H\mathbf{Op})^{-1}` (if ``damp=0``) or more generally :math:`(\mathbf{Op}^H\mathbf{Op} + \epsilon^2\mathbf{I})^{-1}`. cost : :obj:`numpy.ndarray`, optional History of r1norm through iterations """ x = self.setup( y=y, x0=x0, damp=damp, atol=atol, btol=btol, conlim=conlim, niter=niter, calc_var=calc_var, show=show, ) x = self.run(x, niter=niter, show=show, itershow=itershow) self.finalize(show) return ( x, self.istop, self.iiter, self.r1norm, self.r2norm, self.anorm, self.acond, self.arnorm, self.xnorm, self.var, self.cost, )
36,363
32.484346
91
py
pylops
pylops-master/pylops/optimization/leastsquares.py
__all__ = [ "normal_equations_inversion", "regularized_inversion", "preconditioned_inversion", ] from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple from pylops.optimization.cls_leastsquares import ( NormalEquationsInversion, PreconditionedInversion, RegularizedInversion, ) from pylops.utils.typing import NDArray, SamplingLike if TYPE_CHECKING: from pylops.linearoperator import LinearOperator def normal_equations_inversion( Op: "LinearOperator", y: NDArray, Regs: List["LinearOperator"], x0: Optional[NDArray] = None, Weight: Optional["LinearOperator"] = None, dataregs: Optional[List[NDArray]] = None, epsI: float = 0.0, epsRs: Optional[SamplingLike] = None, NRegs: Optional[Sequence["LinearOperator"]] = None, epsNRs: Optional[SamplingLike] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int]: r"""Inversion of normal equations. Solve the regularized normal equations for a system of equations given the operator ``Op``, a data weighting operator ``Weight`` and optionally a list of regularization terms ``Regs`` and/or ``NRegs``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]` y : :obj:`numpy.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) x0 : :obj:`numpy.ndarray`, optional Initial guess of size :math:`[M \times 1]` Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (must have the same number of elements as ``Regs``) epsI : :obj:`float`, optional Tikhonov damping epsRs : :obj:`list`, optional Regularization dampings (must have the same number of elements as ``Regs``) NRegs : :obj:`list` Normal regularization operators (``None`` to avoid adding regularization). Such operators must apply the chain of the forward and the adjoint in one go. 
This can be convenient in cases where a faster implementation is available compared to applying the forward followed by the adjoint. epsNRs : :obj:`list`, optional Regularization dampings for normal operators (must have the same number of elements as ``NRegs``) engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display normal equations solver log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.cg` and :py:func:`pylops.optimization.solver.cg` are used for engine ``scipy`` and ``pylops``, respectively) .. note:: When user does not supply ``atol``, it is set to "legacy". Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. istop : :obj:`int` Convergence information (only when using :py:func:`scipy.sparse.linalg.cg`): ``0``: successful exit ``>0``: convergence to tolerance not achieved, number of iterations ``<0``: illegal input or breakdown See Also -------- RegularizedInversion: Regularized inversion PreconditionedInversion: Preconditioned inversion Notes ----- See :class:`pylops.optimization.cls_leastsquares.NormalEquationsInversion` """ nesolve = NormalEquationsInversion(Op) xinv, istop = nesolve.solve( y, Regs, x0=x0, Weight=Weight, dataregs=dataregs, epsI=epsI, epsRs=epsRs, NRegs=NRegs, epsNRs=epsNRs, engine=engine, show=show, **kwargs_solver, ) return xinv, istop def regularized_inversion( Op, y: NDArray, Regs: List["LinearOperator"], x0: Optional[NDArray] = None, Weight: Optional["LinearOperator"] = None, dataregs: Optional[List[NDArray]] = None, epsRs: Optional[SamplingLike] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Regularized inversion. Solve a system of regularized equations given the operator ``Op``, a data weighting operator ``Weight``, and a list of regularization terms ``Regs``. 
Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]` y : :obj:`numpy.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) x0 : :obj:`numpy.ndarray`, optional Initial guess of size :math:`[M \times 1]` Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (if ``None`` a zero data will be used for every regularization operator in ``Regs``) epsRs : :obj:`list`, optional Regularization dampings engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display normal equations solver log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used for engine ``scipy`` and ``pylops``, respectively) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. 
Equal to ``r1norm`` if :math:`\epsilon=0` See Also -------- RegularizedOperator: Regularized operator NormalEquationsInversion: Normal equations inversion PreconditionedInversion: Preconditioned inversion Notes ----- See :class:`pylops.optimization.cls_leastsquares.RegularizedInversion` """ rsolve = RegularizedInversion(Op) xinv, istop, itn, r1norm, r2norm = rsolve.solve( y, Regs, x0=x0, Weight=Weight, dataregs=dataregs, epsRs=epsRs, engine=engine, show=show, **kwargs_solver, ) return xinv, istop, itn, r1norm, r2norm def preconditioned_inversion( Op: "LinearOperator", y: NDArray, P: "LinearOperator", x0: Optional[NDArray] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Preconditioned inversion. Solve a system of preconditioned equations given the operator ``Op`` and a preconditioner ``P``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]` y : :obj:`numpy.ndarray` Data of size :math:`[N \times 1]` P : :obj:`pylops.LinearOperator` Preconditioner x0 : :obj:`numpy.ndarray` Initial guess of size :math:`[M \times 1]` engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display normal equations solver log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used as default for numpy and cupy `data`, respectively) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. 
istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` See Also -------- RegularizedInversion: Regularized inversion NormalEquationsInversion: Normal equations inversion Notes ----- See :class:`pylops.optimization.cls_leastsquares.PreconditionedInversion` """ psolve = PreconditionedInversion(Op) xinv, istop, itn, r1norm, r2norm = psolve.solve( y, P, x0=x0, engine=engine, show=show, **kwargs_solver ) return xinv, istop, itn, r1norm, r2norm
9,183
31
84
py
pylops
pylops-master/pylops/optimization/eigs.py
__all__ = ["power_iteration"] from typing import Tuple import numpy as np from pylops import LinearOperator from pylops.utils.backend import get_module from pylops.utils.typing import NDArray def power_iteration( Op: LinearOperator, niter: int = 10, tol: float = 1e-5, dtype: str = "float32", backend: str = "numpy", ) -> Tuple[float, NDArray, int]: """Power iteration algorithm. Power iteration algorithm, used to compute the largest eigenvector and corresponding eigenvalue. Note that for complex numbers, the eigenvalue with largest module is found. This implementation closely follow that of https://en.wikipedia.org/wiki/Power_iteration. Parameters ---------- Op : :obj:`pylops.LinearOperator` Square operator niter : :obj:`int`, optional Number of iterations tol : :obj:`float`, optional Update tolerance dtype : :obj:`str`, optional Type of elements in input array. backend : :obj:`str`, optional Backend to use (`numpy` or `cupy`) Returns ------- maxeig : :obj:`float` Largest eigenvalue b_k : :obj:`np.ndarray` or :obj:`cp.ndarray` Largest eigenvector iiter : :obj:`int` Effective number of iterations """ ncp = get_module(backend) # Identify if operator is complex if np.issubdtype(dtype, np.complexfloating): cmpx = 1j else: cmpx = 0 # Choose a random vector to decrease the chance that vector # is orthogonal to the eigenvector b_k = ncp.random.rand(Op.shape[1]).astype(dtype) + cmpx * ncp.random.rand( Op.shape[1] ).astype(dtype) b_k = b_k / ncp.linalg.norm(b_k) niter = 10 if niter is None else niter maxeig_old = 0.0 for iiter in range(niter): # compute largest eigenvector b1_k = Op.matvec(b_k) # compute largest eigevalue maxeig = ncp.vdot(b_k, b1_k) # renormalize the vector b_k = b1_k / ncp.linalg.norm(b1_k) if ncp.abs(maxeig - maxeig_old) < tol * maxeig_old: break maxeig_old = maxeig return maxeig, b_k, iiter + 1
2,165
25.096386
78
py
pylops
pylops-master/pylops/optimization/cls_leastsquares.py
__all__ = [ "NormalEquationsInversion", "RegularizedOperator", "RegularizedInversion", "PreconditionedInversion", ] import logging from typing import TYPE_CHECKING, Optional, Sequence, Tuple import numpy as np from scipy.sparse.linalg import cg as sp_cg from scipy.sparse.linalg import lsqr from pylops.basicoperators import Diagonal, VStack from pylops.optimization.basesolver import Solver from pylops.optimization.basic import cg, cgls from pylops.utils.backend import get_array_module from pylops.utils.decorators import disable_ndarray_multiplication from pylops.utils.typing import NDArray if TYPE_CHECKING: from pylops.linearoperator import LinearOperator logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def _check_regularization_dims( Regs: Sequence["LinearOperator"], dataregs: Optional[Sequence[NDArray]] = None, epsRs: Optional[Sequence[float]] = None, ) -> None: """check Regs, dataregs, and epsRs have same dimensions""" nRegs = len(Regs) ndataregs = nRegs if dataregs is None else len(dataregs) nepsRs = nRegs if epsRs is None else len(epsRs) if not nRegs == ndataregs == nepsRs: raise ValueError("Regs, dataregs, and epsRs must have the same size") class NormalEquationsInversion(Solver): r"""Inversion of normal equations. Solve the regularized normal equations for a system of equations given the operator ``Op``, a data weighting operator ``Weight`` and optionally a list of regularization terms ``Regs`` and/or ``NRegs``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]`. 
See Also -------- RegularizedInversion: Regularized inversion PreconditionedInversion: Preconditioned inversion Notes ----- Solve the following normal equations for a system of regularized equations given the operator :math:`\mathbf{Op}`, a data weighting operator :math:`\mathbf{W}`, a list of regularization terms (:math:`\mathbf{R}_i` and/or :math:`\mathbf{N}_i`), the data :math:`\mathbf{y}` and regularization data :math:`\mathbf{y}_{\mathbf{R}_i}`, and the damping factors :math:`\epsilon_I`, :math:`\epsilon_{\mathbf{R}_i}` and :math:`\epsilon_{\mathbf{N}_i}`: .. math:: ( \mathbf{Op}^T \mathbf{W} \mathbf{Op} + \sum_i \epsilon_{\mathbf{R}_i}^2 \mathbf{R}_i^T \mathbf{R}_i + \sum_i \epsilon_{\mathbf{N}_i}^2 \mathbf{N}_i + \epsilon_I^2 \mathbf{I} ) \mathbf{x} = \mathbf{Op}^T \mathbf{W} \mathbf{y} + \sum_i \epsilon_{\mathbf{R}_i}^2 \mathbf{R}_i^T \mathbf{y}_{\mathbf{R}_i} Note that the data term of the regularizations :math:`\mathbf{N}_i` is implicitly assumed to be zero. """ def _print_setup(self) -> None: self._print_solver(nbar=55) strreg = f"Regs={self.Regs}" streps = f"\nepsRs={self.epsRs} epsI={self.epsI}" print(strreg + streps) print("-" * 55) def _print_finalize(self) -> None: print(f"\nTotal time (s) = {self.telapsed:.2f}") print("-" * 55 + "\n") @disable_ndarray_multiplication def setup( self, y: NDArray, Regs: Sequence["LinearOperator"], Weight: Optional["LinearOperator"] = None, dataregs: Optional[Sequence[NDArray]] = None, epsI: float = 0, epsRs: Optional[Sequence[float]] = None, NRegs: Optional[Sequence["LinearOperator"]] = None, epsNRs: Optional[Sequence[float]] = None, show: bool = False, ) -> None: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (must have the same number of elements as ``Regs``) epsI : 
:obj:`float`, optional Tikhonov damping epsRs : :obj:`list`, optional Regularization dampings (must have the same number of elements as ``Regs``) NRegs : :obj:`list` Normal regularization operators (``None`` to avoid adding regularization). Such operators must apply the chain of the forward and the adjoint in one go. This can be convenient in cases where a faster implementation is available compared to applying the forward followed by the adjoint. epsNRs : :obj:`list`, optional Regularization dampings for normal operators (must have the same number of elements as ``NRegs``) show : :obj:`bool`, optional Display setup log """ self.y = y self.Regs = Regs self.epsI = epsI self.epsRs = epsRs self.dataregs = dataregs self.ncp = get_array_module(y) # check consistency in regularization terms if Regs is not None: _check_regularization_dims(Regs, dataregs, epsRs) # store adjoint self.OpH = self.Op.H # create dataregs and epsRs if not provided if dataregs is None and Regs is not None: self.dataregs = [ self.ncp.zeros(int(Reg.shape[0]), dtype=Reg.dtype) for Reg in Regs ] if epsRs is None and Regs is not None: self.epsRs = [1] * len(Regs) # normal equations if Weight is not None: self.y_normal = self.OpH * Weight * y else: self.y_normal = self.OpH * y if Weight is not None: self.Op_normal = self.OpH * Weight * self.Op else: self.Op_normal = self.OpH * self.Op # add regularization terms if epsI > 0: self.Op_normal += epsI**2 * Diagonal( self.ncp.ones(self.Op.dims, dtype=self.Op.dtype), dtype=self.Op.dtype, ) if ( self.epsRs is not None and self.Regs is not None and self.dataregs is not None ): for epsR, Reg, datareg in zip(self.epsRs, self.Regs, self.dataregs): self.RegH = Reg.H self.y_normal += epsR**2 * self.RegH * datareg self.Op_normal += epsR**2 * self.RegH * Reg if epsNRs is not None and NRegs is not None: for epsNR, NReg in zip(epsNRs, NRegs): self.Op_normal += epsNR**2 * NReg # print setup if show: self._print_setup() def step(self) -> None: raise NotImplementedError( 
"NormalEquationsInversion uses as default the" " scipy.sparse.linalg.cg solver, therefore the " "step method is not implemented. Use directly run or solve." ) @disable_ndarray_multiplication def run( self, x: NDArray, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int]: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of the solver. If ``None``, x is assumed to be a zero vector engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display iterations log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.cg` and :py:func:`pylops.optimization.solver.cg` are used as default for numpy and cupy `data`, respectively) .. note:: When user does not supply ``atol``, it is set to "legacy". Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. istop : :obj:`int` Convergence information (only when using :py:func:`scipy.sparse.linalg.cg`): ``0``: successful exit ``>0``: convergence to tolerance not achieved, number of iterations ``<0``: illegal input or breakdown """ if x is not None: self.y_normal = self.y_normal - self.Op_normal * x if engine == "scipy" and self.ncp == np: if "atol" not in kwargs_solver: kwargs_solver["atol"] = "legacy" xinv, istop = sp_cg(self.Op_normal, self.y_normal, **kwargs_solver) elif engine == "pylops" or self.ncp != np: if show: kwargs_solver["show"] = True xinv = cg( self.Op_normal, self.y_normal, self.ncp.zeros(self.Op_normal.shape[1], dtype=self.Op_normal.dtype), **kwargs_solver, )[0] istop = None else: raise NotImplementedError("Engine must be scipy or pylops") if x is not None: xinv = x + xinv return xinv, istop def solve( self, y: NDArray, Regs: Sequence["LinearOperator"], x0: Optional[NDArray] = None, Weight: Optional["LinearOperator"] = None, dataregs: Optional[Sequence[NDArray]] = None, epsI: float = 0, epsRs: Optional[Sequence[float]] = None, NRegs: 
Optional[Sequence["LinearOperator"]] = None, epsNRs: Optional[Sequence[float]] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. If ``None``, initialize internally as zero vector Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (must have the same number of elements as ``Regs``) epsI : :obj:`float`, optional Tikhonov damping epsRs : :obj:`list`, optional Regularization dampings (must have the same number of elements as ``Regs``) NRegs : :obj:`list` Normal regularization operators (``None`` to avoid adding regularization). Such operators must apply the chain of the forward and the adjoint in one go. This can be convenient in cases where a faster implementation is available compared to applying the forward followed by the adjoint. 
epsNRs : :obj:`list`, optional Regularization dampings for normal operators (must have the same number of elements as ``NRegs``) engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[N \times 1]` istop : :obj:`int` Convergence information (only when using :py:func:`scipy.sparse.linalg.cg`): ``0``: successful exit ``>0``: convergence to tolerance not achieved, number of iterations ``<0``: illegal input or breakdown """ self.setup( y=y, Regs=Regs, Weight=Weight, dataregs=dataregs, epsI=epsI, epsRs=epsRs, NRegs=NRegs, epsNRs=epsNRs, show=show, ) x, istop = self.run(x0, engine=engine, show=show, **kwargs_solver) self.finalize(show) return x, istop def RegularizedOperator( Op: "LinearOperator", Regs: Sequence["LinearOperator"], epsRs: Sequence[float] = (1,), ) -> "LinearOperator": r"""Regularized operator. Creates a regularized operator given the operator ``Op`` and a list of regularization terms ``Regs``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert Regs : :obj:`tuple` or :obj:`list` Regularization operators epsRs : :obj:`tuple` or :obj:`list`, optional Regularization dampings Returns ------- OpReg : :obj:`pylops.LinearOperator` Regularized operator See Also -------- RegularizedInversion: Regularized inversion Notes ----- Create a regularized operator by augumenting the problem operator :math:`\mathbf{Op}`, by a set of regularization terms :math:`\mathbf{R_i}` and their damping factors and :math:`\epsilon_{{R}_i}`: .. math:: \begin{bmatrix} \mathbf{Op} \\ \epsilon_{\mathbf{R}_1} \mathbf{R}_1 \\ ... \\ \epsilon_{R_N} \mathbf{R}_N \end{bmatrix} """ OpReg = VStack( [Op] + [epsR * Reg for epsR, Reg in zip(epsRs, Regs)], dtype=Op.dtype ) return OpReg class RegularizedInversion(Solver): r"""Regularized inversion. 
Solve a system of regularized equations given the operator ``Op``, a data weighting operator ``Weight``, and a list of regularization terms ``Regs``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]`. See Also -------- RegularizedOperator: Regularized operator NormalEquationsInversion: Normal equations inversion PreconditionedInversion: Preconditioned inversion Notes ----- Solve the following system of regularized equations given the operator :math:`\mathbf{Op}`, a data weighting operator :math:`\mathbf{W}^{1/2}`, a list of regularization terms :math:`\mathbf{R}_i`, the data :math:`\mathbf{y}` and regularization data :math:`\mathbf{y}_{\mathbf{R}_i}`, and the damping factors :math:`\epsilon_\mathbf{I}`: and :math:`\epsilon_{\mathbf{R}_i}`: .. math:: \begin{bmatrix} \mathbf{W}^{1/2} \mathbf{Op} \\ \epsilon_{\mathbf{R}_1} \mathbf{R}_1 \\ \vdots \\ \epsilon_{\mathbf{R}_N} \mathbf{R}_N \end{bmatrix} \mathbf{x} = \begin{bmatrix} \mathbf{W}^{1/2} \mathbf{y} \\ \epsilon_{\mathbf{R}_1} \mathbf{y}_{\mathbf{R}_1} \\ \vdots \\ \epsilon_{\mathbf{R}_N} \mathbf{y}_{\mathbf{R}_N} \\ \end{bmatrix} where the ``Weight`` provided here is equivalent to the square-root of the weight in :py:func:`pylops.optimization.leastsquares.NormalEquationsInversion`. Note that this system is solved using the :py:func:`scipy.sparse.linalg.lsqr` and an initial guess ``x0`` can be provided to this solver, despite the original solver does not allow so. 
""" def _print_setup(self) -> None: self._print_solver(nbar=65) strreg = f"Regs={self.Regs}" streps = f"\nepsRs={self.epsRs}" print(strreg + streps) print("-" * 65) def _print_finalize(self) -> None: print(f"\nTotal time (s) = {self.telapsed:.2f}") print("-" * 65 + "\n") @disable_ndarray_multiplication def setup( self, y: NDArray, Regs: Sequence["LinearOperator"], Weight: Optional["LinearOperator"] = None, dataregs: Optional[Sequence[NDArray]] = None, epsRs: Optional[Sequence[float]] = None, show: bool = False, ) -> None: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (must have the same number of elements as ``Regs``) epsRs : :obj:`list`, optional Regularization dampings (must have the same number of elements as ``Regs``) show : :obj:`bool`, optional Display setup log """ self.y = y self.Regs = Regs self.epsRs = epsRs self.dataregs = dataregs self.ncp = get_array_module(y) # check consistency in regularization terms if Regs is not None: _check_regularization_dims(Regs, dataregs, epsRs) # create regularization data if dataregs is None and Regs is not None: self.dataregs = [ self.ncp.zeros(int(Reg.shape[0]), dtype=Reg.dtype) for Reg in Regs ] if self.epsRs is None and Regs is not None: self.epsRs = [1] * len(Regs) # create regularization operators self.RegOp: LinearOperator if Weight is not None: if Regs is None: self.RegOp = Weight * self.Op else: self.RegOp = RegularizedOperator( Weight * self.Op, Regs, epsRs=self.epsRs ) else: if Regs is None: self.RegOp = self.Op else: self.RegOp = RegularizedOperator(self.Op, Regs, epsRs=self.epsRs) # augumented data if Weight is not None: self.datatot: NDArray = Weight * self.y.copy() else: self.datatot = self.y.copy() # augumented operator if self.epsRs is not None and 
self.dataregs is not None: for epsR, datareg in zip(self.epsRs, self.dataregs): self.datatot = np.hstack((self.datatot, epsR * datareg)) # print setup if show: self._print_setup() def step(self) -> None: raise NotImplementedError( "RegularizedInversion uses as default the" " scipy.sparse.linalg.lsqr solver, therefore the " "step method is not implemented. Use directly run or solve." ) @disable_ndarray_multiplication def run( self, x: NDArray, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of the solver. If ``None``, x is assumed to be a zero vector engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display iterations log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used for engine ``scipy`` and ``pylops``, respectively) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. 
Equal to ``r1norm`` if :math:`\epsilon=0` """ if x is not None: self.datatot = self.datatot - self.RegOp * x if engine == "scipy" and self.ncp == np: if show: kwargs_solver["show"] = 1 xinv, istop, itn, r1norm, r2norm = lsqr( self.RegOp, self.datatot, **kwargs_solver )[0:5] elif engine == "pylops" or self.ncp != np: if show: kwargs_solver["show"] = True xinv, istop, itn, r1norm, r2norm = cgls( self.RegOp, self.datatot, self.ncp.zeros(self.RegOp.dims, dtype=self.RegOp.dtype), **kwargs_solver, )[0:5] else: raise NotImplementedError("Engine must be scipy or pylops") if x is not None: xinv = x + xinv return xinv, istop, itn, r1norm, r2norm def solve( self, y: NDArray, Regs: Sequence["LinearOperator"], x0: Optional[NDArray] = None, Weight: Optional["LinearOperator"] = None, dataregs: Optional[Sequence[NDArray]] = None, epsRs: Optional[Sequence[float]] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` Regs : :obj:`list` Regularization operators (``None`` to avoid adding regularization) x0 : :obj:`numpy.ndarray`, optional Initial guess Weight : :obj:`pylops.LinearOperator`, optional Weight operator dataregs : :obj:`list`, optional Regularization data (must have the same number of elements as ``Regs``) epsRs : :obj:`list`, optional Regularization dampings (must have the same number of elements as ``Regs``) engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used for engine ``scipy`` and ``pylops``, respectively) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. 
istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` """ self.setup( y=y, Regs=Regs, Weight=Weight, dataregs=dataregs, epsRs=epsRs, show=show ) x, istop, itn, r1norm, r2norm = self.run( x0, engine=engine, show=show, **kwargs_solver ) self.finalize(show) return x, istop, itn, r1norm, r2norm class PreconditionedInversion(Solver): r"""Preconditioned inversion. Solve a system of preconditioned equations given the operator ``Op`` and a preconditioner ``P``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]`. See Also -------- RegularizedInversion: Regularized inversion NormalEquationsInversion: Normal equations inversion Notes ----- Solve the following system of preconditioned equations given the operator :math:`\mathbf{Op}`, a preconditioner :math:`\mathbf{P}`, the data :math:`\mathbf{y}` .. math:: \mathbf{y} = \mathbf{Op}\,\mathbf{P} \mathbf{p} where :math:`\mathbf{p}` is the solution in the preconditioned space and :math:`\mathbf{x} = \mathbf{P}\mathbf{p}` is the solution in the original space. 
""" def _print_setup(self) -> None: self._print_solver(nbar=65) strprec = f"Prec={self.P}" print(strprec) print("-" * 65) def _print_finalize(self) -> None: print(f"\nTotal time (s) = {self.telapsed:.2f}") print("-" * 65 + "\n") @disable_ndarray_multiplication def setup( self, y: NDArray, P: "LinearOperator", show: bool = False, ) -> None: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` P : :obj:`pylops.LinearOperator` Preconditioner show : :obj:`bool`, optional Display setup log """ self.y = y self.P = P self.ncp = get_array_module(y) # preconditioned operator self.POp = self.Op * P # print setup if show: self._print_setup() def step(self) -> None: raise NotImplementedError( "PreconditionedInversion uses as default the" " scipy.sparse.linalg.lsqr solver, therefore the " "step method is not implemented. Use directly run or solve." ) @disable_ndarray_multiplication def run( self, x: NDArray, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of the solver. If ``None``, x is assumed to be a zero vector engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display iterations log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used for engine ``scipy`` and ``pylops``, respectively) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model. 
istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` """ if x is not None: self.y = self.y - self.Op * x if engine == "scipy" and self.ncp == np: if show: kwargs_solver["show"] = 1 pinv, istop, itn, r1norm, r2norm = lsqr( self.POp, self.y, **kwargs_solver, )[0:5] elif engine == "pylops" or self.ncp != np: if show: kwargs_solver["show"] = True pinv, istop, itn, r1norm, r2norm = cgls( self.POp, self.y, self.ncp.zeros(self.POp.shape[1], dtype=self.POp.dtype), **kwargs_solver, )[0:5] # force it 1d as we decorate this method with disable_ndarray_multiplication pinv = pinv.ravel() else: raise NotImplementedError("Engine must be scipy or pylops") xinv = self.P * pinv if x is not None: xinv = x + xinv return xinv, istop, itn, r1norm, r2norm def solve( self, y: NDArray, P: "LinearOperator", x0: Optional[NDArray] = None, engine: str = "scipy", show: bool = False, **kwargs_solver, ) -> Tuple[NDArray, int, int, float, float]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` P : :obj:`pylops.LinearOperator` Preconditioner x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. 
If ``None``, initialize internally as zero vector engine : :obj:`str`, optional Solver to use (``scipy`` or ``pylops``) show : :obj:`bool`, optional Display log **kwargs_solver Arbitrary keyword arguments for chosen solver (:py:func:`scipy.sparse.linalg.lsqr` and :py:func:`pylops.optimization.solver.cgls` are used for engine ``scipy`` and ``pylops``, respectively) Returns ------- x : :obj:`numpy.ndarray` Inverted model. istop : :obj:`int` Gives the reason for termination ``1`` means :math:`\mathbf{x}` is an approximate solution to :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}` ``2`` means :math:`\mathbf{x}` approximately solves the least-squares problem itn : :obj:`int` Iteration number upon termination r1norm : :obj:`float` :math:`||\mathbf{r}||_2^2`, where :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}` r2norm : :obj:`float` :math:`\sqrt{\mathbf{r}^T\mathbf{r} + \epsilon^2 \mathbf{x}^T\mathbf{x}}`. Equal to ``r1norm`` if :math:`\epsilon=0` """ self.setup(y=y, P=P, show=show) x, istop, itn, r1norm, r2norm = self.run( x0, engine=engine, show=show, **kwargs_solver ) self.finalize(show) return x, istop, itn, r1norm, r2norm
30,376
32.865106
92
py
pylops
pylops-master/pylops/optimization/sparsity.py
__all__ = [ "irls", "omp", "ista", "fista", "spgl1", "splitbregman", ] from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple from pylops.optimization.cls_sparsity import FISTA, IRLS, ISTA, OMP, SPGL1, SplitBregman from pylops.utils.decorators import add_ndarray_support_to_solver from pylops.utils.typing import NDArray, SamplingLike if TYPE_CHECKING: from pylops.linearoperator import LinearOperator def irls( Op: "LinearOperator", y: NDArray, x0: Optional[NDArray] = None, nouter: int = 10, threshR: bool = False, epsR: float = 1e-10, epsI: float = 1e-10, tolIRLS: float = 1e-10, warm: bool = False, kind: str = "data", show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), callback: Optional[Callable] = None, **kwargs_solver, ) -> Tuple[NDArray, int]: r"""Iteratively reweighted least squares. Solve an optimization problem with :math:`L_1` cost function (data IRLS) or :math:`L_1` regularization term (model IRLS) given the operator ``Op`` and data ``y``. In the *data IRLS*, the cost function is minimized by iteratively solving a weighted least squares problem with the weight at iteration :math:`i` being based on the data residual at iteration :math:`i-1`. This IRLS solver is robust to *outliers* since the L1 norm given less weight to large residuals than L2 norm does. Similarly in the *model IRLS*, the weight at at iteration :math:`i` is based on the model at iteration :math:`i-1`. This IRLS solver inverts for a sparse model vector. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data x0 : :obj:`numpy.ndarray`, optional Initial guess nouter : :obj:`int`, optional Number of outer iterations threshR : :obj:`bool`, optional Apply thresholding in creation of weight (``True``) or damping (``False``) epsR : :obj:`float`, optional Damping to be applied to residuals for weighting term epsI : :obj:`float`, optional Tikhonov damping tolIRLS : :obj:`float`, optional Tolerance. 
Stop outer iterations if difference between inverted model at subsequent iterations is smaller than ``tolIRLS`` warm : :obj:`bool`, optional Warm start each inversion inner step with previous estimate (``True``) or not (``False``). This only applies to ``kind="data"`` and ``kind="datamodel"`` kind : :obj:`str`, optional Kind of solver (``model``, ``data`` or ``datamodel``) show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. callback : :obj:`callable`, optional Function with signature (``callback(x)``) to call after each iteration where ``x`` is the current model vector **kwargs_solver Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.cg` solver for data IRLS and :py:func:`scipy.sparse.linalg.lsqr` solver for model IRLS when using numpy data(or :py:func:`pylops.optimization.solver.cg` and :py:func:`pylops.optimization.solver.cgls` when using cupy data) Returns ------- xinv : :obj:`numpy.ndarray` Inverted model nouter : :obj:`int` Number of effective outer iterations Notes ----- See :class:`pylops.optimization.cls_sparsity.IRLS` """ irlssolve = IRLS(Op) if callback is not None: irlssolve.callback = callback x, nouter, = irlssolve.solve( y, x0=x0, nouter=nouter, threshR=threshR, epsR=epsR, epsI=epsI, tolIRLS=tolIRLS, warm=warm, kind=kind, show=show, itershow=itershow, **kwargs_solver, ) return x, nouter def omp( Op: "LinearOperator", y: NDArray, niter_outer: int = 10, niter_inner: int = 40, sigma: float = 1e-4, normalizecols: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), callback: Optional[Callable] = None, ) -> Tuple[NDArray, int, NDArray]: r"""Orthogonal Matching Pursuit (OMP). Solve an optimization problem with :math:`L^0` regularization function given the operator ``Op`` and data ``y``. 
The operator can be real or complex, and should ideally be either square :math:`N=M` or underdetermined :math:`N<M`. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data niter_outer : :obj:`int`, optional Number of iterations of outer loop niter_inner : :obj:`int`, optional Number of iterations of inner loop. By choosing ``niter_inner=0``, the Matching Pursuit (MP) algorithm is implemented. sigma : :obj:`list` Maximum :math:`L_2` norm of residual. When smaller stop iterations. normalizecols : :obj:`list`, optional Normalize columns (``True``) or not (``False``). Note that this can be expensive as it requires applying the forward operator :math:`n_{cols}` times to unit vectors (i.e., containing 1 at position j and zero otherwise); use only when the columns of the operator are expected to have highly varying norms. show : :obj:`bool`, optional Display iterations log itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. callback : :obj:`callable`, optional Function with signature (``callback(x)``) to call after each iteration where ``x`` is the current model vector Returns ------- xinv : :obj:`numpy.ndarray` Inverted model niter_outer : :obj:`int` Number of effective outer iterations cost : :obj:`numpy.ndarray` History of cost function See Also -------- ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA). FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA). SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1). SplitBregman: Split Bregman for mixed L2-L1 norms. 
Notes ----- See :class:`pylops.optimization.cls_sparsity.OMP` """ ompsolve = OMP(Op) if callback is not None: ompsolve.callback = callback x, niter_outer, cost = ompsolve.solve( y, niter_outer=niter_outer, niter_inner=niter_inner, sigma=sigma, normalizecols=normalizecols, show=show, itershow=itershow, ) return x, niter_outer, cost def ista( Op: "LinearOperator", y: NDArray, x0: Optional[NDArray] = None, niter: int = 10, SOp: Optional["LinearOperator"] = None, eps: float = 0.1, alpha: Optional[float] = None, eigsdict: Optional[Dict[str, Any]] = None, tol: float = 1e-10, threshkind: str = "soft", perc: Optional[float] = None, decay: Optional[NDArray] = None, monitorres: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), callback: Optional[Callable] = None, ) -> Tuple[NDArray, int, NDArray]: r"""Iterative Shrinkage-Thresholding Algorithm (ISTA). Solve an optimization problem with :math:`L^p, \; p=0, 0.5, 1` regularization, given the operator ``Op`` and data ``y``. The operator can be real or complex, and should ideally be either square :math:`N=M` or underdetermined :math:`N<M`. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data of size :math:`[N \times 1]` x0: :obj:`numpy.ndarray`, optional Initial guess niter : :obj:`int` Number of iterations SOp : :obj:`pylops.LinearOperator`, optional Regularization operator (use when solving the analysis problem) eps : :obj:`float`, optional Sparsity damping alpha : :obj:`float`, optional Step size. To guarantee convergence, ensure :math:`\alpha \le 1/\lambda_\text{max}`, where :math:`\lambda_\text{max}` is the largest eigenvalue of :math:`\mathbf{Op}^H\mathbf{Op}`. If ``None``, the maximum eigenvalue is estimated and the optimal step size is chosen as :math:`1/\lambda_\text{max}`. If provided, the convergence criterion will not be checked internally. 
eigsdict : :obj:`dict`, optional Dictionary of parameters to be passed to :func:`pylops.LinearOperator.eigs` method when computing the maximum eigenvalue tol : :obj:`float`, optional Tolerance. Stop iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` threshkind : :obj:`str`, optional Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile', 'soft-percentile', or 'half-percentile' - 'soft' used as default) perc : :obj:`float`, optional Percentile, as percentage of values to be kept by thresholding (to be provided when thresholding is soft-percentile or half-percentile) decay : :obj:`numpy.ndarray`, optional Decay factor to be applied to thresholding during iterations monitorres : :obj:`bool`, optional Monitor that residual is decreasing show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. callback : :obj:`callable`, optional Function with signature (``callback(x)``) to call after each iteration where ``x`` is the current model vector Returns ------- xinv : :obj:`numpy.ndarray` Inverted model niter : :obj:`int` Number of effective iterations cost : :obj:`numpy.ndarray` History of cost function Raises ------ NotImplementedError If ``threshkind`` is different from hard, soft, half, soft-percentile, or half-percentile ValueError If ``perc=None`` when ``threshkind`` is soft-percentile or half-percentile ValueError If ``monitorres=True`` and residual increases See Also -------- OMP: Orthogonal Matching Pursuit (OMP). FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA). SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1). SplitBregman: Split Bregman for mixed L2-L1 norms. 
Notes ----- See :class:`pylops.optimization.cls_sparsity.ISTA` """ istasolve = ISTA(Op) if callback is not None: istasolve.callback = callback x, iiter, cost = istasolve.solve( y=y, x0=x0, niter=niter, SOp=SOp, eps=eps, alpha=alpha, eigsdict=eigsdict, tol=tol, threshkind=threshkind, perc=perc, decay=decay, monitorres=monitorres, show=show, itershow=itershow, ) return x, iiter, cost def fista( Op: "LinearOperator", y: NDArray, x0: Optional[NDArray] = None, niter: int = 10, SOp: Optional["LinearOperator"] = None, eps: float = 0.1, alpha: Optional[float] = None, eigsdict: Optional[Dict[str, Any]] = None, tol: float = 1e-10, threshkind: str = "soft", perc: Optional[float] = None, decay: Optional[NDArray] = None, monitorres: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), callback: Optional[Callable] = None, ) -> Tuple[NDArray, int, NDArray]: r"""Fast Iterative Shrinkage-Thresholding Algorithm (FISTA). Solve an optimization problem with :math:`L^p, \; p=0, 0.5, 1` regularization, given the operator ``Op`` and data ``y``. The operator can be real or complex, and should ideally be either square :math:`N=M` or underdetermined :math:`N<M`. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data x0: :obj:`numpy.ndarray`, optional Initial guess niter : :obj:`int`, optional Number of iterations SOp : :obj:`pylops.LinearOperator`, optional Regularization operator (use when solving the analysis problem) eps : :obj:`float`, optional Sparsity damping alpha : :obj:`float`, optional Step size. To guarantee convergence, ensure :math:`\alpha \le 1/\lambda_\text{max}`, where :math:`\lambda_\text{max}` is the largest eigenvalue of :math:`\mathbf{Op}^H\mathbf{Op}`. If ``None``, the maximum eigenvalue is estimated and the optimal step size is chosen as :math:`1/\lambda_\text{max}`. If provided, the convergence criterion will not be checked internally. 
eigsdict : :obj:`dict`, optional Dictionary of parameters to be passed to :func:`pylops.LinearOperator.eigs` method when computing the maximum eigenvalue tol : :obj:`float`, optional Tolerance. Stop iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` threshkind : :obj:`str`, optional Kind of thresholding ('hard', 'soft', 'half', 'soft-percentile', or 'half-percentile' - 'soft' used as default) perc : :obj:`float`, optional Percentile, as percentage of values to be kept by thresholding (to be provided when thresholding is soft-percentile or half-percentile) decay : :obj:`numpy.ndarray`, optional Decay factor to be applied to thresholding during iterations monitorres : :obj:`bool`, optional Monitor that residual is decreasing show : :obj:`bool`, optional Display iterations log itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. callback : :obj:`callable`, optional Function with signature (``callback(x)``) to call after each iteration where ``x`` is the current model vector Returns ------- xinv : :obj:`numpy.ndarray` Inverted model niter : :obj:`int` Number of effective iterations cost : :obj:`numpy.ndarray`, optional History of cost function Raises ------ NotImplementedError If ``threshkind`` is different from hard, soft, half, soft-percentile, or half-percentile ValueError If ``perc=None`` when ``threshkind`` is soft-percentile or half-percentile See Also -------- OMP: Orthogonal Matching Pursuit (OMP). ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA). SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1). SplitBregman: Split Bregman for mixed L2-L1 norms. 
Notes ----- See :class:`pylops.optimization.cls_sparsity.FISTA` """ fistasolve = FISTA(Op) if callback is not None: fistasolve.callback = callback x, iiter, cost = fistasolve.solve( y=y, x0=x0, niter=niter, SOp=SOp, eps=eps, alpha=alpha, eigsdict=eigsdict, tol=tol, threshkind=threshkind, perc=perc, decay=decay, monitorres=monitorres, show=show, itershow=itershow, ) return x, iiter, cost @add_ndarray_support_to_solver def spgl1( Op: "LinearOperator", y: NDArray, x0: Optional[NDArray] = None, SOp: Optional["LinearOperator"] = None, tau: float = 0.0, sigma: float = 0.0, show: bool = False, **kwargs_spgl1, ) -> Tuple[NDArray, NDArray, Dict[str, Any]]: r"""Spectral Projected-Gradient for L1 norm. Solve a constrained system of equations given the operator ``Op`` and a sparsyfing transform ``SOp`` aiming to retrive a model that is sparse in the sparsyfing domain. This is a simple wrapper to :py:func:`spgl1.spgl1` which is a porting of the well-known `SPGL1 <https://www.cs.ubc.ca/~mpf/spgl1/>`_ MATLAB solver into Python. In order to be able to use this solver you need to have installed the ``spgl1`` library. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data x0 : :obj:`numpy.ndarray`, optional Initial guess SOp : :obj:`pylops.LinearOperator`, optional Sparsifying transform tau : :obj:`float`, optional Non-negative LASSO scalar. If different from ``0``, SPGL1 will solve LASSO problem sigma : :obj:`list`, optional BPDN scalar. If different from ``0``, SPGL1 will solve BPDN problem show : :obj:`bool`, optional Display iterations log **kwargs_spgl1 Arbitrary keyword arguments for :py:func:`spgl1.spgl1` solver Returns ------- xinv : :obj:`numpy.ndarray` Inverted model in original domain. pinv : :obj:`numpy.ndarray` Inverted model in sparse domain. 
info : :obj:`dict` Dictionary with the following information: - ``tau``, final value of tau (see sigma above) - ``rnorm``, two-norm of the optimal residual - ``rgap``, relative duality gap (an optimality measure) - ``gnorm``, Lagrange multiplier of (LASSO) - ``stat``, final status of solver * ``1``: found a BPDN solution, * ``2``: found a BP solution; exit based on small gradient, * ``3``: found a BP solution; exit based on small residual, * ``4``: found a LASSO solution, * ``5``: error, too many iterations, * ``6``: error, linesearch failed, * ``7``: error, found suboptimal BP solution, * ``8``: error, too many matrix-vector products. - ``niters``, number of iterations - ``nProdA``, number of multiplications with A - ``nProdAt``, number of multiplications with A' - ``n_newton``, number of Newton steps - ``time_project``, projection time (seconds) - ``time_matprod``, matrix-vector multiplications time (seconds) - ``time_total``, total solution time (seconds) - ``niters_lsqr``, number of lsqr iterations (if ``subspace_min=True``) - ``xnorm1``, L1-norm model solution history through iterations - ``rnorm2``, L2-norm residual history through iterations - ``lambdaa``, Lagrange multiplier history through iterations Raises ------ ModuleNotFoundError If the ``spgl1`` library is not installed Notes ----- See :class:`pylops.optimization.cls_sparsity.SPGL1` """ spgl1solve = SPGL1(Op) xinv, pinv, info = spgl1solve.solve( y, x0=x0, SOp=SOp, tau=tau, sigma=sigma, show=show, **kwargs_spgl1, ) return xinv, pinv, info def splitbregman( Op: "LinearOperator", y: NDArray, RegsL1: List["LinearOperator"], x0: Optional[NDArray] = None, niter_outer: int = 3, niter_inner: int = 5, RegsL2: Optional[List["LinearOperator"]] = None, dataregsL2: Optional[List[NDArray]] = None, mu: float = 1.0, epsRL1s: Optional[SamplingLike] = None, epsRL2s: Optional[SamplingLike] = None, tol: float = 1e-10, tau: float = 1.0, restart: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 
10), show_inner: bool = False, callback: Optional[Callable] = None, **kwargs_lsqr, ) -> Tuple[NDArray, int, NDArray]: r"""Split Bregman for mixed L2-L1 norms. Solve an unconstrained system of equations with mixed :math:`L_2` and :math:`L_1` regularization terms given the operator ``Op``, a list of :math:`L_1` regularization terms ``RegsL1``, and an optional list of :math:`L_2` regularization terms ``RegsL2``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert y : :obj:`numpy.ndarray` Data RegsL1 : :obj:`list` :math:`L_1` regularization operators x0 : :obj:`numpy.ndarray`, optional Initial guess niter_outer : :obj:`int` Number of iterations of outer loop niter_inner : :obj:`int` Number of iterations of inner loop of first step of the Split Bregman algorithm. A small number of iterations is generally sufficient and for many applications optimal efficiency is obtained when only one iteration is performed. RegsL2 : :obj:`list` Additional :math:`L_2` regularization operators (if ``None``, :math:`L_2` regularization is not added to the problem) dataregsL2 : :obj:`list`, optional :math:`L_2` Regularization data (must have the same number of elements of ``RegsL2`` or equal to ``None`` to use a zero data for every regularization operator in ``RegsL2``) mu : :obj:`float`, optional Data term damping epsRL1s : :obj:`list` :math:`L_1` Regularization dampings (must have the same number of elements as ``RegsL1``) epsRL2s : :obj:`list` :math:`L_2` Regularization dampings (must have the same number of elements as ``RegsL2``) tol : :obj:`float`, optional Tolerance. 
Stop outer iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` tau : :obj:`float`, optional Scaling factor in the Bregman update (must be close to 1) restart : :obj:`bool`, optional The unconstrained inverse problem in inner loop is initialized with the initial guess (``True``) or with the last estimate (``False``) show : :obj:`bool`, optional Display iterations log itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. show_inner : :obj:`bool`, optional Display inner iteration logs of lsqr callback : :obj:`callable`, optional Function with signature (``callback(x)``) to call after each iteration where ``x`` is the current model vector **kwargs_lsqr Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.lsqr` solver used to solve the first subproblem in the first step of the Split Bregman algorithm. Returns ------- xinv : :obj:`numpy.ndarray` Inverted model itn_out : :obj:`int` Iteration number of outer loop upon termination cost : :obj:`numpy.ndarray`, optional History of cost function through iterations Notes ----- See :class:`pylops.optimization.cls_sparsity.SplitBregman` """ sbsolve = SplitBregman(Op) if callback is not None: sbsolve.callback = callback xinv, itn_out, cost = sbsolve.solve( y, RegsL1, x0=x0, niter_outer=niter_outer, niter_inner=niter_inner, RegsL2=RegsL2, dataregsL2=dataregsL2, mu=mu, epsRL1s=epsRL1s, epsRL2s=epsRL2s, tol=tol, tau=tau, restart=restart, show=show, itershow=itershow, show_inner=show_inner, **kwargs_lsqr, ) return xinv, itn_out, cost
23,678
33.021552
98
py
pylops
pylops-master/pylops/optimization/basesolver.py
__all__ = ["Solver"]

import functools
import time
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Optional

from pylops.optimization.callback import Callbacks
from pylops.utils.typing import NDArray

if TYPE_CHECKING:
    from pylops.linearoperator import LinearOperator


class Solver(metaclass=ABCMeta):
    r"""Solver

    This is a template class which a user must subclass when implementing a new solver.
    This class comprises of the following mandatory methods:

    - ``__init__``: initialization method to which the operator `Op` must be passed
    - ``setup``: a method that is invoked to setup the solver, basically it will create
      anything required prior to applying a step of the solver
    - ``step``: a method applying a single step of the solver
    - ``run``: a method applying multiple steps of the solver
    - ``finalize``: a method that is invoked at the end of the optimization process.
      It can be used to do some final clean-up of the properties of the operator that
      we want to expose to the user
    - ``solve``: a method applying the entire optimization loop of the solver for a
      certain number of steps

    and optional methods:

    - ``_print_solver``: a method print on screen details of the solver (already
      implemented)
    - ``_print_setup``: a method print on screen details of the setup process
    - ``_print_step``: a method print on screen details of each step
    - ``_print_finalize``: a method print on screen details of the finalize process
    - ``callback``: a method implementing a callback function, which is called after
      every step of the solver

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert
    callbacks : :obj:`pylops.optimization.callback.Callbacks`, optional
        Callbacks object used to implement custom callbacks

    """

    def __init__(
        self,
        Op: "LinearOperator",
        callbacks: Optional[Callbacks] = None,
    ) -> None:
        self.Op = Op
        self.callbacks = callbacks
        # Wrap setup/step/run so registered callbacks fire around each call.
        self._registercallbacks()
        self.iiter = 0
        # Creation time; ``finalize`` uses it to compute total elapsed time.
        self.tstart = time.time()

    def _print_solver(self, text: str = "", nbar: int = 80) -> None:
        # Print the solver name (subclass name), an optional suffix, and the
        # operator shape, framed by a horizontal bar of width ``nbar``.
        print(f"{type(self).__name__}" + text)
        print(
            "-" * nbar + "\n"
            f"The Operator Op has {self.Op.shape[0]} rows and {self.Op.shape[1]} cols"
        )

    def _print_setup(self, *args: Any, **kwargs: Any) -> None:
        # Optional hook: subclasses may print setup details here.
        pass

    def _print_step(self, *args: Any, **kwargs: Any) -> None:
        # Optional hook: subclasses may print per-step details here.
        pass

    def _print_finalize(self, *args: Any, nbar: int = 80, **kwargs: Any) -> None:
        print(
            f"\nIterations = {self.iiter} Total time (s) = {self.telapsed:.2f}"
        )
        print("-" * nbar + "\n")

    def _registercallbacks(self) -> None:
        # We want to make sure that the appropriate callbacks are called
        # for each method. Instead of just calling self.step, we want
        # to call self.callbacks[:].on_step_begin, self.step and finally
        # self.callbacks[::-1].on_step_end, for all callbacks in the list.
        # We can do this in an automated way by decorating all methods.
        def cbdecorator(func, setup=False):
            # func will be self.setup, self.step, etc.
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                if self.callbacks:
                    for cb in self.callbacks:
                        # Call all on_*_begin callbacks.
                        # ``setup`` receives the data positionally and the
                        # initial model only as keyword ``x0``; the other
                        # methods receive the current model as args[0].
                        if setup:
                            getattr(cb, f"on_{func.__name__}_begin")(
                                self, kwargs.get("x0", None)
                            )  # self is solver, args[0] is x
                        else:
                            getattr(cb, f"on_{func.__name__}_begin")(
                                self, args[0]
                            )  # self is solver, args[0] is x
                ret = func(*args, **kwargs)
                if self.callbacks:
                    for cb in self.callbacks[::-1]:
                        # Call all on_*_end callbacks in reverse order
                        if setup:
                            getattr(cb, f"on_{func.__name__}_end")(
                                self, kwargs.get("x0", None)
                            )
                        else:
                            getattr(cb, f"on_{func.__name__}_end")(self, args[0])
                return ret

            return wrapper

        for method in ["setup", "step", "run"]:
            # Replace each bound method by its decorated version on the
            # instance (the class attributes are left untouched).
            setattr(
                self,
                method,
                cbdecorator(getattr(self, method), method == "setup"),
            )

    @abstractmethod
    def setup(
        self,
        y: NDArray,
        *args,
        show: bool = False,
        **kwargs,
    ) -> None:
        """Setup solver

        This method is used to setup the solver. Users can change the
        function signature by including any other input parameter required
        during the setup stage

        Parameters
        ----------
        y : :obj:`np.ndarray`
            Data of size :math:`[N \times 1]`
        show : :obj:`bool`, optional
            Display setup log

        """
        pass

    @abstractmethod
    def step(
        self,
        x: NDArray,
        *args,
        show: bool = False,
        **kwargs,
    ) -> Any:
        """Run one step of solver

        This method is used to run one step of the solver. Users can change
        the function signature by including any other input parameter
        required when applying one step of the solver

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current model vector to be updated by a step of the solver
        show : :obj:`bool`, optional
            Display step log

        """
        pass

    @abstractmethod
    def run(
        self,
        x: NDArray,
        *args,
        show: bool = False,
        **kwargs,
    ) -> Any:
        """Run multiple steps of solver

        This method is used to run multiple step of the solver. Users can
        change the function signature by including any other input parameter
        required when applying multiple steps of the solver

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current model vector to be updated by multiple steps of the
            solver
        show : :obj:`bool`, optional
            Display step log

        """
        pass

    def finalize(
        self,
        *args,
        show: bool = False,
        **kwargs,
    ) -> Any:
        """Finalize solver

        This method is used to finalize the solver. Users can change the
        function signature by including any other input parameter required
        when finalizing the solver

        Parameters
        ----------
        show : :obj:`bool`, optional
            Display finalize log

        """
        self.tend = time.time()
        self.telapsed = self.tend - self.tstart
        if show:
            self._print_finalize()

    @abstractmethod
    def solve(
        self,
        y: NDArray,
        *args,
        show: bool = False,
        **kwargs,
    ) -> Any:
        """Solve

        This method is used to run the entire optimization process. Users
        can change the function signature by including any other input
        parameter required by the solver

        Parameters
        ----------
        y : :obj:`np.ndarray`
            Data
        show : :obj:`bool`, optional
            Display finalize log

        """
        pass

    def callback(
        self,
        x: NDArray,
        *args,
        **kwargs,
    ) -> None:
        """Callback routine

        This routine must be passed by the user. Its function signature must
        contain a single input that contains the current solution (when using
        the `solve` method it will be automatically invoked after each step
        of the solve)

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current solution

        Examples
        --------
        >>> import numpy as np
        >>> from pylops.basicoperators import Identity
        >>> from pylops.optimization.solver import CG
        >>> def callback(x):
        ...     print(f"Running callback, current solution {x}")
        ...
        >>> I = Identity(10)
        >>> I
        <10x10 Identity with dtype=float64>
        >>> cgsolve = CG(I, np.arange(10))
        >>> cgsolve.callback = callback
        >>> x = np.ones(10)
        >>> cgsolve.callback(x)
        Running callback, current solution [1,1,1...]

        """
        pass
8,741
30.446043
93
py
pylops
pylops-master/pylops/optimization/cls_sparsity.py
__all__ = ["IRLS"]

import logging
import time
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple

import numpy as np
from scipy.sparse.linalg import lsqr

from pylops import LinearOperator
from pylops.basicoperators import Diagonal, Identity, VStack
from pylops.optimization.basesolver import Solver
from pylops.optimization.basic import cgls
from pylops.optimization.eigs import power_iteration
from pylops.optimization.leastsquares import regularized_inversion
from pylops.utils import deps
from pylops.utils.backend import get_array_module, get_module_name
from pylops.utils.decorators import disable_ndarray_multiplication
from pylops.utils.typing import InputDimsLike, NDArray, SamplingLike

# Optional dependency: ``spgl1`` is only imported when available
# (``spgl1_import`` returns None when the package can be used).
spgl1_message = deps.spgl1_import("the spgl1 solver")

if spgl1_message is None:
    from spgl1 import spgl1 as ext_spgl1


def _hardthreshold(x: NDArray, thresh: float) -> NDArray:
    r"""Hard thresholding.

    Applies hard thresholding to vector ``x`` (equal to the proximity
    operator for :math:`\|\mathbf{x}\|_0`) as shown in [1]_.

    .. [1] Chen, F., Shen, L., Suter, B.W., “Computing the proximity
       operator of the ℓp norm with 0 < p < 1”,
       IET Signal Processing, vol. 10. 2016.

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    thresh : :obj:`float`
        Threshold

    Returns
    -------
    x1 : :obj:`numpy.ndarray`
        Tresholded vector

    """
    # Zero out entries whose magnitude is below sqrt(2 * thresh);
    # operates on a copy so the input vector is left untouched.
    x1 = x.copy()
    x1[np.abs(x) <= np.sqrt(2 * thresh)] = 0
    return x1


def _softthreshold(x: NDArray, thresh: float) -> NDArray:
    r"""Soft thresholding.

    Applies soft thresholding to vector ``x`` (equal to the proximity
    operator for :math:`\|\mathbf{x}\|_1`) as shown in [1]_.

    .. [1] Chen, F., Shen, L., Suter, B.W., “Computing the proximity
       operator of the ℓp norm with 0 < p < 1”,
       IET Signal Processing, vol. 10. 2016.

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    thresh : :obj:`float`
        Threshold

    Returns
    -------
    x1 : :obj:`numpy.ndarray`
        Tresholded vector

    """
    if np.iscomplexobj(x):
        # Complex case: shrink the magnitude and keep the phase.
        # https://stats.stackexchange.com/questions/357339/soft-thresholding-
        # for-the-lasso-with-complex-valued-data
        x1 = np.maximum(np.abs(x) - thresh, 0.0) * np.exp(1j * np.angle(x))
    else:
        x1 = np.maximum(np.abs(x) - thresh, 0.0) * np.sign(x)
    return x1


def _halfthreshold(x: NDArray, thresh: float) -> NDArray:
    r"""Half thresholding.

    Applies half thresholding to vector ``x`` (equal to the proximity
    operator for :math:`\|\mathbf{x}\|_{1/2}^{1/2}`) as shown in [1]_.

    .. [1] Chen, F., Shen, L., Suter, B.W., “Computing the proximity
       operator of the ℓp norm with 0 < p < 1”,
       IET Signal Processing, vol. 10. 2016.

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    thresh : :obj:`float`
        Threshold

    Returns
    -------
    x1 : :obj:`numpy.ndarray`
        Tresholded vector

    .. warning::
        Since version 1.17.0 does not produce ``np.nan`` on bad input.

    """
    # Closed-form proximity operator for the L1/2 quasi-norm; arccos
    # argument is clipped to [-1, 1] to avoid NaNs from round-off.
    arg = np.ones_like(x)
    arg[x != 0] = (thresh / 8.0) * (np.abs(x[x != 0]) / 3.0) ** (-1.5)
    arg = np.clip(arg, -1, 1)
    phi = 2.0 / 3.0 * np.arccos(arg)
    x1 = 2.0 / 3.0 * x * (1 + np.cos(2.0 * np.pi / 3.0 - phi))
    # x1[np.abs(x) <= 1.5 * thresh ** (2. / 3.)] = 0
    x1[np.abs(x) <= (54 ** (1.0 / 3.0) / 4.0) * thresh ** (2.0 / 3.0)] = 0
    return x1


def _hardthreshold_percentile(x: NDArray, perc: float) -> NDArray:
    r"""Percentile Hard thresholding.

    Applies hard thresholding to vector ``x`` using a percentile to define
    the amount of values in the input vector to be preserved as shown in
    [1]_.

    .. [1] Chen, Y., Chen, K., Shi, P., Wang, Y., “Irregular seismic data
       reconstruction using a percentile-half-thresholding algorithm”,
       Journal of Geophysics and Engineering, vol. 11. 2014.
    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    perc : :obj:`float`
        Percentile

    Returns
    -------
    x1 : :obj:`numpy.ndarray`
        Tresholded vector

    """
    # Derive the threshold from the requested percentile of |x|, then pass
    # 0.5 * thresh**2 so that the hard-threshold cutoff sqrt(2 * t) equals
    # the percentile value.
    thresh = np.percentile(np.abs(x), perc)
    return _hardthreshold(x, 0.5 * thresh**2)


def _softthreshold_percentile(x: NDArray, perc: float) -> NDArray:
    r"""Percentile Soft thresholding.

    Applies soft thresholding to vector ``x`` using a percentile to define
    the amount of values in the input vector to be preserved as shown in
    [1]_.

    .. [1] Chen, Y., Chen, K., Shi, P., Wang, Y., “Irregular seismic data
       reconstruction using a percentile-half-thresholding algorithm”,
       Journal of Geophysics and Engineering, vol. 11. 2014.

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    perc : :obj:`float`
        Percentile

    Returns
    -------
    x : :obj:`numpy.ndarray`
        Tresholded vector

    """
    thresh = np.percentile(np.abs(x), perc)
    return _softthreshold(x, thresh)


def _halfthreshold_percentile(x: NDArray, perc: float) -> NDArray:
    r"""Percentile Half thresholding.

    Applies half thresholding to vector ``x`` using a percentile to define
    the amount of values in the input vector to be preserved as shown in
    [1]_.

    .. [1] Xu, Z., Xiangyu, C., Xu, F. and Zhang, H., “L1/2 Regularization:
       A Thresholding Representation Theory and a Fast Solver”, IEEE
       Transactions on Neural Networks and Learning Systems, vol. 23. 2012.

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Vector
    perc : :obj:`float`
        Percentile

    Returns
    -------
    x : :obj:`numpy.ndarray`
        Thresholded vector

    """
    # Rescale the percentile threshold so the half-threshold cutoff
    # (54^(1/3)/4) * t^(2/3) equals the percentile value.
    thresh = np.percentile(np.abs(x), perc)
    # return _halfthreshold(x, (2. / 3. * thresh) ** (1.5))
    return _halfthreshold(x, (4.0 / 54 ** (1.0 / 3.0) * thresh) ** 1.5)


class IRLS(Solver):
    r"""Iteratively reweighted least squares.

    Solve an optimization problem with :math:`L_1` cost function (data IRLS)
    or :math:`L_1` regularization term (model IRLS) given the operator
    ``Op`` and data ``y``. In the *data IRLS*, the cost function is
    minimized by iteratively solving a weighted least squares problem with
    the weight at iteration :math:`i` being based on the data residual at
    iteration :math:`i-1`.

    This IRLS solver is robust to *outliers* since the L1 norm gives less
    weight to large residuals than L2 norm does.

    Similarly in the *model IRLS*, the weight at iteration :math:`i` is
    based on the model at iteration :math:`i-1`. This IRLS solver inverts
    for a sparse model vector.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert

    Raises
    ------
    NotImplementedError
        If ``kind`` is different from model or data

    Notes
    -----
    *Data IRLS* solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{y}`:

    .. math::
        J = \|\mathbf{y} - \mathbf{Op}\,\mathbf{x}\|_1

    by a set of outer iterations which require to repeatedly solve a
    weighted least squares problem of the form:

    .. math::
        \DeclareMathOperator*{\argmin}{arg\,min}
        \mathbf{x}^{(i+1)} = \argmin_\mathbf{x} \|\mathbf{y} -
        \mathbf{Op}\,\mathbf{x}\|_{2, \mathbf{R}^{(i)}}^2 +
        \epsilon_\mathbf{I}^2 \|\mathbf{x}\|_2^2

    where :math:`\mathbf{R}^{(i)}` is a diagonal weight matrix
    whose diagonal elements at iteration :math:`i` are equal to the
    absolute inverses of the residual vector
    :math:`\mathbf{r}^{(i)} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}^{(i)}`
    at iteration :math:`i`. More specifically the :math:`j`-th element of
    the diagonal of :math:`\mathbf{R}^{(i)}` is

    .. math::
        R^{(i)}_{j,j} = \frac{1}{\left| r^{(i)}_j \right| +
        \epsilon_\mathbf{R}}

    or

    .. math::
        R^{(i)}_{j,j} = \frac{1}{\max\{\left|r^{(i)}_j\right|,
        \epsilon_\mathbf{R}\}}

    depending on the choice ``threshR``. In either case,
    :math:`\epsilon_\mathbf{R}` is the user-defined stabilization/
    thresholding factor [1]_.

    Similarly *model IRLS* solves the following optimization problem for
    the operator :math:`\mathbf{Op}` and the data :math:`\mathbf{y}`:

    ..
math:: J = \|\mathbf{x}\|_1 \quad \text{subject to} \quad
        \mathbf{y} = \mathbf{Op}\,\mathbf{x}

    by a set of outer iterations which require to repeatedly solve a
    weighted least squares problem of the form [2]_:

    .. math::
        \mathbf{x}^{(i+1)} = \operatorname*{arg\,min}_\mathbf{x}
        \|\mathbf{x}\|_{2, \mathbf{R}^{(i)}}^2 \quad \text{subject to} \quad
        \mathbf{y} = \mathbf{Op}\,\mathbf{x}

    where :math:`\mathbf{R}^{(i)}` is a diagonal weight matrix whose
    diagonal elements at iteration :math:`i` are equal to the absolutes of
    the model vector :math:`\mathbf{x}^{(i)}` at iteration :math:`i`. More
    specifically the :math:`j`-th element of the diagonal of
    :math:`\mathbf{R}^{(i)}` is

    .. math::
        R^{(i)}_{j,j} = \left|x^{(i)}_j\right|.

    .. [1] https://en.wikipedia.org/wiki/Iteratively_reweighted_least_squares
    .. [2] Chartrand, R., and Yin, W. "Iteratively reweighted algorithms for
       compressive sensing", IEEE. 2008.

    """

    def _print_setup(self, xcomplex: bool = False) -> None:
        # Print solver banner, parameters and the iteration-log header.
        self._print_solver(f" ({self.kind})")
        strpar = f"threshR = {self.threshR}\tepsR = {self.epsR}\tepsI = {self.epsI}"
        # NOTE(review): both branches below interpolate ``self.nouter`` into
        # the "tolIRL = ..." field; this looks like it should be
        # ``self.tolIRLS`` — confirm against upstream.
        if self.nouter is not None:
            strpar1 = f"tolIRL = {self.nouter}\tnouter = {self.nouter}"
        else:
            strpar1 = f"tolIRL = {self.nouter}"
        print(strpar)
        print(strpar1)
        print("-" * 80)
        # NOTE(review): the two header branches are identical in this copy;
        # presumably the complex branch had different column spacing.
        if not xcomplex:
            head1 = " Itn x[0] r2norm"
        else:
            head1 = " Itn x[0] r2norm"
        print(head1)

    def _print_step(self, x: NDArray) -> None:
        # One log row: iteration number, first model sample, residual norm.
        strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e}"
        str1 = f"{self.iiter:6g} " + strx
        str2 = f" {self.rnorm:10.3e}"
        print(str1 + str2)

    def setup(
        self,
        y: NDArray,
        nouter: Optional[int] = None,
        threshR: bool = False,
        epsR: float = 1e-10,
        epsI: float = 1e-10,
        tolIRLS: float = 1e-10,
        warm: bool = False,
        kind: str = "data",
        show: bool = False,
    ) -> None:
        r"""Setup solver

        Parameters
        ----------
        y : :obj:`np.ndarray`
            Data of size :math:`[N \times 1]`
        nouter : :obj:`int`, optional
            Number of outer iterations
        threshR : :obj:`bool`, optional
            Apply thresholding in creation of weight (``True``)
            or damping (``False``)
        epsR : :obj:`float`, optional
            Damping to be applied to residuals for weighting term
        epsI : :obj:`float`, optional
            Tikhonov damping (for ``kind="data"``) or L1 model damping
            (for ``kind="datamodel"``)
        tolIRLS : :obj:`float`, optional
            Tolerance. Stop outer iterations if difference between inverted
            model at subsequent iterations is smaller than ``tolIRLS``
        warm  : :obj:`bool`, optional
            Warm start each inversion inner step with previous estimate
            (``True``) or not (``False``). This only applies to
            ``kind="data"`` and ``kind="datamodel"``
        kind : :obj:`str`, optional
            Kind of solver (``model``, ``data`` or ``datamodel``)
        show : :obj:`bool`, optional
            Display setup log

        """
        self.y = y
        self.nouter = nouter
        self.threshR = threshR
        self.epsR = epsR
        self.epsI = epsI
        self.tolIRLS = tolIRLS
        self.warm = warm
        self.kind = kind
        self.ncp = get_array_module(y)
        self.iiter = 0

        # choose step to use
        if self.kind == "data":
            self._step = self._step_data
        elif self.kind == "model":
            self._step = self._step_model
            self.Iop = Identity(y.size, dtype=y.dtype)
        elif self.kind == "datamodel":
            self._step = self._step_data
            # augment Op and y: the L1 model damping epsI becomes part of an
            # augmented system so the data step can be reused unchanged
            self.Op = VStack([self.Op, epsI * Identity(self.Op.shape[1])])
            self.epsI = 0.0  # as epsI is added to the augmented system already
            self.y = np.hstack([self.y, np.zeros(self.Op.shape[1])])
        else:
            raise NotImplementedError("kind must be model, data or datamodel")

        # print setup
        if show:
            self._print_setup()

    def _step_data(self, x: NDArray, **kwargs_solver) -> NDArray:
        r"""Run one step of solver with L1 data term"""
        if self.iiter == 0:
            # first iteration: plain (unweighted) regularized inversion
            x = regularized_inversion(
                self.Op,
                self.y,
                None,
                x0=x if self.warm else None,
                damp=self.epsI,
                **kwargs_solver,
            )[0]
        else:
            # other iterations (weighted least-squares); weights are the
            # absolute inverses of the previous residual, normalized to 1
            if self.threshR:
                self.rw = 1.0 / self.ncp.maximum(self.ncp.abs(self.r), self.epsR)
            else:
                self.rw = 1.0 / (self.ncp.abs(self.r) + self.epsR)
            self.rw = self.rw / self.rw.max()
            R = Diagonal(np.sqrt(self.rw))
            x = regularized_inversion(
                self.Op,
                self.y,
                None,
                Weight=R,
                x0=x if self.warm else None,
                damp=self.epsI,
                **kwargs_solver,
            )[0]
        return x

    def _step_model(self, x: NDArray, **kwargs_solver) -> NDArray:
        r"""Run one step of solver with L1 model term"""
        if self.iiter == 0:
            # first iteration (unweighted least-squares); solve the normal
            # equations in the data space, then map back through Op.H
            if self.ncp == np:
                x = (
                    self.Op.H
                    @ lsqr(
                        self.Op @ self.Op.H + (self.epsI**2) * self.Iop,
                        self.y,
                        **kwargs_solver,
                    )[0]
                )
            else:
                # cupy arrays: use pylops' cgls instead of scipy's lsqr
                x = (
                    self.Op.H
                    @ cgls(
                        self.Op @ self.Op.H + (self.epsI**2) * self.Iop,
                        self.y,
                        self.ncp.zeros(int(self.Op.shape[0]), dtype=self.Op.dtype),
                        **kwargs_solver,
                    )[0]
                )
        else:
            # other iterations (weighted least-squares); weights are the
            # absolute values of the previous model, normalized to 1
            self.rw = np.abs(x)
            self.rw = self.rw / self.rw.max()
            R = Diagonal(self.rw, dtype=self.rw.dtype)
            if self.ncp == np:
                x = (
                    R
                    @ self.Op.H
                    @ lsqr(
                        self.Op @ R @ self.Op.H + self.epsI**2 * self.Iop,
                        self.y,
                        **kwargs_solver,
                    )[0]
                )
            else:
                x = (
                    R
                    @ self.Op.H
                    @ cgls(
                        self.Op @ R @ self.Op.H + self.epsI**2 * self.Iop,
                        self.y,
                        self.ncp.zeros(int(self.Op.shape[0]), dtype=self.Op.dtype),
                        **kwargs_solver,
                    )[0]
                )
        return x

    def step(self, x: NDArray, show: bool = False, **kwargs_solver) -> NDArray:
        r"""Run one step of solver

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current model vector to be updated by a step of ISTA
        show : :obj:`bool`, optional
            Display iteration log
        **kwargs_solver
            Arbitrary keyword arguments for
            :py:func:`scipy.sparse.linalg.cg` solver for data IRLS and
            :py:func:`scipy.sparse.linalg.lsqr` solver for model IRLS when
            using numpy data (or :py:func:`pylops.optimization.solver.cg`
            and :py:func:`pylops.optimization.solver.cgls` when using cupy
            data)

        Returns
        -------
        x : :obj:`np.ndarray`
            Updated model vector

        """
        # update model (dispatches to _step_data or _step_model)
        x = self._step(x, **kwargs_solver)
        # compute residual
        self.r: NDArray = self.y - self.Op * x
        self.rnorm = self.ncp.linalg.norm(self.r)
        self.iiter += 1
        if show:
            self._print_step(x)
        return x

    def run(
        self,
        x: NDArray,
        nouter: int = 10,
        show: bool = False,
        itershow: Tuple[int, int, int] = (10, 10, 10),
        **kwargs_solver,
    ) -> NDArray:
        r"""Run solver

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current model vector to be updated by multiple steps of IRLS
        nouter : :obj:`int`, optional
            Number of outer iterations.
        show : :obj:`bool`, optional
            Display logs
        itershow : :obj:`tuple`, optional
            Display set log for the first N1 steps, last N2 steps, and
            every N3 steps in between where N1, N2, N3 are the
            three element of the list.
        **kwargs_solver
            Arbitrary keyword arguments for
            :py:func:`scipy.sparse.linalg.cg` solver for data IRLS and
            :py:func:`scipy.sparse.linalg.lsqr` solver for model IRLS when
            using numpy data (or :py:func:`pylops.optimization.solver.cg`
            and :py:func:`pylops.optimization.solver.cgls` when using cupy
            data)

        Returns
        -------
        x : :obj:`np.ndarray`
            Estimated model of size :math:`[M \times 1]`

        """
        # a nouter set at setup time takes precedence over the argument
        nouter = nouter if self.nouter is None else self.nouter
        if x is not None:
            # solve in the shifted variable: subtract Op*x0 from the data
            # and add x0 back after the loop.
            # NOTE(review): this mutates self.y in place, so calling run()
            # twice with a nonzero x compounds the shift — verify intended.
            self.x0 = x.copy()
            self.y = self.y - self.Op * x
        # choose xold to ensure tolerance test is passed initially
        # (assumes x has a float dtype so +inf is representable)
        xold = x.copy() + np.inf
        while self.iiter < nouter and self.ncp.linalg.norm(x - xold) >= self.tolIRLS:
            showstep = (
                True
                if show
                and (
                    self.iiter < itershow[0]
                    or nouter - self.iiter < itershow[1]
                    or self.iiter % itershow[2] == 0
                )
                else False
            )
            xold = x.copy()
            x = self.step(x, showstep, **kwargs_solver)
            self.callback(x)
        # adding initial guess
        if hasattr(self, "x0"):
            x = self.x0 + x
        return x

    def finalize(self, show: bool = False) -> None:
        r"""Finalize solver

        Parameters
        ----------
        show : :obj:`bool`, optional
            Display finalize log

        """
        self.tend = time.time()
        self.telapsed = self.tend - self.tstart
        # expose the number of effective outer iterations to the caller
        self.nouter = self.iiter
        if show:
            self._print_finalize()

    def solve(
        self,
        y: NDArray,
        x0: Optional[NDArray] = None,
        nouter: int = 10,
        threshR: bool = False,
        epsR: float = 1e-10,
        epsI: float = 1e-10,
        tolIRLS: float = 1e-10,
        kind: str = "data",
        warm: bool = False,
        show: bool = False,
        itershow: Tuple[int, int, int] = (10, 10, 10),
        **kwargs_solver,
    ) -> NDArray:
        r"""Run entire solver

        Parameters
        ----------
        y : :obj:`np.ndarray`
            Data of size :math:`[N \times 1]`
        x0 : :obj:`np.ndarray`, optional
            Initial guess  of size :math:`[N \times 1]`. If ``None``,
            initialize internally as zero vector
        nouter : :obj:`int`, optional
            Number of outer iterations
        threshR : :obj:`bool`, optional
            Apply thresholding in creation of weight (``True``)
            or damping (``False``)
        epsR : :obj:`float`, optional
            Damping to be applied to residuals for weighting term
        epsI : :obj:`float`, optional
            Tikhonov damping
        tolIRLS : :obj:`float`, optional
            Tolerance. Stop outer iterations if difference between inverted
            model at subsequent iterations is smaller than ``tolIRLS``
        warm  : :obj:`bool`, optional
            Warm start each inversion inner step with previous estimate
            (``True``) or not (``False``). This only applies to
            ``kind="data"`` and ``kind="datamodel"``
        kind : :obj:`str`, optional
            Kind of solver (``data`` or ``model``)
        show : :obj:`bool`, optional
            Display setup log
        itershow : :obj:`tuple`, optional
            Display set log for the first N1 steps, last N2 steps, and
            every N3 steps in between where N1, N2, N3 are the
            three element of the list.
        **kwargs_solver
            Arbitrary keyword arguments for
            :py:func:`scipy.sparse.linalg.cg` solver for data IRLS and
            :py:func:`scipy.sparse.linalg.lsqr` solver for model IRLS when
            using numpy data (or :py:func:`pylops.optimization.solver.cg`
            and :py:func:`pylops.optimization.solver.cgls` when using cupy
            data)

        Returns
        -------
        x : :obj:`np.ndarray`
            Estimated model of size :math:`[N \times 1]`

        """
        # note: nouter is deliberately not passed to setup (self.nouter
        # stays None) so run() uses the nouter argument below
        self.setup(
            y=y,
            threshR=threshR,
            epsR=epsR,
            epsI=epsI,
            tolIRLS=tolIRLS,
            warm=warm,
            kind=kind,
            show=show,
        )
        if x0 is None:
            x0 = self.ncp.zeros(self.Op.shape[1], dtype=self.y.dtype)
        x = self.run(x0, nouter=nouter, show=show, itershow=itershow, **kwargs_solver)
        self.finalize(show)
        return x, self.nouter


class OMP(Solver):
    r"""Orthogonal Matching Pursuit (OMP).

    Solve an optimization problem with :math:`L_0` regularization function
    given the operator ``Op`` and data ``y``.
    The operator can be real or complex, and should ideally be either square
    :math:`N=M` or underdetermined :math:`N<M`.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert

    See Also
    --------
    ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA).
    FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA).
    SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1).
    SplitBregman: Split Bregman for mixed L2-L1 norms.

    Notes
    -----
    Solves the following optimization problem for the operator
    :math:`\mathbf{Op}` and the data :math:`\mathbf{y}`:

    .. math::
        \|\mathbf{x}\|_0 \quad \text{subject to} \quad
        \|\mathbf{Op}\,\mathbf{x}-\mathbf{y}\|_2^2 \leq \sigma^2,

    using Orthogonal Matching Pursuit (OMP). This is a very simple iterative
    algorithm which applies the following step:

    .. math::
        \DeclareMathOperator*{\argmin}{arg\,min}
        \DeclareMathOperator*{\argmax}{arg\,max}
        \Lambda_k = \Lambda_{k-1} \cup \left\{\argmax_j
        \left|\mathbf{Op}_j^H\,\mathbf{r}_k\right| \right\} \\
        \mathbf{x}_k = \argmin_{\mathbf{x}}
        \left\|\mathbf{Op}_{\Lambda_k}\,\mathbf{x} - \mathbf{y}\right\|_2^2

    Note that by choosing ``niter_inner=0`` the basic Matching Pursuit (MP)
    algorithm is implemented instead. In other words, instead of solving an
    optimization at each iteration to find the best :math:`\mathbf{x}` for
    the currently selected basis functions, the vector :math:`\mathbf{x}` is
    just updated at the new basis function by taking directly the value from
    the inner product :math:`\mathbf{Op}_j^H\,\mathbf{r}_k`.

    In this case it is highly recommended to provide a normalized basis
    function. If different basis have different norms, the solver is likely
    to diverge. Similar observations apply to OMP, even though mild
    unbalancing between the basis is generally properly handled.

    """

    def _print_setup(self, xcomplex: bool = False) -> None:
        # Banner tells MP apart from OMP (MP when niter_inner == 0).
        self._print_solver("(Only MP)" if self.niter_inner == 0 else "", nbar=55)

        strpar = (
            f"sigma = {self.sigma:.2e}\tniter_outer = {self.niter_outer}\n"
            f"niter_inner = {self.niter_inner}\tnormalization={self.normalizecols}"
        )
        print(strpar)
        print("-" * 55)
        # NOTE(review): the two header branches are identical in this copy;
        # presumably the complex branch had different column spacing.
        if not xcomplex:
            head1 = " Itn x[0] r2norm"
        else:
            head1 = " Itn x[0] r2norm"
        print(head1)

    def _print_step(self, x: NDArray) -> None:
        # One log row: iteration number, first coefficient, residual norm.
        strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e}"
        str1 = f"{self.iiter:6g} " + strx
        str2 = f" {self.cost[-1]:10.3e}"
        print(str1 + str2)

    def setup(
        self,
        y: NDArray,
        niter_outer: int = 10,
        niter_inner: int = 40,
        sigma: float = 1e-4,
        normalizecols: bool = False,
        show: bool = False,
    ) -> None:
        r"""Setup solver

        Parameters
        ----------
        y : :obj:`np.ndarray`
            Data of size :math:`[N \times 1]`
        niter_outer : :obj:`int`, optional
            Number of iterations of outer loop
        niter_inner : :obj:`int`, optional
            Number of iterations of inner loop. By choosing ``niter_inner=0``,
            the Matching Pursuit (MP) algorithm is implemented.
        sigma : :obj:`list`
            Maximum :math:`L^2` norm of residual. When smaller stop
            iterations.
        normalizecols : :obj:`list`, optional
            Normalize columns (``True``) or not (``False``). Note that this
            can be expensive as it requires applying the forward operator
            :math:`n_{cols}` times to unit vectors (i.e., containing 1 at
            position j and zero otherwise); use only when the columns of the
            operator are expected to have highly varying norms.
        show : :obj:`bool`, optional
            Display setup log

        """
        self.y = y
        self.niter_outer = niter_outer
        self.niter_inner = niter_inner
        self.sigma = sigma
        self.normalizecols = normalizecols
        self.ncp = get_array_module(y)

        # find normalization factor for each column by probing the operator
        # with unit vectors (O(ncols) forward applications)
        if self.normalizecols:
            ncols = self.Op.shape[1]
            self.norms = self.ncp.zeros(ncols)
            for icol in range(ncols):
                unit = self.ncp.zeros(ncols, dtype=self.Op.dtype)
                unit[icol] = 1
                self.norms[icol] = np.linalg.norm(self.Op.matvec(unit))

        # create variables to track the residual norm and iterations
        self.res = self.y.copy()
        self.cost = [
            float(np.linalg.norm(self.y)),
        ]
        self.iiter = 0

        if show:
            self._print_setup()

    def step(
        self,
        x: NDArray,
        cols: InputDimsLike,
        show: bool = False,
    ) -> Tuple[NDArray, InputDimsLike]:
        r"""Run one step of solver

        Parameters
        ----------
        x : :obj:`list` or :obj:`np.ndarray`
            Current model vector to be updated by a step of OMP
        cols : :obj:`list`
            Current list of chosen elements of vector x to be updated by a
            step of OMP
        show : :obj:`bool`, optional
            Display iteration log

        Returns
        -------
        x : :obj:`np.ndarray`
            Updated model vector
        cols : :obj:`list`
            Current list of chosen elements

        """
        # compute inner products
        cres = self.Op.rmatvec(self.res)
        cres_abs = np.abs(cres)
        if self.normalizecols:
            cres_abs = cres_abs / self.norms
        # choose column with max cres, breaking ties at random
        cres_max = np.max(cres_abs)
        imax = np.argwhere(cres_abs == cres_max).ravel()
        nimax = len(imax)
        # NOTE(review): argwhere of the max is never empty, so the else
        # branch below is unreachable (and would raise on an empty array).
        if nimax > 0:
            imax = imax[np.random.permutation(nimax)[0]]
        else:
            imax = imax[0]

        # update active set
        if imax not in cols:
            addnew = True
            cols.append(int(imax))
        else:
            addnew = False
            imax_in_cols = cols.index(imax)

        # estimate model for current set of columns
        if self.niter_inner == 0:
            # MP update: take the coefficient directly from the inner product
            Opcol = self.Op.apply_columns(
                [
                    int(imax),
                ]
            )
            self.res -= Opcol.matvec(cres[imax] * self.ncp.ones(1))
            if addnew:
                x.append(cres[imax])
            else:
                x[imax_in_cols] += cres[imax]
        else:
            # OMP update: re-fit all active coefficients by least squares
            Opcol = self.Op.apply_columns(cols)
            if self.ncp == np:
                x = lsqr(Opcol, self.y, iter_lim=self.niter_inner)[0]
            else:
                # cupy arrays: use pylops' cgls instead of scipy's lsqr
                x = cgls(
                    Opcol,
                    self.y,
                    self.ncp.zeros(int(Opcol.shape[1]), dtype=Opcol.dtype),
                    niter=self.niter_inner,
                )[0]
            self.res = self.y - Opcol.matvec(x)
        self.iiter += 1
        self.cost.append(float(np.linalg.norm(self.res)))
        if show:
            self._print_step(x)
        return x, cols

    def run(
        self,
        x: NDArray,
        cols: InputDimsLike,
        show: bool = False,
        itershow: Tuple[int, int, int] = (10, 10, 10),
    ) -> Tuple[NDArray, InputDimsLike]:
        r"""Run solver

        Parameters
        ----------
        x : :obj:`np.ndarray`
            Current model vector to be updated by multiple steps of IRLS
        cols : :obj:`list`
            Current list of chosen elements of vector x to be updated by a
            step of OMP
        show : :obj:`bool`, optional
            Display logs
        itershow : :obj:`tuple`, optional
            Display set log for the first N1 steps, last N2 steps, and
            every N3 steps in between where N1, N2, N3 are the
            three element of the list.

        Returns
        -------
        x : :obj:`np.ndarray`
            Estimated model of size :math:`[M \times 1]`
        cols : :obj:`list`
            Current list of chosen elements

        """
        # stop on either the iteration budget or the residual-norm target
        while self.iiter < self.niter_outer and self.cost[self.iiter] > self.sigma:
            showstep = (
                True
                if show
                and (
                    self.iiter < itershow[0]
                    or self.niter_outer - self.iiter < itershow[1]
                    or self.iiter % itershow[2] == 0
                )
                else False
            )
            x, cols = self.step(x, cols, showstep)
            self.callback(x)
        return x, cols

    def finalize(
        self,
        x: NDArray,
        cols: InputDimsLike,
        show: bool = False,
    ) -> NDArray:
        r"""Finalize solver

        Parameters
        ----------
        x : :obj:`list` or :obj:`np.ndarray`
            Current model vector to be updated by a step of OMP
        cols : :obj:`list`
            Current list of chosen elements of vector x to be updated by a
            step of OMP
        show : :obj:`bool`, optional
            Display finalize log

        Returns
        -------
        xfin : :obj:`np.ndarray`
            Estimated model of size :math:`[M \times 1]`

        """
        self.tend = time.time()
        self.telapsed = self.tend - self.tstart
        self.cost = np.array(self.cost)
        self.nouter = self.iiter
        # scatter the active-set coefficients back into a full-size vector
        xfin = self.ncp.zeros(int(self.Op.shape[1]), dtype=self.Op.dtype)
        xfin[cols] = self.ncp.array(x)
        if show:
            self._print_finalize(nbar=55)
        return xfin
def solve( self, y: NDArray, niter_outer: int = 10, niter_inner: int = 40, sigma: float = 1e-4, normalizecols: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Tuple[NDArray, int, NDArray]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` niter_outer : :obj:`int`, optional Number of iterations of outer loop niter_inner : :obj:`int`, optional Number of iterations of inner loop. By choosing ``niter_inner=0``, the Matching Pursuit (MP) algorithm is implemented. sigma : :obj:`list` Maximum :math:`L^2` norm of residual. When smaller stop iterations. normalizecols : :obj:`list`, optional Normalize columns (``True``) or not (``False``). Note that this can be expensive as it requires applying the forward operator :math:`n_{cols}` times to unit vectors (i.e., containing 1 at position j and zero otherwise); use only when the columns of the operator are expected to have highly varying norms. show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` niter_outer : :obj:`int` Number of effective outer iterations cost : :obj:`numpy.ndarray`, optional History of cost function """ self.setup( y, niter_outer=niter_outer, niter_inner=niter_inner, sigma=sigma, normalizecols=normalizecols, show=show, ) x: List[NDArray] = [] cols: List[InputDimsLike] = [] x, cols = self.run(x, cols, show=show, itershow=itershow) x = self.finalize(x, cols, show) return x, self.nouter, self.cost class ISTA(Solver): r"""Iterative Shrinkage-Thresholding Algorithm (ISTA). Solve an optimization problem with :math:`L_p, \; p=0, 0.5, 1` regularization, given the operator ``Op`` and data ``y``. 
The operator can be real or complex, and should ideally be either square :math:`N=M` or underdetermined :math:`N<M`. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert Raises ------ NotImplementedError If ``threshkind`` is different from hard, soft, half, soft-percentile, or half-percentile ValueError If ``perc=None`` when ``threshkind`` is soft-percentile or half-percentile ValueError If ``monitorres=True`` and residual increases See Also -------- OMP: Orthogonal Matching Pursuit (OMP). FISTA: Fast Iterative Shrinkage-Thresholding Algorithm (FISTA). SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1). SplitBregman: Split Bregman for mixed L2-L1 norms. Notes ----- Solves the following synthesis problem for the operator :math:`\mathbf{Op}` and the data :math:`\mathbf{y}`: .. math:: J = \|\mathbf{y} - \mathbf{Op}\,\mathbf{x}\|_2^2 + \epsilon \|\mathbf{x}\|_p or the analysis problem: .. math:: J = \|\mathbf{y} - \mathbf{Op}\,\mathbf{x}\|_2^2 + \epsilon \|\mathbf{SOp}^H\,\mathbf{x}\|_p if ``SOp`` is provided. Note that in the first case, ``SOp`` should be assimilated in the modelling operator (i.e., ``Op=GOp * SOp``). The Iterative Shrinkage-Thresholding Algorithms (ISTA) [1]_ is used, where :math:`p=0, 0.5, 1`. This is a very simple iterative algorithm which applies the following step: .. math:: \mathbf{x}^{(i+1)} = T_{(\epsilon \alpha /2, p)} \left(\mathbf{x}^{(i)} + \alpha\,\mathbf{Op}^H \left(\mathbf{y} - \mathbf{Op}\,\mathbf{x}^{(i)}\right)\right) or .. math:: \mathbf{x}^{(i+1)} = \mathbf{SOp}\,\left\{T_{(\epsilon \alpha /2, p)} \mathbf{SOp}^H\,\left(\mathbf{x}^{(i)} + \alpha\,\mathbf{Op}^H \left(\mathbf{y} - \mathbf{Op} \,\mathbf{x}^{(i)}\right)\right)\right\} where :math:`\epsilon \alpha /2` is the threshold and :math:`T_{(\tau, p)}` is the thresholding rule. The most common variant of ISTA uses the so-called soft-thresholding rule :math:`T(\tau, p=1)`. 
Alternatively an hard-thresholding rule is used in the case of :math:`p=0` or a half-thresholding rule is used in the case of :math:`p=1/2`. Finally, percentile bases thresholds are also implemented: the damping factor is not used anymore an the threshold changes at every iteration based on the computed percentile. .. [1] Daubechies, I., Defrise, M., and De Mol, C., “An iterative thresholding algorithm for linear inverse problems with a sparsity constraint”, Communications on pure and applied mathematics, vol. 57, pp. 1413-1457. 2004. """ def _print_setup(self) -> None: self._print_solver(f" ({self.threshkind} thresholding)") if self.niter is not None: strpar = f"eps = {self.eps:10e}\ttol = {self.tol:10e}\tniter = {self.niter}" else: strpar = f"eps = {self.eps:10e}\ttol = {self.tol:10e}" if self.perc is None: strpar1 = f"alpha = {self.alpha:10e}\tthresh = {self.thresh:10e}" else: strpar1 = f"alpha = {self.alpha:10e}\tperc = {self.perc:.1f}" head1 = " Itn x[0] r2norm r12norm xupdate" print(strpar) print(strpar1) print("-" * 80) print(head1) def _print_step( self, x: NDArray, costdata: float, costreg: float, xupdate: float, ) -> None: strx = ( f" {x[0]:1.2e} " if np.iscomplexobj(x) else f" {x[0]:11.4e} " ) msg = ( f"{self.iiter:6g} " + strx + f"{costdata:10.3e} {costdata + costreg:9.3e} {xupdate:10.3e}" ) print(msg) def setup( self, y: NDArray, x0: Optional[NDArray] = None, niter: Optional[int] = None, SOp: Optional[LinearOperator] = None, eps: float = 0.1, alpha: Optional[float] = None, eigsdict: Optional[Dict[str, Any]] = None, tol: float = 1e-10, threshkind: str = "soft", perc: Optional[float] = None, decay: Optional[NDArray] = None, monitorres: bool = False, show: bool = False, ) -> NDArray: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0: :obj:`numpy.ndarray`, optional Initial guess niter : :obj:`int` Number of iterations SOp : :obj:`pylops.LinearOperator`, optional Regularization operator (use when solving the 
analysis problem) eps : :obj:`float`, optional Sparsity damping alpha : :obj:`float`, optional Step size. To guarantee convergence, ensure :math:`\alpha \le 1/\lambda_\text{max}`, where :math:`\lambda_\text{max}` is the largest eigenvalue of :math:`\mathbf{Op}^H\mathbf{Op}`. If ``None``, the maximum eigenvalue is estimated and the optimal step size is chosen as :math:`1/\lambda_\text{max}`. If provided, the convergence criterion will not be checked internally. eigsdict : :obj:`dict`, optional Dictionary of parameters to be passed to :func:`pylops.LinearOperator.eigs` method when computing the maximum eigenvalue tol : :obj:`float`, optional Tolerance. Stop iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` threshkind : :obj:`str`, optional Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile', 'soft-percentile', or 'half-percentile' - 'soft' used as default) perc : :obj:`float`, optional Percentile, as percentage of values to be kept by thresholding (to be provided when thresholding is soft-percentile or half-percentile) decay : :obj:`numpy.ndarray`, optional Decay factor to be applied to thresholding during iterations monitorres : :obj:`bool`, optional Monitor that residual is decreasing show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Initial model vector """ self.y = y self.SOp = SOp self.niter = niter self.eps = eps self.eigsdict = {} if eigsdict is None else eigsdict self.tol = tol self.threshkind = threshkind self.perc = perc self.decay = decay self.monitorres = monitorres self.ncp = get_array_module(y) if threshkind not in [ "hard", "soft", "half", "hard-percentile", "soft-percentile", "half-percentile", ]: raise NotImplementedError( "threshkind should be hard, soft, half," "hard-percentile, soft-percentile, " "or half-percentile" ) if ( threshkind in ["hard-percentile", "soft-percentile", "half-percentile"] and perc is None ): raise ValueError( "Provide a percentile 
when choosing hard-percentile," "soft-percentile, or half-percentile thresholding" ) # choose thresholding function self.threshf: Callable[[NDArray, float], NDArray] if threshkind == "soft": self.threshf = _softthreshold elif threshkind == "hard": self.threshf = _hardthreshold elif threshkind == "half": self.threshf = _halfthreshold elif threshkind == "hard-percentile": self.threshf = _hardthreshold_percentile elif threshkind == "soft-percentile": self.threshf = _softthreshold_percentile else: self.threshf = _halfthreshold_percentile # prepare decay (if not passed) if perc is None and decay is None: self.decay = self.ncp.ones(niter) # step size if alpha is not None: self.alpha = alpha elif not hasattr(self, "alpha"): # compute largest eigenvalues of Op^H * Op Op1 = self.Op.H * self.Op if get_module_name(self.ncp) == "numpy": maxeig: float = np.abs( Op1.eigs( neigs=1, symmetric=True, **self.eigsdict, )[0] ) else: maxeig = np.abs( power_iteration( Op1, dtype=Op1.dtype, backend="cupy", **self.eigsdict, )[0] ) self.alpha = 1.0 / maxeig # define threshold self.thresh = eps * self.alpha * 0.5 # initialize model and cost function if x0 is None: if y.ndim == 1: x = self.ncp.zeros(int(self.Op.shape[1]), dtype=self.Op.dtype) else: x = self.ncp.zeros( (int(self.Op.shape[1]), y.shape[1]), dtype=self.Op.dtype ) else: if y.ndim != x0.ndim: # error for wrong dimensions raise ValueError("Number of columns of x0 and data are not the same") elif x0.shape[0] != self.Op.shape[1]: # error for wrong dimensions raise ValueError("Operator and input vector have different dimensions") else: x = x0.copy() # create variable to track residual if monitorres: self.normresold = np.inf # for fista self.t = 1.0 # create variables to track the residual norm and iterations self.cost: List[float] = [] self.iiter = 0 # print setup if show: self._print_setup() return x def step(self, x: NDArray, show: bool = False) -> Tuple[NDArray, float]: r"""Run one step of solver Parameters ---------- x : 
:obj:`np.ndarray` Current model vector to be updated by a step of ISTA show : :obj:`bool`, optional Display iteration log Returns ------- x : :obj:`np.ndarray` Updated model vector xupdate : :obj:`float` Norm of the update """ # store old vector xold = x.copy() # compute residual res: NDArray = self.y - self.Op @ x if self.monitorres: self.normres = np.linalg.norm(res) if self.normres > self.normresold: raise ValueError( f"ISTA stopped at iteration {self.iiter} due to " "residual increasing, consider modifying " "eps and/or alpha..." ) else: self.normresold = self.normres # compute gradient grad: NDArray = self.alpha * (self.Op.H @ res) # update inverted model x_unthesh: NDArray = x + grad if self.SOp is not None: x_unthesh = self.SOp.H @ x_unthesh if self.perc is None and self.decay is not None: x = self.threshf(x_unthesh, self.decay[self.iiter] * self.thresh) elif self.perc is not None: x = self.threshf(x_unthesh, 100 - self.perc) if self.SOp is not None: x = self.SOp @ x # model update xupdate = np.linalg.norm(x - xold) costdata = 0.5 * np.linalg.norm(res) ** 2 costreg = self.eps * np.linalg.norm(x, ord=1) self.cost.append(float(costdata + costreg)) self.iiter += 1 if show: self._print_step(x, costdata, costreg, xupdate) return x, xupdate def run( self, x: NDArray, niter: Optional[int] = None, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of CG niter : :obj:`int`, optional Number of iterations. Can be set to ``None`` if already provided in the setup call show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. 
Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ xupdate = np.inf niter = self.niter if niter is None else niter if niter is None: raise ValueError("niter must not be None") while self.iiter < niter and xupdate > self.tol: showstep = ( True if show and ( self.iiter < itershow[0] or niter - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 ) else False ) x, xupdate = self.step(x, showstep) self.callback(x) if xupdate <= self.tol: logging.warning( "update smaller that tolerance for " "iteration %d", self.iiter ) return x def finalize(self, show: bool = False) -> None: r"""Finalize solver Parameters ---------- show : :obj:`bool`, optional Display finalize log """ self.tend = time.time() self.telapsed = self.tend - self.tstart self.cost = np.array(self.cost) if show: self._print_finalize() def solve( self, y: NDArray, x0: Optional[NDArray] = None, niter: Optional[int] = None, SOp: Optional[LinearOperator] = None, eps: float = 0.1, alpha: Optional[float] = None, eigsdict: Optional[Dict[str, Any]] = None, tol: float = 1e-10, threshkind: str = "soft", perc: Optional[float] = None, decay: Optional[NDArray] = None, monitorres: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> Tuple[NDArray, int, NDArray]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0: :obj:`numpy.ndarray`, optional Initial guess niter : :obj:`int` Number of iterations SOp : :obj:`pylops.LinearOperator`, optional Regularization operator (use when solving the analysis problem) eps : :obj:`float`, optional Sparsity damping alpha : :obj:`float`, optional Step size. To guarantee convergence, ensure :math:`\alpha \le 1/\lambda_\text{max}`, where :math:`\lambda_\text{max}` is the largest eigenvalue of :math:`\mathbf{Op}^H\mathbf{Op}`. If ``None``, the maximum eigenvalue is estimated and the optimal step size is chosen as :math:`1/\lambda_\text{max}`. 
If provided, the convergence criterion will not be checked internally. eigsdict : :obj:`dict`, optional Dictionary of parameters to be passed to :func:`pylops.LinearOperator.eigs` method when computing the maximum eigenvalue tol : :obj:`float`, optional Tolerance. Stop iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` threshkind : :obj:`str`, optional Kind of thresholding ('hard', 'soft', 'half', 'hard-percentile', 'soft-percentile', or 'half-percentile' - 'soft' used as default) perc : :obj:`float`, optional Percentile, as percentage of values to be kept by thresholding (to be provided when thresholding is soft-percentile or half-percentile) decay : :obj:`numpy.ndarray`, optional Decay factor to be applied to thresholding during iterations monitorres : :obj:`bool`, optional Monitor that residual is decreasing show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` niter : :obj:`int` Number of effective iterations cost : :obj:`numpy.ndarray`, optional History of cost function """ x = self.setup( y=y, x0=x0, niter=niter, SOp=SOp, eps=eps, alpha=alpha, eigsdict=eigsdict, tol=tol, threshkind=threshkind, perc=perc, decay=decay, monitorres=monitorres, show=show, ) x = self.run(x, niter, show=show, itershow=itershow) self.finalize(show) return x, self.iiter, self.cost class FISTA(ISTA): r"""Fast Iterative Shrinkage-Thresholding Algorithm (FISTA). Solve an optimization problem with :math:`L_p, \; p=0, 0.5, 1` regularization, given the operator ``Op`` and data ``y``. The operator can be real or complex, and should ideally be either square :math:`N=M` or underdetermined :math:`N<M`. 
Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert Raises ------ NotImplementedError If ``threshkind`` is different from hard, soft, half, soft-percentile, or half-percentile ValueError If ``perc=None`` when ``threshkind`` is soft-percentile or half-percentile See Also -------- OMP: Orthogonal Matching Pursuit (OMP). ISTA: Iterative Shrinkage-Thresholding Algorithm (ISTA). SPGL1: Spectral Projected-Gradient for L1 norm (SPGL1). SplitBregman: Split Bregman for mixed L2-L1 norms. Notes ----- Solves the following synthesis problem for the operator :math:`\mathbf{Op}` and the data :math:`\mathbf{y}`: .. math:: J = \|\mathbf{y} - \mathbf{Op}\,\mathbf{x}\|_2^2 + \epsilon \|\mathbf{x}\|_p or the analysis problem: .. math:: J = \|\mathbf{y} - \mathbf{Op}\,\mathbf{x}\|_2^2 + \epsilon \|\mathbf{SOp}^H\,\mathbf{x}\|_p if ``SOp`` is provided. The Fast Iterative Shrinkage-Thresholding Algorithm (FISTA) [1]_ is used, where :math:`p=0, 0.5, 1`. This is a modified version of ISTA solver with improved convergence properties and limited additional computational cost. Similarly to the ISTA solver, the choice of the thresholding algorithm to apply at every iteration is based on the choice of :math:`p`. .. [1] Beck, A., and Teboulle, M., “A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems”, SIAM Journal on Imaging Sciences, vol. 2, pp. 183-202. 2009. 
""" def step(self, x: NDArray, z: NDArray, show: bool = False) -> NDArray: r"""Run one step of solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by a step of ISTA x : :obj:`np.ndarray` Current auxiliary model vector to be updated by a step of ISTA show : :obj:`bool`, optional Display iteration log Returns ------- x : :obj:`np.ndarray` Updated model vector z : :obj:`np.ndarray` Updated auxiliary model vector xupdate : :obj:`float` Norm of the update """ # store old vector xold = x.copy() # compute residual resz: NDArray = self.y - self.Op @ z if self.monitorres: self.normres = np.linalg.norm(resz) if self.normres > self.normresold: raise ValueError( f"ISTA stopped at iteration {self.iiter} due to " "residual increasing, consider modifying " "eps and/or alpha..." ) else: self.normresold = self.normres # compute gradient grad: NDArray = self.alpha * (self.Op.H @ resz) # update inverted model x_unthesh: NDArray = z + grad if self.SOp is not None: x_unthesh = self.SOp.H @ x_unthesh if self.perc is None and self.decay is not None: x = self.threshf(x_unthesh, self.decay[self.iiter] * self.thresh) elif self.perc is not None: x = self.threshf(x_unthesh, 100 - self.perc) if self.SOp is not None: x = self.SOp @ x # update auxiliary coefficients told = self.t self.t = (1.0 + np.sqrt(1.0 + 4.0 * self.t**2)) / 2.0 z = x + ((told - 1.0) / self.t) * (x - xold) # model update xupdate = np.linalg.norm(x - xold) costdata = 0.5 * np.linalg.norm(self.y - self.Op @ x) ** 2 costreg = self.eps * np.linalg.norm(x, ord=1) self.cost.append(float(costdata + costreg)) self.iiter += 1 if show: self._print_step(x, costdata, costreg, xupdate) return x, z, xupdate def run( self, x: NDArray, niter: Optional[int] = None, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of CG niter : :obj:`int`, optional Number of 
iterations. Can be set to ``None`` if already provided in the setup call show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ z = x.copy() xupdate = np.inf niter = self.niter if niter is None else niter if niter is None: raise ValueError("niter must not be None") while self.iiter < niter and xupdate > self.tol: showstep = ( True if show and ( self.iiter < itershow[0] or niter - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 ) else False ) x, z, xupdate = self.step(x, z, showstep) self.callback(x) if xupdate <= self.tol: logging.warning( "update smaller that tolerance for " "iteration %d", self.iiter ) return x class SPGL1(Solver): r"""Spectral Projected-Gradient for L1 norm. Solve a constrained system of equations given the operator ``Op`` and a sparsyfing transform ``SOp`` aiming to retrive a model that is sparse in the sparsifying domain. This is a simple wrapper to :py:func:`spgl1.spgl1` which is a porting of the well-known `SPGL1 <https://www.cs.ubc.ca/~mpf/spgl1/>`_ MATLAB solver into Python. In order to be able to use this solver you need to have installed the ``spgl1`` library. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert of size :math:`[N \times M]`. Raises ------ ModuleNotFoundError If the ``spgl1`` library is not installed Notes ----- Solve different variations of sparsity-promoting inverse problem by imposing sparsity in the retrieved model [1]_. The first problem is called *basis pursuit denoise (BPDN)* and its cost function is .. math:: \|\mathbf{x}\|_1 \quad \text{subject to} \quad \left\|\mathbf{Op}\,\mathbf{S}^H\mathbf{x}-\mathbf{y}\right\|_2^2 \leq \sigma, while the second problem is the *ℓ₁-regularized least-squares or LASSO* problem and its cost function is .. 
math:: \left\|\mathbf{Op}\,\mathbf{S}^H\mathbf{x}-\mathbf{y}\right\|_2^2 \quad \text{subject to} \quad \|\mathbf{x}\|_1 \leq \tau .. [1] van den Berg E., Friedlander M.P., "Probing the Pareto frontier for basis pursuit solutions", SIAM J. on Scientific Computing, vol. 31(2), pp. 890-912. 2008. """ def _print_setup(self, xcomplex: bool = False) -> None: self._print_solver() strprec = f"SOp={self.SOp}" strreg = f"tau={self.tau} sigma={self.sigma}" print(strprec) print(strreg) print("-" * 80) def _print_finalize(self) -> None: print(f"\nTotal time (s) = {self.telapsed:.2f}") print("-" * 80 + "\n") @disable_ndarray_multiplication def setup( self, y: NDArray, SOp: Optional[LinearOperator] = None, tau: int = 0, sigma: int = 0, show: bool = False, ) -> None: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` SOp : :obj:`pylops.LinearOperator`, optional Sparsifying transform tau : :obj:`float`, optional Non-negative LASSO scalar. If different from ``0``, SPGL1 will solve LASSO problem sigma : :obj:`list`, optional BPDN scalar. If different from ``0``, SPGL1 will solve BPDN problem show : :obj:`bool`, optional Display setup log """ if spgl1_message is not None: raise ModuleNotFoundError(spgl1_message) self.y = y self.SOp = SOp self.tau = tau self.sigma = sigma self.ncp = get_array_module(y) # print setup if show: self._print_setup() def step(self) -> None: raise NotImplementedError( "SPGL1 uses as default the" "spgl1.spgl1 solver, therefore the " "step method is not implemented. Use directly run or solve." ) @disable_ndarray_multiplication def run( self, x: NDArray, show: bool = False, **kwargs_spgl1, ) -> Tuple[NDArray, NDArray, Dict[str, Any]]: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of the solver. 
If ``None``, x is assumed to be a zero vector show : :obj:`bool`, optional Display iterations log **kwargs_spgl1 Arbitrary keyword arguments for :py:func:`spgl1.spgl1` solver Returns ------- xinv : :obj:`numpy.ndarray` Inverted model in original domain. pinv : :obj:`numpy.ndarray` Inverted model in sparse domain. info : :obj:`dict` Dictionary with the following information: - ``tau``, final value of tau (see sigma above) - ``rnorm``, two-norm of the optimal residual - ``rgap``, relative duality gap (an optimality measure) - ``gnorm``, Lagrange multiplier of (LASSO) - ``stat``, final status of solver * ``1``: found a BPDN solution, * ``2``: found a BP solution; exit based on small gradient, * ``3``: found a BP solution; exit based on small residual, * ``4``: found a LASSO solution, * ``5``: error, too many iterations, * ``6``: error, linesearch failed, * ``7``: error, found suboptimal BP solution, * ``8``: error, too many matrix-vector products. - ``niters``, number of iterations - ``nProdA``, number of multiplications with A - ``nProdAt``, number of multiplications with A' - ``n_newton``, number of Newton steps - ``time_project``, projection time (seconds) - ``time_matprod``, matrix-vector multiplications time (seconds) - ``time_total``, total solution time (seconds) - ``niters_lsqr``, number of lsqr iterations (if ``subspace_min=True``) - ``xnorm1``, L1-norm model solution history through iterations - ``rnorm2``, L2-norm residual history through iterations - ``lambdaa``, Lagrange multiplier history through iterations """ pinv, _, _, info = ext_spgl1( self.Op if self.SOp is None else self.Op * self.SOp.H, self.y, tau=self.tau, sigma=self.sigma, x0=x, **kwargs_spgl1, ) xinv = pinv.copy() if self.SOp is None else self.SOp.H * pinv return xinv, pinv, info def solve( self, y: NDArray, x0: Optional[NDArray] = None, SOp: Optional[LinearOperator] = None, tau: float = 0.0, sigma: float = 0, show: bool = False, **kwargs_spgl1, ) -> Tuple[NDArray, NDArray, Dict[str, Any]]: 
r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` x0 : :obj:`numpy.ndarray`, optional Initial guess SOp : :obj:`pylops.LinearOperator`, optional Sparsifying transform tau : :obj:`float`, optional Non-negative LASSO scalar. If different from ``0``, SPGL1 will solve LASSO problem sigma : :obj:`list`, optional BPDN scalar. If different from ``0``, SPGL1 will solve BPDN problem show : :obj:`bool`, optional Display log **kwargs_spgl1 Arbitrary keyword arguments for :py:func:`spgl1.spgl1` solver Returns ------- xinv : :obj:`numpy.ndarray` Inverted model in original domain. pinv : :obj:`numpy.ndarray` Inverted model in sparse domain. info : :obj:`dict` Dictionary with the following information: - ``tau``, final value of tau (see sigma above) - ``rnorm``, two-norm of the optimal residual - ``rgap``, relative duality gap (an optimality measure) - ``gnorm``, Lagrange multiplier of (LASSO) - ``stat``, final status of solver * ``1``: found a BPDN solution, * ``2``: found a BP solution; exit based on small gradient, * ``3``: found a BP solution; exit based on small residual, * ``4``: found a LASSO solution, * ``5``: error, too many iterations, * ``6``: error, linesearch failed, * ``7``: error, found suboptimal BP solution, * ``8``: error, too many matrix-vector products. 
- ``niters``, number of iterations - ``nProdA``, number of multiplications with A - ``nProdAt``, number of multiplications with A' - ``n_newton``, number of Newton steps - ``time_project``, projection time (seconds) - ``time_matprod``, matrix-vector multiplications time (seconds) - ``time_total``, total solution time (seconds) - ``niters_lsqr``, number of lsqr iterations (if ``subspace_min=True``) - ``xnorm1``, L1-norm model solution history through iterations - ``rnorm2``, L2-norm residual history through iterations - ``lambdaa``, Lagrange multiplier history through iterations """ self.setup(y=y, SOp=SOp, tau=tau, sigma=sigma, show=show) xinv, pinv, info = self.run(x0, show=show, **kwargs_spgl1) self.finalize(show) return xinv, pinv, info class SplitBregman(Solver): r"""Split Bregman for mixed L2-L1 norms. Solve an unconstrained system of equations with mixed :math:`L_2` and :math:`L_1` regularization terms given the operator ``Op``, a list of :math:`L_1` regularization terms ``RegsL1``, and an optional list of :math:`L_2` regularization terms ``RegsL2``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to invert Notes ----- Solve the following system of unconstrained, regularized equations given the operator :math:`\mathbf{Op}` and a set of mixed norm (:math:`L^2` and :math:`L_1`) regularization terms :math:`\mathbf{R}_{2,i}` and :math:`\mathbf{R}_{1,i}`, respectively: .. 
math:: J = \frac{\mu}{2} \|\textbf{y} - \textbf{Op}\,\textbf{x} \|_2^2 + \frac{1}{2}\sum_i \epsilon_{\mathbf{R}_{2,i}} \|\mathbf{y}_{\mathbf{R}_{2,i}} - \mathbf{R}_{2,i} \textbf{x} \|_2^2 + \sum_i \epsilon_{\mathbf{R}_{1,i}} \| \mathbf{R}_{1,i} \textbf{x} \|_1 where :math:`\mu` is the reconstruction damping, :math:`\epsilon_{\mathbf{R}_{2,i}}` are the damping factors used to weight the different :math:`L^2` regularization terms of the cost function and :math:`\epsilon_{\mathbf{R}_{1,i}}` are the damping factors used to weight the different :math:`L_1` regularization terms of the cost function. The generalized Split-Bergman algorithm [1]_ is used to solve such cost function: the algorithm is composed of a sequence of unconstrained inverse problems and Bregman updates. The original system of equations is initially converted into a constrained problem: .. math:: J = \frac{\mu}{2} \|\textbf{y} - \textbf{Op}\,\textbf{x}\|_2^2 + \frac{1}{2}\sum_i \epsilon_{\mathbf{R}_{2,i}} \|\mathbf{y}_{\mathbf{R}_{2,i}} - \mathbf{R}_{2,i} \textbf{x}\|_2^2 + \sum_i \| \textbf{y}_i \|_1 \quad \text{subject to} \quad \textbf{y}_i = \mathbf{R}_{1,i} \textbf{x} \quad \forall i and solved as follows: .. math:: \DeclareMathOperator*{\argmin}{arg\,min} \begin{align} (\textbf{x}^{k+1}, \textbf{y}_i^{k+1}) = \argmin_{\mathbf{x}, \mathbf{y}_i} \|\textbf{y} - \textbf{Op}\,\textbf{x}\|_2^2 &+ \frac{1}{2}\sum_i \epsilon_{\mathbf{R}_{2,i}} \|\mathbf{y}_{\mathbf{R}_{2,i}} - \mathbf{R}_{2,i} \textbf{x}\|_2^2 \\ &+ \frac{1}{2}\sum_i \epsilon_{\mathbf{R}_{1,i}} \|\textbf{y}_i - \mathbf{R}_{1,i} \textbf{x} - \textbf{b}_i^k\|_2^2 \\ &+ \sum_i \| \textbf{y}_i \|_1 \end{align} .. math:: \textbf{b}_i^{k+1}=\textbf{b}_i^k + (\mathbf{R}_{1,i} \textbf{x}^{k+1} - \textbf{y}^{k+1}) The :py:func:`scipy.sparse.linalg.lsqr` solver and a fast shrinkage algorithm are used within a inner loop to solve the first step. The entire procedure is repeated ``niter_outer`` times until convergence. .. [1] Goldstein T. 
and Osher S., "The Split Bregman Method for L1-Regularized Problems", SIAM J. on Scientific Computing, vol. 2(2), pp. 323-343. 2008. """ def _print_setup(self, xcomplex: bool = False) -> None: self._print_solver(nbar=65) strpar = ( f"niter_outer = {self.niter_outer:3d} niter_inner = {self.niter_inner:3d} tol = {self.tol:2.2e}\n" f"mu = {self.mu:2.2e} epsL1 = {self.epsRL1s}\t epsL2 = {self.epsRL2s}" ) print(strpar) print("-" * 65) if not xcomplex: head1 = " Itn x[0] r2norm r12norm" else: head1 = " Itn x[0] r2norm r12norm" print(head1) def _print_step(self, x: NDArray) -> None: strx = f"{x[0]:1.2e} " if np.iscomplexobj(x) else f"{x[0]:11.4e} " str1 = f"{self.iiter:6g} " + strx str2 = f"{self.costdata:10.3e} {self.costtot:9.3e}" print(str1 + str2) def setup( self, y: NDArray, RegsL1: List[LinearOperator], x0: Optional[NDArray] = None, niter_outer: int = 3, niter_inner: int = 5, RegsL2: Optional[List[LinearOperator]] = None, dataregsL2: Optional[Sequence[NDArray]] = None, mu: float = 1.0, epsRL1s: Optional[SamplingLike] = None, epsRL2s: Optional[SamplingLike] = None, tol: float = 1e-10, tau: float = 1.0, restart: bool = False, show: bool = False, ) -> NDArray: r"""Setup solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` RegsL1 : :obj:`list` :math:`L_1` regularization operators x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. If ``None``, initialize internally as zero vector niter_outer : :obj:`int`, optional Number of iterations of outer loop niter_inner : :obj:`int`, optional Number of iterations of inner loop of first step of the Split Bregman algorithm. A small number of iterations is generally sufficient and for many applications optimal efficiency is obtained when only one iteration is performed. 
RegsL2 : :obj:`list`, optional Additional :math:`L^2` regularization operators (if ``None``, :math:`L^2` regularization is not added to the problem) dataregsL2 : :obj:`list`, optional :math:`L^2` Regularization data (must have the same number of elements of ``RegsL2`` or equal to ``None`` to use a zero data for every regularization operator in ``RegsL2``) mu : :obj:`float`, optional Data term damping epsRL1s : :obj:`list` :math:`L_1` Regularization dampings (must have the same number of elements as ``RegsL1``) epsRL2s : :obj:`list` :math:`L^2` Regularization dampings (must have the same number of elements as ``RegsL2``) tol : :obj:`float`, optional Tolerance. Stop outer iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` tau : :obj:`float`, optional Scaling factor in the Bregman update (must be close to 1) restart : :obj:`bool`, optional Initialize the unconstrained inverse problem in inner loop with the initial guess (``True``) or with the last estimate (``False``). Note that when this is set to ``True``, the ``x0`` provided in the setup will be used in all iterations. 
show : :obj:`bool`, optional Display setup log Returns ------- x : :obj:`np.ndarray` Initial guess of size :math:`[N \times 1]` """ self.y = y self.RegsL1 = RegsL1 self.niter_outer = niter_outer self.niter_inner = niter_inner self.RegsL2 = RegsL2 self.dataregsL2 = list(dataregsL2) if dataregsL2 is not None else [] self.mu = mu self.epsRL1s = list(epsRL1s) if epsRL1s is not None else [] self.epsRL2s = list(epsRL2s) if epsRL2s is not None else [] self.tol = tol self.tau = tau self.restart = restart self.ncp = get_array_module(y) # L1 regularizations self.nregsL1 = len(RegsL1) self.b = [ self.ncp.zeros(RegL1.shape[0], dtype=self.Op.dtype) for RegL1 in RegsL1 ] self.d = self.b.copy() # L2 regularizations self.nregsL2 = 0 if RegsL2 is None else len(RegsL2) if self.nregsL2 > 0 and RegsL2 is not None: self.Regs = RegsL2 + RegsL1 if dataregsL2 is None: self.dataregsL2 = [ self.ncp.zeros(Reg.shape[0], dtype=self.Op.dtype) for Reg in RegsL2 ] else: self.Regs = RegsL1 self.dataregsL2 = [] # Rescale dampings self.epsRs: List[float] = [] if epsRL2s is not None: self.epsRs += [ np.sqrt(epsRL2s[ireg] / 2) / np.sqrt(mu / 2) for ireg in range(self.nregsL2) ] if epsRL1s is not None: self.epsRs += [ np.sqrt(epsRL1s[ireg] / 2) / np.sqrt(mu / 2) for ireg in range(self.nregsL1) ] self.x0 = x0 x = self.ncp.zeros(self.Op.shape[1], dtype=self.Op.dtype) if x0 is None else x0 # create variables to track the residual norm and iterations self.cost: List[float] = [] self.iiter = 0 if show: self._print_setup(np.iscomplexobj(x)) return x def step( self, x: NDArray, show: bool = False, show_inner: bool = False, **kwargs_lsqr, ) -> NDArray: r"""Run one step of solver Parameters ---------- x : :obj:`list` or :obj:`np.ndarray` Current model vector to be updated by a step of OMP show_inner : :obj:`bool`, optional Display inner iteration logs of lsqr show : :obj:`bool`, optional Display iteration log **kwargs_lsqr Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.lsqr` solver used to solve 
the first subproblem in the first step of the Split Bregman algorithm. Returns ------- x : :obj:`np.ndarray` Updated model vector """ for _ in range(self.niter_inner): # regularized problem dataregs = self.dataregsL2 + [ self.d[ireg] - self.b[ireg] for ireg in range(self.nregsL1) ] x = regularized_inversion( self.Op, self.y, self.Regs, dataregs=dataregs, epsRs=self.epsRs, x0=self.x0 if self.restart else x, show=show_inner, **kwargs_lsqr, )[0] # Shrinkage self.d = [ _softthreshold(self.RegsL1[ireg] * x + self.b[ireg], self.epsRL1s[ireg]) for ireg in range(self.nregsL1) ] # Bregman update self.b = [ self.b[ireg] + self.tau * (self.RegsL1[ireg] * x - self.d[ireg]) for ireg in range(self.nregsL1) ] # compute residual norms self.costdata = ( self.mu / 2.0 * self.ncp.linalg.norm(self.y - self.Op.matvec(x)) ** 2 ) self.costregL2 = ( 0 if self.RegsL2 is None else [ epsRL2 * self.ncp.linalg.norm(dataregL2 - RegL2.matvec(x)) ** 2 for epsRL2, RegL2, dataregL2 in zip( self.epsRL2s, self.RegsL2, self.dataregsL2 ) ] ) self.costregL1 = [ self.ncp.linalg.norm(RegL1.matvec(x), ord=1) for epsRL1, RegL1 in zip(self.epsRL1s, self.RegsL1) ] self.costtot = ( self.costdata + self.ncp.sum(self.ncp.array(self.costregL2)) + self.ncp.sum(self.ncp.array(self.costregL1)) ) # update history parameters self.iiter += 1 self.cost.append(float(self.costtot)) if show: self._print_step(x) return x def run( self, x: NDArray, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), show_inner: bool = False, **kwargs_lsqr, ) -> NDArray: r"""Run solver Parameters ---------- x : :obj:`np.ndarray` Current model vector to be updated by multiple steps of IRLS show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. 
show_inner : :obj:`bool`, optional Display inner iteration logs of lsqr **kwargs_lsqr Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.lsqr` solver used to solve the first subproblem in the first step of the Split Bregman algorithm. Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ xold = x.copy() + 1.1 * self.tol while ( self.ncp.linalg.norm(x - xold) > self.tol and self.iiter < self.niter_outer ): xold = x.copy() showstep = ( True if show and ( self.iiter < itershow[0] or self.niter_outer - self.iiter < itershow[1] or self.iiter % itershow[2] == 0 ) else False ) x = self.step(x, showstep, show_inner, **kwargs_lsqr) self.callback(x) return x def finalize(self, show: bool = False) -> NDArray: r"""Finalize solver Parameters ---------- show : :obj:`bool`, optional Display finalize log Returns ------- xfin : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` """ self.tend = time.time() self.telapsed = self.tend - self.tstart self.cost = np.array(self.cost) if show: self._print_finalize(nbar=65) def solve( self, y: NDArray, RegsL1: List[LinearOperator], x0: Optional[NDArray] = None, niter_outer: int = 3, niter_inner: int = 5, RegsL2: Optional[List[LinearOperator]] = None, dataregsL2: Optional[List[NDArray]] = None, mu: float = 1.0, epsRL1s: Optional[SamplingLike] = None, epsRL2s: Optional[SamplingLike] = None, tol: float = 1e-10, tau: float = 1.0, restart: bool = False, show: bool = False, itershow: Tuple[int, int, int] = (10, 10, 10), show_inner: bool = False, **kwargs_lsqr, ) -> Tuple[NDArray, int, NDArray]: r"""Run entire solver Parameters ---------- y : :obj:`np.ndarray` Data of size :math:`[N \times 1]` RegsL1 : :obj:`list` :math:`L_1` regularization operators x0 : :obj:`np.ndarray`, optional Initial guess of size :math:`[M \times 1]`. 
If ``None``, initialize internally as zero vector niter_outer : :obj:`int`, optional Number of iterations of outer loop niter_inner : :obj:`int`, optional Number of iterations of inner loop of first step of the Split Bregman algorithm. A small number of iterations is generally sufficient and for many applications optimal efficiency is obtained when only one iteration is performed. RegsL2 : :obj:`list`, optional Additional :math:`L^2` regularization operators (if ``None``, :math:`L^2` regularization is not added to the problem) dataregsL2 : :obj:`list`, optional :math:`L^2` Regularization data (must have the same number of elements of ``RegsL2`` or equal to ``None`` to use a zero data for every regularization operator in ``RegsL2``) mu : :obj:`float`, optional Data term damping epsRL1s : :obj:`list` :math:`L_1` Regularization dampings (must have the same number of elements as ``RegsL1``) epsRL2s : :obj:`list` :math:`L^2` Regularization dampings (must have the same number of elements as ``RegsL2``) tol : :obj:`float`, optional Tolerance. Stop outer iterations if difference between inverted model at subsequent iterations is smaller than ``tol`` tau : :obj:`float`, optional Scaling factor in the Bregman update (must be close to 1) restart : :obj:`bool`, optional Initialize the unconstrained inverse problem in inner loop with the initial guess (``True``) or with the last estimate (``False``). Note that when this is set to ``True``, the ``x0`` provided in the setup will be used in all iterations. show : :obj:`bool`, optional Display logs itershow : :obj:`tuple`, optional Display set log for the first N1 steps, last N2 steps, and every N3 steps in between where N1, N2, N3 are the three element of the list. show_inner : :obj:`bool`, optional Display inner iteration logs of lsqr **kwargs_lsqr Arbitrary keyword arguments for :py:func:`scipy.sparse.linalg.lsqr` solver used to solve the first subproblem in the first step of the Split Bregman algorithm. 
Returns ------- x : :obj:`np.ndarray` Estimated model of size :math:`[M \times 1]` iiter : :obj:`int` Iteration number of outer loop upon termination cost : :obj:`numpy.ndarray` History of cost function through iterations """ x = self.setup( y, RegsL1, x0=x0, niter_outer=niter_outer, niter_inner=niter_inner, RegsL2=RegsL2, dataregsL2=dataregsL2, mu=mu, epsRL1s=epsRL1s, epsRL2s=epsRL2s, tol=tol, tau=tau, restart=restart, show=show, ) x = self.run( x, show=show, itershow=itershow, show_inner=show_inner, **kwargs_lsqr ) self.finalize(show) return x, self.iiter, self.cost
85,203
33.607636
116
py
pylops
pylops-master/pylops/optimization/__init__.py
""" Optimization ============ The subpackage optimization provides an extensive set of solvers to be used with PyLops linear operators. A list of least-squares solvers in pylops.optimization.solver: cg Conjugate gradient. cgls Conjugate gradient least-squares. lsqr LSQR. and wrappers for regularized or preconditioned inversion in pylops.optimization.leastsquares: normal_equations_inversion Inversion of normal equations. regularized_inversion Regularized inversion. preconditioned_inversion Preconditioned inversion. and sparsity-promoting solvers in pylops.optimization.sparsity: irls Iteratively reweighted least squares. omp Orthogonal Matching Pursuit (OMP). ista Iterative Soft Thresholding Algorithm. fista Fast Iterative Soft Thresholding Algorithm. spgl1 Spectral Projected-Gradient for L1 norm. splitbregman Split Bregman for mixed L2-L1 norms. Note that these solvers are thin wrappers over class-based solvers (new in v2), which can be accessed from submodules with equivalent name and suffix c. """
1,338
39.575758
106
py
pylops
pylops-master/pylops/optimization/callback.py
__all__ = [
    "Callbacks",
    "MetricsCallback",
]

from typing import TYPE_CHECKING, Dict, List, Optional, Sequence

from pylops.utils.metrics import mae, mse, psnr, snr
from pylops.utils.typing import NDArray

if TYPE_CHECKING:
    from pylops.linearoperator import LinearOperator
    from pylops.optimization.basesolver import Solver


class Callbacks:
    r"""Callbacks

    Base class for user-defined solver callbacks. Subclass it and override
    any of the hook methods below; a solver invokes them at well-defined
    points of its life cycle:

    - ``on_setup_begin``: invoked at the start of the solver ``setup`` method
    - ``on_setup_end``: invoked at the end of the solver ``setup`` method
    - ``on_step_begin``: invoked at the start of the solver ``step`` method
    - ``on_step_end``: invoked at the end of the solver ``step`` method
    - ``on_run_begin``: invoked at the start of the solver ``run`` method
    - ``on_run_end``: invoked at the end of the solver ``run`` method

    Every hook receives two inputs: the solver instance itself and the
    current model vector ``x``.

    Examples
    --------
    >>> import numpy as np
    >>> from pylops.basicoperators import MatrixMult
    >>> from pylops.optimization.solver import CG
    >>> from pylops.optimization.callback import Callbacks
    >>> class StoreIterCallback(Callbacks):
    ...     def __init__(self):
    ...         self.stored = []
    ...     def on_step_end(self, solver, x):
    ...         self.stored.append(solver.iiter)
    >>> cb_sto = StoreIterCallback()
    >>> Aop = MatrixMult(np.random.normal(0., 1., 36).reshape(6, 6))
    >>> Aop = Aop.H @ Aop
    >>> y = Aop @ np.ones(6)
    >>> cgsolve = CG(Aop, callbacks=[cb_sto, ])
    >>> xest = cgsolve.solve(y=y, x0=np.zeros(6), tol=0, niter=6, show=False)[0]
    >>> xest
    array([1., 1., 1., 1., 1., 1.])

    """

    def __init__(self) -> None:
        pass

    def on_setup_begin(self, solver: "Solver", x0: NDArray) -> None:
        """Hook invoked before the solver setup

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x0 : :obj:`np.ndarray`
            Initial guess (when present as one of the inputs of the
            solver setup method)

        """
        pass

    def on_setup_end(self, solver: "Solver", x: NDArray) -> None:
        """Hook invoked after the solver setup

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x : :obj:`np.ndarray`
            Current model vector

        """
        pass

    def on_step_begin(self, solver: "Solver", x: NDArray) -> None:
        """Hook invoked before each solver step

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x : :obj:`np.ndarray`
            Current model vector

        """
        pass

    def on_step_end(self, solver: "Solver", x: NDArray) -> None:
        """Hook invoked after each solver step

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x : :obj:`np.ndarray`
            Current model vector

        """
        pass

    def on_run_begin(self, solver: "Solver", x: NDArray) -> None:
        """Hook invoked before the entire solver run

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x : :obj:`np.ndarray`
            Current model vector

        """
        pass

    def on_run_end(self, solver: "Solver", x: NDArray) -> None:
        """Hook invoked after the entire solver run

        Parameters
        ----------
        solver : :obj:`pylops.optimization.basesolver.Solver`
            Solver object
        x : :obj:`np.ndarray`
            Current model vector

        """
        pass


class MetricsCallback(Callbacks):
    r"""Metrics callback

    Store a selection of metrics from the ``pylops.utils.metrics`` module
    at the end of every solver step.

    Parameters
    ----------
    xtrue : :obj:`np.ndarray`
        True model vector
    Op : :obj:`pylops.LinearOperator`, optional
        Operator to apply to the solution prior to comparing it with `xtrue`
    which : :obj:`tuple`, optional
        List of metrics to compute (currently available: "mae", "mse",
        "snr", and "psnr")

    """

    # maps each supported metric name to the function that computes it;
    # insertion order here fixes the key order of ``self.metrics``
    _METRICFUNCS = {"mae": mae, "mse": mse, "snr": snr, "psnr": psnr}

    def __init__(
        self,
        xtrue: NDArray,
        Op: Optional["LinearOperator"] = None,
        which: Sequence[str] = ("mae", "mse", "snr", "psnr"),
    ):
        self.xtrue = xtrue
        self.Op = Op
        self.which = which
        # one history list for every requested (and supported) metric
        self.metrics: Dict[str, List] = {
            name: [] for name in self._METRICFUNCS if name in which
        }

    def on_step_end(self, solver: "Solver", x: NDArray) -> None:
        if self.Op is not None:
            x = self.Op * x
        for name, history in self.metrics.items():
            history.append(self._METRICFUNCS[name](self.xtrue, x))
5,594
29.407609
97
py
pylops
pylops-master/pylops/optimization/basic.py
__all__ = [
    "cg",
    "cgls",
    "lsqr",
]

from typing import TYPE_CHECKING, Callable, Optional, Tuple

from pylops.optimization.cls_basic import CG, CGLS, LSQR
from pylops.utils.decorators import add_ndarray_support_to_solver
from pylops.utils.typing import NDArray

if TYPE_CHECKING:
    from pylops.linearoperator import LinearOperator


@add_ndarray_support_to_solver
def cg(
    Op: "LinearOperator",
    y: NDArray,
    x0: Optional[NDArray] = None,
    niter: int = 10,
    tol: float = 1e-4,
    show: bool = False,
    itershow: Tuple[int, int, int] = (10, 10, 10),
    callback: Optional[Callable] = None,
) -> Tuple[NDArray, int, NDArray]:
    r"""Conjugate gradient

    Solve a square system of equations given an operator ``Op`` and data
    ``y`` using conjugate gradient iterations.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert of size :math:`[N \times N]`
    y : :obj:`np.ndarray`
        Data of size :math:`[N \times 1]`
    x0 : :obj:`np.ndarray`, optional
        Initial guess
    niter : :obj:`int`, optional
        Number of iterations
    tol : :obj:`float`, optional
        Tolerance on residual norm
    show : :obj:`bool`, optional
        Display iterations log
    itershow : :obj:`tuple`, optional
        Display set log for the first N1 steps, last N2 steps,
        and every N3 steps in between, where N1, N2, N3 are the three
        elements of the tuple.
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each
        iteration where ``x`` is the current model vector

    Returns
    -------
    x : :obj:`np.ndarray`
        Estimated model of size :math:`[N \times 1]`
    iit : :obj:`int`
        Number of executed iterations
    cost : :obj:`numpy.ndarray`, optional
        History of the L2 norm of the residual

    Notes
    -----
    See :class:`pylops.optimization.cls_basic.CG`

    """
    solver = CG(Op)
    if callback is not None:
        solver.callback = callback
    # ``solve`` returns (x, iiter, cost); forward the tuple unchanged
    return solver.solve(
        y=y, x0=x0, tol=tol, niter=niter, show=show, itershow=itershow
    )


@add_ndarray_support_to_solver
def cgls(
    Op: "LinearOperator",
    y: NDArray,
    x0: Optional[NDArray] = None,
    niter: int = 10,
    damp: float = 0.0,
    tol: float = 1e-4,
    show: bool = False,
    itershow: Tuple[int, int, int] = (10, 10, 10),
    callback: Optional[Callable] = None,
) -> Tuple[NDArray, int, int, float, float, NDArray]:
    r"""Conjugate gradient least squares

    Solve an overdetermined system of equations given an operator ``Op``
    and data ``y`` using conjugate gradient iterations.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert of size :math:`[N \times M]`
    y : :obj:`np.ndarray`
        Data of size :math:`[N \times 1]`
    x0 : :obj:`np.ndarray`, optional
        Initial guess
    niter : :obj:`int`, optional
        Number of iterations
    damp : :obj:`float`, optional
        Damping coefficient
    tol : :obj:`float`, optional
        Tolerance on residual norm
    show : :obj:`bool`, optional
        Display iterations log
    itershow : :obj:`tuple`, optional
        Display set log for the first N1 steps, last N2 steps,
        and every N3 steps in between, where N1, N2, N3 are the three
        elements of the tuple.
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each
        iteration where ``x`` is the current model vector

    Returns
    -------
    x : :obj:`np.ndarray`
        Estimated model of size :math:`[M \times 1]`
    istop : :obj:`int`
        Gives the reason for termination

        ``1`` means :math:`\mathbf{x}` is an approximate solution to
        :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}`

        ``2`` means :math:`\mathbf{x}` approximately solves the
        least-squares problem
    iit : :obj:`int`
        Iteration number upon termination
    r1norm : :obj:`float`
        :math:`||\mathbf{r}||_2`, where
        :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}`
    r2norm : :obj:`float`
        :math:`\sqrt{\mathbf{r}^T\mathbf{r}  +
        \epsilon^2 \mathbf{x}^T\mathbf{x}}`.
        Equal to ``r1norm`` if :math:`\epsilon=0`
    cost : :obj:`numpy.ndarray`, optional
        History of r1norm through iterations

    Notes
    -----
    See :class:`pylops.optimization.cls_basic.CGLS`

    """
    solver = CGLS(Op)
    if callback is not None:
        solver.callback = callback
    # ``solve`` returns (x, istop, iiter, r1norm, r2norm, cost)
    return solver.solve(
        y=y, x0=x0, tol=tol, niter=niter, damp=damp, show=show, itershow=itershow
    )


@add_ndarray_support_to_solver
def lsqr(
    Op: "LinearOperator",
    y: NDArray,
    x0: Optional[NDArray] = None,
    damp: float = 0.0,
    atol: float = 1e-08,
    btol: float = 1e-08,
    conlim: float = 100000000.0,
    niter: int = 10,
    calc_var: bool = True,
    show: bool = False,
    itershow: Tuple[int, int, int] = (10, 10, 10),
    callback: Optional[Callable] = None,
) -> Tuple[NDArray, int, int, float, float, float, float, float, float, float, NDArray]:
    r"""LSQR

    Solve an overdetermined system of equations given an operator ``Op``
    and data ``y`` using LSQR iterations.

    .. math::
        \DeclareMathOperator{\cond}{cond}

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Operator to invert of size :math:`[N \times M]`
    y : :obj:`np.ndarray`
        Data of size :math:`[N \times 1]`
    x0 : :obj:`np.ndarray`, optional
        Initial guess of size :math:`[M \times 1]`
    damp : :obj:`float`, optional
        Damping coefficient
    atol, btol : :obj:`float`, optional
        Stopping tolerances. If both are 1.0e-9, the final residual norm
        should be accurate to about 9 digits. (The solution will usually
        have fewer correct digits, depending on
        :math:`\cond(\mathbf{Op})` and the size of ``damp``.)
    conlim : :obj:`float`, optional
        Stopping tolerance on :math:`\cond(\mathbf{Op})`
        exceeds ``conlim``. For square, ``conlim`` could be as large as
        1.0e+12. For least-squares problems, ``conlim`` should be less
        than 1.0e+8. Maximum precision can be obtained by setting
        ``atol = btol = conlim = 0``, but the number of iterations may
        then be excessive.
    niter : :obj:`int`, optional
        Number of iterations
    calc_var : :obj:`bool`, optional
        Estimate diagonals of
        :math:`(\mathbf{Op}^H\mathbf{Op} + \epsilon^2\mathbf{I})^{-1}`.
    show : :obj:`bool`, optional
        Display iterations log
    itershow : :obj:`tuple`, optional
        Display set log for the first N1 steps, last N2 steps,
        and every N3 steps in between, where N1, N2, N3 are the three
        elements of the tuple.
    callback : :obj:`callable`, optional
        Function with signature (``callback(x)``) to call after each
        iteration where ``x`` is the current model vector

    Returns
    -------
    x : :obj:`np.ndarray`
        Estimated model of size :math:`[M \times 1]`
    istop : :obj:`int`
        Gives the reason for termination

        ``0`` means the exact solution is :math:`\mathbf{x}=0`

        ``1`` means :math:`\mathbf{x}` is an approximate solution to
        :math:`\mathbf{y} = \mathbf{Op}\,\mathbf{x}`

        ``2`` means :math:`\mathbf{x}` approximately solves the
        least-squares problem

        ``3`` means the estimate of :math:`\cond(\overline{\mathbf{Op}})`
        has exceeded ``conlim``

        ``4`` means :math:`\mathbf{y} - \mathbf{Op}\,\mathbf{x}` is small
        enough for this machine

        ``5`` means the least-squares solution is good enough for this
        machine

        ``6`` means :math:`\cond(\overline{\mathbf{Op}})` seems to be too
        large for this machine

        ``7`` means the iteration limit has been reached
    r1norm : :obj:`float`
        :math:`||\mathbf{r}||_2^2`, where
        :math:`\mathbf{r} = \mathbf{y} - \mathbf{Op}\,\mathbf{x}`
    r2norm : :obj:`float`
        :math:`\sqrt{\mathbf{r}^T\mathbf{r}  +
        \epsilon^2 \mathbf{x}^T\mathbf{x}}`.
        Equal to ``r1norm`` if :math:`\epsilon=0`
    anorm : :obj:`float`
        Estimate of Frobenius norm of
        :math:`\overline{\mathbf{Op}} = [\mathbf{Op} \; \epsilon \mathbf{I}]`
    acond : :obj:`float`
        Estimate of :math:`\cond(\overline{\mathbf{Op}})`
    arnorm : :obj:`float`
        Estimate of norm of :math:`\cond(\mathbf{Op}^H\mathbf{r}-
        \epsilon^2\mathbf{x})`
    var : :obj:`float`
        Diagonals of :math:`(\mathbf{Op}^H\mathbf{Op})^{-1}` (if
        ``damp=0``) or more generally
        :math:`(\mathbf{Op}^H\mathbf{Op} + \epsilon^2\mathbf{I})^{-1}`.
    cost : :obj:`numpy.ndarray`, optional
        History of r1norm through iterations

    Notes
    -----
    See :class:`pylops.optimization.cls_basic.LSQR`

    """
    solver = LSQR(Op)
    if callback is not None:
        solver.callback = callback
    # ``solve`` returns the full 11-element result tuple
    # (x, istop, iiter, r1norm, r2norm, anorm, acond, arnorm, xnorm, var, cost)
    return solver.solve(
        y=y,
        x0=x0,
        damp=damp,
        atol=atol,
        btol=btol,
        conlim=conlim,
        niter=niter,
        calc_var=calc_var,
        show=show,
        itershow=itershow,
    )
9,636
31.123333
88
py
pylops
pylops-master/pylops/basicoperators/identity.py
__all__ = ["Identity"] from typing import Optional import numpy as np from pylops import LinearOperator from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, NDArray class Identity(LinearOperator): r"""Identity operator. Simply move model to data in forward model and viceversa in adjoint mode if :math:`M = N`. If :math:`M > N` removes last :math:`M - N` elements from model in forward and pads with :math:`0` in adjoint. If :math:`N > M` removes last :math:`N - M` elements from data in adjoint and pads with :math:`0` in forward. Parameters ---------- N : :obj:`int` Number of samples in data (and model, if ``M`` is not provided). M : :obj:`int`, optional Number of samples in model. inplace : :obj:`bool`, optional Work inplace (``True``) or make a new copy (``False``). By default, data is a reference to the model (in forward) and model is a reference to the data (in adjoint). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- For :math:`M = N`, an *Identity* operator simply moves the model :math:`\mathbf{x}` to the data :math:`\mathbf{y}` in forward mode and viceversa in adjoint mode: .. math:: y_i = x_i \quad \forall i=1,2,\ldots,N or in matrix form: .. math:: \mathbf{y} = \mathbf{I} \mathbf{x} = \mathbf{x} and .. math:: \mathbf{x} = \mathbf{I} \mathbf{y} = \mathbf{y} For :math:`M > N`, the *Identity* operator takes the first :math:`M` elements of the model :math:`\mathbf{x}` into the data :math:`\mathbf{y}` in forward mode .. math:: y_i = x_i \quad \forall i=1,2,\ldots,N and all the elements of the data :math:`\mathbf{y}` into the first :math:`M` elements of model in adjoint mode (other elements are ``O``): .. 
math:: x_i = y_i \quad \forall i=1,2,\ldots,M x_i = 0 \quad \forall i=M+1,\ldots,N """ def __init__( self, N: int, M: Optional[int] = None, inplace: bool = True, dtype: DTypeLike = "float64", name: str = "I", ) -> None: M = N if M is None else M super().__init__(dtype=np.dtype(dtype), shape=(N, M), name=name) self.inplace = inplace def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if not self.inplace: x = x.copy() if self.shape[0] == self.shape[1]: y = x elif self.shape[0] < self.shape[1]: y = x[: self.shape[0]] else: y = ncp.zeros(self.shape[0], dtype=self.dtype) y[: self.shape[1]] = x return y def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if not self.inplace: x = x.copy() if self.shape[0] == self.shape[1]: y = x elif self.shape[0] < self.shape[1]: y = ncp.zeros(self.shape[1], dtype=self.dtype) y[: self.shape[0]] = x else: y = x[: self.shape[1]] return y
3,512
27.104
79
py
pylops
pylops-master/pylops/basicoperators/memoizeoperator.py
__all__ = ["MemoizeOperator"] from typing import List, Tuple import numpy as np from pylops import LinearOperator from pylops.utils.typing import NDArray class MemoizeOperator(LinearOperator): r"""Memoize Operator. This operator can be used to wrap any PyLops operator and add a memoize functionality and stores the last ``max_neval`` model/data vector pairs Parameters ---------- Op : :obj:`pylops.LinearOperator` PyLops linear operator max_neval : :obj:`int`, optional Maximum number of previous evaluations stored, use ``np.inf`` for infinite memory Attributes ---------- shape : :obj:`tuple` Operator shape :math:`[n \times m]` explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) """ def __init__( self, Op: LinearOperator, max_neval: int = 10, ) -> None: super().__init__(Op=Op) self.max_neval = max_neval self.store: List[Tuple[NDArray, NDArray]] = [] # Store a list of (x, y) self.neval = 0 # Number of evaluations of the operator def _matvec(self, x: NDArray) -> NDArray: for xstored, ystored in self.store: if np.allclose(xstored, x): return ystored if len(self.store) + 1 > self.max_neval: del self.store[0] # Delete oldest y = self.Op._matvec(x) self.neval += 1 self.store.append((x.copy(), y.copy())) return y def _rmatvec(self, y: NDArray) -> NDArray: for xstored, ystored in self.store: if np.allclose(ystored, y): return xstored if len(self.store) + 1 > self.max_neval: del self.store[0] # Delete oldest x = self.Op._rmatvec(y) self.neval += 1 self.store.append((x.copy(), y.copy())) return x
1,935
27.057971
80
py
pylops
pylops-master/pylops/basicoperators/spread.py
__all__ = ["Spread"] import logging from typing import Callable, Optional import numpy as np from pylops import LinearOperator from pylops.utils import deps from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray jit_message = deps.numba_import("the spread module") if jit_message is None: from numba import jit from ._spread_numba import ( _matvec_numba_onthefly, _matvec_numba_table, _rmatvec_numba_onthefly, _rmatvec_numba_table, ) logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class Spread(LinearOperator): r"""Spread operator. Spread values from the input model vector arranged as a 2-dimensional array of size :math:`[n_{x_0} \times n_{t_0}]` into the data vector of size :math:`[n_x \times n_t]`. Note that the value at each single pair :math:`(x_0, t_0)` in the input is spread over the entire :math:`x` axis in the output. Spreading is performed along parametric curves provided as look-up table of pre-computed indices (``table``) or computed on-the-fly using a function handle (``fh``). In adjont mode, values from the data vector are instead stacked along the same parametric curves. Parameters ---------- dims : :obj:`tuple` Dimensions of model vector (vector will be reshaped internally into a two-dimensional array of size :math:`[n_{x_0} \times n_{t_0}]`, where the first dimension is the spreading direction) dimsd : :obj:`tuple` Dimensions of data vector (vector will be reshaped internal into a two-dimensional array of size :math:`[n_x \times n_t]`, where the first dimension is the stacking direction) table : :obj:`np.ndarray`, optional Look-up table of indices of size :math:`[n_{x_0} \times n_{t_0} \times n_x]` (if ``None`` use function handle ``fh``). When ``dtable`` is not provided, the ``data`` will be created as follows .. code-block:: python data[ix, table[ix0, it0, ix]] += model[ix0, it0] .. 
note:: When using ``table`` without ``dtable``, its elements must be between 0 and :math:`n_{t_0} - 1` (or ``numpy.nan``). dtable : :obj:`np.ndarray`, optional Look-up table of decimals remainders for linear interpolation of size :math:`[n_{x_0} \times n_{t_0} \times n_x]` (if ``None`` use function handle ``fh``). When provided, the ``data`` will be created as follows .. code-block:: python data[ix, table[ix0, it0, ix]] += (1 - dtable[ix0, it0, ix]) * model[ix0, it0] data[ix, table[ix0, it0, ix] + 1] += dtable[ix0, it0, ix] * model[ix0, it0] .. note:: When using ``table`` and ``dtable``, the elements of ``table`` indices must be between 0 and :math:`n_{t_0} - 2` (or ``numpy.nan``). fh : :obj:`callable`, optional If ``None`` will use look-up table ``table``. When provided, should be a function which takes indices ``ix0`` and ``it0`` and returns an array of size :math:`n_x` containing each respective time index. Alternatively, if linear interpolation is required, it should output in addition to the time indices, a weight for interpolation with linear interpolation, to be used as follows .. code-block:: python data[ix, index] += (1 - dindices[ix]) * model[ix0, it0] data[ix, index + 1] += dindices[ix] * model[ix0, it0] where ``index`` refers to a time index in the first array returned by ``fh`` and ``dindices`` refers to the weight in the second array returned by ``fh``. .. note:: When using ``fh`` with one output (time indices), the time indices must be between 0 and :math:`n_{t_0} - 1` (or ``numpy.nan``). When using ``fh`` with two outputs (time indices and weights), they must be within the between 0 and :math:`n_{t_0} - 2` (or ``numpy.nan``). interp : :obj:`bool`, optional Use only if engine ``engine='numba'``. Apply linear interpolation (``True``) or nearest interpolation (``False``) during stacking/spreading along parametric curve. When using ``engine="numpy"``, it will be inferred directly from ``fh`` or the presence of ``dtable``. 
engine : :obj:`str`, optional Engine used for spread computation (``numpy`` or ``numba``). Note that ``numba`` can only be used when providing a look-up table dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ KeyError If ``engine`` is neither ``numpy`` nor ``numba`` NotImplementedError If both ``table`` and ``fh`` are not provided ValueError If ``table`` has shape different from :math:`[n_{x_0} \times n_{t_0} \times n_x]` Notes ----- The Spread operator applies the following linear transform in forward mode to the model vector after reshaping it into a 2-dimensional array of size :math:`[n_x \times n_t]`: .. math:: m(x_0, t_0) \rightarrow d(x, t=f(x_0, x, t_0)) \quad \forall x where :math:`f(x_0, x, t)` is a mapping function that returns a value :math:`t` given values :math:`x_0`, :math:`x`, and :math:`t_0`. Note that for each :math:`(x_0, t_0)` pair, spreading is done over the entire :math:`x` axis in the data domain. In adjoint mode, the model is reconstructed by means of the following stacking operation: .. math:: m(x_0, t_0) = \int{d(x, t=f(x_0, x, t_0))} \,\mathrm{d}x Note that ``table`` (or ``fh``) must return integer numbers representing indices in the axis :math:`t`. However it also possible to perform linear interpolation as part of the spreading/stacking process by providing the decimal part of the mapping function (:math:`t - \lfloor t \rfloor`) either in ``dtable`` input parameter or as second value in the return of ``fh`` function. 
""" def __init__( self, dims: InputDimsLike, dimsd: InputDimsLike, table: Optional[NDArray] = None, dtable: Optional[NDArray] = None, fh: Optional[Callable] = None, interp: Optional[bool] = None, engine: str = "numpy", dtype: DTypeLike = "float64", name: str = "S", ) -> None: super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) if engine not in ["numpy", "numba"]: raise KeyError("engine must be numpy or numba") if engine == "numba" and jit_message is None: self.engine = "numba" else: if engine == "numba" and jit is not None: logging.warning(jit_message) self.engine = "numpy" # axes self.nx0, self.nt0 = self.dims[0], self.dims[1] self.nx, self.nt = self.dimsd[0], self.dimsd[1] self.table = table self.dtable = dtable self.fh = fh # find out if mapping is in table of function handle if self.table is None and fh is None: raise NotImplementedError("provide either table or fh.") elif self.table is not None: if fh is not None: raise ValueError("provide only one of table or fh.") if self.table.shape != (self.nx0, self.nt0, self.nx): raise ValueError("table must have shape [nx0 x nt0 x nx]") self.usetable = True if np.any(self.table > self.nt): raise ValueError("values in table must be smaller than nt") else: self.usetable = False # find out if linear interpolation has to be carried out self.interp = False if self.usetable: if self.dtable is not None: if self.dtable.shape != (self.nx0, self.nt0, self.nx): raise ValueError("dtable must have shape [nx0 x nt x nx]") self.interp = True else: if self.engine == "numba": self.interp = interp else: if len(fh(0, 0)) == 2: self.interp = True if interp is not None and self.interp != interp: logging.warning("interp has been overridden to %r.", self.interp) def _matvec_numpy(self, x: NDArray) -> NDArray: y = np.zeros(self.dimsd, dtype=self.dtype) for it in range(self.dims[1]): for ix0 in range(self.dims[0]): if self.usetable: indices = self.table[ix0, it] if self.interp: dindices = self.dtable[ix0, it] else: if 
self.interp: indices, dindices = self.fh(ix0, it) else: indices = self.fh(ix0, it) mask = np.argwhere(~np.isnan(indices)) if mask.size > 0: indices = (indices[mask]).astype(int) if not self.interp: y[mask, indices] += x[ix0, it] else: y[mask, indices] += (1 - dindices[mask]) * x[ix0, it] y[mask, indices + 1] += dindices[mask] * x[ix0, it] return y def _rmatvec_numpy(self, x: NDArray) -> NDArray: y = np.zeros(self.dims, dtype=self.dtype) for it in range(self.dims[1]): for ix0 in range(self.dims[0]): if self.usetable: indices = self.table[ix0, it] if self.interp: dindices = self.dtable[ix0, it] else: if self.interp: indices, dindices = self.fh(ix0, it) else: indices = self.fh(ix0, it) mask = np.argwhere(~np.isnan(indices)) if mask.size > 0: indices = (indices[mask]).astype(int) if not self.interp: y[ix0, it] = np.sum(x[mask, indices]) else: y[ix0, it] = np.sum( x[mask, indices] * (1 - dindices[mask]) ) + np.sum(x[mask, indices + 1] * dindices[mask]) return y @reshaped def _matvec(self, x: NDArray) -> NDArray: if self.engine == "numba": y = np.zeros(self.dimsd, dtype=self.dtype) if self.usetable: y = _matvec_numba_table( x, y, self.dims, self.interp, self.table, self.table if self.dtable is None else self.dtable, ) else: y = _matvec_numba_onthefly(x, y, self.dims, self.interp, self.fh) else: y = self._matvec_numpy(x) return y @reshaped def _rmatvec(self, x: NDArray) -> NDArray: if self.engine == "numba": y = np.zeros(self.dims, dtype=self.dtype) if self.usetable: y = _rmatvec_numba_table( x, y, self.dims, self.dimsd, self.interp, self.table, self.table if self.dtable is None else self.dtable, ) else: y = _rmatvec_numba_onthefly( x, y, self.dims, self.dimsd, self.interp, self.fh ) else: y = self._rmatvec_numpy(x) return y
12,187
37.815287
100
py
pylops
pylops-master/pylops/basicoperators/roll.py
__all__ = ["Roll"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Roll(LinearOperator): r"""Roll along an axis. Roll a multi-dimensional array along ``axis`` for a chosen number of samples (``shift``). Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which model is rolled. shift : :obj:`int`, optional Number of samples by which elements are shifted dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The Roll operator is a thin wrapper around :func:`numpy.roll` and shifts elements in a multi-dimensional array along a specified direction for a chosen number of samples. """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, shift: int = 1, dtype: DTypeLike = "float64", name: str = "R", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) self.axis = axis self.shift = shift @reshaped(swapaxis=True) def _matvec(self, x: NDArray) -> NDArray: return np.roll(x, shift=self.shift, axis=-1) @reshaped(swapaxis=True) def _rmatvec(self, x: NDArray) -> NDArray: return np.roll(x, shift=-self.shift, axis=-1)
2,018
27.041667
81
py
pylops
pylops-master/pylops/basicoperators/diagonal.py
__all__ = ["Diagonal"] from typing import Optional, Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module, to_cupy_conditional from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Diagonal(LinearOperator): r"""Diagonal operator. Applies element-wise multiplication of the input vector with the vector ``diag`` in forward and with its complex conjugate in adjoint mode. This operator can also broadcast; in this case the input vector is reshaped into its dimensions ``dims`` and the element-wise multiplication with ``diag`` is perfomed along ``axis``. Note that the vector ``diag`` will need to have size equal to ``dims[axis]``. Parameters ---------- diag : :obj:`numpy.ndarray` Vector to be used for element-wise multiplication. dims : :obj:`list`, optional Number of samples for each dimension (``None`` if only one dimension is available) axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which multiplication is applied. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- Element-wise multiplication between the model :math:`\mathbf{x}` and/or data :math:`\mathbf{y}` vectors and the array :math:`\mathbf{d}` can be expressed as .. math:: y_i = d_i x_i \quad \forall i=1,2,\ldots,N This is equivalent to a matrix-vector multiplication with a matrix containing the vector :math:`\mathbf{d}` along its main diagonal. For real-valued ``diag``, the Diagonal operator is self-adjoint as the adjoint of a diagonal matrix is the diagonal matrix itself. 
For complex-valued ``diag``, the adjoint is equivalent to the element-wise multiplication with the complex conjugate elements of ``diag``. """ def __init__( self, diag: NDArray, dims: Optional[Union[int, InputDimsLike]] = None, axis: int = -1, dtype: DTypeLike = "float64", name: str = "D", ) -> None: self.diag = diag self.axis = axis origdims = dims dims = self.diag.shape if dims is None else _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) ncp = get_array_module(diag) self.complex = True if ncp.iscomplexobj(self.diag) else False if origdims is not None: diagdims = np.ones_like(self.dims) diagdims[axis] = self.dims[axis] self.diag = self.diag.reshape(diagdims) @reshaped def _matvec(self, x: NDArray) -> NDArray: if type(self.diag) is not type(x): self.diag = to_cupy_conditional(x, self.diag) y = self.diag * x return y @reshaped def _rmatvec(self, x: NDArray) -> NDArray: if type(self.diag) is not type(x): self.diag = to_cupy_conditional(x, self.diag) if self.complex: diagadj = self.diag.conj() else: diagadj = self.diag y = diagadj * x return y def matrix(self) -> NDArray: """Return diagonal matrix as dense :obj:`numpy.ndarray` Returns ------- densemat : :obj:`numpy.ndarray` Dense matrix. """ ncp = get_array_module(self.diag) densemat = ncp.diag(self.diag.squeeze()) return densemat def todense(self) -> NDArray: """Fast implementation of todense based on known structure of the operator Returns ------- densemat : :obj:`numpy.ndarray` Dense matrix. """ if self.diag.ndim == 1: return self.matrix() else: dims = list(self.dims) dims[self.axis] = 1 matrix = np.diag(np.tile(self.diag, dims).ravel()) return matrix
4,395
30.625899
82
py
pylops
pylops-master/pylops/basicoperators/kronecker.py
__all__ = ["Kronecker"] import numpy as np from pylops import LinearOperator from pylops.utils.typing import DTypeLike, NDArray class Kronecker(LinearOperator): r"""Kronecker operator. Perform Kronecker product of two operators. Note that the combined operator is never created explicitly, rather the product of this operator with the model vector is performed in forward mode, or the product of the adjoint of this operator and the data vector in adjoint mode. Parameters ---------- Op1 : :obj:`pylops.LinearOperator` First operator Op2 : :obj:`pylops.LinearOperator` Second operator dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The Kronecker product (denoted with :math:`\otimes`) is an operation on two operators :math:`\mathbf{Op}_1` and :math:`\mathbf{Op}_2` of sizes :math:`\lbrack n_1 \times m_1 \rbrack` and :math:`\lbrack n_2 \times m_2 \rbrack` respectively, resulting in a block matrix of size :math:`\lbrack n_1 n_2 \times m_1 m_2 \rbrack`. .. math:: \mathbf{Op}_1 \otimes \mathbf{Op}_2 = \begin{bmatrix} \text{Op}_1^{1,1} \mathbf{Op}_2 & \ldots & \text{Op}_1^{1,m_1} \mathbf{Op}_2 \\ \vdots & \ddots & \vdots \\ \text{Op}_1^{n_1,1} \mathbf{Op}_2 & \ldots & \text{Op}_1^{n_1,m_1} \mathbf{Op}_2 \end{bmatrix} The application of the resulting matrix to a vector :math:`\mathbf{x}` of size :math:`\lbrack m_1 m_2 \times 1 \rbrack` is equivalent to the application of the second operator :math:`\mathbf{Op}_2` to the rows of a matrix of size :math:`\lbrack m_2 \times m_1 \rbrack` obtained by reshaping the input vector :math:`\mathbf{x}`, followed by the application of the first operator to the transposed matrix produced by the first operator. 
In adjoint mode the same procedure is followed but the adjoint of each operator is used. """ def __init__( self, Op1: LinearOperator, Op2: LinearOperator, dtype: DTypeLike = "float64", name: str = "K", ) -> None: self.Op1 = Op1 self.Op2 = Op2 self.Op1H = self.Op1.H self.Op2H = self.Op2.H shape = ( self.Op1.shape[0] * self.Op2.shape[0], self.Op1.shape[1] * self.Op2.shape[1], ) super().__init__(dtype=np.dtype(dtype), shape=shape, name=name) def _matvec(self, x: NDArray) -> NDArray: x = x.reshape(self.Op1.shape[1], self.Op2.shape[1]) y = self.Op2.matmat(x.T).T y = self.Op1.matmat(y).ravel() return y def _rmatvec(self, x: NDArray) -> NDArray: x = x.reshape(self.Op1.shape[0], self.Op2.shape[0]) y = self.Op2H.matmat(x.T).T y = self.Op1H.matmat(y).ravel() return y
3,238
33.827957
96
py
pylops
pylops-master/pylops/basicoperators/directionalderivative.py
__all__ = [ "FirstDirectionalDerivative", "SecondDirectionalDerivative", ] from pylops import LinearOperator from pylops.basicoperators import Diagonal, Gradient, Sum from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class FirstDirectionalDerivative(LinearOperator): r"""First Directional derivative. Apply a directional derivative operator to a multi-dimensional array along either a single common axis or different axes for each point of the array. .. note:: At least 2 dimensions are required, consider using :py:func:`pylops.FirstDerivative` for 1d arrays. Parameters ---------- dims : :obj:`tuple` Number of samples for each dimension. v : :obj:`np.ndarray`, optional Single direction (array of size :math:`n_\text{dims}`) or group of directions (array of size :math:`[n_\text{dims} \times n_{d_0} \times ... \times n_{d_{n_\text{dims}}}]`) sampling : :obj:`tuple`, optional Sampling steps for each direction. edge : :obj:`bool`, optional Use reduced order derivative at edges (``True``) or ignore them (``False``). kind : :obj:`str`, optional Derivative kind (``forward``, ``centered``, or ``backward``). dtype : :obj:`str`, optional Type of elements in input array. Notes ----- The FirstDirectionalDerivative applies a first-order derivative to a multi-dimensional array along the direction defined by the unitary vector :math:`\mathbf{v}`: .. math:: df_\mathbf{v} = \nabla f \mathbf{v} or along the directions defined by the unitary vectors :math:`\mathbf{v}(x, y)`: .. math:: df_\mathbf{v}(x,y) = \nabla f(x,y) \mathbf{v}(x,y) where we have here considered the 2-dimensional case. This operator can be easily implemented as the concatenation of the :py:class:`pylops.Gradient` operator and the :py:class:`pylops.Diagonal` operator with :math:`\mathbf{v}` along the main diagonal. 
""" def __init__(self, dims: InputDimsLike, v: NDArray, sampling: int = 1, edge: bool = False, kind: str = "centered", dtype: DTypeLike = "float64", name: str = 'F'): self.sampling = sampling self.edge = edge self.kind = kind self.v = v Op = self._calc_first_ddop(dims=dims, sampling=sampling, edge=edge, kind=kind, dtype=dtype, v=v) super().__init__(Op=Op, name=name) def _matvec(self, x: NDArray) -> NDArray: return super()._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x) @staticmethod def _calc_first_ddop(dims: InputDimsLike, v: NDArray, sampling: int, edge: bool, kind: str, dtype: DTypeLike): Gop = Gradient(dims, sampling=sampling, edge=edge, kind=kind, dtype=dtype) if v.ndim == 1: Dop = Diagonal(v, dims=[len(dims)] + list(dims), axis=0, dtype=dtype) else: Dop = Diagonal(v.ravel(), dtype=dtype) Sop = Sum(dims=[len(dims)] + list(dims), axis=0, dtype=dtype) return Sop * Dop * Gop class SecondDirectionalDerivative(LinearOperator): r"""Second Directional derivative. Apply a second directional derivative operator to a multi-dimensional array along either a single common axis or different axes for each point of the array. .. note:: At least 2 dimensions are required, consider using :py:func:`pylops.SecondDerivative` for 1d arrays. Parameters ---------- dims : :obj:`tuple` Number of samples for each dimension. v : :obj:`np.ndarray`, optional Single direction (array of size :math:`n_\text{dims}`) or group of directions (array of size :math:`[n_\text{dims} \times n_{d_0} \times ... \times n_{d_{n_\text{dims}}}]`) sampling : :obj:`tuple`, optional Sampling steps for each direction. edge : :obj:`bool`, optional Use reduced order derivative at edges (``True``) or ignore them (``False``). dtype : :obj:`str`, optional Type of elements in input array. Notes ----- The SecondDirectionalDerivative applies a second-order derivative to a multi-dimensional array along the direction defined by the unitary vector :math:`\mathbf{v}`: .. 
math:: d^2f_\mathbf{v} = - D_\mathbf{v}^T [D_\mathbf{v} f] where :math:`D_\mathbf{v}` is the first-order directional derivative implemented by :func:`pylops.SecondDirectionalDerivative`. This operator is sometimes also referred to as directional Laplacian in the literature. """ def __init__(self, dims: InputDimsLike, v: NDArray, sampling: int = 1, edge: bool = False, dtype: DTypeLike = "float64", name: str = 'S'): self.dims = dims self.v = v self.sampling = sampling self.edge = edge Op = self._calc_second_ddop(dims=dims, v=v, sampling=sampling, edge=edge, dtype=dtype) super().__init__(Op=Op, name=name) def _matvec(self, x: NDArray) -> NDArray: return super()._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x) @staticmethod def _calc_second_ddop(dims: InputDimsLike, v: NDArray, sampling: int, edge: bool, dtype: DTypeLike): Dop = FirstDirectionalDerivative(dims=dims, v=v, sampling=sampling, edge=edge, dtype=dtype) ddop = -Dop.H * Dop return ddop
5,576
34.75
114
py
pylops
pylops-master/pylops/basicoperators/imag.py
__all__ = ["Imag"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Imag(LinearOperator): r"""Imag operator. Return the imaginary component of the input as a real value. The adjoint returns a complex number with zero real component and the imaginary component set to the real component of the input. Parameters ---------- dims : :obj:`int` or :obj:`tuple` Number of samples for each dimension dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- In forward mode: .. math:: y_{i} = \Im\{x_{i}\} \quad \forall i=0,\ldots,N-1 In adjoint mode: .. math:: x_{i} = 0 + i\Re\{y_{i}\} \quad \forall i=0,\ldots,N-1 """ def __init__( self, dims: Union[int, InputDimsLike], dtype: DTypeLike = "complex128", name: str = "I", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__( dtype=np.dtype(dtype), dims=dims, dimsd=dims, clinear=False, name=name ) self.rdtype = np.real(np.ones(1, self.dtype)).dtype def _matvec(self, x: NDArray) -> NDArray: return x.imag.astype(self.rdtype) def _rmatvec(self, x: NDArray) -> NDArray: return (0 + 1j * x.real).astype(self.dtype)
1,820
24.647887
82
py
pylops
pylops-master/pylops/basicoperators/restriction.py
__all__ = ["Restriction"] from typing import Sequence, Union import numpy as np import numpy.ma as np_ma from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module, to_cupy_conditional from pylops.utils.typing import DTypeLike, InputDimsLike, IntNDArray, NDArray def _compute_iavamask(dims, axis, iava, ncp): """Compute restriction mask when using cupy arrays""" otherdims = np.array(dims) otherdims = np.delete(otherdims, axis) iavamask = ncp.zeros(int(dims[axis]), dtype=int) iavamask[iava] = 1 iavamask = ncp.moveaxis( ncp.broadcast_to(iavamask, list(otherdims) + [dims[axis]]), -1, axis ) iavamask = ncp.where(iavamask.ravel() == 1)[0] return iavamask class Restriction(LinearOperator): r"""Restriction (or sampling) operator. Extract subset of values from input vector at locations ``iava`` in forward mode and place those values at locations ``iava`` in an otherwise zero vector in adjoint mode. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension iava : :obj:`list` or :obj:`numpy.ndarray` Integer indices of available samples for data selection. axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which restriction is applied to model. inplace : :obj:`bool`, optional Work inplace (``True``) or make a new copy (``False``). By default, data is a reference to the model (in forward) and model is a reference to the data (in adjoint). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) See Also -------- pylops.signalprocessing.Interp : Interpolation operator Notes ----- Extraction (or *sampling*) of a subset of :math:`N` values at locations ``iava`` from an input (or model) vector :math:`\mathbf{x}` of size :math:`M` can be expressed as: .. math:: y_i = x_{l_i} \quad \forall i=0,1,\ldots,N-1 where :math:`\mathbf{l}=[l_0, l_1,\ldots, l_{N-1}]` is a vector containing the indices of the original array at which samples are taken. Conversely, in adjoint mode the available values in the data vector :math:`\mathbf{y}` are placed at locations :math:`\mathbf{l}=[l_0, l_1,\ldots, l_{M-1}]` in the model vector: .. math:: x_{l_i} = y_i \quad \forall i=0,1,\ldots,N-1 and :math:`x_{j}=0` for :math:`j \neq l_i` (i.e., at all other locations in input vector). 
""" def __init__( self, dims: Union[int, InputDimsLike], iava: Union[IntNDArray, Sequence[int]], axis: int = -1, inplace: bool = True, dtype: DTypeLike = "float64", name: str = "R", ) -> None: ncp = get_array_module(iava) dims = _value_or_sized_to_tuple(dims) axis = normalize_axis_index(axis, len(dims)) dimsd = list(dims) # data dimensions dimsd[axis] = len(iava) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) iavareshape = np.ones(len(self.dims), dtype=int) iavareshape[axis] = len(iava) # currently cupy does not support put_along_axis, so we need to # explicitly create a list of indices in the n-dimensional # model space which will be used in _rmatvec to place the input if ncp != np: self.iavamask = _compute_iavamask(self.dims, axis, iava, ncp) self.inplace = inplace self.axis = axis self.iavareshape = iavareshape self.iava = ncp.asarray(iava) def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if not self.inplace: x = x.copy() x = ncp.reshape(x, self.dims) y = ncp.take(x, self.iava, axis=self.axis) y = y.ravel() return y def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if not self.inplace: x = x.copy() x = ncp.reshape(x, self.dimsd) if ncp == np: y = ncp.zeros(self.dims, dtype=self.dtype) ncp.put_along_axis( y, ncp.reshape(self.iava, self.iavareshape), x, axis=self.axis ) else: if not hasattr(self, "iavamask"): self.iava = to_cupy_conditional(x, self.iava) self.iavamask = _compute_iavamask(self.dims, self.axis, self.iava, ncp) y = ncp.zeros(int(self.shape[-1]), dtype=self.dtype) y[self.iavamask] = x.ravel() y = y.ravel() return y def mask(self, x: NDArray) -> NDArray: """Apply mask to input signal returning a signal of same size with values at ``iava`` locations and ``0`` at other locations Parameters ---------- x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray` Input array (can be either flattened or not) Returns ---------- y : :obj:`numpy.ma.core.MaskedArray` Masked array. 
""" ncp = get_array_module(x) if ncp != np: iava = ncp.asnumpy(self.iava) else: iava = self.iava.copy() y = np_ma.array(np.zeros(self.dims), mask=np.ones(self.dims), dtype=self.dtype) x = np.reshape(x, self.dims) x = np.swapaxes(x, self.axis, -1) y = np.swapaxes(y, self.axis, -1) y.mask[..., iava] = False if ncp == np: y[..., iava] = x[..., self.iava] else: y[..., iava] = ncp.asnumpy(x)[..., iava] y = np.swapaxes(y, -1, self.axis) return y
6,113
32.409836
90
py
pylops
pylops-master/pylops/basicoperators/functionoperator.py
__all__ = ["FunctionOperator"] from numbers import Integral from typing import Callable from pylops import LinearOperator from pylops.utils.typing import NDArray, ShapeLike class FunctionOperator(LinearOperator): r"""Function Operator. Simple wrapper to functions for forward `f` and adjoint `f_c` multiplication. Functions :math:`f` and :math:`f_c` are such that :math:`f:\mathbb{F}^m \to \mathbb{F}_c^n` and :math:`f_c:\mathbb{F}_c^n \to \mathbb{F}^m` where :math:`\mathbb{F}` and :math:`\mathbb{F}_c` are the underlying fields (e.g., :math:`\mathbb{R}` for real or :math:`\mathbb{C}` for complex) FunctionOperator can be called in the following ways: ``FunctionOperator(f, n)``, ``FunctionOperator(f, n, m)``, ``FunctionOperator(f, fc, n)``, and ``FunctionOperator(f, fc, n, m)``. The first two methods can only be used for forward modelling and will return ``NotImplementedError`` if the adjoint is called. The first and third method assume the matrix (or matrices) to be square. All methods can be called with the ``dtype`` keyword argument. Parameters ---------- f : :obj:`callable` Function for forward multiplication. fc : :obj:`callable`, optional Function for adjoint multiplication. n : :obj:`int`, optional Number of rows (length of data vector). m : :obj:`int`, optional Number of columns (length of model vector). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape :math:`[n \times m]` explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Examples -------- >>> from pylops.basicoperators import FunctionOperator >>> def forward(v): ... return np.array([2*v[0], 3*v[1]]) ... 
>>> A = FunctionOperator(forward, 2) >>> A <2x2 FunctionOperator with dtype=float64> >>> A.matvec(np.ones(2)) array([2., 3.]) >>> A @ np.ones(2) array([2., 3.]) """ def __init__( self, f: Callable, *args, **kwargs, ) -> None: # call is FunctionOperator(f, n) shape: ShapeLike if len(args) == 1: shape = (args[0], args[0]) fc = None elif len(args) == 2: # call is FunctionOperator(f, n, m) if isinstance(args[0], Integral): shape = (args[0], args[1]) fc = None # call is FunctionOperator(f, fc, n) else: fc = args[0] shape = (args[1], args[1]) # call is FunctionOperator(f, fc, n, m) elif len(args) == 3: fc = args[0] shape = args[1:3] super().__init__( dtype=kwargs.get("dtype", "float64"), shape=shape, name=kwargs.get("name", "F"), ) self.f = f self.fc = fc def _matvec(self, x: NDArray) -> NDArray: return self.f(x) def _rmatvec(self, x: NDArray) -> NDArray: if self.fc is None: raise NotImplementedError("Adjoint not implemented") return self.fc(x)
3,417
29.792793
80
py
pylops
pylops-master/pylops/basicoperators/block.py
__all__ = ["Block"] import multiprocessing as mp from typing import Iterable, Optional from pylops import LinearOperator from pylops.basicoperators import HStack, VStack from pylops.utils.typing import DTypeLike, NDArray class _Block(LinearOperator): """Block operator. Used to be able to provide operators from different libraries to Block. """ def __init__(self, ops: Iterable[Iterable[LinearOperator]], dtype: Optional[DTypeLike] = None, _HStack=HStack, _VStack=VStack, args_HStack: Optional[dict] = None, args_VStack: Optional[dict] = None, name: str = 'B'): if args_HStack is None: self.args_HStack = {} else: self.args_HStack = args_HStack if args_VStack is None: self.args_VStack = {} else: self.args_VStack = args_VStack hblocks = [_HStack(hblock, dtype=dtype, **self.args_HStack) for hblock in ops] super().__init__(Op=_VStack(ops=hblocks, dtype=dtype, **self.args_VStack), name=name) def _matvec(self, x: NDArray) -> NDArray: return super()._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x) class Block(_Block): r"""Block operator. Create a block operator from N lists of M linear operators each. Parameters ---------- ops : :obj:`list` List of lists of operators to be combined in block fashion. Alternatively, :obj:`numpy.ndarray` or :obj:`scipy.sparse` matrices can be passed in place of one or more operators. nproc : :obj:`int`, optional Number of processes used to evaluate the N operators in parallel using ``multiprocessing``. If ``nproc=1``, work in serial mode. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- In mathematics, a block or a partitioned matrix is a matrix that is interpreted as being broken into sections called blocks or submatrices. 
Similarly a block operator is composed of N sets of M linear operators each such that its application in forward mode leads to .. math:: \begin{bmatrix} \mathbf{L}_{1,1} & \mathbf{L}_{1,2} & \ldots & \mathbf{L}_{1,M} \\ \mathbf{L}_{2,1} & \mathbf{L}_{2,2} & \ldots & \mathbf{L}_{2,M} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf{L}_{N,1} & \mathbf{L}_{N,2} & \ldots & \mathbf{L}_{N,M} \end{bmatrix} \begin{bmatrix} \mathbf{x}_{1} \\ \mathbf{x}_{2} \\ \vdots \\ \mathbf{x}_{M} \end{bmatrix} = \begin{bmatrix} \mathbf{L}_{1,1} \mathbf{x}_{1} + \mathbf{L}_{1,2} \mathbf{x}_{2} + \mathbf{L}_{1,M} \mathbf{x}_{M} \\ \mathbf{L}_{2,1} \mathbf{x}_{1} + \mathbf{L}_{2,2} \mathbf{x}_{2} + \mathbf{L}_{2,M} \mathbf{x}_{M} \\ \vdots \\ \mathbf{L}_{N,1} \mathbf{x}_{1} + \mathbf{L}_{N,2} \mathbf{x}_{2} + \mathbf{L}_{N,M} \mathbf{x}_{M} \end{bmatrix} while its application in adjoint mode leads to .. math:: \begin{bmatrix} \mathbf{L}_{1,1}^H & \mathbf{L}_{2,1}^H & \ldots & \mathbf{L}_{N,1}^H \\ \mathbf{L}_{1,2}^H & \mathbf{L}_{2,2}^H & \ldots & \mathbf{L}_{N,2}^H \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf{L}_{1,M}^H & \mathbf{L}_{2,M}^H & \ldots & \mathbf{L}_{N,M}^H \end{bmatrix} \begin{bmatrix} \mathbf{y}_{1} \\ \mathbf{y}_{2} \\ \vdots \\ \mathbf{y}_{N} \end{bmatrix} = \begin{bmatrix} \mathbf{L}_{1,1}^H \mathbf{y}_{1} + \mathbf{L}_{2,1}^H \mathbf{y}_{2} + \mathbf{L}_{N,1}^H \mathbf{y}_{N} \\ \mathbf{L}_{1,2}^H \mathbf{y}_{1} + \mathbf{L}_{2,2}^H \mathbf{y}_{2} + \mathbf{L}_{N,2}^H \mathbf{y}_{N} \\ \vdots \\ \mathbf{L}_{1,M}^H \mathbf{y}_{1} + \mathbf{L}_{2,M}^H \mathbf{y}_{2} + \mathbf{L}_{N,M}^H \mathbf{y}_{N} \end{bmatrix} """ def __init__(self, ops: Iterable[Iterable[LinearOperator]], nproc: int = 1, dtype: Optional[DTypeLike] = None): if nproc > 1: self.pool = mp.Pool(processes=nproc) super().__init__(ops=ops, dtype=dtype, args_VStack={"nproc": nproc})
4,822
35.537879
93
py
pylops
pylops-master/pylops/basicoperators/sum.py
__all__ = ["Sum"] import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Sum(LinearOperator): r"""Sum operator. Sum along ``axis`` of a multi-dimensional array (at least 2 dimensions are required) in forward model, and spread along the same axis in adjoint mode. Parameters ---------- dims : :obj:`tuple` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which model is summed. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- Given a two dimensional array, the *Sum* operator re-arranges the input model into a multi-dimensional array of size ``dims`` and sums values along ``axis``: .. math:: y_j = \sum_i x_{i, j} In adjoint mode, the data is spread along the same direction: .. 
math:: x_{i, j} = y_j \quad \forall i=0, N-1 """ def __init__( self, dims: InputDimsLike, axis: int = -1, dtype: DTypeLike = "float64", name: str = "S", ) -> None: dims = _value_or_sized_to_tuple(dims) # to avoid reducing matvec to a scalar dims = (dims[0], 1) if len(dims) == 1 else dims self.axis = axis # data dimensions dimsd = list(dims).copy() dimsd.pop(self.axis) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) # array of ones with dims of model in self.axis for np.tile in adjoint mode self.tile = np.ones(len(self.dims), dtype=int) self.tile[self.axis] = self.dims[self.axis] @reshaped def _matvec(self, x: NDArray) -> NDArray: return x.sum(axis=self.axis) @reshaped def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.expand_dims(x, self.axis) y = ncp.tile(y, self.tile) return y
2,511
26.911111
83
py
pylops
pylops-master/pylops/basicoperators/linearregression.py
__all__ = ["LinearRegression"] import logging import numpy.typing as npt from pylops.basicoperators import Regression from pylops.utils.typing import DTypeLike logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class LinearRegression(Regression): r"""Linear regression. Creates an operator that applies linear regression to a set of points. Values along the :math:`t`-axis must be provided while initializing the operator. Intercept and gradient form the model vector to be provided in forward mode, while the values of the regression line curve shall be provided in adjoint mode. Parameters ---------- taxis : :obj:`numpy.ndarray` Elements along the :math:`t`-axis. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ TypeError If ``taxis`` is not :obj:`numpy.ndarray`. See Also -------- Regression: Polynomial regression Notes ----- The LinearRegression operator solves the following problem: .. math:: y_i = x_0 + x_1 t_i \qquad \forall i=0,1,\ldots,N-1 We can express this problem in a matrix form .. math:: \mathbf{y}= \mathbf{A} \mathbf{x} where .. math:: \mathbf{y}= [y_0, y_1,\ldots,y_{N-1}]^T, \qquad \mathbf{x}= [x_0, x_1]^T and .. math:: \mathbf{A} = \begin{bmatrix} 1 & t_{0} \\ 1 & t_{1} \\ \vdots & \vdots \\ 1 & t_{N-1} \end{bmatrix} Note that this is a particular case of the :py:class:`pylops.Regression` operator and it is in fact just a lazy call of that operator with ``order=1``. """ def __init__(self, taxis: npt.ArrayLike, dtype: DTypeLike = "float64", name: str = 'L'): super().__init__(taxis=taxis, order=1, dtype=dtype, name=name)
2,123
25.222222
92
py
pylops
pylops-master/pylops/basicoperators/regression.py
__all__ = ["Regression"] import logging import numpy as np import numpy.typing as npt from pylops import LinearOperator from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class Regression(LinearOperator): r"""Polynomial regression. Creates an operator that applies polynomial regression to a set of points. Values along the :math:`t`-axis must be provided while initializing the operator. The coefficients of the polynomial regression form the model vector to be provided in forward mode, while the values of the regression curve shall be provided in adjoint mode. Parameters ---------- taxis : :obj:`numpy.ndarray` Elements along the :math:`t`-axis. order : :obj:`int` Order of the regressed polynomial. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ TypeError If ``taxis`` is not :obj:`numpy.ndarray`. See Also -------- LinearRegression: Linear regression Notes ----- The Regression operator solves the following problem: .. math:: y_i = \sum_{n=0}^\text{order} x_n t_i^n \qquad \forall i=0,1,\ldots,N-1 where :math:`N` represents the number of points in ``taxis``. We can express this problem in a matrix form .. math:: \mathbf{y}= \mathbf{A} \mathbf{x} where .. math:: \mathbf{y}= [y_0, y_1,\ldots,y_{N-1}]^T, \qquad \mathbf{x}= [x_0, x_1,\ldots,x_\text{order}]^T and .. 
math:: \mathbf{A} = \begin{bmatrix} 1 & t_{0} & t_{0}^2 & \ldots & t_{0}^\text{order} \\ 1 & t_{1} & t_{1}^2 & \ldots & t_{1}^\text{order} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & t_{N-1} & t_{N-1}^2 & \ldots & t_{N-1}^\text{order} \end{bmatrix}_{N\times \text{order}+1} """ def __init__( self, taxis: npt.ArrayLike, order: int, dtype: DTypeLike = "float64", name: str = "R", ) -> None: ncp = get_array_module(taxis) if not isinstance(taxis, ncp.ndarray): logging.error("t must be ndarray...") raise TypeError("t must be ndarray...") else: self.taxis = taxis self.order = order shape = (len(self.taxis), self.order + 1) super().__init__(dtype=np.dtype(dtype), shape=shape, name=name) def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros_like(self.taxis) for i in range(self.order + 1): y += x[i] * self.taxis**i return y def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) return ncp.vstack([ncp.dot(self.taxis**i, x) for i in range(self.order + 1)]) def apply(self, t: npt.ArrayLike, x: NDArray) -> NDArray: """Return values along y-axis given certain ``t`` location(s) along t-axis and regression coefficients ``x`` Parameters ---------- t : :obj:`numpy.ndarray` Elements along the t-axis. x : :obj:`numpy.ndarray` Regression coefficients Returns ---------- y : :obj:`numpy.ndarray` Values along y-axis """ torig = self.taxis.copy() self.taxis = t y = self._matvec(x) self.taxis = torig return y
3,918
27.605839
85
py
pylops
pylops-master/pylops/basicoperators/real.py
__all__ = ["Real"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Real(LinearOperator): r"""Real operator. Return the real component of the input. The adjoint returns a complex number with the same real component as the input and zero imaginary component. Parameters ---------- dims : :obj:`int` or :obj:`tuple` Number of samples for each dimension dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- In forward mode: .. math:: y_{i} = \Re\{x_{i}\} \quad \forall i=0,\ldots,N-1 In adjoint mode: .. math:: x_{i} = \Re\{y_{i}\} + 0i \quad \forall i=0,\ldots,N-1 """ def __init__( self, dims: Union[int, InputDimsLike], dtype: DTypeLike = "complex128", name: str = "R", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__( dtype=np.dtype(dtype), dims=dims, dimsd=dims, clinear=False, name=name ) self.rdtype = np.real(np.ones(1, self.dtype)).dtype def _matvec(self, x: NDArray) -> NDArray: return x.real.astype(self.rdtype) def _rmatvec(self, x: NDArray) -> NDArray: return (x.real + 0j).astype(self.dtype)
1,773
24.342857
82
py
pylops
pylops-master/pylops/basicoperators/hstack.py
__all__ = ["HStack"] import multiprocessing as mp import numpy as np import scipy as sp # need to check scipy version since the interface submodule changed into # _interface from scipy>=1.8.0 sp_version = sp.__version__.split(".") if int(sp_version[0]) <= 1 and int(sp_version[1]) < 8: from scipy.sparse.linalg.interface import LinearOperator as spLinearOperator from scipy.sparse.linalg.interface import _get_dtype else: from scipy.sparse.linalg._interface import _get_dtype from scipy.sparse.linalg._interface import ( LinearOperator as spLinearOperator, ) from typing import Optional, Sequence from pylops import LinearOperator from pylops.basicoperators import MatrixMult from pylops.utils.backend import get_array_module from pylops.utils.typing import NDArray def _matvec_rmatvec_map(op, x: NDArray) -> NDArray: """matvec/rmatvec for multiprocessing""" return op(x).squeeze() class HStack(LinearOperator): r"""Horizontal stacking. Stack a set of N linear operators horizontally. Parameters ---------- ops : :obj:`list` Linear operators to be stacked. Alternatively, :obj:`numpy.ndarray` or :obj:`scipy.sparse` matrices can be passed in place of one or more operators. nproc : :obj:`int`, optional Number of processes used to evaluate the N operators in parallel using ``multiprocessing``. If ``nproc=1``, work in serial mode. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ValueError If ``ops`` have different number of columns Notes ----- An horizontal stack of N linear operators is created such as its application in forward mode leads to .. 
math:: \begin{bmatrix} \mathbf{L}_{1} & \mathbf{L}_{2} & \ldots & \mathbf{L}_{N} \end{bmatrix} \begin{bmatrix} \mathbf{x}_{1} \\ \mathbf{x}_{2} \\ \vdots \\ \mathbf{x}_{N} \end{bmatrix} = \mathbf{L}_{1} \mathbf{x}_1 + \mathbf{L}_{2} \mathbf{x}_2 + \ldots + \mathbf{L}_{N} \mathbf{x}_N while its application in adjoint mode leads to .. math:: \begin{bmatrix} \mathbf{L}_{1}^H \\ \mathbf{L}_{2}^H \\ \vdots \\ \mathbf{L}_{N}^H \end{bmatrix} \mathbf{y} = \begin{bmatrix} \mathbf{L}_{1}^H \mathbf{y} \\ \mathbf{L}_{2}^H \mathbf{y} \\ \vdots \\ \mathbf{L}_{N}^H \mathbf{y} \end{bmatrix} = \begin{bmatrix} \mathbf{x}_{1} \\ \mathbf{x}_{2} \\ \vdots \\ \mathbf{x}_{N} \end{bmatrix} """ def __init__( self, ops: Sequence[LinearOperator], nproc: int = 1, dtype: Optional[str] = None, ) -> None: self.ops = ops mops = np.zeros(len(ops), dtype=int) for iop, oper in enumerate(ops): if not isinstance(oper, (LinearOperator, spLinearOperator)): self.ops[iop] = MatrixMult(oper, dtype=oper.dtype) mops[iop] = self.ops[iop].shape[1] self.mops = int(mops.sum()) nops = [oper.shape[0] for oper in self.ops] if len(set(nops)) > 1: raise ValueError("operators have different number of rows") self.nops = int(nops[0]) self.mmops = np.insert(np.cumsum(mops), 0, 0) # create pool for multiprocessing self._nproc = nproc self.pool = None if self.nproc > 1: self.pool = mp.Pool(processes=nproc) dtype = _get_dtype(self.ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__(dtype=dtype, shape=(self.nops, self.mops), clinear=clinear) @property def nproc(self) -> int: return self._nproc @nproc.setter def nproc(self, nprocnew: int): if self._nproc > 1: self.pool.close() if nprocnew > 1: self.pool = mp.Pool(processes=nprocnew) self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y += oper.matvec(x[self.mmops[iop] : 
self.mmops[iop + 1]]).squeeze() return y def _rmatvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y[self.mmops[iop] : self.mmops[iop + 1]] = oper.rmatvec(x).squeeze() return y def _matvec_multiproc(self, x: NDArray) -> NDArray: ys = self.pool.starmap( _matvec_rmatvec_map, [ (oper._matvec, x[self.mmops[iop] : self.mmops[iop + 1]]) for iop, oper in enumerate(self.ops) ], ) y = np.sum(ys, axis=0) return y def _rmatvec_multiproc(self, x: NDArray) -> NDArray: ys = self.pool.starmap( _matvec_rmatvec_map, [(oper._rmatvec, x) for iop, oper in enumerate(self.ops)], ) y = np.hstack(ys) return y def _matvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._matvec_serial(x) else: y = self._matvec_multiproc(x) return y def _rmatvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._rmatvec_serial(x) else: y = self._rmatvec_multiproc(x) return y
5,929
29.885417
84
py
pylops
pylops-master/pylops/basicoperators/transpose.py
__all__ = ["Transpose"] import numpy as np from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Transpose(LinearOperator): r"""Transpose operator. Transpose axes of a multi-dimensional array. This operator works with flattened input model (or data), which are however multi-dimensional in nature and will be reshaped and treated as such in both forward and adjoint modes. Parameters ---------- dims : :obj:`tuple`, optional Number of samples for each dimension axes : :obj:`tuple`, optional Direction along which transposition is applied dtype : :obj:`str`, optional Type of elements in input array name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ValueError If ``axes`` contains repeated dimensions (or a dimension is missing) Notes ----- The Transpose operator reshapes the input model into a multi-dimensional array of size ``dims`` and transposes (or swaps) its axes as defined in ``axes``. Similarly, in adjoint mode the data is reshaped into a multi-dimensional array whose size is a permuted version of ``dims`` defined by ``axes``. The array is then rearragned into the original model dimensions ``dims``. 
""" def __init__( self, dims: InputDimsLike, axes: InputDimsLike, dtype: DTypeLike = "float64", name: str = "T", ) -> None: dims = _value_or_sized_to_tuple(dims) ndims = len(dims) self.axes = [normalize_axis_index(ax, ndims) for ax in axes] # find out if all axes are present only once in axes if len(np.unique(self.axes)) != ndims: raise ValueError("axes must contain each direction once") # find out how axes should be transposed in adjoint mode axesd = np.empty(ndims, dtype=int) axesd[self.axes] = np.arange(ndims, dtype=int) dimsd = np.empty(ndims, dtype=int) dimsd[axesd] = dims self.axesd = list(axesd) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) @reshaped def _matvec(self, x: NDArray) -> NDArray: return x.transpose(self.axes) @reshaped def _rmatvec(self, x: NDArray) -> NDArray: return x.transpose(self.axesd)
2,822
30.366667
82
py
pylops
pylops-master/pylops/basicoperators/smoothing1d.py
__all__ = ["Smoothing1D"] from typing import Union import numpy as np from pylops.signalprocessing import Convolve1D from pylops.utils.typing import DTypeLike, InputDimsLike class Smoothing1D(Convolve1D): r"""1D Smoothing. Apply smoothing to model (and data) to a multi-dimensional array along ``axis``. Parameters ---------- nsmooth : :obj:`int` Length of smoothing operator (must be odd) dims : :obj:`tuple` or :obj:`int` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which model (and data) are smoothed. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The Smoothing1D operator is a special type of convolutional operator that convolves the input model (or data) with a constant filter of size :math:`n_\text{smooth}`: .. math:: \mathbf{f} = [ 1/n_\text{smooth}, 1/n_\text{smooth}, ..., 1/n_\text{smooth} ] When applied to the first direction: .. math:: y[i,j,k] = 1/n_\text{smooth} \sum_{l=-(n_\text{smooth}-1)/2}^{(n_\text{smooth}-1)/2} x[l,j,k] Similarly when applied to the second direction: .. math:: y[i,j,k] = 1/n_\text{smooth} \sum_{l=-(n_\text{smooth}-1)/2}^{(n_\text{smooth}-1)/2} x[i,l,k] and the third direction: .. math:: y[i,j,k] = 1/n_\text{smooth} \sum_{l=-(n_\text{smooth}-1)/2}^{(n_\text{smooth}-1)/2} x[i,j,l] Note that since the filter is symmetrical, the *Smoothing1D* operator is self-adjoint. """ def __init__(self, nsmooth: int, dims: Union[int, InputDimsLike], axis: int = -1, dtype: DTypeLike = "float64", name: str = 'S'): if nsmooth % 2 == 0: nsmooth += 1 h = np.ones(nsmooth) / float(nsmooth) offset = (nsmooth - 1) // 2 super().__init__(dims=dims, h=h, axis=axis, offset=offset, dtype=dtype, name=name)
2,200
27.584416
92
py
pylops
pylops-master/pylops/basicoperators/causalintegration.py
__all__ = ["CausalIntegration"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class CausalIntegration(LinearOperator): r"""Causal integration. Apply causal integration to a multi-dimensional array along ``axis``. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which the model is integrated. sampling : :obj:`float`, optional Sampling step ``dx``. kind : :obj:`str`, optional Integration kind (``full``, ``half``, or ``trapezoidal``). removefirst : :obj:`bool`, optional Remove first sample (``True``) or not (``False``). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The CausalIntegration operator applies a causal integration to any chosen direction of a multi-dimensional array. For simplicity, given a one dimensional array, the causal integration is: .. math:: y(t) = \int\limits_{-\infty}^t x(\tau) \,\mathrm{d}\tau which can be discretised as : .. math:: y[i] = \sum_{j=0}^i x[j] \,\Delta t or .. math:: y[i] = \left(\sum_{j=0}^{i-1} x[j] + 0.5x[i]\right) \,\Delta t or .. math:: y[i] = \left(\sum_{j=1}^{i-1} x[j] + 0.5x[0] + 0.5x[i]\right) \,\Delta t where :math:`\Delta t` is the ``sampling`` interval, and assuming the signal is zero before sample :math:`j=0`. In our implementation, the choice to add :math:`x[i]` or :math:`0.5x[i]` is made by selecting ``kind=full`` or ``kind=half``, respectively. 
The choice to add :math:`0.5x[i]` and :math:`0.5x[0]` instead of made by selecting the ``kind=trapezoidal``. Note that the causal integral of a signal will depend, up to a constant, on causal start of the signal. For example if :math:`x(\tau) = t^2` the resulting indefinite integration is: .. math:: y(t) = \int \tau^2 \,\mathrm{d}\tau = \frac{t^3}{3} + C However, if we apply a first derivative to :math:`y` always obtain: .. math:: x(t) = \frac{\mathrm{d}y}{\mathrm{d}t} = t^2 no matter the choice of :math:`C`. """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, sampling: float = 1, kind: str = "full", removefirst: bool = False, dtype: DTypeLike = "float64", name: str = "C", ) -> None: self.axis = axis self.sampling = sampling # backwards compatible self.kind = kind self.removefirst = removefirst dims = _value_or_sized_to_tuple(dims) dimsd = list(dims) if self.removefirst: dimsd[self.axis] -= 1 super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) @reshaped(swapaxis=True) def _matvec(self, x: NDArray) -> NDArray: y = self.sampling * np.cumsum(x, axis=-1) if self.kind in ("half", "trapezoidal"): y -= self.sampling * x / 2.0 if self.kind == "trapezoidal": y[..., 1:] -= self.sampling * x[..., 0:1] / 2.0 if self.removefirst: y = y[..., 1:] return y @reshaped(swapaxis=True) def _rmatvec(self, x: NDArray) -> NDArray: if self.removefirst: x = np.insert(x, 0, 0, axis=-1) xflip = np.flip(x, axis=-1) if self.kind == "half": y = self.sampling * (np.cumsum(xflip, axis=-1) - xflip / 2.0) elif self.kind == "trapezoidal": y = self.sampling * (np.cumsum(xflip, axis=-1) - xflip / 2.0) y[..., -1] = self.sampling * np.sum(xflip, axis=-1) / 2.0 else: y = self.sampling * np.cumsum(xflip, axis=-1) y = np.flip(y, axis=-1) return y
4,417
30.557143
88
py
pylops
pylops-master/pylops/basicoperators/symmetrize.py
__all__ = ["Symmetrize"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Symmetrize(LinearOperator): r"""Symmetrize along an axis. Symmetrize a multi-dimensional array along ``axis``. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension (``None`` if only one dimension is available) axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which model is symmetrized. dtype : :obj:`str`, optional Type of elements in input array name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The Symmetrize operator constructs a symmetric array given an input model in forward mode, by pre-pending the input model in reversed order. For simplicity, given a one dimensional array, the forward operation can be expressed as: .. math:: y[i] = \begin{cases} x[i-N+1],& i\geq N\\ x[N-1-i],& \text{otherwise} \end{cases} for :math:`i=0,1,2,\ldots,2N-2`, where :math:`N` is the dimension of the input model. In adjoint mode, the Symmetrize operator assigns the sums of the elements in position :math:`N-1-i` and :math:`N-1+i` to position :math:`i` as follows: .. math:: \begin{multline} x[i] = y[N-1-i]+y[N-1+i] \quad \forall i=0,2,\ldots,N-1 \end{multline} apart from the central sample where :math:`x[0] = y[N-1]`. 
""" def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, dtype: DTypeLike = "float64", name: str = "S", ) -> None: dims = _value_or_sized_to_tuple(dims) self.axis = axis self.nsym = dims[self.axis] dimsd = list(dims) dimsd[self.axis] = 2 * dims[self.axis] - 1 super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) @reshaped(swapaxis=True) def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.dimsd, dtype=self.dtype) y = y.swapaxes(self.axis, -1) y[..., self.nsym - 1 :] = x y[..., : self.nsym - 1] = x[..., -1:0:-1] return y @reshaped(swapaxis=True) def _rmatvec(self, x: NDArray) -> NDArray: y = x[..., self.nsym - 1 :].copy() y[..., 1:] += x[..., self.nsym - 2 :: -1] return y
2,949
28.5
82
py
pylops
pylops-master/pylops/basicoperators/secondderivative.py
__all__ = ["SecondDerivative"] from typing import Callable, Union import numpy as np from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class SecondDerivative(LinearOperator): r"""Second derivative. Apply a second derivative using a three-point stencil finite-difference approximation along ``axis``. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension (``None`` if only one dimension is available) axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which derivative is applied. sampling : :obj:`float`, optional Sampling step :math:`\Delta x`. kind : :obj:`str`, optional Derivative kind (``forward``, ``centered``, or ``backward``). edge : :obj:`bool`, optional Use shifted derivatives at edges (``True``) or ignore them (``False``). This is currently only available for centered derivative dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The SecondDerivative operator applies a second derivative to any chosen direction of a multi-dimensional array. For simplicity, given a one dimensional array, the second-order centered first derivative is: .. math:: y[i] = (x[i+1] - 2x[i] + x[i-1]) / \Delta x^2 while the second-order forward stencil is: .. math:: y[i] = (x[i+2] - 2x[i+1] + x[i]) / \Delta x^2 and the second-order backward stencil is: .. 
math:: y[i] = (x[i] - 2x[i-1] + x[i-2]) / \Delta x^2 """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, sampling: float = 1.0, kind: str = "centered", edge: bool = False, dtype: DTypeLike = "float64", name: str = "S", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) self.axis = normalize_axis_index(axis, len(self.dims)) self.sampling = sampling self.kind = kind self.edge = edge self._register_multiplications(self.kind) def _register_multiplications( self, kind: str, ) -> None: # choose _matvec and _rmatvec kind self._hmatvec: Callable self._hrmatvec: Callable if kind == "forward": self._hmatvec = self._matvec_forward self._hrmatvec = self._rmatvec_forward elif kind == "centered": self._hmatvec = self._matvec_centered self._hrmatvec = self._rmatvec_centered elif kind == "backward": self._hmatvec = self._matvec_backward self._hrmatvec = self._rmatvec_backward else: raise NotImplementedError( "'kind' must be 'forward', 'centered' or 'backward'" ) def _matvec(self, x: NDArray) -> NDArray: return self._hmatvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return self._hrmatvec(x) @reshaped(swapaxis=True) def _matvec_forward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-2] = x[..., 2:] - 2 * x[..., 1:-1] + x[..., :-2] y /= self.sampling**2 return y @reshaped(swapaxis=True) def _rmatvec_forward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-2] += x[..., :-2] y[..., 1:-1] -= 2 * x[..., :-2] y[..., 2:] += x[..., :-2] y /= self.sampling**2 return y @reshaped(swapaxis=True) def _matvec_centered(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., 1:-1] = x[..., 2:] - 2 * x[..., 1:-1] + x[..., :-2] if self.edge: y[..., 0] = x[..., 0] - 2 * x[..., 1] + x[..., 2] y[..., -1] = x[..., -3] - 2 * x[..., -2] + x[..., -1] y /= 
self.sampling**2 return y @reshaped(swapaxis=True) def _rmatvec_centered(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-2] += x[..., 1:-1] y[..., 1:-1] -= 2 * x[..., 1:-1] y[..., 2:] += x[..., 1:-1] if self.edge: y[..., 0] += x[..., 0] y[..., 1] -= 2 * x[..., 0] y[..., 2] += x[..., 0] y[..., -3] += x[..., -1] y[..., -2] -= 2 * x[..., -1] y[..., -1] += x[..., -1] y /= self.sampling**2 return y @reshaped(swapaxis=True) def _matvec_backward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., 2:] = x[..., 2:] - 2 * x[..., 1:-1] + x[..., :-2] y /= self.sampling**2 return y @reshaped(swapaxis=True) def _rmatvec_backward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-2] += x[..., 2:] y[..., 1:-1] -= 2 * x[..., 2:] y[..., 2:] += x[..., 2:] y /= self.sampling**2 return y
5,864
30.702703
81
py
pylops
pylops-master/pylops/basicoperators/gradient.py
__all__ = ["Gradient"] from typing import Union from pylops import LinearOperator from pylops.basicoperators import FirstDerivative, VStack from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Gradient(LinearOperator): r"""Gradient. Apply gradient operator to a multi-dimensional array. .. note:: At least 2 dimensions are required, use :py:func:`pylops.FirstDerivative` for 1d arrays. Parameters ---------- dims : :obj:`tuple` Number of samples for each dimension. sampling : :obj:`tuple`, optional Sampling steps for each direction. edge : :obj:`bool`, optional Use reduced order derivative at edges (``True``) or ignore them (``False``). kind : :obj:`str`, optional Derivative kind (``forward``, ``centered``, or ``backward``). dtype : :obj:`str`, optional Type of elements in input array. Notes ----- The Gradient operator applies a first-order derivative to each dimension of a multi-dimensional array in forward mode. For simplicity, given a three dimensional array, the Gradient in forward mode using a centered stencil can be expressed as: .. math:: \mathbf{g}_{i, j, k} = (f_{i+1, j, k} - f_{i-1, j, k}) / d_1 \mathbf{i_1} + (f_{i, j+1, k} - f_{i, j-1, k}) / d_2 \mathbf{i_2} + (f_{i, j, k+1} - f_{i, j, k-1}) / d_3 \mathbf{i_3} which is discretized as follows: .. math:: \mathbf{g} = \begin{bmatrix} \mathbf{df_1} \\ \mathbf{df_2} \\ \mathbf{df_3} \end{bmatrix} In adjoint mode, the adjoints of the first derivatives along different axes are instead summed together. 
""" def __init__(self, dims: Union[int, InputDimsLike], sampling: int = 1, edge: bool = False, kind: str = "centered", dtype: DTypeLike = "float64", name: str = 'G'): dims = _value_or_sized_to_tuple(dims) ndims = len(dims) sampling = _value_or_sized_to_tuple(sampling, repeat=ndims) self.sampling = sampling self.edge = edge self.kind = kind Op = VStack([FirstDerivative( dims=dims, axis=iax, sampling=sampling[iax], edge=edge, kind=kind, dtype=dtype, ) for iax in range(ndims) ]) super().__init__(Op=Op, dims=dims, dimsd=(ndims, *dims), name=name) def _matvec(self, x: NDArray) -> NDArray: return super()._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x)
2,781
29.571429
79
py
pylops
pylops-master/pylops/basicoperators/vstack.py
__all__ = ["VStack"] import multiprocessing as mp import numpy as np import scipy as sp # need to check scipy version since the interface submodule changed into # _interface from scipy>=1.8.0 sp_version = sp.__version__.split(".") if int(sp_version[0]) <= 1 and int(sp_version[1]) < 8: from scipy.sparse.linalg.interface import LinearOperator as spLinearOperator from scipy.sparse.linalg.interface import _get_dtype else: from scipy.sparse.linalg._interface import _get_dtype from scipy.sparse.linalg._interface import ( LinearOperator as spLinearOperator, ) from typing import Callable, Optional, Sequence from pylops import LinearOperator from pylops.basicoperators import MatrixMult from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, NDArray def _matvec_rmatvec_map(op: Callable, x: NDArray) -> NDArray: """matvec/rmatvec for multiprocessing""" return op(x).squeeze() class VStack(LinearOperator): r"""Vertical stacking. Stack a set of N linear operators vertically. Parameters ---------- ops : :obj:`list` Linear operators to be stacked. Alternatively, :obj:`numpy.ndarray` or :obj:`scipy.sparse` matrices can be passed in place of one or more operators. nproc : :obj:`int`, optional Number of processes used to evaluate the N operators in parallel using ``multiprocessing``. If ``nproc=1``, work in serial mode. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ValueError If ``ops`` have different number of rows Notes ----- A vertical stack of N linear operators is created such as its application in forward mode leads to .. 
math:: \begin{bmatrix} \mathbf{L}_{1} \\ \mathbf{L}_{2} \\ \vdots \\ \mathbf{L}_{N} \end{bmatrix} \mathbf{x} = \begin{bmatrix} \mathbf{L}_{1} \mathbf{x} \\ \mathbf{L}_{2} \mathbf{x} \\ \vdots \\ \mathbf{L}_{N} \mathbf{x} \end{bmatrix} = \begin{bmatrix} \mathbf{y}_{1} \\ \mathbf{y}_{2} \\ \vdots \\ \mathbf{y}_{N} \end{bmatrix} while its application in adjoint mode leads to .. math:: \begin{bmatrix} \mathbf{L}_{1}^H & \mathbf{L}_{2}^H & \ldots & \mathbf{L}_{N}^H \end{bmatrix} \begin{bmatrix} \mathbf{y}_{1} \\ \mathbf{y}_{2} \\ \vdots \\ \mathbf{y}_{N} \end{bmatrix} = \mathbf{L}_{1}^H \mathbf{y}_1 + \mathbf{L}_{2}^H \mathbf{y}_2 + \ldots + \mathbf{L}_{N}^H \mathbf{y}_N """ def __init__( self, ops: Sequence[LinearOperator], nproc: int = 1, dtype: Optional[DTypeLike] = None, ) -> None: self.ops = ops nops = np.zeros(len(self.ops), dtype=int) for iop, oper in enumerate(ops): if not isinstance(oper, (LinearOperator, spLinearOperator)): self.ops[iop] = MatrixMult(oper, dtype=oper.dtype) nops[iop] = self.ops[iop].shape[0] self.nops = int(nops.sum()) mops = [oper.shape[1] for oper in self.ops] if len(set(mops)) > 1: raise ValueError("operators have different number of columns") self.mops = int(mops[0]) self.nnops = np.insert(np.cumsum(nops), 0, 0) # create pool for multiprocessing self._nproc = nproc self.pool = None if self.nproc > 1: self.pool = mp.Pool(processes=nproc) dtype = _get_dtype(self.ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__(dtype=dtype, shape=(self.nops, self.mops), clinear=clinear) @property def nproc(self) -> int: return self._nproc @nproc.setter def nproc(self, nprocnew: int): if self._nproc > 1: self.pool.close() if nprocnew > 1: self.pool = mp.Pool(processes=nprocnew) self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y[self.nnops[iop] : 
self.nnops[iop + 1]] = oper.matvec(x).squeeze() return y def _rmatvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y += oper.rmatvec(x[self.nnops[iop] : self.nnops[iop + 1]]).squeeze() return y def _matvec_multiproc(self, x: NDArray) -> NDArray: ys = self.pool.starmap( _matvec_rmatvec_map, [(oper._matvec, x) for iop, oper in enumerate(self.ops)], ) y = np.hstack(ys) return y def _rmatvec_multiproc(self, x: NDArray) -> NDArray: ys = self.pool.starmap( _matvec_rmatvec_map, [ (oper._rmatvec, x[self.nnops[iop] : self.nnops[iop + 1]]) for iop, oper in enumerate(self.ops) ], ) y = np.sum(ys, axis=0) return y def _matvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._matvec_serial(x) else: y = self._matvec_multiproc(x) return y def _rmatvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._rmatvec_serial(x) else: y = self._rmatvec_multiproc(x) return y
5,964
30.067708
84
py
pylops
pylops-master/pylops/basicoperators/__init__.py
""" Basic Linear Operators ====================== The subpackage basicoperators extends some of the basic linear algebra operations provided by numpy providing forward and adjoint functionalities. A list of operators present in pylops.basicoperators : MatrixMult Matrix multiplication. Identity Identity operator. Zero Zero operator. Diagonal Diagonal operator. Transpose Transpose operator. Flip Flip along an axis. Roll Roll along an axis. Pad Pad operator. Sum Sum operator. Symmetrize Symmetrize along an axis. Restriction Restriction (or sampling) operator. Regression Polynomial regression. LinearRegression Linear regression. CausalIntegration Causal integration. Spread Spread operator. VStack Vertical stacking. HStack Horizontal stacking. Block Block operator. BlockDiag Block-diagonal operator. Kronecker Kronecker operator. Real Real operator. Imag Imag operator. Conj Conj operator. Smoothing1D 1D Smoothing. Smoothing2D 2D Smoothing. FirstDerivative First derivative. SecondDerivative Second derivative. Laplacian Laplacian. Gradient Gradient. FirstDirectionalDerivative First Directional derivative. SecondDirectionalDerivative Second Directional derivative. 
""" from .functionoperator import * from .memoizeoperator import * from .regression import * from .linearregression import * from .matrixmult import * from .diagonal import * from .zero import * from .identity import * from .restriction import * from .flip import * from .symmetrize import * from .spread import * from .transpose import * from .roll import * from .pad import * from .sum import * from .vstack import * from .hstack import * from .block import * from .blockdiag import * from .kronecker import * from .real import * from .imag import * from .conj import * from .smoothing1d import * from .smoothing2d import * from .causalintegration import * from .firstderivative import * from .secondderivative import * from .laplacian import * from .gradient import * from .directionalderivative import * __all__ = [ "FunctionOperator", "MemoizeOperator", "Regression", "LinearRegression", "MatrixMult", "Diagonal", "Zero", "Identity", "Restriction", "Flip", "Symmetrize", "Spread", "Transpose", "Roll", "Pad", "Sum", "VStack", "HStack", "Block", "BlockDiag", "Kronecker", "Real", "Imag", "Conj", "Smoothing1D", "Smoothing2D", "CausalIntegration", "FirstDerivative", "SecondDerivative", "Laplacian", "Gradient", "FirstDirectionalDerivative", "SecondDirectionalDerivative", ]
3,381
29.468468
75
py
pylops
pylops-master/pylops/basicoperators/flip.py
__all__ = ["Flip"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Flip(LinearOperator): r"""Flip along an axis. Flip a multi-dimensional array along ``axis``. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which model is flipped. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The Flip operator flips the input model (and data) along any chosen direction. For simplicity, given a one dimensional array, in forward mode this is equivalent to: .. math:: y[i] = x[N-1-i] \quad \forall i=0,1,2,\ldots,N-1 where :math:`N` is the dimension of the input model along ``axis``. As this operator is self-adjoint, :math:`x` and :math:`y` in the equation above are simply swapped in adjoint mode. """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, dtype: DTypeLike = "float64", name: str = "F", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) self.axis = axis @reshaped(swapaxis=True) def _matvec(self, x: NDArray) -> NDArray: y = np.flip(x, axis=-1) return y def _rmatvec(self, x: NDArray) -> NDArray: return self._matvec(x)
2,039
26.567568
91
py
pylops
pylops-master/pylops/basicoperators/laplacian.py
__all__ = ["Laplacian"] from typing import Tuple from pylops.utils.typing import NDArray from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.basicoperators import SecondDerivative from pylops.utils.typing import DTypeLike, InputDimsLike class Laplacian(LinearOperator): r"""Laplacian. Apply second-order centered Laplacian operator to a multi-dimensional array. .. note:: At least 2 dimensions are required, use :py:func:`pylops.SecondDerivative` for 1d arrays. Parameters ---------- dims : :obj:`tuple` Number of samples for each dimension. axes : :obj:`int`, optional .. versionadded:: 2.0.0 Axes along which the Laplacian is applied. weights : :obj:`tuple`, optional Weight to apply to each direction (real laplacian operator if ``weights=(1, 1)``) sampling : :obj:`tuple`, optional Sampling steps for each direction edge : :obj:`bool`, optional Use reduced order derivative at edges (``True``) or ignore them (``False``) for centered derivative kind : :obj:`str`, optional Derivative kind (``forward``, ``centered``, or ``backward``) dtype : :obj:`str`, optional Type of elements in input array. Raises ------ ValueError If ``axes``. ``weights``, and ``sampling`` do not have the same size Notes ----- The Laplacian operator applies a second derivative along two directions of a multi-dimensional array. For simplicity, given a two dimensional array, the Laplacian is: .. math:: y[i, j] = (x[i+1, j] + x[i-1, j] + x[i, j-1] +x[i, j+1] - 4x[i, j]) / (\Delta x \Delta y) """ def __init__(self, dims: InputDimsLike, axes: InputDimsLike = (-2, -1), weights: Tuple[float, ...] = (1, 1), sampling: Tuple[float, ...] 
= (1, 1), edge: bool = False, kind: str = "centered", dtype: DTypeLike = "float64", name: str = "L"): axes = tuple(normalize_axis_index(ax, len(dims)) for ax in axes) if not (len(axes) == len(weights) == len(sampling)): raise ValueError("axes, weights, and sampling have different size") self.axes = axes self.weights = weights self.sampling = sampling self.edge = edge self.kind = kind Op = self._calc_l2op(dims=dims, axes=axes, sampling=sampling, edge=edge, kind=kind, dtype=dtype, weights=weights) super().__init__(Op=Op, name=name) def _matvec(self, x: NDArray) -> NDArray: return super()._matvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return super()._rmatvec(x) @staticmethod def _calc_l2op(dims: InputDimsLike, axes: InputDimsLike, weights: Tuple[float, ...], sampling: Tuple[float, ...], edge: bool, kind: str, dtype: DTypeLike): l2op = SecondDerivative( dims, axis=axes[0], sampling=sampling[0], edge=edge, kind=kind, dtype=dtype ) dims = l2op.dims l2op *= weights[0] for ax, samp, weight in zip(axes[1:], sampling[1:], weights[1:]): l2op += weight * SecondDerivative( dims, axis=ax, sampling=samp, edge=edge, dtype=dtype ) return l2op
3,411
33.464646
117
py
pylops
pylops-master/pylops/basicoperators/smoothing2d.py
__all__ = ["Smoothing2D"] from typing import Union import numpy as np from pylops.signalprocessing import Convolve2D from pylops.utils.typing import DTypeLike, InputDimsLike class Smoothing2D(Convolve2D): r"""2D Smoothing. Apply smoothing to model (and data) along two ``axes`` of a multi-dimensional array. Parameters ---------- nsmooth : :obj:`tuple` or :obj:`list` Length of smoothing operator in 1st and 2nd dimensions (must be odd) dims : :obj:`tuple` Number of samples for each dimension axes : :obj:`int`, optional .. versionadded:: 2.0.0 Axes along which model (and data) are smoothed. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) See Also -------- pylops.signalprocessing.Convolve2D : 2D convolution Notes ----- The 2D Smoothing operator is a special type of convolutional operator that convolves the input model (or data) with a constant 2d filter of size :math:`n_{\text{smooth}, 1} \times n_{\text{smooth}, 2}`: Its application to a two dimensional input signal is: .. math:: y[i,j] = 1/(n_{\text{smooth}, 1}\cdot n_{\text{smooth}, 2}) \sum_{l=-(n_{\text{smooth},1}-1)/2}^{(n_{\text{smooth},1}-1)/2} \sum_{m=-(n_{\text{smooth},2}-1)/2}^{(n_{\text{smooth},2}-1)/2} x[l,m] Note that since the filter is symmetrical, the *Smoothing2D* operator is self-adjoint. """ def __init__(self, nsmooth: InputDimsLike, dims: Union[int, InputDimsLike], axes: InputDimsLike = (-2, -1), dtype: DTypeLike = "float64", name: str = 'S'): nsmooth = list(nsmooth) if nsmooth[0] % 2 == 0: nsmooth[0] += 1 if nsmooth[1] % 2 == 0: nsmooth[1] += 1 h = np.ones((nsmooth[0], nsmooth[1])) / float(nsmooth[0] * nsmooth[1]) offset = [(nsmooth[0] - 1) // 2, (nsmooth[1] - 1) // 2] super().__init__(dims=dims, h=h, offset=offset, axes=axes, dtype=dtype, name=name)
2,264
30.458333
90
py
pylops
pylops-master/pylops/basicoperators/blockdiag.py
__all__ = ["BlockDiag"] import multiprocessing as mp import numpy as np import scipy as sp # need to check scipy version since the interface submodule changed into # _interface from scipy>=1.8.0 sp_version = sp.__version__.split(".") if int(sp_version[0]) <= 1 and int(sp_version[1]) < 8: from scipy.sparse.linalg.interface import LinearOperator as spLinearOperator from scipy.sparse.linalg.interface import _get_dtype else: from scipy.sparse.linalg._interface import ( _get_dtype, LinearOperator as spLinearOperator, ) from typing import Optional, Sequence from pylops import LinearOperator from pylops.basicoperators import MatrixMult from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, NDArray def _matvec_rmatvec_map(op, x: NDArray) -> NDArray: """matvec/rmatvec for multiprocessing""" return op(x).squeeze() class BlockDiag(LinearOperator): r"""Block-diagonal operator. Create a block-diagonal operator from N linear operators. Parameters ---------- ops : :obj:`list` Linear operators to be stacked. Alternatively, :obj:`numpy.ndarray` or :obj:`scipy.sparse` matrices can be passed in place of one or more operators. nproc : :obj:`int`, optional Number of processes used to evaluate the N operators in parallel using ``multiprocessing``. If ``nproc=1``, work in serial mode. dtype : :obj:`str`, optional Type of elements in input array. Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- A block-diagonal operator composed of N linear operators is created such as its application in forward mode leads to .. 
math:: \begin{bmatrix} \mathbf{L}_1 & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{L}_2 & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_N \end{bmatrix} \begin{bmatrix} \mathbf{x}_{1} \\ \mathbf{x}_{2} \\ \vdots \\ \mathbf{x}_{N} \end{bmatrix} = \begin{bmatrix} \mathbf{L}_1 \mathbf{x}_{1} \\ \mathbf{L}_2 \mathbf{x}_{2} \\ \vdots \\ \mathbf{L}_N \mathbf{x}_{N} \end{bmatrix} while its application in adjoint mode leads to .. math:: \begin{bmatrix} \mathbf{L}_1^H & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{L}_2^H & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{L}_N^H \end{bmatrix} \begin{bmatrix} \mathbf{y}_{1} \\ \mathbf{y}_{2} \\ \vdots \\ \mathbf{y}_{N} \end{bmatrix} = \begin{bmatrix} \mathbf{L}_1^H \mathbf{y}_{1} \\ \mathbf{L}_2^H \mathbf{y}_{2} \\ \vdots \\ \mathbf{L}_N^H \mathbf{y}_{N} \end{bmatrix} """ def __init__( self, ops: Sequence[LinearOperator], nproc: int = 1, dtype: Optional[DTypeLike] = None, ) -> None: self.ops = ops mops = np.zeros(len(ops), dtype=int) nops = np.zeros(len(ops), dtype=int) for iop, oper in enumerate(ops): if not isinstance(oper, (LinearOperator, spLinearOperator)): self.ops[iop] = MatrixMult(oper, dtype=oper.dtype) nops[iop] = self.ops[iop].shape[0] mops[iop] = self.ops[iop].shape[1] self.nops = int(nops.sum()) self.mops = int(mops.sum()) self.nnops = np.insert(np.cumsum(nops), 0, 0) self.mmops = np.insert(np.cumsum(mops), 0, 0) # create pool for multiprocessing self._nproc = nproc self.pool: Optional[mp.pool.Pool] = None if self.nproc > 1: self.pool = mp.Pool(processes=nproc) dtype = _get_dtype(ops) if dtype is None else np.dtype(dtype) clinear = all([getattr(oper, "clinear", True) for oper in self.ops]) super().__init__(dtype=dtype, shape=(self.nops, self.mops), clinear=clinear) @property def nproc(self) -> int: return self._nproc @nproc.setter def nproc(self, nprocnew: int) -> None: if 
self._nproc > 1 and self.pool is not None: self.pool.close() if nprocnew > 1: self.pool = mp.Pool(processes=nprocnew) self._nproc = nprocnew def _matvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.nops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y[self.nnops[iop] : self.nnops[iop + 1]] = oper.matvec( x[self.mmops[iop] : self.mmops[iop + 1]] ).squeeze() return y def _rmatvec_serial(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.mops, dtype=self.dtype) for iop, oper in enumerate(self.ops): y[self.mmops[iop] : self.mmops[iop + 1]] = oper.rmatvec( x[self.nnops[iop] : self.nnops[iop + 1]] ).squeeze() return y def _matvec_multiproc(self, x: NDArray) -> NDArray: if self.pool is None: raise ValueError ys = self.pool.starmap( _matvec_rmatvec_map, [ (oper._matvec, x[self.mmops[iop] : self.mmops[iop + 1]]) for iop, oper in enumerate(self.ops) ], ) y = np.hstack(ys) return y def _rmatvec_multiproc(self, x: NDArray) -> NDArray: if self.pool is None: raise ValueError ys = self.pool.starmap( _matvec_rmatvec_map, [ (oper._rmatvec, x[self.nnops[iop] : self.nnops[iop + 1]]) for iop, oper in enumerate(self.ops) ], ) y = np.hstack(ys) return y def _matvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._matvec_serial(x) else: y = self._matvec_multiproc(x) return y def _rmatvec(self, x: NDArray) -> NDArray: if self.nproc == 1: y = self._rmatvec_serial(x) else: y = self._rmatvec_multiproc(x) return y
6,677
31.735294
84
py
pylops
pylops-master/pylops/basicoperators/matrixmult.py
__all__ = ["MatrixMult"] import logging from typing import Optional, Union import numpy as np import scipy as sp from scipy.sparse.linalg import inv from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_array from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class MatrixMult(LinearOperator): r"""Matrix multiplication. Simple wrapper to :py:func:`numpy.dot` and :py:func:`numpy.vdot` for an input matrix :math:`\mathbf{A}`. Parameters ---------- A : :obj:`numpy.ndarray` or :obj:`scipy.sparse` matrix Matrix. otherdims : :obj:`tuple`, optional Number of samples for each other dimension of model (model/data will be reshaped and ``A`` applied multiple times to each column of the model/data). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- dimsd : :obj:`tuple` Shape of the array after the forward, but before linearization. For example, ``y_reshaped = (Op * x.ravel()).reshape(Op.dimsd)``. 
shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) complex : :obj:`bool` Matrix has complex numbers (``True``) or not (``False``) """ def __init__( self, A: NDArray, otherdims: Optional[Union[int, InputDimsLike]] = None, dtype: DTypeLike = "float64", name: str = "M", ) -> None: ncp = get_array_module(A) self.A = A if isinstance(A, ncp.ndarray): self.complex = np.iscomplexobj(A) else: self.complex = np.iscomplexobj(A.data) if otherdims is None: dims, dimsd = (A.shape[1],), (A.shape[0],) self.reshape = False explicit = True else: otherdims = _value_or_sized_to_array(otherdims) self.otherdims = np.array(otherdims, dtype=int) dims, dimsd = np.insert(self.otherdims, 0, self.A.shape[1]), np.insert( self.otherdims, 0, self.A.shape[0] ) self.dimsflatten, self.dimsdflatten = np.insert( [np.prod(self.otherdims)], 0, self.A.shape[1] ), np.insert([np.prod(self.otherdims)], 0, self.A.shape[0]) self.reshape = True explicit = False # Check dtype for correctness (upcast to complex when A is complex) if np.iscomplexobj(A) and not np.iscomplexobj(np.ones(1, dtype=dtype)): dtype = A.dtype logging.warning("Matrix A is a complex object, dtype cast to %s" % dtype) super().__init__( dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, explicit=explicit, name=name ) def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if self.reshape: x = ncp.reshape(x, self.dimsflatten) y = self.A.dot(x) if self.reshape: return y.ravel() else: return y def _rmatvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) if self.reshape: x = ncp.reshape(x, self.dimsdflatten) if self.complex: y = (self.A.T.dot(x.conj())).conj() else: y = self.A.T.dot(x) if self.reshape: return y.ravel() else: return y def inv(self) -> NDArray: r"""Return the inverse of :math:`\mathbf{A}`. Returns ---------- Ainv : :obj:`numpy.ndarray` Inverse matrix. 
""" if sp.sparse.issparse(self.A): Ainv = inv(self.A) else: ncp = get_array_module(self.A) Ainv = ncp.linalg.inv(self.A) return Ainv
4,142
30.625954
87
py
pylops
pylops-master/pylops/basicoperators/firstderivative.py
__all__ = ["FirstDerivative"] from typing import Callable, Union import numpy as np from numpy.core.multiarray import normalize_axis_index from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class FirstDerivative(LinearOperator): r"""First derivative. Apply a first derivative using a multiple-point stencil finite-difference approximation along ``axis``. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which derivative is applied. sampling : :obj:`float`, optional Sampling step :math:`\Delta x`. kind : :obj:`str`, optional Derivative kind (``forward``, ``centered``, or ``backward``). edge : :obj:`bool`, optional Use reduced order derivative at edges (``True``) or ignore them (``False``). This is currently only available for centered derivative order : :obj:`int`, optional .. versionadded:: 2.0.0 Derivative order (``3`` or ``5``). This is currently only available for centered derivative dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- The FirstDerivative operator applies a first derivative to any chosen direction of a multi-dimensional array using either a second- or third-order centered stencil or first-order forward/backward stencils. For simplicity, given a one dimensional array, the second-order centered first derivative is: .. math:: y[i] = (0.5x[i+1] - 0.5x[i-1]) / \Delta x while the first-order forward stencil is: .. 
math:: y[i] = (x[i+1] - x[i]) / \Delta x and the first-order backward stencil is: .. math:: y[i] = (x[i] - x[i-1]) / \Delta x Formulas for the third-order centered stencil can be found at this `link <https://en.wikipedia.org/wiki/Finite_difference_coefficient>`_. """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, sampling: float = 1.0, kind: str = "centered", edge: bool = False, order: int = 3, dtype: DTypeLike = "float64", name: str = "F", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) self.axis = normalize_axis_index(axis, len(self.dims)) self.sampling = sampling self.kind = kind self.edge = edge self.order = order self._register_multiplications(self.kind, self.order) def _register_multiplications( self, kind: str, order: int, ) -> None: # choose _matvec and _rmatvec kind self._hmatvec: Callable self._hrmatvec: Callable if kind == "forward": self._hmatvec = self._matvec_forward self._hrmatvec = self._rmatvec_forward elif kind == "centered": if order == 3: self._hmatvec = self._matvec_centered3 self._hrmatvec = self._rmatvec_centered3 elif order == 5: self._hmatvec = self._matvec_centered5 self._hrmatvec = self._rmatvec_centered5 else: raise NotImplementedError("'order' must be '3, or '5'") elif kind == "backward": self._hmatvec = self._matvec_backward self._hrmatvec = self._rmatvec_backward else: raise NotImplementedError( "'kind' must be 'forward', 'centered', or 'backward'" ) def _matvec(self, x: NDArray) -> NDArray: return self._hmatvec(x) def _rmatvec(self, x: NDArray) -> NDArray: return self._hrmatvec(x) @reshaped(swapaxis=True) def _matvec_forward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-1] = (x[..., 1:] - x[..., :-1]) / self.sampling return y @reshaped(swapaxis=True) def _rmatvec_forward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-1] -= x[..., 
:-1] y[..., 1:] += x[..., :-1] y /= self.sampling return y @reshaped(swapaxis=True) def _matvec_centered3(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., 1:-1] = 0.5 * (x[..., 2:] - x[..., :-2]) if self.edge: y[..., 0] = x[..., 1] - x[..., 0] y[..., -1] = x[..., -1] - x[..., -2] y /= self.sampling return y @reshaped(swapaxis=True) def _rmatvec_centered3(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-2] -= 0.5 * x[..., 1:-1] y[..., 2:] += 0.5 * x[..., 1:-1] if self.edge: y[..., 0] -= x[..., 0] y[..., 1] += x[..., 0] y[..., -2] -= x[..., -1] y[..., -1] += x[..., -1] y /= self.sampling return y @reshaped(swapaxis=True) def _matvec_centered5(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., 2:-2] = ( x[..., :-4] / 12.0 - 2 * x[..., 1:-3] / 3.0 + 2 * x[..., 3:-1] / 3.0 - x[..., 4:] / 12.0 ) if self.edge: y[..., 0] = x[..., 1] - x[..., 0] y[..., 1] = 0.5 * (x[..., 2] - x[..., 0]) y[..., -2] = 0.5 * (x[..., -1] - x[..., -3]) y[..., -1] = x[..., -1] - x[..., -2] y /= self.sampling return y @reshaped(swapaxis=True) def _rmatvec_centered5(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., :-4] += x[..., 2:-2] / 12.0 y[..., 1:-3] -= 2.0 * x[..., 2:-2] / 3.0 y[..., 3:-1] += 2.0 * x[..., 2:-2] / 3.0 y[..., 4:] -= x[..., 2:-2] / 12.0 if self.edge: y[..., 0] -= x[..., 0] + 0.5 * x[..., 1] y[..., 1] += x[..., 0] y[..., 2] += 0.5 * x[..., 1] y[..., -3] -= 0.5 * x[..., -2] y[..., -2] -= x[..., -1] y[..., -1] += 0.5 * x[..., -2] + x[..., -1] y /= self.sampling return y @reshaped(swapaxis=True) def _matvec_backward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(x.shape, self.dtype) y[..., 1:] = (x[..., 1:] - x[..., :-1]) / self.sampling return y @reshaped(swapaxis=True) def _rmatvec_backward(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = 
ncp.zeros(x.shape, self.dtype) y[..., :-1] -= x[..., 1:] y[..., 1:] += x[..., 1:] y /= self.sampling return y
7,536
31.627706
81
py
pylops
pylops-master/pylops/basicoperators/pad.py
__all__ = ["Pad"] from typing import Sequence, Tuple, Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Pad(LinearOperator): r"""Pad operator. Zero-pad model in forward model and extract non-zero subsequence in adjoint. Padding can be performed in one or multiple directions to any multi-dimensional input arrays. Parameters ---------- dims : :obj:`int` or :obj:`tuple` Number of samples for each dimension pad : :obj:`tuple` Number of samples to pad. If ``dims`` is a scalar, ``pad`` is a single tuple ``(pad_in, pad_end)``. If ``dims`` is a tuple, ``pad`` is a tuple of tuples where each inner tuple contains the number of samples to pad in each dimension dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ValueError If any element of ``pad`` is negative. Notes ----- Given an array of size :math:`N`, the *Pad* operator simply adds :math:`\text{pad}_\text{in}` at the start and :math:`\text{pad}_\text{end}` at the end in forward mode: .. math:: y_{i} = x_{i-\text{pad}_\text{in}} \quad \forall i=\text{pad}_\text{in},\ldots,\text{pad}_\text{in}+N-1 and :math:`y_i = 0 \quad \forall i=0,\ldots,\text{pad}_\text{in}-1, \text{pad}_\text{in}+N-1,\ldots,N+\text{pad}_\text{in}+\text{pad}_\text{end}` In adjoint mode, values from :math:`\text{pad}_\text{in}` to :math:`N-\text{pad}_\text{end}` are extracted from the data: .. 
math:: x_{i} = y_{\text{pad}_\text{in}+i} \quad \forall i=0, N-1 """ def __init__( self, dims: Union[int, InputDimsLike], pad: Union[Tuple[int, int], Sequence[Tuple[int, int]]], dtype: DTypeLike = "float64", name: str = "P", ) -> None: if np.any(np.array(pad) < 0): raise ValueError("Padding must be positive or zero") dims = _value_or_sized_to_tuple(dims) # Accept (padbeg, padend) and [(padbeg, padend)] self.pad: Sequence = [pad] if len(dims) == 1 and len(pad) == 2 else pad dimsd = [dim + before + after for dim, (before, after) in zip(dims, self.pad)] super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) @reshaped def _matvec(self, x: NDArray) -> NDArray: return np.pad(x, self.pad, mode="constant") @reshaped def _rmatvec(self, x: NDArray) -> NDArray: for ax, (before, _) in enumerate(self.pad): x = np.take(x, np.arange(before, before + self.dims[ax]), axis=ax) return x
3,144
32.105263
116
py
pylops
pylops-master/pylops/basicoperators/conj.py
__all__ = ["Conj"] from typing import Union import numpy as np from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray class Conj(LinearOperator): r"""Complex conjugate operator. Return the complex conjugate of the input. It is self-adjoint. Parameters ---------- dims : :obj:`int` or :obj:`tuple` Number of samples for each dimension dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Notes ----- In forward mode: .. math:: y_{i} = \Re\{x_{i}\} - i\Im\{x_{i}\} \quad \forall i=0,\ldots,N-1 In adjoint mode: .. math:: x_{i} = \Re\{y_{i}\} - i\Im\{y_{i}\} \quad \forall i=0,\ldots,N-1 """ def __init__( self, dims: Union[int, InputDimsLike], dtype: DTypeLike = "complex128", name: str = "C", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__( dtype=np.dtype(dtype), dims=dims, dimsd=dims, clinear=False, name=name ) def _matvec(self, x: NDArray) -> NDArray: return x.conj() def _rmatvec(self, x: NDArray) -> NDArray: return x.conj()
1,619
22.478261
82
py
pylops
pylops-master/pylops/basicoperators/_spread_numba.py
import os

from numba import jit, prange

# detect whether to use parallel or not
numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1"))
parallel = True if numba_threads != 1 else False


@jit(nopython=True, parallel=parallel, nogil=True)
def _matvec_numba_table(x, y, dims, interp, table, dtable):
    """numba implementation of forward mode with table.
    See official documentation for description of variables
    """
    # NOTE: forward mode scatters into shared entries of ``y``, so the outer
    # loop uses plain ``range`` (no prange) to avoid write races.
    dim0, dim1 = dims
    x = x.reshape(dims)
    for ix0 in range(dim0):
        for it in range(dim1):
            indices = table[ix0, it]
            if interp:
                dindices = dtable[ix0, it]
            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # =int(np.nan)
                    if not interp:
                        y[i, index] += x[ix0, it]
                    else:
                        # linear interpolation: split the sample between the
                        # two neighbouring output samples
                        y[i, index] += (1 - dindices[i]) * x[ix0, it]
                        y[i, index + 1] += dindices[i] * x[ix0, it]
    return y.ravel()


@jit(nopython=True, parallel=parallel, nogil=True)
def _rmatvec_numba_table(x, y, dims, dimsd, interp, table, dtable):
    """numba implementation of adjoint mode with table.
    See official documentation for description of variables
    """
    # Adjoint gathers into disjoint y[ix0, it] entries, so prange is safe here.
    dim0, dim1 = dims
    x = x.reshape(dimsd)
    for ix0 in prange(dim0):
        for it in range(dim1):
            indices = table[ix0, it]
            if interp:
                dindices = dtable[ix0, it]
            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # =int(np.nan)
                    if not interp:
                        y[ix0, it] += x[i, index]
                    else:
                        y[ix0, it] += (
                            x[i, index] * (1 - dindices[i])
                            + x[i, index + 1] * dindices[i]
                        )
    return y.ravel()


@jit(nopython=True, parallel=parallel, nogil=True)
def _matvec_numba_onthefly(x, y, dims, interp, fh):
    """numba implementation of forward mode with on-the-fly computations.
    See official documentation for description of variables
    """
    dim0, dim1 = dims
    x = x.reshape(dims)
    for ix0 in range(dim0):
        for it in range(dim1):
            # NOTE(review): both branches are identical; presumably ``fh``
            # always returns an (indices, dindices) pair and the split exists
            # for numba type-inference reasons — confirm against the Spread
            # operator that supplies ``fh``.
            if interp:
                indices, dindices = fh(ix0, it)
            else:
                indices, dindices = fh(ix0, it)
            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # =int(np.nan)
                    if not interp:
                        y[i, index] += x[ix0, it]
                    else:
                        y[i, index] += (1 - dindices[i]) * x[ix0, it]
                        y[i, index + 1] += dindices[i] * x[ix0, it]
    return y.ravel()


@jit(nopython=True, parallel=parallel, nogil=True)
def _rmatvec_numba_onthefly(x, y, dims, dimsd, interp, fh):
    """numba implementation of adjoint mode with on-the-fly computations.
    See official documentation for description of variables
    """
    dim0, dim1 = dims
    x = x.reshape(dimsd)
    for ix0 in prange(dim0):
        for it in range(dim1):
            # NOTE(review): identical branches, see _matvec_numba_onthefly.
            if interp:
                indices, dindices = fh(ix0, it)
            else:
                indices, dindices = fh(ix0, it)
            for i, indexfloat in enumerate(indices):
                index = int(indexfloat)
                if index != -9223372036854775808:  # =int(np.nan)
                    if not interp:
                        y[ix0, it] += x[i, index]
                    else:
                        y[ix0, it] += (
                            x[i, index] * (1 - dindices[i])
                            + x[i, index + 1] * dindices[i]
                        )
    return y.ravel()
3,890
35.027778
73
py
pylops
pylops-master/pylops/basicoperators/zero.py
__all__ = ["Zero"]

from typing import Optional

import numpy as np

from pylops import LinearOperator
from pylops.utils.backend import get_array_module
from pylops.utils.typing import DTypeLike, NDArray


class Zero(LinearOperator):
    r"""Zero operator.

    Map any model into a data vector of zeros of size :math:`N` (forward)
    and any data into a model vector of zeros of size :math:`M` (adjoint).

    Parameters
    ----------
    N : :obj:`int`
        Number of samples in data (and model if ``M`` is not provided).
    M : :obj:`int`, optional
        Number of samples in model.
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Notes
    -----
    Forward mode produces a null data vector:

    .. math::
        \mathbf{0} \mathbf{x} = \mathbf{0}_N

    and adjoint mode a null model vector:

    .. math::
        \mathbf{0} \mathbf{y} = \mathbf{0}_M

    """

    def __init__(
        self,
        N: int,
        M: Optional[int] = None,
        dtype: DTypeLike = "float64",
        name: str = "Z",
    ) -> None:
        # A square operator is assumed when the model size is not given.
        if M is None:
            M = N
        super().__init__(dtype=np.dtype(dtype), shape=(N, M), name=name)

    def _matvec(self, x: NDArray) -> NDArray:
        # Allocate zeros on the same backend (numpy/cupy) as the input.
        backend = get_array_module(x)
        nrows = self.shape[0]
        return backend.zeros(nrows, dtype=self.dtype)

    def _rmatvec(self, x: NDArray) -> NDArray:
        backend = get_array_module(x)
        ncols = self.shape[1]
        return backend.zeros(ncols, dtype=self.dtype)
1,866
24.575342
79
py
pylops
pylops-master/pylops/utils/_internal.py
from typing import Sized, Tuple

import numpy as np
import numpy.typing as npt

from pylops.utils.typing import DTypeLike, NDArray


def _value_or_sized_to_array(value_or_sized, repeat: int = 1) -> NDArray:
    """Convert an object which is either a single value or a list-like to an array.

    Parameters
    ----------
    value_or_sized : :obj:`int` or :obj:`float` or :obj:`list` or :obj:`tuple`
        Single value or list-like.
    repeat : :obj:`int`
        Size of resulting array if a single value is passed. If a list-like is
        passed, it is ignored.

    Returns
    -------
    out : :obj:`numpy.ndarray`
        When the input is a single value, an array with ``repeat`` samples
        containing that value. When the input is a list-like object, its
        conversion to an array.

    """
    # Sized inputs (list, tuple, ndarray, ...) are converted as-is; scalars
    # are replicated ``repeat`` times.
    return (
        np.asarray(value_or_sized)
        if isinstance(value_or_sized, Sized)
        else np.array([value_or_sized] * repeat)
    )


def _value_or_sized_to_tuple(value_or_sized, repeat: int = 1) -> Tuple:
    """Convert an object which is either a single value or a list-like to a tuple.

    Parameters
    ----------
    value_or_sized : :obj:`int` or :obj:`float` or :obj:`list` or :obj:`tuple`
        Single value or list-like.
    repeat : :obj:`int`
        Size of resulting tuple if a single value is passed. If a list-like is
        passed, it is ignored.

    Returns
    -------
    out : :obj:`tuple`
        When the input is a single value, a tuple with ``repeat`` samples
        containing that value. When the input is a list-like object, its
        conversion to a tuple.

    """
    return tuple(_value_or_sized_to_array(value_or_sized, repeat=repeat))


def _raise_on_wrong_dtype(arr: npt.ArrayLike, dtype: DTypeLike, name: str) -> None:
    """Raise an error if the dtype of ``arr`` is not a subdtype of ``dtype``.

    Parameters
    ----------
    arr : :obj:`numpy.array`
        Array whose type will be checked
    dtype : :obj:`numpy.dtype`
        Type which must be a supertype of ``arr.dtype``.
    name : :obj:`str`
        Name of parameter to issue error.

    Raises
    ------
    TypeError
        When ``arr.dtype`` is not a subdtype of ``dtype``.

    """
    if not np.issubdtype(arr.dtype, dtype):
        # Fixed wording: was 'but received to "..."'.
        raise TypeError(
            f"Wrong input type for `{name}`. "
            f'Must be "{dtype}", but received "{arr.dtype}".'
        )
2,414
29.1875
86
py
pylops
pylops-master/pylops/utils/estimators.py
__all__ = [
    "trace_hutchinson",
    "trace_hutchpp",
    "trace_nahutchpp",
]

from itertools import chain
from types import ModuleType
from typing import Optional, Tuple

import numpy
import numpy.typing as npt

from pylops.utils.backend import get_module


def _sampler_gaussian(
    m: float, batch_size: int, backend_module: ModuleType = numpy
) -> npt.ArrayLike:
    # Draw an (m, batch_size) standard-normal sketching matrix.
    # Return annotation fixed: this returns a single array, not a tuple.
    return backend_module.random.randn(m, batch_size)


def _sampler_rayleigh(
    m: float, batch_size: int, backend_module: ModuleType = numpy
) -> npt.ArrayLike:
    # Gaussian draws with each column rescaled by m / ||z||^2.
    z = backend_module.random.randn(m, batch_size)
    for i in range(batch_size):
        z[:, i] *= m / backend_module.dot(z[:, i].T, z[:, i])
    return z


def _sampler_rademacher(
    m: float, batch_size: int, backend_module: ModuleType = numpy
) -> npt.ArrayLike:
    # Random +/-1 entries via a Bernoulli(0.5) draw mapped to {-1, 1}.
    return 2 * backend_module.random.binomial(1, 0.5, size=(m, batch_size)) - 1


# Registry of sub-Gaussian samplers; "unitvector" is handled separately
# inside trace_hutchinson.
_SAMPLERS = {
    "gaussian": _sampler_gaussian,
    "rayleigh": _sampler_rayleigh,
    "rademacher": _sampler_rademacher,
}


def trace_hutchinson(
    Op,
    neval: Optional[int] = None,
    batch_size: Optional[int] = None,
    sampler: str = "rademacher",
    backend: str = "numpy",
) -> float:
    r"""Trace of linear operator using the Hutchinson method.

    Returns an estimate of the trace of a linear operator using the
    Hutchinson method [1]_.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Linear operator to compute trace on.
    neval : :obj:`int`, optional
        Maximum number of matrix-vector products compute.
        Defaults to 10% of ``shape[1]``.
    batch_size : :obj:`int`, optional
        Vectorize computations by sampling sketching matrices instead of
        vectors. Set this value to as high as permitted by memory, but there
        is no guarantee of speedup. Coerced to never exceed ``neval``. When
        using "unitvector" as sampler, is coerced to not exceed ``shape[1]``.
        Defaults to 100 or ``neval``.
    sampler : :obj:`str`, optional
        Sample sketching matrices from the following distributions:

            - "gaussian": Mean zero, unit variance Gaussian.
            - "rayleigh": Sample from mean zero, unit variance Gaussian and normalize the columns.
            - "rademacher": Random sign.
            - "unitvector": Samples from the unit vectors :math:`\mathrm{e}_i` without replacement.

    backend : :obj:`str`, optional
        Backend used to densify matrix (``numpy`` or ``cupy``). Note that
        this must be consistent with how the operator has been created.

    Returns
    -------
    trace : :obj:`float`
        Operator trace.

    Raises
    -------
    ValueError
        If ``neval`` is smaller than 3.
    NotImplementedError
        If the ``sampler`` is not one of the available samplers.

    Notes
    -----
    Let :math:`m` = ``shape[1]`` and :math:`k` = ``neval``. This algorithm
    estimates the trace via

    .. math::
        \frac{1}{k}\sum\limits_{i=1}^k \mathbf{z}_i^T\,\mathbf{Op}\,\mathbf{z}_i

    where vectors :math:`\mathbf{z}_i` are sampled according to the sampling
    function. See [2]_ for a description of the variance and
    :math:`\epsilon`-approximation of different samplers.

    Prefer the Rademacher sampler if the goal is to minimize variance, but
    the Gaussian for a better probability of approximating the correct value.
    Use the Unit Vector approach if you are sampling a large number of
    ``neval`` (compared to ``shape[1]``), especially if the operator is
    highly-structured.

    .. [1] Hutchinson, M. F. (1990). *A stochastic estimator of the trace of
        the influence matrix for laplacian smoothing splines*. Communications
        in Statistics - Simulation and Computation, 19(2), 433–450.

    .. [2] Avron, H., and Toledo, S. (2011). *Randomized algorithms for
        estimating the trace of an implicit symmetric positive semi-definite
        matrix*. Journal of the ACM, 58(2), 1–34.

    """
    ncp = get_module(backend)
    m = Op.shape[1]
    neval = int(numpy.round(m * 0.1)) if neval is None else neval
    batch_size = min(neval, 100 if batch_size is None else batch_size)
    # Split neval products into full batches plus one (optional) remainder.
    n_missing = neval - batch_size * (neval // batch_size)
    batch_range = chain(
        (batch_size for _ in range(0, neval - n_missing, batch_size)),
        (n_missing for _ in range(int(n_missing != 0))),
    )

    trace = ncp.zeros(1, dtype=Op.dtype)
    if sampler == "unitvector":
        # Exhaustively sample standard basis vectors e_i without replacement,
        # then rescale by m / n_total to keep the estimator unbiased.
        remaining_vectors = list(range(m))
        n_total = 0
        while remaining_vectors:
            batch = min(batch_size, len(remaining_vectors))
            z = ncp.zeros((m, batch), dtype=Op.dtype)
            z_idx = ncp.random.choice(remaining_vectors, batch, replace=False)
            for i, idx in enumerate(z_idx):
                z[idx, i] = 1.0
                remaining_vectors.remove(idx)
            trace += ncp.trace((z.T @ (Op @ z)))
            n_total += batch
        trace *= m / n_total
        return trace[0]

    if sampler not in _SAMPLERS:
        raise NotImplementedError(f"sampler {sampler} not available.")
    sampler_fun = _SAMPLERS[sampler]

    for batch in batch_range:
        z = sampler_fun(m, batch, backend_module=ncp).astype(Op.dtype)
        trace += ncp.trace((z.T @ (Op @ z)))
    trace /= neval
    return trace[0]


def trace_hutchpp(
    Op,
    neval: Optional[int] = None,
    sampler: str = "rademacher",
    backend: str = "numpy",
) -> float:
    r"""Trace of linear operator using the Hutch++ method.

    Returns an estimate of the trace of a linear operator using the Hutch++
    method [1]_.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Linear operator to compute trace on.
    neval : :obj:`int`, optional
        Maximum number of matrix-vector products compute.
        Defaults to 10% of ``shape[1]``.
    sampler : :obj:`str`, optional
        Sample sketching matrices from the following distributions:

            - "gaussian": Mean zero, unit variance Gaussian.
            - "rayleigh": Sample from mean zero, unit variance Gaussian and normalize the columns.
            - "rademacher": Random sign.

    backend : :obj:`str`, optional
        Backend used to densify matrix (``numpy`` or ``cupy``). Note that
        this must be consistent with how the operator has been created.

    Returns
    -------
    trace : :obj:`float`
        Operator trace.

    Raises
    -------
    ValueError
        If ``neval`` is smaller than 3.
    NotImplementedError
        If the ``sampler`` is not one of the available samplers.

    Notes
    -----
    This function follows Algorithm 1 of [1]_. Let :math:`m` = ``shape[1]``
    and :math:`k` = ``neval``.

    1. Sample sketching matrices
       :math:`\mathbf{S} \in \mathbb{R}^{m \times \lfloor k/3\rfloor}`, and
       :math:`\mathbf{G} \in \mathbb{R}^{m \times \lfloor k/3\rfloor}`, from
       sub-Gaussian distributions.
    2. Compute reduced QR decomposition of :math:`\mathbf{Op}\,\mathbf{S}`,
       retaining only :math:`\mathbf{Q}`.
    3. Return :math:`\operatorname{tr}(\mathbf{Q}^T\,\mathbf{Op}\,\mathbf{Q})
       + \frac{1}{\lfloor k/3\rfloor}\operatorname{tr}\left(\mathbf{G}^T(\mathbf{I}
       - \mathbf{Q}\mathbf{Q}^T)\,\mathbf{Op}\,(\mathbf{I} -
       \mathbf{Q}\mathbf{Q}^T)\mathbf{G}\right)`

    Use the Rademacher sampler unless you know what you are doing.

    .. [1] Meyer, R. A., Musco, C., Musco, C., & Woodruff, D. P. (2021).
        *Hutch++: Optimal Stochastic Trace Estimation*. In Symposium on
        Simplicity in Algorithms (SOSA) (pp. 142–155). Philadelphia, PA:
        Society for Industrial and Applied Mathematics.
        `link <https://arxiv.org/abs/2010.09649>`_

    """
    ncp = get_module(backend)
    m = Op.shape[1]
    neval = int(numpy.round(m * 0.1)) if neval is None else neval

    if sampler not in _SAMPLERS:
        raise NotImplementedError(f"sampler {sampler} not available.")
    sampler_fun = _SAMPLERS[sampler]

    # The budget is split evenly in three: S, G and the Q-projection products.
    batch = neval // 3
    if batch <= 0:
        msg = f"Sampler '{sampler}' not supported with {neval} samples."
        msg += " Try increasing it."
        raise ValueError(msg)

    S = sampler_fun(m, batch, backend_module=ncp).astype(Op.dtype)
    G = sampler_fun(m, batch, backend_module=ncp).astype(Op.dtype)
    Q, _ = ncp.linalg.qr(Op @ S)
    del S
    # Deflate G against the range captured by Q: G <- (I - Q Q^T) G.
    G = G - Q @ (Q.T @ G)
    trace = ncp.zeros(1, dtype=Op.dtype)
    trace += ncp.trace(Q.T @ (Op @ Q)) + ncp.trace(G.T @ (Op @ G)) / batch
    return trace[0]


def trace_nahutchpp(
    Op,
    neval: Optional[int] = None,
    sampler: str = "rademacher",
    c1: float = 1.0 / 6.0,
    c2: float = 1.0 / 3.0,
    backend: str = "numpy",
) -> float:
    r"""Trace of linear operator using the NA-Hutch++ method.

    Returns an estimate of the trace of a linear operator using the
    Non-Adaptive variant of Hutch++ method [1]_.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Linear operator to compute trace on.
    neval : :obj:`int`, optional
        Maximum number of matrix-vector products compute.
        Defaults to 10% of ``shape[1]``.
    sampler : :obj:`str`, optional
        Sample sketching matrices from the following distributions:

            - "gaussian": Mean zero, unit variance Gaussian.
            - "rayleigh": Sample from mean zero, unit variance Gaussian and normalize the columns.
            - "rademacher": Random sign.

    c1 : :obj:`float`, optional
        Fraction of ``neval`` for sketching matrix :math:`\mathbf{S}`.
    c2 : :obj:`float`, optional
        Fraction of ``neval`` for sketching matrix :math:`\mathbf{R}`. Must
        be larger than ``c2``, ideally by a factor of at least 2.
    backend : :obj:`str`, optional
        Backend used to densify matrix (``numpy`` or ``cupy``). Note that
        this must be consistent with how the operator has been created.

    Returns
    -------
    trace : :obj:`float`
        Operator trace.

    Raises
    -------
    ValueError
        If ``neval`` not large enough to accomodate ``c1`` and ``c2``.
    NotImplementedError
        If the ``sampler`` is not one of the available samplers.

    Notes
    -----
    This function follows Algorithm 2 of [1]_. Let :math:`m` = ``shape[1]``
    and :math:`k` = ``neval``.

    1. Fix constants :math:`c_1`, :math:`c_2`, :math:`c_3` such that
       :math:`c_1 < c_2` and :math:`c_1 + c_2 + c_3 = 1`.
    2. Sample sketching matrices
       :math:`\mathbf{S} \in \mathbb{R}^{m \times c_1 k}`,
       :math:`\mathbf{R} \in \mathbb{R}^{m \times c_2 k}`, and
       :math:`\mathbf{G} \in \mathbb{R}^{m \times c_3 k}` from sub-Gaussian
       distributions.
    3. Compute :math:`\mathbf{Z} = \mathbf{Op}\,\mathbf{R}`,
       :math:`\mathbf{W} = \mathbf{Op}\,\mathbf{S}`, and
       :math:`\mathbf{Y} = (\mathbf{S}^T \mathbf{Z})^+`, where :math:`+`
       denotes the Moore–Penrose inverse.
    4. Return :math:`\operatorname{tr}(\mathbf{Y} \mathbf{W}^T \mathbf{Z}) +
       \frac{1}{c_3 k} \left[ \operatorname{tr}(\mathbf{G}^T\,\mathbf{Op}\,\mathbf{G})
       - \operatorname{tr}(\mathbf{G}^T\mathbf{Z}\mathbf{Y}\mathbf{W}^T\mathbf{G})\right]`

    The default values for :math:`c_1` and :math:`c_2` are set to
    :math:`1/6` and :math:`1/3`, respectively, but [1]_ suggests :math:`1/4`
    and :math:`1/2`.

    Use the Rademacher sampler unless you know what you are doing.

    .. [1] Meyer, R. A., Musco, C., Musco, C., & Woodruff, D. P. (2021).
        *Hutch++: Optimal Stochastic Trace Estimation*. In Symposium on
        Simplicity in Algorithms (SOSA) (pp. 142–155). Philadelphia, PA:
        Society for Industrial and Applied Mathematics.
        `link <https://arxiv.org/abs/2010.09649>`_

    """
    ncp = get_module(backend)
    m = Op.shape[1]
    neval = int(numpy.round(m * 0.1)) if neval is None else neval

    if sampler not in _SAMPLERS:
        raise NotImplementedError(f"sampler {sampler} not available.")
    sampler_fun = _SAMPLERS[sampler]

    # Budget split: c1*k for S, c2*k for R, remainder for G.
    batch1 = int(numpy.round(neval * c1))
    batch2 = int(numpy.round(neval * c2))
    batch3 = neval - batch1 - batch2
    if batch1 <= 0 or batch2 <= 0 or batch3 <= 0:
        msg = f"Sampler '{sampler}' not supported with {neval} samples."
        msg += " Try increasing it."
        raise ValueError(msg)

    S = sampler_fun(m, batch1, backend_module=ncp).astype(Op.dtype)
    R = sampler_fun(m, batch2, backend_module=ncp).astype(Op.dtype)
    G = sampler_fun(m, batch3, backend_module=ncp).astype(Op.dtype)
    Z = Op @ R
    Wt = (Op @ S).T
    Y = ncp.linalg.pinv(S.T @ Z)
    trace = ncp.zeros(1, dtype=Op.dtype)
    trace += (
        ncp.trace(Y @ Wt @ Z)
        + (ncp.trace(G.T @ (Op @ G)) - ncp.trace(G.T @ Z @ Y @ Wt @ G)) / batch3
    )
    return trace[0]
12,907
34.461538
254
py
pylops
pylops-master/pylops/utils/multiproc.py
__all__ = ["scalability_test"] import time from typing import List, Optional, Tuple import numpy.typing as npt def scalability_test( Op, x: npt.ArrayLike, workers: Optional[List[int]] = None, forward: bool = True, ) -> Tuple[List[float], List[float]]: r"""Scalability test. Small auxiliary routine to test the performance of operators using ``multiprocessing``. This helps identifying the maximum number of workers beyond which no performance gain is observed. Parameters ---------- Op : :obj:`pylops.LinearOperator` Operator to test. It must allow for multiprocessing. x : :obj:`numpy.ndarray`, optional Input vector. workers : :obj:`list`, optional Number of workers to test out. Defaults to `[1, 2, 4]`. forward : :obj:`bool`, optional Apply forward (``True``) or adjoint (``False``) Returns ------- compute_times : :obj:`list` Compute times as function of workers speedup : :obj:`list` Speedup as function of workers """ if workers is None: workers = [1, 2, 4] compute_times = [] speedup = [] for nworkers in workers: print(f"Working with {nworkers} workers...") # update number of workers in operator Op.nproc = nworkers # run forward/adjoint computation starttime = time.time() if forward: _ = Op.matvec(x) else: _ = Op.rmatvec(x) elapsedtime = time.time() - starttime compute_times.append(elapsedtime) speedup.append(compute_times[0] / elapsedtime) Op.pool.close() return compute_times, speedup
1,670
27.322034
77
py
pylops
pylops-master/pylops/utils/typing.py
__all__ = [
    "IntNDArray",
    "NDArray",
    "InputDimsLike",
    "SamplingLike",
    "ShapeLike",
    "DTypeLike",
    "TensorTypeLike",
]

from typing import Sequence, Tuple, Union

import numpy as np
import numpy.typing as npt

from pylops.utils.deps import torch_enabled

# torch is an optional dependency; only import it when available.
if torch_enabled:
    import torch

# Type aliases shared across pylops.
IntNDArray = npt.NDArray[np.int_]  # integer-valued numpy array
NDArray = npt.NDArray  # any numpy array
InputDimsLike = Union[Sequence[int], IntNDArray]  # dims of an operator
SamplingLike = Union[Sequence[float], NDArray]  # per-axis sampling steps
ShapeLike = Tuple[int, ...]  # array/operator shape
DTypeLike = npt.DTypeLike  # anything numpy accepts as a dtype

# TensorTypeLike degrades to None when torch is not installed so that
# annotations referencing it still evaluate.
if torch_enabled:
    TensorTypeLike = torch.Tensor
else:
    TensorTypeLike = None
608
17.454545
48
py
pylops
pylops-master/pylops/utils/utils.py
__all__ = ["Report"]

# scooby is a soft dependency for pylops
from typing import Optional

try:
    from scooby import Report as ScoobyReport
except ImportError:

    class ScoobyReport:
        # Stand-in used when scooby is missing: only tells the user how to
        # install it, while keeping the same constructor signature.
        def __init__(self, additional, core, optional, ncol, text_width, sort):
            print(
                "\nNOTE: `pylops.Report` requires `scooby`. Install it via"
                "\n `pip install scooby` or "
                "`conda install -c conda-forge scooby`.\n"
            )


class Report(ScoobyReport):
    r"""Print date, time, and version information.

    Thin wrapper around ``scooby`` that prints date, time, and package
    version information in any environment (Jupyter notebook, IPython
    console, Python console, QT console), either as an html-table (notebook)
    or as plain text (anywhere).

    Always shown are the OS, number of CPU(s), ``numpy``, ``scipy``,
    ``pylops``, ``sys.version``, and time/date. Additionally shown are, if
    they can be imported, ``IPython``, ``numba``, and ``matplotlib``. MKL
    information is shown when available, and any package passed via
    ``add_pckg`` is appended.

    .. note::

        The package ``scooby`` has to be installed in order to use
        ``Report``: ``pip install scooby`` or
        ``conda install -c conda-forge scooby``.

    Parameters
    ----------
    add_pckg : packages, optional
        Package or list of packages to add to output information (must be
        imported beforehand).
    ncol : int, optional
        Number of package-columns in html table (no effect in text-version);
        Defaults to 3.
    text_width : int, optional
        The text width for non-HTML display modes
    sort : bool, optional
        Sort the packages when the report is shown

    Examples
    --------
    >>> import pytest
    >>> import dateutil
    >>> from pylops import Report
    >>> Report()                            # Default values
    >>> Report(pytest)                      # Provide additional package
    >>> Report([pytest, dateutil], ncol=5)  # Set nr of columns

    """

    def __init__(
        self,
        add_pckg: Optional[list] = None,
        ncol: int = 3,
        text_width: int = 80,
        sort: bool = False,
    ) -> None:
        """Initiate a scooby.Report instance."""
        # Packages always reported.
        mandatory = ["numpy", "scipy", "pylops"]
        # Packages reported only when importable.
        extras = ["IPython", "matplotlib", "numba"]
        super().__init__(
            additional=add_pckg,
            core=mandatory,
            optional=extras,
            ncol=ncol,
            text_width=text_width,
            sort=sort,
        )
2,641
28.032967
79
py
pylops
pylops-master/pylops/utils/tapers.py
__all__ = [
    "hanningtaper",
    "cosinetaper",
    "taper",
    "taper2d",
    "taper3d",
    "tapernd",
]

from typing import Tuple, Union

import numpy as np
import numpy.typing as npt

from pylops.utils.typing import InputDimsLike, NDArray


def hanningtaper(
    nmask: int,
    ntap: int,
) -> npt.ArrayLike:
    r"""1D Hanning taper

    Create unitary mask of length ``nmask`` with Hanning tapering at edges
    of size ``ntap``

    Parameters
    ----------
    nmask : :obj:`int`
        Number of samples of mask
    ntap : :obj:`int`
        Number of samples of hanning tapering at edges

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        taper

    """
    # Both edge tapers must fit inside the mask.
    if ntap > 0:
        if (nmask // ntap) < 2:
            ntap_min = nmask // 2 if nmask % 2 == 0 else (nmask - 1) // 2
            raise ValueError(f"ntap={ntap} must be smaller or equal than {ntap_min}")
    # Take the rising half of a Hanning window for the leading edge...
    han_win = np.hanning(ntap * 2 - 1)
    st_tpr = han_win[
        :ntap,
    ]
    mid_tpr = np.ones(
        [
            nmask - (2 * ntap),
        ]
    )
    # ...and mirror it for the trailing edge.
    end_tpr = np.flipud(st_tpr)
    tpr_1d = np.concatenate([st_tpr, mid_tpr, end_tpr])
    return tpr_1d


def cosinetaper(
    nmask: int,
    ntap: int,
    square: bool = False,
) -> npt.ArrayLike:
    r"""1D Cosine or Cosine square taper

    Create unitary mask of length ``nmask`` with Hanning tapering at edges
    of size ``ntap``

    Parameters
    ----------
    nmask : :obj:`int`
        Number of samples of mask
    ntap : :obj:`int`
        Number of samples of hanning tapering at edges
    square : :obj:`bool`
        Cosine square taper (``True``) or Cosine taper (``False``)

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        taper

    """
    # A single-sample taper degenerates to no taper at all.
    ntap = 0 if ntap == 1 else ntap
    exponent = 1 if not square else 2
    cos_win = (
        0.5
        * (
            np.cos(
                (np.arange(ntap * 2 - 1) - (ntap * 2 - 2) / 2)
                * np.pi
                / ((ntap * 2 - 2) / 2)
            )
            + 1.0
        )
    ) ** exponent
    st_tpr = cos_win[
        :ntap,
    ]
    mid_tpr = np.ones(
        [
            nmask - (2 * ntap),
        ]
    )
    end_tpr = np.flipud(st_tpr)
    tpr_1d = np.concatenate([st_tpr, mid_tpr, end_tpr])
    return tpr_1d


def taper(
    nmask: int,
    ntap: int,
    tapertype: str,
) -> NDArray:
    r"""1D taper

    Create unitary mask of length ``nmask`` with tapering of choice at edges
    of size ``ntap``

    Parameters
    ----------
    nmask : :obj:`int`
        Number of samples of mask
    ntap : :obj:`int`
        Number of samples of hanning tapering at edges
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        taper

    """
    # Dispatch to the requested 1d taper; any unknown type means no taper.
    if tapertype == "hanning":
        tpr_1d = hanningtaper(nmask, ntap)
    elif tapertype == "cosine":
        tpr_1d = cosinetaper(nmask, ntap, False)
    elif tapertype == "cosinesquare":
        tpr_1d = cosinetaper(nmask, ntap, True)
    else:
        tpr_1d = np.ones(nmask)
    return tpr_1d


def taper2d(
    nt: int,
    nmask: int,
    ntap: Union[int, Tuple[int, int]],
    tapertype: str = "hanning",
) -> NDArray:
    r"""2D taper

    Create 2d mask of size :math:`[n_\text{mask} \times n_t]`
    with tapering of size ``ntap`` along the first (and possibly
    second) dimensions

    Parameters
    ----------
    nt : :obj:`int`
        Number of samples along second dimension
    nmask : :obj:`int`
        Number of samples along first dimension
    ntap : :obj:`int` or :obj:`list`
        Number of samples of tapering at edges of first dimension (or both
        dimensions).
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        2d mask with tapering along first dimension
        of size :math:`[n_\text{mask} \times n_t]`

    """
    # create 1d window along first dimension
    tpr_x = taper(
        nmask, ntap[0] if isinstance(ntap, (list, tuple)) else ntap, tapertype
    )

    # create 1d window along second dimension
    if isinstance(ntap, (list, tuple)):
        tpr_t = taper(nt, ntap[1], tapertype)

    # create 2d taper
    if isinstance(ntap, (list, tuple)):
        # replicate taper to second dimension
        tpr_2d = np.outer(tpr_x, tpr_t)
    else:
        # replicate taper to second dimension
        tpr_2d = np.tile(tpr_x[:, np.newaxis], (1, nt))
    return tpr_2d


def taper3d(
    nt: int,
    nmask: Tuple[int, int],
    ntap: Tuple[int, int],
    tapertype: str = "hanning",
) -> NDArray:
    r"""3D taper

    Create 3d mask of size :math:`[n_\text{mask}[0] \times n_\text{mask}[1]
    \times n_t]` with tapering of size ``ntap`` along the first and second
    dimension

    Parameters
    ----------
    nt : :obj:`int`
        Number of time samples of mask along third dimension
    nmask : :obj:`tuple`
        Number of space samples of mask along first and second dimensions
    ntap : :obj:`tuple`
        Number of samples of tapering at edges of first and second dimensions
    tapertype : :obj:`int`
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        3d mask with tapering along first dimension
        of size :math:`[n_\text{mask,0} \times n_\text{mask,1} \times n_t]`

    """
    nmasky, nmaskx = nmask[0], nmask[1]
    ntapy, ntapx = ntap[0], ntap[1]

    # create 1d window
    if tapertype == "hanning":
        tpr_y = hanningtaper(nmasky, ntapy)
        tpr_x = hanningtaper(nmaskx, ntapx)
    elif tapertype == "cosine":
        tpr_y = cosinetaper(nmasky, ntapy, False)
        tpr_x = cosinetaper(nmaskx, ntapx, False)
    elif tapertype == "cosinesquare":
        tpr_y = cosinetaper(nmasky, ntapy, True)
        tpr_x = cosinetaper(nmaskx, ntapx, True)
    else:
        tpr_y = np.ones(nmasky)
        tpr_x = np.ones(nmaskx)

    # combine the two spatial windows into a 2d taper
    tpr_yx = np.outer(tpr_y, tpr_x)

    # replicate taper to third dimension
    tpr_3d = np.tile(tpr_yx[:, :, np.newaxis], (1, nt))
    return tpr_3d


def tapernd(
    nmask: InputDimsLike,
    ntap: InputDimsLike,
    tapertype: str = "hanning",
) -> NDArray:
    r"""ND taper

    Create nd mask of size :math:`[n_\text{mask}[0] \times n_\text{mask}[1]
    \times ... \times n_\text{mask}[N-1]]` with tapering of size ``ntap``
    along all dimensions

    Parameters
    ----------
    nmask : :obj:`tuple`
        Number of space samples of mask along every dimension
    ntap : :obj:`tuple`
        Number of samples of tapering at edges of every dimension
    tapertype : :obj:`int`
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)

    Returns
    -------
    taper : :obj:`numpy.ndarray`
        Nd mask with tapering along first dimension
        of size :math:`[n_\text{mask,0} \times n_\text{mask,1} \times ...
        \times n_\text{mask,N-1}]`

    """
    # create 1d window
    if tapertype == "hanning":
        tpr = [hanningtaper(nm, nt) for nm, nt in zip(nmask, ntap)]
    elif tapertype == "cosine":
        tpr = [cosinetaper(nm, nt, False) for nm, nt in zip(nmask, ntap)]
    elif tapertype == "cosinesquare":
        tpr = [cosinetaper(nm, nt, True) for nm, nt in zip(nmask, ntap)]
    else:
        tpr = [np.ones(nm) for nm in nmask]

    # create nd tapers via repeated outer products
    taper = tpr[-1]
    for tpr_tmp in tpr[:-1][::-1]:
        taper = np.outer(tpr_tmp, taper).reshape(tpr_tmp.size, *taper.shape)
    return taper
25.177474
114
py
pylops
pylops-master/pylops/utils/seismicevents.py
__all__ = [
    "makeaxis",
    "linear2d",
    "parabolic2d",
    "hyperbolic2d",
    "linear3d",
    "hyperbolic3d",
]

from typing import Dict, Tuple, Union

import numpy as np
import numpy.typing as npt
import scipy.signal as filt


def _filterdata(
    d: npt.NDArray, nt: int, wav: npt.ArrayLike, wcenter: int
) -> Tuple[npt.ArrayLike, npt.ArrayLike]:
    r"""Apply filtering to data with wavelet wav"""
    # Convolve along time and shift by the wavelet centre so that events in
    # the raw and filtered data stay aligned; both are trimmed consistently.
    dwav = filt.lfilter(wav, 1, d, axis=-1)
    dwav = dwav[..., wcenter:]
    d = d[..., : (nt - wcenter)]
    return d, dwav


def makeaxis(par: Dict) -> Tuple[npt.NDArray, npt.NDArray, npt.NDArray, npt.NDArray]:
    r"""Create axes t, x, and y axes

    Create space and time axes from dictionary containing initial values
    ``ot``, ``ox``, ``oy``, sampling steps ``dt``, dx``, ``dy`` and number
    of elements ``nt``, nx``, ``ny`` for each axis.

    Parameters
    ----------
    par : :obj:`dict`
        Dictionary containing initial values, sampling steps, and number of
        elements

    Returns
    -------
    t : :obj:`numpy.ndarray`
        Time axis
    t2 : :obj:`numpy.ndarray`
        Symmetric time axis
    x : :obj:`numpy.ndarray`
        x axis
    y : :obj:`numpy.ndarray`
        y axis (``None``, if ``oy``, ``dy`` or ``ny`` are not provided)

    Examples
    --------
    >>> par = {'ox':0, 'dx':2, 'nx':60,
    >>>        'oy':0, 'dy':2, 'ny':100,
    >>>        'ot':0, 'dt':4, 'nt':400}
    >>> # Create axis
    >>> t, t2, x, y = makeaxis(par)

    """
    x = par["ox"] + np.arange(par["nx"]) * par["dx"]
    t = par["ot"] + np.arange(par["nt"]) * par["dt"]
    # t2 spans negative to positive lags (useful for correlations).
    t2 = np.arange(-par["nt"] + 1, par["nt"]) * par["dt"]
    # The y axis is optional; it is only built when its keys are present.
    if "oy" in par.keys():
        y = par["oy"] + np.arange(par["ny"]) * par["dy"]
    else:
        y = None
    return t, t2, x, y


def linear2d(
    x: npt.NDArray,
    t: npt.NDArray,
    v: float,
    t0: Union[float, Tuple[float]],
    theta: Union[float, Tuple[float]],
    amp: Union[float, Tuple[float]],
    wav: npt.NDArray,
) -> Tuple[npt.NDArray, npt.NDArray]:
    r"""Linear 2D events

    Create 2d linear events given propagation velocity, intercept time,
    angle, and amplitude of each event

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        space axis
    t : :obj:`numpy.ndarray`
        time axis
    v : :obj:`float`
        propagation velocity
    t0 : :obj:`tuple` or :obj:`float`
        intercept time at :math:`x=0` of each linear event
    theta : :obj:`tuple` or :obj:`float`
        angle (in degrees) of each linear event
    amp : :obj:`tuple` or :obj:`float`
        amplitude of each linear event
    wav : :obj:`numpy.ndarray`
        wavelet to be applied to data

    Returns
    -------
    d : :obj:`numpy.ndarray`
        data without wavelet of size :math:`[n_x \times n_t]`
    dwav : :obj:`numpy.ndarray`
        data with wavelet of size :math:`[n_x \times n_t]`

    Notes
    -----
    Each event is created using the following relation:

    .. math::
        t_i(x) = t_{0,i} + p_{x,i} x

    where :math:`p_{x,i}=\sin( \theta_i)/v`

    """
    # Promote scalar inputs to single-event tuples.
    if isinstance(t0, (float, int)):
        t0 = (t0,)
    if isinstance(theta, (float, int)):
        theta = (theta,)
    if isinstance(amp, (float, int)):
        amp = (amp,)

    # identify dimensions
    dt = t[1] - t[0]
    wcenter = int(len(wav) / 2)
    nx = np.size(x)
    nt = np.size(t) + wcenter
    nevents = np.size(t0)

    # create events
    d = np.zeros((nx, nt))
    for ievent in range(nevents):
        px = np.sin(np.deg2rad(theta[ievent])) / v
        tevent = t0[ievent] + px * x
        tevent = (tevent - t[0]) / dt
        itevent = tevent.astype(int)
        dtevent = tevent - itevent
        for ix in range(nx):
            if itevent[ix] < nt - 1 and itevent[ix] >= 0:
                # Linearly interpolate the spike between two time samples.
                d[ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix])
                d[ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix]

    # filter events with certain wavelet
    d, dwav = _filterdata(d, nt, wav, wcenter)
    return d, dwav


def parabolic2d(
    x: npt.NDArray,
    t: npt.NDArray,
    t0: Union[float, Tuple[float]],
    px: Union[float, Tuple[float]],
    pxx: Union[float, Tuple[float]],
    amp: Union[float, Tuple[float]],
    wav: npt.NDArray,
) -> Tuple[npt.NDArray, npt.NDArray]:
    r"""Parabolic 2D events

    Create 2d parabolic events given intercept time, slowness, curvature,
    and amplitude of each event

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        space axis
    t : :obj:`numpy.ndarray`
        time axis
    t0 : :obj:`tuple` or :obj:`float`
        intercept time at :math:`x=0` of each parabolic event
    px : :obj:`tuple` or :obj:`float`
        slowness of each parabolic event
    pxx : :obj:`tuple` or :obj:`float`
        curvature of each parabolic event
    amp : :obj:`tuple` or :obj:`float`
        amplitude of each parabolic event
    wav : :obj:`numpy.ndarray`
        wavelet to be applied to data

    Returns
    -------
    d : :obj:`numpy.ndarray`
        data without wavelet of size :math:`[n_x \times n_t]`
    dwav : :obj:`numpy.ndarray`
        data with wavelet of size :math:`[n_x \times n_t]`

    Notes
    -----
    Each event is created using the following relation:

    .. math::
        t_i(x) = t_{0,i} + p_{x,i} x + p_{xx,i} x^2

    """
    # Promote scalar inputs to single-event tuples.
    if isinstance(t0, (float, int)):
        t0 = (t0,)
    if isinstance(px, (float, int)):
        px = (px,)
    if isinstance(pxx, (float, int)):
        pxx = (pxx,)
    if isinstance(amp, (float, int)):
        amp = (amp,)

    # identify dimensions
    dt = t[1] - t[0]
    wcenter = int(len(wav) / 2)
    nx = np.size(x)
    nt = np.size(t) + wcenter
    nevents = np.size(t0)

    # create events
    d = np.zeros((nx, nt))
    for ievent in range(nevents):
        tevent = t0[ievent] + px[ievent] * x + pxx[ievent] * x**2
        tevent = (tevent - t[0]) / dt
        itevent = tevent.astype(int)
        dtevent = tevent - itevent
        for ix in range(nx):
            if itevent[ix] < nt - 1 and itevent[ix] >= 0:
                # Linearly interpolate the spike between two time samples.
                d[ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix])
                d[ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix]

    # filter events with certain wavelet
    d, dwav = _filterdata(d, nt, wav, wcenter)
    return d, dwav


def hyperbolic2d(
    x: npt.NDArray,
    t: npt.NDArray,
    t0: Union[float, Tuple[float]],
    vrms: Union[float, Tuple[float]],
    amp: Union[float, Tuple[float]],
    wav: npt.NDArray,
) -> Tuple[npt.NDArray, npt.NDArray]:
    r"""Hyperbolic 2D events

    Create 2d hyperbolic events given intercept time, root-mean-square
    velocity, and amplitude of each event

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        space axis
    t : :obj:`numpy.ndarray`
        time axis
    t0 : :obj:`tuple` or :obj:`float`
        intercept time at :math:`x=0` of each of hyperbolic event
    vrms : :obj:`tuple` or :obj:`float`
        root-mean-square velocity of each hyperbolic event
    amp : :obj:`tuple` or :obj:`float`
        amplitude of each hyperbolic event
    wav : :obj:`numpy.ndarray`
        wavelet to be applied to data

    Returns
    -------
    d : :obj:`numpy.ndarray`
        data without wavelet of size :math:`[n_x \times n_t]`
    dwav : :obj:`numpy.ndarray`
        data with wavelet of size :math:`[n_x \times n_t]`

    Notes
    -----
    Each event is created using the following relation:

    .. math::
        t_i(x) = \sqrt{t_{0,i}^2 + \frac{x^2}{v_{\text{rms},i}^2}}

    """
    # Promote scalar inputs to single-event tuples.
    if isinstance(t0, (float, int)):
        t0 = (t0,)
    if isinstance(vrms, (float, int)):
        vrms = (vrms,)
    if isinstance(amp, (float, int)):
        amp = (amp,)

    # identify dimensions
    dt = t[1] - t[0]
    wcenter = int(len(wav) / 2)
    nx = np.size(x)
    nt = np.size(t) + wcenter
    nevents = np.size(t0)

    # create events
    d = np.zeros((nx, nt))
    for ievent in range(nevents):
        tevent = np.sqrt(t0[ievent] ** 2 + x**2 / vrms[ievent] ** 2)
        tevent = (tevent - t[0]) / dt
        itevent = tevent.astype(int)
        dtevent = tevent - itevent
        for ix in range(nx):
            if itevent[ix] < nt - 1 and itevent[ix] >= 0:
                # Linearly interpolate the spike between two time samples.
                d[ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix])
                d[ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix]

    # filter events with certain wavelet
    d, dwav = _filterdata(d, nt, wav, wcenter)
    return d, dwav


def linear3d(
    x: npt.NDArray,
    y: npt.NDArray,
    t: npt.NDArray,
    v: Union[float, Tuple[float]],
    t0: Union[float, Tuple[float]],
    theta: Union[float, Tuple[float]],
    phi: Union[float, Tuple[float]],
    amp: Union[float, Tuple[float]],
    wav: npt.NDArray,
) -> Tuple[npt.NDArray, npt.NDArray]:
    r"""Linear 3D events

    Create 3d linear events given propagation velocity, intercept time,
    angles, and amplitude of each event.
Parameters ---------- x : :obj:`numpy.ndarray` space axis in x direction y : :obj:`numpy.ndarray` space axis in y direction t : :obj:`numpy.ndarray` time axis v : :obj:`float` propagation velocity t0 : :obj:`tuple` or :obj:`float` intercept time at :math:`x=0` of each linear event theta : :obj:`tuple` or :obj:`float` angle in x direction (in degrees) of each linear event phi : :obj:`tuple` or :obj:`float` angle in y direction (in degrees) of each linear event amp : :obj:`tuple` or :obj:`float` amplitude of each linear event wav : :obj:`numpy.ndarray` wavelet to be applied to data Returns ------- d : :obj:`numpy.ndarray` data without wavelet of size :math:`[n_y \times n_x \times n_t]` dwav : :obj:`numpy.ndarray` data with wavelet of size :math:`[n_y \times n_x \times n_t]` Notes ----- Each event is created using the following relation: .. math:: t_i(x, y) = t_{0,i} + p_{x,i} x + p_{y,i} y where :math:`p_{x,i}=\frac{1}{v} \sin( \theta_i)\cos( \phi_i)` and :math:`p_{x,i}=\frac{1}{v} \sin( \theta_i)\sin( \phi_i)`. 
""" if isinstance(t0, (float, int)): t0 = (t0,) if isinstance(theta, (float, int)): theta = (theta,) if isinstance(phi, (float, int)): phi = (phi,) if isinstance(amp, (float, int)): amp = (amp,) # identify dimensions dt = t[1] - t[0] wcenter = int(len(wav) / 2) nx = np.size(x) ny = np.size(y) nt = np.size(t) + wcenter nevents = np.size(t0) # create events d = np.zeros((ny, nx, nt)) for ievent in range(nevents): px = np.sin(np.deg2rad(theta[ievent])) * np.cos(np.deg2rad(phi[ievent])) / v py = np.sin(np.deg2rad(theta[ievent])) * np.sin(np.deg2rad(phi[ievent])) / v for iy in range(ny): tevent = t0[ievent] + px * x + py * y[iy] tevent = (tevent - t[0]) / dt itevent = tevent.astype(int) dtevent = tevent - itevent for ix in range(nx): if itevent[ix] < nt - 1 and itevent[ix] >= 0: d[iy, ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix]) d[iy, ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix] # filter events with certain wavelet d, dwav = _filterdata(d, nt, wav, wcenter) return d, dwav def hyperbolic3d( x: npt.NDArray, y: npt.NDArray, t: npt.NDArray, t0: Union[float, Tuple[float]], vrms_x: Union[float, Tuple[float]], vrms_y: Union[float, Tuple[float]], amp: Union[float, Tuple[float]], wav: npt.NDArray, ): r"""Hyperbolic 3D events Create 3d hyperbolic events given intercept time, root-mean-square velocities, and amplitude of each event Parameters ---------- x : :obj:`numpy.ndarray` space axis in x direction y : :obj:`numpy.ndarray` space axis in y direction t : :obj:`numpy.ndarray` time axis t0 : :obj:`tuple` or :obj:`float` intercept time at :math:`x=0` of each of hyperbolic event vrms_x : :obj:`tuple` or :obj:`float` root-mean-square velocity in x direction for each hyperbolic event vrms_y : :obj:`tuple` or :obj:`float` root-mean-square velocity in y direction for each hyperbolic event amp : :obj:`tuple` or :obj:`float` amplitude of each hyperbolic event wav : :obj:`numpy.ndarray` wavelet to be applied to data Returns ------- d : :obj:`numpy.ndarray` data without wavelet 
of size :math:`[n_y \times n_x \times n_t]` dwav : :obj:`numpy.ndarray` data with wavelet of size :math:`[n_y \times n_x \times n_t]` Notes ----- Each event is created using the following relation: .. math:: t_i(x, y) = \sqrt{t_{0,i}^2 + \frac{x^2}{v_{\text{rms}_x, i}^2} + \frac{y^2}{v_{\text{rms}_y, i}^2}} Note that velocities do not have a physical meaning here (compared to the corresponding :func:`pylops.utils.seismicevents.hyperbolic2d`), they rather simply control the curvature of the hyperboloid along the spatial axes. """ if isinstance(t0, (float, int)): t0 = (t0,) if isinstance(vrms_x, (float, int)): vrms_x = (vrms_x,) if isinstance(vrms_y, (float, int)): vrms_y = (vrms_y,) if isinstance(amp, (float, int)): amp = (amp,) # identify dimensions dt = t[1] - t[0] wcenter = int(len(wav) / 2) nx = np.size(x) ny = np.size(y) nt = np.size(t) + wcenter nevents = np.size(t0) # create events d = np.zeros((ny, nx, nt)) for ievent in range(nevents): for iy in range(ny): tevent = np.sqrt( t0[ievent] ** 2 + x**2 / vrms_x[ievent] ** 2 + y[iy] ** 2 / vrms_y[ievent] ** 2 ) tevent = (tevent - t[0]) / dt itevent = tevent.astype(int) dtevent = tevent - itevent for ix in range(nx): if itevent[ix] < nt - 1 and itevent[ix] >= 0: d[iy, ix, itevent[ix]] += amp[ievent] * (1 - dtevent[ix]) d[iy, ix, itevent[ix] + 1] += amp[ievent] * dtevent[ix] # filter events with certain wavelet d, dwav = _filterdata(d, nt, wav, wcenter) return d, dwav
14,379
28.287169
96
py
pylops
pylops-master/pylops/utils/wavelets.py
__all__ = [ "gaussian", "klauder", "ormsby", "ricker", ] import warnings from typing import Callable, Optional, Sequence, Tuple import numpy as np import numpy.typing as npt from scipy.signal import chirp from scipy.signal.windows import gaussian as spgauss def _tcrop(t: npt.ArrayLike) -> npt.ArrayLike: """Crop time axis with even number of samples""" if len(t) % 2 == 0: t = t[:-1] warnings.warn("one sample removed from time axis...") return t def gaussian( t: npt.ArrayLike, std: float = 1.0, ) -> Tuple[npt.ArrayLike, npt.ArrayLike, int]: r"""Gaussian wavelet Create a Gaussian wavelet given time axis ``t`` and standard deviation ``std`` using :py:func:`scipy.signal.windows.gaussian`. Parameters ---------- t : :obj:`numpy.ndarray` Time axis (positive part including zero sample) std : :obj:`float`, optional Standard deviation of gaussian Returns ------- w : :obj:`numpy.ndarray` Wavelet t : :obj:`numpy.ndarray` Symmetric time axis wcenter : :obj:`int` Index of center of wavelet """ t = _tcrop(t) t = np.concatenate((np.flipud(-t[1:]), t), axis=0) w = spgauss(len(t), std=std) wcenter = np.argmax(np.abs(w)) return w, t, wcenter def klauder( t: npt.ArrayLike, f: Sequence[float] = (5.0, 20.0), taper: Optional[Callable] = None, ) -> Tuple[npt.ArrayLike, npt.ArrayLike, int]: r"""Klauder wavelet Create a Klauder wavelet given time axis ``t`` and standard deviation ``std``. This wavelet mimics the autocorrelation of a linear frequency modulated sweep. 
Parameters ---------- t : :obj:`numpy.ndarray` Time axis (positive part including zero sample) f : :obj:`tuple`, optional Frequency sweep taper : :obj:`func`, optional Taper to apply to wavelet (must be a function that takes the size of the window as input Returns ------- w : :obj:`numpy.ndarray` Wavelet t : :obj:`numpy.ndarray` Symmetric time axis wcenter : :obj:`int` Index of center of wavelet """ t = _tcrop(t) t = np.concatenate((np.flipud(-t[1:]), t), axis=0) t1 = t[-1] f1, f2 = f c = chirp(t, f1 + (f2 - f1) / 2.0, t1, f2) w = np.correlate(c, c, mode="same") w = np.squeeze(w) / np.amax(w) wcenter = np.argmax(np.abs(w)) # apply taper if taper is not None: w *= taper(len(t)) return w, t, wcenter def ormsby( t: npt.ArrayLike, f: Sequence[float] = (5.0, 10.0, 45.0, 50.0), taper: Optional[Callable] = None, ) -> Tuple[npt.ArrayLike, npt.ArrayLike, int]: r"""Ormsby wavelet Create a Ormsby wavelet given time axis ``t`` and frequency range defined by four frequencies which parametrize a trapezoidal shape in the frequency spectrum. 
Parameters ---------- t : :obj:`numpy.ndarray` Time axis (positive part including zero sample) f : :obj:`tuple`, optional Frequency range taper : :obj:`func`, optional Taper to apply to wavelet (must be a function that takes the size of the window as input Returns ------- w : :obj:`numpy.ndarray` Wavelet t : :obj:`numpy.ndarray` Symmetric time axis wcenter : :obj:`int` Index of center of wavelet """ def numerator(f, t): """The numerator of the Ormsby wavelet""" return (np.sinc(f * t) ** 2) * ((np.pi * f) ** 2) t = _tcrop(t) t = np.concatenate((np.flipud(-t[1:]), t), axis=0) f1, f2, f3, f4 = f pf43 = (np.pi * f4) - (np.pi * f3) pf21 = (np.pi * f2) - (np.pi * f1) w = ( (numerator(f4, t) / pf43) - (numerator(f3, t) / pf43) - (numerator(f2, t) / pf21) + (numerator(f1, t) / pf21) ) w = w / np.amax(w) wcenter = np.argmax(np.abs(w)) # apply taper if taper is not None: w *= taper(len(t)) return w, t, wcenter def ricker( t: npt.ArrayLike, f0: float = 10, taper: Optional[Callable] = None, ) -> Tuple[npt.ArrayLike, npt.ArrayLike, int]: r"""Ricker wavelet Create a Ricker wavelet given time axis ``t`` and central frequency ``f_0`` Parameters ---------- t : :obj:`numpy.ndarray` Time axis (positive part including zero sample) f0 : :obj:`float`, optional Central frequency taper : :obj:`func`, optional Taper to apply to wavelet (must be a function that takes the size of the window as input Returns ------- w : :obj:`numpy.ndarray` Wavelet t : :obj:`numpy.ndarray` Symmetric time axis wcenter : :obj:`int` Index of center of wavelet """ t = _tcrop(t) t = np.concatenate((np.flipud(-t[1:]), t), axis=0) w = (1 - 2 * (np.pi * f0 * t) ** 2) * np.exp(-((np.pi * f0 * t) ** 2)) wcenter = np.argmax(np.abs(w)) # apply taper if taper is not None: w *= taper(len(t)) return w, t, wcenter
5,072
23.507246
79
py
pylops
pylops-master/pylops/utils/dottest.py
__all__ = ["dottest"] from typing import Optional import numpy as np from pylops.utils.backend import get_module, to_numpy def dottest( Op, nr: Optional[int] = None, nc: Optional[int] = None, rtol: float = 1e-6, atol: float = 1e-21, complexflag: int = 0, raiseerror: bool = True, verb: bool = False, backend: str = "numpy", ) -> bool: r"""Dot test. Generate random vectors :math:`\mathbf{u}` and :math:`\mathbf{v}` and perform dot-test to verify the validity of forward and adjoint operators. This test can help to detect errors in the operator implementation. Parameters ---------- Op : :obj:`pylops.LinearOperator` Linear operator to test. nr : :obj:`int` Number of rows of operator (i.e., elements in data) nc : :obj:`int` Number of columns of operator (i.e., elements in model) rtol : :obj:`float`, optional Relative dottest tolerance atol : :obj:`float`, optional Absolute dottest tolerance .. versionadded:: 2.0.0 complexflag : :obj:`bool`, optional Generate random vectors with * ``0``: Real entries for model and data * ``1``: Complex entries for model and real entries for data * ``2``: Real entries for model and complex entries for data * ``3``: Complex entries for model and data raiseerror : :obj:`bool`, optional Raise error or simply return ``False`` when dottest fails verb : :obj:`bool`, optional Verbosity backend : :obj:`str`, optional Backend used for dot test computations (``numpy`` or ``cupy``). This parameter will be used to choose how to create the random vectors. Returns ------- passed : :obj:`bool` Passed flag. Raises ------ AssertionError If dot-test is not verified within chosen tolerances. Notes ----- A dot-test is mathematical tool used in the development of numerical linear operators. More specifically, a correct implementation of forward and adjoint for a linear operator should verify the following *equality* within a numerical tolerance: .. 
math:: (\mathbf{Op}\,\mathbf{u})^H\mathbf{v} = \mathbf{u}^H(\mathbf{Op}^H\mathbf{v}) """ ncp = get_module(backend) if nr is None: nr = Op.shape[0] if nc is None: nc = Op.shape[1] if (nr, nc) != Op.shape: raise AssertionError("Provided nr and nc do not match operator shape") # make u and v vectors rdtype = np.ones(1, Op.dtype).real.dtype u = ncp.random.randn(nc).astype(rdtype) if complexflag not in (0, 2): u = u + 1j * ncp.random.randn(nc).astype(rdtype) v = ncp.random.randn(nr).astype(rdtype) if complexflag not in (0, 1): v = v + 1j * ncp.random.randn(nr).astype(rdtype) y = Op.matvec(u) # Op * u x = Op.rmatvec(v) # Op'* v if getattr(Op, "clinear", True): yy = ncp.vdot(y, v) # (Op * u)' * v xx = ncp.vdot(u, x) # u' * (Op' * v) else: # Op is only R-linear, so treat complex numbers as elements of R^2 yy = ncp.dot(y.real, v.real) + ncp.dot(y.imag, v.imag) xx = ncp.dot(u.real, x.real) + ncp.dot(u.imag, x.imag) # convert back to numpy (in case cupy arrays were used), make into a numpy # array and extract the first element. This is ugly but allows to handle # complex numbers in subsequent prints also when using cupy arrays. xx, yy = np.array([to_numpy(xx)])[0], np.array([to_numpy(yy)])[0] # evaluate if dot test passed passed = np.isclose(xx, yy, rtol, atol) # verbosity or error raising if (not passed and raiseerror) or verb: passed_status = "passed" if passed else "failed" msg = f"Dot test {passed_status}, v^H(Opu)={yy} - u^H(Op^Hv)={xx}" if not passed and raiseerror: raise AssertionError(msg) else: print(msg) return passed
4,001
29.090226
78
py
pylops
pylops-master/pylops/utils/describe.py
__all__ = ["describe"] import logging import random import string from typing import List, Set, Union from pylops import LinearOperator from pylops.basicoperators import BlockDiag, HStack, VStack from pylops.linearoperator import ( _AdjointLinearOperator, _ProductLinearOperator, _ScaledLinearOperator, _SumLinearOperator, _TransposedLinearOperator, ) from pylops.utils import deps sympy_message = deps.sympy_import("the describe module") if sympy_message is None: from sympy import BlockDiagMatrix, BlockMatrix, MatrixSymbol compositeops = ( LinearOperator, _SumLinearOperator, _ProductLinearOperator, _ScaledLinearOperator, _AdjointLinearOperator, _TransposedLinearOperator, HStack, VStack, BlockDiag, ) logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def _in_notebook() -> bool: """Check if code is running inside notebook""" try: from IPython import get_ipython if "IPKernelApp" not in get_ipython().config: return False except ImportError: return False except AttributeError: return False return True def _assign_name(Op, Ops, names: List[str]) -> str: """Assign name to an operator as provided by the user (or randomly select one when not provided by the user) Parameters ---------- Op : :obj:`pylops.LinearOperator` Linear Operator to assign name to Ops : :obj:`dict` dictionary of Operators found by the _describe method whilst crawling through the composite operator to describe names : :obj:`list` list of currently assigned names Returns ------- name : :obj:`str` Name assigned to operator """ # Add a suffix when all letters of the alphabet are already in use. 
This # decision is made by counting the length of the names list and using the # English vocabulary (26 characters) suffix = str(len(names) // 26) * (len(names) // 26 > 0) # Propose a new name, where a random letter # is chosen if Op does not have a name or the name is set to None if getattr(Op, "name", None) is None: proposedname = random.choice(string.ascii_letters).upper() + suffix else: proposedname = Op.name + suffix if proposedname not in names or (Ops[proposedname][1] == id(Op)): # Assign the proposed name if this is not yet in use or if it is # used by the same operator. Note that an operator may reapper # multiple times in an expression name = proposedname else: # Propose a new name until an unused character is found origname = proposedname while proposedname in names: proposedname = random.choice(string.ascii_letters).upper() + suffix name = proposedname logging.warning( f"The user has used the same name {origname} for two distinct operators, " f"changing name of operator {type(Op).__name__} to {name}..." 
) Op.name = name return name def _describeop(Op, Ops, names: List[str]): """Core steps to describe a single operator Parameters ---------- Op : :obj:`pylops.LinearOperator` Linear Operator to assign name to Ops : :obj:`dict` dictionary of Operators found by the _describe method whilst crawling through the composite operator to describe names : :obj:`list` list of currently assigned names Returns ------- Op0 : :obj:`sympy.MatrixSymbol` Sympy equivalent od Linear Operator ``Op`` Ops_ : :obj:`dict` New or updated dictionary of Operators """ if type(Op) not in compositeops: # A native PyLops operator has been found, assign a name and store # it as MatrixSymbol name = _assign_name(Op, Ops, names) Op0 = MatrixSymbol(name, 1, 1) Ops_ = {name: (type(Op).__name__, id(Op))} elif type(Op) is LinearOperator: # A LinearOperator has been found, either extract Op and start to # describe it or if a name has been given to the operator treat as # it is (this is useful when users do not want an operator to be # further disected into its components name = getattr(Op, "name", None) if name is None: Op0, Ops_, names = _describe(Op.Op, Ops, names) else: Ops_ = {name: (type(Op).__name__, id(Op))} Op0 = MatrixSymbol(name, 1, 1) else: # When finding a composite operator, send it again to the _describe # method Op0, Ops_, names = _describe(Op, Ops, names) return Op0, Ops_ def _describe( Op, Ops, names: Union[List[str], Set[str]], ): """Core steps to describe a composite operator. This is done recursively. 
Parameters ---------- Op : :obj:`pylops.LinearOperator` Linear Operator to assign name to Ops : :obj:`dict` dictionary of Operators found by the _describe method whilst crawling through the composite operator to describe names : :obj:`list` list of currently assigned names Returns ------- Opsym : :obj:`sympy.MatrixSymbol` Sympy equivalent od Linear Operator ``Op`` Ops : :obj:`dict` dictionary of Operators """ # Check if a name has been given to the operator and store it as # MatrixSymbol (this is useful when users do not want an operator to be # further disected into its components) name = getattr(Op, "name", None) if name is not None: Ops[name] = (type(Op).__name__, id(Op)) Opsym = MatrixSymbol(Op.name, 1, 1) names.update(name) return Opsym, Ops, names # Given that no name has been assigned, interpret the operator further if type(Op) not in compositeops: # A native PyLops operator has been found, assign a name # or if a name has been given to the operator treat as # it is and store it as MatrixSymbol name = _assign_name(Op, Ops, list(Ops.keys())) Ops[name] = (type(Op).__name__, id(Op)) Opsym = MatrixSymbol(Op.name, 1, 1) names.update(name) else: if type(Op) is LinearOperator: # A LinearOperator has been found, either extract Op and start to # describe it or if a name has been given to the operator treat as # it is and store it as MatrixSymbol Opsym, Ops_, names = _describe(Op.Op, Ops, names) Ops.update(Ops_) elif type(Op) is _AdjointLinearOperator: # An adjoint LinearOperator has been found, describe it and attach # the adjoint symbol to its sympy representation Opsym, Ops_, names = _describe(Op.args[0], Ops, names) Opsym = Opsym.adjoint() Ops.update(Ops_) elif type(Op) is _TransposedLinearOperator: # A transposed LinearOperator has been found, describe it and # attach the transposed symbol to its sympy representation Opsym, Ops_, names = _describe(Op.args[0], Ops, names) Opsym = Opsym.T Ops.update(Ops_) elif type(Op) is _ScaledLinearOperator: # A scaled 
LinearOperator has been found, describe it and # attach the scaling to its sympy representation. Note that the # scaling could either on the left or right side of the operator, # so we need to try both if isinstance(Op.args[0], LinearOperator): Opsym, Ops_ = _describeop(Op.args[0], Ops, names) Opsym = Op.args[1] * Opsym Ops.update(Ops_) names.update(list(Ops_.keys())) else: Opsym, Ops_ = _describeop(Op.args[1], Ops, names) Opsym = Op.args[1] * Opsym Ops.update(Ops_) names.update(list(Ops_.keys())) elif type(Op) is _SumLinearOperator: # A sum LinearOperator has been found, describe both operators # either side of the plus sign and sum their sympy representations Opsym0, Ops_ = _describeop(Op.args[0], Ops, names) Ops.update(Ops_) names.update(list(Ops_.keys())) Opsym1, Ops_ = _describeop(Op.args[1], Ops, names) Ops.update(Ops_) names.update(list(Ops_.keys())) Opsym = Opsym0 + Opsym1 elif type(Op) is _ProductLinearOperator: # Same as sum LinearOperator but for product Opsym0, Ops_ = _describeop(Op.args[0], Ops, names) Ops.update(Ops_) names.update(list(Ops_.keys())) Opsym1, Ops_ = _describeop(Op.args[1], Ops, names) Ops.update(Ops_) names.update(list(Ops_.keys())) Opsym = Opsym0 * Opsym1 elif type(Op) in (VStack, HStack, BlockDiag): # A special composite operator has been found, stack its components # horizontally, vertically, or along a diagonal Opsyms = [] for op in Op.ops: Opsym, Ops_ = _describeop(op, Ops, names) Opsyms.append(Opsym) names.update(list(Ops_.keys())) Ops.update(Ops_) Ops.update(Ops_) if type(Op) is VStack: Opsym = BlockMatrix([[Opsym] for Opsym in Opsyms]) elif type(Op) is HStack: Opsym = BlockMatrix(Opsyms) elif type(Op) is BlockDiag: Opsym = BlockDiagMatrix(*Opsyms) return Opsym, Ops, names def describe(Op) -> None: r"""Describe a PyLops operator .. versionadded:: 1.17.0 Convert a PyLops operator into a ``sympy`` mathematical formula. This routine is useful both for debugging and educational purposes. 
Note that users can add a name to each operator prior to running the describe method, i.e. ``Op.name='Opname'``. Alternatively, each of the PyLops operator that composes the operator ``Op`` is automatically assigned a name. Moreover, note that the symbols :math:`T` and :math:`\dagger` are used in the mathematical expressions to indicate transposed and adjoint operators, respectively. Parameters ---------- Op : :obj:`pylops.LinearOperator` Linear Operator to describe """ if sympy_message is not None: raise NotImplementedError(sympy_message) # Describe the operator Ops = {} names = set() Opsym, Ops, names = _describe(Op, Ops=Ops, names=names) # Clean up Ops from id Ops = {op: Ops[op][0] for op in Ops.keys()} # Check if this command is run in a Jupyter notebook or normal shell and # display the operator accordingly if _in_notebook(): from IPython.display import display display(Opsym) else: print(Opsym) print("where:", Ops)
10,926
34.82623
86
py
pylops
pylops-master/pylops/utils/backend.py
__all__ = [ "get_module", "get_module_name", "get_array_module", "get_convolve", "get_fftconvolve", "get_oaconvolve", "get_correlate", "get_add_at", "get_block_diag", "get_toeplitz", "get_csc_matrix", "get_sparse_eye", "get_lstsq", "get_complex_dtype", "get_real_dtype", "to_numpy", "to_cupy_conditional", ] from types import ModuleType from typing import Callable import numpy as np import numpy.typing as npt import scipy.fft as sp_fft from scipy.linalg import block_diag, lstsq, toeplitz from scipy.signal import convolve, correlate, fftconvolve, oaconvolve from scipy.sparse import csc_matrix, eye from pylops.utils import deps from pylops.utils.typing import DTypeLike, NDArray if deps.cupy_enabled: import cupy as cp import cupyx import cupyx.scipy.fft as cp_fft from cupyx.scipy.linalg import block_diag as cp_block_diag from cupyx.scipy.linalg import toeplitz as cp_toeplitz from cupyx.scipy.sparse import csc_matrix as cp_csc_matrix from cupyx.scipy.sparse import eye as cp_eye if deps.cusignal_enabled: import cusignal cu_message = "cupy package not installed. Use numpy arrays of " "install cupy." cusignal_message = ( "cusignal package not installed. Use numpy arrays of" "install cusignal." ) def get_module(backend: str = "numpy") -> ModuleType: """Returns correct numerical module based on backend string Parameters ---------- backend : :obj:`str`, optional Backend used for dot test computations (``numpy`` or ``cupy``). This parameter will be used to choose how to create the random vectors. 
Returns ------- mod : :obj:`func` Module to be used to process array (:mod:`numpy` or :mod:`cupy`) """ if backend == "numpy": ncp = np elif backend == "cupy": ncp = cp else: raise ValueError("backend must be numpy or cupy") return ncp def get_module_name(mod: ModuleType) -> str: """Returns name of numerical module based on input numerical module Parameters ---------- mod : :obj:`func` Module to be used to process array (:mod:`numpy` or :mod:`cupy`) Returns ------- backend : :obj:`str`, optional Backend used for dot test computations (``numpy`` or ``cupy``). This parameter will be used to choose how to create the random vectors. """ if mod == np: backend = "numpy" elif mod == cp: backend = "cupy" else: raise ValueError("module must be numpy or cupy") return backend def get_array_module(x: npt.ArrayLike) -> ModuleType: """Returns correct numerical module based on input Parameters ---------- x : :obj:`numpy.ndarray` Array Returns ------- mod : :obj:`func` Module to be used to process array (:mod:`numpy` or :mod:`cupy`) """ if deps.cupy_enabled: return cp.get_array_module(x) else: return np def get_convolve(x: npt.ArrayLike) -> Callable: """Returns correct convolve module based on input Parameters ---------- x : :obj:`numpy.ndarray` Array Returns ------- mod : :obj:`func` Module to be used to process array (:mod:`numpy` or :mod:`cupy`) """ if not deps.cupy_enabled: return convolve if cp.get_array_module(x) == np: return convolve else: if deps.cusignal_enabled: return cusignal.convolution.convolve else: raise ModuleNotFoundError(cusignal_message) def get_fftconvolve(x: npt.ArrayLike) -> Callable: """Returns correct fftconvolve module based on input Parameters ---------- x : :obj:`numpy.ndarray` Array Returns ------- mod : :obj:`func` Module to be used to process array (:mod:`numpy` or :mod:`cupy`) """ if not deps.cupy_enabled: return fftconvolve if cp.get_array_module(x) == np: return fftconvolve else: if deps.cusignal_enabled: return cusignal.convolution.fftconvolve 
        else:
            # NOTE(review): this is the tail of the preceding get_* dispatcher,
            # whose definition starts before this chunk; raised when the array
            # is on GPU but cusignal is not installed
            raise ModuleNotFoundError(cusignal_message)


def get_oaconvolve(x: npt.ArrayLike) -> Callable:
    """Returns correct oaconvolve module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return oaconvolve

    if cp.get_array_module(x) == np:
        return oaconvolve
    else:
        # neither cupy nor cusignal expose an overlap-add convolution
        raise NotImplementedError(
            "oaconvolve not implemented in "
            "cupy/cusignal. Consider using a different"
            "option..."
        )


def get_correlate(x: npt.ArrayLike) -> Callable:
    """Returns correct correlate module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return correlate

    if cp.get_array_module(x) == np:
        return correlate
    else:
        if deps.cusignal_enabled:
            return cusignal.convolution.correlate
        else:
            raise ModuleNotFoundError(cusignal_message)


def get_add_at(x: npt.ArrayLike) -> Callable:
    """Returns correct add.at module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return np.add.at

    if cp.get_array_module(x) == np:
        return np.add.at
    else:
        # cupy's unbuffered scatter-add equivalent of np.add.at
        return cupyx.scatter_add


def get_block_diag(x: npt.ArrayLike) -> Callable:
    """Returns correct block_diag module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return block_diag

    if cp.get_array_module(x) == np:
        return block_diag
    else:
        return cp_block_diag


def get_toeplitz(x: npt.ArrayLike) -> Callable:
    """Returns correct toeplitz module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return toeplitz

    if cp.get_array_module(x) == np:
        return toeplitz
    else:
        return cp_toeplitz


def get_csc_matrix(x: npt.ArrayLike) -> Callable:
    """Returns correct csc_matrix module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return csc_matrix

    if cp.get_array_module(x) == np:
        return csc_matrix
    else:
        return cp_csc_matrix


def get_sparse_eye(x: npt.ArrayLike) -> Callable:
    """Returns correct sparse eye module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return eye

    if cp.get_array_module(x) == np:
        return eye
    else:
        return cp_eye


def get_lstsq(x: npt.ArrayLike) -> Callable:
    """Returns correct lstsq module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return lstsq

    if cp.get_array_module(x) == np:
        return lstsq
    else:
        return cp.linalg.lstsq


def get_sp_fft(x: npt.ArrayLike) -> Callable:
    """Returns correct scipy.fft module based on input

    Parameters
    ----------
    x : :obj:`numpy.ndarray`
        Array

    Returns
    -------
    mod : :obj:`func`
        Module to be used to process array (:mod:`numpy` or :mod:`cupy`)

    """
    if not deps.cupy_enabled:
        return sp_fft

    if cp.get_array_module(x) == np:
        return sp_fft
    else:
        return cp_fft


def get_complex_dtype(dtype: DTypeLike) -> DTypeLike:
    """Returns a complex type in the precision of the input type.

    Parameters
    ----------
    dtype : :obj:`numpy.dtype`
        Input dtype.

    Returns
    -------
    complex_dtype : :obj:`numpy.dtype`
        Complex output type.

    """
    # promote by adding a complex unit: float32 -> complex64, float64 -> complex128
    return (np.ones(1, dtype=dtype) + 1j * np.ones(1, dtype=dtype)).dtype


def get_real_dtype(dtype: DTypeLike) -> DTypeLike:
    """Returns a real type in the precision of the input type.

    Parameters
    ----------
    dtype : :obj:`numpy.dtype`
        Input dtype.

    Returns
    -------
    real_dtype : :obj:`numpy.dtype`
        Real output type.

    """
    # np.real drops the imaginary part: complex64 -> float32, complex128 -> float64
    return np.real(np.ones(1, dtype)).dtype


def to_numpy(x: NDArray) -> NDArray:
    """Convert x to numpy array

    Parameters
    ----------
    x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray`
        Array to evaluate

    Returns
    -------
    x : :obj:`cupy.ndarray`
        Converted array

    """
    if deps.cupy_enabled:
        # only copy back to host when x actually lives on GPU
        if cp.get_array_module(x) == cp:
            x = cp.asnumpy(x)
    return x


def to_cupy_conditional(x: npt.ArrayLike, y: npt.ArrayLike) -> NDArray:
    """Convert y to cupy array conditional to x being a cupy array

    Parameters
    ----------
    x : :obj:`numpy.ndarray` or :obj:`cupy.ndarray`
        Array to evaluate
    y : :obj:`numpy.ndarray`
        Array to convert

    Returns
    -------
    y : :obj:`cupy.ndarray`
        Converted array

    """
    if deps.cupy_enabled:
        # move y to device only when x is already on GPU and y is still on host
        if cp.get_array_module(x) == cp and cp.get_array_module(y) == np:
            y = cp.asarray(y)
    return y
10,258
21.302174
79
py
pylops
pylops-master/pylops/utils/metrics.py
__all__ = [ "mae", "mse", "snr", "psnr", ] import numpy as np def mae(xref, xcmp): """Mean Absolute Error (MAE) Compute Mean Absolute Error between two vectors Parameters ---------- xref : :obj:`numpy.ndarray` Reference vector xcmp : :obj:`numpy.ndarray` Comparison vector Returns ------- mae : :obj:`float` Mean Absolute Error """ mae = np.mean(np.abs(xref - xcmp)) return mae def mse(xref, xcmp): """Mean Square Error (MSE) Compute Mean Square Error between two vectors Parameters ---------- xref : :obj:`numpy.ndarray` Reference vector xcmp : :obj:`numpy.ndarray` Comparison vector Returns ------- mse : :obj:`float` Mean Square Error """ mse = np.mean(np.abs(xref - xcmp) ** 2) return mse def snr(xref, xcmp): """Signal to Noise Ratio (SNR) Compute Signal to Noise Ratio between two vectors Parameters ---------- xref : :obj:`numpy.ndarray` Reference vector xcmp : :obj:`numpy.ndarray` Comparison vector Returns ------- snr : :obj:`float` Signal to Noise Ratio of ``xcmp`` with respect to ``xref`` """ xrefv = np.mean(np.abs(xref) ** 2) snr = 10.0 * np.log10(xrefv / mse(xref, xcmp)) return snr def psnr(xref, xcmp, xmax=None): """Peak Signal to Noise Ratio (PSNR) Compute Peak Signal to Noise Ratio between two vectors. Parameters ---------- xref : :obj:`numpy.ndarray` Reference vector xcmp : :obj:`numpy.ndarray` Comparison vector xmax : :obj:`float`, optional Maximum value to use. If ``None``, the actual maximum of the reference vector is used Returns ------- psnr : :obj:`float` Peak Signal to Noise Ratio of ``xcmp`` with respect to ``xref`` """ if xmax is None: xmax = xref.max() psrn = 10.0 * np.log10(xmax**2 / mse(xref, xcmp)) return psrn
2,014
18.563107
69
py
pylops
pylops-master/pylops/utils/signalprocessing.py
__all__ = [ "convmtx", "nonstationary_convmtx", "slope_estimate", "dip_estimate", ] from typing import Tuple import numpy as np import numpy.typing as npt from scipy.ndimage import gaussian_filter from pylops.utils.backend import get_array_module, get_toeplitz from pylops.utils.typing import NDArray def convmtx(h: npt.ArrayLike, n: int) -> NDArray: r"""Convolution matrix Equivalent of `MATLAB's convmtx function <http://www.mathworks.com/help/signal/ref/convmtx.html>`_ . Makes a dense convolution matrix :math:`\mathbf{C}` such that the dot product ``np.dot(C, x)`` is the convolution of the filter :math:`h` and the input signal :math:`x`. Parameters ---------- h : :obj:`np.ndarray` Convolution filter (1D array) n : :obj:`int` Number of columns (if :math:`\text{len}(h) < n`) or rows (if :math:`\text{len}(h) \geq n`) of convolution matrix Returns ------- C : :obj:`np.ndarray` Convolution matrix of size :math:`\text{len}(h)+n-1 \times n` (if :math:`\text{len}(h) < n`) or :math:`n \times \text{len}(h)+n-1` (if :math:`\text{len}(h) \geq n`) """ ncp = get_array_module(h) if len(h) < n: col_1 = ncp.r_[h[0], ncp.zeros(n - 1, dtype=h.dtype)] row_1 = ncp.r_[h, ncp.zeros(n - 1, dtype=h.dtype)] else: row_1 = ncp.r_[h[0], ncp.zeros(n - 1, dtype=h.dtype)] col_1 = ncp.r_[h, ncp.zeros(n - 1, dtype=h.dtype)] C = get_toeplitz(h)(col_1, row_1) return C def nonstationary_convmtx( H: npt.ArrayLike, n: int, hc: int = 0, pad: Tuple[int] = (0, 0), ) -> NDArray: r"""Convolution matrix from a bank of filters Makes a dense convolution matrix :math:`\mathbf{C}` such that the dot product ``np.dot(C, x)`` is the nonstationary convolution of the bank of filters :math:`H=[h_1, h_2, h_n]` and the input signal :math:`x`. 
Parameters ---------- H : :obj:`np.ndarray` Convolution filters (2D array of shape :math:`[n_\text{filters} \times n_{h}]` n : :obj:`int` Number of columns of convolution matrix hc : :obj:`np.ndarray`, optional Index of center of first filter pad : :obj:`np.ndarray` Zero-padding to apply to the bank of filters before and after the provided values (use it to avoid wrap-around or pass filters with enough padding) Returns ------- C : :obj:`np.ndarray` Convolution matrix """ ncp = get_array_module(H) H = ncp.pad(H, ((0, 0), pad), mode="constant") C = ncp.array([ncp.roll(h, ih) for ih, h in enumerate(H)]) C = C[:, pad[0] + hc : pad[0] + hc + n].T # take away edges return C def slope_estimate( d: npt.ArrayLike, dz: float = 1.0, dx: float = 1.0, smooth: int = 5, eps: float = 0.0, dips: bool = False, ) -> Tuple[NDArray, NDArray]: r"""Local slope estimation Local slopes are estimated using the *Structure Tensor* algorithm [1]_. Slopes are returned as :math:`\tan\theta`, defined in a RHS coordinate system with :math:`z`-axis pointing upward. .. note:: For stability purposes, it is important to ensure that the orders of magnitude of the samplings are similar. Parameters ---------- d : :obj:`np.ndarray` Input dataset of size :math:`n_z \times n_x` dz : :obj:`float`, optional Sampling in :math:`z`-axis, :math:`\Delta z` .. warning:: Since version 1.17.0, defaults to 1.0. dx : :obj:`float`, optional Sampling in :math:`x`-axis, :math:`\Delta x` .. warning:: Since version 1.17.0, defaults to 1.0. smooth : :obj:`float` or :obj:`np.ndarray`, optional Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. .. warning:: Default changed in version 1.17.0 to 5 from previous value of 20. eps : :obj:`float`, optional .. versionadded:: 1.17.0 Regularization term. 
All slopes where :math:`|g_{zx}| < \epsilon \max_{(x, z)} \{|g_{zx}|, |g_{zz}|, |g_{xx}|\}` are set to zero. All anisotropies where :math:`\lambda_\text{max} < \epsilon` are also set to zero. See Notes. When using with small values of ``smooth``, start from a very small number (e.g. 1e-10) and start increasing by a power of 10 until results are satisfactory. dips : :obj:`bool`, optional .. versionadded:: 2.0.0 Return dips (``True``) instead of slopes (``False``). Returns ------- slopes : :obj:`np.ndarray` Estimated local slopes. The unit is that of :math:`\Delta z/\Delta x`. .. warning:: Prior to version 1.17.0, always returned dips. anisotropies : :obj:`np.ndarray` Estimated local anisotropies: :math:`1-\lambda_\text{min}/\lambda_\text{max}` .. note:: Since 1.17.0, changed name from ``linearity`` to ``anisotropies``. Definition remains the same. Notes ----- For each pixel of the input dataset :math:`\mathbf{d}` the local gradients :math:`g_z = \frac{\partial \mathbf{d}}{\partial z}` and :math:`g_x = \frac{\partial \mathbf{d}}{\partial x}` are computed and used to define the following three quantities: .. math:: \begin{align} g_{zz} &= \left(\frac{\partial \mathbf{d}}{\partial z}\right)^2\\ g_{xx} &= \left(\frac{\partial \mathbf{d}}{\partial x}\right)^2\\ g_{zx} &= \frac{\partial \mathbf{d}}{\partial z}\cdot\frac{\partial \mathbf{d}}{\partial x} \end{align} They are then spatially smoothed and at each pixel their smoothed versions are arranged in a :math:`2 \times 2` matrix called the *smoothed gradient-square tensor*: .. math:: \mathbf{G} = \begin{bmatrix} g_{zz} & g_{zx} \\ g_{zx} & g_{xx} \end{bmatrix} Local slopes can be expressed as :math:`p = \frac{\lambda_\text{max} - g_{zz}}{g_{zx}}`, where :math:`\lambda_\text{max}` is the largest eigenvalue of :math:`\mathbf{G}`. Similarly, local dips can be expressed as :math:`\tan(2\theta) = 2g_{zx} / (g_{zz} - g_{xx})`. Moreover, we can obtain a measure of local anisotropy, defined as .. 
math:: a = 1-\lambda_\text{min}/\lambda_\text{max} where :math:`\lambda_\text{min}` is the smallest eigenvalue of :math:`\mathbf{G}`. A value of :math:`a = 0` indicates perfect isotropy whereas :math:`a = 1` indicates perfect anisotropy. .. [1] Van Vliet, L. J., Verbeek, P. W., "Estimators for orientation and anisotropy in digitized images", Journal ASCI Imaging Workshop. 1995. """ slopes = np.zeros_like(d) anisos = np.zeros_like(d) gz, gx = np.gradient(d, dz, dx) gzz, gzx, gxx = gz * gz, gz * gx, gx * gx # smoothing gzz = gaussian_filter(gzz, sigma=smooth) gzx = gaussian_filter(gzx, sigma=smooth) gxx = gaussian_filter(gxx, sigma=smooth) gmax = max(gzz.max(), gxx.max(), np.abs(gzx).max()) if gmax <= eps: return np.zeros_like(d), anisos gzz /= gmax gzx /= gmax gxx /= gmax lcommon1 = 0.5 * (gzz + gxx) lcommon2 = 0.5 * np.sqrt((gzz - gxx) ** 2 + 4 * gzx**2) l1 = lcommon1 + lcommon2 l2 = lcommon1 - lcommon2 regdata = l1 > eps anisos[regdata] = 1 - l2[regdata] / l1[regdata] if not dips: slopes = 0.5 * np.arctan2(2 * gzx, gzz - gxx) else: regdata = np.abs(gzx) > eps slopes[regdata] = (l1 - gzz)[regdata] / gzx[regdata] return slopes, anisos def dip_estimate( d: npt.ArrayLike, dz: float = 1.0, dx: float = 1.0, smooth: int = 5, eps: float = 0.0, ) -> Tuple[NDArray, NDArray]: r"""Local dip estimation Local dips are estimated using the *Structure Tensor* algorithm [1]_. .. note:: For stability purposes, it is important to ensure that the orders of magnitude of the samplings are similar. Parameters ---------- d : :obj:`np.ndarray` Input dataset of size :math:`n_z \times n_x` dz : :obj:`float`, optional Sampling in :math:`z`-axis, :math:`\Delta z` dx : :obj:`float`, optional Sampling in :math:`x`-axis, :math:`\Delta x` smooth : :obj:`float` or :obj:`np.ndarray`, optional Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. 
eps : :obj:`float`, optional Regularization term. All anisotropies where :math:`\lambda_\text{max} < \epsilon` are also set to zero. See Notes. When using with small values of ``smooth``, start from a very small number (e.g. 1e-10) and start increasing by a power of 10 until results are satisfactory. Returns ------- dips : :obj:`np.ndarray` Estimated local dips. The unit is radians, in the range of :math:`-\frac{\pi}{2}` to :math:`\frac{\pi}{2}`. anisotropies : :obj:`np.ndarray` Estimated local anisotropies: :math:`1-\lambda_\text{min}/\lambda_\text{max}` Notes ----- Thin wrapper around ``pylops.utils.signalprocessing.dip_estimate`` with ``slopes==True``. See the Notes of ``pylops.utils.signalprocessing.dip_estimate`` for details. .. [1] Van Vliet, L. J., Verbeek, P. W., "Estimators for orientation and anisotropy in digitized images", Journal ASCI Imaging Workshop. 1995. """ dips, anisos = slope_estimate(d, dz=dz, dx=dx, smooth=smooth, eps=eps, dips=True) return dips, anisos
9,848
31.939799
99
py
pylops
pylops-master/pylops/utils/__init__.py
# isort: skip_file from .backend import * from .deps import * from .dottest import * from .estimators import * from .metrics import * from .multiproc import * from .utils import * from .typing import *
202
19.3
25
py
pylops
pylops-master/pylops/utils/deps.py
__all__ = [ "cupy_enabled", "cusignal_enabled", "devito_enabled", "numba_enabled", "pyfftw_enabled", "pywt_enabled", "skfmm_enabled", "spgl1_enabled", "sympy_enabled", "torch_enabled", ] import os from importlib import util # check package availability cupy_enabled = ( util.find_spec("cupy") is not None and int(os.getenv("CUPY_PYLOPS", 1)) == 1 ) cusignal_enabled = ( util.find_spec("cusignal") is not None and int(os.getenv("CUSIGNAL_PYLOPS", 1)) == 1 ) devito_enabled = util.find_spec("devito") is not None numba_enabled = util.find_spec("numba") is not None pyfftw_enabled = util.find_spec("pyfftw") is not None pywt_enabled = util.find_spec("pywt") is not None skfmm_enabled = util.find_spec("skfmm") is not None spgl1_enabled = util.find_spec("spgl1") is not None sympy_enabled = util.find_spec("sympy") is not None torch_enabled = util.find_spec("torch") is not None # error message at import of available package def devito_import(message): if devito_enabled: try: import devito # noqa: F401 devito_message = None except Exception as e: devito_message = f"Failed to import devito (error:{e})." else: devito_message = ( f"Devito not available. " f"In order to be able to use " f'{message} run "pip install devito".' ) return devito_message def numba_import(message): if numba_enabled: try: import numba # noqa: F401 numba_message = None except Exception as e: numba_message = f"Failed to import numba (error:{e}), use numpy." else: numba_message = ( "Numba not available, reverting to numpy. " "In order to be able to use " f"{message} run " f'"pip install numba" or ' f'"conda install numba".' ) return numba_message def pyfftw_import(message): if pyfftw_enabled: try: import pyfftw # noqa: F401 pyfftw_message = None except Exception as e: pyfftw_message = f"Failed to import pyfftw (error:{e}), use numpy." else: pyfftw_message = ( "Pyfftw not available, reverting to numpy. 
" "In order to be able to use " f"{message} run " f'"pip install pyFFTW" or ' f'"conda install -c conda-forge pyfftw".' ) return pyfftw_message def pywt_import(message): if pywt_enabled: try: import pywt # noqa: F401 pywt_message = None except Exception as e: pywt_message = f"Failed to import pywt (error:{e})." else: pywt_message = ( "Pywt not available. " "In order to be able to use " f"{message} run " f'"pip install PyWavelets" or ' f'"conda install pywavelets".' ) return pywt_message def skfmm_import(message): if skfmm_enabled: try: import skfmm # noqa: F401 skfmm_message = None except Exception as e: skfmm_message = f"Failed to import skfmm (error:{e})." else: skfmm_message = ( f"Skfmm package not installed. In order to be able to use " f"{message} run " f'"pip install scikit-fmm" or ' f'"conda install -c conda-forge scikit-fmm".' ) return skfmm_message def spgl1_import(message): if spgl1_enabled: try: import spgl1 # noqa: F401 spgl1_message = None except Exception as e: spgl1_message = f"Failed to import spgl1 (error:{e})." else: spgl1_message = ( f"Spgl1 package not installed. In order to be able to use " f"{message} run " f'"pip install spgl1".' ) return spgl1_message def sympy_import(message): if sympy_enabled: try: import sympy # noqa: F401 sympy_message = None except Exception as e: sympy_message = f"Failed to import sympy (error:{e})." else: sympy_message = ( f"Sympy package not installed. In order to be able to use " f"{message} run " f'"pip install sympy".' ) return sympy_message
4,378
26.540881
88
py
pylops
pylops-master/pylops/utils/decorators.py
__all__ = [ "disable_ndarray_multiplication", "add_ndarray_support_to_solver", "reshaped", "count", ] from functools import wraps from typing import Callable, Optional from pylops.config import disabled_ndarray_multiplication def disable_ndarray_multiplication(func: Callable) -> Callable: """Decorator which disables ndarray multiplication. Parameters ---------- func : :obj:`callable` Generic function Returns ------- wrapper : :obj:`callable` Decorated function """ @wraps(func) def wrapper(*args, **kwargs): # SciPy-type signature with disabled_ndarray_multiplication(): out = func(*args, **kwargs) return out return wrapper def add_ndarray_support_to_solver(func: Callable) -> Callable: """Decorator which converts a solver-type function that only supports a 1d-array into one that supports one (dimsd-shaped) ndarray. Parameters ---------- func : :obj:`callable` Solver type function. Its signature must be ``func(A, b, *args, **kwargs)``. Its output must be a result-type tuple: ``(xinv, ...)``. Returns ------- wrapper : :obj:`callable` Decorated function """ @wraps(func) def wrapper(A, b, *args, **kwargs): # SciPy-type signature if "x0" in kwargs and kwargs["x0"] is not None: kwargs["x0"] = kwargs["x0"].ravel() with disabled_ndarray_multiplication(): res = list(func(A, b.ravel(), *args, **kwargs)) res[0] = res[0].reshape(getattr(A, "dims", (A.shape[1],))) return tuple(res) return wrapper def reshaped( func: Optional[Callable] = None, forward: Optional[bool] = None, swapaxis: bool = False, ) -> Callable: """Decorator for the common reshape/flatten pattern used in many operators. Parameters ---------- func : :obj:`callable`, optional Function to be decorated when no arguments are provided forward : :obj:`bool`, optional Reshape to ``dims`` if True, otherwise to ``dimsd``. If not provided, the decorated function's name will be inspected to infer the mode. Any operator having a name with 'rmat' as substring or whose name is 'div' or '__truediv__' will reshape to ``dimsd``. 
swapaxis : :obj:`bool`, optional If True, swaps the last axis of the input array of the decorated function with ``self.axis``. Only use if the decorated LinearOperator has ``axis`` attribute. Notes ----- A ``_matvec`` (forward) function can be simplified from .. code-block:: python def _matvec(self, x): x = x.reshape(self.dims) x = x.swapaxes(self.axis, -1) y = do_things_to_reshaped_swapped(y) y = y.swapaxes(self.axis, -1) y = y.ravel() return y to .. code-block:: python @reshaped(swapaxis=True) def _matvec(self, x): y = do_things_to_reshaped_swapped(y) return y When the decorator has no arguments, it can be called without parenthesis, e.g.: .. code-block:: python @reshaped def _matvec(self, x): y = do_things_to_reshaped(y) return y """ def decorator(f): if forward is None: fwd = ( "rmat" not in f.__name__ and f.__name__ != "div" and f.__name__ != "__truediv__" ) else: fwd = forward inp_dims = "dims" if fwd else "dimsd" @wraps(f) def wrapper(self, x): x = x.reshape(getattr(self, inp_dims)) if swapaxis: x = x.swapaxes(self.axis, -1) y = f(self, x) if swapaxis: y = y.swapaxes(self.axis, -1) y = y.ravel() return y return wrapper if func is not None: return decorator(func) return decorator def count( func: Optional[Callable] = None, forward: Optional[bool] = None, matmat: bool = False, ) -> Callable: """Decorator used to count the number of forward and adjoint performed by an operator. Parameters ---------- func : :obj:`callable`, optional Function to be decorated when no arguments are provided forward : :obj:`bool`, optional Whether to count the forward (``True``) or adjoint (``False``). If not provided, the decorated function's name will be inspected to infer the mode. Any operator having a name with 'rmat' as substring will be defaulted to False matmat : :obj:`bool`, optional Whether to count the matmat (``True``) or matvec (``False``). If not provided, the decorated function's name will be inspected to infer the mode. 
Any operator having a name with 'matvec' as substring will be defaulted to False """ def decorator(f): if forward is None: fwd = "rmat" not in f.__name__ else: fwd = forward if matmat is None: mat = "matvec" not in f.__name__ else: mat = matmat @wraps(f) def wrapper(self, x): # perform operation y = f(self, x) # increase count of the associated operation if fwd: if mat: self.matmat_count += 1 self.matvec_count -= x.shape[-1] else: self.matvec_count += 1 else: if mat: self.rmatmat_count += 1 self.rmatvec_count -= x.shape[-1] else: self.rmatvec_count += 1 return y return wrapper if func is not None: return decorator(func) return decorator
5,893
27.61165
102
py
pylops
pylops-master/pylops/signalprocessing/patch2d.py
__all__ = [
    "patch2d_design",
    "Patch2D",
]

import logging
from typing import Optional, Sequence, Tuple

import numpy as np

from pylops import LinearOperator
from pylops.basicoperators import BlockDiag, Diagonal, HStack, Restriction
from pylops.signalprocessing.sliding2d import _slidingsteps
from pylops.utils.tapers import taper2d
from pylops.utils.typing import InputDimsLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def patch2d_design(
    dimsd: InputDimsLike,
    nwin: Tuple[int, int],
    nover: Tuple[int, int],
    nop: Tuple[int, int],
) -> Tuple[
    Tuple[int, int],
    Tuple[int, int],
    Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]],
    Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]],
]:
    """Design Patch2D operator

    This routine can be used prior to creating the
    :class:`pylops.signalprocessing.Patch2D` operator to identify the correct
    number of windows to be used based on the dimension of the data
    (``dimsd``), dimension of the window (``nwin``), overlap (``nover``), and
    dimension of the operator acting in the model space.

    Parameters
    ----------
    dimsd : :obj:`tuple`
        Shape of 2-dimensional data.
    nwin : :obj:`tuple`
        Number of samples of window.
    nover : :obj:`tuple`
        Number of samples of overlapping part of window.
    nop : :obj:`tuple`
        Size of model in the transformed domain.

    Returns
    -------
    nwins : :obj:`tuple`
        Number of windows.
    dims : :obj:`tuple`
        Shape of 2-dimensional model.
    mwins_inends : :obj:`tuple`
        Start and end indices for model patches (stored as tuple of tuples).
    dwins_inends : :obj:`tuple`
        Start and end indices for data patches (stored as tuple of tuples).

    """
    # data windows: overlapping windows along both data axes
    dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0])
    dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1])
    dwins_inends = ((dwin0_ins, dwin0_ends), (dwin1_ins, dwin1_ends))
    nwins0 = len(dwin0_ins)
    nwins1 = len(dwin1_ins)
    nwins = (nwins0, nwins1)

    # model windows: non-overlapping, one nop-sized patch per data window
    dims = (nwins0 * nop[0], nwins1 * nop[1])
    mwin0_ins, mwin0_ends = _slidingsteps(dims[0], nop[0], 0)
    mwin1_ins, mwin1_ends = _slidingsteps(dims[1], nop[1], 0)
    mwins_inends = ((mwin0_ins, mwin0_ends), (mwin1_ins, mwin1_ends))

    # print information about patching
    logging.warning("%d-%d windows required...", nwins0, nwins1)
    logging.warning(
        "data wins - start:%s, end:%s / start:%s, end:%s",
        dwin0_ins,
        dwin0_ends,
        dwin1_ins,
        dwin1_ends,
    )
    logging.warning(
        "model wins - start:%s, end:%s / start:%s, end:%s",
        mwin0_ins,
        mwin0_ends,
        mwin1_ins,
        mwin1_ends,
    )
    return nwins, dims, mwins_inends, dwins_inends


def Patch2D(
    Op: LinearOperator,
    dims: InputDimsLike,
    dimsd: InputDimsLike,
    nwin: Tuple[int, int],
    nover: Tuple[int, int],
    nop: Tuple[int, int],
    tapertype: str = "hanning",
    scalings: Optional[Sequence[float]] = None,
    name: str = "P",
) -> LinearOperator:
    """2D Patch transform operator.

    Apply a transform operator ``Op`` repeatedly to patches of the model
    vector in forward mode and patches of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into
    patches, each patch is transformed, and patches are then recombined
    together. Both model and data are internally reshaped and
    interpreted as 2-dimensional arrays: each patch contains a portion
    of the array in both the first and second dimension.

    This operator can be used to perform local, overlapping transforms (e.g.,
    :obj:`pylops.signalprocessing.FFT2D`
    or :obj:`pylops.signalprocessing.Radon2D`) on 2-dimensional arrays.

    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to first run ``patch2d_design`` to obtain
       the corresponding ``dims`` and number of windows.

    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire data.
       The start and end indices of each window will be displayed and returned
       with running ``patch2d_design``.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dims : :obj:`tuple`
        Shape of 2-dimensional model. Note that ``dims[0]`` and ``dims[1]``
        should be multiple of the model size of the transform in their
        respective dimensions
    dimsd : :obj:`tuple`
        Shape of 2-dimensional data
    nwin : :obj:`tuple`
        Number of samples of window
    nover : :obj:`tuple`
        Number of samples of overlapping part of window
    nop : :obj:`tuple`
        Size of model in the transformed domain
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    scalings : :obj:`tuple` or :obj:`list`, optional
        Set of scalings to apply to each patch. If ``None``, no scale will be
        applied
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator

    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).

    See Also
    --------
    Sliding1D: 1D Sliding transform operator.
    Sliding2D: 2D Sliding transform operator.
    Sliding3D: 3D Sliding transform operator.
    Patch3D: 3D Patching transform operator.

    """
    # data windows
    dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0])
    dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1])
    nwins0 = len(dwin0_ins)
    nwins1 = len(dwin1_ins)
    nwins = nwins0 * nwins1

    # check patching
    if nwins0 * nop[0] != dims[0] or nwins1 * nop[1] != dims[1]:
        raise ValueError(
            f"Model shape (dims={dims}) is not consistent with chosen "
            f"number of windows. Run patch2d_design to identify the "
            f"correct number of windows for the current "
            "model size..."
        )

    # create tapers; interior windows get the full 2d taper, while edge and
    # corner windows have the border-facing side(s) flattened by copying the
    # central (fully-weighted) row/column of the taper over the overlap region
    if tapertype is not None:
        tap = taper2d(nwin[1], nwin[0], nover, tapertype=tapertype).astype(Op.dtype)
        taps = {itap: tap for itap in range(nwins)}
        # topmost tapers
        taptop = tap.copy()
        taptop[: nover[0]] = tap[nwin[0] // 2]
        for itap in range(0, nwins1):
            taps[itap] = taptop
        # bottommost tapers
        tapbottom = tap.copy()
        tapbottom[-nover[0] :] = tap[nwin[0] // 2]
        for itap in range(nwins - nwins1, nwins):
            taps[itap] = tapbottom
        # leftmost tapers
        tapleft = tap.copy()
        tapleft[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis]
        for itap in range(0, nwins, nwins1):
            taps[itap] = tapleft
        # rightmost tapers
        tapright = tap.copy()
        tapright[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis]
        for itap in range(nwins1 - 1, nwins, nwins1):
            taps[itap] = tapright
        # lefttopcorner taper
        taplefttop = tap.copy()
        taplefttop[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis]
        taplefttop[: nover[0]] = taplefttop[nwin[0] // 2]
        taps[0] = taplefttop
        # righttopcorner taper
        taprighttop = tap.copy()
        taprighttop[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis]
        taprighttop[: nover[0]] = taprighttop[nwin[0] // 2]
        taps[nwins1 - 1] = taprighttop
        # leftbottomcorner taper
        tapleftbottom = tap.copy()
        tapleftbottom[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis]
        tapleftbottom[-nover[0] :] = tapleftbottom[nwin[0] // 2]
        taps[nwins - nwins1] = tapleftbottom
        # rightbottomcorner taper
        taprightbottom = tap.copy()
        taprightbottom[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis]
        taprightbottom[-nover[0] :] = taprightbottom[nwin[0] // 2]
        taps[nwins - 1] = taprightbottom

    # define scalings
    if scalings is None:
        scalings = [1.0] * nwins

    # transform to apply: one (optionally tapered and scaled) copy of Op per patch
    if tapertype is None:
        OOp = BlockDiag([scalings[itap] * Op for itap in range(nwins)])
    else:
        OOp = BlockDiag(
            [
                scalings[itap] * Diagonal(taps[itap].ravel(), dtype=Op.dtype) * Op
                for itap in range(nwins)
            ]
        )

    # adjoint Restrictions scatter each patch back into its window position;
    # hstack recombines along axis 1, combining0 along axis 0
    hstack = HStack(
        [
            Restriction(
                (nwin[0], dimsd[1]), range(win_in, win_end), axis=1, dtype=Op.dtype
            ).H
            for win_in, win_end in zip(dwin1_ins, dwin1_ends)
        ]
    )

    combining1 = BlockDiag([hstack] * nwins0)

    combining0 = HStack(
        [
            Restriction(dimsd, range(win_in, win_end), axis=0, dtype=Op.dtype).H
            for win_in, win_end in zip(dwin0_ins, dwin0_ends)
        ]
    )

    Pop = LinearOperator(combining0 * combining1 * OOp)
    # model is exposed as a 4d array: (nwin0, nwin1, patch-size0, patch-size1)
    Pop.dims, Pop.dimsd = (
        nwins0,
        nwins1,
        int(dims[0] // nwins0),
        int(dims[1] // nwins1),
    ), dimsd
    Pop.name = name
    return Pop
9,570
33.677536
113
py
pylops
pylops-master/pylops/signalprocessing/chirpradon2d.py
__all__ = ["ChirpRadon2D"]

import logging

import numpy as np

from pylops import LinearOperator
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, NDArray

from ._chirpradon2d import _chirp_radon_2d

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


class ChirpRadon2D(LinearOperator):
    r"""2D Chirp Radon transform

    Radon forward (and adjoint) transform computed with Fast Fourier
    Transforms and chirp functions, applied to a 2-dimensional array of size
    :math:`[n_x \times n_t]` (both in forward and adjoint mode).

    Note that forward and adjoint are swapped compared to the time-space
    implementation in :class:`pylops.signalprocessing.Radon2D`; a direct
    `inverse` method is also available for this implementation.

    Parameters
    ----------
    taxis : :obj:`np.ndarray`
        Time axis
    haxis : :obj:`np.ndarray`
        Spatial axis
    pmax : :obj:`np.ndarray`
        Maximum slope defined as :math:`\tan` of maximum stacking angle in
        :math:`x` direction :math:`p_\text{max} = \tan(\alpha_{x, \text{max}})`.
        If one operates in terms of minimum velocity :math:`c_0`, set
        :math:`p_{x, \text{max}}=c_0 \,\mathrm{d}y/\mathrm{d}t`.
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Notes
    -----
    Refer to [1]_ for the theoretical and implementation details.

    .. [1] Andersson, F and Robertsson J. "Fast :math:`\tau-p` transforms by
        chirp modulation", Geophysics, vol 84, NO.1, pp. A13-A17, 2019.

    """

    def __init__(
        self,
        taxis: NDArray,
        haxis: NDArray,
        pmax: float,
        dtype: DTypeLike = "float64",
        name: str = "C",
    ) -> None:
        # model and data share the same (nh, nt) shape
        shape2d = (len(haxis), len(taxis))
        super().__init__(dtype=np.dtype(dtype), dims=shape2d, dimsd=shape2d, name=name)
        self.nh, self.nt = shape2d
        # regular samplings inferred from the first two axis values
        self.dt = taxis[1] - taxis[0]
        self.dh = haxis[1] - haxis[0]
        self.pmax = pmax

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # forward transform
        return _chirp_radon_2d(x, self.dt, self.dh, self.pmax, mode="f")

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        # adjoint transform
        return _chirp_radon_2d(x, self.dt, self.dh, self.pmax, mode="a")

    def inverse(self, x: NDArray) -> NDArray:
        """Direct inverse of the forward transform."""
        panel = x.reshape(self.dimsd)
        out = _chirp_radon_2d(panel, self.dt, self.dh, self.pmax, mode="i")
        return out.ravel()
2,812
30.255556
81
py
pylops
pylops-master/pylops/signalprocessing/shift.py
__all__ = ["Shift"]

from typing import Tuple, Union

import numpy as np
import numpy.typing as npt
from numpy.core.multiarray import normalize_axis_index

from pylops.basicoperators import Diagonal
from pylops.signalprocessing import FFT
from pylops.utils._internal import _value_or_sized_to_array
from pylops.utils.typing import DTypeLike


def Shift(
    dims: Tuple,
    shift: Union[float, npt.ArrayLike],
    axis: int = -1,
    nfft: Union[int, None] = None,
    sampling: float = 1.0,
    real: bool = False,
    engine: str = "numpy",
    dtype: DTypeLike = "complex128",
    name: str = "S",
    **kwargs_fftw
):
    r"""Shift operator

    Apply fractional shift in the frequency domain along an ``axis``
    of a multi-dimensional array of size ``dims``.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension
    shift : :obj:`float` or :obj:`numpy.ndarray`
        Fractional shift to apply in the same unit as ``sampling``. For
        multi-dimensional inputs, this can be a scalar to apply to every trace
        along the chosen axis or an array of shifts to be applied to each
        trace.
    axis : :obj:`int`, optional
        Axis along which shift is applied
    nfft : :obj:`int`, optional
        Number of samples in Fourier Transform (same as input if ``nfft=None``)
    sampling : :obj:`float`, optional
        Sampling step :math:`\Delta t`.
    real : :obj:`bool`, optional
        Model to which fft is applied has real numbers (``True``) or not
        (``False``). Used to enforce that the output of adjoint of a real
        model is real.
    engine : :obj:`str`, optional
        Engine used for fft computation (``numpy``, ``scipy``, or ``fftw``).
        Choose ``numpy`` when working with CuPy arrays.
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    **kwargs_fftw
            Arbitrary keyword arguments
            for :py:class:`pyfftw.FTTW`

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If ``dims`` is provided and ``axis`` is bigger than ``len(dims)``
    NotImplementedError
        If ``engine`` is neither ``numpy``, ``scipy``, nor ``fftw``

    Notes
    -----
    The Shift operator applies the forward Fourier transform, an element-wise
    complex scaling, and inverse fourier transform

    .. math::
        \mathbf{y}= \mathbf{F}^{-1} \mathbf{S} \mathbf{F} \mathbf{x}

    Here :math:`\mathbf{S}` is a diagonal operator that scales the Fourier
    transformed input by :math:`e^{-j2\pi f t_S}`, where :math:`t_S` is the
    chosen ``shift``.

    """
    Fop = FFT(
        dims,
        axis=axis,
        nfft=nfft,
        sampling=sampling,
        real=real,
        engine=engine,
        dtype=dtype,
        **kwargs_fftw
    )
    if isinstance(dims, int):
        # Diagonal treats the phase vector as a flat diagonal in this case
        dimsdiag = None
    else:
        dimsdiag = list(dims)
        dimsdiag[axis] = len(Fop.f)
    shift = _value_or_sized_to_array(shift)
    if shift.size == 1:
        # single shift: one phase ramp applied to every trace along axis
        shift = np.exp(-1j * 2 * np.pi * Fop.f * shift)
        Sop = Diagonal(shift, dims=dimsdiag, axis=axis, dtype=Fop.cdtype)
    else:
        # per-trace shifts: broadcast the frequency axis against the shift
        # array by inserting singleton dimensions on either side
        # NOTE(review): assumes ``dims`` is a tuple (``len(dims)`` below) when
        # ``shift`` is an array — confirm scalar ``dims`` is never combined
        # with array shifts
        # add dimensions to shift to match dimensions of model and data
        axis = normalize_axis_index(axis, len(dims))
        fdims = np.ones(shift.ndim + 1, dtype=int)
        fdims[axis] = Fop.f.size
        f = Fop.f.reshape(fdims)
        sdims = np.ones(shift.ndim + 1, dtype=int)
        sdims[:axis] = shift.shape[:axis]
        sdims[axis + 1 :] = shift.shape[axis:]
        shift = np.exp(-1j * 2 * np.pi * f * shift.reshape(sdims))
        Sop = Diagonal(shift, dtype=Fop.cdtype)
    Op = Fop.H * Sop * Fop
    Op.dims = Op.dimsd = Fop.dims
    # force dtype to that of input (FFT always upcasts it to complex)
    Op.dtype = dtype
    Op.name = name
    return Op
4,157
30.740458
97
py
pylops
pylops-master/pylops/signalprocessing/seislet.py
__all__ = ["Seislet"] from math import ceil, log from typing import Optional, Sequence import numpy as np from pylops import LinearOperator from pylops.basicoperators import Pad from pylops.utils.typing import DTypeLike, NDArray def _predict_trace( trace: NDArray, t: NDArray, dt: float, dx: float, slope: NDArray, adj: bool = False, ) -> NDArray: r"""Slope-based trace prediction. Resample a trace to a new time axis defined by the local slopes along the trace. Slopes do implicitly represent a time-varying time delay :math:`\Delta t (t) = dx*s(t)`. The input trace is interpolated using sinc-interpolation to a new time axis given by the following formula: :math:`t_{new} = t - dx*s(t)`. Parameters ---------- trace : :obj:`numpy.ndarray` Trace t : :obj:`numpy.ndarray` Time axis dt : :obj:`float` Time axis sampling dx : :obj:`float` Spatial axis sampling slope : :obj:`numpy.ndarray` Slope field adj : :obj:`bool`, optional Perform forward (``False``) or adjoint (``True``) operation Returns ------- tracenew : :obj:`numpy.ndarray` Resampled trace """ newt = t - dx * slope sinc = np.tile(newt, (len(newt), 1)) - np.tile(t[:, np.newaxis], (1, len(newt))) if adj: tracenew = np.dot(trace, np.sinc(sinc / dt).T) else: tracenew = np.dot(trace, np.sinc(sinc / dt)) return tracenew def _predict_haar( traces: NDArray, dt: float, dx: float, slopes: NDArray, repeat: int = 0, backward: bool = False, adj: bool = False, ) -> NDArray: """Predict set of traces given time-varying slopes (Haar basis function) A set of input traces are resampled based on local slopes. If the number of traces in ``slopes`` is twice the number of traces in ``traces``, the resampling is done only once per trace. If the number of traces in ``slopes`` is a multiple of 2 of the number of traces in ``traces``, the prediction is done recursively or in other words the output traces are obtained by resampling the input traces followed by ``repeat-1`` further resampling steps of the intermediate results. 
Parameters ---------- traces : :obj:`numpy.ndarray` Input traces of size :math:`n_x \times n_t` dt : :obj:`float` Time axis sampling of the slope field dx : :obj:`float` Spatial axis sampling of the slope field slopes: :obj:`numpy.ndarray` Slope field of size :math:`n_x * 2^{repeat} \times n_t` repeat : :obj:`int`, optional Number of repeated predictions backward : :obj:`bool`, optional Predicted trace is on the right (``False``) or on the left (``True``) of input trace adj : :obj:`bool`, optional Perform forward (``False``) or adjoint (``True``) operation Returns ------- pred : :obj:`numpy.ndarray` Predicted traces """ if backward: iback = 1 idir = -1 else: iback = 0 idir = 1 slopejump = 2 ** (repeat + 1) repeat = 2**repeat nx, nt = traces.shape t = np.arange(nt) * dt pred = np.zeros_like(traces) for ix in range(nx): pred_tmp = traces[ix] if adj: for irepeat in range(repeat - 1, -1, -1): pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], adj=True, ) else: for irepeat in range(repeat): pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], ) pred[ix] = pred_tmp return pred def _predict_lin( traces: NDArray, dt: float, dx: float, slopes: NDArray, repeat: int = 0, backward: bool = False, adj: bool = False, ) -> NDArray: """Predict set of traces given time-varying slopes (Linear basis function) See _predict_haar for details. 
""" if backward: iback = 1 idir = -1 else: iback = 0 idir = 1 slopejump = 2 ** (repeat + 1) repeat = 2**repeat nx, nt = traces.shape t = np.arange(nt) * dt pred = np.zeros_like(traces) for ix in range(nx): pred_tmp = traces[ix] if adj: if not ((ix == 0 and not backward) or (ix == nx - 1 and backward)): pred_tmp1 = traces[ix - idir] for irepeat in range(repeat - 1, -1, -1): if (ix == 0 and not backward) or (ix == nx - 1 and backward): pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], adj=True, ) pred_tmp1 = 0 else: pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], adj=True, ) pred_tmp1 = _predict_trace( pred_tmp1, t, dt, (-idir) * dx, slopes[ix * slopejump + iback * repeat - idir * irepeat], adj=True, ) else: if not ((ix == nx - 1 and not backward) or (ix == 0 and backward)): pred_tmp1 = traces[ix + idir] for irepeat in range(repeat): if (ix == nx - 1 and not backward) or (ix == 0 and backward): pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], ) pred_tmp1 = 0 else: pred_tmp = _predict_trace( pred_tmp, t, dt, idir * dx, slopes[ix * slopejump + iback * repeat + idir * irepeat], ) pred_tmp1 = _predict_trace( pred_tmp1, t, dt, (-idir) * dx, slopes[ (ix + idir) * slopejump + iback * repeat - idir * irepeat ], ) # if (adj and ((ix == 0 and not backward) or (ix == nx - 1 and backward))) or # (ix == nx - 1 and not backward) or (ix == 0 and backward): # pred[ix] = pred_tmp # else: if ix == nx - 1: pred[ix] = pred_tmp + pred_tmp1 / 2.0 else: pred[ix] = (pred_tmp + pred_tmp1) / 2.0 return pred class Seislet(LinearOperator): r"""Two dimensional Seislet operator. Apply 2D-Seislet Transform to an input array given an estimate of its local ``slopes``. 
In forward mode, the input array is reshaped into a two-dimensional array of size :math:`n_x \times n_t` and the transform is performed along the first (spatial) axis (see Notes for more details). Parameters ---------- slopes : :obj:`numpy.ndarray` Slope field of size :math:`n_x \times n_t` sampling : :obj:`tuple`, optional Sampling steps in x- and t-axis. level : :obj:`int`, optional Number of scaling levels (must be >=0). kind : :obj:`str`, optional Basis function used for predict and update steps: ``haar`` or ``linear``. inv : :obj:`int`, optional Apply inverse transform when invoking the adjoint (``True``) or not (``False``). Note that in some scenario it may be more appropriate to use the exact inverse as adjoint of the Seislet operator even if this is not an orthogonal operator and the dot-test would not be satisfied (see Notes for details). Otherwise, the user can access the inverse directly as method of this class. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ NotImplementedError If ``kind`` is different from haar or linear ValueError If ``sampling`` has more or less than two elements. Notes ----- The Seislet transform [1]_ is implemented using the lifting scheme. In its simplest form (i.e., corresponding to the Haar basis function for the Wavelet transform) the input dataset is separated into even (:math:`\mathbf{e}`) and odd (:math:`\mathbf{o}`) traces. Even traces are used to forward predict the odd traces using local slopes and the new odd traces (also referred to as residual) is defined as: .. 
math:: \mathbf{o}^{i+1} = \mathbf{r}^i = \mathbf{o}^i - P(\mathbf{e}^i) where :math:`P = P^+` is the slope-based forward prediction operator (which is here implemented as a sinc-based resampling). The residual is then updated and summed to the even traces to obtain the new even traces (also referred to as coarse representation): .. math:: \mathbf{e}^{i+1} = \mathbf{c}^i = \mathbf{e}^i + U(\mathbf{o}^{i+1}) where :math:`U = P^- / 2` is the update operator which performs a slope-based backward prediction. At this point :math:`\mathbf{e}^{i+1}` becomes the new data and the procedure is repeated `level` times (at maximum until :math:`\mathbf{e}^{i+1}` is a single trace. The Seislet transform is effectively composed of all residuals and the coarsest data representation. In the inverse transform the two operations are reverted. Starting from the coarsest scale data representation :math:`\mathbf{c}` and residual :math:`\mathbf{r}`, the even and odd parts of the previous scale are reconstructed as: .. math:: \mathbf{e}^i = \mathbf{c}^i - U(\mathbf{r}^i) = \mathbf{e}^{i+1} - U(\mathbf{o}^{i+1}) and: .. math:: \mathbf{o}^i = \mathbf{r}^i + P(\mathbf{e}^i) = \mathbf{o}^{i+1} + P(\mathbf{e}^i) A new data is formed by interleaving :math:`\mathbf{e}^i` and :math:`\mathbf{o}^i` and the procedure repeated until the new data as the same number of traces as the original one. Finally the adjoint operator can be easily derived by writing the lifting scheme in a matricial form: .. 
math:: \begin{bmatrix} \mathbf{r}_1 \\ \mathbf{r}_2 \\ \vdots \\ \mathbf{r}_N \\ \mathbf{c}_1 \\ \mathbf{c}_2 \\ \vdots \\ \mathbf{c}_N \end{bmatrix} = \begin{bmatrix} \mathbf{I} & \mathbf{0} & \ldots & \mathbf{0} & -\mathbf{P} & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{I} & \ldots & \mathbf{0} & \mathbf{0} & -\mathbf{P} & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{I} & \mathbf{0} & \mathbf{0} & \ldots & -\mathbf{P} \\ \mathbf{U} & \mathbf{0} & \ldots & \mathbf{0} & \mathbf{I}-\mathbf{UP} & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{U} & \ldots & \mathbf{0} & \mathbf{0} & \mathbf{I}-\mathbf{UP} & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{U} & \mathbf{0} & \mathbf{0} & \ldots & \mathbf{I}-\mathbf{UP} \end{bmatrix} \begin{bmatrix} \mathbf{o}_1 \\ \mathbf{o}_2 \\ \vdots \\ \mathbf{o}_N \\ \mathbf{e}_1 \\ \mathbf{e}_2 \\ \vdots \\ \mathbf{e}_N \end{bmatrix} Transposing the operator leads to: .. 
math:: \begin{bmatrix} \mathbf{o}_1 \\ \mathbf{o}_2 \\ \vdots \\ \mathbf{o}_N \\ \mathbf{e}_1 \\ \mathbf{e}_2 \\ \vdots \\ \mathbf{e}_N \end{bmatrix} = \begin{bmatrix} \mathbf{I} & \mathbf{0} & \ldots & \mathbf{0} & -\mathbf{U^T} & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{I} & \ldots & \mathbf{0} & \mathbf{0} & -\mathbf{U^T} & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{I} & \mathbf{0} & \mathbf{0} & \ldots & -\mathbf{U^T} \\ \mathbf{P^T} & \mathbf{0} & \ldots & \mathbf{0} & \mathbf{I}-\mathbf{P^T U^T} & \mathbf{0} & \ldots & \mathbf{0} \\ \mathbf{0} & \mathbf{P^T} & \ldots & \mathbf{0} & \mathbf{0} & \mathbf{I}-\mathbf{P^T U^T} & \ldots & \mathbf{0} \\ \vdots & \vdots & \ddots & \vdots & \vdots & \vdots & \ddots & \vdots \\ \mathbf{0} & \mathbf{0} & \ldots & \mathbf{P^T} & \mathbf{0} & \mathbf{0} & \ldots & \mathbf{I}-\mathbf{P^T U^T} \end{bmatrix} \begin{bmatrix} \mathbf{r}_1 \\ \mathbf{r}_2 \\ \vdots \\ \mathbf{r}_N \\ \mathbf{c}_1 \\ \mathbf{c}_2 \\ \vdots \\ \mathbf{c}_N \end{bmatrix} which can be written more easily in the following two steps: .. math:: \mathbf{o} = \mathbf{r} + \mathbf{U}^H\mathbf{c} and: .. math:: \mathbf{e} = \mathbf{c} - \mathbf{P}^H(\mathbf{r} + \mathbf{U}^H(\mathbf{c})) = \mathbf{c} - \mathbf{P}^H\mathbf{o} Similar derivations follow for more complex wavelet bases. .. [1] Fomel, S., Liu, Y., "Seislet transform and seislet frame", Geophysics, 75, no. 3, V25-V38. 2010. 
""" def __init__( self, slopes: NDArray, sampling: Sequence[float] = (1.0, 1.0), level: Optional[int] = None, kind: str = "haar", inv: bool = False, dtype: DTypeLike = "float64", name: str = "S", ) -> None: if len(sampling) != 2: raise ValueError("provide two sampling steps") # define predict and update steps if kind == "haar": self.predict = _predict_haar elif kind == "linear": self.predict = _predict_lin else: raise NotImplementedError("kind should be haar or linear") # define padding for length to be power of 2 dims = slopes.shape ndimpow2 = 2 ** ceil(log(dims[0], 2)) dimsd = [ndimpow2] + list(dims[1:]) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) pad = [(0, ndimpow2 - self.dims[0])] + [(0, 0)] * (len(self.dims) - 1) self.pad = Pad(self.dims, pad) self.nx, self.nt = self.dimsd # define levels nlevels_max = int(np.log2(self.dimsd[0])) levels_size = np.flip(np.array([2**i for i in range(nlevels_max)])) if level is None: levels_size = levels_size[:-1] level = nlevels_max - 1 else: levels_size = levels_size[:level] self.level = level self.levels_size = levels_size self.levels_cum = np.insert(np.cumsum(self.levels_size), 0, 0) self.dx, self.dt = sampling self.slopes = (self.pad * slopes.ravel()).reshape(self.dimsd) self.inv = inv def _matvec(self, x: NDArray) -> NDArray: x = self.pad.matvec(x) x = np.reshape(x, self.dimsd) y = np.zeros((np.sum(self.levels_size) + self.levels_size[-1], self.nt)) for ilevel in range(self.level): odd = x[1::2] even = x[::2] res = odd - self.predict( even, self.dt, self.dx, self.slopes, repeat=ilevel, backward=False ) x = ( even + self.predict( res, self.dt, self.dx, self.slopes, repeat=ilevel, backward=True ) / 2.0 ) y[self.levels_cum[ilevel] : self.levels_cum[ilevel + 1]] = res y[self.levels_cum[-1] :] = x return y.ravel() def _rmatvec(self, x: NDArray) -> NDArray: if not self.inv: x = np.reshape(x, self.dimsd) y = x[self.levels_cum[-1] :] for ilevel in range(self.level, 0, -1): res = 
x[self.levels_cum[ilevel - 1] : self.levels_cum[ilevel]] odd = ( res + self.predict( y, self.dt, self.dx, self.slopes, repeat=ilevel - 1, backward=True, adj=True, ) / 2.0 ) even = y - self.predict( odd, self.dt, self.dx, self.slopes, repeat=ilevel - 1, backward=False, adj=True, ) y = np.zeros((2 * even.shape[0], self.nt)) y[1::2] = odd y[::2] = even y = self.pad.rmatvec(y.ravel()) else: y = self.inverse(x) return y def inverse(self, x: NDArray) -> NDArray: x = np.reshape(x, self.dimsd) y = x[self.levels_cum[-1] :] for ilevel in range(self.level, 0, -1): res = x[self.levels_cum[ilevel - 1] : self.levels_cum[ilevel]] even = ( y - self.predict( res, self.dt, self.dx, self.slopes, repeat=ilevel - 1, backward=True ) / 2.0 ) odd = res + self.predict( even, self.dt, self.dx, self.slopes, repeat=ilevel - 1, backward=False ) y = np.zeros((2 * even.shape[0], self.nt)) y[1::2] = odd y[::2] = even y = self.pad.rmatvec(y.ravel()) return y
18,945
35.225621
127
py
pylops
pylops-master/pylops/signalprocessing/patch3d.py
__all__ = [ "patch3d_design", "Patch3D", ] import logging from typing import Optional, Sequence, Tuple import numpy as np from pylops import LinearOperator from pylops.basicoperators import BlockDiag, Diagonal, HStack, Restriction from pylops.signalprocessing.sliding2d import _slidingsteps from pylops.utils.tapers import tapernd from pylops.utils.typing import InputDimsLike, NDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def patch3d_design( dimsd: InputDimsLike, nwin: Tuple[int, int, int], nover: Tuple[int, int, int], nop: Tuple[int, int, int], ) -> Tuple[ Tuple[int, int, int], Tuple[int, int, int], Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]], Tuple[Tuple[NDArray, NDArray], Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]], ]: """Design Patch3D operator This routine can be used prior to creating the :class:`pylops.signalprocessing.Patch3D` operator to identify the correct number of windows to be used based on the dimension of the data (``dimsd``), dimension of the window (``nwin``), overlap (``nover``),a and dimension of the operator acting in the model space. Parameters ---------- dimsd : :obj:`tuple` Shape of 3-dimensional data. nwin : :obj:`tuple` Number of samples of window. nover : :obj:`tuple` Number of samples of overlapping part of window. nop : :obj:`tuple` Size of model in the transformed domain. Returns ------- nwins : :obj:`tuple` Number of windows. dims : :obj:`tuple` Shape of 3-dimensional model. mwins_inends : :obj:`tuple` Start and end indices for model patches (stored as tuple of tuples). dwins_inends : :obj:`tuple` Start and end indices for data patches (stored as tuple of tuples). 
""" # data windows dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0]) dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1]) dwin2_ins, dwin2_ends = _slidingsteps(dimsd[2], nwin[2], nover[2]) dwins_inends = ( (dwin0_ins, dwin0_ends), (dwin1_ins, dwin1_ends), (dwin2_ins, dwin2_ends), ) nwins0 = len(dwin0_ins) nwins1 = len(dwin1_ins) nwins2 = len(dwin2_ins) nwins = (nwins0, nwins1, nwins2) # model windows dims = (nwins0 * nop[0], nwins1 * nop[1], nwins2 * nop[2]) mwin0_ins, mwin0_ends = _slidingsteps(dims[0], nop[0], 0) mwin1_ins, mwin1_ends = _slidingsteps(dims[1], nop[1], 0) mwin2_ins, mwin2_ends = _slidingsteps(dims[2], nop[2], 0) mwins_inends = ( (mwin0_ins, mwin0_ends), (mwin1_ins, mwin1_ends), (mwin2_ins, mwin2_ends), ) # print information about patching logging.warning("%d-%d-%d windows required...", nwins0, nwins1, nwins2) logging.warning( "data wins - start:%s, end:%s / start:%s, end:%s / start:%s, end:%s", dwin0_ins, dwin0_ends, dwin1_ins, dwin1_ends, dwin2_ins, dwin2_ends, ) logging.warning( "model wins - start:%s, end:%s / start:%s, end:%s / start:%s, end:%s", mwin0_ins, mwin0_ends, mwin1_ins, mwin1_ends, mwin2_ins, mwin2_ends, ) return nwins, dims, mwins_inends, dwins_inends def Patch3D( Op, dims: InputDimsLike, dimsd: InputDimsLike, nwin: Tuple[int, int, int], nover: Tuple[int, int, int], nop: Tuple[int, int, int], tapertype: str = "hanning", scalings: Optional[Sequence[float]] = None, name: str = "P", ) -> LinearOperator: """3D Patch transform operator. Apply a transform operator ``Op`` repeatedly to patches of the model vector in forward mode and patches of the data vector in adjoint mode. More specifically, in forward mode the model vector is divided into patches, each patch is transformed, and patches are then recombined together. Both model and data are internally reshaped and interpreted as 3-dimensional arrays: each patch contains a portion of the array in every axis. 
This operator can be used to perform local, overlapping transforms (e.g., :obj:`pylops.signalprocessing.FFTND` or :obj:`pylops.signalprocessing.Radon3D`) on 3-dimensional arrays. .. note:: The shape of the model has to be consistent with the number of windows for this operator not to return an error. As the number of windows depends directly on the choice of ``nwin`` and ``nover``, it is recommended to first run ``patch3d_design`` to obtain the corresponding ``dims`` and number of windows. .. warning:: Depending on the choice of `nwin` and `nover` as well as the size of the data, sliding windows may not cover the entire data. The start and end indices of each window will be displayed and returned with running ``patch3d_design``. Parameters ---------- Op : :obj:`pylops.LinearOperator` Transform operator dims : :obj:`tuple` Shape of 3-dimensional model. Note that ``dims[0]``, ``dims[1]`` and ``dims[2]`` should be multiple of the model size of the transform in their respective dimensions dimsd : :obj:`tuple` Shape of 3-dimensional data nwin : :obj:`tuple` Number of samples of window nover : :obj:`tuple` Number of samples of overlapping part of window nop : :obj:`tuple` Size of model in the transformed domain tapertype : :obj:`str`, optional Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``) scalings : :obj:`tuple` or :obj:`list`, optional Set of scalings to apply to each patch. If ``None``, no scale will be applied name : :obj:`str`, optional Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- Sop : :obj:`pylops.LinearOperator` Sliding operator Raises ------ ValueError Identified number of windows is not consistent with provided model shape (``dims``). See Also -------- Sliding1D: 1D Sliding transform operator. Sliding2D: 2D Sliding transform operator. Sliding3D: 3D Sliding transform operator. Patch2D: 2D Patching transform operator. 
""" # data windows dwin0_ins, dwin0_ends = _slidingsteps(dimsd[0], nwin[0], nover[0]) dwin1_ins, dwin1_ends = _slidingsteps(dimsd[1], nwin[1], nover[1]) dwin2_ins, dwin2_ends = _slidingsteps(dimsd[2], nwin[2], nover[2]) nwins0 = len(dwin0_ins) nwins1 = len(dwin1_ins) nwins2 = len(dwin2_ins) nwins = nwins0 * nwins1 * nwins2 # check patching if ( nwins0 * nop[0] != dims[0] or nwins1 * nop[1] != dims[1] or nwins2 * nop[2] != dims[2] ): raise ValueError( f"Model shape (dims={dims}) is not consistent with chosen " f"number of windows. Run patch3d_design to identify the " f"correct number of windows for the current " "model size..." ) # create tapers if tapertype is not None: tap = tapernd(nwin, nover, tapertype=tapertype).astype(Op.dtype) taps = {itap: tap for itap in range(nwins)} # 1, sides # topmost tapers taptop = tap.copy() taptop[: nover[0]] = tap[nwin[0] // 2] for itap in range(0, nwins1 * nwins2): taps[itap] = taptop # bottommost tapers tapbottom = tap.copy() tapbottom[-nover[0] :] = tap[nwin[0] // 2] for itap in range(nwins - nwins1 * nwins2, nwins): taps[itap] = tapbottom # frontmost tapers tapfront = tap.copy() tapfront[:, :, : nover[2]] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] for itap in range(0, nwins, nwins2): taps[itap] = tapfront # backmost tapers tapback = tap.copy() tapback[:, :, -nover[2] :] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] for itap in range(nwins2 - 1, nwins, nwins2): taps[itap] = tapback # leftmost tapers tapleft = tap.copy() tapleft[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis, :] for itap in range(0, nwins, nwins1 * nwins2): for i in range(nwins2): taps[itap + i] = tapleft # rightmost tapers tapright = tap.copy() tapright[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis, :] for itap in range(nwins2 * (nwins1 - 1), nwins, nwins2 * nwins1): for i in range(nwins2): taps[itap + i] = tapright # 2. 
pillars # topleftmost tapers taplefttop = tap.copy() taplefttop[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis, :] taplefttop[: nover[0]] = taplefttop[nwin[0] // 2] for itap in range(nwins2): taps[itap] = taplefttop # toprightmost tapers taprighttop = tap.copy() taprighttop[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis, :] taprighttop[: nover[0]] = taprighttop[nwin[0] // 2] for itap in range(nwins2): taps[nwins2 * (nwins1 - 1) + itap] = taprighttop # topfrontmost tapers tapfronttop = tap.copy() tapfronttop[:, :, : nover[2]] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] tapfronttop[: nover[0]] = tapfronttop[nwin[0] // 2] for itap in range(0, nwins1 * nwins2, nwins2): taps[itap] = tapfronttop # topbackmost tapers tapbacktop = tap.copy() tapbacktop[:, :, -nover[2] :] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] tapbacktop[: nover[0]] = tapbacktop[nwin[0] // 2] for itap in range(nwins2 - 1, nwins1 * nwins2, nwins2): taps[itap] = tapbacktop # bottomleftmost tapers tapleftbottom = tap.copy() tapleftbottom[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis, :] tapleftbottom[-nover[0] :] = tapleftbottom[nwin[0] // 2] for itap in range(nwins2): taps[(nwins0 - 1) * nwins1 * nwins2 + itap] = tapleftbottom # bottomrightmost tapers taprightbottom = tap.copy() taprightbottom[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis, :] taprightbottom[-nover[0] :] = taprightbottom[nwin[0] // 2] for itap in range(nwins2): taps[ (nwins0 - 1) * nwins1 * nwins2 + (nwins1 - 1) * nwins2 + itap ] = taprightbottom # bottomfrontmost tapers tapfrontbottom = tap.copy() tapfrontbottom[:, :, : nover[2]] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] tapfrontbottom[-nover[0] :] = tapfrontbottom[nwin[0] // 2] for itap in range(0, nwins1 * nwins2, nwins2): taps[(nwins0 - 1) * nwins1 * nwins2 + itap] = tapfrontbottom # bottombackmost tapers tapbackbottom = tap.copy() tapbackbottom[:, :, -nover[2] :] = tap[:, :, nwin[2] // 2][:, :, np.newaxis] tapbackbottom[-nover[0] :] = tapbackbottom[nwin[0] 
// 2] for itap in range(0, nwins1 * nwins2, nwins2): taps[(nwins0 - 1) * nwins1 * nwins2 + nwins2 + itap - 1] = tapbackbottom # leftfrontmost tapers tapleftfront = tap.copy() tapleftfront[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis, :] tapleftfront[:, :, : nover[2]] = tapleftfront[:, :, nwin[2] // 2][ :, :, np.newaxis ] for itap in range(0, nwins, nwins1 * nwins2): taps[itap] = tapleftfront # rightfrontmost tapers taprightfront = tap.copy() taprightfront[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis, :] taprightfront[:, :, : nover[2]] = taprightfront[:, :, nwin[2] // 2][ :, :, np.newaxis ] for itap in range(0, nwins, nwins1 * nwins2): taps[(nwins1 - 1) * nwins2 + itap] = taprightfront # leftbackmost tapers tapleftback = tap.copy() tapleftback[:, : nover[1]] = tap[:, nwin[1] // 2][:, np.newaxis, :] tapleftback[:, :, -nover[2] :] = tapleftback[:, :, nwin[2] // 2][ :, :, np.newaxis ] for itap in range(0, nwins, nwins1 * nwins2): taps[nwins2 + itap - 1] = tapleftback # rightbackmost tapers taprightback = tap.copy() taprightback[:, -nover[1] :] = tap[:, nwin[1] // 2][:, np.newaxis, :] taprightback[:, :, -nover[2] :] = taprightback[:, :, nwin[2] // 2][ :, :, np.newaxis ] for itap in range(0, nwins, nwins1 * nwins2): taps[(nwins1 - 1) * nwins2 + nwins2 + itap - 1] = taprightback # 3. 
corners # lefttopfrontcorner taper taplefttop = tap.copy() taplefttop[: nover[0]] = tap[nwin[0] // 2] taplefttop[:, : nover[1]] = taplefttop[:, nwin[1] // 2][:, np.newaxis, :] taplefttop[:, :, : nover[2]] = taplefttop[:, :, nwin[2] // 2][:, :, np.newaxis] taps[0] = taplefttop # lefttopbackcorner taper taplefttop = tap.copy() taplefttop[: nover[0]] = tap[nwin[0] // 2] taplefttop[:, : nover[1]] = taplefttop[:, nwin[1] // 2][:, np.newaxis, :] taplefttop[:, :, -nover[2] :] = taplefttop[:, :, nwin[2] // 2][:, :, np.newaxis] taps[nwins2 - 1] = taplefttop # righttopfrontcorner taper taprighttop = tap.copy() taprighttop[: nover[0]] = tap[nwin[0] // 2] taprighttop[:, -nover[1] :] = taprighttop[:, nwin[1] // 2][:, np.newaxis, :] taprighttop[:, :, : nover[2]] = taprighttop[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[(nwins1 - 1) * nwins2] = taprighttop # righttopbackcorner taper taprighttop = tap.copy() taprighttop[: nover[0]] = tap[nwin[0] // 2] taprighttop[:, -nover[1] :] = taprighttop[:, nwin[1] // 2][:, np.newaxis, :] taprighttop[:, :, -nover[2] :] = taprighttop[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[(nwins1 - 1) * nwins2 + nwins2 - 1] = taprighttop # leftbottomfrontcorner taper tapleftbottom = tap.copy() tapleftbottom[-nover[0] :] = tap[nwin[0] // 2] tapleftbottom[:, : nover[1]] = tapleftbottom[:, nwin[1] // 2][:, np.newaxis, :] tapleftbottom[:, :, : nover[2]] = tapleftbottom[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[(nwins0 - 1) * nwins1 * nwins2] = tapleftbottom # leftbottombackcorner taper tapleftbottom = tap.copy() tapleftbottom[-nover[0] :] = tap[nwin[0] // 2] tapleftbottom[:, : nover[1]] = tapleftbottom[:, nwin[1] // 2][:, np.newaxis, :] tapleftbottom[:, :, -nover[2] :] = tapleftbottom[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[(nwins0 - 1) * nwins1 * nwins2 + nwins2 - 1] = tapleftbottom # rightbottomfrontcorner taper taprightbottom = tap.copy() taprightbottom[-nover[0] :] = tap[nwin[0] // 2] taprightbottom[:, -nover[1] :] = taprightbottom[:, nwin[1] // 
2][ :, np.newaxis, : ] taprightbottom[:, :, : nover[2]] = taprightbottom[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[(nwins0 - 1) * nwins1 * nwins2 + (nwins1 - 1) * nwins2] = taprightbottom # rightbottombackcorner taper taprightbottom = tap.copy() taprightbottom[-nover[0] :] = tap[nwin[0] // 2] taprightbottom[:, -nover[1] :] = taprightbottom[:, nwin[1] // 2][ :, np.newaxis, : ] taprightbottom[:, :, -nover[2] :] = taprightbottom[:, :, nwin[2] // 2][ :, :, np.newaxis ] taps[ (nwins0 - 1) * nwins1 * nwins2 + (nwins1 - 1) * nwins2 + nwins2 - 1 ] = taprightbottom # define scalings if scalings is None: scalings = [1.0] * nwins # transform to apply if tapertype is None: OOp = BlockDiag([scalings[itap] * Op for itap in range(nwins)]) else: OOp = BlockDiag( [ scalings[itap] * Diagonal(taps[itap].ravel(), dtype=Op.dtype) * Op for itap in range(nwins) ] ) hstack2 = HStack( [ Restriction( (nwin[0], nwin[1], dimsd[2]), range(win_in, win_end), axis=2, dtype=Op.dtype, ).H for win_in, win_end in zip(dwin2_ins, dwin2_ends) ] ) combining2 = BlockDiag([hstack2] * (nwins1 * nwins0)) hstack1 = HStack( [ Restriction( (nwin[0], dimsd[1], dimsd[2]), range(win_in, win_end), axis=1, dtype=Op.dtype, ).H for win_in, win_end in zip(dwin1_ins, dwin1_ends) ] ) combining1 = BlockDiag([hstack1] * nwins0) combining0 = HStack( [ Restriction(dimsd, range(win_in, win_end), axis=0, dtype=Op.dtype).H for win_in, win_end in zip(dwin0_ins, dwin0_ends) ] ) Pop = LinearOperator(combining0 * combining1 * combining2 * OOp) Pop.dims, Pop.dimsd = ( nwins0, nwins1, nwins2, int(dims[0] // nwins0), int(dims[1] // nwins1), int(dims[2] // nwins2), ), dimsd Pop.name = name return Pop
17,481
37.253829
113
py
pylops
pylops-master/pylops/signalprocessing/fft.py
__all__ = ["FFT"] import logging import warnings from typing import Optional, Union import numpy as np import numpy.typing as npt import scipy.fft from pylops import LinearOperator from pylops.signalprocessing._baseffts import _BaseFFT, _FFTNorms from pylops.utils import deps from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray pyfftw_message = deps.pyfftw_import("the fft module") if pyfftw_message is None: import pyfftw logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) class _FFT_numpy(_BaseFFT): """One dimensional Fast-Fourier Transform using NumPy""" def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, nfft: Optional[int] = None, sampling: float = 1.0, norm: str = "ortho", real: bool = False, ifftshift_before: bool = False, fftshift_after: bool = False, dtype: DTypeLike = "complex128", ) -> None: super().__init__( dims=dims, axis=axis, nfft=nfft, sampling=sampling, norm=norm, real=real, ifftshift_before=ifftshift_before, fftshift_after=fftshift_after, dtype=dtype, ) if self.cdtype != np.complex128: warnings.warn( f"numpy backend always returns complex128 dtype. To respect the passed dtype, data will be casted to {self.cdtype}." 
        )

        # NumPy applies no scaling when norm=None ("backward"); store the extra
        # factor needed to realize the "none" and "1/n" normalizations manually.
        self._norm_kwargs = {"norm": None}  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            self._scale = self.nfft
        elif self.norm is _FFTNorms.ONE_OVER_N:
            self._scale = 1.0 / self.nfft

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        if self.ifftshift_before:
            x = np.fft.ifftshift(x, axes=self.axis)
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = np.fft.rfft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            y = np.swapaxes(y, -1, self.axis)
            y[..., 1 : 1 + (self.nfft - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axis, -1)
        else:
            y = np.fft.fft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        if self.fftshift_after:
            y = np.fft.fftshift(y, axes=self.axis)
        y = y.astype(self.cdtype)
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        if self.fftshift_after:
            x = np.fft.ifftshift(x, axes=self.axis)
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axis)
            x[..., 1 : 1 + (self.nfft - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axis, -1)
            y = np.fft.irfft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        else:
            y = np.fft.ifft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # truncate (nfft > model size) or pad (nfft < model size) back to dims
        if self.nfft > self.dims[self.axis]:
            y = np.take(y, range(0, self.dims[self.axis]), axis=self.axis)
        elif self.nfft < self.dims[self.axis]:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        if self.ifftshift_before:
            y = np.fft.fftshift(y, axes=self.axis)
        y = y.astype(self.rdtype)
        return y

    def __truediv__(self, y: npt.ArrayLike) -> npt.ArrayLike:
        # inverse: for non-orthogonal norms the adjoint must be rescaled to
        # act as the inverse of the forward
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale
        return self._rmatvec(y)


class _FFT_scipy(_BaseFFT):
    """One dimensional Fast-Fourier Transform using SciPy"""

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axis: int = -1,
        nfft: Optional[int] = None,
        sampling: float = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ) -> None:
        super().__init__(
            dims=dims,
            axis=axis,
            nfft=nfft,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )

        # SciPy applies no scaling when norm=None ("backward"); store the extra
        # factor needed to realize the "none" and "1/n" normalizations manually.
        self._norm_kwargs = {"norm": None}  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            self._scale = self.nfft
        elif self.norm is _FFTNorms.ONE_OVER_N:
            self._scale = 1.0 / self.nfft

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        if self.ifftshift_before:
            x = scipy.fft.ifftshift(x, axes=self.axis)
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = scipy.fft.rfft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            y = np.swapaxes(y, -1, self.axis)
            y[..., 1 : 1 + (self.nfft - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axis, -1)
        else:
            y = scipy.fft.fft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        if self.fftshift_after:
            y = scipy.fft.fftshift(y, axes=self.axis)
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        if self.fftshift_after:
            x = scipy.fft.ifftshift(x, axes=self.axis)
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axis)
            x[..., 1 : 1 + (self.nfft - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axis, -1)
            y = scipy.fft.irfft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        else:
            y = scipy.fft.ifft(x, n=self.nfft, axis=self.axis, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # truncate (nfft > model size) or pad (nfft < model size) back to dims
        if self.nfft > self.dims[self.axis]:
            y = np.take(y, range(0, self.dims[self.axis]), axis=self.axis)
        elif self.nfft < self.dims[self.axis]:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        if self.ifftshift_before:
            y = scipy.fft.fftshift(y, axes=self.axis)
        return y

    def __truediv__(self, y: npt.ArrayLike) -> npt.ArrayLike:
        # inverse: for non-orthogonal norms the adjoint must be rescaled to
        # act as the inverse of the forward
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale
        return self._rmatvec(y)


class _FFT_fftw(_BaseFFT):
    """One dimensional Fast-Fourier Transform using pyFFTW"""

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axis: int = -1,
        nfft: Optional[int] = None,
        sampling: float = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
        **kwargs_fftw,
    ) -> None:
        # fftw has no half-precision support; fall back to single precision
        if np.dtype(dtype) == np.float16:
            warnings.warn(
                "fftw backend is unavailable with float16 dtype. Will use float32."
            )
            dtype = np.float32
        # norm-related FFTW keywords would conflict with our own scaling below
        for badop in ["ortho", "normalise_idft"]:
            if badop in kwargs_fftw:
                if badop == "ortho" and norm == "ortho":
                    continue
                warnings.warn(
                    f"FFTW option '{badop}' will be overwritten by norm={norm}"
                )
                del kwargs_fftw[badop]
        super().__init__(
            dims=dims,
            axis=axis,
            nfft=nfft,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
        if self.cdtype != np.complex128:
            warnings.warn(
                f"fftw backend returns complex128 dtype. To respect the passed dtype, data will be cast to {self.cdtype}."
            )

        dims_t = list(self.dims)
        dims_t[self.axis] = self.nfft
        self.dims_t = dims_t

        # define padding (fftw requires the user to provide padded input signal)
        self.pad = np.zeros((self.ndim, 2), dtype=int)
        if self.real:
            if self.nfft % 2:
                self.pad[self.axis, 1] = (
                    2 * (self.dimsd[self.axis] - 1) + 1 - self.dims[self.axis]
                )
            else:
                self.pad[self.axis, 1] = (
                    2 * (self.dimsd[self.axis] - 1) - self.dims[self.axis]
                )
        else:
            self.pad[self.axis, 1] = self.dimsd[self.axis] - self.dims[self.axis]
        self.dopad = True if np.sum(self.pad) > 0 else False

        # create empty arrays and plans for fft/ifft
        self.x = pyfftw.empty_aligned(
            self.dims_t, dtype=self.rdtype if real else self.cdtype
        )
        self.y = pyfftw.empty_aligned(self.dimsd, dtype=self.cdtype)

        # Use FFTW without norm-related keywords above. In this case, FFTW standard
        # behavior is to scale with 1/N on the inverse transform. The _scale below
        # converts the default behavior to that of ``norm``. Keywords ``ortho`` and
        # ``normalise_idft`` do not seem to be available over all versions of pyFFTW
        # so best to avoid them.
        if self.norm is _FFTNorms.ORTHO:
            self._scale = np.sqrt(1.0 / self.nfft)
        elif self.norm is _FFTNorms.NONE:
            self._scale = self.nfft
        elif self.norm is _FFTNorms.ONE_OVER_N:
            self._scale = 1.0 / self.nfft

        self.fftplan = pyfftw.FFTW(
            self.x, self.y, axes=(self.axis,), direction="FFTW_FORWARD", **kwargs_fftw
        )
        self.ifftplan = pyfftw.FFTW(
            self.y, self.x, axes=(self.axis,), direction="FFTW_BACKWARD", **kwargs_fftw
        )

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        if self.ifftshift_before:
            x = np.fft.ifftshift(x, axes=self.axis)
        if not self.clinear:
            x = np.real(x)
        if self.dopad:
            x = np.pad(x, self.pad, "constant", constant_values=0)
        elif self.doifftpad:
            x = np.take(x, range(0, self.nfft), axis=self.axis)
        # self.fftplan() always uses byte-aligned self.x as input array and
        # returns self.y as output array. As such, self.x must be copied so as
        # not to be overwritten on a subsequent call to _matvec.
        np.copyto(self.x, x)
        y = self.fftplan().copy()
        if self.norm is not _FFTNorms.NONE:
            y *= self._scale
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            y = np.swapaxes(y, -1, self.axis)
            y[..., 1 : 1 + (self.nfft - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axis, -1)
        if self.fftshift_after:
            y = np.fft.fftshift(y, axes=self.axis)
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        if self.fftshift_after:
            x = np.fft.ifftshift(x, axes=self.axis)
        # self.ifftplan() always uses byte-aligned self.y as input array.
        # We copy here so we don't need to copy again in the case of `real=True`,
        # which only performs operations that preserve byte-alignment.
        np.copyto(self.y, x)
        x = self.y  # Update reference only
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = np.swapaxes(x, -1, self.axis)
            x[..., 1 : 1 + (self.nfft - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axis, -1)
        # self.ifftplan() always returns self.x, which must be copied so as not
        # to be overwritten on a subsequent call to _rmatvec.
        y = self.ifftplan().copy()
        if self.norm is _FFTNorms.ORTHO:
            y /= self._scale
        elif self.norm is _FFTNorms.NONE:
            y *= self._scale
        # truncate (nfft > model size) or pad (nfft < model size) back to dims
        if self.nfft > self.dims[self.axis]:
            y = np.take(y, range(0, self.dims[self.axis]), axis=self.axis)
        elif self.nfft < self.dims[self.axis]:
            y = np.pad(y, self.ifftpad)
        if self.ifftshift_before:
            y = np.fft.fftshift(y, axes=self.axis)
        if not self.clinear:
            y = np.real(y)
        return y

    def __truediv__(self, y: npt.ArrayLike) -> npt.ArrayLike:
        # inverse: for non-orthogonal norms the adjoint must be rescaled to
        # act as the inverse of the forward
        if self.norm is _FFTNorms.ORTHO:
            return self._rmatvec(y)
        return self._rmatvec(y) / self._scale


def FFT(
    dims: Union[int, InputDimsLike],
    axis: int = -1,
    nfft: Optional[int] = None,
    sampling: float = 1.0,
    norm: str = "ortho",
    real: bool = False,
    ifftshift_before: bool = False,
    fftshift_after: bool = False,
    engine: str = "numpy",
    dtype: DTypeLike = "complex128",
    name: str = "F",
    **kwargs_fftw,
) -> LinearOperator:
    r"""One dimensional Fast-Fourier Transform.

    Apply Fast-Fourier Transform (FFT) along an ``axis`` of a
    multi-dimensional array of size ``dim``.

    Using the default NumPy engine, the FFT operator is an overload to either the NumPy
    :py:func:`numpy.fft.fft` (or :py:func:`numpy.fft.rfft` for real models) in
    forward mode, and to :py:func:`numpy.fft.ifft` (or :py:func:`numpy.fft.irfft`
    for real models) in adjoint mode, or their CuPy equivalents.
    When ``engine='fftw'`` is chosen, the :py:class:`pyfftw.FFTW` class is used
    instead.
    Alternatively, when the SciPy engine is chosen, the overloads are of
    :py:func:`scipy.fft.fft` (or :py:func:`scipy.fft.rfft` for real models) in
    forward mode, and to :py:func:`scipy.fft.ifft` (or :py:func:`scipy.fft.irfft`
    for real models) in adjoint mode.

    When using ``real=True``, the result of the forward is also multiplied by
    :math:`\sqrt{2}` for all frequency bins except zero and Nyquist, and the input of
    the adjoint is multiplied by :math:`1 / \sqrt{2}` for the same frequencies.

    For a real valued input signal, it is advised to use the flag ``real=True``
    as it stores the values of the Fourier transform at positive frequencies only as
    values at negative frequencies are simply their complex conjugates.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension
    axis : :obj:`int`, optional
        .. versionadded:: 2.0.0

        Axis along which FFT is applied
    nfft : :obj:`int`, optional
        Number of samples in Fourier Transform (same as input if ``nfft=None``)
    sampling : :obj:`float`, optional
        Sampling step ``dt``.
    norm : `{"ortho", "none", "1/n"}`, optional
        .. versionadded:: 1.17.0

        - "ortho": Scales forward and adjoint FFT transforms with :math:`1/\sqrt{N_F}`,
          where :math:`N_F` is the number of samples in the Fourier domain given by
          ``nfft``.

        - "none": Does not scale the forward or the adjoint FFT transforms.

        - "1/n": Scales both the forward and adjoint FFT transforms by
          :math:`1/N_F`.

        .. note:: For "none" and "1/n", the operator is not unitary, that is, the
          adjoint is not the inverse. To invert the operator, simply use
          ``Op \ y``.

    real : :obj:`bool`, optional
        Model to which fft is applied has real numbers (``True``) or not
        (``False``). Used to enforce that the output of adjoint of a real
        model is real.
    ifftshift_before : :obj:`bool`, optional
        .. versionadded:: 1.17.0

        Apply ifftshift (``True``) or not (``False``) to model vector (before FFT).
        Consider using this option when the model vector's respective axis is symmetric
        with respect to the zero value sample. This will shift the zero value sample to
        coincide with the zero index sample. With such an arrangement, FFT will not
        introduce a sample-dependent phase-shift when compared to the continuous
        Fourier Transform.
        Defaults to not applying ifftshift.
    fftshift_after : :obj:`bool`, optional
        .. versionadded:: 1.17.0

        Apply fftshift (``True``) or not (``False``) to data vector (after FFT).
        Consider using this option when you require frequencies to be arranged
        naturally, from negative to positive. When not applying fftshift after FFT,
        frequencies are arranged from zero to largest positive, and then from negative
        Nyquist to the frequency bin before zero.
    engine : :obj:`str`, optional
        Engine used for fft computation (``numpy``, ``fftw``, or ``scipy``). Choose
        ``numpy`` when working with cupy arrays.

        .. note:: Since version 1.17.0, accepts "scipy".

    dtype : :obj:`str`, optional
        Type of elements in input array. Note that the ``dtype`` of the operator
        is the corresponding complex type even when a real type is provided.
        In addition, note that neither the NumPy nor the FFTW backends supports
        returning ``dtype`` different than ``complex128``. As such, when using either
        backend, arrays will be force-casted to types corresponding to the supplied
        ``dtype``.
        The SciPy backend supports all precisions natively.
        Under all backends, when a real ``dtype`` is supplied, a real result will be
        enforced on the result of the ``rmatvec`` and the input of the ``matvec``.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)
    **kwargs_fftw
            Arbitrary keyword arguments
            for :py:class:`pyfftw.FTTW`

    Attributes
    ----------
    dimsd : :obj:`tuple`
        Shape of the array after the forward, but before linearization.
        For example, ``y_reshaped = (Op * x.ravel()).reshape(Op.dimsd)``.
    f : :obj:`numpy.ndarray`
        Discrete Fourier Transform sample frequencies
    real : :obj:`bool`
        When ``True``, uses ``rfft``/``irfft``
    rdtype : :obj:`bool`
        Expected input type to the forward
    cdtype : :obj:`bool`
        Output type of the forward. Complex equivalent to ``rdtype``.
    shape : :obj:`tuple`
        Operator shape
    clinear : :obj:`bool`
        .. versionadded:: 1.17.0

        Operator is complex-linear. Is false when either ``real=True`` or when
        ``dtype`` is not a complex type.
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly
        (``True``) or not (``False``)

    Raises
    ------
    ValueError
        - If ``dims`` is provided and ``axis`` is bigger than ``len(dims)``.
        - If ``norm`` is not one of "ortho", "none", or "1/n".
    NotImplementedError
        If ``engine`` is neither ``numpy``, ``fftw``, nor ``scipy``.

    See Also
    --------
    FFT2D: Two-dimensional FFT
    FFTND: N-dimensional FFT

    Notes
    -----
    The FFT operator (using ``norm="ortho"``) applies the forward Fourier transform to
    a signal :math:`d(t)` in forward mode:

    .. math::
        D(f) = \mathscr{F} (d) = \frac{1}{\sqrt{N_F}} \int\limits_{-\infty}^\infty
        d(t) e^{-j2\pi ft} \,\mathrm{d}t

    Similarly, the inverse Fourier transform is applied to the Fourier spectrum
    :math:`D(f)` in adjoint mode:

    .. math::
        d(t) = \mathscr{F}^{-1} (D) = \frac{1}{\sqrt{N_F}} \int\limits_{-\infty}^\infty
        D(f) e^{j2\pi ft} \,\mathrm{d}f

    where :math:`N_F` is the number of samples in the Fourier domain ``nfft``.
    Both operators are effectively discretized and solved by a fast iterative
    algorithm known as Fast Fourier Transform.

    Note that the FFT operator (using ``norm="ortho"``) is a special operator in
    that the adjoint is also the inverse of the forward mode. For other norms, this
    does not hold (see ``norm`` help). However, for any norm, the Fourier transform is
    Hermitian for real input signals.

    """
    # dispatch to the chosen backend, falling back to numpy when pyfftw is
    # unavailable (pyfftw_message holds the import-failure message in that case)
    if engine == "fftw" and pyfftw_message is None:
        f = _FFT_fftw(
            dims,
            axis=axis,
            nfft=nfft,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
            **kwargs_fftw,
        )
    elif engine == "numpy" or (engine == "fftw" and pyfftw_message is not None):
        if engine == "fftw" and pyfftw_message is not None:
            logging.warning(pyfftw_message)
        f = _FFT_numpy(
            dims,
            axis=axis,
            nfft=nfft,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    elif engine == "scipy":
        f = _FFT_scipy(
            dims,
            axis=axis,
            nfft=nfft,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    else:
        raise NotImplementedError("engine must be numpy, fftw or scipy")
    f.name = name
    return f
21,761
36.327616
132
py
pylops
pylops-master/pylops/signalprocessing/_chirpradon2d.py
from pylops.utils.backend import get_array_module
from pylops.utils.typing import NDArray


def _chirp_radon_2d(
    data: NDArray, dt: float, dx: float, pmax: float, mode: str = "f"
) -> NDArray:
    r"""2D Chirp Radon transform

    Apply the 2D Radon transform via Fast Fourier Transforms and chirp
    functions, in forward (``mode="f"``), adjoint (``mode="a"``), or inverse
    (``mode="i"``) mode. See the Chirp2DRadon operator docstring for more
    details.

    Parameters
    ----------
    data : :obj:`np.ndarray`
        2D input data of size :math:`[n_x \times n_t]`
    dt : :obj:`float`
        Time sampling :math:`dt`
    dx : :obj:`float`
        Spatial sampling in :math:`x` direction :math:`dx`
    pmax : :obj:`float`
        Maximum slope, defined as the tangent of the maximum stacking angle in
        the :math:`x` direction. When operating in terms of a minimum velocity
        :math:`c_0`, this corresponds to :math:`p_{max}=c_0\,dy/dt`.
    mode : :obj:`str`, optional
        Mode of operation, 'f': forward, 'a': adjoint, and 'i': inverse

    Returns
    -------
    g : :obj:`np.ndarray`
        2D output of size :math:`[n_x \times n_t]`

    """
    ncp = get_array_module(data)

    # chirp sign: negative phase for the forward, positive for adjoint/inverse
    sign = -1.0 if mode == "f" else 1.0

    nx, nt = data.shape

    # real and complex working dtypes matching the input precision
    rdtype = ncp.real(data).dtype
    cdtype = (ncp.ones(1, dtype=rdtype) + 1j * ncp.ones(1, dtype=rdtype)).dtype

    # frequency axis (row vector)
    omega = (ncp.fft.fftfreq(nt, 1 / nt) / (nt * dt)).reshape((1, nt)).astype(rdtype)

    # slowness sampling
    dp = 2 * dt * pmax / dx / nx

    # squared (zero-padded) spatial axis (column vector)
    xsq = (
        (ncp.fft.fftfreq(2 * nx, 1 / (2 * nx)) ** 2)
        .reshape((2 * nx, 1))
        .astype(rdtype)
    )

    # chirp kernel and the windowed conjugate of its shifted version
    chirp = ncp.exp(sign * ncp.pi * 1j * dp * dx * omega * xsq).reshape((2 * nx, nt))
    chirpc = ncp.conj(ncp.fft.fftshift(chirp, axes=(0,)))[nx // 2 : 3 * nx // 2, :]

    # temporal spectrum of the input, modulated by the conjugate chirp and
    # zero-padded along space
    padded = ncp.zeros((2 * nx, nt)).astype(cdtype)
    padded[0:nx, :] = ncp.fft.fftn(data, axes=(1,)) * chirpc

    # spatial convolution with the chirp, carried out in the wavenumber domain
    g = ncp.fft.ifftn(
        ncp.fft.fftn(padded, axes=(0,)) * ncp.fft.fftn(chirp, axes=(0,)), axes=(0,)
    )

    if mode == "i":
        # inverse: apply rho filter |omega| and slowness/space scaling
        g = ncp.fft.ifftn(g[0:nx, :] * chirpc * abs(omega), axes=(1,)).real * dp * dx
    else:
        g = ncp.fft.ifftn(g[0:nx, :] * chirpc, axes=(1,)).real
    return g
2,347
30.72973
87
py
pylops
pylops-master/pylops/signalprocessing/bilinear.py
__all__ = ["Bilinear"]

import logging

import numpy as np
import numpy.typing as npt

from pylops import LinearOperator
from pylops.utils.backend import get_add_at, get_array_module, to_numpy
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, IntNDArray, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def _checkunique(iava: npt.ArrayLike) -> None:
    """Check that the ``[2 x n]`` index array contains only unique pairs
    (raises ``ValueError`` otherwise)"""
    _, count = np.unique(iava, axis=1, return_counts=True)
    if np.any(count > 1):
        raise ValueError("Repeated values in iava array")


class Bilinear(LinearOperator):
    r"""Bilinear interpolation operator.

    Apply bilinear interpolation onto fractionary positions ``iava``
    along the first two axes of a n-dimensional array.

    .. note:: The vector ``iava`` should contain unique pairs. If the same
      pair is repeated twice an error will be raised.

    Parameters
    ----------
    iava : :obj:`list` or :obj:`numpy.ndarray`
        Array of size :math:`[2 \times n_\text{ava}]` containing
        pairs of floating indices of locations of available samples
        for interpolation.
    dims : :obj:`list`
        Number of samples for each dimension
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If the vector ``iava`` contains repeated values.

    Notes
    -----
    Bilinear interpolation of a subset of :math:`N` values at locations
    ``iava`` from an input n-dimensional vector :math:`\mathbf{x}` of size
    :math:`[m_1 \times m_2 \times ... \times m_{ndim}]` can be expressed as:

    .. math::

        y_{\mathbf{i}} = (1-w^0_{i}) (1-w^1_{i}) x_{l^{l,0}_i, l^{l,1}_i} +
            w^0_{i} (1-w^1_{i}) x_{l^{r,0}_i, l^{l,1}_i} +
            (1-w^0_{i}) w^1_{i} x_{l^{l,0}_i, l^{r,1}_i} +
            w^0_{i} w^1_{i} x_{l^{r,0}_i, l^{r,1}_i}
            \quad \forall i=1,2,\ldots,M

    where
    :math:`\mathbf{l^{l,0}}=[\lfloor l_1^0 \rfloor, \lfloor l_2^0 \rfloor, ...,
    \lfloor l_N^0 \rfloor]`,
    :math:`\mathbf{l^{l,1}}=[\lfloor l_1^1 \rfloor, \lfloor l_2^1 \rfloor, ...,
    \lfloor l_N^1 \rfloor]`,
    :math:`\mathbf{l^{r,0}}=[\lfloor l_1^0 \rfloor + 1, \lfloor l_2^0 \rfloor + 1, ...,
    \lfloor l_N^0 \rfloor + 1]`,
    :math:`\mathbf{l^{r,1}}=[\lfloor l_1^1 \rfloor + 1, \lfloor l_2^1 \rfloor + 1, ...,
    \lfloor l_N^1 \rfloor + 1]`,
    are vectors containing the indices of the original array at which samples
    are taken, and
    :math:`\mathbf{w^j}=[l_1^i - \lfloor l_1^i \rfloor,
    l_2^i - \lfloor l_2^i \rfloor, ..., l_N^i - \lfloor l_N^i \rfloor]`
    (:math:`\forall j=0,1`) are the bilinear interpolation weights.

    """

    def __init__(
        self,
        iava: IntNDArray,
        dims: InputDimsLike,
        dtype: DTypeLike = "float64",
        name: str = "B",
    ) -> None:
        # define dimension of data
        ndims = len(dims)
        dimsd = [len(iava[1])] + list(dims[2:])
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name)

        ncp = get_array_module(iava)
        # check non-unique pairs (works only with numpy arrays)
        _checkunique(to_numpy(iava))
        # find indices and weights: top/bottom (floor/floor+1) neighbours and
        # fractional weights along the first axis, left/right along the second
        self.iava_t = ncp.floor(iava[0]).astype(int)
        self.iava_b = self.iava_t + 1
        self.weights_tb = iava[0] - self.iava_t
        self.iava_l = ncp.floor(iava[1]).astype(int)
        self.iava_r = self.iava_l + 1
        self.weights_lr = iava[1] - self.iava_l
        # expand dims to weights for nd-arrays so they broadcast over the
        # trailing (non-interpolated) axes
        if ndims > 2:
            for _ in range(ndims - 2):
                self.weights_tb = ncp.expand_dims(self.weights_tb, axis=-1)
                self.weights_lr = ncp.expand_dims(self.weights_lr, axis=-1)

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        # gather: weighted sum of the four neighbouring samples
        return (
            x[self.iava_t, self.iava_l] * (1 - self.weights_tb) * (1 - self.weights_lr)
            + x[self.iava_t, self.iava_r] * (1 - self.weights_tb) * self.weights_lr
            + x[self.iava_b, self.iava_l] * self.weights_tb * (1 - self.weights_lr)
            + x[self.iava_b, self.iava_r] * self.weights_tb * self.weights_lr
        )

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        # scatter: add each weighted sample back onto its four neighbours
        # (add_at handles repeated target indices correctly)
        ncp = get_array_module(x)
        ncp_add_at = get_add_at(x)
        y = ncp.zeros(self.dims, dtype=self.dtype)
        ncp_add_at(
            y,
            tuple([self.iava_t, self.iava_l]),
            x * (1 - self.weights_tb) * (1 - self.weights_lr),
        )
        ncp_add_at(
            y,
            tuple([self.iava_t, self.iava_r]),
            x * (1 - self.weights_tb) * self.weights_lr,
        )
        ncp_add_at(
            y,
            tuple([self.iava_b, self.iava_l]),
            x * self.weights_tb * (1 - self.weights_lr),
        )
        ncp_add_at(
            y, tuple([self.iava_b, self.iava_r]), x * self.weights_tb * self.weights_lr
        )
        return y
5,375
34.368421
87
py
pylops
pylops-master/pylops/signalprocessing/_nonstatconvolve2d_cuda.py
from math import floor

from numba import cuda


@cuda.jit(max_registers=40)
def _matvec_rmatvec(x, y, hs, hshape, xdims, ohx, ohz, dhx, dhz, nhx, nhz, rmatvec):
    """Cuda kernels for NonStationaryConvolve2D operator

    Cuda implementation of matvec and rmatvec for NonStationaryConvolve2D
    operator. See :class:`pylops.signalprocessing.NonStationaryConvolve2D`
    for details about input parameters.

    One thread is launched per model sample (ix, iz); out-of-grid threads
    return immediately.

    """
    ix, iz = cuda.grid(2)

    if ix < xdims[0] and iz < xdims[1]:
        # find and interpolate h: locate the four surrounding filters on the
        # coarse (ohx/ohz, dhx/dhz) grid and their bilinear weights
        ihx_l = int(floor((ix - ohx) / dhx))
        ihz_t = int(floor((iz - ohz) / dhz))
        dhx_r = (ix - ohx) / dhx - ihx_l
        dhz_b = (iz - ohz) / dhz - ihz_t
        if ihx_l < 0:
            # outside the filter grid on the left: clamp both neighbours to the
            # first filter (0.5 + 0.5 weights reproduce it exactly)
            ihx_l = ihx_r = 0
            dhx_l = dhx_r = 0.5
        elif ihx_l >= nhx - 1:
            ihx_l = ihx_r = nhx - 1
            dhx_l = dhx_r = 0.5
        else:
            ihx_r = ihx_l + 1
            dhx_l = 1.0 - dhx_r

        if ihz_t < 0:
            ihz_t = ihz_b = 0
            dhz_t = dhz_b = 0.5
        elif ihz_t >= nhz - 1:
            ihz_t = ihz_b = nhz - 1
            dhz_t = dhz_b = 0.5
        else:
            ihz_b = ihz_t + 1
            dhz_t = 1.0 - dhz_b

        h_tl = hs[ihx_l, ihz_t]
        h_bl = hs[ihx_l, ihz_b]
        h_tr = hs[ihx_r, ihz_t]
        h_br = hs[ihx_r, ihz_b]

        # find extremes of model where to apply h (in case h is going out of model)
        xextremes = (
            int(max(0, ix - hshape[0] // 2)),
            int(min(ix + hshape[0] // 2 + 1, xdims[0])),
        )
        zextremes = (
            int(max(0, iz - hshape[1] // 2)),
            int(min(iz + hshape[1] // 2 + 1, xdims[1])),
        )
        # find extremes of h (in case h is going out of model)
        hxextremes = (
            int(max(0, -ix + hshape[0] // 2)),
            int(min(hshape[0], hshape[0] // 2 + (xdims[0] - ix))),
        )
        hzextremes = (
            int(max(0, -iz + hshape[1] // 2)),
            int(min(hshape[1], hshape[1] // 2 + (xdims[1] - iz))),
        )
        # place filter in output
        for ixx, hxx in zip(
            range(xextremes[0], xextremes[1]), range(hxextremes[0], hxextremes[1])
        ):
            for izz, hzz in zip(
                range(zextremes[0], zextremes[1]),
                range(hzextremes[0], hzextremes[1]),
            ):
                h = (
                    dhz_t * dhx_l * h_tl[hxx, hzz]
                    + dhz_b * dhx_l * h_bl[hxx, hzz]
                    + dhz_t * dhx_r * h_tr[hxx, hzz]
                    + dhz_b * dhx_r * h_br[hxx, hzz]
                )
                if rmatvec:
                    # adjoint: gather — y[ix, iz] is owned by this thread only
                    y[ix, iz] += h * x[ixx, izz]
                else:
                    # forward: scatter — multiple threads may update the same
                    # output sample, hence the atomic add
                    cuda.atomic.add(y, (ixx, izz), x[ix, iz] * h)


def _matvec_rmatvec_call(
    x,
    y,
    hs,
    hshape,
    xdims,
    ohx,
    ohz,
    dhx,
    dhz,
    nhx,
    nhz,
    rmatvec=False,
    num_blocks=(32, 32),
    num_threads_per_blocks=(32, 32),
):
    """Caller for NonStationaryConvolve2D operator

    Caller for cuda implementation of matvec and rmatvec for
    NonStationaryConvolve2D operator, with same signature as numpy/numba
    counterparts. See :class:`pylops.signalprocessing.NonStationaryConvolve2D`
    for details about input parameters.

    """
    _matvec_rmatvec[num_blocks, num_threads_per_blocks](
        x, y, hs, hshape, xdims, ohx, ohz, dhx, dhz, nhx, nhz, rmatvec
    )
    return y
3,437
29.696429
113
py
pylops
pylops-master/pylops/signalprocessing/nonstatconvolve2d.py
__all__ = [
    "NonStationaryConvolve2D",
    "NonStationaryFilters2D",
]

import os
from typing import Tuple, Union

import numpy as np

from pylops import LinearOperator
from pylops.utils import deps
from pylops.utils._internal import _value_or_sized_to_tuple
from pylops.utils.backend import get_array_module
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray

jit_message = deps.numba_import("the nonstatconvolve2d module")

if jit_message is None:
    from numba import jit, prange

    from ._nonstatconvolve2d_cuda import (
        _matvec_rmatvec_call as _matvec_rmatvec_cuda_call,
    )

    # detect whether to use parallel or not
    numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1"))
    parallel = True if numba_threads != 1 else False
else:
    prange = range


class NonStationaryConvolve2D(LinearOperator):
    r"""2D non-stationary convolution operator.

    Apply non-stationary two-dimensional convolution. A varying compact filter
    is provided on a coarser grid and on-the-fly interpolation is applied
    in forward and adjoint modes. Both input and output have size
    :math:`n_x \times n_z`.

    Parameters
    ----------
    dims : :obj:`list` or :obj:`int`
        Number of samples for each dimension
        (which we refer to as :math:`n_x \times n_z`).
    hs : :obj:`numpy.ndarray`
        Bank of 2d compact filters of size
        :math:`n_{\text{filts},x} \times n_{\text{filts},z} \times n_{h,x} \times n_{h,z}`.
        Filters must have odd number of samples and are assumed to be
        centered in the middle of the filter support.
    ihx : :obj:`tuple`
        Indices of the x locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh_x=\text{diff}(ihx)=\text{const.}`
    ihz : :obj:`tuple`
        Indices of the z locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh_z=\text{diff}(ihz)=\text{const.}`
    engine : :obj:`str`, optional
        Engine used for spread computation (``numpy``, ``numba``, or ``cuda``)
    num_threads_per_blocks : :obj:`tuple`, optional
        Number of threads in each block (only when ``engine=cuda``)
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If filters ``hs`` have even size
    ValueError
        If ``ihx`` or ``ihz`` is not regularly sampled
    ValueError
        If ``ihx`` or ``ihz`` are outside the bounds defined by ``dims``
    NotImplementedError
        If ``engine`` is neither ``numpy``, ``numba``, nor ``cuda``.

    Notes
    -----
    The NonStationaryConvolve2D operator applies non-stationary
    two-dimensional convolution between the input signal :math:`d(x, z)`
    and a bank of compact filter kernels :math:`h(x, z; x_i, z_i)`.

    Assuming an input signal composed of :math:`N \times M` samples
    (with :math:`N=4` and :math:`M=3`, and filters at locations
    :math:`x_1, x_3` and :math:`z_1, z_3`, the forward operator can be
    represented as follows:

    .. math::
        \mathbf{y} =
        \begin{bmatrix}
           \hat{h}_{(0,0),(0,0)} & \cdots & h_{(1,1),(0,0)} & \cdots &
               \hat{h}_{(2,2),(0,0)}  & \cdots \\
           \hat{h}_{(0,0),(0,1)} & \cdots & h_{(1,1),(0,1)} & \cdots &
               \hat{h}_{(2,2),(0,0)}  & \cdots \\
           \vdots & \ddots &  & \ddots & \vdots & \vdots \\
           \hat{h}_{(0,0),(4,3)} & \cdots & h_{(1,1),(4,3)} & \cdots &
               \hat{h}_{(2,2),(0,0)}  & \cdots \\
        \end{bmatrix}
        \begin{bmatrix}
            x_{0,0} \\ \vdots \\ x_{0,N} \\ x_{1,0} \\ \vdots \\ x_{1,N} \\
            x_{M,0} \\ \vdots \\ x_{M,N}
        \end{bmatrix}

    where :math:`\mathbf{h}_{(1,1)} = [h_{(1,1),(0,0)}, h_{(1,1),(0,1)},
    \ldots, h_{(1,1),(4,3)}]` (and :math:`\mathbf{h}_{(1,1)}`,
    :math:`\mathbf{h}_{(1,3)}`, :math:`\mathbf{h}_{(3,1)}`,
    :math:`\mathbf{h}_{(3,3)}`) are the provided filter,
    :math:`\hat{\mathbf{h}}_{(0,0)} = \mathbf{h}_{(1,1)}` and similar
    are the filters outside the range of the provided filters (which are
    extrapolated to be the same as the nearest provided filter) and
    :math:`\hat{\mathbf{h}}_{(2,2)} = \text{bilinear}(\mathbf{h}_{(1,1)},
    \mathbf{h}_{(3,1)}, \mathbf{h}_{(1,3)},\mathbf{h}_{(3,3)})` is the
    filter within the range of the provided filters (which is bilinearly
    interpolated from the four nearest provided filter on either side of its
    location).

    For more details on the numerical implementation of the forward and
    adjoint, see :class:`pylops.signalprocessing.NonStationaryConvolve1D`.

    """

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        hs: NDArray,
        ihx: InputDimsLike,
        ihz: InputDimsLike,
        engine: str = "numpy",
        num_threads_per_blocks: Tuple[int, int] = (32, 32),
        dtype: DTypeLike = "float64",
        name: str = "C",
    ) -> None:
        if engine not in ["numpy", "numba", "cuda"]:
            raise NotImplementedError("engine must be numpy or numba or cuda")
        if hs.shape[2] % 2 == 0 or hs.shape[3] % 2 == 0:
            raise ValueError("filters hs must have odd length")
        if len(np.unique(np.diff(ihx))) > 1 or len(np.unique(np.diff(ihz))) > 1:
            raise ValueError(
                "the indices of filters 'ih' are must be regularly sampled"
            )
        if min(ihx) < 0 or min(ihz) < 0 or max(ihx) >= dims[0] or max(ihz) >= dims[1]:
            raise ValueError(
                "the indices of filters 'ih' must be larger than 0 and smaller than `dims`"
            )
        self.hs = hs
        self.hshape = hs.shape[2:]
        # origin, spacing, and count of the (regular) filter grid along x and z
        self.ohx, self.dhx, self.nhx = ihx[0], ihx[1] - ihx[0], len(ihx)
        self.ohz, self.dhz, self.nhz = ihz[0], ihz[1] - ihz[0], len(ihz)
        self.ehx, self.ehz = ihx[-1], ihz[-1]
        self.dims = _value_or_sized_to_tuple(dims)
        self.engine = engine
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name)

        # create additional input parameters for engine=cuda
        self.kwargs_cuda = {}
        if engine == "cuda":
            self.kwargs_cuda["num_threads_per_blocks"] = num_threads_per_blocks
            num_threads_per_blocks_x, num_threads_per_blocks_z = num_threads_per_blocks
            # enough blocks to cover the model (ceil division)
            num_blocks_x = (
                self.dims[0] + num_threads_per_blocks_x - 1
            ) // num_threads_per_blocks_x
            num_blocks_z = (
                self.dims[1] + num_threads_per_blocks_z - 1
            ) // num_threads_per_blocks_z
            self.kwargs_cuda["num_blocks"] = (num_blocks_x, num_blocks_z)
        self._register_multiplications(engine)

    def _register_multiplications(self, engine: str) -> None:
        # bind the matvec/rmatvec implementation for the chosen engine
        if engine == "numba":
            numba_opts = dict(nopython=True, fastmath=True, nogil=True, parallel=True)
            self._mvrmv = jit(**numba_opts)(self._matvec_rmatvec)
        elif engine == "cuda":
            self._mvrmv = _matvec_rmatvec_cuda_call
        else:
            self._mvrmv = self._matvec_rmatvec

    @staticmethod
    def _matvec_rmatvec(
        x: NDArray,
        y: NDArray,
        hs: NDArray,
        hshape: Tuple[int, int],
        xdims: Tuple[int, int],
        ohx: float,
        ohz: float,
        dhx: float,
        dhz: float,
        nhx: int,
        nhz: int,
        rmatvec: bool = False,
    ) -> NDArray:
        for ix in prange(xdims[0]):
            for iz in range(xdims[1]):
                # find closest filters and interpolate h
                ihx_l = int(np.floor((ix - ohx) / dhx))
                ihz_t = int(np.floor((iz - ohz) / dhz))
                dhx_r = (ix - ohx) / dhx - ihx_l
                dhz_b = (iz - ohz) / dhz - ihz_t
                if ihx_l < 0:
                    # outside the filter grid: clamp both neighbours to the
                    # nearest filter (0.5 + 0.5 weights reproduce it exactly)
                    ihx_l = ihx_r = 0
                    dhx_l = dhx_r = 0.5
                elif ihx_l >= nhx - 1:
                    ihx_l = ihx_r = nhx - 1
                    dhx_l = dhx_r = 0.5
                else:
                    ihx_r = ihx_l + 1
                    dhx_l = 1.0 - dhx_r

                if ihz_t < 0:
                    ihz_t = ihz_b = 0
                    dhz_t = dhz_b = 0.5
                elif ihz_t >= nhz - 1:
                    ihz_t = ihz_b = nhz - 1
                    dhz_t = dhz_b = 0.5
                else:
                    ihz_b = ihz_t + 1
                    dhz_t = 1.0 - dhz_b

                h_tl = hs[ihx_l, ihz_t]
                h_bl = hs[ihx_l, ihz_b]
                h_tr = hs[ihx_r, ihz_t]
                h_br = hs[ihx_r, ihz_b]

                h = (
                    dhz_t * dhx_l * h_tl
                    + dhz_b * dhx_l * h_bl
                    + dhz_t * dhx_r * h_tr
                    + dhz_b * dhx_r * h_br
                )

                # find extremes of model where to apply h (in case h is going out of model)
                xextremes = (
                    max(0, ix - hshape[0] // 2),
                    min(ix + hshape[0] // 2 + 1, xdims[0]),
                )
                zextremes = (
                    max(0, iz - hshape[1] // 2),
                    min(iz + hshape[1] // 2 + 1, xdims[1]),
                )
                # find extremes of h (in case h is going out of model)
                hxextremes = (
                    max(0, -ix + hshape[0] // 2),
                    min(hshape[0], hshape[0] // 2 + (xdims[0] - ix)),
                )
                hzextremes = (
                    max(0, -iz + hshape[1] // 2),
                    min(hshape[1], hshape[1] // 2 + (xdims[1] - iz)),
                )
                if not rmatvec:
                    # forward: scatter the scaled filter into the output
                    y[xextremes[0] : xextremes[1], zextremes[0] : zextremes[1]] += (
                        x[ix, iz]
                        * h[
                            hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1]
                        ]
                    )
                else:
                    # adjoint: gather the windowed data weighted by the filter
                    y[ix, iz] = np.sum(
                        h[hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1]]
                        * x[xextremes[0] : xextremes[1], zextremes[0] : zextremes[1]]
                    )
        return y

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        ncp = get_array_module(x)
        y = ncp.zeros(self.dims, dtype=self.dtype)
        y = self._mvrmv(
            x,
            y,
            self.hs,
            self.hshape,
            self.dims,
            self.ohx,
            self.ohz,
            self.dhx,
            self.dhz,
            self.nhx,
            self.nhz,
            rmatvec=False,
            **self.kwargs_cuda
        )
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        ncp = get_array_module(x)
        y = ncp.zeros(self.dims, dtype=self.dtype)
        y = self._mvrmv(
            x,
            y,
            self.hs,
            self.hshape,
            self.dims,
            self.ohx,
            self.ohz,
            self.dhx,
            self.dhz,
            self.nhx,
            self.nhz,
            rmatvec=True,
            **self.kwargs_cuda
        )
        return y


class NonStationaryFilters2D(LinearOperator):
    r"""2D non-stationary filter estimation operator.

    Estimate a non-stationary two-dimensional filter by non-stationary
    convolution.

    Parameters
    ----------
    inp : :obj:`numpy.ndarray`
        Fixed input signal of size :math:`n_x \times n_z`.
    hshape : :obj:`list` or :obj:`tuple`
        Shape of the 2d compact filters (filters must have odd number of
        samples and are assumed to be centered in the middle of the
        filter support).
    ihx : :obj:`tuple`
        Indices of the x locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh_x=\text{diff}(ihx)=\text{const.}`
    ihz : :obj:`tuple`
        Indices of the z locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh_z=\text{diff}(ihz)=\text{const.}`
    engine : :obj:`str`, optional
        Engine used for spread computation (``numpy``, ``numba``, or ``cuda``)
    num_threads_per_blocks : :obj:`tuple`, optional
        Number of threads in each block (only when ``engine=cuda``)
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If filters ``hs`` have even size
    ValueError
        If ``ihx`` or ``ihz`` is not regularly sampled
    NotImplementedError
        If ``engine`` is neither ``numpy``, ``numba``, nor ``cuda``.

    Notes
    -----
    The NonStationaryConvolve2D operator is used to estimate a non-stationary
    two-dimensional filter between two signals, an input signal (provided
    directly to the operator) and the desired output signal.

    For more details on the numerical implementation of the forward and
    adjoint, see :class:`pylops.signalprocessing.NonStationaryFilters1D`.

    """

    def __init__(
        self,
        inp: NDArray,
        hshape: InputDimsLike,
        ihx: InputDimsLike,
        ihz: InputDimsLike,
        engine: str = "numpy",
        num_threads_per_blocks: Tuple[int, int] = (32, 32),
        dtype: DTypeLike = "float64",
        name: str = "C",
    ) -> None:
        if engine not in ["numpy", "numba", "cuda"]:
            raise NotImplementedError("engine must be numpy or numba or cuda")
        if hshape[0] % 2 == 0 or hshape[1] % 2 == 0:
            raise ValueError("filters hs must have odd length")
        if len(np.unique(np.diff(ihx))) > 1 or len(np.unique(np.diff(ihz))) > 1:
            raise ValueError(
                "the indices of filters 'ih' are must be regularly sampled"
            )
        if (
            min(ihx) < 0
            or min(ihz) < 0
            or max(ihx) >= inp.shape[0]
            or max(ihz) >= inp.shape[1]
        ):
            raise ValueError(
                "the indices of filters 'ih' must be larger than 0 and smaller than `dims`"
            )
        self.inp = inp
        self.inpdims = inp.shape
        self.hshape = hshape
        # origin, spacing, and count of the (regular) filter grid along x and z
        self.ohx, self.dhx, self.nhx = ihx[0], ihx[1] - ihx[0], len(ihx)
        self.ohz, self.dhz, self.nhz = ihz[0], ihz[1] - ihz[0], len(ihz)
        self.ehx, self.ehz = ihx[-1], ihz[-1]
        self.engine = engine
        super().__init__(
            dtype=np.dtype(dtype),
            dims=(self.nhx, self.nhz, *hshape),
            dimsd=self.inpdims,
            name=name,
        )

        # create additional input parameters for engine=cuda
        self.kwargs_cuda = {}
        if engine == "cuda":
            self.kwargs_cuda["num_threads_per_blocks"] = num_threads_per_blocks
            num_threads_per_blocks_x, num_threads_per_blocks_z = num_threads_per_blocks
            # enough blocks to cover the model (ceil division)
            num_blocks_x = (
                self.dims[0] + num_threads_per_blocks_x - 1
            ) // num_threads_per_blocks_x
            num_blocks_z = (
                self.dims[1] + num_threads_per_blocks_z - 1
            ) // num_threads_per_blocks_z
            self.kwargs_cuda["num_blocks"] = (num_blocks_x, num_blocks_z)
self._register_multiplications(engine) def _register_multiplications(self, engine: str) -> None: if engine == "numba": numba_opts = dict(nopython=True, fastmath=True, nogil=True, parallel=True) self._mv = jit(**numba_opts)(self.__matvec) self._rmv = jit(**numba_opts)(self.__rmatvec) elif engine == "cuda": raise NotImplementedError("engine=cuda is currently not available") else: self._mv = self.__matvec self._rmv = self.__rmatvec # use same matvec method as inNonStationaryConvolve2D __matvec = staticmethod(NonStationaryConvolve2D._matvec_rmatvec) @staticmethod def __rmatvec( x: NDArray, y: NDArray, hs: NDArray, hshape: Tuple[int, int], xdims: Tuple[int, int], ohx: float, ohz: float, dhx: float, dhz: float, nhx: int, nhz: int, ) -> NDArray: # Currently a race condition seem to occur when updating parts of hs multiple times within # the same loop (see https://numba.pydata.org/numba-doc/latest/user/parallel.html?highlight=njit). # Until atomic operations are provided we create a temporary filter array and store intermediate # results from each ix and reduce them at the end. 
hstmp = np.zeros((xdims[0], *hs.shape)) for ix in prange(xdims[0]): for iz in range(xdims[1]): # find extremes of model where to apply h (in case h is going out of model) xextremes = ( max(0, ix - hshape[0] // 2), min(ix + hshape[0] // 2 + 1, xdims[0]), ) zextremes = ( max(0, iz - hshape[1] // 2), min(iz + hshape[1] // 2 + 1, xdims[1]), ) # find extremes of h (in case h is going out of model) hxextremes = ( max(0, -ix + hshape[0] // 2), min(hshape[0], hshape[0] // 2 + (xdims[0] - ix)), ) hzextremes = ( max(0, -iz + hshape[1] // 2), min(hshape[1], hshape[1] // 2 + (xdims[1] - iz)), ) htmp = ( x[ix, iz] * y[xextremes[0] : xextremes[1], zextremes[0] : zextremes[1]] ) # find closest filters and interpolate h ihx_l = int(np.floor((ix - ohx) / dhx)) ihz_t = int(np.floor((iz - ohz) / dhz)) dhx_r = (ix - ohx) / dhx - ihx_l dhz_b = (iz - ohz) / dhz - ihz_t if ihx_l < 0: ihx_l = ihx_r = 0 dhx_l = dhx_r = 0.5 elif ihx_l >= nhx - 1: ihx_l = ihx_r = nhx - 1 dhx_l = dhx_r = 0.5 else: ihx_r = ihx_l + 1 dhx_l = 1.0 - dhx_r if ihz_t < 0: ihz_t = ihz_b = 0 dhz_t = dhz_b = 0.5 elif ihz_t >= nhz - 1: ihz_t = ihz_b = nhz - 1 dhz_t = dhz_b = 0.5 else: ihz_b = ihz_t + 1 dhz_t = 1.0 - dhz_b hstmp[ ix, ihx_l, ihz_t, hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1], ] += ( dhz_t * dhx_l * htmp ) hstmp[ ix, ihx_l, ihz_b, hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1], ] += ( dhz_b * dhx_l * htmp ) hstmp[ ix, ihx_r, ihz_t, hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1], ] += ( dhz_t * dhx_r * htmp ) hstmp[ ix, ihx_r, ihz_b, hxextremes[0] : hxextremes[1], hzextremes[0] : hzextremes[1], ] += ( dhz_b * dhx_r * htmp ) hs = hstmp.sum(axis=0) return hs @reshaped def _matvec(self, x: NDArray) -> NDArray: ncp = get_array_module(x) y = ncp.zeros(self.dimsd, dtype=self.dtype) y = self._mv( self.inp, y, x, self.hshape, self.dimsd, self.ohx, self.ohz, self.dhx, self.dhz, self.nhx, self.nhz, **self.kwargs_cuda ) return y @reshaped def _rmatvec(self, x: NDArray) -> 
NDArray: ncp = get_array_module(x) hs = ncp.zeros(self.dims, dtype=self.dtype) hs = self._rmv( self.inp, x, hs, self.hshape, self.dimsd, self.ohx, self.ohz, self.dhx, self.dhz, self.nhx, self.nhz, **self.kwargs_cuda ) return hs
21,447
36.043178
106
py
pylops
pylops-master/pylops/signalprocessing/dwt2d.py
__all__ = ["DWT2D"]

import logging
from math import ceil, log

import numpy as np

from pylops import LinearOperator
from pylops.basicoperators import Pad
from pylops.utils import deps
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray

from .dwt import _adjointwavelet, _checkwavelet

pywt_message = deps.pywt_import("the dwt2d module")

if pywt_message is None:
    import pywt

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


class DWT2D(LinearOperator):
    """Two dimensional Wavelet operator.

    Apply the 2D multilevel Discrete Wavelet Transform along two ``axes`` of a
    multi-dimensional array of size ``dims``.

    This operator wraps the ``pywt`` implementation of the wavelet transform;
    see https://pywavelets.readthedocs.io for a detailed description of the
    input parameters.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension
    axes : :obj:`int`, optional
        .. versionadded:: 2.0.0

        Axis along which DWT2D is applied
    wavelet : :obj:`str`, optional
        Name of wavelet type. Use :func:`pywt.wavelist(kind='discrete')` for
        a list of available wavelets.
    level : :obj:`int`, optional
        Number of scaling levels (must be >=0).
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ModuleNotFoundError
        If ``pywt`` is not installed
    ValueError
        If ``wavelet`` does not belong to ``pywt.families``

    Notes
    -----
    In forward mode the operator applies the 2-dimensional multilevel Discrete
    Wavelet Transform (DWT2); in adjoint mode it applies the 2-dimensional
    multilevel Inverse Discrete Wavelet Transform (IDWT2).

    """

    def __init__(
        self,
        dims: InputDimsLike,
        axes: InputDimsLike = (-2, -1),
        wavelet: str = "haar",
        level: int = 1,
        dtype: DTypeLike = "float64",
        name: str = "D",
    ) -> None:
        if pywt_message is not None:
            raise ModuleNotFoundError(pywt_message)
        _checkwavelet(wavelet)

        # Grow each transformed axis to a power of two that can host `level`
        # decomposition scales; zero-pad the data up to that size.
        padded_sizes = [max(2 ** ceil(log(dims[ax], 2)), 2**level) for ax in axes]
        pad = [(0, 0)] * len(dims)
        for ax, padded_size in zip(axes, padded_sizes):
            pad[ax] = (0, padded_size - dims[ax])
        self.pad = Pad(dims, pad)
        self.axes = axes

        dimsd = list(dims)
        for ax, padded_size in zip(axes, padded_sizes):
            dimsd[ax] = padded_size
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name)

        # Run one transform on dummy data to record the coefficient slices,
        # needed to rebuild the coefficient structure in the adjoint.
        _, self.sl = pywt.coeffs_to_array(
            pywt.wavedec2(
                np.ones(self.dimsd),
                wavelet=wavelet,
                level=level,
                mode="periodization",
                axes=self.axes,
            ),
            axes=self.axes,
        )
        self.wavelet = wavelet
        self.waveletadj = _adjointwavelet(wavelet)
        self.level = level

    def _matvec(self, x: NDArray) -> NDArray:
        # Pad to power-of-two shape, decompose, and flatten the coefficients.
        xpad = np.reshape(self.pad.matvec(x), self.dimsd)
        coeffs = pywt.wavedec2(
            xpad,
            wavelet=self.wavelet,
            level=self.level,
            mode="periodization",
            axes=self.axes,
        )
        y, _ = pywt.coeffs_to_array(coeffs, axes=self.axes)
        return y.ravel()

    def _rmatvec(self, x: NDArray) -> NDArray:
        # Rebuild the coefficient structure, reconstruct, and undo the padding.
        coeffs = pywt.array_to_coeffs(
            np.reshape(x, self.dimsd), self.sl, output_format="wavedec2"
        )
        y = pywt.waverec2(
            coeffs, wavelet=self.waveletadj, mode="periodization", axes=self.axes
        )
        return self.pad.rmatvec(y.ravel())
4,195
28.971429
82
py
pylops
pylops-master/pylops/signalprocessing/sliding1d.py
__all__ = [
    "sliding1d_design",
    "Sliding1D",
]

import logging
from typing import Tuple, Union

from pylops import LinearOperator
from pylops.basicoperators import BlockDiag, Diagonal, HStack, Restriction
from pylops.signalprocessing.sliding2d import _slidingsteps
from pylops.utils._internal import _value_or_sized_to_tuple
from pylops.utils.tapers import taper
from pylops.utils.typing import InputDimsLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def sliding1d_design(
    dimd: int,
    nwin: int,
    nover: int,
    nop: int,
) -> Tuple[int, int, Tuple[NDArray, NDArray], Tuple[NDArray, NDArray]]:
    """Design Sliding1D operator

    This routine can be used prior to creating the
    :class:`pylops.signalprocessing.Sliding1D` operator to identify the
    correct number of windows to be used based on the dimension of the data
    (``dimd``), dimension of the window (``nwin``), overlap (``nover``), and
    dimension of the operator acting in the model space.

    Parameters
    ----------
    dimd : :obj:`int`
        Number of samples of 1-dimensional data.
    nwin : :obj:`int`
        Number of samples of window.
    nover : :obj:`int`
        Number of samples of overlapping part of window.
    nop : :obj:`int`
        Size of model in the transformed domain.

    Returns
    -------
    nwins : :obj:`int`
        Number of windows.
    dim : :obj:`int`
        Number of samples of 1-dimensional model.
    mwins_inends : :obj:`tuple`
        Start and end indices for model patches.
    dwins_inends : :obj:`tuple`
        Start and end indices for data patches.

    """
    # data windows
    dwin_ins, dwin_ends = _slidingsteps(dimd, nwin, nover)
    dwins_inends = (dwin_ins, dwin_ends)
    nwins = len(dwin_ins)

    # model windows (non-overlapping, one transform output per data window)
    dim = nwins * nop
    mwin_ins, mwin_ends = _slidingsteps(dim, nop, 0)
    mwins_inends = (mwin_ins, mwin_ends)

    # print information about patching
    logging.warning("%d windows required...", nwins)
    logging.warning(
        "data wins - start:%s, end:%s",
        dwin_ins,
        dwin_ends,
    )
    logging.warning(
        "model wins - start:%s, end:%s",
        mwin_ins,
        mwin_ends,
    )
    return nwins, dim, mwins_inends, dwins_inends


def Sliding1D(
    Op: LinearOperator,
    dim: Union[int, InputDimsLike],
    dimd: Union[int, InputDimsLike],
    nwin: int,
    nover: int,
    tapertype: str = "hanning",
    name: str = "S",
) -> LinearOperator:
    r"""1D Sliding transform operator.

    Apply a transform operator ``Op`` repeatedly to slices of the model
    vector in forward mode and slices of the data vector in adjoint mode.
    More specifically, in forward mode the model vector is divided into
    slices, each slice is transformed, and slices are then recombined in a
    sliding window fashion.

    This operator can be used to perform local, overlapping transforms
    (e.g., :obj:`pylops.signalprocessing.FFT`)
    on 1-dimensional arrays.

    .. note:: The shape of the model has to be consistent with
       the number of windows for this operator not to return an error. As the
       number of windows depends directly on the choice of ``nwin`` and
       ``nover``, it is recommended to first run ``sliding1d_design`` to obtain
       the corresponding ``dims`` and number of windows.

    .. warning:: Depending on the choice of `nwin` and `nover` as well as the
       size of the data, sliding windows may not cover the entire data.
       The start and end indices of each window will be displayed and returned
       with running ``sliding1d_design``.

    Parameters
    ----------
    Op : :obj:`pylops.LinearOperator`
        Transform operator
    dim : :obj:`tuple` or :obj:`int`
        Shape of 1-dimensional model.
    dimd : :obj:`tuple` or :obj:`int`
        Shape of 1-dimensional data
    nwin : :obj:`int`
        Number of samples of window
    nover : :obj:`int`
        Number of samples of overlapping part of window
    tapertype : :obj:`str`, optional
        Type of taper (``hanning``, ``cosine``, ``cosinesquare`` or ``None``)
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    Sop : :obj:`pylops.LinearOperator`
        Sliding operator

    Raises
    ------
    ValueError
        Identified number of windows is not consistent with provided model
        shape (``dims``).

    """
    dim: Tuple[int, ...] = _value_or_sized_to_tuple(dim)
    dimd: Tuple[int, ...] = _value_or_sized_to_tuple(dimd)

    # data windows
    dwin_ins, dwin_ends = _slidingsteps(dimd[0], nwin, nover)
    nwins = len(dwin_ins)

    # check windows
    if nwins * Op.shape[1] != dim[0]:
        raise ValueError(
            f"Model shape (dim={dim}) is not consistent with chosen "
            f"number of windows. Run sliding1d_design to identify the "
            f"correct number of windows for the current "
            "model size..."
        )

    # create tapers
    # NOTE(review): this bookkeeping assumes nwins >= 2 — with a single
    # window, ``taps[nwins - 1] = tapend`` overwrites ``tapin``; confirm a
    # single-window configuration is intended to be tapered at all.
    if tapertype is not None:
        tap = taper(nwin, nover, tapertype=tapertype)
        # first window: no taper on the leading edge
        tapin = tap.copy()
        tapin[:nover] = 1
        # last window: no taper on the trailing edge
        tapend = tap.copy()
        tapend[-nover:] = 1
        taps = {}
        taps[0] = tapin
        for i in range(1, nwins - 1):
            taps[i] = tap
        taps[nwins - 1] = tapend

    # transform to apply (one block per window, optionally tapered)
    if tapertype is None:
        OOp = BlockDiag([Op for _ in range(nwins)])
    else:
        OOp = BlockDiag([Diagonal(taps[itap].ravel()) * Op for itap in range(nwins)])

    # adjoint Restrictions scatter each window back onto the data grid;
    # overlapping samples are summed by the HStack
    combining = HStack(
        [
            Restriction(dimd, range(win_in, win_end), dtype=Op.dtype).H
            for win_in, win_end in zip(dwin_ins, dwin_ends)
        ]
    )
    Sop = LinearOperator(combining * OOp)
    Sop.dims, Sop.dimsd = (nwins, int(dim[0] // nwins)), dimd
    Sop.name = name
    return Sop
5,894
30.524064
113
py
pylops
pylops-master/pylops/signalprocessing/nonstatconvolve1d.py
__all__ = [
    "NonStationaryConvolve1D",
    "NonStationaryFilters1D",
]

from typing import Union

import numpy as np

from pylops import LinearOperator
from pylops.utils._internal import _value_or_sized_to_tuple
from pylops.utils.backend import get_array_module
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray


class NonStationaryConvolve1D(LinearOperator):
    r"""1D non-stationary convolution operator.

    Apply non-stationary one-dimensional convolution. A varying compact
    filter is provided on a coarser grid and on-the-fly interpolation is
    applied in forward and adjoint modes.

    Parameters
    ----------
    dims : :obj:`list` or :obj:`int`
        Number of samples for each dimension
    hs : :obj:`numpy.ndarray`
        Bank of 1d compact filters of size :math:`n_\text{filts} \times n_h`.
        Filters must have odd number of samples and are assumed to be
        centered in the middle of the filter support.
    ih : :obj:`tuple`
        Indices of the locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh=\text{diff}(ih)=\text{const.}`
    axis : :obj:`int`, optional
        Axis along which convolution is applied
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If filters ``hs`` have even size
    ValueError
        If ``ih`` is not regularly sampled
    ValueError
        If ``ih`` is outside the bounds defined by ``dims[axis]``

    Notes
    -----
    The NonStationaryConvolve1D operator applies non-stationary
    one-dimensional convolution between the input signal :math:`d(t)`
    and a bank of compact filter kernels :math:`h(t; t_i)`.

    Assuming an input signal composed of :math:`N=5` samples, and filters at
    locations :math:`t_1` and :math:`t_3`, the forward operator can be
    represented as follows:

    .. math::
        \mathbf{y} =
        \begin{bmatrix}
           \hat{h}_{0,0} & h_{1,0} & \hat{h}_{2,0} & h_{3,0} & \hat{h}_{4,0} \\
           \hat{h}_{0,1} & h_{1,1} & \hat{h}_{2,1} & h_{3,1} & \hat{h}_{4,1} \\
           \vdots        & \vdots  & \vdots        & \vdots  & \vdots        \\
           \hat{h}_{0,4} & h_{1,4} & \hat{h}_{2,4} & h_{3,4} & \hat{h}_{4,4} \\
        \end{bmatrix}
        \begin{bmatrix}
           x_0 \\ x_1 \\ \vdots \\ x_4
        \end{bmatrix}

    where :math:`\mathbf{h}_1 = [h_{1,0}, h_{1,1}, \ldots, h_{1,N}]` and
    :math:`\mathbf{h}_3 = [h_{3,0}, h_{3,1}, \ldots, h_{3,N}]` are the
    provided filters, :math:`\hat{\mathbf{h}}_0 = \mathbf{h}_1` and
    :math:`\hat{\mathbf{h}}_4 = \mathbf{h}_3` are the filters outside the
    range of the provided filters (which are extrapolated to be the same as
    the nearest provided filter) and
    :math:`\hat{\mathbf{h}}_2 = 0.5 \mathbf{h}_1 + 0.5 \mathbf{h}_3` is
    the filter within the range of the provided filters (which is linearly
    interpolated from the two nearest provided filters on either side of
    its location).

    In forward mode, each filter is weighted by the corresponding input
    and spread over the output. In adjoint mode, the corresponding filter
    is element-wise multiplied with the input, all values are summed
    together and put in the output.

    """

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        hs: NDArray,
        ih: InputDimsLike,
        axis: int = -1,
        dtype: DTypeLike = "float64",
        name: str = "C",
    ) -> None:
        if hs.shape[1] % 2 == 0:
            raise ValueError("filters hs must have odd length")
        if len(np.unique(np.diff(ih))) > 1:
            # grammar fix of user-facing message (was "are must be")
            raise ValueError(
                "the indices of filters 'ih' must be regularly sampled"
            )
        dims = _value_or_sized_to_tuple(dims)
        if min(ih) < 0 or max(ih) >= dims[axis]:
            raise ValueError(
                "the indices of filters 'ih' must be larger than 0 and smaller than `dims`"
            )
        self.hs = hs
        self.hsize = hs.shape[1]
        # origin, sampling, number, and end index of the filter locations
        self.oh, self.dh, self.nh, self.eh = ih[0], ih[1] - ih[0], len(ih), ih[-1]
        self.axis = axis
        super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name)

    @property
    def hsinterp(self):
        # Dense bank of filters, one per sample along `axis`, obtained by
        # linear interpolation of the coarse bank (recomputed on each access).
        ncp = get_array_module(self.hs)
        self._hsinterp = ncp.empty((self.dims[self.axis], self.hsize), dtype=self.dtype)

        for ix in range(self.dims[self.axis]):
            self._hsinterp[ix] = self._interpolate_h(
                self.hs, ix, self.oh, self.dh, self.nh
            )
        return self._hsinterp

    @hsinterp.deleter
    def hsinterp(self):
        del self._hsinterp

    @staticmethod
    def _interpolate_h(hs, ix, oh, dh, nh):
        """find closest filters and linearly interpolate between them and interpolate psf"""
        ih_closest = int(np.floor((ix - oh) / dh))
        if ih_closest < 0:
            # before the first filter location: extrapolate with first filter
            h = hs[0]
        elif ih_closest >= nh - 1:
            # after the last filter location: extrapolate with last filter
            h = hs[nh - 1]
        else:
            dh_closest = (ix - oh) / dh - ih_closest
            h = (1 - dh_closest) * hs[ih_closest] + dh_closest * hs[ih_closest + 1]
        return h

    @reshaped(swapaxis=True)
    def _matvec(self, x: NDArray) -> NDArray:
        y = np.zeros_like(x)
        for ix in range(self.dims[self.axis]):
            h = self._interpolate_h(self.hs, ix, self.oh, self.dh, self.nh)
            # clip filter support where it spills outside the model
            xextremes = (
                max(0, ix - self.hsize // 2),
                min(ix + self.hsize // 2 + 1, self.dims[self.axis]),
            )
            hextremes = (
                max(0, -ix + self.hsize // 2),
                min(self.hsize, self.hsize // 2 + (self.dims[self.axis] - ix)),
            )
            y[..., xextremes[0] : xextremes[1]] += (
                x[..., ix : ix + 1] * h[hextremes[0] : hextremes[1]]
            )
        return y

    @reshaped(swapaxis=True)
    def _rmatvec(self, x: NDArray) -> NDArray:
        y = np.zeros_like(x)
        for ix in range(self.dims[self.axis]):
            h = self._interpolate_h(self.hs, ix, self.oh, self.dh, self.nh)
            # clip filter support where it spills outside the model
            xextremes = (
                max(0, ix - self.hsize // 2),
                min(ix + self.hsize // 2 + 1, self.dims[self.axis]),
            )
            hextremes = (
                max(0, -ix + self.hsize // 2),
                min(self.hsize, self.hsize // 2 + (self.dims[self.axis] - ix)),
            )
            y[..., ix] = np.sum(
                h[hextremes[0] : hextremes[1]] * x[..., xextremes[0] : xextremes[1]],
                axis=-1,
            )
        return y

    def todense(self):
        # Build the dense matrix by placing each interpolated filter on the
        # corresponding row (shifted), then trimming the padded columns.
        hs = self.hsinterp
        H = np.array(
            [
                np.roll(np.pad(h, (0, self.dims[self.axis])), ix)
                for ix, h in enumerate(hs)
            ]
        )
        H = H[:, int(self.hsize // 2) : -int(self.hsize // 2) - 1]
        return H


class NonStationaryFilters1D(LinearOperator):
    r"""1D non-stationary filter estimation operator.

    Estimate a non-stationary one-dimensional filter by non-stationary
    convolution.
    In forward mode, a varying compact filter on a coarser grid is on-the-fly
    linearly interpolated prior to being convolved with a fixed input signal.
    In adjoint mode, the output signal is first weighted by the fixed input
    signal and then spread across multiple filters (i.e., adjoint of linear
    interpolation).

    Parameters
    ----------
    inp : :obj:`numpy.ndarray`
        Fixed input signal of size :math:`n_x`.
    hsize : :obj:`int`
        Size of the 1d compact filters (filters must have odd number of
        samples and are assumed to be centered in the middle of the filter
        support).
    ih : :obj:`tuple`
        Indices of the locations of the filters ``hs`` in the model (and
        data). Note that the filters must be regularly sampled, i.e.
        :math:`dh=\text{diff}(ih)=\text{const.}`
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        If filters ``hsize`` is an even number
    ValueError
        If ``ih`` is not regularly sampled
    ValueError
        If ``ih`` is outside the bounds defined by ``inp``

    See Also
    --------
    NonStationaryConvolve1D : 1D non-stationary convolution operator.
    NonStationaryFilters2D : 2D non-stationary filter estimation operator.

    Notes
    -----
    The NonStationaryFilters1D operates in a similar fashion to the
    :class:`pylops.signalprocessing.NonStationaryConvolve1D` operator. In
    practical applications, this operator shall be used when interested to
    estimate a 1-dimensional non-stationary filter given an input and output
    signal.

    In forward mode, this operator uses the same implementation of the
    :class:`pylops.signalprocessing.NonStationaryConvolve1D`, with the main
    difference that the role of the filters and the input signal is swapped.
    Nevertheless, to understand how to implement adjoint, mathematically we
    arrange the forward operator in a slightly different way. Assuming once
    again an input signal composed of :math:`N=5` samples, and filters at
    locations :math:`t_1` and :math:`t_3`, the forward operator can be
    represented as follows:

    .. math::
        \mathbf{y} =
        \begin{bmatrix}
           \mathbf{X}_0 & \mathbf{X}_1 & \vdots & \mathbf{X}_4
        \end{bmatrix} \mathbf{L}
        \begin{bmatrix}
            \mathbf{h}_1 \\ \mathbf{h}_3
        \end{bmatrix}

    where :math:`\mathbf{L}` is an operator that linearly interpolates the
    filters from the available locations to the entire input grid -- i.e.,
    :math:`[\hat{\mathbf{h}}_0 \quad \mathbf{h}_1 \quad \hat{\mathbf{h}}_2
    \quad \mathbf{h}_3 \quad \hat{\mathbf{h}}_4]^T = \mathbf{L} [
    \mathbf{h}_1 \quad \mathbf{h}_3]`.

    Finally, :math:`\mathbf{X}_i` is a diagonal matrix containing the value
    :math:`x_i` along the main diagonal. Note that in practice the filter
    may be shorter than the input and output signals and the :math:`x_i`
    values are placed only at the effective positions of the filter along
    the diagonal matrices :math:`\mathbf{X}_i`.

    In adjoint mode, the output signal is first weighted by the fixed
    input signal and then spread across multiple filters (i.e., adjoint of
    linear interpolation) as follows

    .. math::
        \mathbf{h} =
        \mathbf{L}^H
        \begin{bmatrix}
           \mathbf{X}_0 \\ \mathbf{X}_1 \\ \vdots \\ \mathbf{X}_4
        \end{bmatrix}
        \mathbf{y}

    """

    def __init__(
        self,
        inp: NDArray,
        hsize: int,
        ih: InputDimsLike,
        dtype: DTypeLike = "float64",
        name: str = "C",
    ) -> None:
        if hsize % 2 == 0:
            raise ValueError("filters hs must have odd length")
        if len(np.unique(np.diff(ih))) > 1:
            # grammar fix of user-facing message (was "are must be")
            raise ValueError(
                "the indices of filters 'ih' must be regularly sampled"
            )
        if min(ih) < 0 or max(ih) >= inp.size:
            raise ValueError(
                "the indices of filters 'ih' must be larger than 0 and smaller than `dims`"
            )
        self.inp = inp
        self.hsize = hsize
        # origin, sampling, number, and end index of the filter locations
        self.oh, self.dh, self.nh, self.eh = ih[0], ih[1] - ih[0], len(ih), ih[-1]
        super().__init__(
            dtype=np.dtype(dtype), dims=(len(ih), hsize), dimsd=inp.shape, name=name
        )

    # use same interpolation method as in NonStationaryConvolve1D
    _interpolate_h = staticmethod(NonStationaryConvolve1D._interpolate_h)

    @staticmethod
    def _interpolate_hadj(htmp, hs, hextremes, ix, oh, dh, nh):
        """find closest filters and spread weighted psf"""
        ih_closest = int(np.floor((ix - oh) / dh))
        if ih_closest < 0:
            hs[0, hextremes[0] : hextremes[1]] += htmp
        elif ih_closest >= nh - 1:
            hs[nh - 1, hextremes[0] : hextremes[1]] += htmp
        else:
            dh_closest = (ix - oh) / dh - ih_closest
            hs[ih_closest, hextremes[0] : hextremes[1]] += (1 - dh_closest) * htmp
            hs[ih_closest + 1, hextremes[0] : hextremes[1]] += dh_closest * htmp
        return hs

    @reshaped
    def _matvec(self, x: NDArray) -> NDArray:
        y = np.zeros(self.dimsd, dtype=self.dtype)
        for ix in range(self.dimsd[0]):
            h = self._interpolate_h(x, ix, self.oh, self.dh, self.nh)
            # clip filter support where it spills outside the signal
            xextremes = (
                max(0, ix - self.hsize // 2),
                min(ix + self.hsize // 2 + 1, self.dimsd[0]),
            )
            hextremes = (
                max(0, -ix + self.hsize // 2),
                min(self.hsize, self.hsize // 2 + (self.dimsd[0] - ix)),
            )
            y[..., xextremes[0] : xextremes[1]] += (
                self.inp[..., ix : ix + 1] * h[hextremes[0] : hextremes[1]]
            )
        return y

    @reshaped
    def _rmatvec(self, x: NDArray) -> NDArray:
        hs = np.zeros(self.dims, dtype=self.dtype)
        for ix in range(self.dimsd[0]):
            # clip filter support where it spills outside the signal
            xextremes = (
                max(0, ix - self.hsize // 2),
                min(ix + self.hsize // 2 + 1, self.dimsd[0]),
            )
            hextremes = (
                max(0, -ix + self.hsize // 2),
                min(self.hsize, self.hsize // 2 + (self.dimsd[0] - ix)),
            )

            htmp = self.inp[ix] * x[..., xextremes[0] : xextremes[1]]
            hs = self._interpolate_hadj(
                htmp, hs, hextremes, ix, self.oh, self.dh, self.nh
            )
        return hs
14,218
37.956164
114
py
pylops
pylops-master/pylops/signalprocessing/_baseffts.py
import logging
import warnings
from enum import Enum, auto
from typing import Optional, Sequence, Union

import numpy as np
from numpy.core.multiarray import normalize_axis_index

from pylops import LinearOperator
from pylops.utils._internal import (
    _raise_on_wrong_dtype,
    _value_or_sized_to_array,
    _value_or_sized_to_tuple,
)
from pylops.utils.backend import get_complex_dtype, get_real_dtype
from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


class _FFTNorms(Enum):
    """Normalization modes shared by all FFT operators."""

    ORTHO = auto()
    NONE = auto()
    ONE_OVER_N = auto()


class _BaseFFT(LinearOperator):
    """Base class for one dimensional Fast-Fourier Transform

    Validates and stores the parameters shared by the concrete 1D FFT
    backends (axis, nfft, normalization, real mode, shifts, dtypes).
    Subclasses must implement ``_matvec``/``_rmatvec``.
    """

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axis: int = -1,
        nfft: Optional[int] = None,
        sampling: float = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ):
        dims = _value_or_sized_to_array(dims)
        _raise_on_wrong_dtype(dims, np.integer, "dims")
        self.ndim = len(dims)

        axes = _value_or_sized_to_array(axis)
        _raise_on_wrong_dtype(axes, np.integer, "axis")
        self.axis = normalize_axis_index(axes[0], self.ndim)

        if nfft is None:
            self.nfft = dims[self.axis]
        else:
            nffts = _value_or_sized_to_array(nfft)
            _raise_on_wrong_dtype(nffts, np.integer, "nfft")
            self.nfft = nffts[0]

        # Check if the user provided nfft smaller than n (size of signal in
        # original domain). If so, raise a warning as this is unlikely a
        # wanted behavior (since FFT routines cut some of the input signal
        # before applying fft, which is lost forever) and set a flag such that
        # a padding is applied after ifft
        self.doifftpad = False
        if self.nfft < dims[self.axis]:
            self.doifftpad = True
            self.ifftpad = [(0, 0)] * self.ndim
            self.ifftpad[self.axis] = (0, dims[self.axis] - self.nfft)
            warnings.warn(
                f"nfft={self.nfft} has been selected to be smaller than the size of "
                f"the original signal (dims[axis]={dims[axis]}).\n"
                f"This is rarely intended behavior as the original signal will be "
                f"truncated prior to applying FFT. "
                f"If this is the required behaviour ignore this message."
            )

        if norm == "ortho":
            self.norm = _FFTNorms.ORTHO
        elif norm == "none":
            self.norm = _FFTNorms.NONE
        elif norm.lower() == "1/n":
            self.norm = _FFTNorms.ONE_OVER_N
        elif norm == "backward":
            raise ValueError(
                'To use no scaling on the forward transform, use "none". Note that in this case, the adjoint transform will *not* have a 1/n scaling.'
            )
        elif norm == "forward":
            raise ValueError(
                'To use 1/n scaling on the forward transform, use "1/n". Note that in this case, the adjoint transform will *also* have a 1/n scaling.'
            )
        else:
            raise ValueError(f"'{norm}' is not one of 'ortho', 'none' or '1/n'")

        self.real = real
        self.ifftshift_before = ifftshift_before
        # frequency axis (only non-negative frequencies when real=True)
        self.f = (
            np.fft.rfftfreq(self.nfft, d=sampling)
            if real
            else np.fft.fftfreq(self.nfft, d=sampling)
        )
        self.fftshift_after = fftshift_after
        if self.fftshift_after:
            if self.real:
                warnings.warn(
                    "Using fftshift_after with real=True. fftshift should only be applied after a complex FFT. This is rarely intended behavior but if it is, ignore this message."
                )
            self.f = np.fft.fftshift(self.f)

        dimsd = list(dims)
        dimsd[self.axis] = self.nfft // 2 + 1 if self.real else self.nfft

        # Find types to enforce to forward and adjoint outputs. This is
        # required as np.fft.fft always returns complex128 even if input is
        # float32 or less. Moreover, when choosing real=True, the type of the
        # adjoint output is forced to be real even if the provided dtype
        # is complex.
        self.rdtype = get_real_dtype(dtype) if self.real else np.dtype(dtype)
        self.cdtype = get_complex_dtype(dtype)
        clinear = False if self.real or np.issubdtype(dtype, np.floating) else True
        super().__init__(dtype=self.cdtype, dims=dims, dimsd=dimsd, clinear=clinear)

    def _matvec(self, x: NDArray) -> NDArray:
        raise NotImplementedError(
            "_BaseFFT does not provide _matvec. It must be implemented separately."
        )

    def _rmatvec(self, x: NDArray) -> NDArray:
        raise NotImplementedError(
            "_BaseFFT does not provide _rmatvec. It must be implemented separately."
        )


class _BaseFFTND(LinearOperator):
    """Base class for N-dimensional fast Fourier Transform

    Validates and stores the parameters shared by the concrete ND FFT
    backends. Subclasses must implement ``_matvec``/``_rmatvec``.
    """

    def __init__(
        self,
        dims: Union[int, InputDimsLike],
        axes: Optional[Union[int, InputDimsLike]] = None,
        nffts: Optional[Union[int, InputDimsLike]] = None,
        sampling: Union[float, Sequence[float]] = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ):
        dims = _value_or_sized_to_array(dims)
        _raise_on_wrong_dtype(dims, np.integer, "dims")
        self.ndim = len(dims)
        axes = _value_or_sized_to_array(axes)
        _raise_on_wrong_dtype(axes, np.integer, "axes")
        self.axes = np.array([normalize_axis_index(d, self.ndim) for d in axes])
        self.naxes = len(self.axes)
        if self.naxes != len(np.unique(self.axes)):
            warnings.warn(
                "At least one direction is repeated. This may cause unexpected results."
            )
        nffts = _value_or_sized_to_array(nffts, repeat=self.naxes)
        if len(nffts[np.equal(nffts, None)]) > 0:  # Found None(s) in nffts
            # replace every None with the corresponding dims entry
            nffts[np.equal(nffts, None)] = np.array(
                [dims[d] for d, n in zip(axes, nffts) if n is None]
            )
            nffts = nffts.astype(np.array(dims).dtype)
        _raise_on_wrong_dtype(nffts, np.integer, "nffts")
        self.nffts = _value_or_sized_to_tuple(
            nffts
        )  # tuple is strictly needed for cupy
        sampling = _value_or_sized_to_array(sampling, repeat=self.naxes)
        if np.issubdtype(sampling.dtype, np.integer):
            # Promote to float64 if integer
            sampling = sampling.astype(np.float64)
        self.sampling = sampling
        _raise_on_wrong_dtype(self.sampling, np.floating, "sampling")
        self.ifftshift_before = _value_or_sized_to_array(
            ifftshift_before, repeat=self.naxes
        )
        _raise_on_wrong_dtype(self.ifftshift_before, bool, "ifftshift_before")
        self.fftshift_after = _value_or_sized_to_array(
            fftshift_after, repeat=self.naxes
        )
        _raise_on_wrong_dtype(self.fftshift_after, bool, "fftshift_after")
        if (
            self.naxes != len(self.nffts)
            or self.naxes != len(self.sampling)
            or self.naxes != len(self.ifftshift_before)
            or self.naxes != len(self.fftshift_after)
        ):
            raise ValueError(
                (
                    "`axes`, `nffts`, `sampling`, `ifftshift_before` and "
                    "`fftshift_after` must have the same number of elements. Received "
                    f"{self.naxes}, {len(self.nffts)}, {len(self.sampling)}, "
                    f"{len(self.ifftshift_before)} and {len(self.fftshift_after)}, "
                    "respectively."
                )
            )

        # Check if the user provided nfft smaller than n. See _BaseFFT for
        # details
        nfftshort = [
            nfft < dims[direction] for direction, nfft in zip(self.axes, self.nffts)
        ]
        self.doifftpad = any(nfftshort)
        if self.doifftpad:
            self.ifftpad = [(0, 0)] * self.ndim
            for idir, (direction, nfshort) in enumerate(zip(self.axes, nfftshort)):
                if nfshort:
                    self.ifftpad[direction] = (
                        0,
                        dims[direction] - self.nffts[idir],
                    )
            warnings.warn(
                f"nffts in directions {np.where(nfftshort)[0]} have been selected to be smaller than the size of the original signal. "
                f"This is rarely intended behavior as the original signal will be truncated prior to applying fft, "
                f"if this is the required behaviour ignore this message."
            )

        if norm == "ortho":
            self.norm = _FFTNorms.ORTHO
        elif norm == "none":
            self.norm = _FFTNorms.NONE
        elif norm.lower() == "1/n":
            self.norm = _FFTNorms.ONE_OVER_N
        elif norm == "backward":
            raise ValueError(
                'To use no scaling on the forward transform, use "none". Note that in this case, the adjoint transform will *not* have a 1/n scaling.'
            )
        elif norm == "forward":
            raise ValueError(
                'To use 1/n scaling on the forward transform, use "1/n". Note that in this case, the adjoint transform will *also* have a 1/n scaling.'
            )
        else:
            raise ValueError(f"'{norm}' is not one of 'ortho', 'none' or '1/n'")

        self.real = real
        # per-direction frequency axes, optionally fftshifted
        fs = [
            np.fft.fftshift(np.fft.fftfreq(n, d=s))
            if fftshift
            else np.fft.fftfreq(n, d=s)
            for n, s, fftshift in zip(self.nffts, self.sampling, self.fftshift_after)
        ]
        if self.real:
            fs[-1] = np.fft.rfftfreq(self.nffts[-1], d=self.sampling[-1])
            if self.fftshift_after[-1]:
                warnings.warn(
                    (
                        "Using real=True and fftshift_after on the last direction. "
                        "fftshift should only be applied on directions with negative "
                        "and positive frequencies. When using FFTND with real=True, "
                        "these are all directions except the last. If you wish to proceed "
                        "applying fftshift on a frequency axis with only positive "
                        "frequencies, ignore this message."
                    )
                )
                fs[-1] = np.fft.fftshift(fs[-1])
        self.fs = tuple(fs)
        dimsd = np.array(dims)
        dimsd[self.axes] = self.nffts
        if self.real:
            # only non-negative frequencies are stored along the last axis
            dimsd[self.axes[-1]] = self.nffts[-1] // 2 + 1

        # Find types to enforce to forward and adjoint outputs. This is
        # required as np.fft.fft always returns complex128 even if input is
        # float32 or less. Moreover, when choosing real=True, the type of the
        # adjoint output is forced to be real even if the provided dtype
        # is complex.
        self.rdtype = get_real_dtype(dtype) if self.real else np.dtype(dtype)
        self.cdtype = get_complex_dtype(dtype)
        clinear = False if self.real or np.issubdtype(dtype, np.floating) else True
        super().__init__(dtype=self.cdtype, dims=dims, dimsd=dimsd, clinear=clinear)

    def _matvec(self, x: NDArray) -> NDArray:
        raise NotImplementedError(
            "_BaseFFT does not provide _matvec. It must be implemented separately."
        )

    def _rmatvec(self, x: NDArray) -> NDArray:
        raise NotImplementedError(
            "_BaseFFT does not provide _rmatvec. It must be implemented separately."
        )
11,866
40.062284
179
py
pylops
pylops-master/pylops/signalprocessing/fft2d.py
__all__ = ["FFT2D"]

import logging
import warnings
from typing import Dict, Optional, Sequence, Union

import numpy as np
import scipy.fft

from pylops import LinearOperator
from pylops.signalprocessing._baseffts import _BaseFFTND, _FFTNorms
from pylops.utils.decorators import reshaped
from pylops.utils.typing import DTypeLike, InputDimsLike

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


class _FFT2D_numpy(_BaseFFTND):
    """Two dimensional Fast-Fourier Transform using NumPy"""

    def __init__(
        self,
        dims: InputDimsLike,
        axes: InputDimsLike = (-2, -1),
        nffts: Optional[Union[int, InputDimsLike]] = None,
        sampling: Union[float, Sequence[float]] = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ) -> None:
        super().__init__(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
        if self.cdtype != np.complex128:
            warnings.warn(
                f"numpy backend always returns complex128 dtype. To respect the passed dtype, data will be casted to {self.cdtype}."
            )

        # checks
        if self.ndim < 2:
            raise ValueError("FFT2D requires at least two input dimensions")
        if self.naxes != 2:
            raise ValueError("FFT2D must be applied along exactly two dimensions")

        self.f1, self.f2 = self.fs
        del self.fs

        self._norm_kwargs: Dict[str, Union[None, str]] = {
            "norm": None
        }  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            # undo the built-in 1/N of ifft2 in the adjoint
            self._scale = np.prod(self.nffts)
        elif self.norm is _FFTNorms.ONE_OVER_N:
            # apply 1/N on the forward (adjoint already has 1/N via ifft2)
            self._scale = 1.0 / np.prod(self.nffts)

    @reshaped
    def _matvec(self, x):
        if self.ifftshift_before.any():
            x = np.fft.ifftshift(x, axes=self.axes[self.ifftshift_before])
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = np.fft.rfft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            y = np.swapaxes(y, -1, self.axes[-1])
            y[..., 1 : 1 + (self.nffts[-1] - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axes[-1], -1)
        else:
            y = np.fft.fft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        y = y.astype(self.cdtype)
        if self.fftshift_after.any():
            y = np.fft.fftshift(y, axes=self.axes[self.fftshift_after])
        return y

    @reshaped
    def _rmatvec(self, x):
        if self.fftshift_after.any():
            x = np.fft.ifftshift(x, axes=self.axes[self.fftshift_after])
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axes[-1])
            x[..., 1 : 1 + (self.nffts[-1] - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axes[-1], -1)
            y = np.fft.irfft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        else:
            y = np.fft.ifft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # truncate the result when an oversampled fft was used...
        if self.nffts[0] > self.dims[self.axes[0]]:
            y = np.take(y, range(self.dims[self.axes[0]]), axis=self.axes[0])
        if self.nffts[1] > self.dims[self.axes[1]]:
            y = np.take(y, range(self.dims[self.axes[1]]), axis=self.axes[1])
        # ...and re-pad it when nffts were chosen smaller than dims
        if self.doifftpad:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        y = y.astype(self.rdtype)
        if self.ifftshift_before.any():
            y = np.fft.fftshift(y, axes=self.axes[self.ifftshift_before])
        return y

    def __truediv__(self, y):
        # for non-unitary norms the adjoint is not the inverse: rescale
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale
        return self._rmatvec(y)


class _FFT2D_scipy(_BaseFFTND):
    """Two dimensional Fast-Fourier Transform using SciPy"""

    def __init__(
        self,
        dims: InputDimsLike,
        axes: InputDimsLike = (-2, -1),
        nffts: Optional[Union[int, InputDimsLike]] = None,
        sampling: Union[float, Sequence[float]] = 1.0,
        norm: str = "ortho",
        real: bool = False,
        ifftshift_before: bool = False,
        fftshift_after: bool = False,
        dtype: DTypeLike = "complex128",
    ) -> None:
        super().__init__(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )

        # checks
        if self.ndim < 2:
            raise ValueError("FFT2D requires at least two input dimensions")
        if self.naxes != 2:
            raise ValueError("FFT2D must be applied along exactly two dimensions")

        self.f1, self.f2 = self.fs
        del self.fs

        self._norm_kwargs: Dict[str, Union[None, str]] = {
            "norm": None
        }  # equivalent to "backward" in Numpy/Scipy
        if self.norm is _FFTNorms.ORTHO:
            self._norm_kwargs["norm"] = "ortho"
        elif self.norm is _FFTNorms.NONE:
            # NOTE(review): unlike the NumPy backend, these scales are the
            # square root of N_F and __truediv__ divides by _scale twice (the
            # net inverse matches the NumPy backend) — confirm that the
            # per-direction forward/adjoint scaling is intended to differ
            self._scale = np.sqrt(np.prod(self.nffts))
        elif self.norm is _FFTNorms.ONE_OVER_N:
            self._scale = np.sqrt(1.0 / np.prod(self.nffts))

    @reshaped
    def _matvec(self, x):
        if self.ifftshift_before.any():
            x = scipy.fft.ifftshift(x, axes=self.axes[self.ifftshift_before])
        if not self.clinear:
            x = np.real(x)
        if self.real:
            y = scipy.fft.rfft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
            # Apply scaling to obtain a correct adjoint for this operator
            y = np.swapaxes(y, -1, self.axes[-1])
            y[..., 1 : 1 + (self.nffts[-1] - 1) // 2] *= np.sqrt(2)
            y = np.swapaxes(y, self.axes[-1], -1)
        else:
            y = scipy.fft.fft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.ONE_OVER_N:
            y *= self._scale
        if self.fftshift_after.any():
            y = scipy.fft.fftshift(y, axes=self.axes[self.fftshift_after])
        return y

    @reshaped
    def _rmatvec(self, x):
        if self.fftshift_after.any():
            x = scipy.fft.ifftshift(x, axes=self.axes[self.fftshift_after])
        if self.real:
            # Apply scaling to obtain a correct adjoint for this operator
            x = x.copy()
            x = np.swapaxes(x, -1, self.axes[-1])
            x[..., 1 : 1 + (self.nffts[-1] - 1) // 2] /= np.sqrt(2)
            x = np.swapaxes(x, self.axes[-1], -1)
            y = scipy.fft.irfft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        else:
            y = scipy.fft.ifft2(x, s=self.nffts, axes=self.axes, **self._norm_kwargs)
        if self.norm is _FFTNorms.NONE:
            y *= self._scale
        # Mirror the NumPy backend: only truncate when an oversampled fft was
        # used, and re-pad when nffts were chosen smaller than dims (the
        # previous unconditional np.take raised IndexError in that case)
        if self.nffts[0] > self.dims[self.axes[0]]:
            y = np.take(y, range(self.dims[self.axes[0]]), axis=self.axes[0])
        if self.nffts[1] > self.dims[self.axes[1]]:
            y = np.take(y, range(self.dims[self.axes[1]]), axis=self.axes[1])
        if self.doifftpad:
            y = np.pad(y, self.ifftpad)
        if not self.clinear:
            y = np.real(y)
        if self.ifftshift_before.any():
            y = scipy.fft.fftshift(y, axes=self.axes[self.ifftshift_before])
        return y

    def __truediv__(self, y):
        # for non-unitary norms the adjoint is not the inverse: rescale
        if self.norm is not _FFTNorms.ORTHO:
            return self._rmatvec(y) / self._scale / self._scale
        return self._rmatvec(y)


def FFT2D(
    dims: InputDimsLike,
    axes: InputDimsLike = (-2, -1),
    nffts: Optional[Union[int, InputDimsLike]] = None,
    sampling: Union[float, Sequence[float]] = 1.0,
    norm: str = "ortho",
    real: bool = False,
    ifftshift_before: bool = False,
    fftshift_after: bool = False,
    engine: str = "numpy",
    dtype: DTypeLike = "complex128",
    name: str = "F",
) -> LinearOperator:
    r"""Two dimensional Fast-Fourier Transform.

    Apply two dimensional Fast-Fourier Transform (FFT) to any pair of ``axes`` of a
    multi-dimensional array.

    Using the default NumPy engine, the FFT operator is an overload to either the NumPy
    :py:func:`numpy.fft.fft2` (or :py:func:`numpy.fft.rfft2` for real models) in
    forward mode, and to :py:func:`numpy.fft.ifft2` (or
    :py:func:`numpy.fft.irfft2` for real models) in adjoint mode, or their CuPy
    equivalents. Alternatively, when the SciPy engine is chosen, the overloads are of
    :py:func:`scipy.fft.fft2` (or :py:func:`scipy.fft.rfft2` for real models) in
    forward mode, and to :py:func:`scipy.fft.ifft2` (or
    :py:func:`scipy.fft.irfft2` for real models) in adjoint mode.

    When using ``real=True``, the result of the forward is also multiplied by
    :math:`\sqrt{2}` for all frequency bins except zero and Nyquist, and the
    input of the adjoint is multiplied by :math:`1 / \sqrt{2}` for the same
    frequencies.

    For a real valued input signal, it is advised to use the flag ``real=True``
    as it stores the values of the Fourier transform of the last axis in ``axes``
    at positive frequencies only as values at negative frequencies are simply
    their complex conjugates.

    Parameters
    ----------
    dims : :obj:`tuple`
        Number of samples for each dimension
    axes : :obj:`tuple`, optional
        .. versionadded:: 2.0.0

        Pair of axes along which FFT2D is applied
    nffts : :obj:`tuple` or :obj:`int`, optional
        Number of samples in Fourier Transform for each axis in ``axes``. In case
        only one dimension needs to be specified, use ``None`` for the other
        dimension in the tuple. An axis with ``None`` will use ``dims[axis]`` as
        ``nfft``. When supplying a tuple, the length must be 2.
        When a single value is passed, it will be used for both
        axes. As such the default is equivalent to ``nffts=(None, None)``.
    sampling : :obj:`tuple` or :obj:`float`, optional
        Sampling steps for each axis in ``axes``. When supplied a single value,
        it is used for both axes. Unlike ``nffts``, any ``None`` will not be
        converted to the default value.
    norm : `{"ortho", "none", "1/n"}`, optional
        .. versionadded:: 1.17.0

        - "ortho": Scales forward and adjoint FFT transforms with
          :math:`1/\sqrt{N_F}`, where :math:`N_F` is the number of samples in
          the Fourier domain given by product of all elements of ``nffts``.

        - "none": Does not scale the forward or the adjoint FFT transforms.

        - "1/n": Scales both the forward and adjoint FFT transforms by
          :math:`1/N_F`.

        .. note:: For "none" and "1/n", the operator is not unitary, that is,
          the adjoint is not the inverse. To invert the operator, simply use
          ``Op \ y``.
    real : :obj:`bool`, optional
        Model to which fft is applied has real numbers (``True``) or not
        (``False``). Used to enforce that the output of adjoint of a real
        model is real. Note that the real FFT is applied only to the first
        dimension to which the FFT2D operator is applied (last element of
        ``axes``)
    ifftshift_before : :obj:`tuple` or :obj:`bool`, optional
        Apply ifftshift (``True``) or not (``False``) to model vector (before FFT).
        Consider using this option when the model vector's respective axis is
        symmetric with respect to the zero value sample. This will shift the
        zero value sample to coincide with the zero index sample. With such an
        arrangement, FFT will not introduce a sample-dependent phase-shift when
        compared to the continuous Fourier Transform.
        When passing a single value, the shift will the same for every axis in
        ``axes``. Pass a tuple to specify which dimensions are shifted.
    fftshift_after : :obj:`tuple` or :obj:`bool`, optional
        Apply fftshift (``True``) or not (``False``) to data vector (after FFT).
        Consider using this option when you require frequencies to be arranged
        naturally, from negative to positive. When not applying fftshift after
        FFT, frequencies are arranged from zero to largest positive, and then
        from negative Nyquist to the frequency bin before zero.
        When passing a single value, the shift will the same for every axis in
        ``axes``. Pass a tuple to specify which dimensions are shifted.
    engine : :obj:`str`, optional
        .. versionadded:: 1.17.0

        Engine used for fft computation (``numpy`` or ``scipy``).
    dtype : :obj:`str`, optional
        Type of elements in input array. Note that the ``dtype`` of the operator
        is the corresponding complex type even when a real type is provided.
        In addition, note that the NumPy backend does not support returning
        ``dtype`` different than ``complex128``. As such, when using the NumPy
        backend, arrays will be force-casted to types corresponding to the
        supplied ``dtype``. The SciPy backend supports all precisions natively.
        Under both backends, when a real ``dtype`` is supplied, a real result
        will be enforced on the result of the ``rmatvec`` and the input of the
        ``matvec``.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Attributes
    ----------
    dimsd : :obj:`tuple`
        Shape of the array after the forward, but before linearization.
        For example, ``y_reshaped = (Op * x.ravel()).reshape(Op.dimsd)``.
    f1 : :obj:`numpy.ndarray`
        Discrete Fourier Transform sample frequencies along ``axes[0]``
    f2 : :obj:`numpy.ndarray`
        Discrete Fourier Transform sample frequencies along ``axes[1]``
    real : :obj:`bool`
        When ``True``, uses ``rfft2``/``irfft2``
    rdtype : :obj:`bool`
        Expected input type to the forward
    cdtype : :obj:`bool`
        Output type of the forward. Complex equivalent to ``rdtype``.
    shape : :obj:`tuple`
        Operator shape
    clinear : :obj:`bool`
        .. versionadded:: 1.17.0

        Operator is complex-linear. Is false when either ``real=True`` or when
        ``dtype`` is not a complex type.
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)

    Raises
    ------
    ValueError
        - If ``dims`` has less than two elements.
        - If ``axes`` does not have exactly two elements.
        - If ``nffts`` or ``sampling`` are not either a single value or a
          tuple with two elements.
        - If ``norm`` is not one of "ortho", "none", or "1/n".
    NotImplementedError
        If ``engine`` is neither ``numpy``, nor ``scipy``.

    See Also
    --------
    FFT: One-dimensional FFT
    FFTND: N-dimensional FFT

    Notes
    -----
    The FFT2D operator (using ``norm="ortho"``) applies the two-dimensional forward
    Fourier transform to a signal :math:`d(y, x)` in forward mode:

    .. math::
        D(k_y, k_x) = \mathscr{F} (d) = \frac{1}{\sqrt{N_F}} \iint\limits_{-\infty}^\infty
        d(y, x) e^{-j2\pi k_yy} e^{-j2\pi k_xx} \,\mathrm{d}y \,\mathrm{d}x

    Similarly, the two-dimensional inverse Fourier transform is applied to the
    Fourier spectrum :math:`D(k_y, k_x)` in adjoint mode:

    .. math::
        d(y,x) = \mathscr{F}^{-1} (D) = \frac{1}{\sqrt{N_F}} \iint\limits_{-\infty}^\infty
        D(k_y, k_x) e^{j2\pi k_yy} e^{j2\pi k_xx} \,\mathrm{d}k_y \,\mathrm{d}k_x

    where :math:`N_F` is the number of samples in the Fourier domain given by the
    product of the element of ``nffts``.
    Both operators are effectively discretized and solved by a fast iterative
    algorithm known as Fast Fourier Transform.

    Note that the FFT2D operator (using ``norm="ortho"``) is a special operator
    in that the adjoint is also the inverse of the forward mode. For other norms,
    this does not hold (see ``norm`` help). However, for any norm, the 2D Fourier
    transform is Hermitian for real input signals.

    """
    if engine == "numpy":
        f = _FFT2D_numpy(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    elif engine == "scipy":
        f = _FFT2D_scipy(
            dims=dims,
            axes=axes,
            nffts=nffts,
            sampling=sampling,
            norm=norm,
            real=real,
            ifftshift_before=ifftshift_before,
            fftshift_after=fftshift_after,
            dtype=dtype,
        )
    else:
        raise NotImplementedError("engine must be numpy or scipy")
    f.name = name
    return f
17,347
39.818824
132
py
pylops
pylops-master/pylops/signalprocessing/radon2d.py
__all__ = ["Radon2D"]

import logging
from typing import Callable, Optional, Tuple

import numpy as np

from pylops.basicoperators import Spread
from pylops.utils import deps
from pylops.utils.typing import DTypeLike, NDArray

# numba is an optional dependency: jit_message is None only when it is available
jit_message = deps.numba_import("the radon2d module")

if jit_message is None:
    from numba import jit

    from ._radon2d_numba import (
        _create_table_numba,
        _hyperbolic_numba,
        _indices_2d_onthefly_numba,
        _linear_numba,
        _parabolic_numba,
    )

logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING)


def _linear(
    x: NDArray,
    t: int,
    px: float,
) -> NDArray:
    """Linear parametric curve: t(x) = t0 + px * x."""
    return t + px * x


def _parabolic(
    x: NDArray,
    t: int,
    px: float,
) -> NDArray:
    """Parabolic parametric curve: t(x) = t0 + px * x**2."""
    return t + px * x**2


def _hyperbolic(
    x: NDArray,
    t: int,
    px: float,
) -> NDArray:
    """Hyperbolic parametric curve: t(x) = sqrt(t0**2 + (x / px)**2)."""
    return np.sqrt(t**2 + (x / px) ** 2)


def _indices_2d(
    f: Callable,
    x: NDArray,
    px: float,
    t: int,
    nt: int,
    interp: bool = True,
) -> Tuple[NDArray, NDArray, Optional[NDArray]]:
    """Compute time and space indices of parametric line in ``f`` function

    Parameters
    ----------
    f : :obj:`func`
        Function computing values of parametric line for stacking
    x : :obj:`np.ndarray`
        Spatial axis (must be symmetrical around 0 and with sampling 1)
    px : :obj:`float`
        Slowness/curvature
    t : :obj:`int`
        Time sample (time axis is assumed to have sampling 1)
    nt : :obj:`int`
        Size of time axis
    interp : :obj:`bool`, optional
        Apply linear interpolation (``True``) or nearest interpolation
        (``False``) during stacking/spreading along parametric curve

    Returns
    -------
    xscan : :obj:`np.ndarray`
        Spatial indices
    tscan : :obj:`np.ndarray`
        Time indices
    dtscan : :obj:`np.ndarray`
        Decimal time variations for interpolation

    """
    tdecscan = f(x, t, px)
    # keep only curve samples falling inside the time axis (one sample of
    # headroom is needed when interpolating between neighbouring samples)
    if not interp:
        xscan = (tdecscan >= 0) & (tdecscan < nt)
    else:
        xscan = (tdecscan >= 0) & (tdecscan < nt - 1)
    tscan = tdecscan[xscan].astype(int)
    if interp:
        # fractional part used as linear interpolation weight
        dtscan = tdecscan[xscan] - tscan
    return xscan, tscan, (dtscan if interp else None)


def _indices_2d_onthefly(
    f: Callable,
    x: NDArray,
    px: NDArray,
    ip: int,
    t: int,
    nt: int,
    interp: bool = True,
) -> Tuple[NDArray, NDArray, Optional[NDArray]]:
    """Wrapper around _indices_2d to allow on-the-fly computation of
    parametric curves"""
    # NaN marks spatial positions where the curve falls outside the time axis
    tscan = np.full(len(x), np.nan, dtype=np.float32)
    if interp:
        dtscan = np.full(len(x), np.nan)
    xscan, tscan1, dtscan1 = _indices_2d(f, x, px[ip], t, nt, interp=interp)
    tscan[xscan] = tscan1
    if interp:
        dtscan[xscan] = dtscan1
    return xscan, tscan, (dtscan if interp else None)


def _create_table(
    f: Callable,
    x: NDArray,
    pxaxis: NDArray,
    nt: int,
    npx: int,
    nx: int,
    interp: bool,
) -> Tuple[NDArray, Optional[NDArray]]:
    """Create look up table of time indices (and interpolation weights) for
    every (slowness, time) pair; NaN marks out-of-bounds curve samples."""
    table = np.full((npx, nt, nx), np.nan, dtype=np.float32)
    if interp:
        dtable = np.full((npx, nt, nx), np.nan)
    else:
        dtable = None
    for ipx, px in enumerate(pxaxis):
        for it in range(nt):
            xscan, tscan, dtscan = _indices_2d(f, x, px, it, nt, interp=interp)
            table[ipx, it, xscan] = tscan
            if interp:
                dtable[ipx, it, xscan] = dtscan

    return table, dtable


def Radon2D(
    taxis: NDArray,
    haxis: NDArray,
    pxaxis: NDArray,
    kind: str = "linear",
    centeredh: bool = True,
    interp: bool = True,
    onthefly: bool = False,
    engine: str = "numpy",
    dtype: DTypeLike = "float64",
    name: str = "R",
):
    r"""Two dimensional Radon transform.

    Apply two dimensional Radon forward (and adjoint) transform to a
    2-dimensional array of size :math:`[n_{p_x} \times n_t]`
    (and :math:`[n_x \times n_t]`).

    In forward mode this entails to spreading the model vector
    along parametric curves (lines, parabolas, or hyperbolas depending on
    the choice of ``kind``), while stacking values in the data vector
    along the same parametric curves is performed in adjoint mode.

    Parameters
    ----------
    taxis : :obj:`np.ndarray`
        Time axis
    haxis : :obj:`np.ndarray`
        Spatial axis
    pxaxis : :obj:`np.ndarray`
        Axis of scanning variable :math:`p_x` of parametric curve
    kind : :obj:`str`, optional
        Curve to be used for stacking/spreading (``linear``, ``parabolic``,
        and ``hyperbolic`` are currently supported) or a function that takes
        :math:`(x, t_0, p_x)` as input and returns :math:`t` as output
    centeredh : :obj:`bool`, optional
        Assume centered spatial axis (``True``) or not (``False``). If ``True``
        the original ``haxis`` is ignored and a new axis is created.
    interp : :obj:`bool`, optional
        Apply linear interpolation (``True``) or nearest interpolation
        (``False``) during stacking/spreading along parametric curve
    onthefly : :obj:`bool`, optional
        Compute stacking parametric curves on-the-fly as part of forward
        and adjoint modelling (``True``) or at initialization and store them
        in look-up table (``False``). Using a look-up table is computationally
        more efficient but increases the memory burden
    engine : :obj:`str`, optional
        Engine used for computation (``numpy`` or ``numba``)
    dtype : :obj:`str`, optional
        Type of elements in input array.
    name : :obj:`str`, optional
        .. versionadded:: 2.0.0

        Name of operator (to be used by :func:`pylops.utils.describe.describe`)

    Returns
    -------
    r2op : :obj:`pylops.LinearOperator`
        Radon operator

    Raises
    ------
    KeyError
        If ``engine`` is neither ``numpy`` nor ``numba``
    NotImplementedError
        If ``kind`` is not ``linear``, ``parabolic``, or ``hyperbolic``

    See Also
    --------
    pylops.signalprocessing.Radon3D: Three dimensional Radon transform
    pylops.Spread: Spread operator

    Notes
    -----
    The Radon2D operator applies the following linear transform in adjoint mode
    to the data after reshaping it into a 2-dimensional array of
    size :math:`[n_x \times n_t]` in adjoint mode:

    .. math::
        m(p_x, t_0) = \int{d(x, t = f(p_x, x, t))} \,\mathrm{d}x

    where :math:`f(p_x, x, t) = t_0 + p_x x` where
    :math:`p_x = \sin(\theta)/v` in linear mode,
    :math:`f(p_x, x, t) = t_0 + p_x x^2` in parabolic mode, and
    :math:`f(p_x, x, t) = \sqrt{t_0^2 + x^2 / p_x^2}` in hyperbolic mode.

    Note that internally the :math:`p_x` axis will be normalized by the ratio
    of the spatial and time axes and used alongside unitless axes. Whilst this
    makes the linear mode fully unitless, users are required to apply
    additional scalings to the :math:`p_x` axis for other relationships:

    - :math:`p_x` should be pre-multiplied by :math:`d_x` for the parabolic
      relationship;
    - :math:`p_x` should be pre-multiplied by :math:`(d_t/d_x)^2` for the
      hyperbolic relationship.

    As the adjoint operator can be interpreted as a repeated summation of sets
    of elements of the model vector along chosen parametric curves, the
    forward is implemented as spreading of values in the data vector along the
    same parametric curves. This operator is actually a thin wrapper around
    the :class:`pylops.Spread` operator.

    """
    # engine (silently fall back to numpy when numba is not installed)
    if engine not in ["numpy", "numba"]:
        raise KeyError("engine must be numpy or numba")
    if engine == "numba" and jit_message is not None:
        engine = "numpy"

    # axes
    nt, nh, npx = taxis.size, haxis.size, pxaxis.size
    if kind == "linear":
        f = _linear if engine == "numpy" else _linear_numba
    elif kind == "parabolic":
        f = _parabolic if engine == "numpy" else _parabolic_numba
    elif kind == "hyperbolic":
        f = _hyperbolic if engine == "numpy" else _hyperbolic_numba
    elif callable(kind):
        # user-supplied parametric curve
        f = kind
    else:
        raise NotImplementedError("kind must be linear, " "parabolic, or hyperbolic...")
    # make axes unitless
    dh, dt = np.abs(haxis[1] - haxis[0]), np.abs(taxis[1] - taxis[0])
    dpx = dh / dt
    pxaxis = pxaxis * dpx
    if not centeredh:
        haxisunitless = haxis / dh
    else:
        # symmetric unitless spatial axis centered around 0
        haxisunitless = np.arange(nh) - nh // 2 + ((nh + 1) % 2) / 2
    dims = (npx, nt)
    dimsd = (nh, nt)

    if onthefly:
        # compute curves on the fly in forward/adjoint passes
        if engine == "numba":

            @jit(nopython=True, nogil=True)
            def ontheflyfunc(x, y):
                return _indices_2d_onthefly_numba(
                    f, haxisunitless, pxaxis, x, y, nt, interp=interp
                )[1:]

        else:
            if interp:

                def ontheflyfunc(x, y):
                    return _indices_2d_onthefly(
                        f, haxisunitless, pxaxis, x, y, nt, interp=interp
                    )[1:]

            else:

                def ontheflyfunc(x, y):
                    return _indices_2d_onthefly(
                        f, haxisunitless, pxaxis, x, y, nt, interp=interp
                    )[1]

        r2op = Spread(
            dims, dimsd, fh=ontheflyfunc, interp=interp, engine=engine, dtype=dtype
        )
    else:
        # precompute curves into a look-up table
        if engine == "numba":
            tablefunc = _create_table_numba
        else:
            tablefunc = _create_table

        table, dtable = tablefunc(f, haxisunitless, pxaxis, nt, npx, nh, interp)
        if not interp:
            dtable = None
        r2op = Spread(
            dims,
            dimsd,
            table=table,
            dtable=dtable,
            interp=interp,
            engine=engine,
            dtype=dtype,
        )
    r2op.name = name
    return r2op
9,844
29.574534
88
py
pylops
pylops-master/pylops/signalprocessing/interp.py
__all__ = ["Interp"] import logging from typing import Tuple, Union import numpy as np import numpy.typing as npt from pylops import LinearOperator, aslinearoperator from pylops.basicoperators import Diagonal, MatrixMult, Restriction, Transpose from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import get_array_module from pylops.utils.typing import DTypeLike, InputDimsLike, IntNDArray logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def _checkunique(iava: npt.ArrayLike) -> None: _, count = np.unique(iava, return_counts=True) if np.any(count > 1): raise ValueError("Repeated values in iava array") def _nearestinterp( dims: Union[int, InputDimsLike], iava: IntNDArray, axis: int = -1, dtype: DTypeLike = "float64", ): """Nearest neighbour interpolation.""" iava = np.round(iava).astype(int) _checkunique(iava) return Restriction(dims, iava, axis=axis, dtype=dtype), iava def _linearinterp( dims: InputDimsLike, iava: IntNDArray, axis: int = -1, dtype: DTypeLike = "float64", ): """Linear interpolation.""" ncp = get_array_module(iava) if np.issubdtype(iava.dtype, np.integer): iava = iava.astype(np.float64) lastsample = dims[axis] dimsd = list(dims) dimsd[axis] = len(iava) dimsd = tuple(dimsd) # ensure that samples are not beyond the last sample, in that case set to # penultimate sample and raise a warning outside = iava >= lastsample - 1 if sum(outside) > 0: logging.warning( "at least one value is beyond penultimate sample, " "forced to be at penultimate sample" ) iava[outside] = lastsample - 1 - 1e-10 _checkunique(iava) # find indices and weights iva_l = ncp.floor(iava).astype(int) iva_r = iva_l + 1 weights = iava - iva_l # create operators Op = Diagonal(1 - weights, dims=dimsd, axis=axis, dtype=dtype) * Restriction( dims, iva_l, axis=axis, dtype=dtype ) + Diagonal(weights, dims=dimsd, axis=axis, dtype=dtype) * Restriction( dims, iva_r, axis=axis, dtype=dtype ) return Op, iava, dims, dimsd def _sincinterp( 
dims: InputDimsLike, iava: IntNDArray, axis: int = 0, dtype: DTypeLike = "float64", ): """Sinc interpolation.""" ncp = get_array_module(iava) _checkunique(iava) # create sinc interpolation matrix nreg = dims[axis] ireg = ncp.arange(nreg) sinc = ncp.tile(iava[:, np.newaxis], (1, nreg)) - ncp.tile(ireg, (len(iava), 1)) sinc = ncp.sinc(sinc) # identify additional dimensions and create MatrixMult operator otherdims = np.array(dims) otherdims = np.roll(otherdims, -axis) otherdims = otherdims[1:] Op = MatrixMult(sinc, otherdims=otherdims, dtype=dtype) # create Transpose operator that brings axis to first dimension dimsd = list(dims) dimsd[axis] = len(iava) if axis > 0: axes = np.arange(len(dims), dtype=int) axes = np.roll(axes, -axis) Top = Transpose(dims, axes=axes, dtype=dtype) T1op = Transpose(dimsd, axes=axes, dtype=dtype) Op = T1op.H * Op * Top return Op, dims, dimsd def Interp( dims: Union[int, InputDimsLike], iava: IntNDArray, axis: int = -1, kind: str = "linear", dtype: DTypeLike = "float64", name: str = "I", ) -> Tuple[LinearOperator, IntNDArray]: r"""Interpolation operator. Apply interpolation along ``axis`` from regularly sampled input vector into fractionary positions ``iava`` using one of the following algorithms: - *Nearest neighbour* interpolation is a thin wrapper around :obj:`pylops.Restriction` at ``np.round(iava)`` locations. - *Linear interpolation* extracts values from input vector at locations ``np.floor(iava)`` and ``np.floor(iava)+1`` and linearly combines them in forward mode, places weighted versions of the interpolated values at locations ``np.floor(iava)`` and ``np.floor(iava)+1`` in an otherwise zero vector in adjoint mode. - *Sinc interpolation* performs sinc interpolation at locations ``iava``. Note that this is the most accurate method but it has higher computational cost as it involves multiplying the input data by a matrix of size :math:`N \times M`. .. note:: The vector ``iava`` should contain unique values. 
If the same index is repeated twice an error will be raised. This also applies when values beyond the last element of the input array for *linear interpolation* as those values are forced to be just before this element. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension iava : :obj:`list` or :obj:`numpy.ndarray` Floating indices of locations of available samples for interpolation. axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which interpolation is applied. kind : :obj:`str`, optional Kind of interpolation (``nearest``, ``linear``, and ``sinc`` are currently supported) dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Returns ------- op : :obj:`pylops.LinearOperator` Linear intepolation operator iava : :obj:`list` or :obj:`numpy.ndarray` Corrected indices of locations of available samples (samples at ``M-1`` or beyond are forced to be at ``M-1-eps``) Raises ------ ValueError If the vector ``iava`` contains repeated values. NotImplementedError If ``kind`` is not ``nearest``, ``linear`` or ``sinc`` See Also -------- pylops.Restriction : Restriction operator Notes ----- *Linear interpolation* of a subset of :math:`N` values at locations ``iava`` from an input (or model) vector :math:`\mathbf{x}` of size :math:`M` can be expressed as: .. math:: y_i = (1-w_i) x_{l^{l}_i} + w_i x_{l^{r}_i} \quad \forall i=1,2,\ldots,N where :math:`\mathbf{l^l}=[\lfloor l_1 \rfloor, \lfloor l_2 \rfloor,\ldots, \lfloor l_N \rfloor]` and :math:`\mathbf{l^r}=[\lfloor l_1 \rfloor +1, \lfloor l_2 \rfloor +1,\ldots, \lfloor l_N \rfloor +1]` are vectors containing the indeces of the original array at which samples are taken, and :math:`\mathbf{w}=[l_1 - \lfloor l_1 \rfloor, l_2 - \lfloor l_2 \rfloor, ..., l_N - \lfloor l_N \rfloor]` are the linear interpolation weights. 
This operator can be implemented by simply summing two :class:`pylops.Restriction` operators which are weighted using :class:`pylops.basicoperators.Diagonal` operators. *Sinc interpolation* of a subset of :math:`N` values at locations ``iava`` from an input (or model) vector :math:`\mathbf{x}` of size :math:`M` can be expressed as: .. math:: \DeclareMathOperator{\sinc}{sinc} y_i = \sum_{j=0}^{M} x_j \sinc(i-j) \quad \forall i=1,2,\ldots,N This operator can be implemented using the :class:`pylops.MatrixMult` operator with a matrix containing the values of the sinc function at all :math:`i,j` possible combinations. """ dims = _value_or_sized_to_tuple(dims) if kind == "nearest": interpop, iava = _nearestinterp(dims, iava, axis=axis, dtype=dtype) elif kind == "linear": interpop, iava, dims, dimsd = _linearinterp(dims, iava, axis=axis, dtype=dtype) elif kind == "sinc": interpop, dims, dimsd = _sincinterp(dims, iava, axis=axis, dtype=dtype) else: raise NotImplementedError("kind is not correct...") # add dims and dimsd to composite operators (not needed for neareast as # interpop is a Restriction operator already if kind != "nearest": interpop = aslinearoperator(interpop) interpop.dims = dims interpop.dimsd = dimsd interpop.name = name return interpop, iava
8,140
33.062762
87
py
pylops
pylops-master/pylops/signalprocessing/dwt.py
__all__ = ["DWT"] import logging from math import ceil, log from typing import Union import numpy as np from pylops import LinearOperator from pylops.basicoperators import Pad from pylops.utils import deps from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray pywt_message = deps.pywt_import("the dwt module") if pywt_message is None: import pywt logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.WARNING) def _checkwavelet(wavelet: str) -> None: """Check that wavelet belongs to pywt.wavelist""" wavelist = pywt.wavelist(kind="discrete") if wavelet not in wavelist: raise ValueError(f"'{wavelet}' not in family set = {wavelist}") def _adjointwavelet(wavelet: str) -> str: """Define adjoint wavelet""" waveletadj = wavelet if "rbio" in wavelet: waveletadj = "bior" + wavelet[-3:] elif "bior" in wavelet: waveletadj = "rbio" + wavelet[-3:] return waveletadj class DWT(LinearOperator): """One dimensional Wavelet operator. Apply 1D-Wavelet Transform along an ``axis`` of a multi-dimensional array of size ``dims``. Note that the Wavelet operator is an overload of the ``pywt`` implementation of the wavelet transform. Refer to https://pywavelets.readthedocs.io for a detailed description of the input parameters. Parameters ---------- dims : :obj:`int` or :obj:`tuple` Number of samples for each dimension axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which DWT is applied wavelet : :obj:`str`, optional Name of wavelet type. Use :func:`pywt.wavelist(kind='discrete')` for a list of available wavelets. level : :obj:`int`, optional Number of scaling levels (must be >=0). dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ModuleNotFoundError If ``pywt`` is not installed ValueError If ``wavelet`` does not belong to ``pywt.families`` Notes ----- The Wavelet operator applies the multilevel Discrete Wavelet Transform (DWT) in forward mode and the multilevel Inverse Discrete Wavelet Transform (IDWT) in adjoint mode. Wavelet transforms can be used to compress signals and present a key advantage over Fourier transforms in that they captures both frequency and location information in time. Consider using this operator as sparsifying transform when using L1 solvers. """ def __init__( self, dims: Union[int, InputDimsLike], axis: int = -1, wavelet: str = "haar", level: int = 1, dtype: DTypeLike = "float64", name: str = "D", ) -> None: if pywt_message is not None: raise ModuleNotFoundError(pywt_message) _checkwavelet(wavelet) dims = _value_or_sized_to_tuple(dims) # define padding for length to be power of 2 ndimpow2 = max(2 ** ceil(log(dims[axis], 2)), 2**level) pad = [(0, 0)] * len(dims) pad[axis] = (0, ndimpow2 - dims[axis]) self.pad = Pad(dims, pad) self.axis = axis dimsd = list(dims) dimsd[self.axis] = ndimpow2 super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dimsd, name=name) # apply transform to find out slices _, self.sl = pywt.coeffs_to_array( pywt.wavedecn( np.ones(self.dimsd), wavelet=wavelet, level=level, mode="periodization", axes=(self.axis,), ), axes=(self.axis,), ) self.wavelet = wavelet self.waveletadj = _adjointwavelet(wavelet) self.level = level def _matvec(self, x: NDArray) -> NDArray: x = self.pad.matvec(x) x = np.reshape(x, self.dimsd) y = pywt.coeffs_to_array( pywt.wavedecn( x, wavelet=self.wavelet, level=self.level, mode="periodization", axes=(self.axis,), ), axes=(self.axis,), )[0] return 
y.ravel() def _rmatvec(self, x: NDArray) -> NDArray: x = np.reshape(x, self.dimsd) x = pywt.array_to_coeffs(x, self.sl, output_format="wavedecn") y = pywt.waverecn( x, wavelet=self.waveletadj, mode="periodization", axes=(self.axis,) ) y = self.pad.rmatvec(y.ravel()) return y
4,943
29.518519
82
py
pylops
pylops-master/pylops/signalprocessing/_radon3d_numba.py
import os

import numpy as np
from numba import jit

# detect whether to use parallel or not: parallel jit is enabled only when
# NUMBA_NUM_THREADS is set to something other than 1 (default "1")
numba_threads = int(os.getenv("NUMBA_NUM_THREADS", "1"))
parallel = True if numba_threads != 1 else False


@jit(nopython=True)
def _linear_numba(y, x, t, py, px):
    # linear (planar) moveout: t0 + px*x + py*y
    return t + px * x + py * y


@jit(nopython=True)
def _parabolic_numba(y, x, t, py, px):
    # parabolic moveout: t0 + px*x^2 + py*y^2
    return t + px * x**2 + py * y**2


@jit(nopython=True)
def _hyperbolic_numba(y, x, t, py, px):
    # hyperbolic moveout: sqrt(t0^2 + (x/px)^2 + (y/py)^2)
    return np.sqrt(t**2 + (x / px) ** 2 + (y / py) ** 2)


@jit(nopython=True, parallel=parallel, nogil=True)
def _indices_3d_numba(f, y, x, py, px, t, nt, interp=True):
    """Compute time and space indices of parametric line in ``f`` function
    using numba.

    Refer to ``_indices_3d`` for full documentation.

    """
    tdecscan = f(y, x, t, py, px)
    # valid-sample mask; with interpolation the last time sample is excluded
    # because the fractional weight needs the next sample (index+1)
    if not interp:
        sscan = (tdecscan >= 0) & (tdecscan < nt)
    else:
        sscan = (tdecscan >= 0) & (tdecscan < nt - 1)
    tscanfs = tdecscan[sscan]
    tscan = np.zeros(len(tscanfs))
    dtscan = np.zeros(len(tscanfs))
    for it, tscanf in enumerate(tscanfs):
        # integer part -> time index; fractional part -> interp weight
        tscan[it] = int(tscanf)
        if interp:
            dtscan[it] = tscanf - tscan[it]
    return sscan, tscan, dtscan


@jit(nopython=True, parallel=parallel, nogil=True)
def _indices_3d_onthefly_numba(f, y, x, py, px, ip, t, nt, interp=True):
    """Wrapper around _indices_3d to allow on-the-fly computation of
    parametric curves using numba

    Out-of-bounds positions are left as NaN in the full-length outputs.
    """
    tscan = np.full(len(y), np.nan, dtype=np.float32)
    if interp:
        dtscan = np.full(len(y), np.nan)
    else:
        # NOTE(review): dtscan is None in the no-interp branch, which gives
        # the jitted function branch-dependent return types — presumably
        # numba resolves this via an Optional type; confirm
        dtscan = None
    sscan, tscan1, dtscan1 = _indices_3d_numba(
        f, y, x, py[ip], px[ip], t, nt, interp=interp
    )
    # scatter the compacted valid samples back to full-length arrays
    tscan[sscan] = tscan1
    if interp:
        dtscan[sscan] = dtscan1
    return sscan, tscan, dtscan


@jit(nopython=True, parallel=parallel, nogil=True)
def _create_table_numba(f, y, x, pyaxis, pxaxis, nt, npy, npx, ny, nx, interp):
    """Create look up table using numba

    table[ip, it, :] holds the (integer) time index of the curve for slowness
    pair ip at intercept time it for each spatial position; dtable holds the
    fractional part used as linear-interpolation weight. NaN marks positions
    where the curve falls outside the time axis.
    """
    table = np.full((npx * npy, nt, ny * nx), np.nan, dtype=np.float32)
    dtable = np.full((npx * npy, nt, ny * nx), np.nan)

    for ip in range(len(pyaxis)):
        py = pyaxis[ip]
        px = pxaxis[ip]
        for it in range(nt):
            sscans, tscan, dtscan = _indices_3d_numba(
                f, y, x, py, px, it, nt, interp=interp
            )
            # tscan/dtscan are compacted over valid samples only, so a
            # running counter maps them back to spatial positions
            itscan = 0
            for isscan, sscan in enumerate(sscans):
                if sscan:
                    table[ip, it, isscan] = tscan[itscan]
                    if interp:
                        dtable[ip, it, isscan] = dtscan[itscan]
                    itscan += 1
    return table, dtable
2,645
30.129412
79
py
pylops
pylops-master/pylops/signalprocessing/convolve1d.py
__all__ = ["Convolve1D"] from typing import Callable, Tuple, Union import numpy as np import numpy.typing as npt from pylops import LinearOperator from pylops.utils._internal import _value_or_sized_to_tuple from pylops.utils.backend import ( get_convolve, get_fftconvolve, get_oaconvolve, to_cupy_conditional, ) from pylops.utils.decorators import reshaped from pylops.utils.typing import DTypeLike, InputDimsLike, NDArray def _choose_convfunc( x: npt.ArrayLike, method: Union[None, str], dims ) -> Tuple[Callable, str]: """Choose convolution function Choose and return the function handle to be used for convolution """ if len(dims) == 1: if method is None: method = "direct" if method not in ("direct", "fft"): raise NotImplementedError("method must be direct or fft") convfunc = get_convolve(x) else: if method is None: method = "fft" if method == "fft": convfunc = get_fftconvolve(x) elif method == "overlapadd": convfunc = get_oaconvolve(x) else: raise NotImplementedError("method must be fft or overlapadd") return convfunc, method class Convolve1D(LinearOperator): r"""1D convolution operator. Apply one-dimensional convolution with a compact filter to model (and data) along an ``axis`` of a multi-dimensional array. Parameters ---------- dims : :obj:`list` or :obj:`int` Number of samples for each dimension h : :obj:`numpy.ndarray` 1d compact filter to be convolved to input signal offset : :obj:`int` Index of the center of the compact filter axis : :obj:`int`, optional .. versionadded:: 2.0.0 Axis along which convolution is applied method : :obj:`str`, optional Method used to calculate the convolution (``direct``, ``fft``, or ``overlapadd``). Note that only ``direct`` and ``fft`` are allowed when ``dims=None``, whilst ``fft`` and ``overlapadd`` are allowed when ``dims`` is provided. dtype : :obj:`str`, optional Type of elements in input array. name : :obj:`str`, optional .. 
versionadded:: 2.0.0 Name of operator (to be used by :func:`pylops.utils.describe.describe`) Attributes ---------- shape : :obj:`tuple` Operator shape explicit : :obj:`bool` Operator contains a matrix that can be solved explicitly (``True``) or not (``False``) Raises ------ ValueError If ``offset`` is bigger than ``len(h) - 1`` NotImplementedError If ``method`` provided is not allowed Notes ----- The Convolve1D operator applies convolution between the input signal :math:`x(t)` and a compact filter kernel :math:`h(t)` in forward model: .. math:: y(t) = \int\limits_{-\infty}^{\infty} h(t-\tau) x(\tau) \,\mathrm{d}\tau This operation can be discretized as follows .. math:: y[n] = \sum_{m=-\infty}^{\infty} h[n-m] x[m] as well as performed in the frequency domain. .. math:: Y(f) = \mathscr{F} (h(t)) * \mathscr{F} (x(t)) Convolve1D operator uses :py:func:`scipy.signal.convolve` that automatically chooses the best domain for the operation to be carried out for one dimensional inputs. The fft implementation :py:func:`scipy.signal.fftconvolve` is however enforced for signals in 2 or more dimensions as this routine efficently operates on multi-dimensional arrays. As the adjoint of convolution is correlation, Convolve1D operator applies correlation in the adjoint mode. In time domain: .. math:: x(t) = \int\limits_{-\infty}^{\infty} h(t+\tau) x(\tau) \,\mathrm{d}\tau or in frequency domain: .. 
math:: y(t) = \mathscr{F}^{-1} (H(f)^* * X(f)) """ def __init__( self, dims: Union[int, InputDimsLike], h: NDArray, offset: int = 0, axis: int = -1, method: str = None, dtype: DTypeLike = "float64", name: str = "C", ) -> None: dims = _value_or_sized_to_tuple(dims) super().__init__(dtype=np.dtype(dtype), dims=dims, dimsd=dims, name=name) self.axis = axis if offset > len(h) - 1: raise ValueError("offset must be smaller than len(h) - 1") self.h = h self.hstar = np.flip(self.h, axis=-1) self.nh = len(h) self.offset = 2 * (self.nh // 2 - int(offset)) if self.nh % 2 == 0: self.offset -= 1 if self.offset != 0: self.h = np.pad( self.h, ( self.offset if self.offset > 0 else 0, -self.offset if self.offset < 0 else 0, ), mode="constant", ) self.hstar = np.flip(self.h, axis=-1) # add dimensions to filter to match dimensions of model and data hdims = np.ones(len(self.dims), dtype=int) hdims[self.axis] = len(self.h) self.h = self.h.reshape(hdims) self.hstar = self.hstar.reshape(hdims) # choose method and function handle self.convfunc, self.method = _choose_convfunc(h, method, self.dims) @reshaped def _matvec(self, x: NDArray) -> NDArray: if type(self.h) is not type(x): self.h = to_cupy_conditional(x, self.h) self.convfunc, self.method = _choose_convfunc( self.h, self.method, self.dims ) return self.convfunc(x, self.h, mode="same") @reshaped def _rmatvec(self, x: NDArray) -> NDArray: if type(self.hstar) is not type(x): self.hstar = to_cupy_conditional(x, self.hstar) self.convfunc, self.method = _choose_convfunc( self.hstar, self.method, self.dims ) return self.convfunc(x, self.hstar, mode="same")
5,978
30.634921
81
py