repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
pylops | pylops-master/examples/plot_nonstatfilter.py | """
Non-stationary Filter Estimation
================================
This example shows how to use the :py:class:`pylops.signalprocessing.NonStationaryFilters1D`
and :py:class:`pylops.signalprocessing.NonStationaryFilters2D` operators to perform non-stationary
filter estimation that when convolved with an input signal produces a desired output signal.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import gaussian
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
###############################################################################
# We will start by creating a zero signal of length `nt` and we will
# place a comb of unitary spikes. We also create a non-stationary filter defined by
# 5 equally spaced `Ricker wavelets <http://subsurfwiki.org/wiki/Ricker_wavelet>`_
# with dominant frequencies of :math:`f = 10, 15, 20, 25` and :math:`30` Hz.
nt = 601
dt = 0.004
t = np.arange(nt) * dt
tw = ricker(t[:51], f0=5)[1]
fs = [10, 15, 20, 25, 30]
wavs = np.stack([ricker(t[:51], f0=f)[0] for f in fs])
x = np.zeros(nt)
x[64 : nt - 64 : 64] = 1.0
Cop = pylops.signalprocessing.NonStationaryFilters1D(
x, hsize=101, ih=(101, 201, 301, 401, 501)
)
y = Cop @ wavs
wavsinv = Cop.div(y, niter=20)
wavsinv = wavsinv.reshape(wavs.shape)
fig, axs = plt.subplots(1, len(fs), sharey=True, figsize=(14, 3))
fig.suptitle("1D Non-stationary filter estimation")
for i, ax in enumerate(axs):
ax.plot(wavs[i], "k", lw=4, label="True")
ax.plot(wavsinv[i], "r", lw=2, label="Estimate")
ax.set_xlabel("Time [sec]")
axs[0].legend()
plt.tight_layout()
###############################################################################
# Finally, we repeat the same exercise with a 2-dimensional non-stationary
# filter
nx, nz = 201, 101
wav1a, _, _ = ricker(t[:9], f0=12)
wav1b, _, _ = ricker(t[:9], f0=30)
wav2a = gaussian(15, 2.0)
wav2b = gaussian(15, 4.0)
wav11 = np.outer(wav1a, wav2a[np.newaxis]).T
wav12 = np.outer(wav1b, wav2a[np.newaxis]).T
wav21 = np.outer(wav1b, wav2b[np.newaxis]).T
wav22 = np.outer(wav1a, wav2b[np.newaxis]).T
wavsize = wav11.shape
hs = np.zeros((2, 2, *wavsize))
hs[0, 0] = wav11
hs[0, 1] = wav12
hs[1, 0] = wav21
hs[1, 1] = wav22
x = np.zeros((nx, nz))
x[:, 21] = 1.0
x[:, 41] = -1.0
Cop = pylops.signalprocessing.NonStationaryFilters2D(
inp=x, hshape=wavsize, ihx=(21, 41), ihz=(21, 41), engine="numpy"
)
y = Cop @ hs
hsinv = Cop.div(y.ravel(), niter=50)
hsinv = hsinv.reshape(hs.shape)
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
fig.suptitle("True filters")
axs[0, 0].imshow(hs[0, 0], cmap="gray", vmin=-1, vmax=1)
axs[0, 0].axis("tight")
axs[0, 0].set_title(r"$H_{1,1}$")
axs[0, 1].imshow(hs[0, 1], cmap="gray", vmin=-1, vmax=1)
axs[0, 1].axis("tight")
axs[0, 1].set_title(r"$H_{1,2}$")
axs[1, 0].imshow(hs[1, 0], cmap="gray", vmin=-1, vmax=1)
axs[1, 0].axis("tight")
axs[1, 0].set_title(r"$H_{2,1}$")
axs[1, 1].imshow(hs[1, 1], cmap="gray", vmin=-1, vmax=1)
axs[1, 1].axis("tight")
axs[1, 1].set_title(r"$H_{2,2}$")
plt.tight_layout()
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
fig.suptitle("Estimated filters")
axs[0, 0].imshow(hsinv[0, 0], cmap="gray", vmin=-1, vmax=1)
axs[0, 0].axis("tight")
axs[0, 0].set_title(r"$H_{1,1}$")
axs[0, 1].imshow(hsinv[0, 1], cmap="gray", vmin=-1, vmax=1)
axs[0, 1].axis("tight")
axs[0, 1].set_title(r"$H_{1,2}$")
axs[1, 0].imshow(hsinv[1, 0], cmap="gray", vmin=-1, vmax=1)
axs[1, 0].axis("tight")
axs[1, 0].set_title(r"$H_{2,1}$")
axs[1, 1].imshow(hsinv[1, 1], cmap="gray", vmin=-1, vmax=1)
axs[1, 1].axis("tight")
axs[1, 1].set_title(r"$H_{2,2}$")
plt.tight_layout()
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
fig.suptitle("Estimation error")
axs[0, 0].imshow(hs[0, 0] - hsinv[0, 0], cmap="gray", vmin=-1, vmax=1)
axs[0, 0].axis("tight")
axs[0, 0].set_title(r"$H_{1,1}$")
axs[0, 1].imshow(hs[0, 1] - hsinv[0, 1], cmap="gray", vmin=-1, vmax=1)
axs[0, 1].axis("tight")
axs[0, 1].set_title(r"$H_{1,2}$")
axs[1, 0].imshow(hs[1, 0] - hsinv[1, 0], cmap="gray", vmin=-1, vmax=1)
axs[1, 0].axis("tight")
axs[1, 0].set_title(r"$H_{2,1}$")
axs[1, 1].imshow(hs[1, 1] - hsinv[1, 1], cmap="gray", vmin=-1, vmax=1)
axs[1, 1].axis("tight")
axs[1, 1].set_title(r"$H_{2,2}$")
plt.tight_layout()
| 4,246 | 30.459259 | 98 | py |
pylops | pylops-master/examples/plot_zero.py | """
Zero
====
This example shows how to use the :py:class:`pylops.basicoperators.Zero` operator.
This operators simply zeroes the data in forward mode and the model in adjoint mode.
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define an zero operator :math:`\mathbf{0}` with same number of elements for data
# :math:`N` and model :math:`M`.
N, M = 5, 5
x = np.arange(M)
Zop = pylops.basicoperators.Zero(M, dtype="int")
y = Zop * x
xadj = Zop.H * y
gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 4))
ax = plt.subplot(gs[0, 0:3])
ax.imshow(np.zeros((N, N)), cmap="rainbow", vmin=-M, vmax=M)
ax.set_title("A", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 3])
im = ax.imshow(x[:, np.newaxis], cmap="rainbow", vmin=-M, vmax=M)
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 4])
ax.text(
0.35,
0.5,
"=",
horizontalalignment="center",
verticalalignment="center",
size=40,
fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 5])
ax.imshow(y[:, np.newaxis], cmap="rainbow", vmin=-M, vmax=M)
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.colorbar(im, ax=ax, ticks=[0], pad=0.3, shrink=0.7)
plt.tight_layout()
###############################################################################
# Similarly we can consider the case with data bigger than model
N, M = 10, 5
x = np.arange(M)
Zop = pylops.Zero(N, M, dtype="int")
y = Zop * x
xadj = Zop.H * y
print(f"x = {x}")
print(f"0*x = {y}")
print(f"0'*y = {xadj}")
###############################################################################
# and model bigger than data
N, M = 5, 10
x = np.arange(M)
Zop = pylops.Zero(N, M, dtype="int")
y = Zop * x
xadj = Zop.H * y
print(f"x = {x}")
print(f"0*x = {y}")
print(f"0'*y = {xadj}")
###############################################################################
# Note that this operator can be useful in many real-life applications when for
# example we want to manipulate a subset of the model array and keep intact the
# rest of the array. For example:
#
# .. math::
# \begin{bmatrix}
# \mathbf{A} \quad \mathbf{0}
# \end{bmatrix}
# \begin{bmatrix}
# \mathbf{x_1} \\
# \mathbf{x_2}
# \end{bmatrix} = \mathbf{A} \mathbf{x_1}
#
# Refer to the tutorial on *Optimization* for more details on this.
| 2,981 | 26.611111 | 88 | py |
pylops | pylops-master/examples/plot_nonstatconvolve.py | """
Non-stationary Convolution
==========================
This example shows how to use the :py:class:`pylops.signalprocessing.NonStationaryConvolve1D`
and :py:class:`pylops.signalprocessing.NonStationaryConvolve2D` operators to perform non-stationary
convolution between two signals.
Similar to their stationary counterparts, these operators can be used in the forward model of
several common application in signal processing that require filtering of an input signal for a
time- or space-varying instrument response.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import gaussian
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
###############################################################################
# We will start by creating a zero signal of length `nt` and we will
# place a comb of unitary spikes. We also create a non-stationary filter defined by
# 5 equally spaced `Ricker wavelets <http://subsurfwiki.org/wiki/Ricker_wavelet>`_
# with dominant frequencies of :math:`f = 10, 15, 20, 25` and :math:`30` Hz.
nt = 601
dt = 0.004
t = np.arange(nt) * dt
tw = ricker(t[:51], f0=5)[1]
fs = [10, 15, 20, 25, 30]
wavs = np.stack([ricker(t[:51], f0=f)[0] for f in fs])
Cop = pylops.signalprocessing.NonStationaryConvolve1D(
dims=nt, hs=wavs, ih=(101, 201, 301, 401, 501)
)
x = np.zeros(nt)
x[64 : nt - 64 : 64] = 1.0
y = Cop @ x
plt.figure(figsize=(10, 3))
plt.plot(t, x, "k")
plt.plot(t, y, "k")
plt.xlabel("Time [sec]")
plt.title("Input and output")
plt.xlim(0, t[-1])
plt.tight_layout()
###############################################################################
# Let's now visualize the filters in time and frequency domain
plt.figure(figsize=(10, 3))
plt.pcolormesh(t, tw, Cop.hsinterp.T, cmap="gray")
plt.xlabel("Time [sec]")
plt.ylabel("Wavelet Time [sec]")
plt.title("Wavelets")
plt.xlim(0, t[-1])
plt.tight_layout()
# Spectra
f = np.fft.rfftfreq(nt, dt)
Sh = np.abs(np.fft.rfft(Cop.hsinterp.T, n=nt, axis=0))
plt.figure(figsize=(10, 3))
plt.pcolormesh(t, f, Sh, cmap="jet", vmax=5e0)
plt.ylabel("Frequency [Hz]")
plt.xlabel("Time [sec]")
plt.title("Wavelet spectrogram")
plt.ylim(0, 70)
plt.xlim(0, t[-1])
plt.tight_layout()
###############################################################################
# Finally, we repeat the same exercise with a 2-dimensional non-stationary
# filter
nx, nz = 601, 501
wav1a, _, _ = ricker(t[:17], f0=12)
wav1b, _, _ = ricker(t[:17], f0=30)
wav2a = gaussian(35, 2.0)
wav2b = gaussian(35, 4.0)
wav11 = np.outer(wav1a, wav2a[np.newaxis]).T
wav12 = np.outer(wav1b, wav2a[np.newaxis]).T
wav21 = np.outer(wav1b, wav2b[np.newaxis]).T
wav22 = np.outer(wav1a, wav2b[np.newaxis]).T
wavsize = wav11.shape
hs = np.zeros((2, 2, *wavsize))
hs[0, 0] = wav11
hs[0, 1] = wav12
hs[1, 0] = wav21
hs[1, 1] = wav22
fig, axs = plt.subplots(2, 2, figsize=(10, 5))
axs[0, 0].imshow(wav11, cmap="gray")
axs[0, 0].axis("tight")
axs[0, 0].set_title(r"$H_{1,1}$")
axs[0, 1].imshow(wav12, cmap="gray")
axs[0, 1].axis("tight")
axs[0, 1].set_title(r"$H_{1,2}$")
axs[1, 0].imshow(wav21, cmap="gray")
axs[1, 0].axis("tight")
axs[1, 0].set_title(r"$H_{2,1}$")
axs[1, 1].imshow(wav22, cmap="gray")
axs[1, 1].axis("tight")
axs[1, 1].set_title(r"$H_{2,2}$")
plt.tight_layout()
Cop = pylops.signalprocessing.NonStationaryConvolve2D(
hs=hs, ihx=(201, 401), ihz=(201, 401), dims=(nx, nz), engine="numba"
)
x = np.zeros((nx, nz))
line1 = (np.arange(nx) * np.tan(np.deg2rad(25))).astype(int) + (nz - 1) // 4
line2 = (np.arange(nx) * np.tan(np.deg2rad(-25))).astype(int) + (3 * (nz - 1)) // 4
x[np.arange(nx), np.clip(line1, 0, nz - 1)] = 1.0
x[np.arange(nx), np.clip(line2, 0, nz - 1)] = -1.0
y = Cop @ x
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
axs[0].imshow(x.T, cmap="gray", vmin=-1.0, vmax=1.0)
axs[0].axis("tight")
axs[0].set_title("Input")
axs[1].imshow(y.T, cmap="gray", vmin=-3.0, vmax=3.0)
axs[1].axis("tight")
axs[1].set_title("Output")
plt.tight_layout()
| 3,979 | 28.924812 | 99 | py |
pylops | pylops-master/examples/plot_tvreg.py | r"""
Total Variation (TV) Regularization
===================================
This set of examples shows how to add Total Variation (TV) regularization to an
inverse problem in order to enforce blockiness in the reconstructed model.
To do so we will use the generalizated Split Bregman iterations by means of
:func:`pylops.optimization.sparsity.SplitBregman` solver.
The first example is concerned with denoising of a piece-wise step function
which has been contaminated by noise. The forward model is:
.. math::
\mathbf{y} = \mathbf{x} + \mathbf{n}
meaning that we have an identity operator (:math:`\mathbf{I}`) and inverting
for :math:`\mathbf{x}` from :math:`\mathbf{y}` is impossible without adding
prior information. We will enforce blockiness in the solution by adding a
regularization term that enforces sparsity in the first derivative of
the solution:
.. math::
J = \mu/2 ||\mathbf{y} - \mathbf{I} \mathbf{x}||_2 +
|| \nabla \mathbf{x}||_1
"""
import matplotlib.pyplot as plt
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import pylops
plt.close("all")
np.random.seed(1)
###############################################################################
# Let's start by creating the model and data
nx = 101
x = np.zeros(nx)
x[: nx // 2] = 10
x[nx // 2 : 3 * nx // 4] = -5
Iop = pylops.Identity(nx)
n = np.random.normal(0, 1, nx)
y = Iop * (x + n)
plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.legend()
plt.title("Model and data")
plt.tight_layout()
###############################################################################
# To start we will try to use a simple L2 regularization that enforces
# smoothness in the solution. We can see how denoising is succesfully achieved
# but the solution is much smoother than we wish for.
D2op = pylops.SecondDerivative(nx, edge=True)
lamda = 1e2
xinv = pylops.optimization.leastsquares.regularized_inversion(
Iop, y, [D2op], epsRs=[np.sqrt(lamda / 2)], **dict(iter_lim=30)
)[0]
plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xinv, "r", lw=5, label="xinv")
plt.legend()
plt.title("L2 inversion")
plt.tight_layout()
###############################################################################
# Now we impose blockiness in the solution using the Split Bregman solver
Dop = pylops.FirstDerivative(nx, edge=True, kind="backward")
mu = 0.01
lamda = 0.3
niter_out = 50
niter_in = 3
xinv = pylops.optimization.sparsity.splitbregman(
Iop,
y,
[Dop],
niter_outer=niter_out,
niter_inner=niter_in,
mu=mu,
epsRL1s=[lamda],
tol=1e-4,
tau=1.0,
**dict(iter_lim=30, damp=1e-10)
)[0]
plt.figure(figsize=(10, 5))
plt.plot(x, "k", lw=3, label="x")
plt.plot(y, ".k", label="y=x+n")
plt.plot(xinv, "r", lw=5, label="xinv")
plt.legend()
plt.title("TV inversion")
plt.tight_layout()
###############################################################################
# Finally, we repeat the same exercise on a 2-dimensional image. In this case
# we mock a medical imaging problem: the data is created by appling a 2D
# Fourier Transform to the input model and by randomly sampling 60% of
# its values.
x = np.load("../testdata/optimization/shepp_logan_phantom.npy")
x = x / x.max()
ny, nx = x.shape
perc_subsampling = 0.6
nxsub = int(np.round(ny * nx * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(ny * nx))[:nxsub])
Rop = pylops.Restriction(ny * nx, iava, axis=0, dtype=np.complex128)
Fop = pylops.signalprocessing.FFT2D(dims=(ny, nx))
n = np.random.normal(0, 0.0, (ny, nx))
y = Rop * Fop * (x.ravel() + n.ravel())
yfft = Fop * (x.ravel() + n.ravel())
yfft = np.fft.fftshift(yfft.reshape(ny, nx))
ymask = Rop.mask(Fop * (x.ravel()) + n.ravel())
ymask = ymask.reshape(ny, nx)
ymask.data[:] = np.fft.fftshift(ymask.data)
ymask.mask[:] = np.fft.fftshift(ymask.mask)
fig, axs = plt.subplots(1, 3, figsize=(14, 5))
axs[0].imshow(x, vmin=0, vmax=1, cmap="gray")
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(np.abs(yfft), vmin=0, vmax=1, cmap="rainbow")
axs[1].set_title("Full data")
axs[1].axis("tight")
axs[2].imshow(np.abs(ymask), vmin=0, vmax=1, cmap="rainbow")
axs[2].set_title("Sampled data")
axs[2].axis("tight")
plt.tight_layout()
###############################################################################
# Let's attempt now to reconstruct the model using the Split Bregman
# with anisotropic TV regularization (aka sum of L1 norms of the
# first derivatives over x and y):
#
# .. math::
# J = \mu/2 ||\mathbf{y} - \mathbf{R} \mathbf{F} \mathbf{x}||_2
# + || \nabla_x \mathbf{x}||_1 + || \nabla_y \mathbf{x}||_1
Dop = [
pylops.FirstDerivative(
(ny, nx), axis=0, edge=False, kind="backward", dtype=np.complex128
),
pylops.FirstDerivative(
(ny, nx), axis=1, edge=False, kind="backward", dtype=np.complex128
),
]
# TV
mu = 1.5
lamda = [0.1, 0.1]
niter_out = 20
niter_in = 10
xinv = pylops.optimization.sparsity.splitbregman(
Rop * Fop,
y.ravel(),
Dop,
niter_outer=niter_out,
niter_inner=niter_in,
mu=mu,
epsRL1s=lamda,
tol=1e-4,
tau=1.0,
show=False,
**dict(iter_lim=5, damp=1e-4)
)[0]
xinv = np.real(xinv.reshape(ny, nx))
fig, axs = plt.subplots(1, 2, figsize=(9, 5))
axs[0].imshow(x, vmin=0, vmax=1, cmap="gray")
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(xinv, vmin=0, vmax=1, cmap="gray")
axs[1].set_title("TV Inversion")
axs[1].axis("tight")
plt.tight_layout()
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
axs[0].plot(x[ny // 2], "k", lw=5, label="x")
axs[0].plot(xinv[ny // 2], "r", lw=3, label="xinv TV")
axs[0].set_title("Horizontal section")
axs[0].legend()
axs[1].plot(x[:, nx // 2], "k", lw=5, label="x")
axs[1].plot(xinv[:, nx // 2], "r", lw=3, label="xinv TV")
axs[1].set_title("Vertical section")
axs[1].legend()
plt.tight_layout()
###############################################################################
# Note that more optimized variations of the Split Bregman algorithm have been
# proposed in the literature for this specific problem, both improving the
# overall quality of the inversion and the speed of convergence.
#
# In PyLops we however prefer to implement the generalized Split Bergman
# algorithm as this can used for any sort of problem where we wish to
# add any number of L1 and/or L2 regularization terms to the cost function
# to minimize.
| 6,483 | 29.729858 | 79 | py |
pylops | pylops-master/examples/plot_conj.py | """
Conj
====
This example shows how to use the :py:class:`pylops.basicoperators.Conj`
operator.
This operator returns the complex conjugate in both forward and adjoint
modes (it is self adjoint).
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define a Conj operator to get the complex conjugate
# of the input.
M = 5
x = np.arange(M) + 1j * np.arange(M)[::-1]
Rop = pylops.basicoperators.Conj(M, dtype="complex128")
y = Rop * x
xadj = Rop.H * y
_, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].plot(np.real(x), lw=2, label="Real")
axs[0].plot(np.imag(x), lw=2, label="Imag")
axs[0].legend()
axs[0].set_title("Input")
axs[1].plot(np.real(y), lw=2, label="Real")
axs[1].plot(np.imag(y), lw=2, label="Imag")
axs[1].legend()
axs[1].set_title("Forward of Input")
axs[2].plot(np.real(xadj), lw=2, label="Real")
axs[2].plot(np.imag(xadj), lw=2, label="Imag")
axs[2].legend()
axs[2].set_title("Adjoint of Forward")
plt.tight_layout()
| 1,064 | 24.357143 | 79 | py |
pylops | pylops-master/examples/plot_wavs.py | """
Wavelets
========
This example shows how to use the different wavelets available PyLops.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start with defining a time axis and creating the FFT operator
dt = 0.004
nt = 1001
t = np.arange(nt) * dt
Fop = pylops.signalprocessing.FFT(2 * nt - 1, sampling=dt, real=True)
f = Fop.f
###############################################################################
# We can now create the different wavelets and display them
# Gaussian
wg, twg, wgc = pylops.utils.wavelets.gaussian(t, std=2)
# Gaussian
wk, twk, wgk = pylops.utils.wavelets.klauder(t, f=[4, 30], taper=np.hanning)
# Ormsby
wo, two, woc = pylops.utils.wavelets.ormsby(t, f=[5, 9, 25, 30], taper=np.hanning)
# Ricker
wr, twr, wrc = pylops.utils.wavelets.ricker(t, f0=17)
# Frequency domain
wgf = Fop @ wg
wkf = Fop @ wk
wof = Fop @ wo
wrf = Fop @ wr
###############################################################################
fig, axs = plt.subplots(1, 2, figsize=(14, 6))
axs[0].plot(twg, wg, "k", lw=2, label="Gaussian")
axs[0].plot(twk, wk, "r", lw=2, label="Klauder")
axs[0].plot(two, wo, "b", lw=2, label="Ormsby")
axs[0].plot(twr, wr, "y--", lw=2, label="Ricker")
axs[0].set(xlim=(-0.4, 0.4), xlabel="Time [s]")
axs[0].legend()
axs[1].plot(f, np.abs(wgf) / np.abs(wgf).max(), "k", lw=2, label="Gaussian")
axs[1].plot(f, np.abs(wkf) / np.abs(wkf).max(), "r", lw=2, label="Klauder")
axs[1].plot(f, np.abs(wof) / np.abs(wof).max(), "b", lw=2, label="Ormsby")
axs[1].plot(f, np.abs(wrf) / np.abs(wrf).max(), "y--", lw=2, label="Ricker")
axs[1].set(xlim=(0, 50), xlabel="Frequency [Hz]")
axs[1].legend()
plt.tight_layout()
| 1,771 | 29.551724 | 82 | py |
pylops | pylops-master/examples/plot_seismicevents.py | """
Synthetic seismic
=================
This example shows how to use the :py:mod:`pylops.utils.seismicevents` module
to quickly create synthetic seismic data to be used for toy examples and tests.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
############################################
# Let's first define the time and space axes as well as some auxiliary input
# parameters that we will use to create a Ricker wavelet
par = {
"ox": -200,
"dx": 2,
"nx": 201,
"oy": -100,
"dy": 2,
"ny": 101,
"ot": 0,
"dt": 0.004,
"nt": 501,
"f0": 20,
"nfmax": 210,
}
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(np.arange(41) * par["dt"], f0=par["f0"])[0]
############################################
# We want to create a 2d data with a number of crossing linear events using the
# :py:func:`pylops.utils.seismicevents.linear2d` routine.
v = 1500
t0 = [0.2, 0.7, 1.6]
theta = [40, 0, -60]
amp = [1.0, 0.6, -2.0]
mlin, mlinwav = pylops.utils.seismicevents.linear2d(x, t, v, t0, theta, amp, wav)
############################################
# We can also create a 2d data with a number of crossing parabolic events using the
# :py:func:`pylops.utils.seismicevents.parabolic2d` routine.
px = [0, 0, 0]
pxx = [1e-5, 5e-6, 1e-6]
mpar, mparwav = pylops.utils.seismicevents.parabolic2d(x, t, t0, px, pxx, amp, wav)
############################################
# And similarly we can create a 2d data with a number of crossing hyperbolic
# events using the :py:func:`pylops.utils.seismicevents.hyperbolic2d` routine.
vrms = [500, 700, 1700]
mhyp, mhypwav = pylops.utils.seismicevents.hyperbolic2d(x, t, t0, vrms, amp, wav)
############################################
# We can now visualize the different events
# sphinx_gallery_thumbnail_number = 2
fig, axs = plt.subplots(1, 3, figsize=(9, 5))
axs[0].imshow(
mlinwav.T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_title("Linear events", fontsize=12, fontweight="bold")
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[1].imshow(
mparwav.T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1].set_title("Parabolic events", fontsize=12, fontweight="bold")
axs[1].set_xlabel(r"$x(m)$")
axs[1].set_ylabel(r"$t(s)$")
axs[2].imshow(
mhypwav.T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[2].set_title("Hyperbolic events", fontsize=12, fontweight="bold")
axs[2].set_xlabel(r"$x(m)$")
axs[2].set_ylabel(r"$t(s)$")
plt.tight_layout()
############################################
# Let's finally repeat the same exercise in 3d
phi = [20, 0, -10]
mlin, mlinwav = pylops.utils.seismicevents.linear3d(
x, y, t, v, t0, theta, phi, amp, wav
)
fig, axs = plt.subplots(1, 2, figsize=(7, 5), sharey=True)
fig.suptitle("Linear events in 3d", fontsize=12, fontweight="bold", y=0.95)
axs[0].imshow(
mlinwav[par["ny"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[1].imshow(
mlinwav[:, par["nx"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(y.min(), y.max(), t.max(), t.min()),
)
axs[1].set_xlabel(r"$y(m)$")
mhyp, mhypwav = pylops.utils.seismicevents.hyperbolic3d(
x, y, t, t0, vrms, vrms, amp, wav
)
fig, axs = plt.subplots(1, 2, figsize=(7, 5), sharey=True)
fig.suptitle("Hyperbolic events in 3d", fontsize=12, fontweight="bold", y=0.95)
axs[0].imshow(
mhypwav[par["ny"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[1].imshow(
mhypwav[:, par["nx"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(y.min(), y.max(), t.max(), t.min()),
)
axs[1].set_xlabel(r"$y(m)$")
plt.tight_layout()
| 4,391 | 25.457831 | 83 | py |
pylops | pylops-master/examples/plot_identity.py | """
Identity
========
This example shows how to use the :py:class:`pylops.Identity` operator to transfer model
into data and viceversa.
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define an identity operator :math:`\mathbf{Iop}` with same number of
# elements for data and model (:math:`N=M`).
N, M = 5, 5
x = np.arange(M)
Iop = pylops.Identity(M, dtype="int")
y = Iop * x
xadj = Iop.H * y
gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 4))
ax = plt.subplot(gs[0, 0:3])
im = ax.imshow(np.eye(N), cmap="rainbow")
ax.set_title("A", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 3])
ax.imshow(x[:, np.newaxis], cmap="rainbow")
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 4])
ax.text(
0.35,
0.5,
"=",
horizontalalignment="center",
verticalalignment="center",
size=40,
fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 5])
ax.imshow(y[:, np.newaxis], cmap="rainbow")
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.colorbar(im, ax=ax, ticks=[0, 1], pad=0.3, shrink=0.7)
plt.tight_layout()
###############################################################################
# Similarly we can consider the case with data bigger than model
N, M = 10, 5
x = np.arange(M)
Iop = pylops.Identity(N, M, dtype="int")
y = Iop * x
xadj = Iop.H * y
print(f"x = {x} ")
print(f"I*x = {y} ")
print(f"I'*y = {xadj} ")
###############################################################################
# and model bigger than data
N, M = 5, 10
x = np.arange(M)
Iop = pylops.Identity(N, M, dtype="int")
y = Iop * x
xadj = Iop.H * y
print(f"x = {x} ")
print(f"I*x = {y} ")
print(f"I'*y = {xadj} ")
###############################################################################
# Note that this operator can be useful in many real-life applications when for example
# we want to manipulate a subset of the model array and keep intact the rest of the array.
# For example:
#
# .. math::
# \begin{bmatrix}
# \mathbf{A} \quad \mathbf{I}
# \end{bmatrix}
# \begin{bmatrix}
# \mathbf{x_1} \\
# \mathbf{x_2}
# \end{bmatrix} = \mathbf{A} \mathbf{x_1} + \mathbf{x_2}
#
# Refer to the tutorial on *Optimization* for more details on this.
| 2,896 | 26.330189 | 90 | py |
pylops | pylops-master/examples/plot_derivative.py | """
Derivatives
===========
This example shows how to use the suite of derivative operators, namely
:py:class:`pylops.FirstDerivative`, :py:class:`pylops.SecondDerivative`,
:py:class:`pylops.Laplacian` and :py:class:`pylops.Gradient`,
:py:class:`pylops.FirstDirectionalDerivative` and
:py:class:`pylops.SecondDirectionalDerivative`.
The derivative operators are very useful when the model to be inverted for
is expect to be smooth in one or more directions. As shown in the
*Optimization* tutorial, these operators will be used as part of the
regularization term to obtain a smooth solution.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by looking at a simple first-order centered derivative and how
# we could implement it naively by creating a dense matrix. Note that we will
# not apply the derivative where the stencil is partially outside of the range
# of the input signal (i.e., at the edge of the signal)
nx = 10
# dense stencil: +0.5 on the first superdiagonal, -0.5 on the first
# subdiagonal; edge rows are zeroed below since the stencil does not fit there
D = np.diag(0.5 * np.ones(nx - 1), k=1) - np.diag(0.5 * np.ones(nx - 1), -1)
D[0] = D[-1] = 0
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
im = plt.imshow(D, cmap="rainbow", vmin=-0.5, vmax=0.5)
ax.set_title("First derivative", size=14, fontweight="bold")
ax.set_xticks(np.arange(nx - 1) + 0.5)
ax.set_yticks(np.arange(nx - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.colorbar(im, ax=ax, ticks=[-0.5, 0.5], shrink=0.7)
###############################################################################
# We now create a signal filled with zero and a single one at its center and
# apply the derivative matrix by means of a dot product
x = np.zeros(nx)
x[int(nx / 2)] = 1
y_dir = np.dot(D, x)
xadj_dir = np.dot(D.T, y_dir)
###############################################################################
# Let's now do the same using the :py:class:`pylops.FirstDerivative` operator
# and compare its outputs after applying the forward and adjoint operators
# to those from the dense matrix.
D1op = pylops.FirstDerivative(nx, dtype="float32")
y_lop = D1op * x
xadj_lop = D1op.H * y_lop
fig, axs = plt.subplots(3, 1, figsize=(13, 8), sharex=True)
axs[0].stem(np.arange(nx), x, linefmt="k", markerfmt="ko")
axs[0].set_title("Input", size=20, fontweight="bold")
axs[1].stem(np.arange(nx), y_dir, linefmt="k", markerfmt="ko", label="direct")
axs[1].stem(np.arange(nx), y_lop, linefmt="--r", markerfmt="ro", label="lop")
axs[1].set_title("Forward", size=20, fontweight="bold")
axs[1].legend()
axs[2].stem(np.arange(nx), xadj_dir, linefmt="k", markerfmt="ko", label="direct")
axs[2].stem(np.arange(nx), xadj_lop, linefmt="--r", markerfmt="ro", label="lop")
axs[2].set_title("Adjoint", size=20, fontweight="bold")
axs[2].legend()
plt.tight_layout()
###############################################################################
# As expected we obtain the same result, with the only difference that
# in the second case we did not need to explicitly create a matrix,
# saving memory and computational time.
#
# Let's move onto applying the same first derivative to a 2d array in
# the first direction
nx, ny = 11, 21
A = np.zeros((nx, ny))
A[nx // 2, ny // 2] = 1.0  # unit spike in the middle of the model
D1op = pylops.FirstDerivative((nx, ny), axis=0, dtype="float64")
B = D1op * A
fig, axs = plt.subplots(1, 2, figsize=(10, 3), sharey=True)
fig.suptitle(
    "First Derivative in 1st direction", fontsize=12, fontweight="bold", y=0.95
)
im = axs[0].imshow(A, interpolation="nearest", cmap="rainbow")
axs[0].axis("tight")
axs[0].set_title("x")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation="nearest", cmap="rainbow")
axs[1].axis("tight")
axs[1].set_title("y")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We can now do the same for the second derivative
A = np.zeros((nx, ny))
A[nx // 2, ny // 2] = 1.0
D2op = pylops.SecondDerivative(dims=(nx, ny), axis=0, dtype="float64")
B = D2op * A
fig, axs = plt.subplots(1, 2, figsize=(10, 3), sharey=True)
fig.suptitle(
    "Second Derivative in 1st direction", fontsize=12, fontweight="bold", y=0.95
)
im = axs[0].imshow(A, interpolation="nearest", cmap="rainbow")
axs[0].axis("tight")
axs[0].set_title("x")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation="nearest", cmap="rainbow")
axs[1].axis("tight")
axs[1].set_title("y")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We can also apply the second derivative to the second direction of
# our data (``axis=1``)
D2op = pylops.SecondDerivative(dims=(nx, ny), axis=1, dtype="float64")
B = D2op * A
fig, axs = plt.subplots(1, 2, figsize=(10, 3), sharey=True)
fig.suptitle(
    "Second Derivative in 2nd direction", fontsize=12, fontweight="bold", y=0.95
)
im = axs[0].imshow(A, interpolation="nearest", cmap="rainbow")
axs[0].axis("tight")
axs[0].set_title("x")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation="nearest", cmap="rainbow")
axs[1].axis("tight")
axs[1].set_title("y")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We use the symmetrical Laplacian operator as well
# as an asymmetrical version of it (by adding more weight to the
# derivative along one direction)
# symmetrical
L2symop = pylops.Laplacian(dims=(nx, ny), weights=(1, 1), dtype="float64")
# asymmetrical
L2asymop = pylops.Laplacian(dims=(nx, ny), weights=(3, 1), dtype="float64")
Bsym = L2symop * A
Basym = L2asymop * A
fig, axs = plt.subplots(1, 3, figsize=(10, 3), sharey=True)
fig.suptitle("Laplacian", fontsize=12, fontweight="bold", y=0.95)
im = axs[0].imshow(A, interpolation="nearest", cmap="rainbow")
axs[0].axis("tight")
axs[0].set_title("x")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Bsym, interpolation="nearest", cmap="rainbow")
axs[1].axis("tight")
axs[1].set_title("y sym")
plt.colorbar(im, ax=axs[1])
im = axs[2].imshow(Basym, interpolation="nearest", cmap="rainbow")
axs[2].axis("tight")
axs[2].set_title("y asym")
plt.colorbar(im, ax=axs[2])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We consider now the gradient operator. Given a 2-dimensional array,
# this operator applies first-order derivatives on both dimensions and
# concatenates them.
Gop = pylops.Gradient(dims=(nx, ny), dtype="float64")
B = Gop * A
C = Gop.H * B
fig, axs = plt.subplots(2, 2, figsize=(10, 6), sharex=True, sharey=True)
fig.suptitle("Gradient", fontsize=12, fontweight="bold", y=0.95)
im = axs[0, 0].imshow(A, interpolation="nearest", cmap="rainbow")
axs[0, 0].axis("tight")
axs[0, 0].set_title("x")
plt.colorbar(im, ax=axs[0, 0])
im = axs[0, 1].imshow(B[0, ...], interpolation="nearest", cmap="rainbow")
axs[0, 1].axis("tight")
axs[0, 1].set_title("y - 1st direction")
plt.colorbar(im, ax=axs[0, 1])
im = axs[1, 1].imshow(B[1, ...], interpolation="nearest", cmap="rainbow")
axs[1, 1].axis("tight")
axs[1, 1].set_title("y - 2nd direction")
plt.colorbar(im, ax=axs[1, 1])
im = axs[1, 0].imshow(C, interpolation="nearest", cmap="rainbow")
axs[1, 0].axis("tight")
axs[1, 0].set_title("xadj")
plt.colorbar(im, ax=axs[1, 0])
plt.tight_layout()
###############################################################################
# Finally we use the Gradient operator to compute directional derivatives.
# We create a model which has some layering in the horizontal and vertical
# directions and show how the directional derivatives differ from standard
# derivatives
nx, nz = 60, 40
horlayers = np.cumsum(np.random.uniform(2, 10, 20).astype(int))
horlayers = horlayers[horlayers < nz // 2]
nhorlayers = len(horlayers)
vertlayers = np.cumsum(np.random.uniform(2, 20, 10).astype(int))
vertlayers = vertlayers[vertlayers < nx]
nvertlayers = len(vertlayers)
# background velocity with horizontal layers on top and vertical blocks below
A = 1500 * np.ones((nz, nx))
for top, base in zip(horlayers[:-1], horlayers[1:]):
    A[top:base] = np.random.normal(2000, 200)
for top, base in zip(vertlayers[:-1], vertlayers[1:]):
    A[horlayers[-1] :, top:base] = np.random.normal(2000, 200)
# direction field: unit vector along axis 0 in the layered part, axis 1 below
v = np.zeros((2, nz, nx))
v[0, : horlayers[-1]] = 1
v[1, horlayers[-1] :] = 1
Ddop = pylops.FirstDirectionalDerivative((nz, nx), v=v, sampling=(nz, nx))
D2dop = pylops.SecondDirectionalDerivative((nz, nx), v=v, sampling=(nz, nx))
dirder = Ddop * A
dir2der = D2dop * A
jump = 4
fig, axs = plt.subplots(3, 1, figsize=(4, 9), sharex=True)
im = axs[0].imshow(A, cmap="gist_rainbow", extent=(0, nx // jump, nz // jump, 0))
q = axs[0].quiver(
    np.arange(nx // jump) + 0.5,
    np.arange(nz // jump) + 0.5,
    np.flipud(v[1, ::jump, ::jump]),
    np.flipud(v[0, ::jump, ::jump]),
    color="w",
    linewidths=20,
)
axs[0].set_title("x")
axs[0].axis("tight")
axs[1].imshow(dirder, cmap="gray", extent=(0, nx // jump, nz // jump, 0))
axs[1].set_title("y = D * x")
axs[1].axis("tight")
axs[2].imshow(dir2der, cmap="gray", extent=(0, nx // jump, nz // jump, 0))
axs[2].set_title("y = D2 * x")
axs[2].axis("tight")
plt.tight_layout()
| 9,276 | 34.544061 | 81 | py |
pylops | pylops-master/examples/plot_transpose.py | r"""
Transpose
=========
This example shows how to use the :py:class:`pylops.Transpose`
operator. For arrays that are 2-dimensional in nature this operator
simply transposes rows and columns. For multi-dimensional arrays, this
operator can be used to permute dimensions
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by creating a 2-dimensional array
dims = (20, 40)
x = np.arange(800).reshape(dims)
###############################################################################
# We use now the :py:class:`pylops.Transpose` operator to swap the two
# dimensions. As you will see the adjoint of this operator brings the data
# back to its original model, or in other words the adjoint operator is equal
# in this case to the inverse operator.
Top = pylops.Transpose(dims=dims, axes=(1, 0))
y = Top * x
xadj = Top.H * y
fig, axs = plt.subplots(1, 3, figsize=(10, 4))
fig.suptitle("Transpose for 2d data", fontsize=14, fontweight="bold", y=1.15)
axs[0].imshow(x, cmap="rainbow", vmin=0, vmax=800)
axs[0].set_title(r"$x$")
axs[0].axis("tight")
axs[1].imshow(y, cmap="rainbow", vmin=0, vmax=800)
axs[1].set_title(r"$y = F x$")
axs[1].axis("tight")
axs[2].imshow(xadj, cmap="rainbow", vmin=0, vmax=800)
axs[2].set_title(r"$x_{adj} = F^H y$")
axs[2].axis("tight")
plt.tight_layout()
###############################################################################
# A similar approach can of course be taken to swap multiple axes of
# multi-dimensional arrays for any number of dimensions.
| 1,643 | 33.25 | 79 | py |
pylops | pylops-master/examples/plot_sliding.py | r"""
1D, 2D and 3D Sliding
=====================
This example shows how to use the
:py:class:`pylops.signalprocessing.Sliding1D`,
:py:class:`pylops.signalprocessing.Sliding2D`
and :py:class:`pylops.signalprocessing.Sliding3D` operators
to perform repeated transforms over small strides of a 1-, 2- or 3-dimensional
array.
For the 1-d case, the transform that we apply in this example is the
:py:class:`pylops.signalprocessing.FFT`.
For the 2- and 3-d cases, the transform that we apply in this example is the
:py:class:`pylops.signalprocessing.Radon2D`
(and :py:class:`pylops.signalprocessing.Radon3D`) but this operator has been
designed to allow a variety of transforms as long as they operate with signals
that are 2 or 3-dimensional in nature, respectively.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")  # close figures left over from previously run examples
###############################################################################
# Let's start by creating a 1-dimensional array of size :math:`n_t` and create
# a sliding operator to compute its transformed representation.
# Window plan: nwins windows of nwin samples each, with nover overlapping
# samples shared by each of the nwins - 1 adjacent window pairs.
nwins = 4
nwin = 26
nover = 3
nop = 64  # FFT size for the per-window transform
# Total data length; the original hard-coded 3 here, which is only correct
# because nwins - 1 == 3. Use the general expression instead.
dimd = nwin * nwins - (nwins - 1) * nover
t = np.arange(dimd) * 0.004  # time axis, 4 ms sampling
data = np.sin(2 * np.pi * 20 * t)  # 20 Hz sinusoid as test signal
Op = pylops.signalprocessing.FFT(nwin, nfft=nop, real=True)
nwins, dim, mwin_inends, dwin_inends = pylops.signalprocessing.sliding1d_design(
    dimd, nwin, nover, (nop + 2) // 2
)
Slid = pylops.signalprocessing.Sliding1D(
    Op.H,
    dim,
    dimd,
    nwin,
    nover,
    tapertype=None,
)
# adjoint extracts windows and transforms each one (one row of
# coefficients per window, plotted below as "Transformed domain")
x = Slid.H * data
###############################################################################
# We now create a similar operator but we also add a taper to the overlapping
# parts of the patches and use it to reconstruct the original signal.
# This is done by simply using the adjoint of the
# :py:class:`pylops.signalprocessing.Sliding1D` operator. Note that for non-
# orthogonal operators, this must be replaced by an inverse.
Slid = pylops.signalprocessing.Sliding1D(
    Op.H, dim, dimd, nwin, nover, tapertype="cosine"
)
reconstructed_data = Slid * x
fig, axs = plt.subplots(1, 2, figsize=(15, 3))
axs[0].plot(t, data, "k", label="Data")
axs[0].plot(t, reconstructed_data.real, "--r", label="Rec Data")
axs[0].legend()
axs[0].set(xlabel=r"$t$ [s]", title="Original domain")
for i in range(nwins):
    axs[1].plot(Op.f, np.abs(x[i, :]), label=f"Window {i+1}/{nwins}")
axs[1].set(xlabel=r"$f$ [Hz]", title="Transformed domain")
axs[1].legend()
plt.tight_layout()
###############################################################################
# We now create a 2-dimensional array of size :math:`n_x \times n_t`
# composed of 3 parabolic events
par = {"ox": -140, "dx": 2, "nx": 140, "ot": 0, "dt": 0.004, "nt": 200, "f0": 20}
v = 1500
t0 = [0.2, 0.4, 0.5]
px = [0, 0, 0]
pxx = [1e-5, 5e-6, 1e-20]
amp = [1.0, -2, 0.5]
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"])[0]
# Generate model
_, data = pylops.utils.seismicevents.parabolic2d(x, t, t0, px, pxx, amp, wav)
###############################################################################
# We want to divide this 2-dimensional data into small overlapping
# patches in the spatial direction and apply the adjoint of the
# :py:class:`pylops.signalprocessing.Radon2D` operator to each patch. This is
# done by simply using the adjoint of the
# :py:class:`pylops.signalprocessing.Sliding2D` operator
winsize = 36
overlap = 10
npx = 61
px = np.linspace(-5e-3, 5e-3, npx)  # overwrites the event slopes defined above
dimsd = data.shape
# Sliding window transform without taper
Op = pylops.signalprocessing.Radon2D(
    t,
    np.linspace(-par["dx"] * winsize // 2, par["dx"] * winsize // 2, winsize),
    px,
    centeredh=True,
    kind="linear",
    engine="numba",
)
nwins, dims, mwin_inends, dwin_inends = pylops.signalprocessing.sliding2d_design(
    dimsd, winsize, overlap, (npx, par["nt"])
)
Slid = pylops.signalprocessing.Sliding2D(
    Op, dims, dimsd, winsize, overlap, tapertype=None
)
radon = Slid.H * data
###############################################################################
# We now create a similar operator but we also add a taper to the overlapping
# parts of the patches.
Slid = pylops.signalprocessing.Sliding2D(
    Op, dims, dimsd, winsize, overlap, tapertype="cosine"
)
reconstructed_data = Slid * radon
# Reshape for plotting
radon = radon.reshape(dims)
reconstructed_data = reconstructed_data.reshape(dimsd)
###############################################################################
# We will see that our reconstructed signal presents some small artifacts.
# This is because we have not inverted our operator but simply applied
# the adjoint to estimate the representation of the input data in the Radon
# domain. We can do better if we use the inverse instead.
radoninv = Slid.div(data.ravel(), niter=10)
reconstructed_datainv = Slid * radoninv.ravel()
radoninv = radoninv.reshape(dims)
reconstructed_datainv = reconstructed_datainv.reshape(dimsd)
###############################################################################
# Let's finally visualize all the intermediate results as well as our final
# data reconstruction after inverting the
# :py:class:`pylops.signalprocessing.Sliding2D` operator.
fig, axs = plt.subplots(2, 3, sharey=True, figsize=(12, 10))
im = axs[0][0].imshow(data.T, cmap="gray")
axs[0][0].set_title("Original data")
plt.colorbar(im, ax=axs[0][0])
axs[0][0].axis("tight")
im = axs[0][1].imshow(radon.T, cmap="gray")
axs[0][1].set_title("Adjoint Radon")
plt.colorbar(im, ax=axs[0][1])
axs[0][1].axis("tight")
im = axs[0][2].imshow(reconstructed_data.T, cmap="gray")
axs[0][2].set_title("Reconstruction from adjoint")
plt.colorbar(im, ax=axs[0][2])
axs[0][2].axis("tight")
axs[1][0].axis("off")
im = axs[1][1].imshow(radoninv.T, cmap="gray")
axs[1][1].set_title("Inverse Radon")
plt.colorbar(im, ax=axs[1][1])
axs[1][1].axis("tight")
im = axs[1][2].imshow(reconstructed_datainv.T, cmap="gray")
axs[1][2].set_title("Reconstruction from inverse")
plt.colorbar(im, ax=axs[1][2])
axs[1][2].axis("tight")
# NOTE(review): the stride 24 used to draw window boundaries does not match
# winsize - overlap = 26; the markers may be slightly offset - confirm
for i in range(0, 114, 24):
    axs[0][0].axvline(i, color="w", lw=1, ls="--")
    axs[0][0].axvline(i + winsize, color="k", lw=1, ls="--")
    axs[0][0].text(
        i + winsize // 2,
        par["nt"] - 10,
        "w" + str(i // 24),
        ha="center",
        va="center",
        weight="bold",
        color="w",
    )
# each Radon-domain window is npx = 61 traces wide
for i in range(0, 305, 61):
    axs[0][1].axvline(i, color="w", lw=1, ls="--")
    axs[0][1].text(
        i + npx // 2,
        par["nt"] - 10,
        "w" + str(i // 61),
        ha="center",
        va="center",
        weight="bold",
        color="w",
    )
    axs[1][1].axvline(i, color="w", lw=1, ls="--")
    axs[1][1].text(
        i + npx // 2,
        par["nt"] - 10,
        "w" + str(i // 61),
        ha="center",
        va="center",
        weight="bold",
        color="w",
    )
###############################################################################
# We notice two things: i) provided small enough patches and a transform
# that can explain data *locally*, we have been able to reconstruct our
# original data almost to perfection; ii) the inverse is better than the
# adjoint as expected, as the adjoint does not only introduce small artifacts
# but also does not respect the original amplitudes of the data.
#
# An appropriate transform alongside a sliding window approach is
# a very good approach for interpolation (or *regularization*) of
# irregularly sampled seismic data.
###############################################################################
# Finally we do the same for a 3-dimensional array of size
# :math:`n_y \times n_x \times n_t` composed of 2 hyperbolic events
par = {
    "oy": -13,
    "dy": 2,
    "ny": 14,
    "ox": -17,
    "dx": 2,
    "nx": 18,
    "ot": 0,
    "dt": 0.004,
    "nt": 50,
    "f0": 30,
}
vrms = [200, 200]
t0 = [0.05, 0.1]
amp = [1.0, -2]
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"])[0]
# Generate model
_, data = pylops.utils.seismicevents.hyperbolic3d(x, y, t, t0, vrms, vrms, amp, wav)
# Sliding window plan (per spatial axis: window size and overlap)
winsize = (5, 6)
overlap = (2, 3)
npx = 21
px = np.linspace(-5e-3, 5e-3, npx)
dimsd = data.shape
# Sliding window transform without taper
Op = pylops.signalprocessing.Radon3D(
    t,
    np.linspace(-par["dy"] * winsize[0] // 2, par["dy"] * winsize[0] // 2, winsize[0]),
    np.linspace(-par["dx"] * winsize[1] // 2, par["dx"] * winsize[1] // 2, winsize[1]),
    px,
    px,
    centeredh=True,
    kind="linear",
    engine="numba",
)
nwins, dims, mwin_inends, dwin_inends = pylops.signalprocessing.sliding3d_design(
    dimsd, winsize, overlap, (npx, npx, par["nt"])
)
Slid = pylops.signalprocessing.Sliding3D(
    Op, dims, dimsd, winsize, overlap, (npx, npx), tapertype=None
)
radon = Slid.H * data
Slid = pylops.signalprocessing.Sliding3D(
    Op, dims, dimsd, winsize, overlap, (npx, npx), tapertype="cosine"
)
reconstructed_data = Slid * radon
radoninv = Slid.div(data.ravel(), niter=10)
radoninv = radoninv.reshape(Slid.dims)
reconstructed_datainv = Slid * radoninv
# Display a y-slice (at ny // 2) of each volume
fig, axs = plt.subplots(2, 3, sharey=True, figsize=(12, 7))
im = axs[0][0].imshow(data[par["ny"] // 2].T, cmap="gray", vmin=-2, vmax=2)
axs[0][0].set_title("Original data")
plt.colorbar(im, ax=axs[0][0])
axs[0][0].axis("tight")
im = axs[0][1].imshow(
    radon[nwins[0] // 2, :, :, npx // 2].reshape(nwins[1] * npx, par["nt"]).T,
    cmap="gray",
    vmin=-25,
    vmax=25,
)
axs[0][1].set_title("Adjoint Radon")
plt.colorbar(im, ax=axs[0][1])
axs[0][1].axis("tight")
im = axs[0][2].imshow(
    reconstructed_data[par["ny"] // 2].T, cmap="gray", vmin=-1000, vmax=1000
)
axs[0][2].set_title("Reconstruction from adjoint")
plt.colorbar(im, ax=axs[0][2])
axs[0][2].axis("tight")
axs[1][0].axis("off")
im = axs[1][1].imshow(
    radoninv[nwins[0] // 2, :, :, npx // 2].reshape(nwins[1] * npx, par["nt"]).T,
    cmap="gray",
    vmin=-0.025,
    vmax=0.025,
)
axs[1][1].set_title("Inverse Radon")
plt.colorbar(im, ax=axs[1][1])
axs[1][1].axis("tight")
im = axs[1][2].imshow(
    reconstructed_datainv[par["ny"] // 2].T, cmap="gray", vmin=-2, vmax=2
)
axs[1][2].set_title("Reconstruction from inverse")
plt.colorbar(im, ax=axs[1][2])
axs[1][2].axis("tight")
# Display a time-slice (at sample 25) of each volume
fig, axs = plt.subplots(2, 3, figsize=(12, 7))
im = axs[0][0].imshow(data[:, :, 25], cmap="gray", vmin=-2, vmax=2)
axs[0][0].set_title("Original data")
plt.colorbar(im, ax=axs[0][0])
axs[0][0].axis("tight")
im = axs[0][1].imshow(
    radon[nwins[0] // 2, :, :, :, 25].reshape(nwins[1] * npx, npx).T,
    cmap="gray",
    vmin=-25,
    vmax=25,
)
axs[0][1].set_title("Adjoint Radon")
plt.colorbar(im, ax=axs[0][1])
axs[0][1].axis("tight")
im = axs[0][2].imshow(reconstructed_data[:, :, 25], cmap="gray", vmin=-1000, vmax=1000)
axs[0][2].set_title("Reconstruction from adjoint")
plt.colorbar(im, ax=axs[0][2])
axs[0][2].axis("tight")
axs[1][0].axis("off")
im = axs[1][1].imshow(
    radoninv[nwins[0] // 2, :, :, :, 25].reshape(nwins[1] * npx, npx).T,
    cmap="gray",
    vmin=-0.025,
    vmax=0.025,
)
axs[1][1].set_title("Inverse Radon")
plt.colorbar(im, ax=axs[1][1])
axs[1][1].axis("tight")
im = axs[1][2].imshow(reconstructed_datainv[:, :, 25], cmap="gray", vmin=-2, vmax=2)
axs[1][2].set_title("Reconstruction from inverse")
plt.colorbar(im, ax=axs[1][2])
axs[1][2].axis("tight")
plt.tight_layout()
| 11,470 | 30.34153 | 87 | py |
pylops | pylops-master/examples/plot_ista.py | r"""
MP, OMP, ISTA and FISTA
=======================
This example shows how to use the :py:class:`pylops.optimization.sparsity.omp`,
:py:class:`pylops.optimization.sparsity.irls`,
:py:class:`pylops.optimization.sparsity.ista`, and
:py:class:`pylops.optimization.sparsity.fista` solvers.
These solvers can be used when the model to retrieve is supposed to have
a sparse representation in a certain domain. MP and OMP use a L0 norm and
mathematically translates to solving the following constrained problem:
.. math::
    \|\mathbf{x}\|_0 \quad \text{s.t.} \quad \|\mathbf{Op}\mathbf{x}- \mathbf{b}\|_2 \leq \sigma,
while IRLS, ISTA and FISTA solve an unconstrained problem with a L1
regularization term:
.. math::
    J = \|\mathbf{d} - \mathbf{Op} \mathbf{x}\|_2^2 + \epsilon \|\mathbf{x}\|_1
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start with a simple example, where we create a dense mixing matrix
# and a sparse signal and we use OMP and ISTA to recover such a signal.
# Note that the mixing matrix leads to an underdetermined system of equations
# (:math:`N < M`) so being able to add some extra prior information regarding
# the sparsity of our desired model is essential to be able to invert
# such a system.
N, M = 15, 20
A = np.random.randn(N, M)
A = A / np.linalg.norm(A, axis=0)  # normalize columns to unit norm
Aop = pylops.MatrixMult(A)
x = np.random.rand(M)
x[x < 0.9] = 0  # zero out ~90% of the entries to make x sparse
y = Aop * x
# MP/OMP (with niter_inner=0 omp presumably reduces to plain MP - see
# pylops.optimization.sparsity.omp documentation)
eps = 1e-2
maxit = 500
x_mp = pylops.optimization.sparsity.omp(
    Aop, y, niter_outer=maxit, niter_inner=0, sigma=1e-4
)[0]
x_omp = pylops.optimization.sparsity.omp(Aop, y, niter_outer=maxit, sigma=1e-4)[0]
# IRLS
x_irls = pylops.optimization.sparsity.irls(
    Aop, y, nouter=50, epsI=1e-5, kind="model", **dict(iter_lim=10)
)[0]
# ISTA
x_ista = pylops.optimization.sparsity.ista(
    Aop,
    y,
    niter=maxit,
    eps=eps,
    tol=1e-3,
)[0]
fig, ax = plt.subplots(1, 1, figsize=(8, 3))
m, s, b = ax.stem(x, linefmt="k", basefmt="k", markerfmt="ko", label="True")
plt.setp(m, markersize=15)
m, s, b = ax.stem(x_mp, linefmt="--c", basefmt="--c", markerfmt="co", label="MP")
plt.setp(m, markersize=10)
m, s, b = ax.stem(x_omp, linefmt="--g", basefmt="--g", markerfmt="go", label="OMP")
plt.setp(m, markersize=7)
m, s, b = ax.stem(x_irls, linefmt="--m", basefmt="--m", markerfmt="mo", label="IRLS")
plt.setp(m, markersize=7)
m, s, b = ax.stem(x_ista, linefmt="--r", basefmt="--r", markerfmt="ro", label="ISTA")
plt.setp(m, markersize=3)
ax.set_title("Model", size=15, fontweight="bold")
ax.legend()
plt.tight_layout()
###############################################################################
# We now consider a more interesting problem, *wavelet deconvolution*
# from a signal that we assume being composed by a train of spikes convolved
# with a certain wavelet. We will see how solving such a problem with a
# least-squares solver such as
# :py:class:`pylops.optimization.leastsquares.regularized_inversion` does not
# produce the expected results (especially in the presence of noisy data),
# conversely using the :py:class:`pylops.optimization.sparsity.ista` and
# :py:class:`pylops.optimization.sparsity.fista` solvers allows us
# to successfully retrieve the input signal even in the presence of noise.
# :py:class:`pylops.optimization.sparsity.fista` shows faster convergence which
# is particularly useful for this problem.
nt = 61
dt = 0.004
t = np.arange(nt) * dt
# sparse reflectivity: three spikes of different amplitude and polarity
x = np.zeros(nt)
x[10] = -0.4
x[int(nt / 2)] = 1
x[nt - 20] = 0.5
h, th, hcenter = pylops.utils.wavelets.ricker(t[:101], f0=20)
Cop = pylops.signalprocessing.Convolve1D(nt, h=h, offset=hcenter, dtype="float32")
y = Cop * x
yn = y + np.random.normal(0, 0.1, y.shape)
# noise free
xls = Cop / y
xomp, nitero, costo = pylops.optimization.sparsity.omp(
    Cop, y, niter_outer=200, sigma=1e-8
)
xista, niteri, costi = pylops.optimization.sparsity.ista(
    Cop,
    y,
    niter=400,
    eps=5e-1,
    tol=1e-8,
)
fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(t, x, "k", lw=8, label=r"$x$")
ax.plot(t, y, "r", lw=4, label=r"$y=Ax$")
ax.plot(t, xls, "--g", lw=4, label=r"$x_{LS}$")
ax.plot(t, xomp, "--b", lw=4, label=r"$x_{OMP} (niter=%d)$" % nitero)
ax.plot(t, xista, "--m", lw=4, label=r"$x_{ISTA} (niter=%d)$" % niteri)
ax.set_title("Noise-free deconvolution", fontsize=14, fontweight="bold")
ax.legend()
plt.tight_layout()
# noisy
xls = pylops.optimization.leastsquares.regularized_inversion(
    Cop, yn, [], **dict(damp=1e-1, atol=1e-3, iter_lim=100, show=0)
)[0]
xista, niteri, costi = pylops.optimization.sparsity.ista(
    Cop,
    yn,
    niter=100,
    eps=5e-1,
    tol=1e-5,
)
xfista, niterf, costf = pylops.optimization.sparsity.fista(
    Cop,
    yn,
    niter=100,
    eps=5e-1,
    tol=1e-5,
)
fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.plot(t, x, "k", lw=8, label=r"$x$")
ax.plot(t, y, "r", lw=4, label=r"$y=Ax$")
ax.plot(t, yn, "--b", lw=4, label=r"$y_n$")
ax.plot(t, xls, "--g", lw=4, label=r"$x_{LS}$")
ax.plot(t, xista, "--m", lw=4, label=r"$x_{ISTA} (niter=%d)$" % niteri)
ax.plot(t, xfista, "--y", lw=4, label=r"$x_{FISTA} (niter=%d)$" % niterf)
ax.set_title("Noisy deconvolution", fontsize=14, fontweight="bold")
ax.legend()
plt.tight_layout()
fig, ax = plt.subplots(1, 1, figsize=(8, 3))
ax.semilogy(costi, "m", lw=2, label=r"$x_{ISTA} (niter=%d)$" % niteri)
ax.semilogy(costf, "y", lw=2, label=r"$x_{FISTA} (niter=%d)$" % niterf)
ax.set_title("Cost function", size=15, fontweight="bold")
ax.set_xlabel("Iteration")
ax.legend()
ax.grid(True, which="both")
plt.tight_layout()
| 5,619 | 30.222222 | 85 | py |
pylops | pylops-master/examples/plot_convolve.py | """
Convolution
===========
This example shows how to use the :py:class:`pylops.signalprocessing.Convolve1D`,
:py:class:`pylops.signalprocessing.Convolve2D` and
:py:class:`pylops.signalprocessing.ConvolveND` operators to perform convolution
between two signals.
Such operators can be used in the forward model of several common application
in signal processing that require filtering of an input signal for the
instrument response. Similarly, removing the effect of the instrument
response from a signal is equivalent to solving a linear system of equations
based on Convolve1D, Convolve2D or ConvolveND operators.
This problem is generally referred to as *Deconvolution*.
A very practical example of deconvolution can be found in the geophysical
processing of seismic data where the effect of the source response
(i.e., airgun or vibroseis) should be removed from the recorded signal
to be able to better interpret the response of the subsurface. Similar examples
can be found in telecommunication and speech analysis.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse.linalg import lsqr
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
###############################################################################
# We will start by creating a zero signal of length :math:`nt` and we will
# place a unitary spike at its center. We also create our filter to be
# applied by means of :py:class:`pylops.signalprocessing.Convolve1D` operator.
# Following the seismic example mentioned above, the filter is a
# `Ricker wavelet <http://subsurfwiki.org/wiki/Ricker_wavelet>`_
# with dominant frequency :math:`f_0 = 30 Hz`.
nt = 1001
dt = 0.004
t = np.arange(nt) * dt
x = np.zeros(nt)
x[int(nt / 2)] = 1
h, th, hcenter = ricker(t[:101], f0=30)
Cop = pylops.signalprocessing.Convolve1D(nt, h=h, offset=hcenter, dtype="float32")
y = Cop * x
xinv = Cop / y
fig, ax = plt.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, "k", lw=2, label=r"$x$")
ax.plot(t, y, "r", lw=2, label=r"$y=Ax$")
ax.plot(t, xinv, "--g", lw=2, label=r"$x_{ext}$")
ax.set_title("Convolve 1d data", fontsize=14, fontweight="bold")
ax.legend()
ax.set_xlim(1.9, 2.1)
plt.tight_layout()
###############################################################################
# We show now that also a filter with mixed phase (i.e., not centered
# around zero) can be applied and inverted for using the
# :py:class:`pylops.signalprocessing.Convolve1D`
# operator.
# shift the filter center by 3 samples to make the wavelet mixed phase
Cop = pylops.signalprocessing.Convolve1D(nt, h=h, offset=hcenter - 3, dtype="float32")
y = Cop * x
y1 = Cop.H * x
xinv = Cop / y
fig, ax = plt.subplots(1, 1, figsize=(10, 3))
ax.plot(t, x, "k", lw=2, label=r"$x$")
ax.plot(t, y, "r", lw=2, label=r"$y=Ax$")
ax.plot(t, y1, "b", lw=2, label=r"$y=A^Hx$")
ax.plot(t, xinv, "--g", lw=2, label=r"$x_{ext}$")
ax.set_title(
    "Convolve 1d data with non-zero phase filter", fontsize=14, fontweight="bold"
)
ax.set_xlim(1.9, 2.1)
ax.legend()
plt.tight_layout()
###############################################################################
# We repeat a similar exercise but using two dimensional signals and
# filters taking advantage of the
# :py:class:`pylops.signalprocessing.Convolve2D` operator.
nt = 51
nx = 81
dt = 0.004
t = np.arange(nt) * dt
x = np.zeros((nt, nx))
x[int(nt / 2), int(nx / 2)] = 1  # single unitary spike in the middle
nh = [11, 5]  # filter size along (t, x)
h = np.ones((nh[0], nh[1]))  # boxcar filter
Cop = pylops.signalprocessing.Convolve2D(
    (nt, nx),
    h=h,
    # center of the filter as integer sample indices. The original
    # ``(int(nh[0]) / 2, int(nh[1]) / 2)`` produced floats (5.5, 2.5) due to a
    # misplaced parenthesis; floor division gives the intended (5, 2).
    offset=(nh[0] // 2, nh[1] // 2),
    dtype="float32",
)
y = Cop * x
# invert the convolution (Cop / y solves the least-squares problem)
xinv = (Cop / y.ravel()).reshape(Cop.dims)
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
fig.suptitle("Convolve 2d data", fontsize=14, fontweight="bold", y=0.95)
axs[0].imshow(x, cmap="gray", vmin=-1, vmax=1)
axs[1].imshow(y, cmap="gray", vmin=-1, vmax=1)
axs[2].imshow(xinv, cmap="gray", vmin=-1, vmax=1)
axs[0].set_title("x")
axs[0].axis("tight")
axs[1].set_title("y")
axs[1].axis("tight")
axs[2].set_title("xlsqr")
axs[2].axis("tight")
plt.tight_layout()
plt.subplots_adjust(top=0.8)
fig, ax = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Convolve in 2d data - traces", fontsize=14, fontweight="bold", y=0.95)
ax[0].plot(x[int(nt / 2), :], "k", lw=2, label=r"$x$")
ax[0].plot(y[int(nt / 2), :], "r", lw=2, label=r"$y=Ax$")
ax[0].plot(xinv[int(nt / 2), :], "--g", lw=2, label=r"$x_{ext}$")
ax[1].plot(x[:, int(nx / 2)], "k", lw=2, label=r"$x$")
ax[1].plot(y[:, int(nx / 2)], "r", lw=2, label=r"$y=Ax$")
ax[1].plot(xinv[:, int(nx / 2)], "--g", lw=2, label=r"$x_{ext}$")
ax[0].legend()
ax[0].set_xlim(30, 50)
ax[1].legend()
ax[1].set_xlim(10, 40)
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# Finally we do the same using three dimensional signals and
# filters taking advantage of the
# :py:class:`pylops.signalprocessing.ConvolveND` operator.
ny, nx, nz = 13, 10, 7
x = np.zeros((ny, nx, nz))
x[ny // 3, nx // 2, nz // 4] = 1
h = np.ones((3, 5, 3))
offset = [1, 2, 1]  # filter center along each axis
Cop = pylops.signalprocessing.ConvolveND(
    dims=(ny, nx, nz), h=h, offset=offset, axes=(0, 1, 2), dtype="float32"
)
y = Cop * x
xlsqr = lsqr(Cop, y.ravel(), damp=0, iter_lim=300, show=0)[0]
xlsqr = xlsqr.reshape(Cop.dims)
fig, axs = plt.subplots(3, 3, figsize=(10, 12))
fig.suptitle("Convolve 3d data", y=0.95, fontsize=14, fontweight="bold")
axs[0][0].imshow(x[ny // 3], cmap="gray", vmin=-1, vmax=1)
axs[0][1].imshow(y[ny // 3], cmap="gray", vmin=-1, vmax=1)
axs[0][2].imshow(xlsqr[ny // 3], cmap="gray", vmin=-1, vmax=1)
axs[0][0].set_title("x")
axs[0][0].axis("tight")
axs[0][1].set_title("y")
axs[0][1].axis("tight")
axs[0][2].set_title("xlsqr")
axs[0][2].axis("tight")
axs[1][0].imshow(x[:, nx // 2], cmap="gray", vmin=-1, vmax=1)
axs[1][1].imshow(y[:, nx // 2], cmap="gray", vmin=-1, vmax=1)
axs[1][2].imshow(xlsqr[:, nx // 2], cmap="gray", vmin=-1, vmax=1)
axs[1][0].axis("tight")
axs[1][1].axis("tight")
axs[1][2].axis("tight")
axs[2][0].imshow(x[..., nz // 4], cmap="gray", vmin=-1, vmax=1)
axs[2][1].imshow(y[..., nz // 4], cmap="gray", vmin=-1, vmax=1)
axs[2][2].imshow(xlsqr[..., nz // 4], cmap="gray", vmin=-1, vmax=1)
axs[2][0].axis("tight")
axs[2][1].axis("tight")
axs[2][2].axis("tight")
plt.tight_layout()
| 6,196 | 34.411429 | 86 | py |
pylops | pylops-master/examples/plot_l1l1.py | r"""
L1-L1 IRLS
==========
This example shows how to use the :py:class:`pylops.optimization.sparsity.irls` solver to
solve problems in the form:
.. math::
J = \left\| \mathbf{y}-\mathbf{Ax}\right\|_{1} + \epsilon \left\|\mathbf{x}\right\|_{1}
This can be easily achieved by recasting the problem into this equivalent formulation:
.. math::
J = \left\|\left[\begin{array}{c}
\mathbf{A} \\
\epsilon \mathbf{I}
\end{array}\right] \mathbf{x}-\left[\begin{array}{l}
\mathbf{y} \\
\mathbf{0}
\end{array}\right]\right\|_{1}
and solving it using the classical version of the IRLS solver with L1 norm on the data term. In PyLops,
the creation of the augmented system happens under the hood when users provide the following optional
parameter (``kind="datamodel"``) to the solver.
We will now consider a 1D deconvolution problem where the signal is contaminated with Laplace noise.
We will compare the classical L2-L1 IRLS solver that works optimally under the condition of Gaussian
noise with the above described L1-L1 IRLS solver that is best suited to the case of Laplace noise.
"""
import random
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(10)
random.seed(0)
###############################################################################
# Let's start by creating a spiky input signal and convolving it with a Ricker
# wavelet.
dt = 0.004
nt = 201
t = np.arange(nt) * dt
nspikes = 5
x = np.zeros(nt)
x[random.sample(range(0, nt - 1), nspikes)] = -1 + 2 * np.random.rand(nspikes)
h, th, hcenter = pylops.utils.wavelets.ricker(t[:101], f0=20)
Cop = pylops.signalprocessing.Convolve1D(nt, h=h, offset=hcenter)
y = Cop @ x
###############################################################################
# We add now a realization of Laplace-distributed noise to our signal and
# perform a standard spiky deconvolution
yn = y + np.random.laplace(loc=0.0, scale=0.05, size=y.shape)
xl2l1 = pylops.optimization.sparsity.irls(
Cop,
yn,
threshR=True,
kind="model",
nouter=100,
epsR=1e-4,
epsI=1.0,
warm=True,
**dict(iter_lim=100),
)[0]
xl1l1 = pylops.optimization.sparsity.irls(
Cop,
yn,
threshR=True,
kind="datamodel",
nouter=100,
epsR=1e-4,
epsI=1.0,
warm=True,
**dict(iter_lim=100),
)[0]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(12, 5))
axs[0].plot(t, y, "k", lw=4, label="Clean")
axs[0].plot(t, yn, "r", lw=2, label="Noisy")
axs[0].legend()
axs[0].set_title("Data")
axs[1].plot(t, x, "k", lw=4, label="L2-L1")
axs[1].plot(
t,
xl2l1,
"r",
lw=2,
label=f"L2-L1 (NMSE={(np.linalg.norm(xl2l1 - x)/np.linalg.norm(x)):.2f})",
)
axs[1].plot(
t,
xl1l1,
"c",
lw=2,
label=f"L1-L1 (NMSE={(np.linalg.norm(xl1l1 - x)/np.linalg.norm(x)):.2f})",
)
axs[1].legend()
axs[1].set_xlabel("t")
plt.tight_layout()
| 2,945 | 26.027523 | 103 | py |
pylops | pylops-master/examples/plot_shift.py | r"""
Shift
=====
This example shows how to use the :py:class:`pylops.signalprocessing.Shift`
operator to apply fractional delay to an input signal. Whilst this operator
acts on 1D signals it can also be applied on any multi-dimensional signal on
a specific direction of it.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start with a 1D example. Define the input parameters: number of samples
# of input signal (``nt``), sampling step (``dt``) as well as the input
# signal which will be equal to a ricker wavelet:
nt = 127
dt = 0.004
t = np.arange(nt) * dt
ntwav = 41
wav = pylops.utils.wavelets.ricker(t[:ntwav], f0=20)[0]
wav = np.pad(wav, [0, nt - len(wav)])
WAV = np.fft.rfft(wav, n=nt)
###############################################################################
# We can shift this wavelet by :math:`5.5\mathrm{dt}`:
shift = 5.5 * dt
Op = pylops.signalprocessing.Shift(nt, shift, sampling=dt, real=True, dtype=np.float64)
wavshift = Op * wav
wavshiftback = Op.H * wavshift
plt.figure(figsize=(10, 3))
plt.plot(t, wav, "k", lw=2, label="Original")
plt.plot(t, wavshift, "r", lw=2, label="Shifted")
plt.plot(t, wavshiftback, "--b", lw=2, label="Adjoint")
plt.axvline(t[ntwav - 1], color="k")
plt.axvline(t[ntwav - 1] + shift, color="r")
plt.xlim(0, 0.3)
plt.legend()
plt.title("1D Shift")
plt.tight_layout()
###############################################################################
# We can repeat the same exercise for a 2D signal and perform the shift
# along the first and second dimensions.
shift = 10.5 * dt
# 1st axis
wav2d = np.outer(wav, np.ones(10))
Op = pylops.signalprocessing.Shift(
(nt, 10), shift, axis=0, sampling=dt, real=True, dtype=np.float64
)
wav2dshift = Op * wav2d
wav2dshiftback = Op.H * wav2dshift
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].imshow(wav2d, cmap="gray")
axs[0].axis("tight")
axs[0].set_title("Original")
axs[1].imshow(wav2dshift, cmap="gray")
axs[1].set_title("Shifted")
axs[1].axis("tight")
axs[2].imshow(wav2dshiftback, cmap="gray")
axs[2].set_title("Adjoint")
axs[2].axis("tight")
fig.tight_layout()
# 2nd axis
wav2d = np.outer(wav, np.ones(10)).T
Op = pylops.signalprocessing.Shift(
(10, nt), shift, axis=1, sampling=dt, real=True, dtype=np.float64
)
wav2dshift = Op * wav2d
wav2dshiftback = Op.H * wav2dshift
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].imshow(wav2d, cmap="gray")
axs[0].axis("tight")
axs[0].set_title("Original")
axs[1].imshow(wav2dshift, cmap="gray")
axs[1].set_title("Shifted")
axs[1].axis("tight")
axs[2].imshow(wav2dshiftback, cmap="gray")
axs[2].set_title("Adjoint")
axs[2].axis("tight")
fig.tight_layout()
###############################################################################
# Finally we consider a more generic case where we apply a trace varying shift
shift = dt * np.arange(10)
wav2d = np.outer(wav, np.ones(10))
Op = pylops.signalprocessing.Shift(
(nt, 10), shift, axis=0, sampling=dt, real=True, dtype=np.float64
)
wav2dshift = Op * wav2d
wav2dshiftback = Op.H * wav2dshift
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
axs[0].imshow(wav2d, cmap="gray")
axs[0].axis("tight")
axs[0].set_title("Original")
axs[1].imshow(wav2dshift, cmap="gray")
axs[1].set_title("Shifted")
axs[1].axis("tight")
axs[2].imshow(wav2dshiftback, cmap="gray")
axs[2].set_title("Adjoint")
axs[2].axis("tight")
fig.tight_layout()
| 3,478 | 28.483051 | 87 | py |
pylops | pylops-master/examples/plot_spread.py | r"""
Spread How-to
================
This example focuses on the :py:class:`pylops.basicoperators.Spread` operator,
which is a highly versatile operator in PyLops to perform spreading/stacking
operations in a vectorized manner (or efficiently via Numba-jitted ``for`` loops).
The :py:class:`pylops.basicoperators.Spread` is powerful in its generality, but
it may not be obvious for at first how to structure your code to leverage it properly.
While it is highly recommended for advanced users to inspect the
:py:class:`pylops.signalprocessing.Radon2D` and
:py:class:`pylops.signalprocessing.Radon3D` operators since
they are built using the :py:class:`pylops.basicoperators.Spread` class,
here we provide a simple example on how to get started.
In this example we will recreate a simplified version of the famous linear
`Radon operator <https://en.wikipedia.org/wiki/Radon_transform>`_, which stacks
data along straight lines with a given intercept and slope.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
############################################
# Let's first define the time and space axes as well as some auxiliary input
# parameters that we will use to create a Ricker wavelet
par = {
"ox": -200,
"dx": 2,
"nx": 201,
"ot": 0,
"dt": 0.004,
"nt": 501,
"f0": 20,
"nfmax": 210,
}
# Create axis
t, _, x, _ = pylops.utils.seismicevents.makeaxis(par)
# Create centered Ricker wavelet
t_wav = np.arange(41) * par["dt"]
wav, _, _ = pylops.utils.wavelets.ricker(t_wav, f0=par["f0"])
############################################
# We will create a 2d data with a number of crossing linear events, to which we will
# later apply our Radon transforms. We use the convenience function
# :py:func:`pylops.utils.seismicevents.linear2d`.
v = 1500 # m/s
t0 = [0.2, 0.7, 1.6] # seconds
theta = [40, 0, -60] # degrees
amp = [1.0, 0.6, -2.0]
mlin, mlinwav = pylops.utils.seismicevents.linear2d(x, t, v, t0, theta, amp, wav)
############################################
# Let's now define the slowness axis and use :py:class:`pylops.signalprocessing.Radon2D`
# to implement our benchmark linear Radon. Refer to the documentation of the
# operator for a more detailed mathematical description of linear Radon.
# Note that ``pxmax`` is in s/m, which explains the small value. Its highest value
# corresponds to the lowest value of velocity in the transform. In this case we choose that
# to be 1000 m/s.
npx, pxmax = 41, 1e-3
px = np.linspace(-pxmax, pxmax, npx)
RLop = pylops.signalprocessing.Radon2D(
t, x, px, centeredh=False, kind="linear", interp=False, engine="numpy"
)
# Compute adjoint = Radon transform
mlinwavR = RLop.H * mlinwav
############################################
# Now, let's try to reimplement this operator from scratch using :py:class:`pylops.basicoperators.Spread`.
# Using the on-the-fly approach, and we need to create a function which takes
# indices of the model domain, here :math:`(p_x, t_0)`
# where :math:`p_x` is the slope and :math:`t_0` is the intercept of the
# parametric curve :math:`t(x) = t_0 + p_x x` we wish to spread the model over
# in the data domain. The function must return an array of size ``nx``, containing
# the indices corresponding to :math:`t(x)`.
#
# The on-the-fly approach is useful when storing the indices in RAM may exhaust
# resources, especially when computing the indices is fast. When there is
# enough memory to store the full table of indices
# (an array of size :math:`n_x \times n_t \times n_{p_x}`) the
# :py:class:`pylops.basicoperators.Spread` operator can be used with tables instead.
# We will see an example of this later.
#
# Returning to our on-the-fly example, we need to create a function which only depends on
# ``ipx`` and ``it0``, so we create a closure around it with all our other auxiliary
# variables.
def create_radon_fh(xaxis, taxis, pxaxis):
    """Build an on-the-fly index function for a linear Radon transform.

    Parameters
    ----------
    xaxis : np.ndarray
        Spatial axis of the data domain.
    taxis : np.ndarray
        Regularly sampled time axis of the data domain.
    pxaxis : np.ndarray
        Slowness (slope) axis of the Radon domain.

    Returns
    -------
    callable
        Function ``fh(ipx, it0)`` returning, for every position in ``xaxis``,
        the (float) time-sample index of the line ``t(x) = t0 + px * x``.
        Indices falling outside the time axis are set to ``np.nan`` so that
        :py:class:`pylops.Spread` skips them.
    """
    ot = taxis[0]
    dt = taxis[1] - taxis[0]
    nt = len(taxis)

    def fh(ipx, it0):
        # Parametric line t(x) = t0 + px * x evaluated over the whole x axis.
        # Bug fix: use the closed-over ``taxis`` here — the original read the
        # module-level global ``t``, which silently broke the factory whenever
        # it was called with a time axis other than the global one.
        tx = taxis[it0] + xaxis * pxaxis[ipx]
        # Fractional sample index, rounded to the nearest integer sample
        it0_frac = (tx - ot) / dt
        itx = np.rint(it0_frac)
        # Indices outside the time axis are flagged with NaN
        itx[np.isin(itx, range(nt), invert=True)] = np.nan
        return itx

    return fh
fRad = create_radon_fh(x, t, px)
ROTFOp = pylops.Spread((npx, par["nt"]), (par["nx"], par["nt"]), fh=fRad)
mlinwavROTF = ROTFOp.H * mlinwav
############################################
# Compare the results between the native Radon transform and the one using our
# on-the-fly :py:class:`pylops.basicoperators.Spread`.
fig, axs = plt.subplots(1, 3, figsize=(9, 5), sharey=True)
axs[0].imshow(
mlinwav.T,
aspect="auto",
interpolation="nearest",
vmin=-1,
vmax=1,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_title("Linear events", fontsize=12, fontweight="bold")
axs[0].set_xlabel(r"$x$ [m]")
axs[0].set_ylabel(r"$t$ [s]")
axs[1].imshow(
mlinwavR.T,
aspect="auto",
interpolation="nearest",
vmin=-10,
vmax=10,
cmap="gray",
extent=(px.min(), px.max(), t.max(), t.min()),
)
axs[1].set_title("Native Linear Radon", fontsize=12, fontweight="bold")
axs[1].set_xlabel(r"$p_x$ [s/m]")
axs[1].ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
axs[2].imshow(
mlinwavROTF.T,
aspect="auto",
interpolation="nearest",
vmin=-10,
vmax=10,
cmap="gray",
extent=(px.min(), px.max(), t.max(), t.min()),
)
axs[2].set_title("On-the-fly Linear Radon", fontsize=12, fontweight="bold")
axs[2].set_xlabel(r"$p_x$ [s/m]")
axs[2].ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
fig.tight_layout()
############################################
# Finally, we will re-implement the example above using pre-computed tables.
# This is useful when ``fh`` is expensive to compute, or requires manual edition
# prior to usage.
#
# Using a table instead of a function is simple, we just need to apply ``fh`` to
# all our points and store the results.
def create_table(npx, nt, nx):
    """Precompute the full ``(npx, nt, nx)`` table of time-sample indices.

    Evaluates the on-the-fly index function ``fRad`` at every
    ``(slope, intercept)`` pair; entries outside the time axis are NaN.
    """
    table = np.empty((npx, nt, nx))
    for islope in range(npx):
        for itime in range(nt):
            table[islope, itime] = fRad(islope, itime)
    return table
table = create_table(npx, par["nt"], par["nx"])
RPCOp = pylops.Spread((npx, par["nt"]), (par["nx"], par["nt"]), table=table)
mlinwavRPC = RPCOp.H * mlinwav
############################################
# Compare the results between the pre-computed or on-the-fly Radon transforms
fig, axs = plt.subplots(1, 3, figsize=(9, 5), sharey=True)
axs[0].imshow(
mlinwav.T,
aspect="auto",
interpolation="nearest",
vmin=-1,
vmax=1,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_title("Linear events", fontsize=12, fontweight="bold")
axs[0].set_xlabel(r"$x$ [m]")
axs[0].set_ylabel(r"$t$ [s]")
axs[1].imshow(
mlinwavRPC.T,
aspect="auto",
interpolation="nearest",
vmin=-10,
vmax=10,
cmap="gray",
extent=(px.min(), px.max(), t.max(), t.min()),
)
axs[1].set_title("Pre-computed Linear Radon", fontsize=12, fontweight="bold")
axs[1].set_xlabel(r"$p_x$ [s/m]")
axs[1].ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
axs[2].imshow(
mlinwavROTF.T,
aspect="auto",
interpolation="nearest",
vmin=-10,
vmax=10,
cmap="gray",
extent=(px.min(), px.max(), t.max(), t.min()),
)
axs[2].set_title("On-the-fly Linear Radon", fontsize=12, fontweight="bold")
axs[2].set_xlabel(r"$p_x$ [s/m]")
axs[2].ticklabel_format(style="sci", axis="x", scilimits=(0, 0))
fig.tight_layout()
| 7,574 | 32.223684 | 106 | py |
pylops | pylops-master/examples/plot_chirpradon.py | r"""
Chirp Radon Transform
=====================
This example shows how to use the :py:class:`pylops.signalprocessing.ChirpRadon2D`
and :py:class:`pylops.signalprocessing.ChirpRadon3D` operators to apply the
linear Radon Transform to 2-dimensional or 3-dimensional signals, respectively.
When working with the linear Radon transform, this is a faster implementation
compared to in :py:class:`pylops.signalprocessing.Radon2D` and
:py:class:`pylops.signalprocessing.Radon3D` and should be preferred.
This method provides also an analytical inverse.
Note that the forward and adjoint definitions in these two pairs of operators
are swapped.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by creating a empty 2d matrix of size :math:`n_x \times n_t`
# with a single linear event.
par = {
"ot": 0,
"dt": 0.004,
"nt": 51,
"ox": -250,
"dx": 10,
"nx": 51,
"oy": -250,
"dy": 10,
"ny": 51,
"f0": 40,
}
theta = [0.0]
t0 = [0.1]
amp = [1.0]
# Create axes
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
dt, dx, dy = par["dt"], par["dx"], par["dy"]
# Create wavelet
wav, _, wav_c = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"])
# Generate data
_, d = pylops.utils.seismicevents.linear2d(x, t, 1500.0, t0, theta, amp, wav)
###############################################################################
# We can now define our operators and apply the forward, adjoint and inverse
# steps.
npx, pxmax = par["nx"], 5e-4
px = np.linspace(-pxmax, pxmax, npx)
R2Op = pylops.signalprocessing.ChirpRadon2D(t, x, pxmax * dx / dt, dtype="float64")
dL_chirp = R2Op * d
dadj_chirp = R2Op.H * dL_chirp
dinv_chirp = R2Op.inverse(dL_chirp).reshape(R2Op.dimsd)
fig, axs = plt.subplots(1, 4, figsize=(12, 4), sharey=True)
axs[0].imshow(d.T, vmin=-1, vmax=1, cmap="bwr_r", extent=(x[0], x[-1], t[-1], t[0]))
axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input model")
axs[0].axis("tight")
axs[1].imshow(
dL_chirp.T,
cmap="bwr_r",
vmin=-dL_chirp.max(),
vmax=dL_chirp.max(),
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1].set(xlabel=r"$p$ [s/km]", title="Radon Chirp")
axs[1].axis("tight")
axs[2].imshow(
dadj_chirp.T,
cmap="bwr_r",
vmin=-dadj_chirp.max(),
vmax=dadj_chirp.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon Chirp")
axs[2].axis("tight")
axs[3].imshow(
dinv_chirp.T,
cmap="bwr_r",
vmin=-d.max(),
vmax=d.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[3].set(xlabel=r"$x$ [m]", title="Inv Radon Chirp")
axs[3].axis("tight")
plt.tight_layout()
###############################################################################
# Finally we repeat the same exercise with 3d data.
par = {
"ot": 0,
"dt": 0.004,
"nt": 51,
"ox": -400,
"dx": 10,
"nx": 81,
"oy": -600,
"dy": 10,
"ny": 61,
"f0": 20,
}
theta = [10]
phi = [0]
t0 = [0.1]
amp = [1.0]
# Create axes
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
dt, dx, dy = par["dt"], par["dx"], par["dy"]
# Generate data
_, d = pylops.utils.seismicevents.linear3d(x, y, t, 1500.0, t0, theta, phi, amp, wav)
npy, pymax = par["ny"], 3e-4
npx, pxmax = par["nx"], 5e-4
py = np.linspace(-pymax, pymax, npy)
px = np.linspace(-pxmax, pxmax, npx)
R3Op = pylops.signalprocessing.ChirpRadon3D(
t, y, x, (pymax * dy / dt, pxmax * dx / dt), dtype="float64"
)
dL_chirp = R3Op * d
dadj_chirp = R3Op.H * dL_chirp
dinv_chirp = R3Op.inverse(dL_chirp).reshape(R3Op.dimsd)
fig, axs = plt.subplots(1, 4, figsize=(12, 4), sharey=True)
axs[0].imshow(
d[par["ny"] // 2].T,
vmin=-1,
vmax=1,
cmap="bwr_r",
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[0].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Input model")
axs[0].axis("tight")
axs[1].imshow(
dL_chirp[par["ny"] // 2].T,
cmap="bwr_r",
vmin=-dL_chirp.max(),
vmax=dL_chirp.max(),
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1].set(xlabel=r"$p_x$ [s/km]", title="Radon Chirp")
axs[1].axis("tight")
axs[2].imshow(
dadj_chirp[par["ny"] // 2].T,
cmap="bwr_r",
vmin=-dadj_chirp.max(),
vmax=dadj_chirp.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[2].set(xlabel=r"$x$ [m]", title="Adj Radon Chirp")
axs[2].axis("tight")
axs[3].imshow(
dinv_chirp[par["ny"] // 2].T,
cmap="bwr_r",
vmin=-d.max(),
vmax=d.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[3].set(xlabel=r"$x$ [m]", title="Inv Radon Chirp")
axs[3].axis("tight")
plt.tight_layout()
fig, axs = plt.subplots(1, 4, figsize=(12, 4), sharey=True)
axs[0].imshow(
d[:, par["nx"] // 2].T,
vmin=-1,
vmax=1,
cmap="bwr_r",
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[0].set(xlabel=r"$y$ [m]", ylabel=r"$t$ [s]", title="Input model")
axs[0].axis("tight")
axs[1].imshow(
dL_chirp[:, 2 * par["nx"] // 3].T,
cmap="bwr_r",
vmin=-dL_chirp.max(),
vmax=dL_chirp.max(),
extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]),
)
axs[1].set(xlabel=r"$p_y$ [s/km]", title="Radon Chirp")
axs[1].axis("tight")
axs[2].imshow(
dadj_chirp[:, par["nx"] // 2].T,
cmap="bwr_r",
vmin=-dadj_chirp.max(),
vmax=dadj_chirp.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[2].set(xlabel=r"$y$ [m]", title="Adj Radon Chirp")
axs[2].axis("tight")
axs[3].imshow(
dinv_chirp[:, par["nx"] // 2].T,
cmap="bwr_r",
vmin=-d.max(),
vmax=d.max(),
extent=(x[0], x[-1], t[-1], t[0]),
)
axs[3].set(xlabel=r"$y$ [m]", title="Inv Radon Chirp")
axs[3].axis("tight")
plt.tight_layout()
| 5,674 | 25.273148 | 85 | py |
pylops | pylops-master/examples/plot_slopeest.py | r"""
Slope estimation via Structure Tensor algorithm
===============================================
This example shows how to estimate local slopes or local dips of a two-dimensional
array using :py:func:`pylops.utils.signalprocessing.slope_estimate` and
:py:func:`pylops.utils.signalprocessing.dip_estimate`.
Knowing the local slopes of an image (or a seismic data) can be useful for
a variety of tasks in image (or geophysical) processing such as denoising,
smoothing, or interpolation. When slopes are used with the
:py:class:`pylops.signalprocessing.Seislet` operator, the input dataset can be
compressed and the sparse nature of the Seislet transform can also be used to
precondition sparsity-promoting inverse problems.
We will show examples of a variety of different settings, including a comparison
with the original implementation in [1].
.. [1] van Vliet, L. J., Verbeek, P. W., "Estimators for orientation and
anisotropy in digitized images", Journal ASCI Imaging Workshop. 1995.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.image import imread
from matplotlib.ticker import FuncFormatter, MultipleLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pylops
from pylops.signalprocessing.seislet import _predict_trace
from pylops.utils.signalprocessing import dip_estimate, slope_estimate
plt.close("all")
np.random.seed(10)
###############################################################################
# Python logo
# -----------
# To start we import a 2d image and estimate the local dips of the image.
im = np.load("../testdata/python.npy")[..., 0]
im = im / 255.0 - 0.5
angles, anisotropy = dip_estimate(im, smooth=7)
angles = -np.rad2deg(angles)
###############################################################################
fig, axs = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)
iax = axs[0].imshow(im, cmap="viridis", origin="lower")
axs[0].set_title("Data")
cax = make_axes_locatable(axs[0]).append_axes("right", size="5%", pad=0.05)
cax.axis("off")
iax = axs[1].imshow(angles, cmap="twilight_shifted", origin="lower", vmin=-90, vmax=90)
axs[1].set_title("Angle of incline")
cax = make_axes_locatable(axs[1]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(
iax,
ticks=MultipleLocator(30),
format=FuncFormatter(lambda x, pos: "{:.0f}°".format(x)),
cax=cax,
orientation="vertical",
)
iax = axs[2].imshow(anisotropy, cmap="Reds", origin="lower", vmin=0, vmax=1)
axs[2].set_title("Anisotropy")
cax = make_axes_locatable(axs[2]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(iax, cax=cax, orientation="vertical")
fig.tight_layout()
###############################################################################
# Seismic data
# ------------
# We can now repeat the same using some seismic data. We will first define
# a single trace and a slope field, apply such slope field to the trace
# recursively to create the other traces of the data and finally try to recover
# the underlying slope field from the data alone.
# Reflectivity model
nx, nt = 2**7, 121
dx, dt = 0.01, 0.004
x, t = np.arange(nx) * dx, np.arange(nt) * dt
nspike = nt // 8
refl = np.zeros(nt)
it = np.sort(np.random.permutation(range(10, nt - 20))[:nspike])
refl[it] = np.random.normal(0.0, 1.0, nspike)
# Wavelet
ntwav = 41
f0 = 30
twav = np.arange(ntwav) * dt
wav, *_ = pylops.utils.wavelets.ricker(twav, f0)
# Input trace
trace = np.convolve(refl, wav, mode="same")
# Slopes
theta = np.deg2rad(np.linspace(0, 30, nx))
slope = np.outer(np.ones(nt), np.tan(theta) * dt / dx)
# Model data
d = np.zeros((nt, nx))
tr = trace.copy()
for ix in range(nx):
tr = _predict_trace(tr, t, dt, dx, slope[:, ix])
d[:, ix] = tr
# Estimate slopes
slope_est, _ = slope_estimate(d, dt, dx, smooth=10)
slope_est *= -1
###############################################################################
fig, axs = plt.subplots(2, 2, figsize=(6, 6), sharex=True, sharey=True)
opts = dict(aspect="auto", extent=(x[0], x[-1], t[-1], t[0]))
iax = axs[0, 0].imshow(d, cmap="gray", vmin=-1, vmax=1, **opts)
axs[0, 0].set(title="Data", ylabel="Time [s]")
cax = make_axes_locatable(axs[0, 0]).append_axes("right", size="5%", pad=0.05)
fig.colorbar(iax, cax=cax, orientation="vertical")
opts.update(dict(cmap="cividis", vmin=np.min(slope), vmax=np.max(slope)))
iax = axs[0, 1].imshow(slope, **opts)
axs[0, 1].set(title="True Slope")
cax = make_axes_locatable(axs[0, 1]).append_axes("right", size="5%", pad=0.05)
fig.colorbar(iax, cax=cax, orientation="vertical")
cax.set_ylabel("[s/km]")
iax = axs[1, 0].imshow(np.abs(slope - slope_est), **opts)
axs[1, 0].set(
title="Estimate absolute error", ylabel="Time [s]", xlabel="Position [km]"
)
cax = make_axes_locatable(axs[1, 0]).append_axes("right", size="5%", pad=0.05)
fig.colorbar(iax, cax=cax, orientation="vertical")
cax.set_ylabel("[s/km]")
iax = axs[1, 1].imshow(slope_est, **opts)
axs[1, 1].set(title="Estimated Slope", xlabel="Position [km]")
cax = make_axes_locatable(axs[1, 1]).append_axes("right", size="5%", pad=0.05)
fig.colorbar(iax, cax=cax, orientation="vertical")
cax.set_ylabel("[s/km]")
fig.tight_layout()
###############################################################################
# Concentric circles
# ------------------
# The original paper by van Vliet and Verbeek [1] has an example with concentric
# circles. We recover their original images and compare our implementation with
# theirs.
def rgb2gray(rgb):
    """Convert an RGB(A) image to grayscale using ITU-R BT.601 luma weights."""
    luma_weights = np.array([0.2989, 0.5870, 0.1140])
    # Drop any alpha channel, then contract the color axis against the weights
    return rgb[..., :3] @ luma_weights
circles_input = rgb2gray(imread("../testdata/slope_estimate/concentric.png"))
circles_angles = rgb2gray(imread("../testdata/slope_estimate/concentric_angles.png"))
angles, anisos_sm0 = dip_estimate(circles_input, smooth=0)
angles_sm0 = np.rad2deg(angles)
angles, anisos_sm4 = dip_estimate(circles_input, smooth=4)
angles_sm4 = np.rad2deg(angles)
###############################################################################
fig, axs = plt.subplots(2, 3, figsize=(6, 4), sharex=True, sharey=True)
axs[0, 0].imshow(circles_input, cmap="gray", aspect="equal")
axs[0, 0].set(title="Original Image")
cax = make_axes_locatable(axs[0, 0]).append_axes("right", size="5%", pad=0.05)
cax.axis("off")
axs[1, 0].imshow(-circles_angles, cmap="twilight_shifted")
axs[1, 0].set(title="Original Angles")
cax = make_axes_locatable(axs[1, 0]).append_axes("right", size="5%", pad=0.05)
cax.axis("off")
im = axs[0, 1].imshow(angles_sm0, cmap="twilight_shifted", vmin=-90, vmax=90)
cax = make_axes_locatable(axs[0, 1]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(
im,
ticks=MultipleLocator(30),
format=FuncFormatter(lambda x, pos: "{:.0f}°".format(x)),
cax=cax,
orientation="vertical",
)
axs[0, 1].set(title="Angles (smooth=0)")
im = axs[1, 1].imshow(angles_sm4, cmap="twilight_shifted", vmin=-90, vmax=90)
cax = make_axes_locatable(axs[1, 1]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(
im,
ticks=MultipleLocator(30),
format=FuncFormatter(lambda x, pos: "{:.0f}°".format(x)),
cax=cax,
orientation="vertical",
)
axs[1, 1].set(title="Angles (smooth=4)")
im = axs[0, 2].imshow(anisos_sm0, cmap="Reds", vmin=0, vmax=1)
cax = make_axes_locatable(axs[0, 2]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
axs[0, 2].set(title="Anisotropy (smooth=0)")
im = axs[1, 2].imshow(anisos_sm4, cmap="Reds", vmin=0, vmax=1)
cax = make_axes_locatable(axs[1, 2]).append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
axs[1, 2].set(title="Anisotropy (smooth=4)")
for ax in axs.ravel():
ax.axis("off")
fig.tight_layout()
###############################################################################
# Core samples
# ------------------
# The original paper by van Vliet and Verbeek [1] also has an example with images
# of core samples. Since the original paper does not have a scale with which to
# plot the angles, we have chosen ours it to match their image as closely as
# possible.
core_input = rgb2gray(imread("../testdata/slope_estimate/core_sample.png"))
core_angles = rgb2gray(imread("../testdata/slope_estimate/core_sample_orientation.png"))
core_aniso = rgb2gray(imread("../testdata/slope_estimate/core_sample_anisotropy.png"))
angles, anisos_sm4 = dip_estimate(core_input, smooth=4)
angles_sm4 = np.rad2deg(angles)
angles, anisos_sm8 = dip_estimate(core_input, smooth=8)
angles_sm8 = np.rad2deg(angles)
###############################################################################
fig, axs = plt.subplots(1, 6, figsize=(10, 6))
axs[0].imshow(core_input, cmap="gray_r", aspect="equal")
axs[0].set(title="Original\nImage")
cax = make_axes_locatable(axs[0]).append_axes("right", size="20%", pad=0.05)
cax.axis("off")
axs[1].imshow(-core_angles, cmap="YlGnBu_r")
axs[1].set(title="Original\nAngles")
cax = make_axes_locatable(axs[1]).append_axes("right", size="20%", pad=0.05)
cax.axis("off")
im = axs[2].imshow(angles_sm8, cmap="YlGnBu_r", vmin=-49, vmax=-11)
cax = make_axes_locatable(axs[2]).append_axes("right", size="20%", pad=0.05)
cb = fig.colorbar(
im,
ticks=MultipleLocator(30),
format=FuncFormatter(lambda x, pos: "{:.0f}°".format(x)),
cax=cax,
orientation="vertical",
)
axs[2].set(title="Angles\n(smooth=8)")
im = axs[3].imshow(angles_sm4, cmap="YlGnBu_r", vmin=-49, vmax=-11)
cax = make_axes_locatable(axs[3]).append_axes("right", size="20%", pad=0.05)
cb = fig.colorbar(
im,
ticks=MultipleLocator(30),
format=FuncFormatter(lambda x, pos: "{:.0f}°".format(x)),
cax=cax,
orientation="vertical",
)
axs[3].set(title="Angles\n(smooth=4)")
im = axs[4].imshow(anisos_sm8, cmap="Reds", vmin=0, vmax=1)
cax = make_axes_locatable(axs[4]).append_axes("right", size="20%", pad=0.05)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
axs[4].set(title="Anisotropy\n(smooth=8)")
im = axs[5].imshow(anisos_sm4, cmap="Reds", vmin=0, vmax=1)
cax = make_axes_locatable(axs[5]).append_axes("right", size="20%", pad=0.05)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
axs[5].set(title="Anisotropy\n(smooth=4)")
for ax in axs.ravel():
ax.axis("off")
fig.tight_layout()
###############################################################################
# Final considerations
# --------------------
# As you can see the Structure Tensor algorithm is a very fast, general purpose
# algorithm that can be used to estimate local slopes to input datasets of
# very different natures.
| 10,546 | 35.49481 | 88 | py |
pylops | pylops-master/examples/plot_linearregr.py | r"""
Linear Regression
=================
This example shows how to use the :py:class:`pylops.LinearRegression` operator
to perform *Linear regression analysis*.
In short, linear regression is the problem of finding the best fitting
coefficients, namely intercept :math:`\mathbf{x_0}` and gradient
:math:`\mathbf{x_1}`, for this equation:
.. math::
y_i = x_0 + x_1 t_i \qquad \forall i=0,1,\ldots,N-1
As we can express this problem in a matrix form:
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
our solution can be obtained by solving the following optimization problem:
.. math::
J= \|\mathbf{y} - \mathbf{A} \mathbf{x}\|_2
See documentation of :py:class:`pylops.LinearRegression` for more detailed
definition of the forward problem.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(10)
###############################################################################
# Define the input parameters: number of samples along the t-axis (``N``),
# linear regression coefficients (``x``), and standard deviation of noise
# to be added to data (``sigma``).
N = 30
x = np.array([1.0, 2.0])
sigma = 1
###############################################################################
# Let's create the time axis and initialize the
# :py:class:`pylops.LinearRegression` operator
t = np.arange(N, dtype="float64")
LRop = pylops.LinearRegression(t, dtype="float64")
###############################################################################
# We can then apply the operator in forward mode to compute our data points
# along the x-axis (``y``). We will also generate some random gaussian noise
# and create a noisy version of the data (``yn``).
y = LRop * x
yn = y + np.random.normal(0, sigma, N)
###############################################################################
# We are now ready to solve our problem. As we are using an operator from the
# :py:class:`pylops.LinearOperator` family, we can simply use ``/``,
# which in this case will solve the system by means of an iterative solver
# (i.e., :py:func:`scipy.sparse.linalg.lsqr`).
xest = LRop / y
xnest = LRop / yn
###############################################################################
# Let's plot the best fitting line for the case of noise free and noisy data
plt.figure(figsize=(5, 7))
plt.plot(
np.array([t.min(), t.max()]),
np.array([t.min(), t.max()]) * x[1] + x[0],
"k",
lw=4,
label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
)
plt.plot(
np.array([t.min(), t.max()]),
np.array([t.min(), t.max()]) * xest[1] + xest[0],
"--r",
lw=4,
label=rf"est noise-free: $x_0$ = {xest[0]:.2f}, $x_1$ = {xest[1]:.2f}",
)
plt.plot(
np.array([t.min(), t.max()]),
np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
"--g",
lw=4,
label=rf"est noisy: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
)
plt.scatter(t, y, c="r", s=70)
plt.scatter(t, yn, c="g", s=70)
plt.legend()
plt.tight_layout()
###############################################################################
# Once that we have estimated the best fitting coefficients :math:`\mathbf{x}`
# we can now use them to compute the *y values* for a different set of values
# along the *t-axis*.
t1 = np.linspace(-N, N, 2 * N, dtype="float64")
y1 = LRop.apply(t1, xest)
plt.figure(figsize=(5, 7))
plt.plot(t, y, "k", label="Original axis")
plt.plot(t1, y1, "r", label="New axis")
plt.scatter(t, y, c="k", s=70)
plt.scatter(t1, y1, c="r", s=40)
plt.legend()
plt.tight_layout()
###############################################################################
# We consider now the case where some of the observations have large errors.
# Such elements are generally referred to as *outliers* and can affect the
# quality of the least-squares solution if not treated with care. In this
# example we will see how using a L1 solver such as
# :py:func:`pylops.optimization.sparsity.IRLS` can drammatically improve the
# quality of the estimation of intercept and gradient.
class CallbackIRLS(pylops.optimization.callback.Callbacks):
    """Callback recording the IRLS model and weight history.

    At the end of every outer iteration the current model estimate and
    IRLS weights are stored; for the very first iteration (no weights
    computed yet) unitary weights are stored instead.
    """

    def __init__(self, n):
        self.n = n
        self.xirls_hist = []
        self.rw_hist = []

    def on_step_end(self, solver, x):
        # First outer iteration: weights are not available yet
        if solver.iiter <= 1:
            self.rw_hist.append(np.ones(self.n))
            return
        self.xirls_hist.append(x)
        self.rw_hist.append(solver.rw)
# Add outliers
yn[1] += 40
yn[N - 2] -= 20
# IRLS
nouter = 20
epsR = 1e-2
epsI = 0
tolIRLS = 1e-2
# L2 solution on data with outliers, for comparison with IRLS
xnest = LRop / yn
# Run IRLS with a callback recording the evolution of model and weights
cb = CallbackIRLS(N)
irlssolve = pylops.optimization.sparsity.IRLS(
    LRop,
    [
        cb,
    ],
)
xirls, nouter = irlssolve.solve(
    yn, nouter=nouter, threshR=False, epsR=epsR, epsI=epsI, tolIRLS=tolIRLS
)
xirls_hist, rw_hist = np.array(cb.xirls_hist), cb.rw_hist
print(f"IRLS converged at {nouter} iterations...")
plt.figure(figsize=(5, 7))
plt.plot(
    np.array([t.min(), t.max()]),
    np.array([t.min(), t.max()]) * x[1] + x[0],
    "k",
    lw=4,
    label=rf"true: $x_0$ = {x[0]:.2f}, $x_1$ = {x[1]:.2f}",
)
plt.plot(
    np.array([t.min(), t.max()]),
    np.array([t.min(), t.max()]) * xnest[1] + xnest[0],
    "--r",
    lw=4,
    label=rf"L2: $x_0$ = {xnest[0]:.2f}, $x_1$ = {xnest[1]:.2f}",
)
plt.plot(
    np.array([t.min(), t.max()]),
    np.array([t.min(), t.max()]) * xirls[1] + xirls[0],
    "--g",
    lw=4,
    label=rf"L1 - IRSL: $x_0$ = {xirls[0]:.2f}, $x_1$ = {xirls[1]:.2f}",
)
plt.scatter(t, y, c="r", s=70)
plt.scatter(t, yn, c="g", s=70)
plt.legend()
plt.tight_layout()
###############################################################################
# Let's finally take a look at the convergence of IRLS. First we visualize
# the evolution of intercept and gradient
fig, axs = plt.subplots(2, 1, figsize=(8, 10))
fig.suptitle("IRLS evolution", fontsize=14, fontweight="bold", y=0.95)
axs[0].plot(xirls_hist[:, 0], xirls_hist[:, 1], ".-k", lw=2, ms=20)
axs[0].scatter(x[0], x[1], c="r", s=70)
axs[0].set_title("Intercept and gradient")
axs[0].grid()
# Plot the weights of each outer iteration, shaded from black to white
for iiter in range(nouter):
    axs[1].semilogy(
        rw_hist[iiter],
        color=(iiter / nouter, iiter / nouter, iiter / nouter),
        label="iter%d" % iiter,
    )
axs[1].set_title("Weights")
axs[1].legend(loc=5, fontsize="small")
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 6,329 | 29.878049 | 79 | py |
pylops | pylops-master/examples/plot_blending.py | """
Blending
========
This example shows how to use the :py:class:`pylops.waveeqprocessing.blending.Blending`
operator to blend seismic data to mimic state-of-the-art simultaneous shooting
acquisition systems.
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by considering a streamer seismic dataset and apply blending in
# so-called continuous blending mode
inputdata = np.load("../testdata/marchenko/input.npz")
data = inputdata["R"]
data = np.pad(data, ((0, 0), (0, 0), (0, 50)))
wav = inputdata["wav"]
wav_c = np.argmax(wav)
ns, nr, nt = data.shape
# time axis
dt = 0.004
t = np.arange(nt) * dt
# convolve with wavelet
data = np.apply_along_axis(sp.signal.convolve, -1, data, wav, mode="full")
data = data[..., wav_c:][..., :nt]
# obc data
data_obc = data[:-1, :-1]
ns_obc, nr_obc, _ = data_obc.shape
# streamer data (moving spread: each source sees a window of receivers)
nr_streamer = 21
ns_streamer = ns - nr_streamer
data_streamer = np.zeros((ns_streamer, nr_streamer, nt))
for isrc in range(ns_streamer):
    data_streamer[isrc] = data[isrc, isrc : isrc + nr_streamer]
# visualize
isrcplot = [0, ns_obc // 2, ns_obc - 1]
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 8))
fig.suptitle("OBC data")
for i, ax in enumerate(axs):
    ax.imshow(
        data_obc[isrcplot[i]].T,
        cmap="gray",
        vmin=-0.1,
        vmax=0.1,
        extent=(0, nr, t[-1], 0),
        interpolation="none",
    )
    ax.set_title(f"CSG {isrcplot[i]}")
    ax.set_xlabel("#Rec")
    ax.axis("tight")
axs[0].set_ylabel("t [s]")
plt.tight_layout()
isrcplot = [0, ns_streamer // 2, ns_streamer - 1]
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 8))
fig.suptitle("Streamer data")
for i, ax in enumerate(axs):
    ax.imshow(
        data_streamer[isrcplot[i]].T,
        cmap="gray",
        vmin=-0.1,
        vmax=0.1,
        extent=(0, nr_streamer, t[-1], 0),
        interpolation="none",
    )
    ax.set_title(f"CSG {isrcplot[i]}")
    ax.set_xlabel("#Rec")
    ax.axis("tight")
axs[0].set_ylabel("t [s]")
plt.tight_layout()
irecplot = [0, nr_streamer // 2, nr_streamer - 1]
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(12, 8))
fig.suptitle("Streamer data")
for i, ax in enumerate(axs):
    ax.imshow(
        data_streamer[:, irecplot[i]].T,
        cmap="gray",
        vmin=-0.1,
        vmax=0.1,
        extent=(0, ns_streamer, t[-1], 0),
        interpolation="none",
    )
    ax.set_title(f"CRG {irecplot[i]}")
    ax.set_xlabel("#Src")
    ax.axis("tight")
axs[0].set_ylabel("t [s]")
plt.tight_layout()
################################################################################
# We can now consider the streamer seismic dataset and apply blending in
# so-called continuous blending mode
overlap = 0.5
# Random ignition times with average spacing (1 - overlap) * record length
ignition_times = np.random.normal(0, 0.6, ns_streamer)
ignition_times += (1 - overlap) * nt * dt
ignition_times[0] = 0.0
ignition_times = np.cumsum(ignition_times)
plt.figure(figsize=(12, 4))
plt.plot(ignition_times, "k")
plt.title("Continuous blending times")
Bop = pylops.waveeqprocessing.BlendingContinuous(
    nt,
    nr_streamer,
    ns_streamer,
    dt,
    ignition_times,
    dtype="complex128",
)
# Forward: blend; adjoint: pseudo-deblend
data_blended = Bop * data_streamer
data_pseudo = Bop.H * data_blended
fig, ax = plt.subplots(1, 1, figsize=(4, 19))
ax.imshow(
    data_blended.real.T,
    cmap="gray",
    vmin=-0.1,
    vmax=0.1,
    extent=(0, ns_streamer, Bop.nttot * dt, 0),
    interpolation="none",
)
ax.set_title("Blended CSG")
ax.set_xlabel("#Rec")
ax.set_ylabel("t [s]")
ax.axis("tight")
ax.set_ylim(10, 0)
plt.tight_layout()
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(12, 8))
axs[0].imshow(
    data_streamer[:, 0].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[0].set_title("Unblended CRG")
axs[0].set_xlabel("#Src")
axs[0].set_ylabel("t [s]")
axs[0].axis("tight")
axs[1].imshow(
    data_pseudo[:, 0].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[1].set_title("Pseudo-deblended CRG")
axs[1].set_xlabel("#Src")
axs[1].axis("tight")
plt.tight_layout()
################################################################################
# Similarly we can consider the OBC data and apply both group and half blending
# Group
group_size = 2
n_groups = ns_obc // 2
ignition_times = np.abs(np.random.normal(0.2, 0.5, ns_obc))  # only positive shifts
ignition_times[0] = 0.0
plt.figure(figsize=(12, 4))
plt.plot(ignition_times.reshape(group_size, n_groups).T, "k")
plt.title("Group blending times")
Bop = pylops.waveeqprocessing.BlendingGroup(
    nt,
    nr_obc,
    ns_obc,
    dt,
    ignition_times.reshape(group_size, n_groups),
    group_size=group_size,
    n_groups=n_groups,
    dtype="complex128",
)
data_blended = Bop * data_obc
data_pseudo = Bop.H * data_blended
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.imshow(
    data_blended[n_groups // 2].real.T,
    cmap="gray",
    vmin=-0.1,
    vmax=0.1,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
ax.set_title("Blended CSG")
ax.set_xlabel("#Rec")
ax.set_ylabel("t [s]")
ax.axis("tight")
plt.tight_layout()
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(12, 8))
axs[0].imshow(
    data_obc[:, 10].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[0].set_title("Unblended CRG")
axs[0].set_xlabel("#Src")
axs[0].set_ylabel("t [s]")
axs[0].axis("tight")
axs[1].imshow(
    data_pseudo[:, 10].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[1].set_title("Pseudo-deblended CRG")
axs[1].set_xlabel("#Src")
axs[1].axis("tight")
plt.tight_layout()
# Half
group_size = 2
n_groups = ns_obc // 2
ignition_times = np.abs(np.random.normal(0.1, 0.5, ns_obc))  # only positive shifts
ignition_times[0] = 0.0
plt.figure(figsize=(12, 4))
plt.plot(ignition_times.reshape(group_size, n_groups).T, "k")
plt.title("Half blending times")
Bop = pylops.waveeqprocessing.BlendingHalf(
    nt,
    nr_obc,
    ns_obc,
    dt,
    ignition_times.reshape(group_size, n_groups),
    group_size=group_size,
    n_groups=n_groups,
    dtype="complex128",
    name=None,
)
data_blended = Bop * data_obc
data_pseudo = Bop.H * data_blended
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.imshow(
    data_blended[n_groups // 2].real.T,
    cmap="gray",
    vmin=-0.1,
    vmax=0.1,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
ax.set_title("Blended CSG")
ax.set_xlabel("#Rec")
ax.set_ylabel("t [s]")
ax.axis("tight")
plt.tight_layout()
fig, axs = plt.subplots(1, 2, sharey=True, figsize=(12, 8))
axs[0].imshow(
    data_obc[:, 10].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[0].set_title("Unblended CRG")
axs[0].set_xlabel("#Src")
axs[0].set_ylabel("t [s]")
axs[0].axis("tight")
axs[1].imshow(
    data_pseudo[:, 10].real.T,
    cmap="gray",
    vmin=-0.01,
    vmax=0.01,
    extent=(0, ns_streamer, t[-1], 0),
    interpolation="none",
)
axs[1].set_title("Pseudo-deblended CRG")
axs[1].set_xlabel("#Src")
axs[1].axis("tight")
plt.tight_layout()
| 7,398 | 23.5 | 87 | py |
pylops | pylops-master/examples/plot_fft.py | """
Fourier Transform
=================
This example shows how to use the :py:class:`pylops.signalprocessing.FFT`,
:py:class:`pylops.signalprocessing.FFT2D`
and :py:class:`pylops.signalprocessing.FFTND` operators to apply the Fourier
Transform to the model and the inverse Fourier Transform to the data.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by applying the one dimensional FFT to a one dimensional
# sinusoidal signal :math:`d(t)=sin(2 \pi f_0t)` using a time axis of
# lenght :math:`nt` and sampling :math:`dt`
dt = 0.005
nt = 100
t = np.arange(nt) * dt
f0 = 10
nfft = 2**10
d = np.sin(2 * np.pi * f0 * t)
FFTop = pylops.signalprocessing.FFT(dims=nt, nfft=nfft, sampling=dt, engine="numpy")
D = FFTop * d
# Adjoint = inverse for FFT
dinv = FFTop.H * D
# Same result obtained via the overloaded ``/`` (iterative inversion)
dinv = FFTop / D
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
axs[0].plot(t, d, "k", lw=2, label="True")
axs[0].plot(t, dinv.real, "--r", lw=2, label="Inverted")
axs[0].legend()
axs[0].set_title("Signal")
axs[1].plot(FFTop.f[: int(FFTop.nfft / 2)], np.abs(D[: int(FFTop.nfft / 2)]), "k", lw=2)
axs[1].set_title("Fourier Transform")
axs[1].set_xlim([0, 3 * f0])
plt.tight_layout()
###############################################################################
# In this example we used numpy as our engine for the ``fft`` and ``ifft``.
# PyLops implements a second engine (``engine='fftw'``) which uses the
# well-known `FFTW <http://www.fftw.org>`_ via the python wrapper
# :py:class:`pyfftw.FFTW`. This optimized fft tends to outperform the one from
# numpy in many cases but it is not inserted in the mandatory requirements of
# PyLops. If interested to use ``FFTW`` backend, read the `fft routines`
# section at :ref:`performance`.
FFTop = pylops.signalprocessing.FFT(dims=nt, nfft=nfft, sampling=dt, engine="fftw")
D = FFTop * d
# Adjoint = inverse for FFT
dinv = FFTop.H * D
dinv = FFTop / D
fig, axs = plt.subplots(1, 2, figsize=(10, 4))
axs[0].plot(t, d, "k", lw=2, label="True")
axs[0].plot(t, dinv.real, "--r", lw=2, label="Inverted")
axs[0].legend()
axs[0].set_title("Signal")
axs[1].plot(FFTop.f[: int(FFTop.nfft / 2)], np.abs(D[: int(FFTop.nfft / 2)]), "k", lw=2)
axs[1].set_title("Fourier Transform with fftw")
axs[1].set_xlim([0, 3 * f0])
plt.tight_layout()
###############################################################################
# We can also apply the one dimensional FFT to to a two-dimensional
# signal (along one of the first axis)
dt = 0.005
nt, nx = 100, 20
t = np.arange(nt) * dt
f0 = 10
nfft = 2**10
d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(nx) + 1)
FFTop = pylops.signalprocessing.FFT(dims=(nt, nx), axis=0, nfft=nfft, sampling=dt)
D = FFTop * d.ravel()
# Adjoint = inverse for FFT
dinv = FFTop.H * D
dinv = FFTop / D
dinv = np.real(dinv).reshape(nt, nx)
fig, axs = plt.subplots(2, 2, figsize=(10, 6))
axs[0][0].imshow(d, vmin=-20, vmax=20, cmap="bwr")
axs[0][0].set_title("Signal")
axs[0][0].axis("tight")
axs[0][1].imshow(np.abs(D.reshape(nfft, nx)[:200, :]), cmap="bwr")
axs[0][1].set_title("Fourier Transform")
axs[0][1].axis("tight")
axs[1][0].imshow(dinv, vmin=-20, vmax=20, cmap="bwr")
axs[1][0].set_title("Inverted")
axs[1][0].axis("tight")
axs[1][1].imshow(d - dinv, vmin=-20, vmax=20, cmap="bwr")
axs[1][1].set_title("Error")
axs[1][1].axis("tight")
fig.tight_layout()
###############################################################################
# We can also apply the two dimensional FFT to to a two-dimensional signal
dt, dx = 0.005, 5
nt, nx = 100, 201
t = np.arange(nt) * dt
x = np.arange(nx) * dx
f0 = 10
nfft = 2**10
d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(nx) + 1)
FFTop = pylops.signalprocessing.FFT2D(
    dims=(nt, nx), nffts=(nfft, nfft), sampling=(dt, dx)
)
D = FFTop * d.ravel()
dinv = FFTop.H * D
dinv = FFTop / D
dinv = np.real(dinv).reshape(nt, nx)
fig, axs = plt.subplots(2, 2, figsize=(10, 6))
axs[0][0].imshow(d, vmin=-100, vmax=100, cmap="bwr")
axs[0][0].set_title("Signal")
axs[0][0].axis("tight")
axs[0][1].imshow(
    np.abs(np.fft.fftshift(D.reshape(nfft, nfft), axes=1)[:200, :]), cmap="bwr"
)
axs[0][1].set_title("Fourier Transform")
axs[0][1].axis("tight")
axs[1][0].imshow(dinv, vmin=-100, vmax=100, cmap="bwr")
axs[1][0].set_title("Inverted")
axs[1][0].axis("tight")
axs[1][1].imshow(d - dinv, vmin=-100, vmax=100, cmap="bwr")
axs[1][1].set_title("Error")
axs[1][1].axis("tight")
fig.tight_layout()
###############################################################################
# Finally can apply the three dimensional FFT to to a three-dimensional signal
dt, dx, dy = 0.005, 5, 3
nt, nx, ny = 30, 21, 11
t = np.arange(nt) * dt
x = np.arange(nx) * dx
y = np.arange(nx) * dy
f0 = 10
nfft = 2**6
nfftk = 2**5
d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(nx) + 1)
d = np.tile(d[:, :, np.newaxis], [1, 1, ny])
FFTop = pylops.signalprocessing.FFTND(
    dims=(nt, nx, ny), nffts=(nfft, nfftk, nfftk), sampling=(dt, dx, dy)
)
D = FFTop * d.ravel()
dinv = FFTop.H * D
dinv = FFTop / D
dinv = np.real(dinv).reshape(nt, nx, ny)
fig, axs = plt.subplots(2, 2, figsize=(10, 6))
axs[0][0].imshow(d[:, :, ny // 2], vmin=-20, vmax=20, cmap="bwr")
axs[0][0].set_title("Signal")
axs[0][0].axis("tight")
axs[0][1].imshow(
    np.abs(np.fft.fftshift(D.reshape(nfft, nfftk, nfftk), axes=1)[:20, :, nfftk // 2]),
    cmap="bwr",
)
axs[0][1].set_title("Fourier Transform")
axs[0][1].axis("tight")
axs[1][0].imshow(dinv[:, :, ny // 2], vmin=-20, vmax=20, cmap="bwr")
axs[1][0].set_title("Inverted")
axs[1][0].axis("tight")
axs[1][1].imshow(d[:, :, ny // 2] - dinv[:, :, ny // 2], vmin=-20, vmax=20, cmap="bwr")
axs[1][1].set_title("Error")
axs[1][1].axis("tight")
fig.tight_layout()
| 5,786 | 31.329609 | 88 | py |
pylops | pylops-master/examples/plot_restriction.py | """
Restriction and Interpolation
=============================
This example shows how to use the :py:class:`pylops.Restriction` operator
to sample a certain input vector at desired locations ``iava``. Moreover,
we go one step further and use the :py:class:`pylops.signalprocessing.Interp`
operator to show how we can also sample values at locations that are not
exactly on the grid of the input vector.
As explained in the :ref:`sphx_glr_tutorials_solvers.py` tutorial, such
operators can be used as forward model in an inverse problem aimed at
interpolate irregularly sampled 1d or 2d signals onto a regular grid.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(10)
###############################################################################
# Let's create a signal of size ``nt`` and sampling ``dt`` that is composed
# of three sinusoids at frequencies ``freqs``.
nt = 200
dt = 0.004
freqs = [5.0, 3.0, 8.0]
t = np.arange(nt) * dt
x = np.zeros(nt)
for freq in freqs:
    x = x + np.sin(2 * np.pi * freq * t)
###############################################################################
# First of all, we subsample the signal at random locations and we retain 40%
# of the initial samples.
perc_subsampling = 0.4
ntsub = int(np.round(nt * perc_subsampling))
isample = np.arange(nt)
# Sorted random subset of sample indices to retain
iava = np.sort(np.random.permutation(np.arange(nt))[:ntsub])
###############################################################################
# We then create the restriction and interpolation operators and display
# the original signal as well as the subsampled signal.
Rop = pylops.Restriction(nt, iava, dtype="float64")
# Off-grid sampling (iava + 0.4) with different interpolation kinds
NNop, iavann = pylops.signalprocessing.Interp(
    nt, iava + 0.4, kind="nearest", dtype="float64"
)
LIop, iavali = pylops.signalprocessing.Interp(
    nt, iava + 0.4, kind="linear", dtype="float64"
)
SIop, iavasi = pylops.signalprocessing.Interp(
    nt, iava + 0.4, kind="sinc", dtype="float64"
)
y = Rop * x
ynn = NNop * x
yli = LIop * x
ysi = SIop * x
ymask = Rop.mask(x)
# Visualize data
fig = plt.figure(figsize=(15, 5))
plt.plot(isample, x, ".-k", lw=3, ms=10, label="all samples")
plt.plot(isample, ymask, ".g", ms=35, label="available samples")
plt.plot(iavann, ynn, ".r", ms=25, label="NN interp samples")
plt.plot(iavali, yli, ".m", ms=20, label="Linear interp samples")
plt.plot(iavasi, ysi, ".y", ms=15, label="Sinc interp samples")
plt.legend(loc="right")
plt.title("Data restriction")
subax = fig.add_axes([0.2, 0.2, 0.15, 0.6])
subax.plot(isample, x, ".-k", lw=3, ms=10)
subax.plot(isample, ymask, ".g", ms=35)
subax.plot(iavann, ynn, ".r", ms=25)
subax.plot(iavali, yli, ".m", ms=20)
subax.plot(iavasi, ysi, ".y", ms=15)
subax.set_xlim([120, 127])
subax.set_ylim([-0.5, 0.5])
plt.tight_layout()
###############################################################################
# Finally we show how the :py:class:`pylops.Restriction` is not limited to
# one dimensional signals but can be applied to sample locations of a specific
# axis of a multi-dimensional array.
# subsampling locations
nx, nt = 100, 50
x = np.random.normal(0, 1, (nx, nt))
perc_subsampling = 0.4
nxsub = int(np.round(nx * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(nx))[:nxsub])
Rop = pylops.Restriction((nx, nt), iava, axis=0, dtype="float64")
y = Rop * x
ymask = Rop.mask(x)
fig, axs = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
axs[0].imshow(x.T, cmap="gray")
axs[0].set_title("Model")
axs[0].axis("tight")
axs[1].imshow(y.T, cmap="gray")
axs[1].set_title("Data")
axs[1].axis("tight")
axs[2].imshow(ymask.T, cmap="gray")
axs[2].set_title("Masked model")
axs[2].axis("tight")
plt.tight_layout()
| 3,699 | 31.45614 | 79 | py |
pylops | pylops-master/examples/plot_real.py | """
Real
====
This example shows how to use the :py:class:`pylops.basicoperators.Real`
operator.
This operator returns the real part of the data in forward and adjoint mode,
but the forward output will be a real number, while the adjoint output will
be a complex number with a zero-valued imaginary part.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define a Real operator :math:`\mathbf{\Re}` to extract the real
# component of the input.
M = 5
# Complex input with distinct real and imaginary parts
x = np.arange(M) + 1j * np.arange(M)[::-1]
Rop = pylops.basicoperators.Real(M, dtype="complex128")
y = Rop * x
xadj = Rop.H * y
_, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].plot(np.real(x), lw=2, label="Real")
axs[0].plot(np.imag(x), lw=2, label="Imag")
axs[0].legend()
axs[0].set_title("Input")
axs[1].plot(np.real(y), lw=2, label="Real")
axs[1].plot(np.imag(y), lw=2, label="Imag")
axs[1].legend()
axs[1].set_title("Forward of Input")
axs[2].plot(np.real(xadj), lw=2, label="Real")
axs[2].plot(np.imag(xadj), lw=2, label="Imag")
axs[2].legend()
axs[2].set_title("Adjoint of Forward")
plt.tight_layout()
| 1,194 | 26.790698 | 79 | py |
pylops | pylops-master/examples/plot_seislet.py | """
Seislet transform
=================
This example shows how to use the :py:class:`pylops.signalprocessing.Seislet`
operator. This operator the forward, adjoint and inverse Seislet transform
that is a modification of the well-know Wavelet transform where local slopes
are used in the prediction and update steps to further improve the prediction
of a trace from its previous (or subsequent) one and reduce the amount of
information passed to the subsequent scale. While this transform was initially
developed in the context of processing and compression of seismic data, it is
also suitable to any other oscillatory dataset such as GPR or Acoustic
recordings.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pylops
plt.close("all")
############################################
# In this example we use the same benchmark
# `dataset <http://ahay.org/blog/2014/10/08/program-of-the-month-sfsigmoid/>`_
# that was used in the original paper describing the Seislet transform. First,
# local slopes are estimated using
# :py:func:`pylops.utils.signalprocessing.slope_estimate`.
inputfile = "../testdata/sigmoid.npz"
d = np.load(inputfile)
d = d["sigmoid"]
nx, nt = d.shape
dx, dt = 0.008, 0.004
x, t = np.arange(nx) * dx, np.arange(nt) * dt
# slope estimation
slope, _ = pylops.utils.signalprocessing.slope_estimate(d.T, dt, dx, smooth=2.5)
slope = -slope.T # t-axis points down, reshape
# clip slopes above 80°
pmax = np.arctan(80 * np.pi / 180)
slope[slope > pmax] = pmax
slope[slope < -pmax] = -pmax
############################################
clip = 0.5 * np.max(np.abs(d))
clip_s = min(pmax, np.max(np.abs(slope)))
opts = dict(aspect=2, extent=(x[0], x[-1], t[-1], t[0]))
fig, axs = plt.subplots(1, 2, figsize=(14, 7), sharey=True, sharex=True)
axs[0].imshow(d.T, cmap="gray", vmin=-clip, vmax=clip, **opts)
axs[0].set(xlabel="Position [km]", ylabel="Time [s]", title="Data")
im = axs[1].imshow(slope.T, cmap="RdBu_r", vmin=-clip_s, vmax=clip_s, **opts)
axs[1].set(xlabel="Position [km]", title="Slopes")
fig.tight_layout()
pos = axs[1].get_position()
cbpos = [
    pos.x0 + 0.1 * pos.width,
    pos.y0 + 0.9 * pos.height,
    0.8 * pos.width,
    0.05 * pos.height,
]
cax = fig.add_axes(cbpos)
cb = fig.colorbar(im, cax=cax, orientation="horizontal")
cb.set_label("[s/km]")
############################################
# Next the Seislet transform is computed.
Sop = pylops.signalprocessing.Seislet(slope, sampling=(dx, dt))
seis = Sop * d
# Sizes and cumulative boundaries of the dyadic scales
nlevels_max = int(np.log2(nx))
levels_size = np.flip(np.array([2**i for i in range(nlevels_max)]))
levels_cum = np.cumsum(levels_size)
############################################
fig, ax = plt.subplots(figsize=(14, 6))
im = ax.imshow(
    seis.T,
    cmap="gray",
    vmin=-clip,
    vmax=clip,
    aspect="auto",
    interpolation="none",
    extent=(1, seis.shape[0], t[-1], t[0]),
)
ax.xaxis.set_major_locator(MaxNLocator(nbins=20, integer=True))
for level in levels_cum:
    ax.axvline(level + 0.5, color="w")
ax.set(xlabel="Scale", ylabel="Time [s]", title="Seislet transform")
cax = make_axes_locatable(ax).append_axes("right", size="2%", pad=0.1)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
cb.formatter.set_powerlimits((0, 0))
fig.tight_layout()
############################################
# We may also stretch the finer scales to be the width of the image
fig, axs = plt.subplots(2, nlevels_max // 2, figsize=(14, 7), sharex=True, sharey=True)
for i, ax in enumerate(axs.ravel()[:-1]):
    curdata = seis[levels_cum[i] : levels_cum[i + 1], :].T
    vmax = np.max(np.abs(curdata))
    ax.imshow(curdata, vmin=-vmax, vmax=vmax, cmap="gray", interpolation="none", **opts)
    ax.set(title=f"Scale {i+1}")
    if i + 1 > nlevels_max // 2:
        ax.set(xlabel="Position [km]")
curdata = seis[levels_cum[-1] :, :].T
vmax = np.max(np.abs(curdata))
axs[-1, -1].imshow(
    curdata, vmin=-vmax, vmax=vmax, cmap="gray", interpolation="none", **opts
)
axs[0, 0].set(ylabel="Time [s]")
axs[1, 0].set(ylabel="Time [s]")
axs[-1, -1].set(xlabel="Position [km]", title=f"Scale {nlevels_max}")
fig.tight_layout()
############################################
# As a comparison we also compute the Seislet transform fixing slopes to zero.
# This way we turn the Seislet tranform into a basic 1D Wavelet transform
# performed over the spatial axis.
Wop = pylops.signalprocessing.Seislet(np.zeros_like(slope), sampling=(dx, dt))
dwt = Wop * d
############################################
fig, ax = plt.subplots(figsize=(14, 6))
im = ax.imshow(
    dwt.T,
    cmap="gray",
    vmin=-clip,
    vmax=clip,
    aspect="auto",
    interpolation="none",
    extent=(1, dwt.shape[0], t[-1], t[0]),
)
ax.xaxis.set_major_locator(MaxNLocator(nbins=20, integer=True))
for level in levels_cum:
    ax.axvline(level + 0.5, color="w")
ax.set(xlabel="Scale", ylabel="Time [s]", title="Wavelet transform")
cax = make_axes_locatable(ax).append_axes("right", size="2%", pad=0.1)
cb = fig.colorbar(im, cax=cax, orientation="vertical")
cb.formatter.set_powerlimits((0, 0))
fig.tight_layout()
############################################
# Again, we may decompress the finer scales
fig, axs = plt.subplots(2, nlevels_max // 2, figsize=(14, 7), sharex=True, sharey=True)
for i, ax in enumerate(axs.ravel()[:-1]):
    curdata = dwt[levels_cum[i] : levels_cum[i + 1], :].T
    vmax = np.max(np.abs(curdata))
    ax.imshow(curdata, vmin=-vmax, vmax=vmax, cmap="gray", interpolation="none", **opts)
    ax.set(title=f"Scale {i+1}")
    if i + 1 > nlevels_max // 2:
        ax.set(xlabel="Position [km]")
curdata = dwt[levels_cum[-1] :, :].T
vmax = np.max(np.abs(curdata))
axs[-1, -1].imshow(
    curdata, vmin=-vmax, vmax=vmax, cmap="gray", interpolation="none", **opts
)
axs[0, 0].set(ylabel="Time [s]")
axs[1, 0].set(ylabel="Time [s]")
axs[-1, -1].set(xlabel="Position [km]", title=f"Scale {nlevels_max}")
fig.tight_layout()
############################################
# Finally we evaluate the compression capabilities of the Seislet transform
# compared to the 1D Wavelet transform. We zero-out all but the strongest 25%
# of the components. We perform the inverse transforms and assess the
# compression error.
perc = 0.25
# Indices of coefficients sorted by decreasing magnitude
seis_strong_idx = np.argsort(-np.abs(seis.ravel()))
dwt_strong_idx = np.argsort(-np.abs(dwt.ravel()))
seis_strong = np.abs(seis.ravel())[seis_strong_idx]
dwt_strong = np.abs(dwt.ravel())[dwt_strong_idx]
############################################
fig, ax = plt.subplots()
ax.plot(range(1, len(seis_strong) + 1), seis_strong / seis_strong[0], label="Seislet")
ax.plot(
    range(1, len(dwt_strong) + 1), dwt_strong / dwt_strong[0], "--", label="Wavelet"
)
ax.set(xlabel="n", ylabel="Coefficient strength [%]", title="Transform Coefficients")
ax.axvline(np.rint(len(seis_strong) * perc), color="k", label=f"{100*perc:.0f}%")
ax.legend()
fig.tight_layout()
############################################
# Keep only the strongest ``perc`` fraction of coefficients and invert
seis1 = np.zeros_like(seis.ravel())
seis_strong_idx = seis_strong_idx[: int(np.rint(len(seis_strong) * perc))]
seis1[seis_strong_idx] = seis.ravel()[seis_strong_idx]
d_seis = Sop.inverse(seis1).reshape(Sop.dims)
dwt1 = np.zeros_like(dwt.ravel())
dwt_strong_idx = dwt_strong_idx[: int(np.rint(len(dwt_strong) * perc))]
dwt1[dwt_strong_idx] = dwt.ravel()[dwt_strong_idx]
d_dwt = Wop.inverse(dwt1).reshape(Wop.dims)
############################################
opts.update(dict(cmap="gray", vmin=-clip, vmax=clip))
fig, axs = plt.subplots(2, 3, figsize=(14, 7), sharex=True, sharey=True)
axs[0, 0].imshow(d.T, **opts)
axs[0, 0].set(title="Data")
axs[0, 1].imshow(d_seis.T, **opts)
axs[0, 1].set(title=f"Rec. from Seislet ({100*perc:.0f}% of coeffs.)")
axs[0, 2].imshow((d - d_seis).T, **opts)
axs[0, 2].set(title="Error from Seislet Rec.")
axs[1, 0].imshow(d.T, **opts)
axs[1, 0].set(ylabel="Time [s]", title="Data [Repeat]")
axs[1, 1].imshow(d_dwt.T, **opts)
axs[1, 1].set(title=f"Rec. from Wavelet ({100*perc:.0f}% of coeffs.)")
axs[1, 2].imshow((d - d_dwt).T, **opts)
axs[1, 2].set(title="Error from Wavelet Rec.")
for i in range(3):
    axs[1, i].set(xlabel="Position [km]")
plt.tight_layout()
############################################
# To conclude it is worth noting that the Seislet transform, differently to the
# Wavelet transform, is not orthogonal: in other words, its adjoint and
# inverse are not equivalent. While we have used the forward and inverse
# transformations, when used as linear operator in composition with other
# operators, the Seislet transform requires the adjoint be defined and that it
# also passes the dot-test pair that is. As shown below, this is the case
# when using the implementation in the PyLops package.
pylops.utils.dottest(Sop, verb=True)
| 8,773 | 37.31441 | 88 | py |
pylops | pylops-master/examples/plot_nmo.py | r"""
Normal Moveout (NMO) Correction
===============================
This example shows how to create your own operator for performing
normal moveout (NMO) correction to a seismic record.
We will perform classic NMO using an operator created from scratch,
as well as using the :py:class:`pylops.Spread` operator.
"""
from math import floor
from time import time
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid, make_axes_locatable
from numba import jit, prange
from scipy.interpolate import griddata
from scipy.ndimage import gaussian_filter
from pylops import LinearOperator, Spread
from pylops.utils import dottest
from pylops.utils.decorators import reshaped
from pylops.utils.seismicevents import hyperbolic2d, makeaxis
from pylops.utils.wavelets import ricker
def create_colorbar(im, ax):
    """Attach a vertical colorbar to the right of ``ax``.

    Parameters
    ----------
    im : matplotlib image (as returned by ``ax.imshow``) the colorbar maps
    ax : axes to which the colorbar is appended

    Returns
    -------
    cax : the axes hosting the colorbar
    cb : the colorbar object
    """
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.1)
    # Use the figure that owns ``ax`` instead of relying on a late-bound
    # module-level ``fig`` global (which breaks if no global ``fig`` exists
    # or points to a different figure when this helper is called)
    cb = ax.get_figure().colorbar(im, cax=cax, orientation="vertical")
    return cax, cb
###############################################################################
# Given a common-shot or common-midpoint (CMP) record, the objective of NMO
# correction is to "flatten" events, that is, align events at later offsets
# to that of the zero offset. NMO has long been a staple of seismic data
# processing, used even today for initial velocity analysis and QC purposes.
# In addition, it can be the domain of choice for many useful processing
# steps, such as angle muting.
#
# To get started, let us create a 2D seismic dataset containing some hyperbolic
# events representing reflections from flat reflectors.
# Events are created with a true RMS velocity, which we will be using as if we
# picked them from, for example, a semblance panel.
par = dict(ox=0, dx=40, nx=80, ot=0, dt=0.004, nt=520)
t, _, x, _ = makeaxis(par)
# Zero-offset times, RMS velocities and amplitudes of the three events
t0s_true = np.array([0.5, 1.22, 1.65])
vrms_true = np.array([2000.0, 2400.0, 2500.0])
amps = np.array([1, 0.2, 0.5])
freq = 10 # Hz
wav, *_ = ricker(t[:41], f0=freq)
_, data = hyperbolic2d(x, t, t0s_true, vrms_true, amp=amps, wav=wav)
###############################################################################
# NMO correction plot
pclip = 0.5
dmax = np.max(np.abs(data))
opts = dict(
    cmap="gray_r",
    extent=[x[0], x[-1], t[-1], t[0]],
    aspect="auto",
    vmin=-pclip * dmax,
    vmax=pclip * dmax,
)
# Offset-dependent traveltime of the first hyperbolic event
t_nmo_ev1 = np.sqrt(t0s_true[0] ** 2 + (x / vrms_true[0]) ** 2)
fig, ax = plt.subplots(figsize=(4, 5))
vmax = np.max(np.abs(data))
im = ax.imshow(data.T, **opts)
ax.plot(x, t_nmo_ev1, "C1--", label="Hyperbolic moveout")
ax.plot(x, t0s_true[0] + x * 0, "C1", label="NMO-corrected")
idx = 3 * par["nx"] // 4
ax.annotate(
    "",
    xy=(x[idx], t0s_true[0]),
    xycoords="data",
    xytext=(x[idx], t_nmo_ev1[idx]),
    textcoords="data",
    fontsize=7,
    arrowprops=dict(edgecolor="w", arrowstyle="->", shrinkA=10),
)
ax.set(title="Data", xlabel="Offset [m]", ylabel="Time [s]")
cax, _ = create_colorbar(im, ax)
cax.set_ylabel("Amplitude")
ax.legend()
fig.tight_layout()
################################################################################
# NMO correction consists of applying an offset- and time-dependent shift to
# each sample of the trace in such a way that all events corresponding to the
# same reflection will be located at the same time intercept after correction.
#
# An arbitrary hyperbolic event at position :math:`(t, h)` is linked to its
# zero-offset traveltime :math:`t_0` by the following equation
#
# .. math::
#     t(x) = \sqrt{t_0^2 + \frac{h^2}{v_\text{rms}^2(t_0)}}
#
# Our strategy in applying the correction is to loop over our time axis
# (which we will associate to :math:`t_0`) and respective RMS velocities
# and, for each offset, move the sample at :math:`t(x)` to location
# :math:`t_0(x) \equiv t_0`. In the figure above, we are considering a
# single :math:`t_0 = 0.5\mathrm{s}` which would have values along the dotted curve
# (i.e., :math:`t(x)`) moved to :math:`t_0` for every offset.
#
# Notice that we need NMO velocities for each sample of our time axis.
# In this example, we actually only have 3 samples, when we need ``nt`` samples.
# In practice, we would have many more samples, but probably not one for each
# ``nt``. To resolve this issue, we will interpolate these 3 samples to all samples
# of our time axis (or, more accurately, their slownesses to preserve traveltimes).
def interpolate_vrms(t0_picks, vrms_picks, taxis, smooth=None):
    """Interpolate picked RMS velocities onto every sample of a time axis.

    The picks are converted to slowness (s/km), interpolated linearly
    (interpolating slowness rather than velocity preserves traveltimes),
    optionally smoothed, and converted back to velocity (m/s).
    """
    assert len(t0_picks) == len(vrms_picks)
    # Anchor the interpolation at both ends of the time axis, reusing the
    # first/last picked slowness beyond the picked range
    points = np.concatenate(([taxis[0]], t0_picks, [taxis[-1]]))
    slow_picks = 1000.0 / np.asarray(vrms_picks)  # slowness in s/km
    values = np.concatenate(([slow_picks[0]], slow_picks, [slow_picks[-1]]))
    slowness = griddata(points, values, taxis, method="linear")
    if smooth is not None:
        slowness = gaussian_filter(slowness, sigma=smooth)
    return 1000.0 / slowness
# Interpolate the 3 picked RMS velocities onto every sample of the time axis,
# smoothing the slownesses with a Gaussian of sigma=11 samples
vel_t = interpolate_vrms(t0s_true, vrms_true, t, smooth=11)
###############################################################################
# Plot interpolated RMS velocities which will be used for NMO
fig, ax = plt.subplots(figsize=(4, 5))
ax.plot(vel_t, t, "k", lw=3, label="Interpolated", zorder=-1)
ax.plot(vrms_true, t0s_true, "C1o", markersize=10, label="Picks")
ax.invert_yaxis()
ax.set(xlabel="RMS Velocity [m/s]", ylabel="Time [s]", ylim=[t[-1], t[0]])
ax.legend()
fig.tight_layout()
###############################################################################
# NMO from scratch
# ----------------
# We are very close to building our NMO correction, we just need to take care of
# one final issue. When moving the sample from :math:`t(x)` to :math:`t_0`, we
# know that, by definition, :math:`t_0` is always on our time axis grid. In contrast,
# :math:`t(x)` may not fall exactly on a multiple of ``dt`` (our time axis
# sampling). Suppose its nearest sample smaller than itself (floor) is ``i``.
# Instead of moving only sample ``i``, we will move both samples
# ``i`` and ``i+1`` with an appropriate weight to account for how far
# :math:`t(x)` is from ``i*dt`` and ``(i+1)*dt``.
@jit(nopython=True, fastmath=True, nogil=True, parallel=True)
def nmo_forward(data, taxis, haxis, vels_rms):
    """NMO-correct ``data`` (offset-by-time) using linear interpolation.

    For every output sample ``(ih, it0)`` the hyperbolic traveltime ``t(x)``
    is computed and the two input samples bracketing it are blended with
    weights proportional to their distance from ``t(x)``.
    """
    dt = taxis[1] - taxis[0]
    ot = taxis[0]
    nt = len(taxis)
    nh = len(haxis)
    dnmo = np.zeros_like(data)
    # Parallel outer loop on slow axis
    for ih in prange(nh):
        h = haxis[ih]
        for it0, (t0, vrms) in enumerate(zip(taxis, vels_rms)):
            # Compute NMO traveltime
            tx = np.sqrt(t0**2 + (h / vrms) ** 2)
            it_frac = (tx - ot) / dt  # Fractional index
            it_floor = floor(it_frac)
            it_ceil = it_floor + 1
            w = it_frac - it_floor
            if 0 <= it_floor and it_ceil < nt:  # it_floor and it_ceil must be valid
                # Linear interpolation
                dnmo[ih, it0] += (1 - w) * data[ih, it_floor] + w * data[ih, it_ceil]
    return dnmo
# First call triggers Numba JIT compilation; only the second call below is
# representative of steady-state runtime
dnmo = nmo_forward(data, t, x, vel_t)  # Compile and run
# Time execution
start = time()
nmo_forward(data, t, x, vel_t)
end = time()
print(f"Ran in {1e6*(end-start):.0f} μs")
###############################################################################
# Plot Data and NMO-corrected data
fig = plt.figure(figsize=(6.5, 5))
grid = ImageGrid(
    fig,
    111,
    nrows_ncols=(1, 2),
    axes_pad=0.15,
    cbar_location="right",
    cbar_mode="single",
    cbar_size="7%",
    cbar_pad=0.15,
    aspect=False,
    share_all=True,
)
im = grid[0].imshow(data.T, **opts)
grid[0].set(title="Data", xlabel="Offset [m]", ylabel="Time [s]")
grid[0].cax.colorbar(im)
grid[0].cax.set_ylabel("Amplitude")
grid[1].imshow(dnmo.T, **opts)
grid[1].set(title="NMO-corrected Data", xlabel="Offset [m]")
plt.show()
###############################################################################
# Now that we know how to compute the forward, we'll compute the adjoint pass.
# With these two functions, we can create a ``LinearOperator`` and ensure that
# it passes the dot-test.
@jit(nopython=True, fastmath=True, nogil=True, parallel=True)
def nmo_adjoint(dnmo, taxis, haxis, vels_rms):
    """Adjoint of ``nmo_forward``: spread NMO-domain samples along hyperbolas."""
    dt = taxis[1] - taxis[0]
    ot = taxis[0]
    nt = len(taxis)
    nh = len(haxis)
    data = np.zeros_like(dnmo)
    # Parallel outer loop on slow axis; use range if Numba is not installed
    for ih in prange(nh):
        h = haxis[ih]
        for it0, (t0, vrms) in enumerate(zip(taxis, vels_rms)):
            # Compute NMO traveltime
            tx = np.sqrt(t0**2 + (h / vrms) ** 2)
            it_frac = (tx - ot) / dt  # Fractional index
            it_floor = floor(it_frac)
            it_ceil = it_floor + 1
            w = it_frac - it_floor
            if 0 <= it_floor and it_ceil < nt:
                # Linear interpolation
                # In the adjoint, we must spread the same it0 to both it_floor and
                # it_ceil, since in the forward pass, both of these samples were
                # pushed onto it0
                data[ih, it_floor] += (1 - w) * dnmo[ih, it0]
                data[ih, it_ceil] += w * dnmo[ih, it0]
    return data
###############################################################################
# Finally, we can create our linear operator. To exemplify the
# class-based interface we will subclass :py:class:`pylops.LinearOperator` and
# implement the required methods: ``_matvec`` which will compute the forward and
# ``_rmatvec`` which will compute the adjoint. Note the use of the ``reshaped``
# decorator which allows us to pass ``x`` directly into our auxiliary function
# without having to do ``x.reshape(self.dims)`` and to output without having to
# call ``ravel()``.
class NMO(LinearOperator):
    """NMO correction as a PyLops linear operator.

    Forward mode applies the NMO correction (``nmo_forward``); adjoint mode
    spreads NMO-domain energy back along the hyperbolas (``nmo_adjoint``).
    Model and data share the same (offset, time) shape.
    """

    def __init__(self, taxis, haxis, vels_rms, dtype=None):
        self.taxis, self.haxis, self.vels_rms = taxis, haxis, vels_rms
        shape2d = (len(haxis), len(taxis))
        if dtype is None:
            # Promote to a dtype compatible with all input axes
            dtype = np.result_type(taxis.dtype, haxis.dtype, vels_rms.dtype)
        super().__init__(dims=shape2d, dimsd=shape2d, dtype=dtype)

    @reshaped
    def _matvec(self, x):
        # Forward: NMO-correct the gather
        return nmo_forward(x, self.taxis, self.haxis, self.vels_rms)

    @reshaped
    def _rmatvec(self, y):
        # Adjoint: spread each NMO-domain sample along its hyperbola
        return nmo_adjoint(y, self.taxis, self.haxis, self.vels_rms)
###############################################################################
# With our new ``NMO`` linear operator, we can instantiate it with our current
# example and ensure that it passes the dot test which proves that our forward
# and adjoint transforms truly are adjoints of each other.
# Instantiate the operator and verify forward/adjoint consistency
NMOOp = NMO(t, x, vel_t)
dottest(NMOOp, rtol=1e-4, verb=True)
###############################################################################
# NMO using :py:class:`pylops.Spread`
# -----------------------------------
# We learned how to implement an NMO correction and its adjoint from scratch.
# The adjoint has an interesting pattern, where energy taken from one domain
# is "spread" along a previously-defined parametric curve (the NMO hyperbola
# in this case). This pattern is very common in many algorithms, including
# Radon transform, Kirchhoff migration (also known as Total Focusing Method in
# ultrasonics) and many others.
#
# For these classes of operators, PyLops offers a :py:class:`pylops.Spread`
# constructor, which we will leverage to implement a version of the NMO correction.
# The :py:class:`pylops.Spread` operator will take a value in the "input" domain,
# and spread it along a parametric curve, defined in the "output" domain.
# In our case, the spreading operation is the *adjoint* of the NMO, so our
# "input" domain is the NMO domain, and the "output" domain is the original
# data domain.
#
# In order to use :py:class:`pylops.Spread`, we need to define the
# parametric curves. This can be done through the use of a table with shape
# :math:`(n_{x_i}, n_{t}, n_{x_o})`, where :math:`n_{x_i}` and :math:`n_{t}`
# represent the 2d dimensions of the "input" domain (NMO domain) and :math:`n_{x_o}`
# and :math:`n_{t}` the 2d dimensions of the "output" domain. In our NMO case,
# :math:`n_{x_i} = n_{x_o} = n_h` represents the number of offsets.
# Following the documentation of :py:class:`pylops.Spread`, the table will be
# used in the following manner:
#
# ``d_out[ix_o, table[ix_i, it, ix_o]] += d_in[ix_i, it]``
#
# In our case, ``ix_o = ix_i = ih``, and comparing with our NMO adjoint, ``it``
# refers to :math:`t_0` while ``table[ix, it, ix]`` should then provide the
# appropriate index for :math:`t(x)`. In our implementation we will also be
# constructing a second table containing the weights to be used for linear
# interpolation.
def create_tables(taxis, haxis, vels_rms):
    """Build the index and weight tables describing the NMO hyperbolas.

    Returns
    -------
    table : np.ndarray
        Floor time-index of ``t(x)`` for each (input offset, t0, output
        offset) triplet; NaN where the hyperbola leaves the time axis or
        where input and output offsets differ.
    dtable : np.ndarray
        Linear-interpolation weights associated with ``table``.
    """
    dt = taxis[1] - taxis[0]
    ot = taxis[0]
    nt, nh = len(taxis), len(haxis)
    # NaN entries will not be spread by Spread; initializing with np.zeros
    # would give the same result but be much slower
    table = np.full((nh, nt, nh), fill_value=np.nan)
    dtable = np.full((nh, nt, nh), fill_value=np.nan)
    for ih, h in enumerate(haxis):
        for it0, (t0, vrms) in enumerate(zip(taxis, vels_rms)):
            # Hyperbolic NMO traveltime and its fractional time index
            it_frac = (np.sqrt(t0**2 + (h / vrms) ** 2) - ot) / dt
            it_floor = floor(it_frac)
            # Both it_floor and it_floor + 1 must be valid taxis indices,
            # since two samples are blended by the linear interpolation
            if 0 <= it_floor < nt - 1:
                table[ih, it0, ih] = it_floor
                dtable[ih, it0, ih] = it_frac - it_floor
    return table, dtable
nmo_table, nmo_dtable = create_tables(t, x, vel_t)
###############################################################################
SpreadNMO = Spread(
    dims=data.shape,  # "Input" shape: NMO-ed data shape
    dimsd=data.shape,  # "Output" shape: original data shape
    table=nmo_table,  # Table of time indices
    dtable=nmo_dtable,  # Table of weights for linear interpolation
    engine="numba",  # numba or numpy
).H  # To perform NMO *correction*, we need the adjoint
dottest(SpreadNMO, rtol=1e-4)
###############################################################################
# We see it passes the dot test, but are the results right? Let's find out.
dnmo_spr = SpreadNMO @ data
# Time a second application (operator and tables are already built)
start = time()
SpreadNMO @ data
end = time()
print(f"Ran in {1e6*(end-start):.0f} μs")
###############################################################################
# Note that since v2.0, we do not need to pass a flattened array. Consequently,
# the output will not be flattened, but will have ``SpreadNMO.dimsd`` as shape.
# Plot Data and NMO-corrected data
fig = plt.figure(figsize=(6.5, 5))
grid = ImageGrid(
    fig,
    111,
    nrows_ncols=(1, 2),
    axes_pad=0.15,
    cbar_location="right",
    cbar_mode="single",
    cbar_size="7%",
    cbar_pad=0.15,
    aspect=False,
    share_all=True,
)
im = grid[0].imshow(data.T, **opts)
grid[0].set(title="Data", xlabel="Offset [m]", ylabel="Time [s]")
grid[0].cax.colorbar(im)
grid[0].cax.set_ylabel("Amplitude")
grid[1].imshow(dnmo_spr.T, **opts)
grid[1].set(title="NMO correction using Spread", xlabel="Offset [m]")
plt.show()
###############################################################################
# Not as blazing fast as our original implementation, but pretty good (try the
# "numpy" backend for comparison!). In fact, using the ``Spread`` operator for
# NMO will always have a speed disadvantage. While iterating over the table, it must
# loop over the offsets twice: one for the "input" offsets and one for the "output"
# offsets. We know they are the same for NMO, but since ``Spread`` is a generic
# operator, it does not know that. So right off the bat we can expect an 80x
# slowdown (nh = 80). We diminished this cost to about 30x by setting values where
# ``ix_i != ix_o`` to NaN, but nothing beats the custom implementation. Despite this,
# we can still produce the same result to numerical accuracy:
# Confirm the custom kernel and the Spread-based operator agree numerically
np.allclose(dnmo, dnmo_spr)
| 16,337 | 37.71564 | 85 | py |
pylops | pylops-master/examples/plot_dct.py | """
Discrete Cosine Transform
=========================
This example shows how to use the :py:class:`pylops.signalprocessing.DCT` operator.
This operator performs the Discrete Cosine Transform on a (single or multi-dimensional)
input array.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define a 1D array x of increasing numbers
n = 21
x = np.arange(n) + 1
###############################################################################
# Next we create the DCT operator with the shape of our input array as
# parameter, and we store the DCT coefficients in the array `y`. Finally, we
# perform the inverse using the adjoint of the operator, and we obtain the
# original input signal.
DOp = pylops.signalprocessing.DCT(dims=x.shape)
y = DOp @ x
# The adjoint recovers the input (see comment above)
xadj = DOp.H @ y
# Plot the input, its DCT coefficients, and the signal reconstructed by the
# adjoint (inverse) transform
plt.figure(figsize=(8, 5))
plt.plot(x, "k", label="input array")
plt.plot(y, "r", label="transformed array")
# Fix: this curve is the reconstruction, not the forward transform — the
# original duplicated the "transformed array" legend label
plt.plot(xadj, "--b", label="inverse transformed array")
plt.title("1D Discrete Cosine Transform")
plt.legend()
plt.tight_layout()
################################################################################
# Next we apply the DCT to a sine wave
cycles = 2
resolution = 100
length = np.pi * 2 * cycles
# Two full periods sampled with 100 points
s = np.sin(np.arange(0, length, length / resolution))
DOp = pylops.signalprocessing.DCT(dims=s.shape)
y = DOp @ s
plt.figure(figsize=(8, 5))
plt.plot(s, "k", label="sine wave")
plt.plot(y, "r", label="dct of sine wave")
plt.title("Discrete Cosine Transform of Sine wave")
plt.legend()
plt.tight_layout()
###############################################################################
# The Discrete Cosine Transform is commonly used in lossy image compression
# (i.e., JPEG encoding) due to its strong energy compaction nature. Here is an
# example of DCT being used for image compression.
# Note: This code is just an example and may not provide the best results
# for all images. You may need to adjust the threshold value to get better
# results.
img = np.load("../testdata/python.npy")[::5, ::5, 0]
DOp = pylops.signalprocessing.DCT(dims=img.shape)
dct_img = DOp @ img
# Set a threshold for the DCT coefficients to zero out
# (keep only the largest 30% of coefficients by magnitude)
threshold = np.percentile(np.abs(dct_img), 70)
dct_img[np.abs(dct_img) < threshold] = 0
# Inverse DCT to get back the image
compressed_img = DOp.H @ dct_img
# Plot original and compressed images
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax[0].imshow(img, cmap="gray")
ax[0].set_title("Original Image")
ax[1].imshow(compressed_img, cmap="gray")
ax[1].set_title("Compressed Image")
plt.tight_layout()
| 2,648 | 29.802326 | 87 | py |
pylops | pylops-master/examples/plot_imag.py | """
Imag
====
This example shows how to use the :py:class:`pylops.basicoperators.Imag`
operator.
This operator returns the imaginary part of the data as a real value in
forward mode, and the real part of the model as an imaginary value in
adjoint mode (with zero real part).
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define an Imag operator :math:`\mathbf{\Im}` to extract the imaginary
# component of the input.
M = 5
# Complex input: real part increasing, imaginary part decreasing
x = np.arange(M) + 1j * np.arange(M)[::-1]
Rop = pylops.basicoperators.Imag(M, dtype="complex128")
y = Rop * x
xadj = Rop.H * y
_, axs = plt.subplots(1, 3, figsize=(10, 4))
axs[0].plot(np.real(x), lw=2, label="Real")
axs[0].plot(np.imag(x), lw=2, label="Imag")
axs[0].legend()
axs[0].set_title("Input")
axs[1].plot(np.real(y), lw=2, label="Real")
axs[1].plot(np.imag(y), lw=2, label="Imag")
axs[1].legend()
axs[1].set_title("Forward of Input")
axs[2].plot(np.real(xadj), lw=2, label="Real")
axs[2].plot(np.imag(xadj), lw=2, label="Imag")
axs[2].legend()
axs[2].set_title("Adjoint of Forward")
plt.tight_layout()
| 1,169 | 26.209302 | 79 | py |
pylops | pylops-master/examples/plot_causalintegration.py | r"""
Causal Integration
==================
This example shows how to use the :py:class:`pylops.CausalIntegration`
operator to integrate an input signal (in forward mode) and to apply a smooth,
regularized derivative (in inverse mode). This is a very interesting
by-product of this operator which may result very useful when the data
to which you want to apply a numerical derivative is noisy.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start with a 1D example. Define the input parameters: number of samples
# of input signal (``nt``), sampling step (``dt``) as well as the input
# signal which will be equal to :math:`x(t)=\sin(t)`:
nt = 81
dt = 0.3
# Time axis and input signal
t = np.arange(nt) * dt
x = np.sin(t)
###############################################################################
# We can now create our causal integration operator and apply it to the input
# signal. We can also compute the analytical integral
# :math:`y(t)=\int \sin(t)\,\mathrm{d}t=-\cos(t)` and compare the results. We can also
# invert the integration operator and by remembering that this is equivalent
# to a first order derivative, we will compare our inverted model with the
# result obtained by simply applying the :py:class:`pylops.FirstDerivative`
# forward operator to the same data.
#
# Note that, as explained in details in :py:class:`pylops.CausalIntegration`,
# integration has no unique solution, as any constant :math:`c` can be added
# to the integrated signal :math:`y`, for example if :math:`x(t)=t^2` the
# :math:`y(t) = \int t^2 \,\mathrm{d}t = \frac{t^3}{3} + c`. We thus subtract first
# sample from the analytical integral to obtain the same result as the
# numerical one.
Cop = pylops.CausalIntegration(nt, sampling=dt, kind="half")
# Analytical integral, shifted so that it starts at zero (see comment above)
yana = -np.cos(t) + np.cos(t[0])
y = Cop * x
xinv = Cop / y
# Numerical derivative
Dop = pylops.FirstDerivative(nt, sampling=dt)
xder = Dop * y
# Visualize data and inversion
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, yana, "r", lw=5, label="analytic integration")
axs[0].plot(t, y, "--g", lw=3, label="numerical integration")
axs[0].legend()
axs[0].set_title("Causal integration")
axs[1].plot(t, x, "k", lw=8, label="original")
axs[1].plot(t[1:-1], xder[1:-1], "r", lw=5, label="numerical")
axs[1].plot(t, xinv, "--g", lw=3, label="inverted")
axs[1].legend()
axs[1].set_title("Inverse causal integration = Derivative")
plt.tight_layout()
###############################################################################
# As expected we obtain the same result. Let's see what happens if we now
# add some random noise to our data.
# Add noise (zero-mean Gaussian, standard deviation 0.4)
yn = y + np.random.normal(0, 4e-1, y.shape)
# Numerical derivative
Dop = pylops.FirstDerivative(nt, sampling=dt)
xder = Dop * yn
# Regularized derivative
Rop = pylops.SecondDerivative(nt)
xreg = pylops.optimization.leastsquares.regularized_inversion(
    Cop, yn, [Rop], epsRs=[1e0], **dict(iter_lim=100, atol=1e-5)
)[0]
# Preconditioned derivative
Sop = pylops.Smoothing1D(41, nt)
xp = pylops.optimization.leastsquares.preconditioned_inversion(
    Cop, yn, Sop, **dict(iter_lim=10, atol=1e-3)
)[0]
# Visualize data and inversion
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, y, "k", lw=3, label="data")
axs[0].plot(t, yn, "--g", lw=3, label="noisy data")
axs[0].legend()
axs[0].set_title("Causal integration")
axs[1].plot(t, x, "k", lw=8, label="original")
axs[1].plot(t[1:-1], xder[1:-1], "r", lw=3, label="numerical derivative")
axs[1].plot(t, xreg, "g", lw=3, label="regularized")
axs[1].plot(t, xp, "m", lw=3, label="preconditioned")
axs[1].legend()
axs[1].set_title("Inverse causal integration")
plt.tight_layout()
###############################################################################
# We can see here the great advantage of framing our numerical derivative
# as an inverse problem, and more specifically as the inverse of the
# causal integration operator.
#
# Let's conclude with a 2d example where again the integration/derivative will
# be performed along the first axis
nt, nx = 41, 11
dt = 0.3
ot = 0
t = np.arange(nt) * dt + ot
# 2D model: the same sinusoid replicated along the second axis
x = np.outer(np.sin(t), np.ones(nx))
Cop = pylops.CausalIntegration(dims=(nt, nx), sampling=dt, axis=0, kind="half")
y = Cop * x
yn = y + np.random.normal(0, 4e-1, y.shape)
# Numerical derivative
Dop = pylops.FirstDerivative(dims=(nt, nx), axis=0, sampling=dt)
xder = Dop * yn
# Regularized derivative
Rop = pylops.Laplacian(dims=(nt, nx))
xreg = pylops.optimization.leastsquares.regularized_inversion(
    Cop, yn.ravel(), [Rop], epsRs=[1e0], **dict(iter_lim=100, atol=1e-5)
)[0]
xreg = xreg.reshape(nt, nx)
# Preconditioned derivative
Sop = pylops.Smoothing2D((11, 21), dims=(nt, nx))
xp = pylops.optimization.leastsquares.preconditioned_inversion(
    Cop, yn.ravel(), Sop, **dict(iter_lim=10, atol=1e-2)
)[0]
xp = xp.reshape(nt, nx)
# Visualize data and inversion
# (symmetric color scale shared by all panels)
vmax = 2 * np.max(np.abs(x))
fig, axs = plt.subplots(2, 3, figsize=(18, 12))
axs[0][0].imshow(x, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[0][0].set_title("Model")
axs[0][0].axis("tight")
axs[0][1].imshow(y, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[0][1].set_title("Data")
axs[0][1].axis("tight")
axs[0][2].imshow(yn, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[0][2].set_title("Noisy data")
axs[0][2].axis("tight")
axs[1][0].imshow(xder, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[1][0].set_title("Numerical derivative")
axs[1][0].axis("tight")
axs[1][1].imshow(xreg, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[1][1].set_title("Regularized")
axs[1][1].axis("tight")
axs[1][2].imshow(xp, cmap="seismic", vmin=-vmax, vmax=vmax)
axs[1][2].set_title("Preconditioned")
axs[1][2].axis("tight")
plt.tight_layout()
# Visualize data and inversion at a chosen xlocation (the central trace)
fig, axs = plt.subplots(1, 2, figsize=(18, 5))
axs[0].plot(t, y[:, nx // 2], "k", lw=3, label="data")
axs[0].plot(t, yn[:, nx // 2], "--g", lw=3, label="noisy data")
axs[0].legend()
axs[0].set_title("Causal integration")
axs[1].plot(t, x[:, nx // 2], "k", lw=8, label="original")
axs[1].plot(t, xder[:, nx // 2], "r", lw=3, label="numerical derivative")
axs[1].plot(t, xreg[:, nx // 2], "g", lw=3, label="regularized")
axs[1].plot(t, xp[:, nx // 2], "m", lw=3, label="preconditioned")
axs[1].legend()
axs[1].set_title("Inverse causal integration")
plt.tight_layout()
| 6,401 | 34.966292 | 86 | py |
pylops | pylops-master/examples/plot_smoothing2d.py | """
2D Smoothing
============
This example shows how to use the :py:class:`pylops.Smoothing2D` operator
to smooth a multi-dimensional input signal along two given axes.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Define the input parameters: number of samples of input signal (``N`` and ``M``) and
# length of the smoothing filter regression coefficients
# (:math:`n_{smooth,1}` and :math:`n_{smooth,2}`). In this first case the input
# signal is one at the center and zero elsewhere.
N, M = 11, 21
nsmooth1, nsmooth2 = 5, 3
A = np.zeros((N, M))
# Unit spike in the middle of the model
A[5, 10] = 1
Sop = pylops.Smoothing2D(nsmooth=[nsmooth1, nsmooth2], dims=[N, M], dtype="float64")
B = Sop * A
###############################################################################
# After applying smoothing, we will also try to invert it.
Aest = (Sop / B.ravel()).reshape(Sop.dims)
fig, axs = plt.subplots(1, 3, figsize=(10, 3))
im = axs[0].imshow(A, interpolation="nearest", vmin=0, vmax=1)
axs[0].axis("tight")
axs[0].set_title("Model")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(B, interpolation="nearest", vmin=0, vmax=1)
axs[1].axis("tight")
axs[1].set_title("Data")
plt.colorbar(im, ax=axs[1])
im = axs[2].imshow(Aest, interpolation="nearest", vmin=0, vmax=1)
axs[2].axis("tight")
axs[2].set_title("Estimated model")
plt.colorbar(im, ax=axs[2])
plt.tight_layout()
| 1,462 | 30.12766 | 86 | py |
pylops | pylops-master/examples/plot_phaseshift.py | """
PhaseShift operator
====================
This example shows how to use the :class:`pylops.waveeqprocessing.PhaseShift`
operator to perform frequency-wavenumber shift of an input multi-dimensional
signal. Such a procedure is applied in a variety of disciplines including
geophysics, medical imaging and non-destructive testing.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
############################################
# Let's first create a synthetic dataset composed of a number of hyperbolas
par = {
    "ox": -300,
    "dx": 20,
    "nx": 31,
    "oy": -200,
    "dy": 20,
    "ny": 21,
    "ot": 0,
    "dt": 0.004,
    "nt": 201,
    "f0": 20,
    "nfmax": 210,
}
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(np.arange(41) * par["dt"], f0=par["f0"])[0]
# Three hyperbolic events: rms velocities, zero-offset times and amplitudes
vrms = [900, 1300, 1800]
t0 = [0.2, 0.3, 0.6]
amp = [1.0, 0.6, -2.0]
_, m = pylops.utils.seismicevents.hyperbolic2d(x, t, t0, vrms, amp, wav)
############################################
# We can now apply a taper at the edges and also pad the input to avoid
# artifacts during the phase shift
pad = 11
taper = pylops.utils.tapers.taper2d(par["nt"], par["nx"], 5)
mpad = np.pad(m * taper, ((pad, pad), (0, 0)), mode="constant")
############################################
# We perform now forward propagation in a constant velocity :math:`v=1500` for
# a depth of :math:`z_{prop} = 100 m`. We should expect the hyperbolas to move
# forward in time and become flatter.
vel = 1500.0
zprop = 100
# Positive-frequency axis and (shifted) wavenumber axis of the padded model
freq = np.fft.rfftfreq(par["nt"], par["dt"])
kx = np.fft.fftshift(np.fft.fftfreq(par["nx"] + 2 * pad, par["dx"]))
Pop = pylops.waveeqprocessing.PhaseShift(vel, zprop, par["nt"], freq, kx)
mdown = Pop * mpad.T.ravel()
############################################
# We now take the forward propagated wavefield and apply backward propagation,
# which is in this case simply the adjoint of our operator.
# We should expect the hyperbolas to move backward in time and show the same
# traveltime as the original dataset. Of course, as we are only performing the
# adjoint operation we should expect some small differences between this
# wavefield and the input dataset.
mup = Pop.H * mdown.ravel()
# Strip the padding and discard the (numerically negligible) imaginary part
mdown = np.real(mdown.reshape(par["nt"], par["nx"] + 2 * pad)[:, pad:-pad])
mup = np.real(mup.reshape(par["nt"], par["nx"] + 2 * pad)[:, pad:-pad])
fig, axs = plt.subplots(1, 3, figsize=(10, 6), sharey=True)
fig.suptitle("2D Phase shift", fontsize=12, fontweight="bold")
axs[0].imshow(
    m.T,
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[0].set_title("Original data")
axs[1].imshow(
    mdown,
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1].set_xlabel(r"$x(m)$")
axs[1].set_title("Forward propagation")
axs[2].imshow(
    mup,
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[2].set_xlabel(r"$x(m)$")
axs[2].set_title("Backward propagation")
plt.tight_layout()
############################################
# Finally we perform the same for a 3-dimensional signal
_, m = pylops.utils.seismicevents.hyperbolic3d(x, y, t, t0, vrms, vrms, amp, wav)
pad = 11
taper = pylops.utils.tapers.taper3d(par["nt"], (par["ny"], par["nx"]), (3, 3))
# Pad both spatial axes before transforming
mpad = np.pad(m * taper, ((pad, pad), (pad, pad), (0, 0)), mode="constant")
kx = np.fft.fftshift(np.fft.fftfreq(par["nx"] + 2 * pad, par["dx"]))
ky = np.fft.fftshift(np.fft.fftfreq(par["ny"] + 2 * pad, par["dy"]))
Pop = pylops.waveeqprocessing.PhaseShift(vel, zprop, par["nt"], freq, kx, ky)
mdown = Pop * mpad.transpose(2, 1, 0).ravel()
mup = Pop.H * mdown.ravel()
mdown = np.real(
    mdown.reshape(par["nt"], par["nx"] + 2 * pad, par["ny"] + 2 * pad)[
        :, pad:-pad, pad:-pad
    ]
)
mup = np.real(
    mup.reshape(par["nt"], par["nx"] + 2 * pad, par["ny"] + 2 * pad)[
        :, pad:-pad, pad:-pad
    ]
)
fig, axs = plt.subplots(2, 3, figsize=(10, 12), sharey=True)
fig.suptitle("3D Phase shift", fontsize=12, fontweight="bold")
# Top row: slices at the central x index; bottom row: at the central y index
axs[0][0].imshow(
    m[:, par["nx"] // 2].T,
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0][0].set_xlabel(r"$y(m)$")
axs[0][0].set_ylabel(r"$t(s)$")
axs[0][0].set_title("Original data")
axs[0][1].imshow(
    mdown[:, par["nx"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0][1].set_xlabel(r"$y(m)$")
axs[0][1].set_title("Forward propagation")
axs[0][2].imshow(
    mup[:, par["nx"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0][2].set_xlabel(r"$y(m)$")
axs[0][2].set_title("Backward propagation")
axs[1][0].imshow(
    m[par["ny"] // 2].T,
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1][0].set_xlabel(r"$x(m)$")
axs[1][0].set_ylabel(r"$t(s)$")
axs[1][0].set_title("Original data")
axs[1][1].imshow(
    mdown[:, :, par["ny"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1][1].set_xlabel(r"$x(m)$")
axs[1][1].set_title("Forward propagation")
axs[1][2].imshow(
    mup[:, :, par["ny"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[1][2].set_xlabel(r"$x(m)$")
axs[1][2].set_title("Backward propagation")
plt.tight_layout()
| 5,968 | 27.15566 | 81 | py |
pylops | pylops-master/examples/plot_describe.py | r"""
Describe
========
This example focuses on the usage of the :func:`pylops.utils.describe.describe`
method, which allows expressing any PyLops operator into its equivalent
mathematical representation. This is done with the aid of
`sympy <https://docs.sympy.org>`_, a Python library for symbolic computing
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
from pylops.utils.describe import describe
plt.close("all")
###############################################################################
# Let's start by defining 3 PyLops operators. Note that once an operator is
# defined we can attach a name to the operator; by doing so, this name will
# be used in the mathematical description of the operator. Alternatively,
# the describe method will randomly choose a name for us.
A = pylops.MatrixMult(np.ones((10, 5)))
A.name = "A"
B = pylops.Diagonal(np.ones(5))
B.name = "A"  # intentionally the same name as A (see name-clash demo below)
C = pylops.MatrixMult(np.ones((10, 5)))
# Simple operator
describe(A)
# Transpose
AT = A.T
describe(AT)
# Adjoint
AH = A.H
describe(AH)
# Scaled
A3 = 3 * A
describe(A3)
# Sum
D = A + C
describe(D)
###############################################################################
# So far so good. Let's see what happens if we accidentally call two different
# operators with the same name. You will see that PyLops catches that and
# changes the name for us (and provides us with a nice warning!)
D = A * B
describe(D)
###############################################################################
# We can move now to something more complicated using various composition
# operators
H = pylops.HStack((A * B, C * B))
describe(H)
H = pylops.Block([[A * B, C], [A, A]])
describe(H)
###############################################################################
# Finally, note that you can get the best out of the describe method if working
# inside a Jupyter notebook. There, the mathematical expression will be
# rendered using a LaTeX format! See an example `notebook <https://github.com/mrava87/pylops_notebooks/blob/master/developement/Sympy.ipynb>`_.
| 2,071 | 28.183099 | 143 | py |
pylops | pylops-master/examples/plot_sum.py | """
Sum
===
This example shows how to use the :py:class:`pylops.Sum` operator to stack
values along an axis of a multi-dimensional array
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by defining a 2-dimensional data
ny, nx = 5, 7
x = (np.arange(ny * nx)).reshape(ny, nx)
###############################################################################
# We can now create the operator and perform forward and adjoint
Sop = pylops.Sum(dims=(ny, nx), axis=0)
y = Sop * x
xadj = Sop.H * y
# Three panels: model x, summed data y, and the adjoint xadj (which
# broadcasts y back over the summed axis)
gs = pltgs.GridSpec(1, 7)
fig = plt.figure(figsize=(7, 4))
ax = plt.subplot(gs[0, 0:3])
im = ax.imshow(x, cmap="rainbow", vmin=0, vmax=ny * nx)
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks(np.arange(nx - 1) + 0.5)
ax.set_yticks(np.arange(ny - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 3])
ax.imshow(y[:, np.newaxis], cmap="rainbow", vmin=0, vmax=ny * nx)
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(nx - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 4:])
ax.imshow(xadj, cmap="rainbow", vmin=0, vmax=ny * nx)
ax.set_title("xadj", size=20, fontweight="bold")
ax.set_xticks(np.arange(nx - 1) + 0.5)
ax.set_yticks(np.arange(ny - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
plt.tight_layout()
| 1,955 | 30.548387 | 79 | py |
pylops | pylops-master/examples/plot_cgls.py | r"""
CGLS and LSQR Solvers
=====================
This example shows how to use the :py:func:`pylops.optimization.leastsquares.cgls`
and :py:func:`pylops.optimization.leastsquares.lsqr` PyLops solvers
to minimize the following cost function:
.. math::
J = \| \mathbf{y} - \mathbf{Ax} \|_2^2 + \epsilon \| \mathbf{x} \|_2^2
Note that the LSQR solver behaves in the same way as the scipy's
:py:func:`scipy.sparse.linalg.lsqr` solver. However, our solver is also able
to operate on cupy arrays and perform computations on a GPU.
"""
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
warnings.filterwarnings("ignore")
###############################################################################
# Let's define a matrix :math:`\mathbf{A}` of size (``N`` and ``M``) and
# fill the matrix with random numbers
N, M = 20, 10
A = np.random.normal(0, 1, (N, M))
Aop = pylops.MatrixMult(A, dtype="float64")
x = np.ones(M)  # true model to be recovered by the solvers
###############################################################################
# We can now use the cgls solver to invert this matrix
y = Aop * x
# cgls returns (solution, stop flag, #iterations, residual norms, cost history)
xest, istop, nit, r1norm, r2norm, cost_cgls = pylops.optimization.basic.cgls(
    Aop, y, x0=np.zeros_like(x), niter=10, tol=1e-10, show=True
)
print(f"x= {x}")
print(f"cgls solution xest= {xest}")
###############################################################################
# And the lsqr solver to invert this matrix
y = Aop * x
(
    xest,
    istop,
    itn,
    r1norm,
    r2norm,
    anorm,
    acond,
    arnorm,
    xnorm,
    var,
    cost_lsqr,
) = pylops.optimization.basic.lsqr(Aop, y, x0=np.zeros_like(x), niter=10, show=True)
print(f"x= {x}")
print(f"lsqr solution xest= {xest}")
###############################################################################
# Finally we show that the L2 norm of the residual of the two solvers decays
# in the same way, as LSQR is algebraically equivalent to CG on the normal
# equations and CGLS
plt.figure(figsize=(12, 3))
plt.plot(cost_cgls, "k", lw=2, label="CGLS")
plt.plot(cost_lsqr, "--r", lw=2, label="LSQR")
plt.title("Cost functions")
plt.legend()
plt.tight_layout()
###############################################################################
# Note that while we used a dense matrix here, any other linear operator
# can be fed to cgls and lsqr as is the case for any other PyLops solver.
| 2,394 | 26.848837 | 84 | py |
pylops | pylops-master/examples/plot_multiproc.py | """
Operators with Multiprocessing
==============================
This example shows how to perform a scalability test for one of the PyLops operators
that use ``multiprocessing`` to spawn multiple processes. Operators that
support such a feature are :class:`pylops.basicoperators.VStack`,
:class:`pylops.basicoperators.HStack`,
:class:`pylops.basicoperators.BlockDiagonal`, and
:class:`pylops.basicoperators.Block`.
In this example we will consider the BlockDiagonal operator which contains
:class:`pylops.basicoperators.MatrixMult` operators along its main diagonal.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by creating N MatrixMult operators and the BlockDiag operator
N = 100  # size of each square matrix on the diagonal
Nops = 32  # number of blocks on the diagonal
Ops = [pylops.MatrixMult(np.random.normal(0.0, 1.0, (N, N))) for _ in range(Nops)]
Op = pylops.BlockDiag(Ops, nproc=1)  # start serial; worker count is varied below
###############################################################################
# We can now perform a scalability test on the forward operation
workers = [2, 3, 4]  # numbers of processes to benchmark
compute_times, speedup = pylops.utils.multiproc.scalability_test(
    Op, np.ones(Op.shape[1]), workers=workers, forward=True
)
plt.figure(figsize=(12, 3))
plt.plot(workers, speedup, "ko-")
plt.xlabel("# Workers")
plt.ylabel("Speed Up")
plt.title("Forward scalability test")
plt.tight_layout()
###############################################################################
# And likewise on the adjoint operation
compute_times, speedup = pylops.utils.multiproc.scalability_test(
    Op, np.ones(Op.shape[0]), workers=workers, forward=False
)
plt.figure(figsize=(12, 3))
plt.plot(workers, speedup, "ko-")
plt.xlabel("# Workers")
plt.ylabel("Speed Up")
plt.title("Adjoint scalability test")
plt.tight_layout()
###############################################################################
# Note that we have not tested here the case with 1 worker. In this specific
# case, since the computations are very small, the overhead of spawning processes
# is actually dominating the time of computations and so computing the
# forward and adjoint operations with a single worker is more efficient. We
# hope that this example can serve as a basis to inspect the scalability of
# multiprocessing-enabled operators and choose the best number of processes.
| 2,365 | 37.786885 | 82 | py |
pylops | pylops-master/examples/plot_wavelet.py | """
Wavelet transform
=================
This example shows how to use the :py:class:`pylops.DWT` and
:py:class:`pylops.DWT2D` operators to perform 1- and 2-dimensional DWT.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start with a 1-dimensional signal. We apply the 1-dimensional
# wavelet transform, keep only the first 25 coefficients and perform the
# inverse transform.
nt = 200
dt = 0.004
t = np.arange(nt) * dt
freqs = [10, 7, 9]
amps = [1, -2, 0.5]
# Superposition of three sinusoids with the frequencies/amplitudes above
x = np.sum([amp * np.sin(2 * np.pi * f * t) for (f, amp) in zip(freqs, amps)], axis=0)
Wop = pylops.signalprocessing.DWT(nt, wavelet="dmey", level=5)
y = Wop * x
yf = y.copy()
yf[25:] = 0  # keep only the first 25 wavelet coefficients
xinv = Wop.H * yf
plt.figure(figsize=(8, 2))
plt.plot(y, "k", label="Full")
plt.plot(yf, "r", label="Extracted")
plt.title("Discrete Wavelet Transform")
plt.tight_layout()
plt.figure(figsize=(8, 2))
plt.plot(x, "k", label="Original")
plt.plot(xinv, "r", label="Reconstructed")
plt.title("Reconstructed signal")
plt.tight_layout()
###############################################################################
# We repeat the same procedure with an image. In this case the 2-dimensional
# DWT will be applied instead. Only a quarter of the coefficients of the DWT
# will be retained in this case.
im = np.load("../testdata/python.npy")[::5, ::5, 0]
Nz, Nx = im.shape
Wop = pylops.signalprocessing.DWT2D((Nz, Nx), wavelet="haar", level=5)
y = Wop * im
yf = y.copy()
yf.flat[y.size // 4 :] = 0  # zero out the last three quarters of the coefficients
iminv = Wop.H * yf
fig, axs = plt.subplots(2, 2, figsize=(6, 6))
axs[0, 0].imshow(im, cmap="gray")
axs[0, 0].set_title("Image")
axs[0, 0].axis("tight")
axs[0, 1].imshow(y, cmap="gray_r", vmin=-1e2, vmax=1e2)
axs[0, 1].set_title("DWT2 coefficients")
axs[0, 1].axis("tight")
axs[1, 0].imshow(iminv, cmap="gray")
axs[1, 0].set_title("Reconstructed image")
axs[1, 0].axis("tight")
axs[1, 1].imshow(yf, cmap="gray_r", vmin=-1e2, vmax=1e2)
axs[1, 1].set_title("DWT2 coefficients (zeroed)")
axs[1, 1].axis("tight")
plt.tight_layout()
| 2,101 | 29.028571 | 86 | py |
pylops | pylops-master/examples/plot_regr.py | r"""
Polynomial Regression
=====================
This example shows how to use the :py:class:`pylops.Regression` operator
to perform *Polynomial regression analysis*.
In short, polynomial regression is the problem of finding the best fitting
coefficients for the following equation:
.. math::
y_i = \sum_{n=0}^\text{order} x_n t_i^n \qquad \forall i=0,1,\ldots,N-1
As we can express this problem in a matrix form:
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
our solution can be obtained by solving the following optimization problem:
.. math::
J= ||\mathbf{y} - \mathbf{A} \mathbf{x}||_2
See documentation of :py:class:`pylops.Regression` for more detailed
definition of the forward problem.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
np.random.seed(10)
###############################################################################
# Define the input parameters: number of samples along the t-axis (``N``),
# order (``order``), regression coefficients (``x``), and standard deviation
# of noise to be added to data (``sigma``).
N = 30
order = 3
x = np.array([1.0, 0.05, 0.0, -0.01])  # one coefficient per monomial, degree 0..order
sigma = 1
###############################################################################
# Let's create the time axis and initialize the
# :py:class:`pylops.Regression` operator
t = np.arange(N, dtype="float64") - N // 2  # centered axis for a balanced fit
PRop = pylops.Regression(t, order=order, dtype="float64")
###############################################################################
# We can then apply the operator in forward mode to compute our data points
# along the x-axis (``y``). We will also generate some random gaussian noise
# and create a noisy version of the data (``yn``).
y = PRop * x
yn = y + np.random.normal(0, sigma, N)
###############################################################################
# We are now ready to solve our problem. As we are using an operator from the
# :py:class:`pylops.LinearOperator` family, we can simply use ``/``,
# which in this case will solve the system by means of an iterative solver
# (i.e., :py:func:`scipy.sparse.linalg.lsqr`).
xest = PRop / y
xnest = PRop / yn
###############################################################################
# Let's plot the best fitting curve for the case of noise free and noisy data
plt.figure(figsize=(5, 7))
plt.plot(
    t,
    PRop * x,
    "k",
    lw=4,
    label=r"true: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (x[0], x[1], x[2], x[3]),
)
plt.plot(
    t,
    PRop * xest,
    "--r",
    lw=4,
    label="est noise-free: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (xest[0], xest[1], xest[2], xest[3]),
)
plt.plot(
    t,
    PRop * xnest,
    "--g",
    lw=4,
    label="est noisy: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (xnest[0], xnest[1], xnest[2], xnest[3]),
)
plt.scatter(t, y, c="r", s=70)
plt.scatter(t, yn, c="g", s=70)
plt.legend(fontsize="x-small")
plt.tight_layout()
###############################################################################
# We consider now the case where some of the observations have large errors.
# Such elements are generally referred to as *outliers* and can affect the
# quality of the least-squares solution if not treated with care. In this
# example we will see how using a L1 solver such as
# :py:func:`pylops.optimization.sparsity.IRLS` can dramatically improve the
# quality of the estimated regression coefficients.
# Add outliers
yn[1] += 40
yn[N - 2] -= 20
# IRLS
nouter = 20  # max number of IRLS outer iterations
epsR = 1e-2  # residual damping
epsI = 0  # model damping
tolIRLS = 1e-2  # stop when the model update falls below this tolerance
xnest = PRop / yn  # plain L2 solution, now polluted by the outliers
xirls, nouter = pylops.optimization.sparsity.irls(
    PRop,
    yn,
    nouter=nouter,
    threshR=False,
    epsR=epsR,
    epsI=epsI,
    tolIRLS=tolIRLS,
)
print(f"IRLS converged at {nouter} iterations...")
plt.figure(figsize=(5, 7))
plt.plot(
    t,
    PRop * x,
    "k",
    lw=4,
    label=r"true: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (x[0], x[1], x[2], x[3]),
)
plt.plot(
    t,
    PRop * xnest,
    "--r",
    lw=4,
    label=r"L2: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (xnest[0], xnest[1], xnest[2], xnest[3]),
)
plt.plot(
    t,
    PRop * xirls,
    "--g",
    lw=4,
    label=r"IRLS: $x_0$ = %.2f, $x_1$ = %.2f, "
    r"$x_2$ = %.2f, $x_3$ = %.2f" % (xirls[0], xirls[1], xirls[2], xirls[3]),
)
plt.scatter(t, y, c="r", s=70)
plt.scatter(t, yn, c="g", s=70)
plt.legend(fontsize="x-small")
plt.tight_layout()
| 4,506 | 27.891026 | 80 | py |
pylops | pylops-master/examples/plot_wavest.py | r"""
Wavelet estimation
==================
This example shows how to use the :py:class:`pylops.avo.prestack.PrestackWaveletModelling` to
estimate a wavelet from pre-stack seismic data. This problem can be written in mathematical
form as:
.. math::
\mathbf{d}= \mathbf{G} \mathbf{w}
where :math:`\mathbf{G}` is an operator that convolves an angle-variant reflectivity series
with the wavelet :math:`\mathbf{w}` that we aim to retrieve.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import filtfilt
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by creating the input elastic property profiles and wavelet
nt0 = 501
dt0 = 0.004
ntheta = 21
t0 = np.arange(nt0) * dt0
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)
# Elastic property profiles
vp = (
2000
+ 5 * np.arange(nt0)
+ 2 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 160, nt0))
)
vs = 600 + vp / 2 + 3 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 100, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 120, nt0))
vp[201:] += 1500
vs[201:] += 500
rho[201:] += 100
# Wavelet
ntwav = 41
wavoff = 10
wav, twav, wavc = ricker(t0[: ntwav // 2 + 1], 20)
wav_phase = np.hstack((wav[wavoff:], np.zeros(wavoff)))
# vs/vp profile
vsvp = vs / vp
# Model
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)
fig, axs = plt.subplots(1, 3, figsize=(9, 7), sharey=True)
axs[0].plot(vp, t0, "k", lw=3)
axs[0].set(xlabel="[m/s]", ylabel=r"$t$ [s]", ylim=[t0[0], t0[-1]], title="Vp")
axs[0].grid()
axs[1].plot(vp / vs, t0, "k", lw=3)
axs[1].set(title="Vp/Vs")
axs[1].grid()
axs[2].plot(rho, t0, "k", lw=3)
axs[2].set(xlabel="[kg/m³]", title="Rho")
axs[2].invert_yaxis()
axs[2].grid()
plt.tight_layout()
###############################################################################
# We create now the operators to model a synthetic pre-stack seismic gather
# with a zero-phase as well as a mixed phase wavelet.
# Create operators
# The zero-phase and phased gathers are modelled with the same linearized
# operator (identical model, angles, wavelet length and linearization), so
# build it once and alias it instead of constructing two identical copies.
Wavesop = pylops.avo.prestack.PrestackWaveletModelling(
    m, theta, nwav=ntwav, wavc=wavc, vsvp=vsvp, linearization="akirich"
)
Wavesop_phase = Wavesop
###############################################################################
# Let's apply those operators to the elastic model and create some synthetic data
d = (Wavesop * wav).reshape(ntheta, nt0).T
d_phase = (Wavesop_phase * wav_phase).reshape(ntheta, nt0).T
# add noise
dn = d + np.random.normal(0, 3e-2, d.shape)
fig, axs = plt.subplots(1, 3, figsize=(13, 7), sharey=True)
axs[0].imshow(
d, cmap="gray", extent=(theta[0], theta[-1], t0[-1], t0[0]), vmin=-0.1, vmax=0.1
)
axs[0].axis("tight")
axs[0].set(xlabel=r"$\theta$ [°]", ylabel=r"$t$ [s]")
axs[0].set_title("Data with zero-phase wavelet", fontsize=10)
axs[1].imshow(
d_phase,
cmap="gray",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-0.1,
vmax=0.1,
)
axs[1].axis("tight")
axs[1].set_title("Data with non-zero-phase wavelet", fontsize=10)
axs[1].set_xlabel(r"$\theta$ [°]")
axs[2].imshow(
dn, cmap="gray", extent=(theta[0], theta[-1], t0[-1], t0[0]), vmin=-0.1, vmax=0.1
)
axs[2].axis("tight")
axs[2].set_title("Noisy Data with zero-phase wavelet", fontsize=10)
axs[2].set_xlabel(r"$\theta$ [°]")
plt.tight_layout()
###############################################################################
# We can invert the data. First we will consider noise-free data, subsequently
# we will add some noise and add a regularization terms in the inversion
# process to obtain a well-behaved wavelet also under noise conditions.
wav_est = Wavesop / d.T.ravel()
wav_phase_est = Wavesop_phase / d_phase.T.ravel()
wavn_est = Wavesop / dn.T.ravel()
# Create regularization operator
D2op = pylops.SecondDerivative(ntwav, dtype="float64")
# Invert for wavelet
(
wavn_reg_est,
istop,
itn,
r1norm,
r2norm,
) = pylops.optimization.leastsquares.regularized_inversion(
Wavesop,
dn.T.ravel(),
[D2op],
epsRs=[np.sqrt(0.1)],
**dict(damp=np.sqrt(1e-4), iter_lim=200, show=0)
)
###############################################################################
# As expected, the regularization helps to retrieve a smooth wavelet
# even under noisy conditions.
# sphinx_gallery_thumbnail_number = 3
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(8, 6))
axs[0].plot(wav, "k", lw=6, label="True")
axs[0].plot(wav_est, "--r", lw=4, label="Estimated (noise-free)")
axs[0].plot(wavn_est, "--g", lw=4, label="Estimated (noisy)")
axs[0].plot(wavn_reg_est, "--m", lw=4, label="Estimated (noisy regularized)")
axs[0].set_title("Zero-phase wavelet")
axs[0].grid()
axs[0].legend(loc="upper right")
axs[0].axis("tight")
axs[1].plot(wav_phase, "k", lw=6, label="True")
axs[1].plot(wav_phase_est, "--r", lw=4, label="Estimated")
axs[1].set_title("Wavelet with phase")
axs[1].grid()
axs[1].legend(loc="upper right")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# Finally we repeat the same exercise, but this time we use a *preconditioner*.
# Initially, our preconditioner is a :py:class:`pylops.Symmetrize` operator
# to ensure that our estimated wavelet is zero-phase. After we chain
# the :py:class:`pylops.Symmetrize` and the :py:class:`pylops.Smoothing1D`
# operators to also guarantee a smooth wavelet.
# Create symmetrize operator
Sop = pylops.Symmetrize((ntwav + 1) // 2)
# Create smoothing operator
Smop = pylops.Smoothing1D(5, dims=((ntwav + 1) // 2,), dtype="float64")
# Invert for wavelet
wavn_prec_est = pylops.optimization.leastsquares.preconditioned_inversion(
Wavesop, dn.T.ravel(), Sop, **dict(damp=np.sqrt(1e-4), iter_lim=200, show=0)
)[0]
wavn_smooth_est = pylops.optimization.leastsquares.preconditioned_inversion(
Wavesop, dn.T.ravel(), Sop * Smop, **dict(damp=np.sqrt(1e-4), iter_lim=200, show=0)
)[0]
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(8, 3))
ax.plot(wav, "k", lw=6, label="True")
ax.plot(wav_est, "--r", lw=4, label="Estimated (noise-free)")
ax.plot(wavn_prec_est, "--g", lw=4, label="Estimated (noisy symmetric)")
ax.plot(wavn_smooth_est, "--m", lw=4, label="Estimated (noisy smoothed)")
ax.set_title("Zero-phase wavelet")
ax.grid()
ax.legend(loc="upper right")
plt.tight_layout()
| 6,524 | 32.290816 | 93 | py |
pylops | pylops-master/examples/plot_radon.py | r"""
Radon Transform
===============
This example shows how to use the :py:class:`pylops.signalprocessing.Radon2D`
and :py:class:`pylops.signalprocessing.Radon3D` operators to apply the Radon
Transform to 2-dimensional or 3-dimensional signals, respectively.
In our implementation both linear, parabolic and hyperbolic parametrization
can be chosen.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by creating an empty 2d matrix of size :math:`n_{p_x} \times n_t`
# and add a single spike in it. We will see that applying the forward
# Radon operator will result in a single event (linear, parabolic or
# hyperbolic) in the resulting data vector.
nt, nh = 41, 51
npx, pxmax = 41, 1e-2
dt, dh = 0.005, 1
t = np.arange(nt) * dt
h = np.arange(nh) * dh
px = np.linspace(0, pxmax, npx)
x = np.zeros((npx, nt))
x[4, nt // 2] = 1
###############################################################################
# We can now define our operators for different parametric curves and apply
# them to the input model vector. We also apply the adjoint to the resulting
# data vector.
RLop = pylops.signalprocessing.Radon2D(
t, h, px, centeredh=True, kind="linear", interp=False, engine="numpy"
)
RPop = pylops.signalprocessing.Radon2D(
t, h, px, centeredh=True, kind="parabolic", interp=False, engine="numpy"
)
RHop = pylops.signalprocessing.Radon2D(
t, h, px, centeredh=True, kind="hyperbolic", interp=False, engine="numpy"
)
# forward
yL = RLop * x
yP = RPop * x
yH = RHop * x
# adjoint
xadjL = RLop.H * yL
xadjP = RPop.H * yP
xadjH = RHop.H * yH
###############################################################################
# Let's now visualize the input model in the Radon domain, the data, and
# the adjoint model the different parametric curves.
fig, axs = plt.subplots(2, 4, figsize=(10, 6), sharey=True)
axs[0, 0].axis("off")
axs[1, 0].imshow(
x.T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 0].set(xlabel=r"$p$ [s/km]", ylabel=r"$t$ [s]", title="Input model")
axs[1, 0].axis("tight")
axs[0, 1].imshow(
yL.T, vmin=-1, vmax=1, cmap="seismic_r", extent=(h[0], h[-1], t[-1], t[0])
)
axs[0, 1].tick_params(labelleft=True)
axs[0, 1].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Linear data")
axs[0, 1].axis("tight")
axs[0, 2].imshow(
yP.T, vmin=-1, vmax=1, cmap="seismic_r", extent=(h[0], h[-1], t[-1], t[0])
)
axs[0, 2].set(xlabel=r"$x$ [m]", title="Parabolic data")
axs[0, 2].axis("tight")
axs[0, 3].imshow(
yH.T, vmin=-1, vmax=1, cmap="seismic_r", extent=(h[0], h[-1], t[-1], t[0])
)
axs[0, 3].set(xlabel=r"$x$ [m]", title="Hyperbolic data")
axs[0, 3].axis("tight")
axs[1, 1].imshow(
xadjL.T,
vmin=-20,
vmax=20,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 1].set(xlabel=r"$p$ [s/km]", title="Linear adjoint")
axs[1, 1].axis("tight")
axs[1, 2].imshow(
xadjP.T,
vmin=-20,
vmax=20,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 2].set(xlabel=r"$p$ [s/km]", title="Parabolic adjoint")
axs[1, 2].axis("tight")
axs[1, 3].imshow(
xadjH.T,
vmin=-20,
vmax=20,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 3].set(xlabel=r"$p$ [s/km]", title="Hyperbolic adjoint")
axs[1, 3].axis("tight")
fig.tight_layout()
###############################################################################
# As we can see in the bottom figures, the adjoint Radon transform is far
# from being close to the inverse Radon transform, i.e.
# :math:`\mathbf{R^H}\mathbf{R} \neq \mathbf{I}` (compared to the case of FFT
# where the adjoint and inverse are equivalent, i.e.
# :math:`\mathbf{F^H}\mathbf{F} = \mathbf{I}`). In fact when we apply the
# adjoint Radon Transform we obtain a *model* that
# is a smoothed version of the original model polluted by smearing and
# artifacts. In tutorial :ref:`sphx_glr_tutorials_radonfiltering.py` we will
# exploit a sparsity-promoting Radon transform to perform filtering of unwanted
# signals from an input data.
#
# Finally we repeat the same exercise with 3d data.
nt, ny, nx = 21, 21, 11
npy, pymax = 13, 5e-3
npx, pxmax = 11, 5e-3
dt, dy, dx = 0.005, 1, 1
t = np.arange(nt) * dt
hy = np.arange(ny) * dy
hx = np.arange(nx) * dx
py = np.linspace(0, pymax, npy)
px = np.linspace(0, pxmax, npx)
x = np.zeros((npy, npx, nt))
x[npy // 2, npx // 2 - 2, nt // 2] = 1
RLop = pylops.signalprocessing.Radon3D(
t, hy, hx, py, px, centeredh=True, kind="linear", interp=False, engine="numpy"
)
RPop = pylops.signalprocessing.Radon3D(
t, hy, hx, py, px, centeredh=True, kind="parabolic", interp=False, engine="numpy"
)
RHop = pylops.signalprocessing.Radon3D(
t, hy, hx, py, px, centeredh=True, kind="hyperbolic", interp=False, engine="numpy"
)
# forward
yL = RLop * x.reshape(npy * npx, nt)
yP = RPop * x.reshape(npy * npx, nt)
yH = RHop * x.reshape(npy * npx, nt)
# adjoint
xadjL = RLop.H * yL
xadjP = RPop.H * yP
xadjH = RHop.H * yH
# reshape
yL = yL.reshape(ny, nx, nt)
yP = yP.reshape(ny, nx, nt)
yH = yH.reshape(ny, nx, nt)
xadjL = xadjL.reshape(npy, npx, nt)
xadjP = xadjP.reshape(npy, npx, nt)
xadjH = xadjH.reshape(npy, npx, nt)
# plotting
fig, axs = plt.subplots(2, 4, figsize=(10, 6), sharey=True)
axs[1, 0].imshow(
x[npy // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 0].set(xlabel=r"$p_x$ [s/km]", ylabel=r"$t$ [s]", title="Input model")
axs[1, 0].axis("tight")
axs[0, 1].imshow(
yL[ny // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hx[0], hx[-1], t[-1], t[0]),
)
axs[0, 1].tick_params(labelleft=True)
axs[0, 1].set(xlabel=r"$x$ [m]", ylabel=r"$t$ [s]", title="Linear data")
axs[0, 1].axis("tight")
axs[0, 2].imshow(
yP[ny // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hx[0], hx[-1], t[-1], t[0]),
)
axs[0, 2].set(xlabel=r"$x$ [m]", title="Parabolic data")
axs[0, 2].axis("tight")
axs[0, 3].imshow(
yH[ny // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hx[0], hx[-1], t[-1], t[0]),
)
axs[0, 3].set(xlabel=r"$x$ [m]", title="Hyperbolic data")
axs[0, 3].axis("tight")
axs[1, 1].imshow(
xadjL[npy // 2].T,
vmin=-100,
vmax=100,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[0, 0].axis("off")
axs[1, 1].set(xlabel=r"$p_x$ [s/km]", title="Linear adjoint")
axs[1, 1].axis("tight")
axs[1, 2].imshow(
xadjP[npy // 2].T,
vmin=-100,
vmax=100,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 2].set(xlabel=r"$p_x$ [s/km]", title="Parabolic adjoint")
axs[1, 2].axis("tight")
axs[1, 3].imshow(
xadjH[npy // 2].T,
vmin=-100,
vmax=100,
cmap="seismic_r",
extent=(1e3 * px[0], 1e3 * px[-1], t[-1], t[0]),
)
axs[1, 3].set(xlabel=r"$p_x$ [s/km]", title="Hyperbolic adjoint")
axs[1, 3].axis("tight")
fig.tight_layout()
fig, axs = plt.subplots(2, 4, figsize=(10, 6), sharey=True)
axs[1, 0].imshow(
x[:, npx // 2 - 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]),
)
axs[1, 0].set(xlabel=r"$p_y$ [s/km]", ylabel=r"$t$ [s]", title="Input model")
axs[1, 0].axis("tight")
axs[0, 1].imshow(
yL[:, nx // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hy[0], hy[-1], t[-1], t[0]),
)
axs[0, 1].tick_params(labelleft=True)
axs[0, 1].set(xlabel=r"$y$ [m]", ylabel=r"$t$ [s]", title="Linear data")
axs[0, 1].axis("tight")
axs[0, 2].imshow(
yP[:, nx // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hy[0], hy[-1], t[-1], t[0]),
)
axs[0, 2].set(xlabel=r"$y$ [m]", title="Parabolic data")
axs[0, 2].axis("tight")
axs[0, 3].imshow(
yH[:, nx // 2].T,
vmin=-1,
vmax=1,
cmap="seismic_r",
extent=(hy[0], hy[-1], t[-1], t[0]),
)
axs[0, 3].set(xlabel=r"$y$ [m]", title="Hyperbolic data")
axs[0, 3].axis("tight")
axs[1, 1].imshow(
    # Slice at npx // 2 - 2 to match the px location of the model spike
    # (x[npy // 2, npx // 2 - 2, nt // 2]) and the parabolic/hyperbolic
    # panels; the original sliced at npx // 2 - 5, an inconsistent plane.
    xadjL[:, npx // 2 - 2].T,
    vmin=-100,
    vmax=100,
    cmap="seismic_r",
    extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]),
)
axs[0, 0].axis("off")
axs[1, 1].set(xlabel=r"$p_y$ [s/km]", title="Linear adjoint")
axs[1, 1].axis("tight")
axs[1, 2].imshow(
xadjP[:, npx // 2 - 2].T,
vmin=-100,
vmax=100,
cmap="seismic_r",
extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]),
)
axs[1, 2].set(xlabel=r"$p_y$ [s/km]", title="Parabolic adjoint")
axs[1, 2].axis("tight")
axs[1, 3].imshow(
xadjH[:, npx // 2 - 2].T,
vmin=-100,
vmax=100,
cmap="seismic_r",
extent=(1e3 * py[0], 1e3 * py[-1], t[-1], t[0]),
)
axs[1, 3].set(xlabel=r"$p_y$ [s/km]", title="Hyperbolic adjoint")
axs[1, 3].axis("tight")
fig.tight_layout()
| 8,846 | 27.631068 | 86 | py |
pylops | pylops-master/examples/plot_matrixmult.py | r"""
Matrix Multiplication
=====================
This example shows how to use the :py:class:`pylops.MatrixMult` operator
to perform *Matrix inversion* of the following linear system.
.. math::
\mathbf{y}= \mathbf{A} \mathbf{x}
You will see that since this operator is a simple overloading to a
:py:func:`numpy.ndarray` object, the solution of the linear system
can be obtained via both direct inversion (i.e., by means explicit
solver such as :py:func:`scipy.linalg.solve` or :py:func:`scipy.linalg.lstsq`)
and iterative solver (i.e., :py:func:`scipy.sparse.linalg.lsqr`).
Note that in case of rectangular :math:`\mathbf{A}`, an exact inverse does
not exist and a least-square solution is computed instead.
"""
import warnings
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import rand
from scipy.sparse.linalg import lsqr
import pylops
plt.close("all")
warnings.filterwarnings("ignore")
# sphinx_gallery_thumbnail_number = 2
###############################################################################
# Let's define the sizes of the matrix :math:`\mathbf{A}` (``N`` and ``M``) and
# fill the matrix with random numbers
N, M = 20, 20
A = np.random.normal(0, 1, (N, M))
Aop = pylops.MatrixMult(A, dtype="float64")
x = np.ones(M)
###############################################################################
# We can now apply the forward operator to create the data vector :math:`\mathbf{y}`
# and use ``/`` to solve the system by means of an explicit solver.
y = Aop * x
xest = Aop / y
###############################################################################
# Let's visually plot the system of equations we just solved.
gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 3))
ax = plt.subplot(gs[0, 0])
ax.imshow(y[:, np.newaxis], cmap="rainbow")
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 1])
ax.text(
0.35,
0.5,
"=",
horizontalalignment="center",
verticalalignment="center",
size=40,
fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 2:5])
ax.imshow(Aop.A, cmap="rainbow")
ax.set_title("A", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 5])
ax.imshow(x[:, np.newaxis], cmap="rainbow")
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
plt.tight_layout()
gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 3))
ax = plt.subplot(gs[0, 0])
# Show the estimated model: the panel is titled "xest" and this figure
# illustrates xest = A^-1 y, but the original code plotted ``x`` here.
ax.imshow(xest[:, np.newaxis], cmap="rainbow")
ax.set_title("xest", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 1])
ax.text(
0.35,
0.5,
"=",
horizontalalignment="center",
verticalalignment="center",
size=40,
fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 2:5])
ax.imshow(Aop.inv(), cmap="rainbow")
ax.set_title(r"A$^{-1}$", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(M - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 5])
ax.imshow(y[:, np.newaxis], cmap="rainbow")
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
plt.tight_layout()
###############################################################################
# Let's also plot the matrix eigenvalues
# Plot the eigenvalues of A: index on the x-axis, magnitude on the y-axis.
plt.figure(figsize=(8, 3))
plt.plot(Aop.eigs(), "k", lw=2)
plt.title("Eigenvalues", size=16, fontweight="bold")
plt.xlabel("#eigenvalue")
# The original called plt.xlabel twice, overwriting the x label and leaving
# the y-axis unlabelled; the second call must set the y label.
plt.ylabel("intensity")
plt.tight_layout()
###############################################################################
# We can also repeat the same exercise for a non-square matrix
N, M = 200, 50
A = np.random.normal(0, 1, (N, M))
x = np.ones(M)
Aop = pylops.MatrixMult(A, dtype="float64")
y = Aop * x
yn = y + np.random.normal(0, 0.3, N)
xest = Aop / y
xnest = Aop / yn
plt.figure(figsize=(8, 3))
plt.plot(x, "k", lw=2, label="True")
plt.plot(xest, "--r", lw=2, label="Noise-free")
plt.plot(xnest, "--g", lw=2, label="Noisy")
plt.title("Matrix inversion", size=16, fontweight="bold")
plt.legend()
plt.tight_layout()
###############################################################################
# And we can also use a sparse matrix from the :obj:`scipy.sparse`
# family of sparse matrices.
N, M = 5, 5
A = rand(N, M, density=0.75)
x = np.ones(M)
Aop = pylops.MatrixMult(A, dtype="float64")
y = Aop * x
xest = Aop / y
print(f"A= {Aop.A.todense()}")
print(f"A^-1= {Aop.inv().todense()}")
print(f"eigs= {Aop.eigs()}")
print(f"x= {x}")
print(f"y= {y}")
print(f"lsqr solution xest= {xest}")
###############################################################################
# Finally, in several circumstances the input model :math:`\mathbf{x}` may
# be more naturally arranged as a matrix or a multi-dimensional array and
# it may be desirable to apply the same matrix to every columns of the model.
# This can be mathematically expressed as:
#
# .. math::
# \mathbf{y} =
# \begin{bmatrix}
# \mathbf{A} \quad \mathbf{0} \quad \mathbf{0} \\
# \mathbf{0} \quad \mathbf{A} \quad \mathbf{0} \\
# \mathbf{0} \quad \mathbf{0} \quad \mathbf{A}
# \end{bmatrix}
# \begin{bmatrix}
# \mathbf{x_1} \\
# \mathbf{x_2} \\
# \mathbf{x_3}
# \end{bmatrix}
#
# and apply it using the same implementation of the
# :py:class:`pylops.MatrixMult` operator by simply telling the operator how we
# want the model to be organized through the ``otherdims`` input parameter.
A = np.array([[1.0, 2.0], [4.0, 5.0]])
x = np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
Aop = pylops.MatrixMult(A, otherdims=(3,), dtype="float64")
y = Aop * x.ravel()
xest, istop, itn, r1norm, r2norm = lsqr(Aop, y, damp=1e-10, iter_lim=10, show=0)[0:5]
xest = xest.reshape(3, 2)
print(f"A= {A}")
print(f"x= {x}")
print(f"y={y}")
print(f"lsqr solution xest= {xest}")
| 6,598 | 29.550926 | 85 | py |
pylops | pylops-master/examples/plot_patching.py | r"""
Patching
========
This example shows how to use the :py:class:`pylops.signalprocessing.Patch2D`
and :py:class:`pylops.signalprocessing.Patch3D` operators to perform repeated
transforms over small patches of a 2-dimensional or 3-dimensional
array. The transforms that we apply in this example are the
:py:class:`pylops.signalprocessing.FFT2D` and
:py:class:`pylops.signalprocessing.FFT3D` but this operator has been
designed to allow a variety of transforms as long as they operate with signals
that are 2- or 3-dimensional in nature, respectively.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by creating an 2-dimensional array of size :math:`n_x \times n_t`
# composed of 3 parabolic events
par = {"ox": -140, "dx": 2, "nx": 140, "ot": 0, "dt": 0.004, "nt": 200, "f0": 20}
v = 1500
t0 = [0.2, 0.4, 0.5]
px = [0, 0, 0]
pxx = [1e-5, 5e-6, 1e-20]
amp = [1.0, -2, 0.5]
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"])[0]
# Generate model
_, data = pylops.utils.seismicevents.parabolic2d(x, t, t0, px, pxx, amp, wav)
###############################################################################
# We want to divide this 2-dimensional data into small overlapping
# patches in the spatial direction and apply the adjoint of the
# :py:class:`pylops.signalprocessing.FFT2D` operator to each patch. This is
# done by simply using the adjoint of the
# :py:class:`pylops.signalprocessing.Patch2D` operator. Note that for non-
# orthogonal operators, this must be replaced by an inverse.
nwin = (20, 34)  # window size in data domain
nop = (
    128,
    128 // 2 + 1,
)  # window size in model domain; we use real FFT, second axis is half
nover = (10, 4)  # overlap between windows
dimsd = data.shape
# Sliding window transform without taper
Op = pylops.signalprocessing.FFT2D(nwin, nffts=(128, 128), real=True)
# pass ``nop`` directly instead of re-typing the hard-coded (128, 65),
# which must always match the model-domain window size defined above
nwins, dims, mwin_inends, dwin_inends = pylops.signalprocessing.patch2d_design(
    dimsd, nwin, nover, nop
)
Patch = pylops.signalprocessing.Patch2D(
    Op.H, dims, dimsd, nwin, nover, nop, tapertype=None
)
fftdata = Patch.H * data
###############################################################################
# We now create a similar operator but we also add a taper to the overlapping
# parts of the patches. We then apply the forward to restore the original
# signal.
Patch = pylops.signalprocessing.Patch2D(
Op.H, dims, dimsd, nwin, nover, nop, tapertype="hanning"
)
reconstructed_data = Patch * fftdata
###############################################################################
# Finally we re-arrange the transformed patches so that we can also display
# them
# Tile the transformed patches into one big 2D mosaic so they can be displayed
fftdatareshaped = np.zeros((nop[0] * nwins[0], nop[1] * nwins[1]), dtype=fftdata.dtype)
for ix in range(nwins[0]):
    for it in range(nwins[1]):
        # fftshift centers the zero-frequency component of each patch
        # (removed the dead ``iwin`` counter, which was never read)
        fftdatareshaped[
            ix * nop[0] : (ix + 1) * nop[0], it * nop[1] : (it + 1) * nop[1]
        ] = np.fft.fftshift(fftdata[ix, it])
###############################################################################
# Let's finally visualize all the intermediate results as well as our final
# data reconstruction after inverting the
# :py:class:`pylops.signalprocessing.Sliding2D` operator.
fig, axs = plt.subplots(1, 3, figsize=(12, 5))
im = axs[0].imshow(data.T, cmap="gray")
axs[0].set_title("Original data")
plt.colorbar(im, ax=axs[0])
axs[0].axis("tight")
im = axs[1].imshow(reconstructed_data.real.T, cmap="gray")
axs[1].set_title("Reconstruction from adjoint")
plt.colorbar(im, ax=axs[1])
axs[1].axis("tight")
axs[2].imshow(np.abs(fftdatareshaped).T, cmap="jet")
axs[2].set_title("FFT data")
axs[2].axis("tight")
plt.tight_layout()
###############################################################################
# We repeat now the same exercise in 3d
par = {
"oy": -60,
"dy": 2,
"ny": 60,
"ox": -50,
"dx": 2,
"nx": 50,
"ot": 0,
"dt": 0.004,
"nt": 100,
"f0": 20,
}
v = 1500
t0 = [0.05, 0.2, 0.3]
vrms = [500, 700, 1700]
amp = [1.0, -2, 0.5]
# Create axis
t, t2, x, y = pylops.utils.seismicevents.makeaxis(par)
# Create wavelet
wav = pylops.utils.wavelets.ricker(t[:41], f0=par["f0"])[0]
# Generate model
_, data = pylops.utils.seismicevents.hyperbolic3d(x, y, t, t0, vrms, vrms, amp, wav)
fig, axs = plt.subplots(1, 3, figsize=(12, 5))
fig.suptitle("Original data", fontsize=12, fontweight="bold", y=0.95)
axs[0].imshow(
data[par["ny"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[1].imshow(
data[:, par["nx"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(y.min(), y.max(), t.max(), t.min()),
)
axs[1].set_xlabel(r"$y(m)$")
axs[1].set_ylabel(r"$t(s)$")
axs[2].imshow(
    data[:, :, par["nt"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    # extent is (left, right, bottom, top): the vertical axis is y here,
    # so the last entry must be y.min() (was mistakenly x.min())
    extent=(x.min(), x.max(), y.max(), y.min()),
)
axs[2].set_xlabel(r"$x(m)$")
axs[2].set_ylabel(r"$y(m)$")
plt.tight_layout()
###############################################################################
# Let's create now the :py:class:`pylops.signalprocessing.Patch3D` operator
# applying the adjoint of the :py:class:`pylops.signalprocessing.FFT3D`
# operator to each patch.
nwin = (20, 20, 34)  # window size in data domain
nop = (
    128,
    128,
    128 // 2 + 1,
)  # window size in model domain; we use real FFT, third axis is half
nover = (10, 10, 4)  # overlap between windows
dimsd = data.shape
# Sliding window transform without taper
Op = pylops.signalprocessing.FFTND(nwin, nffts=(128, 128, 128), real=True)
# pass ``nop`` directly instead of re-typing the hard-coded (128, 128, 65),
# which must always match the model-domain window size defined above
nwins, dims, mwin_inends, dwin_inends = pylops.signalprocessing.patch3d_design(
    dimsd, nwin, nover, nop
)
Patch = pylops.signalprocessing.Patch3D(
    Op.H, dims, dimsd, nwin, nover, nop, tapertype=None
)
fftdata = Patch.H * data
Patch = pylops.signalprocessing.Patch3D(
Op.H, dims, dimsd, nwin, nover, nop, tapertype="hanning"
)
reconstructed_data = np.real(Patch * fftdata)
fig, axs = plt.subplots(1, 3, figsize=(12, 5))
fig.suptitle("Reconstructed data", fontsize=12, fontweight="bold", y=0.95)
axs[0].imshow(
reconstructed_data[par["ny"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(x.min(), x.max(), t.max(), t.min()),
)
axs[0].set_xlabel(r"$x(m)$")
axs[0].set_ylabel(r"$t(s)$")
axs[1].imshow(
reconstructed_data[:, par["nx"] // 2].T,
aspect="auto",
interpolation="nearest",
vmin=-2,
vmax=2,
cmap="gray",
extent=(y.min(), y.max(), t.max(), t.min()),
)
axs[1].set_xlabel(r"$y(m)$")
axs[1].set_ylabel(r"$t(s)$")
axs[2].imshow(
    reconstructed_data[:, :, par["nt"] // 2],
    aspect="auto",
    interpolation="nearest",
    vmin=-2,
    vmax=2,
    cmap="gray",
    # extent is (left, right, bottom, top): the vertical axis is y here,
    # so the last entry must be y.min() (was mistakenly x.min())
    extent=(x.min(), x.max(), y.max(), y.min()),
)
axs[2].set_xlabel(r"$x(m)$")
axs[2].set_ylabel(r"$y(m)$")
plt.tight_layout()
| 7,252 | 29.220833 | 87 | py |
pylops | pylops-master/examples/plot_pad.py | """
Padding
=======
This example shows how to use the :py:class:`pylops.Pad` operator to zero-pad a
model
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define a pad operator ``Pop`` for one dimensional data
# ``pad = (before, after)``: number of zeros added at each end of the vector
dims = 10
pad = (2, 3)
Pop = pylops.Pad(dims, pad)
x = np.arange(dims) + 1.0
y = Pop * x  # forward: zero-padded copy of x
xadj = Pop.H * y  # adjoint: expected to crop the padding back off (see prints)
print(f"x = {x}")
print(f"P*x = {y}")
print(f"P'*y = {xadj}")
###############################################################################
# We move now to a multi-dimensional case. We pad the input model
# with different extents along both dimensions
dims = (5, 4)
pad = ((1, 0), (3, 4))  # per-axis (before, after) padding
Pop = pylops.Pad(dims, pad)
x = (np.arange(np.prod(np.array(dims))) + 1.0).reshape(dims)
y = Pop * x
xadj = Pop.H * y
# Show input, padded output, and adjoint (cropped) result side by side
fig, axs = plt.subplots(1, 3, figsize=(10, 4))
fig.suptitle("Pad for 2d data", fontsize=14, fontweight="bold", y=1.15)
axs[0].imshow(x, cmap="rainbow", vmin=0, vmax=np.prod(np.array(dims)) + 1)
axs[0].set_title(r"$x$")
axs[0].axis("tight")
axs[1].imshow(y, cmap="rainbow", vmin=0, vmax=np.prod(np.array(dims)) + 1)
axs[1].set_title(r"$y = P x$")
axs[1].axis("tight")
axs[2].imshow(xadj, cmap="rainbow", vmin=0, vmax=np.prod(np.array(dims)) + 1)
axs[2].set_title(r"$x_{adj} = P^{H} y$")
axs[2].axis("tight")
plt.tight_layout()
| 1,387 | 25.188679 | 79 | py |
pylops | pylops-master/examples/plot_avo.py | r"""
AVO modelling
===================
This example shows how to create pre-stack angle gathers using
the :py:class:`pylops.avo.avo.AVOLinearModelling` operator.
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from scipy.signal import filtfilt
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by creating the input elastic property profiles
nt0 = 501
dt0 = 0.004
ntheta = 21
t0 = np.arange(nt0) * dt0
thetamin, thetamax = 0.0, 40.0
theta = np.linspace(thetamin, thetamax, ntheta)
# Elastic property profiles
vp = (
2000
+ 5 * np.arange(nt0)
+ 2 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 160, nt0))
)
vs = 600 + vp / 2 + 3 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 100, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 120, nt0))
vp[201:] += 1500
vs[201:] += 500
rho[201:] += 100
# Wavelet
ntwav = 41
wavoff = 10
wav, twav, wavc = ricker(t0[: ntwav // 2 + 1], 20)
wav_phase = np.hstack((wav[wavoff:], np.zeros(wavoff)))
# vs/vp profile
vsvp = 0.5
vsvp_z = vs / vp
# Model
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)
fig, axs = plt.subplots(1, 3, figsize=(9, 7), sharey=True)
axs[0].plot(vp, t0, "k", lw=3)
axs[0].set(xlabel="[m/s]", ylabel=r"$t$ [s]", ylim=[t0[0], t0[-1]], title="Vp")
axs[0].grid()
axs[1].plot(vp / vs, t0, "k", lw=3)
axs[1].set(title="Vp/Vs")
axs[1].grid()
axs[2].plot(rho, t0, "k", lw=3)
axs[2].set(xlabel="[kg/m³]", title="Rho")
axs[2].invert_yaxis()
axs[2].grid()
###############################################################################
# We create now the operators to model the AVO responses for a set of
# elastic profiles
# constant vsvp
PPop_const = pylops.avo.avo.AVOLinearModelling(
theta, vsvp=vsvp, nt0=nt0, linearization="akirich", dtype=np.float64
)
# depth-variant vsvp
PPop_variant = pylops.avo.avo.AVOLinearModelling(
theta, vsvp=vsvp_z, linearization="akirich", dtype=np.float64
)
###############################################################################
# We can then apply those operators to the elastic model and
# create some synthetic reflection responses
# Model the AVO responses with the two operators defined above
dPP_const = PPop_const * m
dPP_variant = PPop_variant * m
###############################################################################
# To visualize these responses, we will plot their anomaly - how much they
# deviate from their mean
mean_dPP_const = dPP_const.mean()
dPP_const -= mean_dPP_const  # demean in place; means are shown in colorbars
mean_dPP_variant = dPP_variant.mean()
dPP_variant -= mean_dPP_variant
fig, axs = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
im = axs[0].imshow(
dPP_const,
cmap="RdBu_r",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-dPP_const.max(),
vmax=dPP_const.max(),
)
cax = make_axes_locatable(axs[0]).append_axes("right", size="5%", pad="2%")
cb = fig.colorbar(im, cax=cax)
cb.set_label(f"Deviation from mean = {mean_dPP_const:.2f}")
axs[0].set(xlabel=r"$\theta$ [°]", ylabel=r"$t$ [s]", title="Data with constant VP/VS")
axs[0].axis("tight")
im = axs[1].imshow(
dPP_variant,
cmap="RdBu_r",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-dPP_variant.max(),
vmax=dPP_variant.max(),
)
cax = make_axes_locatable(axs[1]).append_axes("right", size="5%", pad="2%")
cb = fig.colorbar(im, cax=cax)
cb.set_label(f"Deviation from mean = {mean_dPP_variant:.2f}")
axs[1].set(xlabel=r"$\theta$ [°]", title="Data with variable VP/VS")
axs[1].axis("tight")
plt.tight_layout()
###############################################################################
# Finally we can also model the PS response by simply changing the
# ``linearization`` choice as follows
PSop = pylops.avo.avo.AVOLinearModelling(
theta, vsvp=vsvp, nt0=nt0, linearization="ps", dtype=np.float64
)
###############################################################################
# We can then apply those operators to the elastic model and
# create some synthetic reflection responses
dPS = PSop * m
mean_dPS = dPS.mean()
dPS -= mean_dPS
fig, axs = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
im = axs[0].imshow(
dPP_const,
cmap="RdBu_r",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-dPP_const.max(),
vmax=dPP_const.max(),
)
cax = make_axes_locatable(axs[0]).append_axes("right", size="5%", pad="2%")
cb = fig.colorbar(im, cax=cax)
cb.set_label(f"Deviation from mean = {mean_dPP_const:.2f}")
axs[0].set(xlabel=r"$\theta$ [°]", ylabel=r"$t$ [s]", title="PP Data")
axs[0].axis("tight")
im = axs[1].imshow(
dPS,
cmap="RdBu_r",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-dPS.max(),
vmax=dPS.max(),
)
cax = make_axes_locatable(axs[1]).append_axes("right", size="5%", pad="2%")
cb = fig.colorbar(im, cax=cax)
cb.set_label(f"Deviation from mean = {mean_dPS:.2f}")
axs[1].set(xlabel=r"$\theta$ [°]", title="PS Data")
axs[1].axis("tight")
plt.tight_layout()
| 5,038 | 29.72561 | 87 | py |
pylops | pylops-master/examples/plot_roll.py | """
Roll
====
This example shows how to use the :py:class:`pylops.Roll` operator.
This operator simply shifts elements of multi-dimensional array along a
specified direction a chosen number of samples.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start with a 1d example. We make a signal, shift it by two samples
# and then shift it back using its adjoint. We can immediately see how the
# adjoint of this operator is equivalent to its inverse.
nx = 10
x = np.arange(nx)
Rop = pylops.Roll(nx, shift=2)  # shift every element by 2 samples
y = Rop * x
xadj = Rop.H * y  # per the note above, the adjoint undoes the shift
plt.figure()
plt.plot(x, "k", lw=2, label="x")
plt.plot(y, "b", lw=2, label="y")
plt.plot(xadj, "--r", lw=2, label="xadj")
plt.title("1D Roll")
plt.legend()
plt.tight_layout()
###############################################################################
# We can now do the same with a 2d array.
ny, nx = 10, 5
x = np.arange(ny * nx).reshape(ny, nx)
# negative shift applied along the second axis of the 2d array
Rop = pylops.Roll(dims=(ny, nx), axis=1, shift=-2)
y = Rop * x
xadj = Rop.H * y
fig, axs = plt.subplots(1, 3, figsize=(10, 4))
fig.suptitle("Roll for 2d data", fontsize=14, fontweight="bold", y=1.15)
axs[0].imshow(x, cmap="rainbow", vmin=0, vmax=50)
axs[0].set_title(r"$x$")
axs[0].axis("tight")
axs[1].imshow(y, cmap="rainbow", vmin=0, vmax=50)
axs[1].set_title(r"$y = R x$")
axs[1].axis("tight")
axs[2].imshow(xadj, cmap="rainbow", vmin=0, vmax=50)
axs[2].set_title(r"$x_{adj} = R^H y$")
axs[2].axis("tight")
plt.tight_layout()
| 1,554 | 25.810345 | 79 | py |
pylops | pylops-master/examples/plot_mdc.py | """
Multi-Dimensional Convolution
=============================
This example shows how to use the :py:class:`pylops.waveeqprocessing.MDC` operator
to convolve a 3D kernel with an input seismic data. The resulting data is
a blurred version of the input data and the problem of removing such blurring
is reffered to as *Multi-dimensional Deconvolution (MDD)* and its implementation
is discussed in more details in the **MDD** tutorial.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
from pylops.utils.seismicevents import hyperbolic2d, makeaxis
from pylops.utils.tapers import taper3d
from pylops.utils.wavelets import ricker
plt.close("all")
###############################################################################
# Let's start by creating a set of hyperbolic events to be used as our MDC kernel
# Input parameters
par = {
"ox": -300,
"dx": 10,
"nx": 61,
"oy": -500,
"dy": 10,
"ny": 101,
"ot": 0,
"dt": 0.004,
"nt": 400,
"f0": 20,
"nfmax": 200,
}
t0_m = 0.2
vrms_m = 1100.0
amp_m = 1.0
t0_G = (0.2, 0.5, 0.7)
vrms_G = (1200.0, 1500.0, 2000.0)
amp_G = (1.0, 0.6, 0.5)
# Taper
tap = taper3d(par["nt"], (par["ny"], par["nx"]), (5, 5), tapertype="hanning")
# Create axis
t, t2, x, y = makeaxis(par)
# Create wavelet
wav = ricker(t[:41], f0=par["f0"])[0]
# Generate model
m, mwav = hyperbolic2d(x, t, t0_m, vrms_m, amp_m, wav)
# Generate operator
G, Gwav = np.zeros((par["ny"], par["nx"], par["nt"])), np.zeros(
(par["ny"], par["nx"], par["nt"])
)
for iy, y0 in enumerate(y):
G[iy], Gwav[iy] = hyperbolic2d(x - y0, t, t0_G, vrms_G, amp_G, wav)
G, Gwav = G * tap, Gwav * tap
# Add negative part to data and model
m = np.concatenate((np.zeros((par["nx"], par["nt"] - 1)), m), axis=-1)
mwav = np.concatenate((np.zeros((par["nx"], par["nt"] - 1)), mwav), axis=-1)
Gwav2 = np.concatenate((np.zeros((par["ny"], par["nx"], par["nt"] - 1)), Gwav), axis=-1)
# Define MDC linear operator
Gwav_fft = np.fft.rfft(Gwav2, 2 * par["nt"] - 1, axis=-1)
Gwav_fft = Gwav_fft[..., : par["nfmax"]]
# Move frequency/time to first axis
m, mwav = m.T, mwav.T
Gwav_fft = Gwav_fft.transpose(2, 0, 1)
# Create operator
MDCop = pylops.waveeqprocessing.MDC(
Gwav_fft,
nt=2 * par["nt"] - 1,
nv=1,
dt=0.004,
dr=1.0,
)
# Create data
d = MDCop * m.ravel()
d = d.reshape(2 * par["nt"] - 1, par["ny"])
# Apply adjoint operator to data
madj = MDCop.H * d.ravel()
madj = madj.reshape(2 * par["nt"] - 1, par["nx"])
###############################################################################
# Finally let's display the operator, input model, data and adjoint model
# Display the MDC kernel: a fixed-source (inline) slice and a
# fixed-receiver (crossline) slice
fig, axs = plt.subplots(1, 2, figsize=(9, 6))
axs[0].imshow(
    Gwav2[int(par["ny"] / 2)].T,
    aspect="auto",
    interpolation="nearest",
    cmap="gray",
    vmin=-Gwav2.max(),
    vmax=Gwav2.max(),
    extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[0].set_title("G - inline view", fontsize=15)
axs[0].set_xlabel("r")
# fixed: the y-label of the left panel was mistakenly set on axs[1]
axs[0].set_ylabel("t")
axs[1].imshow(
    Gwav2[:, int(par["nx"] / 2)].T,
    aspect="auto",
    interpolation="nearest",
    cmap="gray",
    vmin=-Gwav2.max(),
    vmax=Gwav2.max(),
    extent=(y.min(), y.max(), t2.max(), t2.min()),
)
# fixed: title was copy-pasted as "inline view"; this panel slices the
# other spatial axis
axs[1].set_title("G - crossline view", fontsize=15)
axs[1].set_xlabel("s")
axs[1].set_ylabel("t")
fig.tight_layout()
fig, axs = plt.subplots(1, 3, figsize=(9, 6))
axs[0].imshow(
mwav,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-mwav.max(),
vmax=mwav.max(),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[0].set_title(r"$m$", fontsize=15)
axs[0].set_xlabel("r")
axs[0].set_ylabel("t")
axs[1].imshow(
d,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-d.max(),
vmax=d.max(),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[1].set_title(r"$d$", fontsize=15)
axs[1].set_xlabel("s")
axs[1].set_ylabel("t")
axs[2].imshow(
madj,
aspect="auto",
interpolation="nearest",
cmap="gray",
vmin=-madj.max(),
vmax=madj.max(),
extent=(x.min(), x.max(), t2.max(), t2.min()),
)
axs[2].set_title(r"$m_{adj}$", fontsize=15)
axs[2].set_xlabel("s")
axs[2].set_ylabel("t")
fig.tight_layout()
| 4,199 | 24.609756 | 88 | py |
pylops | pylops-master/examples/plot_stacking.py | """
Operators concatenation
=======================
This example shows how to use 'stacking' operators such as
:py:class:`pylops.VStack`, :py:class:`pylops.HStack`,
:py:class:`pylops.Block`, :py:class:`pylops.BlockDiag`,
and :py:class:`pylops.Kronecker`.
These operators allow for different combinations of multiple linear operators
in a single operator. Such functionalities are used within PyLops as the basis
for the creation of complex operators as well as in the definition of various
types of optimization problems with regularization or preconditioning.
Some of this operators naturally lend to embarassingly parallel computations.
Within PyLops we leverage the multiprocessing module to run multiple processes
at the same time evaluating a subset of the operators involved in one of the
stacking operations.
"""
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's start by defining two second derivatives :py:class:`pylops.SecondDerivative`
# that we will be using in this example.
D2hop = pylops.SecondDerivative(dims=(11, 21), axis=1, dtype="float32")
D2vop = pylops.SecondDerivative(dims=(11, 21), axis=0, dtype="float32")
###############################################################################
# Chaining of operators represents the simplest concatenation that
# can be performed between two or more linear operators.
# This can be easily achieved using the ``*`` symbol
#
# .. math::
# \mathbf{D_{cat}}= \mathbf{D_v} \mathbf{D_h}
Nv, Nh = 11, 21
X = np.zeros((Nv, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1
D2op = D2vop * D2hop
Y = D2op * X
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Chain", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y=(D_x+D_y) x$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We now want to *vertically stack* three operators
#
# .. math::
# \mathbf{D_{Vstack}} =
# \begin{bmatrix}
# \mathbf{D_v} \\
# \mathbf{D_h}
# \end{bmatrix}, \qquad
# \mathbf{y} =
# \begin{bmatrix}
# \mathbf{D_v}\mathbf{x} \\
# \mathbf{D_h}\mathbf{x}
# \end{bmatrix}
Nv, Nh = 11, 21
X = np.zeros((Nv, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1
Dstack = pylops.VStack([D2vop, D2hop])
Y = np.reshape(Dstack * X.ravel(), (Nv * 2, Nh))
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Vertical stacking", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# Similarly we can now *horizontally stack* three operators
#
# .. math::
# \mathbf{D_{Hstack}} =
# \begin{bmatrix}
# \mathbf{D_v} & 0.5*\mathbf{D_v} & -1*\mathbf{D_h}
# \end{bmatrix}, \qquad
# \mathbf{y} =
# \mathbf{D_v}\mathbf{x}_1 + 0.5*\mathbf{D_v}\mathbf{x}_2 -
# \mathbf{D_h}\mathbf{x}_3
Nv, Nh = 11, 21
X = np.zeros((Nv * 3, Nh))
X[int(Nv / 2), int(Nh / 2)] = 1
X[int(Nv / 2) + Nv, int(Nh / 2)] = 1
X[int(Nv / 2) + 2 * Nv, int(Nh / 2)] = 1
Hstackop = pylops.HStack([D2vop, 0.5 * D2vop, -1 * D2hop])
Y = np.reshape(Hstackop * X.ravel(), (Nv, Nh))
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Horizontal stacking", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# We can even stack them both *horizontally* and *vertically* such that we
# create a *block* operator
#
# .. math::
# \mathbf{D_{Block}} =
# \begin{bmatrix}
# \mathbf{D_v} & 0.5*\mathbf{D_v} & -1*\mathbf{D_h} \\
# \mathbf{D_h} & 2*\mathbf{D_h} & \mathbf{D_v} \\
# \end{bmatrix}, \qquad
# \mathbf{y} =
# \begin{bmatrix}
# \mathbf{D_v} \mathbf{x_1} + 0.5*\mathbf{D_v} \mathbf{x_2} -
# \mathbf{D_h} \mathbf{x_3} \\
# \mathbf{D_h} \mathbf{x_1} + 2*\mathbf{D_h} \mathbf{x_2} +
# \mathbf{D_v} \mathbf{x_3}
# \end{bmatrix}
Bop = pylops.Block([[D2vop, 0.5 * D2vop, -1 * D2hop], [D2hop, 2 * D2hop, D2vop]])
Y = np.reshape(Bop * X.ravel(), (2 * Nv, Nh))
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Block", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# Finally we can use the *block-diagonal operator* to apply three operators
# to three different subset of the model and data
#
# .. math::
# \mathbf{D_{BDiag}} =
# \begin{bmatrix}
# \mathbf{D_v} & \mathbf{0} & \mathbf{0} \\
# \mathbf{0} & 0.5*\mathbf{D_v} & \mathbf{0} \\
# \mathbf{0} & \mathbf{0} & -\mathbf{D_h}
# \end{bmatrix}, \qquad
# \mathbf{y} =
# \begin{bmatrix}
# \mathbf{D_v} \mathbf{x_1} \\
# 0.5*\mathbf{D_v} \mathbf{x_2} \\
# -\mathbf{D_h} \mathbf{x_3}
# \end{bmatrix}
BD = pylops.BlockDiag([D2vop, 0.5 * D2vop, -1 * D2hop])
Y = np.reshape(BD * X.ravel(), (3 * Nv, Nh))
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Block-diagonal", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# If we consider now the case of having a large number of operators inside a
# blockdiagonal structure, it may be convenient to span multiple processes
# handling subset of operators at the same time. This can be easily achieved
# by simply defining the number of processes we want to use via ``nproc``.
X = np.zeros((Nv * 10, Nh))
for iv in range(10):
X[int(Nv / 2) + iv * Nv, int(Nh / 2)] = 1
BD = pylops.BlockDiag([D2vop] * 10, nproc=2)
print("BD Operator multiprocessing pool", BD.pool)
Y = np.reshape(BD * X.ravel(), (10 * Nv, Nh))
BD.pool.close()
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Block-diagonal", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
###############################################################################
# Finally we use the *Kronecker operator* and replicate this example on
# `wiki <https://en.wikipedia.org/wiki/Kronecker_product>`_.
#
# .. math::
# \begin{bmatrix}
# 1 & 2 \\
# 3 & 4 \\
# \end{bmatrix} \otimes
# \begin{bmatrix}
# 0 & 5 \\
# 6 & 7 \\
# \end{bmatrix} =
# \begin{bmatrix}
# 0 & 5 & 0 & 10 \\
# 6 & 7 & 12 & 14 \\
# 0 & 15 & 0 & 20 \\
# 18 & 21 & 24 & 28 \\
# \end{bmatrix}
A = np.array([[1, 2], [3, 4]])
B = np.array([[0, 5], [6, 7]])
AB = np.kron(A, B)  # dense Kronecker product used as reference
n1, m1 = A.shape
n2, m2 = B.shape
Aop = pylops.MatrixMult(A)
Bop = pylops.MatrixMult(B)
ABop = pylops.Kronecker(Aop, Bop)
x = np.ones(m1 * m2)
y = AB.dot(x)  # reference result via the dense matrix
yop = ABop * x  # same product via the Kronecker operator
xinv = ABop / yop  # operator "division" = least-squares inversion
print(f"AB = \n {AB}")
print(f"x = {x}")
print(f"y = {y}")
print(f"yop = {yop}")
print(f"xinv = {xinv}")
###############################################################################
# We can also use :py:class:`pylops.Kronecker` to do something more
# interesting. Any operator can in fact be applied on a single direction of a
# multi-dimensional input array if combined with an :py:class:`pylops.Identity`
# operator via Kronecker product. We apply here the
# :py:class:`pylops.FirstDerivative` to the second dimension of the model.
#
# Note that for those operators whose implementation allows their application
# to a single axis via the ``axis`` parameter, using the Kronecker product
# would lead to slower performance. Nevertheless, the Kronecker product allows
# any other operator to be applied to a single dimension.
Nv, Nh = 11, 21
Iop = pylops.Identity(Nv, dtype="float32")
D2hop = pylops.FirstDerivative(Nh, dtype="float32")
X = np.zeros((Nv, Nh))
X[Nv // 2, Nh // 2] = 1
D2hop = pylops.Kronecker(Iop, D2hop)
Y = D2hop * X.ravel()
Y = Y.reshape(Nv, Nh)
fig, axs = plt.subplots(1, 2, figsize=(10, 3))
fig.suptitle("Kronecker", fontsize=14, fontweight="bold", y=0.95)
im = axs[0].imshow(X, interpolation="nearest")
axs[0].axis("tight")
axs[0].set_title(r"$x$")
plt.colorbar(im, ax=axs[0])
im = axs[1].imshow(Y, interpolation="nearest")
axs[1].axis("tight")
axs[1].set_title(r"$y$")
plt.colorbar(im, ax=axs[1])
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 10,054 | 32.741611 | 84 | py |
pylops | pylops-master/examples/plot_prestack.py | r"""
Pre-stack modelling
===================
This example shows how to create pre-stack angle gathers using
the :py:class:`pylops.avo.prestack.PrestackLinearModelling` operator.
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
from scipy.signal import filtfilt
import pylops
from pylops.utils.wavelets import ricker
plt.close("all")
np.random.seed(0)
###############################################################################
# Let's start by creating the input elastic property profiles and wavelet
nt0 = 501
dt0 = 0.004
ntheta = 21
t0 = np.arange(nt0) * dt0
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)
# Elastic property profiles
vp = (
2000
+ 5 * np.arange(nt0)
+ 2 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 160, nt0))
)
vs = 600 + vp / 2 + 3 * filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 100, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 120, nt0))
vp[201:] += 1500
vs[201:] += 500
rho[201:] += 100
# Wavelet: Ricker built on the first ntwav//2+1 time samples, with the
# second positional argument (f0, as in the other examples) set to 5
ntwav = 81
wav, twav, wavc = ricker(t0[: ntwav // 2 + 1], 5)
# vs/vp profile: a constant value and a depth-variant curve
vsvp = 0.5
vsvp_z = vs / vp
# Model: natural log of the elastic properties, one column per property
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)
fig, axs = plt.subplots(1, 3, figsize=(9, 7), sharey=True)
axs[0].plot(vp, t0, "k", lw=3)
axs[0].set(xlabel="[m/s]", ylabel=r"$t$ [s]", ylim=[t0[0], t0[-1]], title="Vp")
axs[0].grid()
axs[1].plot(vp / vs, t0, "k", lw=3)
axs[1].set(title="Vp/Vs")
axs[1].grid()
axs[2].plot(rho, t0, "k", lw=3)
axs[2].set(xlabel="[kg/m³]", title="Rho")
axs[2].invert_yaxis()
axs[2].grid()
###############################################################################
# We create now the operators to model a synthetic pre-stack seismic gather
# with a zero-phase using both a constant and a depth-variant ``vsvp`` profile
# constant vsvp
PPop_const = pylops.avo.prestack.PrestackLinearModelling(
wav, theta, vsvp=vsvp, nt0=nt0, linearization="akirich"
)
# depth-variant vsvp
PPop_variant = pylops.avo.prestack.PrestackLinearModelling(
wav, theta, vsvp=vsvp_z, linearization="akirich"
)
###############################################################################
# Let's apply those operators to the elastic model and create some
# synthetic data
dPP_const = PPop_const * m
dPP_variant = PPop_variant * m
###############################################################################
# Finally we visualize the two datasets
# sphinx_gallery_thumbnail_number = 2
fig = plt.figure(figsize=(6, 7))
ax1 = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 2), (0, 1), rowspan=2, sharey=ax1)
ax3 = plt.subplot2grid((3, 2), (2, 0), sharex=ax1)
ax4 = plt.subplot2grid((3, 2), (2, 1), sharex=ax2)
im = ax1.imshow(
dPP_const,
cmap="bwr",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-0.2,
vmax=0.2,
)
cax = make_axes_locatable(ax1).append_axes("bottom", size="5%", pad="3%")
cb = fig.colorbar(im, cax=cax, orientation="horizontal")
cb.ax.xaxis.set_ticks_position("bottom")
ax1.set(ylabel=r"$t$ [s]")
ax1.set_title(r"Data with constant $VP/VS$", fontsize=10)
ax1.tick_params(labelbottom=False)
ax1.axhline(t0[nt0 // 4], color="k", linestyle="--")
ax1.axhline(t0[nt0 // 2], color="k", linestyle="--")
ax1.axis("tight")
im = ax2.imshow(
dPP_variant,
cmap="bwr",
extent=(theta[0], theta[-1], t0[-1], t0[0]),
vmin=-0.2,
vmax=0.2,
)
cax = make_axes_locatable(ax2).append_axes("bottom", size="5%", pad="3%")
cb = fig.colorbar(im, cax=cax, orientation="horizontal")
cb.ax.xaxis.set_ticks_position("bottom")
ax2.set_title(r"Data with depth-variant $VP/VS$", fontsize=10)
ax2.tick_params(labelbottom=False, labelleft=False)
ax2.axhline(t0[nt0 // 4], color="k", linestyle="--")
ax2.axhline(t0[nt0 // 2], color="k", linestyle="--")
ax2.axis("tight")
ax3.plot(theta, dPP_const[nt0 // 4], "k", lw=2)
ax3.plot(theta, dPP_variant[nt0 // 4], "--r", lw=2)
ax3.set(xlabel=r"$\theta$ [°]")
ax3.set_title("AVO curve at t=%.2f s" % t0[nt0 // 4], fontsize=10)
ax4.plot(theta, dPP_const[nt0 // 2], "k", lw=2, label=r"constant $VP/VS$")
ax4.plot(theta, dPP_variant[nt0 // 2], "--r", lw=2, label=r"variable $VP/VS$")
ax4.set(xlabel=r"$\theta$ [°]")
ax4.set_title("AVO curve at t=%.2f s" % t0[nt0 // 2], fontsize=10)
ax4.legend()
plt.tight_layout()
| 4,344 | 31.185185 | 84 | py |
pylops | pylops-master/examples/plot_bilinear.py | """
Bilinear Interpolation
======================
This example shows how to use the :py:class:`pylops.signalprocessing.Bilinear`
operator to perform bilinear interpolation to a 2-dimensional input vector.
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy import misc
import pylops
plt.close("all")
np.random.seed(0)
###############################################################################
# First of all, we create a 2-dimensional input vector containing an image
# from the ``scipy.misc`` family.
x = misc.face()[::5, ::5, 0]
nz, nx = x.shape
###############################################################################
# We can now define a set of available samples in the
# first and second direction of the array and apply bilinear interpolation.
nsamples = 2000
# fractional (row, col) coordinates of the randomly chosen available samples
iava = np.vstack(
    (np.random.uniform(0, nz - 1, nsamples), np.random.uniform(0, nx - 1, nsamples))
)
Bop = pylops.signalprocessing.Bilinear(iava, (nz, nx))
y = Bop * x
###############################################################################
# At this point we try to reconstruct the input signal imposing a smooth
# solution by means of a regularization term that minimizes the Laplacian of
# the solution.
D2op = pylops.Laplacian((nz, nx), weights=(1, 1), dtype="float64")
xadj = Bop.H * y
xinv = pylops.optimization.leastsquares.normal_equations_inversion(
    Bop, y, [D2op], epsRs=[np.sqrt(0.1)], **dict(maxiter=100)
)[0]
xadj = xadj.reshape(nz, nx)
xinv = xinv.reshape(nz, nx)
fig, axs = plt.subplots(1, 3, figsize=(10, 4))
fig.suptitle("Bilinear interpolation", fontsize=14, fontweight="bold", y=0.95)
axs[0].imshow(x, cmap="gray_r", vmin=0, vmax=250)
axs[0].axis("tight")
axs[0].set_title("Original")
axs[1].imshow(xadj, cmap="gray_r", vmin=0, vmax=250)
axs[1].axis("tight")
axs[1].set_title("Sampled")
axs[2].imshow(xinv, cmap="gray_r", vmin=0, vmax=250)
axs[2].axis("tight")
axs[2].set_title("2D Regularization")
plt.tight_layout()
plt.subplots_adjust(top=0.8)
| 1,982 | 32.05 | 84 | py |
pylops | pylops-master/examples/plot_diagonal.py | r"""
Diagonal
========
This example shows how to use the :py:class:`pylops.Diagonal` operator
to perform *Element-wise multiplication* between the input vector and a vector :math:`\mathbf{d}`.
In other words, the operator acts as a diagonal operator :math:`\mathbf{D}` whose elements along
the diagonal are the elements of the vector :math:`\mathbf{d}`.
"""
import matplotlib.gridspec as pltgs
import matplotlib.pyplot as plt
import numpy as np
import pylops
plt.close("all")
###############################################################################
# Let's define a diagonal operator :math:`\mathbf{d}` with increasing numbers from
# ``0`` to ``N`` and a unitary model :math:`\mathbf{x}`.
N = 10
d = np.arange(N)
x = np.ones(N)
Dop = pylops.Diagonal(d)
# forward and adjoint application
y = Dop * x
y1 = Dop.H * x
# visualize the dense matrix equivalent of the operator and the x/y vectors
gs = pltgs.GridSpec(1, 6)
fig = plt.figure(figsize=(7, 4))
ax = plt.subplot(gs[0, 0:3])
im = ax.imshow(Dop.matrix(), cmap="rainbow", vmin=0, vmax=N)
ax.set_title("A", size=20, fontweight="bold")
ax.set_xticks(np.arange(N - 1) + 0.5)
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.axis("tight")
ax = plt.subplot(gs[0, 3])
ax.imshow(x[:, np.newaxis], cmap="rainbow", vmin=0, vmax=N)
ax.set_title("x", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax = plt.subplot(gs[0, 4])
ax.text(
    0.35,
    0.5,
    "=",
    horizontalalignment="center",
    verticalalignment="center",
    size=40,
    fontweight="bold",
)
ax.axis("off")
ax = plt.subplot(gs[0, 5])
ax.imshow(y[:, np.newaxis], cmap="rainbow", vmin=0, vmax=N)
ax.set_title("y", size=20, fontweight="bold")
ax.set_xticks([])
ax.set_yticks(np.arange(N - 1) + 0.5)
ax.grid(linewidth=3, color="white")
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
fig.colorbar(im, ax=ax, ticks=[0, N], pad=0.3, shrink=0.7)
plt.tight_layout()
###############################################################################
# Similarly we can consider the input model as composed of two or more
# dimensions. In this case the diagonal operator can be still applied to
# each element or broadcasted along a specific direction. Let's start with the
# simplest case where each element is multiplied by a different value
nx, ny = 3, 5
x = np.ones((nx, ny))
print(f"x =\n{x}")
d = np.arange(nx * ny).reshape(nx, ny)
Dop = pylops.Diagonal(d)
y = Dop * x.ravel()
y1 = Dop.H * x.ravel()
print(f"y = D*x =\n{y.reshape(nx, ny)}")
print(f"xadj = D'*x =\n{y1.reshape(nx, ny)}")
###############################################################################
# And we now broadcast
nx, ny = 3, 5
x = np.ones((nx, ny))
print(f"x =\n{x}")
# 1st dim
d = np.arange(nx)
Dop = pylops.Diagonal(d, dims=(nx, ny), axis=0)
y = Dop * x.ravel()
y1 = Dop.H * x.ravel()
print(f"1st dim: y = D*x =\n{y.reshape(nx, ny)}")
print(f"1st dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")
# 2nd dim
d = np.arange(ny)
Dop = pylops.Diagonal(d, dims=(nx, ny), axis=1)
y = Dop * x.ravel()
y1 = Dop.H * x.ravel()
print(f"2nd dim: y = D*x =\n{y.reshape(nx, ny)}")
print(f"2nd dim: xadj = D'*x =\n{y1.reshape(nx, ny)}")
| 3,237 | 27.156522 | 98 | py |
pylops | pylops-master/docs/source/conf.py | # -*- coding: utf-8 -*-
# Sphinx configuration for building the PyLops documentation.
import sys
import os
import datetime
from sphinx_gallery.sorting import ExampleTitleSortKey
from pylops import __version__
# Sphinx needs to be able to import the package to use autodoc and get the version number
sys.path.insert(0, os.path.abspath("../../pylops"))
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.doctest",
    "sphinx.ext.viewcode",
    "sphinx.ext.extlinks",
    "sphinx.ext.intersphinx",
    "matplotlib.sphinxext.plot_directive",
    "numpydoc",
    "nbsphinx",
    "sphinx_gallery.gen_gallery",
    # 'sphinx.ext.napoleon',
]
# intersphinx configuration
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
    "sklearn": ("http://scikit-learn.org/stable/", None),
    "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None),
    "matplotlib": ("https://matplotlib.org/", None),
    "pyfftw": ("https://pyfftw.readthedocs.io/en/latest/", None),
    "spgl1": ("https://spgl1.readthedocs.io/en/latest/", None),
}
# Generate autodoc stubs with summaries from code
autosummary_generate = True
# Include Python objects as they appear in source files
autodoc_member_order = "bysource"
# Default flags used by autodoc directives
autodoc_default_flags = ["members"]
# Avoid showing typing annotations in doc
autodoc_typehints = "none"
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = False
numpydoc_class_members_toctree = False
# sphinx-gallery: build the example and tutorial galleries from the scripts
sphinx_gallery_conf = {
    # path to your examples scripts
    "examples_dirs": [
        "../../examples",
        "../../tutorials",
    ],
    # path where to save gallery generated examples
    "gallery_dirs": ["gallery", "tutorials"],
    "filename_pattern": r"\.py",
    # Remove the "Download all examples" button from the top level gallery
    "download_all_examples": False,
    # Sort gallery example by file name instead of number of lines (default)
    "within_subsection_order": ExampleTitleSortKey,
    # directory where function granular galleries are stored
    "backreferences_dir": "api/generated/backreferences",
    # Modules for which function level galleries are created.
    "doc_module": "pylops",
    # Insert links to documentation of objects in the examples
    "reference_url": {"pylops": None},
}
# Always show the source code that generates a plot
plot_include_source = True
plot_formats = ["png"]
# Sphinx project configuration
templates_path = ["_templates"]
exclude_patterns = ["_build", "**.ipynb_checkpoints", "**.ipynb", "**.md5"]
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8-sig"
master_doc = "index"
# General information about the project
year = datetime.date.today().year
project = "PyLops"
copyright = "{}, PyLops Development Team".format(year)
# Version
version = __version__
# collapse local/unknown versions (e.g. "1.0.0+dirty") to a "dev" label
if len(version.split("+")) > 1 or version == "unknown":
    version = "dev"
# These enable substitutions using |variable| in the rst files
rst_epilog = """
.. |year| replace:: {year}
""".format(
    year=year
)
html_static_path = ["_static"]
html_last_updated_fmt = "%b %d, %Y"
html_title = "PyLops"
html_short_title = "PyLops"
html_logo = "_static/pylops.png"
html_favicon = "_static/favicon.ico"
html_extra_path = []
pygments_style = "default"
add_function_parentheses = False
html_show_sourcelink = False
html_show_sphinx = True
html_show_copyright = True
# Theme config
html_theme = "pydata_sphinx_theme"
html_theme_options = {
    "logo_only": True,
    "display_version": True,
    "logo": {
        "image_light": "pylops_b.png",
        "image_dark": "pylops.png",
    }
}
html_css_files = [
    'css/custom.css',
]
html_context = {
    "menu_links_name": "Repository",
    "menu_links": [
        (
            '<i class="fa fa-github fa-fw"></i> Source Code',
            "https://github.com/PyLops/pylops",
        ),
        (
            '<i class="fa fa-users fa-fw"></i> Contributing',
            "https://github.com/PyLops/pylops/blob/master/CONTRIBUTING.md",
        ),
    ],
    # Custom variables to enable "Improve this page"" and "Download notebook"
    # links
    "doc_path": "docs/source",
    "galleries": sphinx_gallery_conf["gallery_dirs"],
    "gallery_dir": dict(
        zip(sphinx_gallery_conf["gallery_dirs"], sphinx_gallery_conf["examples_dirs"])
    ),
    "github_project": "PyLops",
    "github_repo": "pylops",
    "github_version": "master",
}
# Load the custom CSS files (needs sphinx >= 1.6 for this to work)
def setup(app):
    """Sphinx extension hook: register the additional custom stylesheet."""
    app.add_css_file("style.css")
| 4,717 | 28.304348 | 89 | py |
pylops | pylops-master/pytests/test_fredholm.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import Fredholm1
from pylops.utils import dottest
# Parameter sets for the Fredholm1 tests: nsl slices of an (ny x nx) kernel,
# real vs complex entries (imag), matmul vs non-matmul application
# (usematmul), and whether the adjoint kernel is precomputed (saveGt).
# Fix: par4 previously listed the "saveGt" key twice; the duplicate has been
# removed and the key order aligned with the other parameter sets.
par1 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 5,
    "usematmul": True,
    "saveGt": True,
    "imag": 0,
    "dtype": "float32",
}  # real, saved Gt
par2 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 5,
    "usematmul": True,
    "saveGt": False,
    "imag": 0,
    "dtype": "float32",
}  # real, unsaved Gt
par3 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 5,
    "usematmul": False,
    "saveGt": True,
    "imag": 1j,
    "dtype": "complex64",
}  # complex, saved Gt
par4 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 5,
    "usematmul": False,
    "saveGt": False,
    "imag": 1j,
    "dtype": "complex64",
}  # complex, unsaved Gt
par5 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 1,
    "usematmul": True,
    "saveGt": True,
    "imag": 0,
    "dtype": "float32",
}  # real, saved Gt, nz=1
par6 = {
    "nsl": 3,
    "ny": 6,
    "nx": 4,
    "nz": 1,
    "usematmul": True,
    "saveGt": False,
    "imag": 0,
    "dtype": "float32",
}  # real, unsaved Gt, nz=1
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_Fredholm1(par):
    """Dot-test and LSQR inversion of the Fredholm1 operator."""
    np.random.seed(10)
    nsl, ny, nx, nz = par["nsl"], par["ny"], par["nx"], par["nz"]
    imag = par["imag"]
    # deterministic kernel: ramp of integers, optionally made complex
    base = np.arange(nsl * nx * ny).reshape(nsl, nx, ny)
    kernel = base - imag * base
    # constant (possibly complex) model of unit amplitude
    xtrue = (1 + imag) * np.ones((nsl, ny, nz))
    Fop = Fredholm1(
        kernel,
        nz=nz,
        saveGt=par["saveGt"],
        usematmul=par["usematmul"],
        dtype=par["dtype"],
    )
    assert dottest(
        Fop,
        nsl * nx * nz,
        nsl * ny * nz,
        complexflag=0 if imag == 0 else 3,
    )
    data = Fop * xtrue.ravel()
    xest = lsqr(Fop, data, damp=1e-20, iter_lim=30, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(xtrue, xest.reshape(nsl, ny, nz), decimal=3)
| 2,240 | 20.970588 | 96 | py |
pylops | pylops-master/pytests/test_solver.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr as sp_lsqr
from pylops.basicoperators import MatrixMult
from pylops.optimization.basic import cg, cgls, lsqr
# Parameter sets shared by the basic-solver tests: system size (ny x nx),
# real (imag=0) vs complex (imag=1j) entries, and whether a non-zero initial
# guess x0 is used. The "j" suffix marks the complex variants.
par1 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # square real, zero initial guess
par2 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # square real, non-zero initial guess
par3 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # overdetermined real, zero initial guess
par4 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # overdetermined real, non-zero initial guess
par1j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # square complex, zero initial guess
par2j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # square complex, non-zero initial guess
par3j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # overdetermined complex, zero initial guess
par4j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # overdetermined complex, non-zero initial guess
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_cg(par):
    """CG with linear operator

    Build a symmetric/Hermitian positive-definite system, solve it with CG
    and check the true model is recovered (with and without initial guess).
    Fix: the parametrization listed ``par3j`` twice and never exercised
    ``par4j`` (overdetermined complex with non-zero initial guess).
    """
    np.random.seed(10)
    A = np.random.normal(0, 10, (par["ny"], par["nx"])) + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"]))
    A = np.conj(A).T @ A  # to ensure definite positive matrix
    Aop = MatrixMult(A, dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    if par["x0"]:
        # random, possibly complex starting guess
        x0 = np.random.normal(0, 10, par["nx"]) + par["imag"] * np.random.normal(
            0, 10, par["nx"]
        )
    else:
        x0 = None
    y = Aop * x
    xinv = cg(Aop, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
    assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_cg_ndarray(par):
    """CG with linear operator and ndarray model/data

    Same system as ``test_cg`` but with the model organized as a 2d array,
    also checking that the solver preserves the ndarray shape.
    Fix: the parametrization listed ``par3j`` twice and never exercised
    ``par4j``.
    """
    np.random.seed(10)
    dims = dimsd = (par["nx"], par["ny"])
    x = np.ones(dims) + par["imag"] * np.ones(dims)
    A = np.random.normal(0, 10, (x.size, x.size)) + par["imag"] * np.random.normal(
        0, 10, (x.size, x.size)
    )
    A = np.conj(A).T @ A  # to ensure definite positive matrix
    Aop = MatrixMult(A, dtype=par["dtype"])
    Aop.dims = dims
    Aop.dimsd = dimsd
    if par["x0"]:
        x0 = np.random.normal(0, 10, dims) + par["imag"] * np.random.normal(0, 10, dims)
    else:
        x0 = None
    y = Aop * x
    xinv = cg(Aop, y, x0=x0, niter=2 * x.size, tol=1e-5, show=True)[0]
    assert xinv.shape == x.shape
    assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_cgls(par):
    """CGLS with linear operator

    Fix: the parametrization listed ``par3j`` twice and never exercised
    ``par4j`` (overdetermined complex with non-zero initial guess).
    """
    np.random.seed(10)
    A = np.random.normal(0, 10, (par["ny"], par["nx"])) + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"]))
    Aop = MatrixMult(A, dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    if par["x0"]:
        # random, possibly complex starting guess
        x0 = np.random.normal(0, 10, par["nx"]) + par["imag"] * np.random.normal(
            0, 10, par["nx"]
        )
    else:
        x0 = None
    y = Aop * x
    xinv = cgls(Aop, y, x0=x0, niter=par["nx"], tol=1e-5, show=True)[0]
    assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_lsqr(par):
    """Compare local Pylops and scipy LSQR

    Fix: the parametrization listed ``par3j`` twice and never exercised
    ``par4j`` (overdetermined complex with non-zero initial guess).
    """
    np.random.seed(10)
    A = np.random.normal(0, 10, (par["ny"], par["nx"])) + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"]))
    Aop = MatrixMult(A, dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    if par["x0"]:
        x0 = np.random.normal(0, 10, par["nx"]) + par["imag"] * np.random.normal(
            0, 10, par["nx"]
        )
    else:
        x0 = None
    y = Aop * x
    # scipy's lsqr has no x0 argument: solve for the residual with respect to
    # the initial guess and add the guess back afterwards
    if par["x0"]:
        y_sp = y - Aop * x0
    else:
        y_sp = y.copy()
    xinv = lsqr(Aop, y, x0, niter=par["nx"])[0]
    xinv_sp = sp_lsqr(Aop, y_sp, iter_lim=par["nx"])[0]
    if par["x0"]:
        xinv_sp += x0
    assert_array_almost_equal(xinv, x, decimal=4)
    assert_array_almost_equal(xinv_sp, x, decimal=4)
| 4,695 | 25.382022 | 88 | py |
pylops | pylops-master/pytests/test_pad.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from pylops.basicoperators import Pad
from pylops.utils import dottest
# Parameter sets for the Pad tests: signal size and per-dimension
# (before, after) padding lengths
par1 = {"ny": 11, "nx": 11, "pad": ((0, 2), (4, 5)), "dtype": "float64"}  # square
par2 = {"ny": 21, "nx": 11, "pad": ((3, 1), (0, 3)), "dtype": "float64"}  # rectangular
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1)])
def test_Pad_1d_negative(par):
    """A negative padding length on a 1d signal must raise ValueError."""
    bad_pad = (-10, 0)
    with pytest.raises(ValueError):
        Pad(dims=par["ny"], pad=bad_pad)
@pytest.mark.parametrize("par", [(par1)])
def test_Pad_2d_negative(par):
    """A negative padding length on a 2d signal must raise ValueError."""
    bad_pad = ((-10, 0), (3, -5))
    with pytest.raises(ValueError):
        Pad(dims=(par["ny"], par["nx"]), pad=bad_pad)
@pytest.mark.parametrize("par", [(par1)])
def test_Pad1d(par):
    """Pad on a 1d signal: dot-test plus pad/crop round trip."""
    Padop = Pad(dims=par["ny"], pad=par["pad"][0], dtype=par["dtype"])
    nrow, ncol = Padop.shape
    assert dottest(Padop, nrow, ncol)
    # adjoint crops the padding away, so H(P(x)) must return x exactly
    signal = np.arange(par["ny"], dtype=par["dtype"]) + 1.0
    recovered = Padop.H * (Padop * signal)
    assert_array_equal(signal, recovered)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Pad2d(par):
    """Pad on a 2d signal: dot-test plus pad/crop round trip."""
    shape2d = (par["ny"], par["nx"])
    Padop = Pad(dims=shape2d, pad=par["pad"], dtype=par["dtype"])
    nrow, ncol = Padop.shape
    assert dottest(Padop, nrow, ncol)
    signal = (np.arange(par["ny"] * par["nx"], dtype=par["dtype"]) + 1.0).reshape(
        shape2d
    )
    # adjoint removes the padding, recovering the flattened input exactly
    recovered = Padop.H * (Padop * signal.ravel())
    assert_array_equal(signal.ravel(), recovered)
| 1,645 | 30.653846 | 87 | py |
pylops | pylops-master/pytests/test_sparsity.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import FirstDerivative, Identity, MatrixMult
from pylops.optimization.sparsity import fista, irls, ista, omp, spgl1, splitbregman
# Parameter sets shared by the sparsity-solver tests: system size (ny x nx),
# real (imag=0) vs complex (imag=1j) entries, and whether a non-zero initial
# guess x0 is used. The "j" suffix marks the complex variants.
par1 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # square real, zero initial guess
par2 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # square real, non-zero initial guess
par3 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # overdetermined real, zero initial guess
par4 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # overdetermined real, non-zero initial guess
par5 = {
    "ny": 21,
    "nx": 41,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # underdetermined real, non-zero initial guess
par1j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # square complex, zero initial guess
par2j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # square complex, non-zero initial guess
par3j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # overdetermined complex, zero initial guess
par4j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # overdetermined complex, non-zero initial guess
par5j = {
    "ny": 21,
    "nx": 41,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # underdetermined complex, non-zero initial guess
@pytest.mark.parametrize("par", [(par3), (par4), (par3j), (par4j)])
def test_IRLS_data(par):
    """Invert problem with outliers using data IRLS"""
    np.random.seed(10)
    # dense (possibly complex) modelling operator
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    # optional random starting guess
    x0 = (
        np.random.normal(0, 10, par["nx"])
        + par["imag"] * np.random.normal(0, 10, par["nx"])
        if par["x0"]
        else None
    )
    y = Gop * x
    # add outlier
    y[par["ny"] - 2] *= 5
    # irls inversion (data-domain reweighting should be robust to the outlier)
    xinv = irls(
        Gop,
        y,
        x0=x0,
        nouter=10,
        threshR=False,
        epsR=1e-2,
        epsI=0,
        tolIRLS=1e-3,
        kind="data",
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
@pytest.mark.parametrize("par", [(par3), (par4), (par3j), (par4j)])
def test_IRLS_datamodel(par):
    """Invert problem with outliers using data-model IRLS"""
    np.random.seed(10)
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    # sparse model with three spikes
    x = np.zeros(par["nx"]) + par["imag"] * np.ones(par["nx"])
    x[par["nx"] // 2] = 1
    x[3] = 1
    x[par["nx"] - 4] = -1
    x0 = (
        np.random.normal(0, 10, par["nx"])
        + par["imag"] * np.random.normal(0, 10, par["nx"])
        if par["x0"]
        else None
    )
    y = Gop * x
    # add outlier
    y[par["ny"] - 2] *= 5
    # irls inversion (joint data- and model-domain reweighting)
    xinv = irls(
        Gop,
        y,
        x0=x0,
        nouter=10,
        threshR=False,
        epsR=1e-2,
        epsI=0,
        tolIRLS=1e-3,
        kind="datamodel",
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par3), (par5), (par1j), (par3j), (par5j)])
def test_IRLS_model(par):
    """Model-domain IRLS recovers a three-spike sparse model."""
    np.random.seed(42)
    nx = par["nx"]
    Aop = MatrixMult(np.random.randn(par["ny"], nx))
    # sparse model: three unit-amplitude spikes
    xtrue = np.zeros(nx)
    for idx, amp in ((nx // 2, 1), (3, 1), (nx - 4, -1)):
        xtrue[idx] = amp
    data = Aop * xtrue
    xest = irls(Aop, data, nouter=100, tolIRLS=1e-3, kind="model")[0]
    assert_array_almost_equal(xtrue, xest, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par3), (par5), (par1j), (par3j), (par5j)])
def test_OMP(par):
    """Orthogonal Matching Pursuit recovers a three-spike sparse model."""
    np.random.seed(42)
    nx = par["nx"]
    Aop = MatrixMult(np.random.randn(par["ny"], nx))
    # sparse model: three unit-amplitude spikes
    xtrue = np.zeros(nx)
    for idx, amp in ((nx // 2, 1), (3, 1), (nx - 4, -1)):
        xtrue[idx] = amp
    data = Aop * xtrue
    xest = omp(Aop, data, 100, sigma=1e-4, show=False)[0]
    assert_array_almost_equal(xtrue, xest, decimal=1)
def test_ISTA_FISTA_unknown_threshkind():
    """An unsupported threshkind must raise NotImplementedError in both solvers."""
    for solver in (ista, fista):
        with pytest.raises(NotImplementedError):
            solver(Identity(5), np.ones(5), 10, threshkind="foo")
def test_ISTA_FISTA_missing_perc():
    """A percentile-based threshkind with perc=None must raise ValueError."""
    for solver in (ista, fista):
        with pytest.raises(ValueError):
            solver(Identity(5), np.ones(5), 10, perc=None, threshkind="soft-percentile")
@pytest.mark.parametrize("par", [(par1), (par3), (par5), (par1j), (par3j), (par5j)])
def test_ISTA_FISTA(par):
    """Invert problem with ISTA/FISTA"""
    np.random.seed(42)
    Aop = MatrixMult(np.random.randn(par["ny"], par["nx"]))
    # sparse model with three spikes
    x = np.zeros(par["nx"])
    x[par["nx"] // 2] = 1
    x[3] = 1
    x[par["nx"] - 4] = -1
    y = Aop * x
    eps = 0.5
    perc = 30
    maxit = 2000
    # ISTA with too high alpha (check that exception is raised)
    # monitorres=True makes ista track the residual so the divergence caused
    # by the oversized step can be detected and raised
    with pytest.raises(ValueError):
        xinv, _, _ = ista(
            Aop,
            y,
            niter=maxit,
            eps=eps,
            alpha=1e5,
            monitorres=True,
            tol=0,
        )
    # Regularization based ISTA and FISTA
    for threshkind in ["hard", "soft", "half"]:
        # ISTA
        xinv, _, _ = ista(
            Aop,
            y,
            niter=maxit,
            eps=eps,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
        # FISTA
        xinv, _, _ = fista(
            Aop,
            y,
            niter=maxit,
            eps=eps,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
    # Percentile based ISTA and FISTA
    for threshkind in ["hard-percentile", "soft-percentile", "half-percentile"]:
        # ISTA
        xinv, _, _ = ista(
            Aop,
            y,
            niter=maxit,
            perc=perc,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
        # FISTA
        xinv, _, _ = fista(
            Aop,
            y,
            niter=maxit,
            perc=perc,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par3), (par5), (par1j), (par3j), (par5j)])
def test_ISTA_FISTA_multiplerhs(par):
    """Invert problem with ISTA/FISTA with multiple RHS"""
    np.random.seed(42)
    Aop = MatrixMult(np.random.randn(par["ny"], par["nx"]))
    x = np.zeros(par["nx"])
    x[par["nx"] // 2] = 1
    x[3] = 1
    x[par["nx"] - 4] = -1
    # replicate the spike model into 3 identical right-hand-side columns
    x = np.outer(x, np.ones(3))
    y = Aop * x
    eps = 0.5
    perc = 30
    maxit = 2000
    # Regularization based ISTA and FISTA
    for threshkind in ["hard", "soft", "half"]:
        # ISTA
        xinv, _, _ = ista(
            Aop,
            y,
            niter=maxit,
            eps=eps,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
        # FISTA
        xinv, _, _ = fista(
            Aop,
            y,
            niter=maxit,
            eps=eps,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
    # Percentile based ISTA and FISTA
    for threshkind in ["hard-percentile", "soft-percentile", "half-percentile"]:
        # ISTA
        xinv, _, _ = ista(
            Aop,
            y,
            niter=maxit,
            perc=perc,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
        # FISTA
        xinv, _, _ = fista(
            Aop,
            y,
            niter=maxit,
            perc=perc,
            threshkind=threshkind,
            tol=0,
            show=False,
        )
        assert_array_almost_equal(x, xinv, decimal=1)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par1j), (par3j)]
)
def test_SPGL1(par):
    """Invert problem with SPGL1"""
    np.random.seed(42)
    Aop = MatrixMult(np.random.randn(par["ny"], par["nx"]))
    # sparse model with three spikes
    x = np.zeros(par["nx"])
    x[par["nx"] // 2] = 1
    x[3] = 1
    x[par["nx"] - 4] = -1
    # optional random (possibly complex) starting guess
    x0 = (
        np.random.normal(0, 10, par["nx"])
        + par["imag"] * np.random.normal(0, 10, par["nx"])
        if par["x0"]
        else None
    )
    y = Aop * x
    xinv = spgl1(Aop, y, x0=x0, iter_lim=5000)[0]
    assert_array_almost_equal(x, xinv, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_SplitBregman(par):
    """Invert denoise problem with SplitBregman"""
    np.random.seed(42)
    # need enough samples for TV regularization to be effective
    nx = 3 * par["nx"]
    Iop = Identity(nx)
    Dop = FirstDerivative(nx, edge=True)
    # blocky model: two constant plateaus plus zeros, corrupted by noise
    x = np.zeros(nx)
    x[: nx // 2] = 10
    x[nx // 2 : 3 * nx // 4] = -5
    n = np.random.normal(0, 1, nx)
    y = x + n
    mu = 0.05
    lamda = 0.3
    niter_end = 50
    niter_in = 3
    x0 = np.ones(nx)
    # TV-regularized denoising; extra kwargs are forwarded to the inner solver
    xinv, _, _ = splitbregman(
        Iop,
        y,
        [Dop],
        niter_outer=niter_end,
        niter_inner=niter_in,
        mu=mu,
        epsRL1s=[lamda],
        tol=1e-4,
        tau=1,
        x0=x0 if par["x0"] else None,
        restart=False,
        show=False,
        **dict(iter_lim=5, damp=1e-3)
    )
    # check relative error rather than element-wise match (noisy data)
    assert (np.linalg.norm(x - xinv) / np.linalg.norm(x)) < 1e-1
| 10,449 | 24 | 87 | py |
pylops | pylops-master/pytests/test_memoizeoperator.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops import MemoizeOperator
from pylops.basicoperators import MatrixMult, VStack
# Parameter sets for the MemoizeOperator tests: square real and complex systems
par1 = {"ny": 11, "nx": 11, "imag": 0, "dtype": "float32"}  # square real
par1j = {"ny": 11, "nx": 11, "imag": 1j, "dtype": "complex64"}  # square imag
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_memoize_evals(par):
    """Check nevals counter when same model/data vectors are inputted
    to the operator
    """
    np.random.seed(0)
    A = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Aop = MatrixMult(A, dtype=par["dtype"])
    # store at most the 2 most recent (input, output) pairs
    Amemop = MemoizeOperator(Aop, max_neval=2)
    # 1st evaluation
    Amemop * np.ones(par["nx"])
    assert Amemop.neval == 1
    # repeat 1st evaluation multiple times: cached, counter unchanged
    for _ in range(2):
        Amemop * np.ones(par["nx"])
    assert Amemop.neval == 1
    # 2nd evaluation
    Amemop * np.full(par["nx"], 2)  # new input vector -> new evaluation
    assert Amemop.neval == 2
    # 3rd evaluation (np.ones goes out of store)
    Amemop * np.full(par["nx"], 3)  # new input vector -> new evaluation
    assert Amemop.neval == 3
    # 4th evaluation: the ones-vector was evicted, so it is evaluated again
    Amemop * np.ones(par["nx"])
    assert Amemop.neval == 4
@pytest.mark.parametrize(
    "par",
    [
        (par1j),
    ],
)
def test_memoize_evals_2(par):
    """Inversion of problem with real model and complex data, using two
    equivalent approaches: 1. complex operator enforcing the output of adjoint
    to be real, 2. joint system of equations for real and complex parts
    """
    np.random.seed(0)
    # real dtype matching the precision of the (complex) operator dtype
    rdtype = np.real(np.ones(1, dtype=par["dtype"])).dtype
    A = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(rdtype) + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype(rdtype)
    Aop = MatrixMult(A, dtype=par["dtype"])
    x = np.ones(par["nx"], dtype=rdtype)
    y = Aop * x
    # Approach 1
    Aop1 = Aop.toreal(forw=False, adj=True)
    xinv1 = Aop1.div(y)
    assert_array_almost_equal(x, xinv1)
    # Approach 2: memoization avoids re-evaluating Aop for the real and
    # imaginary halves of the stacked system
    Amop = MemoizeOperator(Aop, max_neval=10)
    Aop2 = VStack([Amop.toreal(), Amop.toimag()])
    y2 = np.concatenate([np.real(y), np.imag(y)])
    xinv2 = Aop2.div(y2)
    assert_array_almost_equal(x, xinv2)
| 2,334 | 30.986301 | 80 | py |
pylops | pylops-master/pytests/test_combine.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse import random as sp_random
from scipy.sparse.linalg import lsqr
from pylops.basicoperators import Block, BlockDiag, HStack, MatrixMult, Real, VStack
from pylops.utils import dottest
# Parameter sets for the stacking-operator tests: square/overdetermined,
# real/complex systems
par1 = {"ny": 101, "nx": 101, "imag": 0, "dtype": "float64"}  # square real
par2 = {"ny": 301, "nx": 101, "imag": 0, "dtype": "float64"}  # overdetermined real
par1j = {"ny": 101, "nx": 101, "imag": 1j, "dtype": "complex128"}  # square imag
par2j = {"ny": 301, "nx": 101, "imag": 1j, "dtype": "complex128"}  # overdetermined imag
@pytest.mark.parametrize("par", [(par1)])
def test_VStack_incosistent_columns(par):
    """Check error is raised if operators with different number of columns
    are passed to VStack
    """
    G1 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    # G2 has one extra column, making vertical stacking invalid
    G2 = np.random.normal(0, 10, (par["ny"], par["nx"] + 1)).astype(par["dtype"])
    with pytest.raises(ValueError):
        VStack(
            [MatrixMult(G1, dtype=par["dtype"]), MatrixMult(G2, dtype=par["dtype"])],
            dtype=par["dtype"],
        )
@pytest.mark.parametrize("par", [(par1)])
def test_HStack_incosistent_columns(par):
    """Check error is raised if operators with different number of rows
    are passed to HStack
    """
    G1 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    # G2 has one extra row, making horizontal stacking invalid
    G2 = np.random.normal(0, 10, (par["ny"] + 1, par["nx"])).astype(par["dtype"])
    with pytest.raises(ValueError):
        HStack(
            [MatrixMult(G1, dtype=par["dtype"]), MatrixMult(G2, dtype=par["dtype"])],
            dtype=par["dtype"],
        )
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_VStack(par):
    """Dot-test and inversion for VStack operator"""
    np.random.seed(0)
    G1 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    G2 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    Vop = VStack(
        [MatrixMult(G1, dtype=par["dtype"]), MatrixMult(G2, dtype=par["dtype"])],
        dtype=par["dtype"],
    )
    assert dottest(
        Vop, 2 * par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    xlsqr = lsqr(Vop, Vop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0)[
        0
    ]
    assert_array_almost_equal(x, xlsqr, decimal=4)
    # use numpy matrix directly in the definition of the operator
    V1op = VStack([G1, MatrixMult(G2, dtype=par["dtype"])], dtype=par["dtype"])
    assert dottest(
        V1op, 2 * par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # use scipy matrix directly in the definition of the operator
    G1 = sp_random(par["ny"], par["nx"], density=0.4).astype("float32")
    V2op = VStack([G1, MatrixMult(G2, dtype=par["dtype"])], dtype=par["dtype"])
    assert dottest(
        V2op, 2 * par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
@pytest.mark.parametrize("par", [(par2), (par2j)])
def test_HStack(par):
    """Dot-test and inversion for HStack operator with numpy array as input"""
    np.random.seed(0)
    G1 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    G2 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    # model has 2*nx entries, one half per horizontally-stacked operator
    x = np.ones(2 * par["nx"]) + par["imag"] * np.ones(2 * par["nx"])
    Hop = HStack([G1, MatrixMult(G2, dtype=par["dtype"])], dtype=par["dtype"])
    assert dottest(
        Hop, par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    xlsqr = lsqr(Hop, Hop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0)[
        0
    ]
    assert_array_almost_equal(x, xlsqr, decimal=4)
    # use numpy matrix directly in the definition of the operator
    H1op = HStack([G1, MatrixMult(G2, dtype=par["dtype"])], dtype=par["dtype"])
    assert dottest(
        H1op, par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # use scipy matrix directly in the definition of the operator
    G1 = sp_random(par["ny"], par["nx"], density=0.4).astype("float32")
    H2op = HStack([G1, MatrixMult(G2, dtype=par["dtype"])], dtype=par["dtype"])
    assert dottest(
        H2op, par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Block(par):
    """Dot-test and inversion for Block operator"""
    np.random.seed(0)
    # 2x2 block system of (ny x nx) sub-operators
    G11 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    G12 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    G21 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    G22 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(2 * par["nx"]) + par["imag"] * np.ones(2 * par["nx"])
    Bop = Block(
        [
            [MatrixMult(G11, dtype=par["dtype"]), MatrixMult(G12, dtype=par["dtype"])],
            [MatrixMult(G21, dtype=par["dtype"]), MatrixMult(G22, dtype=par["dtype"])],
        ],
        dtype=par["dtype"],
    )
    assert dottest(
        Bop, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    xlsqr = lsqr(Bop, Bop * x, damp=1e-20, iter_lim=500, atol=1e-8, btol=1e-8, show=0)[
        0
    ]
    assert_array_almost_equal(x, xlsqr, decimal=3)
    # use numpy matrix directly in the definition of the operator
    B1op = Block(
        [
            [G11, MatrixMult(G12, dtype=par["dtype"])],
            [MatrixMult(G21, dtype=par["dtype"]), G22],
        ],
        dtype=par["dtype"],
    )
    assert dottest(
        B1op, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # use scipy matrix directly in the definition of the operator
    G11 = sp_random(par["ny"], par["nx"], density=0.4).astype("float32")
    B2op = Block(
        [
            [G11, MatrixMult(G12, dtype=par["dtype"])],
            [MatrixMult(G21, dtype=par["dtype"]), G22],
        ],
        dtype=par["dtype"],
    )
    assert dottest(
        B2op, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_BlockDiag(par):
    """Dot-test and inversion for BlockDiag operator.

    Places two matrices on the block diagonal, verifies the adjoint via
    dot-test, recovers the model with lsqr, and checks that numpy and
    scipy sparse matrices are accepted directly by BlockDiag.
    """
    np.random.seed(0)
    G1 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    G2 = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(2 * par["nx"]) + par["imag"] * np.ones(2 * par["nx"])
    BDop = BlockDiag(
        [MatrixMult(G1, dtype=par["dtype"]), MatrixMult(G2, dtype=par["dtype"])],
        dtype=par["dtype"],
    )
    assert dottest(
        BDop, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # invert y = BDop x and check the model is recovered
    xlsqr = lsqr(
        BDop, BDop * x, damp=1e-20, iter_lim=500, atol=1e-8, btol=1e-8, show=0
    )[0]
    assert_array_almost_equal(x, xlsqr, decimal=3)
    # use numpy matrix directly in the definition of the operator
    BD1op = BlockDiag([MatrixMult(G1, dtype=par["dtype"]), G2], dtype=par["dtype"])
    assert dottest(
        BD1op, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # use scipy matrix directly in the definition of the operator
    G2 = sp_random(par["ny"], par["nx"], density=0.4).astype("float32")
    BD2op = BlockDiag([MatrixMult(G1, dtype=par["dtype"]), G2], dtype=par["dtype"])
    assert dottest(
        BD2op, 2 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_VStack_multiproc(par):
    """Single- and multi-process consistency for VStack operator.

    The same stack of four identical MatrixMult blocks is applied with and
    without a worker pool; forward and adjoint results must agree.
    """
    np.random.seed(0)
    nproc = 2
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    y = np.ones(4 * par["ny"]) + par["imag"] * np.ones(4 * par["ny"])
    Vop = VStack([MatrixMult(G, dtype=par["dtype"])] * 4, dtype=par["dtype"])
    Vmultiop = VStack(
        [MatrixMult(G, dtype=par["dtype"])] * 4, nproc=nproc, dtype=par["dtype"]
    )
    assert dottest(
        Vmultiop, 4 * par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # forward
    assert_array_almost_equal(Vop * x, Vmultiop * x, decimal=4)
    # adjoint
    assert_array_almost_equal(Vop.H * y, Vmultiop.H * y, decimal=4)
    # close pool
    Vmultiop.pool.close()
@pytest.mark.parametrize("par", [(par2), (par2j)])
def test_HStack_multiproc(par):
    """Single- and multi-process consistency for HStack operator.

    The same stack of four identical MatrixMult blocks is applied with and
    without a worker pool; forward and adjoint results must agree.
    """
    np.random.seed(0)
    nproc = 2
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(4 * par["nx"]) + par["imag"] * np.ones(4 * par["nx"])
    y = np.ones(par["ny"]) + par["imag"] * np.ones(par["ny"])
    Hop = HStack([MatrixMult(G, dtype=par["dtype"])] * 4, dtype=par["dtype"])
    Hmultiop = HStack(
        [MatrixMult(G, dtype=par["dtype"])] * 4, nproc=nproc, dtype=par["dtype"]
    )
    assert dottest(
        Hmultiop, par["ny"], 4 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # forward
    assert_array_almost_equal(Hop * x, Hmultiop * x, decimal=4)
    # adjoint
    assert_array_almost_equal(Hop.H * y, Hmultiop.H * y, decimal=4)
    # close pool
    Hmultiop.pool.close()
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Block_multiproc(par):
    """Single- and multi-process consistency for Block operator.

    A 4x2 block layout of identical MatrixMult blocks is applied with and
    without a worker pool; forward and adjoint results must agree.
    """
    np.random.seed(0)
    nproc = 2
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    Gvert = [MatrixMult(G, dtype=par["dtype"])] * 2
    Ghor = [Gvert] * 4
    x = np.ones(2 * par["nx"]) + par["imag"] * np.ones(2 * par["nx"])
    y = np.ones(4 * par["ny"]) + par["imag"] * np.ones(4 * par["ny"])
    Bop = Block(Ghor, dtype=par["dtype"])
    Bmultiop = Block(Ghor, nproc=nproc, dtype=par["dtype"])
    assert dottest(
        Bmultiop, 4 * par["ny"], 2 * par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # forward
    assert_array_almost_equal(Bop * x, Bmultiop * x, decimal=3)
    # adjoint
    assert_array_almost_equal(Bop.H * y, Bmultiop.H * y, decimal=3)
    # close pool
    Bmultiop.pool.close()
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_BlockDiag_multiproc(par):
    """Single- and multi-process consistency for BlockDiag operator.

    The same block-diagonal of four identical MatrixMult blocks is applied
    with and without a worker pool; forward and adjoint results must agree.
    """
    np.random.seed(0)
    nproc = 2
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    x = np.ones(4 * par["nx"]) + par["imag"] * np.ones(4 * par["nx"])
    y = np.ones(4 * par["ny"]) + par["imag"] * np.ones(4 * par["ny"])
    BDop = BlockDiag([MatrixMult(G, dtype=par["dtype"])] * 4, dtype=par["dtype"])
    BDmultiop = BlockDiag(
        [MatrixMult(G, dtype=par["dtype"])] * 4, nproc=nproc, dtype=par["dtype"]
    )
    assert dottest(
        BDmultiop,
        4 * par["ny"],
        4 * par["nx"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # forward
    assert_array_almost_equal(BDop * x, BDmultiop * x, decimal=4)
    # adjoint
    assert_array_almost_equal(BDop.H * y, BDmultiop.H * y, decimal=4)
    # close pool
    BDmultiop.pool.close()
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_VStack_rlinear(par):
    """VStack operator applied to mix of R-linear and C-linear operators.

    Stacking a Real (R-linear) operator with a MatrixMult must mark the
    whole stack as non-C-linear, and the forward must concatenate the
    real part of x with G @ x.
    """
    np.random.seed(0)
    if np.dtype(par["dtype"]).kind == "c":
        G = (
            np.random.normal(0, 10, (par["ny"], par["nx"]))
            + 1j * np.random.normal(0, 10, (par["ny"], par["nx"]))
        ).astype(par["dtype"])
    else:
        G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    Rop = Real(dims=(par["nx"],), dtype=par["dtype"])
    VSop = VStack([Rop, MatrixMult(G, dtype=par["dtype"])], dtype=par["dtype"])
    # a single R-linear block makes the entire stack R-linear
    assert VSop.clinear is False
    assert dottest(
        VSop, par["nx"] + par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3
    )
    # forward
    x = np.random.randn(par["nx"]) + par["imag"] * np.random.randn(par["nx"])
    expected = np.concatenate([np.real(x), G @ x])
    assert_array_almost_equal(expected, VSop * x, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_HStack_rlinear(par):
    """HStack operator applied to mix of R-linear and C-linear operators.

    Stacking a Real (R-linear) operator with a MatrixMult must mark the
    whole stack as non-C-linear, and the forward must sum the real part of
    the first segment of x with G applied to the second segment.
    """
    np.random.seed(0)
    if np.dtype(par["dtype"]).kind == "c":
        G = (
            np.random.normal(0, 10, (par["ny"], par["nx"]))
            + 1j * np.random.normal(0, 10, (par["ny"], par["nx"]))
        ).astype(par["dtype"])
    else:
        G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    Rop = Real(dims=(par["ny"],), dtype=par["dtype"])
    HSop = HStack([Rop, MatrixMult(G, dtype=par["dtype"])], dtype=par["dtype"])
    # a single R-linear block makes the entire stack R-linear
    assert HSop.clinear is False
    assert dottest(
        HSop, par["ny"], par["nx"] + par["ny"], complexflag=0 if par["imag"] == 0 else 3
    )
    # forward
    x = np.random.randn(par["nx"] + par["ny"]) + par["imag"] * np.random.randn(
        par["nx"] + par["ny"]
    )
    expected = np.sum([np.real(x[: par["ny"]]), G @ x[par["ny"] :]], axis=0)
    assert_array_almost_equal(expected, HSop * x, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_BlockDiag_rlinear(par):
    """BlockDiag operator applied to mix of R-linear and C-linear operators.

    Placing a Real (R-linear) operator on the block diagonal with a
    MatrixMult must mark the whole operator as non-C-linear, and the
    forward must apply each block to its own segment of x.
    """
    np.random.seed(0)
    if np.dtype(par["dtype"]).kind == "c":
        G = (
            np.random.normal(0, 10, (par["ny"], par["nx"]))
            + 1j * np.random.normal(0, 10, (par["ny"], par["nx"]))
        ).astype(par["dtype"])
    else:
        G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    Rop = Real(dims=(par["nx"],), dtype=par["dtype"])
    BDop = BlockDiag([Rop, MatrixMult(G, dtype=par["dtype"])], dtype=par["dtype"])
    # a single R-linear block makes the entire operator R-linear
    assert BDop.clinear is False
    assert dottest(
        BDop,
        par["nx"] + par["ny"],
        2 * par["nx"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # forward
    x = np.random.randn(2 * par["nx"]) + par["imag"] * np.random.randn(2 * par["nx"])
    expected = np.concatenate([np.real(x[: par["nx"]]), G @ x[par["nx"] :]])
    assert_array_almost_equal(expected, BDop * x, decimal=4)
| 14,453 | 37.339523 | 88 | py |
pylops | pylops-master/pytests/test_smoothing.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr
from pylops.basicoperators import Smoothing1D, Smoothing2D
from pylops.utils import dottest
par1 = {"nz": 10, "ny": 30, "nx": 20, "axis": 0} # even, first direction
par2 = {"nz": 11, "ny": 51, "nx": 31, "axis": 0} # odd, first direction
par3 = {"nz": 10, "ny": 30, "nx": 20, "axis": 1} # even, second direction
par4 = {"nz": 11, "ny": 51, "nx": 31, "axis": 1} # odd, second direction
par5 = {"nz": 10, "ny": 30, "nx": 20, "axis": 2} # even, third direction
par6 = {"nz": 11, "ny": 51, "nx": 31, "axis": 2} # odd, third direction
np.random.seed(0)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Smoothing1D(par):
    """Dot-test and inversion for smoothing.

    A 1d smoothing kernel is applied along one axis of 1d, 2d and 3d
    signals; each case is dot-tested and inverted with lsqr.
    """
    # 1d kernel on 1d signal
    D1op = Smoothing1D(nsmooth=5, dims=par["nx"], dtype="float64")
    assert dottest(D1op, par["nx"], par["nx"])
    x = np.random.normal(0, 1, par["nx"])
    y = D1op * x
    xlsqr = lsqr(D1op, y, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=3)
    # 1d kernel on 2d signal
    D1op = Smoothing1D(
        nsmooth=5, dims=(par["ny"], par["nx"]), axis=par["axis"], dtype="float64"
    )
    assert dottest(D1op, par["ny"] * par["nx"], par["ny"] * par["nx"])
    x = np.random.normal(0, 1, (par["ny"], par["nx"])).ravel()
    y = D1op * x
    xlsqr = lsqr(D1op, y, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=3)
    # 1d kernel on 3d signal
    D1op = Smoothing1D(
        nsmooth=5,
        dims=(par["nz"], par["ny"], par["nx"]),
        axis=par["axis"],
        dtype="float64",
    )
    assert dottest(
        D1op,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )
    x = np.random.normal(0, 1, (par["nz"], par["ny"], par["nx"])).ravel()
    y = D1op * x
    xlsqr = lsqr(D1op, y, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_Smoothing2D(par):
    """Dot-test for smoothing.

    A 5x5 averaging kernel is applied to 2d and 3d signals; the impulse
    response at the centre of an input spike is checked to equal 1/25
    along both smoothed axes, and the model is recovered with lsqr.
    """
    # 2d kernel on 2d signal
    if par["axis"] < 2:
        D2op = Smoothing2D(nsmooth=(5, 5), dims=(par["ny"], par["nx"]), dtype="float64")
        assert dottest(D2op, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)
        # forward: smoothing of a unit spike at the centre
        x = np.zeros((par["ny"], par["nx"]))
        x[par["ny"] // 2, par["nx"] // 2] = 1.0
        x = x.ravel()
        y = D2op * x
        y = y.reshape(par["ny"], par["nx"])
        assert_array_almost_equal(
            y[par["ny"] // 2 - 2 : par["ny"] // 2 + 3 :, par["nx"] // 2],
            np.ones(5) / 25,
        )
        assert_array_almost_equal(
            y[par["ny"] // 2, par["nx"] // 2 - 2 : par["nx"] // 2 + 3], np.ones(5) / 25
        )
        # inverse
        xlsqr = lsqr(D2op, y.ravel(), damp=1e-10, iter_lim=400, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=1)
    # 2d kernel on 3d signal: smooth over the two axes other than par["axis"]
    axes = list(range(3))
    axes.remove(par["axis"])
    D2op = Smoothing2D(
        nsmooth=(5, 5),
        dims=(par["nz"], par["ny"], par["nx"]),
        axes=axes,
        dtype="float64",
    )
    assert dottest(
        D2op, par["nz"] * par["ny"] * par["nx"], par["nz"] * par["ny"] * par["nx"]
    )
    # forward: smoothing of a unit spike at the centre of the cube
    x = np.zeros((par["nz"], par["ny"], par["nx"]))
    x[par["nz"] // 2, par["ny"] // 2, par["nx"] // 2] = 1.0
    x = x.ravel()
    y = D2op * x
    y = y.reshape(par["nz"], par["ny"], par["nx"])
    # check the 1/25 impulse response along both smoothed axes
    if par["axis"] == 0:
        assert_array_almost_equal(
            y[par["nz"] // 2, par["ny"] // 2 - 2 : par["ny"] // 2 + 3, par["nx"] // 2],
            np.ones(5) / 25,
        )
        assert_array_almost_equal(
            y[par["nz"] // 2, par["ny"] // 2, par["nx"] // 2 - 2 : par["nx"] // 2 + 3],
            np.ones(5) / 25,
        )
    elif par["axis"] == 1:
        assert_array_almost_equal(
            y[par["nz"] // 2 - 2 : par["nz"] // 2 + 3, par["ny"] // 2, par["nx"] // 2],
            np.ones(5) / 25,
        )
        assert_array_almost_equal(
            y[par["nz"] // 2, par["ny"] // 2, par["nx"] // 2 - 2 : par["nx"] // 2 + 3],
            np.ones(5) / 25,
        )
    elif par["axis"] == 2:
        assert_array_almost_equal(
            y[par["nz"] // 2 - 2 : par["nz"] // 2 + 3, par["ny"] // 2, par["nx"] // 2],
            np.ones(5) / 25,
        )
        assert_array_almost_equal(
            y[par["nz"] // 2, par["ny"] // 2 - 2 : par["ny"] // 2 + 3, par["nx"] // 2],
            np.ones(5) / 25,
        )
    # inverse
    xlsqr = lsqr(D2op, y.ravel(), damp=1e-10, iter_lim=400, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=1)
| 4,926 | 34.702899 | 96 | py |
pylops | pylops-master/pytests/test_dwts.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import DWT, DWT2D
from pylops.utils import dottest
par1 = {"ny": 7, "nx": 9, "nt": 10, "imag": 0, "dtype": "float32"} # real
par2 = {"ny": 7, "nx": 9, "nt": 10, "imag": 1j, "dtype": "complex64"} # complex
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1)])
def test_unknown_wavelet(par):
    """Ensure a ValueError is raised when an unsupported wavelet name is given"""
    with pytest.raises(ValueError):
        DWT(dims=par["nt"], wavelet="foo")
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_DWT_1dsignal(par):
    """Dot-test and inversion for DWT operator for 1d signal.

    Since the Haar transform is orthonormal, the adjoint must equal the
    inverse; both the adjoint and an lsqr inversion must recover x.
    """
    DWTop = DWT(dims=[par["nt"]], axis=0, wavelet="haar", level=3)
    x = np.random.normal(0.0, 1.0, par["nt"]) + par["imag"] * np.random.normal(
        0.0, 1.0, par["nt"]
    )
    assert dottest(
        DWTop, DWTop.shape[0], DWTop.shape[1], complexflag=0 if par["imag"] == 0 else 3
    )
    y = DWTop * x
    xadj = DWTop.H * y  # adjoint is same as inverse for dwt
    xinv = lsqr(DWTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=8)
    assert_array_almost_equal(x, xinv, decimal=8)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_DWT_2dsignal(par):
    """Dot-test and inversion for DWT operator for 2d signal.

    The 1d transform is applied along each axis in turn; the adjoint
    (equal to the inverse for Haar) and lsqr must both recover x.
    """
    for axis in [0, 1]:
        DWTop = DWT(dims=(par["nt"], par["nx"]), axis=axis, wavelet="haar", level=3)
        x = np.random.normal(0.0, 1.0, (par["nt"], par["nx"])) + par[
            "imag"
        ] * np.random.normal(0.0, 1.0, (par["nt"], par["nx"]))
        assert dottest(
            DWTop,
            DWTop.shape[0],
            DWTop.shape[1],
            complexflag=0 if par["imag"] == 0 else 3,
        )
        y = DWTop * x.ravel()
        xadj = DWTop.H * y  # adjoint is same as inverse for dwt
        xinv = lsqr(DWTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x.ravel(), xadj, decimal=8)
        assert_array_almost_equal(x.ravel(), xinv, decimal=8)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_DWT_3dsignal(par):
    """Dot-test and inversion for DWT operator for 3d signal.

    The 1d transform is applied along each of the three axes in turn; the
    adjoint (equal to the inverse for Haar) and lsqr must both recover x.
    """
    for axis in [0, 1, 2]:
        DWTop = DWT(
            dims=(par["nt"], par["nx"], par["ny"]), axis=axis, wavelet="haar", level=3
        )
        x = np.random.normal(0.0, 1.0, (par["nt"], par["nx"], par["ny"])) + par[
            "imag"
        ] * np.random.normal(0.0, 1.0, (par["nt"], par["nx"], par["ny"]))
        assert dottest(
            DWTop,
            DWTop.shape[0],
            DWTop.shape[1],
            complexflag=0 if par["imag"] == 0 else 3,
        )
        y = DWTop * x.ravel()
        xadj = DWTop.H * y  # adjoint is same as inverse for dwt
        xinv = lsqr(DWTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x.ravel(), xadj, decimal=8)
        assert_array_almost_equal(x.ravel(), xinv, decimal=8)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_DWT2D_2dsignal(par):
    """Dot-test and inversion for DWT2D operator for 2d signal.

    The separable 2d Haar transform is applied over both axes; the adjoint
    (equal to the inverse) and lsqr must both recover x.
    """
    DWTop = DWT2D(dims=(par["nt"], par["nx"]), axes=(0, 1), wavelet="haar", level=3)
    x = np.random.normal(0.0, 1.0, (par["nt"], par["nx"])) + par[
        "imag"
    ] * np.random.normal(0.0, 1.0, (par["nt"], par["nx"]))
    assert dottest(
        DWTop, DWTop.shape[0], DWTop.shape[1], complexflag=0 if par["imag"] == 0 else 3
    )
    y = DWTop * x.ravel()
    xadj = DWTop.H * y  # adjoint is same as inverse for dwt
    xinv = lsqr(DWTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x.ravel(), xadj, decimal=8)
    assert_array_almost_equal(x.ravel(), xinv, decimal=8)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_DWT2D_3dsignal(par):
    """Dot-test and inversion for DWT operator for 3d signal.

    The separable 2d Haar transform is applied over each pair of axes in
    turn; the adjoint (equal to the inverse) and lsqr must both recover x.
    """
    for axes in [(0, 1), (0, 2), (1, 2)]:
        DWTop = DWT2D(
            dims=(par["nt"], par["nx"], par["ny"]), axes=axes, wavelet="haar", level=3
        )
        x = np.random.normal(0.0, 1.0, (par["nt"], par["nx"], par["ny"])) + par[
            "imag"
        ] * np.random.normal(0.0, 1.0, (par["nt"], par["nx"], par["ny"]))
        assert dottest(
            DWTop,
            DWTop.shape[0],
            DWTop.shape[1],
            complexflag=0 if par["imag"] == 0 else 3,
        )
        y = DWTop * x.ravel()
        xadj = DWTop.H * y  # adjoint is same as inverse for dwt
        xinv = lsqr(DWTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x.ravel(), xadj, decimal=8)
        assert_array_almost_equal(x.ravel(), xinv, decimal=8)
| 4,860 | 34.742647 | 87 | py |
pylops | pylops-master/pytests/test_sliding.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import MatrixMult
from pylops.signalprocessing import Sliding1D, Sliding2D, Sliding3D
from pylops.signalprocessing.sliding1d import sliding1d_design
from pylops.signalprocessing.sliding2d import sliding2d_design
from pylops.signalprocessing.sliding3d import sliding3d_design
from pylops.utils import dottest
par1 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 5,
"novery": 0,
# "winsy": 3,
"npx": 10,
"nwinx": 5,
"noverx": 0,
# "winsx": 2,
"tapertype": None,
} # no overlap, no taper
par2 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 5,
"novery": 0,
# "winsy": 3,
"npx": 10,
"nwinx": 5,
"noverx": 0,
# "winsx": 2,
"tapertype": "hanning",
} # no overlap, with taper
par3 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 7,
"novery": 3,
# "winsy": 3,
"npx": 10,
"nwinx": 4,
"noverx": 2,
# "winsx": 4,
"tapertype": None,
} # overlap, no taper
par4 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 7,
"novery": 3,
# "winsy": 3,
"npx": 10,
"nwinx": 4,
"noverx": 2,
# "winsx": 4,
"tapertype": "hanning",
} # overlap, with taper
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding1D(par):
    """Dot-test and inverse for Sliding1D operator.

    Window geometry is obtained from sliding1d_design; the operator is
    dot-tested and a constant model is recovered via the / (div) solve.
    """
    Op = MatrixMult(np.ones((par["nwiny"], par["ny"])))
    nwins, dim, mwin_inends, dwin_inends = sliding1d_design(
        par["npy"], par["nwiny"], par["novery"], par["ny"]
    )
    Slid = Sliding1D(
        Op,
        dim=dim,
        dimd=par["npy"],
        nwin=par["nwiny"],
        nover=par["novery"],
        tapertype=par["tapertype"],
    )
    assert dottest(Slid, par["npy"], par["ny"] * nwins)
    x = np.ones(par["ny"] * nwins)
    y = Slid * x.ravel()
    xinv = Slid / y
    assert_array_almost_equal(x.ravel(), xinv)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding2D(par):
    """Dot-test and inverse for Sliding2D operator.

    Window geometry is obtained from sliding2d_design; the operator is
    dot-tested and a constant model is recovered via the / (div) solve.
    """
    Op = MatrixMult(np.ones((par["nwiny"] * par["nt"], par["ny"] * par["nt"])))
    nwins, dims, mwin_inends, dwin_inends = sliding2d_design(
        (par["npy"], par["nt"]), par["nwiny"], par["novery"], (par["ny"], par["nt"])
    )
    Slid = Sliding2D(
        Op,
        dims=dims,
        dimsd=(par["npy"], par["nt"]),
        nwin=par["nwiny"],
        nover=par["novery"],
        tapertype=par["tapertype"],
    )
    assert dottest(Slid, par["npy"] * par["nt"], par["ny"] * par["nt"] * nwins)
    x = np.ones((par["ny"] * nwins, par["nt"]))
    y = Slid * x.ravel()
    xinv = Slid / y
    assert_array_almost_equal(x.ravel(), xinv)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Sliding3D(par):
    """Dot-test and inverse for Sliding3D operator.

    Window geometry is obtained from sliding3d_design; the operator is
    dot-tested and a constant model is recovered via the / (div) solve.
    """
    Op = MatrixMult(
        np.ones(
            (par["nwiny"] * par["nwinx"] * par["nt"], par["ny"] * par["nx"] * par["nt"])
        )
    )
    nwins, dims, mwin_inends, dwin_inends = sliding3d_design(
        (par["npy"], par["npx"], par["nt"]),
        (par["nwiny"], par["nwinx"]),
        (par["novery"], par["noverx"]),
        (par["ny"], par["nx"], par["nt"]),
    )
    Slid = Sliding3D(
        Op,
        dims=dims,  # (par["ny"] * par["winsy"], par["nx"] * par["winsx"], par["nt"]),
        dimsd=(par["npy"], par["npx"], par["nt"]),
        nwin=(par["nwiny"], par["nwinx"]),
        nover=(par["novery"], par["noverx"]),
        nop=(par["ny"], par["nx"]),
        tapertype=par["tapertype"],
    )
    assert dottest(
        Slid,
        par["npy"] * par["npx"] * par["nt"],
        par["ny"] * par["nx"] * par["nt"] * nwins[0] * nwins[1],
    )
    x = np.ones((par["ny"] * par["nx"] * nwins[0] * nwins[1], par["nt"]))
    y = Slid * x.ravel()
    xinv = Slid / y
    assert_array_almost_equal(x.ravel(), xinv)
| 4,013 | 25.064935 | 88 | py |
pylops | pylops-master/pytests/test_signalutils.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.utils.signalprocessing import convmtx, nonstationary_convmtx
par1 = {"nt": 51, "imag": 0, "dtype": "float32"} # real
par2 = {"nt": 51, "imag": 1j, "dtype": "complex64"} # complex
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_convmtx(par):
    """Compare convmtx with np.convolve.

    The centered columns of the convolution matrix applied to x must match
    np.convolve(x, h, mode="same").
    """
    x = np.random.normal(0, 1, par["nt"]) + par["imag"] * np.random.normal(
        0, 1, par["nt"]
    )
    nh = 7
    h = np.hanning(7)
    H = convmtx(h, par["nt"])
    # keep only the central columns so the product matches mode="same"
    H = H[:, nh // 2 : -nh // 2 + 1]
    y = np.convolve(x, h, mode="same")
    y1 = np.dot(H, x)
    assert_array_almost_equal(y, y1, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_nonstationary_convmtx(par):
    """Compare nonstationary_convmtx with convmtx for stationary filter.

    A non-stationary convolution matrix built by repeating the same filter
    at every sample must act identically to the stationary convmtx.
    """
    x = np.random.normal(0, 1, par["nt"]) + par["imag"] * np.random.normal(
        0, 1, par["nt"]
    )
    nh = 7
    h = np.hanning(7)
    H = convmtx(h, par["nt"])
    H = H[:, nh // 2 : -nh // 2 + 1]
    # replicate the same filter at every time sample
    H1 = nonstationary_convmtx(
        np.repeat(h[:, np.newaxis], par["nt"], axis=1).T,
        par["nt"],
        hc=nh // 2,
        pad=(par["nt"], par["nt"]),
    )
    y = np.dot(H, x)
    y1 = np.dot(H1, x)
    assert_array_almost_equal(y, y1, decimal=4)
| 1,370 | 25.882353 | 75 | py |
pylops | pylops-master/pytests/test_twoway.py | import devito
import numpy as np
from pylops.utils import dottest
from pylops.waveeqprocessing.twoway import AcousticWave2D
devito.configuration["log-level"] = "ERROR"
par = {
"ny": 10,
"nx": 12,
"nz": 20,
"tn": 500,
"dy": 3,
"dx": 1,
"dz": 2,
"nr": 8,
"ns": 2,
}
v0 = 2
y = np.arange(par["ny"]) * par["dy"]
x = np.arange(par["nx"]) * par["dx"]
z = np.arange(par["nz"]) * par["dz"]
sx = np.linspace(x.min(), x.max(), par["ns"])
rx = np.linspace(x.min(), x.max(), par["nr"])
def test_acwave2d():
    """Dot-test for AcousticWave2D operator.

    Builds a 2D acoustic modelling operator over a constant-velocity model
    with the module-level source/receiver geometry and checks the adjoint
    via dot-test (loose atol since the Devito propagator is float32).
    """
    Dop = AcousticWave2D(
        (par["nx"], par["nz"]),
        (0, 0),
        (par["dx"], par["dz"]),
        np.ones((par["nx"], par["nz"])) * 2e3,
        sx,
        5,
        rx,
        5,
        0.0,
        par["tn"],
        "Ricker",
        space_order=4,
        nbl=30,
        f0=15,
        dtype="float32",
    )
    assert dottest(
        Dop, par["ns"] * par["nr"] * Dop.geometry.nt, par["nz"] * par["nx"], atol=1e-1
    )
| 1,034 | 18.166667 | 86 | py |
pylops | pylops-master/pytests/test_patching.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import MatrixMult
from pylops.signalprocessing import Patch2D, Patch3D
from pylops.signalprocessing.patch2d import patch2d_design
from pylops.signalprocessing.patch3d import patch3d_design
from pylops.utils import dottest
par1 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 5,
"novery": 0,
# "winsy": 3,
"npx": 13,
"nwinx": 5,
"noverx": 0,
# "winsx": 2,
"npt": 10,
"nwint": 5,
"novert": 0,
# "winst": 2,
"tapertype": None,
} # no overlap, no taper
par2 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 5,
"novery": 0,
# "winsy": 3,
"npx": 13,
"nwinx": 5,
"noverx": 0,
# "winsx": 2,
"npt": 10,
"nwint": 5,
"novert": 0,
# "winst": 2,
"tapertype": "hanning",
} # no overlap, with taper
par3 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 7,
"novery": 3,
# "winsy": 3,
"npx": 13,
"nwinx": 5,
"noverx": 2,
# "winsx": 3,
"npt": 10,
"nwint": 4,
"novert": 2,
# "winst": 4,
"tapertype": None,
} # overlap, no taper
par4 = {
"ny": 6,
"nx": 7,
"nt": 10,
"npy": 15,
"nwiny": 7,
"novery": 3,
# "winsy": 3,
"npx": 13,
"nwinx": 5,
"noverx": 2,
# "winsx": 3,
"npt": 10,
"nwint": 4,
"novert": 2,
# "winst": 4,
"tapertype": "hanning",
} # overlap, with taper
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Patch2D(par):
    """Dot-test and inverse for Patch2D operator.

    Patch geometry is obtained from patch2d_design; the operator is
    dot-tested and a constant model is recovered via the / (div) solve.
    """
    Op = MatrixMult(np.ones((par["nwiny"] * par["nwint"], par["ny"] * par["nt"])))
    nwins, dims, mwin_inends, dwin_inends = patch2d_design(
        (par["npy"], par["npt"]),
        (par["nwiny"], par["nwint"]),
        (par["novery"], par["novert"]),
        (par["ny"], par["nt"]),
    )
    Pop = Patch2D(
        Op,
        dims=dims,  # (par["ny"] * par["winsy"], par["nt"] * par["winst"]),
        dimsd=(par["npy"], par["npt"]),
        nwin=(par["nwiny"], par["nwint"]),
        nover=(par["novery"], par["novert"]),
        nop=(par["ny"], par["nt"]),
        tapertype=par["tapertype"],
    )
    assert dottest(
        Pop,
        par["npy"] * par["npt"],
        par["ny"] * par["nt"] * nwins[0] * nwins[1],
    )
    x = np.ones((par["ny"] * nwins[0], par["nt"] * nwins[1]))
    y = Pop * x.ravel()
    xinv = Pop / y
    assert_array_almost_equal(x.ravel(), xinv)
@pytest.mark.parametrize("par", [(par1), (par4)])
def test_Patch2D_scalings(par):
    """Dot-test and inverse for Patch2D operator with scalings.

    Same as test_Patch2D but with a distinct scaling per window, to check
    that per-window scalings do not break the adjoint or the inversion.
    """
    Op = MatrixMult(np.ones((par["nwiny"] * par["nwint"], par["ny"] * par["nt"])))
    # one distinct positive scaling factor per window
    scalings = np.arange(par["nwiny"] * par["nwint"]) + 1.0
    nwins, dims, mwin_inends, dwin_inends = patch2d_design(
        (par["npy"], par["npt"]),
        (par["nwiny"], par["nwint"]),
        (par["novery"], par["novert"]),
        (par["ny"], par["nt"]),
    )
    Pop = Patch2D(
        Op,
        dims=dims,  # (par["ny"] * par["winsy"], par["nt"] * par["winst"]),
        dimsd=(par["npy"], par["npt"]),
        nwin=(par["nwiny"], par["nwint"]),
        nover=(par["novery"], par["novert"]),
        nop=(par["ny"], par["nt"]),
        tapertype=par["tapertype"],
        scalings=scalings,
    )
    assert dottest(
        Pop,
        par["npy"] * par["npt"],
        par["ny"] * par["nt"] * nwins[0] * nwins[1],
    )
    x = np.ones((par["ny"] * nwins[0], par["nt"] * nwins[1]))
    y = Pop * x.ravel()
    xinv = Pop / y
    assert_array_almost_equal(x.ravel(), xinv)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Patch3D(par):
    """Dot-test and inverse for Patch3D operator.

    Patch geometry is obtained from patch3d_design; the operator is
    dot-tested and a constant model is recovered via the / (div) solve.
    """
    Op = MatrixMult(
        np.ones(
            (
                par["nwiny"] * par["nwinx"] * par["nwint"],
                par["ny"] * par["nx"] * par["nt"],
            )
        )
    )
    nwins, dims, mwin_inends, dwin_inends = patch3d_design(
        (par["npy"], par["npx"], par["npt"]),
        (par["nwiny"], par["nwinx"], par["nwint"]),
        (par["novery"], par["noverx"], par["novert"]),
        (par["ny"], par["nx"], par["nt"]),
    )
    Pop = Patch3D(
        Op,
        dims=dims,  # (
        #    par["ny"] * par["winsy"],
        #    par["nx"] * par["winsx"],
        #    par["nt"] * par["winst"],
        # ),
        dimsd=(par["npy"], par["npx"], par["npt"]),
        nwin=(par["nwiny"], par["nwinx"], par["nwint"]),
        nover=(par["novery"], par["noverx"], par["novert"]),
        nop=(par["ny"], par["nx"], par["nt"]),
        tapertype=par["tapertype"],
    )
    assert dottest(
        Pop,
        par["npy"] * par["npx"] * par["npt"],
        par["ny"] * par["nx"] * par["nt"] * nwins[0] * nwins[1] * nwins[2],
    )
    x = np.ones((par["ny"] * nwins[0], par["nx"] * nwins[1], par["nt"] * nwins[2]))
    y = Pop * x.ravel()
    xinv = Pop / y
    assert_array_almost_equal(x.ravel(), xinv)
| 5,060 | 25.222798 | 83 | py |
pylops | pylops-master/pytests/test_linearoperator.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy.sparse.linalg import LinearOperator as spLinearOperator
import pylops
from pylops import LinearOperator
from pylops.basicoperators import (
Diagonal,
FirstDerivative,
HStack,
MatrixMult,
Real,
Symmetrize,
VStack,
Zero,
)
from pylops.utils import dottest
par1 = {"ny": 11, "nx": 11, "imag": 0, "dtype": "float64"} # square real
par2 = {"ny": 21, "nx": 11, "imag": 0, "dtype": "float64"} # overdetermined real
par1j = {"ny": 11, "nx": 11, "imag": 1j, "dtype": "complex128"} # square imag
par2j = {"ny": 21, "nx": 11, "imag": 1j, "dtype": "complex128"} # overdetermined imag
@pytest.mark.parametrize("par", [(par1), (par2), (par1j)])
def test_overloads(par):
    """Apply various overloaded operators (.H, -, +, *) and ensure that the
    returned operator is still of pylops LinearOperator type
    """
    diag = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    Dop = Diagonal(diag, dtype=par["dtype"])
    # every overloaded composition must remain a pylops LinearOperator
    compositions = (
        Dop.H,  # adjoint
        -Dop,  # negation
        2 * Dop,  # scalar multiplication
        Dop + Dop,  # addition
        Dop - 2 * Dop,  # subtraction
        Dop * Dop,  # operator chaining
        Dop**2,  # power
    )
    for op in compositions:
        assert isinstance(op, LinearOperator)
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_scaled(par):
    """Verify that _ScaledLinearOperator produces the correct type based
    on its inputs types
    """
    for dtype in (np.float32, np.float64):
        diag = np.arange(par["nx"], dtype=dtype) + par["imag"] * np.arange(
            par["nx"], dtype=dtype
        )
        Dop = Diagonal(diag, dtype=dtype)
        # scaling on either side, with positive or negative scalars,
        # must preserve the dtype of the wrapped operator
        for scaled in (3.0 * Dop, -3.0 * Dop, Dop * 3.0, Dop * -3.0):
            assert scaled.dtype == dtype
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_scipyop(par):
    """Verify interaction between pylops and scipy Linear operators.

    Sums and products mixing a pylops operator with a raw scipy
    LinearOperator subclass must return a pylops LinearOperator.
    """
    # minimal scipy-only diagonal operator (not a pylops operator)
    class spDiag(spLinearOperator):
        def __init__(self, x):
            self.x = x
            self.shape = (len(x), len(x))
            self.dtype = x.dtype
        def _matvec(self, x):
            return x * self.x
        def _rmatvec(self, x):
            return x * self.x
    Dop = Diagonal(np.ones(5))
    Dspop = spDiag(2 * np.ones(5))
    # sum pylops to scipy linear ops
    assert isinstance(Dop + Dspop, LinearOperator)
    # multiply pylops to scipy linear ops
    assert isinstance(Dop * Dspop, LinearOperator)
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_dense(par):
    """Dense matrix representation of square matrix"""
    d = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    Dop = Diagonal(d, dtype=par["dtype"])
    # todense must reproduce the explicit diagonal matrix
    assert_array_equal(Dop.todense(), np.diag(d))
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_dense_skinny(par):
    """Dense matrix representation of skinny matrix.

    HStack of a Diagonal and a Zero operator must densify to the
    horizontal concatenation of the diagonal matrix and a zero block.
    """
    diag = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    D = np.diag(diag)
    Dop = Diagonal(diag, dtype=par["dtype"])
    Zop = Zero(par["nx"], 3, dtype=par["dtype"])
    Op = HStack([Dop, Zop])
    OOp = np.hstack((D, np.zeros((par["nx"], 3))))
    assert_array_equal(Op.todense(), OOp)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j)])
def test_sparse(par):
    """Sparse matrix representation.

    tosparse of a Diagonal operator must reproduce the dense diagonal
    matrix it represents.
    """
    diag = np.arange(par["nx"]) + par["imag"] * np.arange(par["nx"])
    D = np.diag(diag)
    Dop = Diagonal(diag, dtype=par["dtype"])
    S = Dop.tosparse()
    # use toarray() instead of the deprecated .A attribute, which is
    # removed for sparse arrays in scipy>=1.14
    assert_array_equal(S.toarray(), D)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j)])
def test_eigs(par):
    """Eigenvalues and condition number estimate with ARPACK.

    A known diagonal spectrum (nx, nx-1, ..., 1, optionally complex) is
    used so the largest eigenvalues and the condition number (nx/1) can
    be checked against analytical values, for both explicit and
    matrix-free operators.
    """
    # explicit=True
    diag = np.arange(par["nx"], 0, -1) + par["imag"] * np.arange(par["nx"], 0, -1)
    Op = MatrixMult(
        np.vstack((np.diag(diag), np.zeros((par["ny"] - par["nx"], par["nx"]))))
    )
    eigs = Op.eigs()
    assert_array_almost_equal(diag[: eigs.size], eigs, decimal=3)
    cond = Op.cond()
    assert_array_almost_equal(np.real(cond), par["nx"], decimal=3)
    # explicit=False
    Op = Diagonal(diag, dtype=par["dtype"])
    if par["ny"] > par["nx"]:
        Op = VStack([Op, Zero(par["ny"] - par["nx"], par["nx"])])
    eigs = Op.eigs()
    assert_array_almost_equal(diag[: eigs.size], eigs, decimal=3)
    # uselobpcg cannot be used for square non-symmetric complex matrices
    # NOTE(review): np.iscomplex(Op) is applied to an operator object, not
    # an array — presumably np.iscomplexobj (or a dtype check) was intended;
    # confirm whether the lobpcg branches are actually exercised
    if np.iscomplex(Op):
        eigs1 = Op.eigs(uselobpcg=True)
        assert_array_almost_equal(eigs, eigs1, decimal=3)
    cond = Op.cond()
    assert_array_almost_equal(np.real(cond), par["nx"], decimal=3)
    if np.iscomplex(Op):
        cond1 = Op.cond(uselobpcg=True, niter=100)
        assert_array_almost_equal(np.real(cond), np.real(cond1), decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_conj(par):
    """Complex conjugation operator"""
    M = 1j * np.ones((par["ny"], par["nx"]))
    Opconj = MatrixMult(M, dtype=np.complex128).conj()
    x = (1 + par["imag"]) * np.arange(par["nx"])
    y = Opconj * x
    # forward: conjugated operator applies conj(M)
    assert_array_almost_equal(y, M.conj() @ x)
    # adjoint: conj(M).conj().T reduces to M.T
    assert_array_almost_equal(Opconj.H * y, M.T @ y)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_apply_columns_explicit(par):
    """Apply columns to explicit and non-explicit operator"""
    M = np.ones((par["ny"], par["nx"]))
    Mop = MatrixMult(M, dtype=par["dtype"])
    M1op = MatrixMult(M, dtype=par["dtype"])
    # force the non-explicit code path on the second operator
    M1op.explicit = False
    # random subset of half of the columns, kept in ascending order
    cols = np.sort(np.random.permutation(np.arange(par["nx"]))[: par["nx"] // 2])
    Mcols = M[:, cols]
    Mcolsop = Mop.apply_columns(cols)
    M1colsop = M1op.apply_columns(cols)
    x = np.arange(len(cols))
    y = np.arange(par["ny"])
    # forward
    assert_array_almost_equal(Mcols @ x, Mcolsop.matvec(x))
    assert_array_almost_equal(Mcols @ x, M1colsop.matvec(x))
    # adjoint
    assert_array_almost_equal(Mcols.T @ y, Mcolsop.rmatvec(y))
    assert_array_almost_equal(Mcols.T @ y, M1colsop.rmatvec(y))
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_realimag(par):
    """Real/imag extraction"""
    M = np.random.normal(0, 1, (par["ny"], par["nx"])) + 1j * np.random.normal(
        0, 1, (par["ny"], par["nx"])
    )
    Op = MatrixMult(M, dtype=np.complex128)
    Opr = Op.toreal()
    Opi = Op.toimag()
    # forward: real/imag operators return the respective parts of Op * x
    x = np.arange(par["nx"])
    y = Op * x
    yr = Opr * x
    yi = Opi * x
    assert_array_equal(np.real(y), yr)
    assert_array_equal(np.imag(y), yi)
    # adjoint
    y = np.arange(par["ny"]) + 1j * np.arange(par["ny"])
    x = Op.H * y
    xr = Opr.H * y
    xi = Opi.H * y
    assert_array_equal(np.real(x), xr)
    # the adjoint of the imag-extraction carries a sign flip
    assert_array_equal(np.imag(x), -xi)
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_rlinear(par):
    """R-linear"""
    np.random.seed(123)
    if np.dtype(par["dtype"]).kind == "c":
        M = (
            np.random.randn(par["ny"], par["nx"])
            + 1j * np.random.randn(par["ny"], par["nx"])
        ).astype(par["dtype"])
    else:
        M = np.random.randn(par["ny"], par["nx"]).astype(par["dtype"])
    OpM = MatrixMult(M, dtype=par["dtype"])
    OpR_left = Real(par["ny"], dtype=par["dtype"])
    OpR_right = Real(par["nx"], dtype=par["dtype"])
    Op_left = OpR_left * OpM
    Op_right = OpM * OpR_right
    # composing with Real breaks complex-linearity of the chain
    assert Op_left.clinear is False
    assert Op_right.clinear is False
    # forward
    x = np.random.randn(par["nx"])
    y_left = Op_left * x
    y_right = Op_right * x
    assert_array_equal(np.real(M @ x), y_left)
    assert_array_equal(M @ np.real(x), y_right)
    # adjoint (dot product test) for every complexflag combination
    for complexflag in range(4):
        assert dottest(Op_left, par["ny"], par["nx"], complexflag=complexflag)
        assert dottest(Op_right, par["ny"], par["nx"], complexflag=complexflag)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_copy_dims_dimsd(par):
    """Apply various overloaded operators (.H, -, +, *) and ensure that the
    dims and dimsd properties are propagated
    """
    Dy = FirstDerivative((par["ny"], par["nx"]), axis=0)
    Dx = FirstDerivative((par["ny"], par["nx"]), axis=-1)
    dims = (par["ny"], par["nx"])
    dimsd = (par["ny"], par["nx"])
    # Symmetrize along axis 0 doubles that axis (minus the shared sample)
    dimsd_sym = (2 * par["ny"] - 1, par["nx"])
    S = Symmetrize(dims, axis=0)
    # .H: adjoint swaps model and data dimensions
    assert S.H.dims == dimsd_sym
    assert S.H.dimsd == dims
    # .T
    assert S.T.dims == dimsd_sym
    assert S.T.dimsd == dims
    # negate
    assert (-Dx).dims == dims
    assert (-Dx).dimsd == dimsd
    # multiply by scalar
    assert (2 * Dx).dims == dims
    assert (2 * Dx).dimsd == dimsd
    assert (Dx * 2).dims == dims
    assert (Dx * 2).dimsd == dimsd
    # +
    assert (Dx + Dy).dims == dims
    assert (Dx + Dy).dimsd == dimsd
    # -
    assert (Dx - 2 * Dy).dims == dims
    assert (Dx - 2 * Dy).dimsd == dimsd
    # *: chained operator takes model dims from Dx and data dims from S
    assert (S @ Dx).dims == dims
    assert (S @ Dx).dimsd == dimsd_sym
    # **
    assert (Dx**3).dims == dims
    assert (Dx**3).dimsd == dimsd
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_non_flattened_arrays(par):
    """Apply operators on arrays which are not 1D."""
    dims = (par["ny"], par["nx"])
    D = FirstDerivative(dims, axis=-1)
    S = Symmetrize(dims, axis=0)
    x_nd = np.random.randn(*dims)
    x_1d = x_nd.ravel()
    # k identical copies stacked on a trailing axis to test batched matmul
    k = 4
    X_nd = np.repeat(x_nd[..., np.newaxis], k, axis=-1)
    X_1d = np.repeat(x_1d[..., np.newaxis], k, axis=-1)
    Y_D = np.empty((*D.dimsd, k))
    Y_S = np.empty((*S.dimsd, k))
    for i in range(k):
        Y_D[..., i] = D @ x_nd
        Y_S[..., i] = S @ D @ x_nd
    # nd application must agree with the flattened application, reshaped
    assert_array_equal((D @ x_1d).reshape(D.dimsd), D @ x_nd)
    assert_array_equal((S @ D @ x_1d).reshape(S.dimsd), S @ D @ x_nd)
    assert_array_equal(Y_D, D @ X_nd)
    assert_array_equal(Y_D, (D @ X_1d).reshape((*D.dimsd, -1)))
    assert_array_equal(Y_S, S @ D @ X_nd)
    assert_array_equal(Y_S, (S @ D @ X_1d).reshape((*S.dimsd, -1)))
    # with nd multiplication disabled, non-flat inputs must be rejected
    with pylops.disabled_ndarray_multiplication():
        with pytest.raises(ValueError):
            D @ x_nd
        with pytest.raises(ValueError):
            D @ X_nd
@pytest.mark.parametrize("par", [(par1), (par2j)])
def test_counts(par):
    """Assess counters and the associated reset method"""
    A = np.ones((par["ny"], par["nx"])) + par["imag"] * np.ones((par["ny"], par["nx"]))
    Aop = MatrixMult(A, dtype=par["dtype"])
    # three matvecs (the @ with a 1d vector routes through matvec),
    # one rmatvec, one matmat and two rmatmats
    _ = Aop.matvec(np.ones(par["nx"]))
    _ = Aop.matvec(np.ones(par["nx"]))
    _ = Aop.rmatvec(np.ones(par["ny"]))
    _ = Aop @ np.ones(par["nx"])
    _ = Aop.rmatmat(np.ones((par["ny"], 2)))
    _ = Aop.matmat(np.ones((par["nx"], 2)))
    _ = Aop.rmatmat(np.ones((par["ny"], 2)))
    assert Aop.matvec_count == 3
    assert Aop.rmatvec_count == 1
    assert Aop.matmat_count == 1
    assert Aop.rmatmat_count == 2
    # reset must zero all four counters
    Aop.reset_count()
    assert Aop.matvec_count == 0
    assert Aop.rmatvec_count == 0
    assert Aop.matmat_count == 0
    assert Aop.rmatmat_count == 0
"""
test_functionoperator.py
Test module for FunctionOperator. Tests 32 and 64 bit float and complex number
by wrapping a matrix multiplication as a FunctionOperator.
Also provides a good starting point for new tests.
"""
import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy.sparse.linalg import lsqr
from pylops.basicoperators import FunctionOperator
from pylops.utils import dottest
# Cartesian product of row sizes, column sizes and dtypes; each combination
# becomes one parameterization dict consumed by the tests below.
PARS_LISTS = [
    [11, 21],  # nr
    [11, 21],  # nc
    ["float32", "float64", "complex64", "complex128"],  # dtypes
]
PARS = []
for nr, nc, dtype in itertools.product(*PARS_LISTS):
    PARS += [
        {
            "nr": nr,
            "nc": nc,
            "imag": 0 if dtype.startswith("float") else 1j,
            "dtype": dtype,
            # looser tolerance for single-precision dtypes
            "rtol": 1e-3 if dtype in ["float32", "complex64"] else 1e-6,
        }
    ]
@pytest.mark.parametrize("par", PARS)
def test_FunctionOperator(par):
    """Dot-test and inversion for FunctionOperator operator."""
    np.random.seed(10)
    G = (
        np.random.normal(0, 1, (par["nr"], par["nc"]))
        + np.random.normal(0, 1, (par["nr"], par["nc"])) * par["imag"]
    ).astype(par["dtype"])

    def forward_f(x):
        return G @ x

    def adjoint_f(y):
        return np.conj(G.T) @ y

    # square operators can be built with a single size argument
    if par["nr"] == par["nc"]:
        Fop = FunctionOperator(forward_f, adjoint_f, par["nr"], dtype=par["dtype"])
    else:
        Fop = FunctionOperator(
            forward_f, adjoint_f, par["nr"], par["nc"], dtype=par["dtype"]
        )
    assert dottest(
        Fop,
        par["nr"],
        par["nc"],
        complexflag=0 if par["imag"] == 0 else 3,
        rtol=par["rtol"],
    )
    x = (np.ones(par["nc"]) + np.ones(par["nc"]) * par["imag"]).astype(par["dtype"])
    y = (np.ones(par["nr"]) + np.ones(par["nr"]) * par["imag"]).astype(par["dtype"])
    F_x = Fop @ x
    FH_y = Fop.H @ y
    G_x = np.asarray(G @ x)
    GH_y = np.asarray(np.conj(G.T) @ y)
    assert_array_equal(F_x, G_x)
    assert_array_equal(FH_y, GH_y)
    # Only test inversion for square or overdetermined systems
    if par["nc"] <= par["nr"]:
        xlsqr = lsqr(Fop, F_x, damp=0, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=4)
@pytest.mark.parametrize("par", PARS)
def test_FunctionOperator_NoAdjoint(par):
    """Forward and adjoint for FunctionOperator operator where the adjoint
    is not implemented.
    """
    np.random.seed(10)
    G = (
        np.random.normal(0, 1, (par["nr"], par["nc"]))
        + np.random.normal(0, 1, (par["nr"], par["nc"])) * par["imag"]
    ).astype(par["dtype"])

    def forward_f(x):
        return G @ x

    # no adjoint function supplied on purpose
    if par["nr"] == par["nc"]:
        Fop = FunctionOperator(forward_f, par["nr"], dtype=par["dtype"])
    else:
        Fop = FunctionOperator(forward_f, par["nr"], par["nc"], dtype=par["dtype"])
    x = (np.ones(par["nc"]) + np.ones(par["nc"]) * par["imag"]).astype(par["dtype"])
    y = (np.ones(par["nr"]) + np.ones(par["nr"]) * par["imag"]).astype(par["dtype"])
    F_x = Fop @ x
    G_x = np.asarray(G @ x)
    assert_array_equal(F_x, G_x)
    # check error is raised when applying the adjoint
    with pytest.raises(NotImplementedError):
        _ = Fop.H @ y
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.optimization.sparsity import fista
from pylops.signalprocessing import ChirpRadon2D, ChirpRadon3D
from pylops.utils import dottest
from pylops.utils.seismicevents import linear2d, linear3d, makeaxis
from pylops.utils.wavelets import ricker
# Parameterizations: odd/even spatial axes, numpy or pyfftw backends
par1 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 13,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "engine": "numpy",
}  # odd, numpy
par2 = {
    "nt": 11,
    "nhx": 20,
    "nhy": 10,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "engine": "numpy",
}  # even, numpy
par1f = {
    "nt": 11,
    "nhx": 21,
    "nhy": 13,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "engine": "fftw",
}  # odd, fftw
par2f = {
    "nt": 11,
    "nhx": 20,
    "nhy": 10,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "engine": "fftw",
}  # even, fftw
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_ChirpRadon2D(par):
    """Dot-test, forward, analytical inverse and sparse inverse
    for ChirpRadon2D operator
    """
    parmod = {
        "ot": 0,
        "dt": 0.004,
        "nt": par["nt"],
        "ox": par["nhx"] * 10 / 2,
        "dx": 10,
        "nx": par["nhx"],
        "f0": 40,
    }
    # single linear event at t0=0.1s with 20 degrees dip
    theta = [
        20,
    ]
    t0 = [
        0.1,
    ]
    amp = [
        1.0,
    ]
    # Create axis
    t, t2, hx, _ = makeaxis(parmod)
    # Create wavelet
    wav, _, wav_c = ricker(t[:41], f0=parmod["f0"])
    # Generate model
    _, x = linear2d(hx, t, 1500.0, t0, theta, amp, wav)
    Rop = ChirpRadon2D(t, hx, par["pxmax"], dtype="float64")
    assert dottest(Rop, par["nhx"] * par["nt"], par["nhx"] * par["nt"])
    y = Rop * x.ravel()
    # analytical inverse
    xinvana = Rop.inverse(y)
    assert_array_almost_equal(x.ravel(), xinvana, decimal=3)
    # sparsity-promoting inverse
    xinv, _, _ = fista(Rop, y, niter=30, eps=1e0)
    assert_array_almost_equal(x.ravel(), xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1f), (par2f)])
def test_ChirpRadon3D(par):
    """Dot-test, forward, analytical inverse and sparse inverse
    for ChirpRadon3D operator
    """
    parmod = {
        "ot": 0,
        "dt": 0.004,
        "nt": par["nt"],
        "ox": par["nhx"] * 10 / 2,
        "dx": 10,
        "nx": par["nhx"],
        "oy": par["nhy"] * 10 / 2,
        "dy": 10,
        "ny": par["nhy"],
        "f0": 40,
    }
    # single linear event dipping 20 degrees in x, flat in y
    theta = [
        20,
    ]
    phi = [
        0,
    ]
    t0 = [
        0.1,
    ]
    amp = [
        1.0,
    ]
    # Create axis
    t, t2, hx, hy = makeaxis(parmod)
    # Create wavelet
    wav, _, wav_c = ricker(t[:41], f0=parmod["f0"])
    # Generate model
    _, x = linear3d(hy, hx, t, 1500.0, t0, theta, phi, amp, wav)
    # fftw-specific kwargs are forwarded; ignored by the numpy engine
    Rop = ChirpRadon3D(
        t,
        hy,
        hx,
        (par["pymax"], par["pxmax"]),
        engine=par["engine"],
        dtype="float64",
        **dict(flags=("FFTW_ESTIMATE",), threads=2)
    )
    assert dottest(
        Rop, par["nhy"] * par["nhx"] * par["nt"], par["nhy"] * par["nhx"] * par["nt"]
    )
    y = Rop * x.ravel()
    # analytical inverse
    xinvana = Rop.inverse(y)
    assert_array_almost_equal(x.ravel(), xinvana, decimal=3)
    # sparsity-promoting inverse
    xinv, _, _ = fista(Rop, y, niter=30, eps=1e0)
    assert_array_almost_equal(x.ravel(), xinv, decimal=3)
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import Diagonal, HStack, Identity, MatrixMult, Smoothing1D
from pylops.optimization.leastsquares import (
normal_equations_inversion,
preconditioned_inversion,
regularized_inversion,
)
# Parameterizations: square/overdetermined, real/complex, with and without
# a non-zero initial guess x0
par1 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # square real with zero initial guess
par2 = {
    "ny": 11,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # square real with non-zero initial guess
par3 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": False,
    "dtype": "float64",
}  # overdetermined real with zero initial guess
par4 = {
    "ny": 31,
    "nx": 11,
    "imag": 0,
    "x0": True,
    "dtype": "float64",
}  # overdetermined real with non-zero initial guess
par1j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # square complex with zero initial guess
par2j = {
    "ny": 11,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # square complex with non-zero initial guess
par3j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": False,
    "dtype": "complex64",
}  # overdetermined complex with zero initial guess
par4j = {
    "ny": 31,
    "nx": 11,
    "imag": 1j,
    "x0": True,
    "dtype": "complex64",
}  # overdetermined complex with non-zero
# initial guess
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_NormalEquationsInversion(par):
    """Solve normal equations in least squares sense"""
    np.random.seed(10)
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    Reg = MatrixMult(np.eye(par["nx"]), dtype=par["dtype"])
    NReg = MatrixMult(np.eye(par["nx"]), dtype=par["dtype"])
    Weigth = Diagonal(np.ones(par["ny"]), dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    # optional non-zero starting guess, driven by the parameterization
    x0 = (
        np.random.normal(0, 10, par["nx"])
        + par["imag"] * np.random.normal(0, 10, par["nx"])
        if par["x0"]
        else None
    )
    y = Gop * x
    # normal equations with regularization
    xinv = normal_equations_inversion(
        Gop, y, [Reg], epsI=1e-5, epsRs=[1e-8], x0=x0, **dict(maxiter=200, tol=1e-10)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
    # normal equations with weight
    xinv = normal_equations_inversion(
        Gop, y, None, Weight=Weigth, epsI=1e-5, x0=x0, **dict(maxiter=200, tol=1e-10)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
    # normal equations with weight and small regularization
    xinv = normal_equations_inversion(
        Gop,
        y,
        [Reg],
        Weight=Weigth,
        epsI=1e-5,
        epsRs=[1e-8],
        x0=x0,
        **dict(maxiter=200, tol=1e-10)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
    # normal equations with weight and small normal regularization
    xinv = normal_equations_inversion(
        Gop,
        y,
        [],
        NRegs=[NReg],
        Weight=Weigth,
        epsI=1e-5,
        epsNRs=[1e-8],
        x0=x0,
        **dict(maxiter=200, tol=1e-10)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_RegularizedInversion(par):
    """Solve regularized inversion in least squares sense"""
    np.random.seed(10)
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    Reg = MatrixMult(np.eye(par["nx"]), dtype=par["dtype"])
    Weigth = Diagonal(np.ones(par["ny"]), dtype=par["dtype"])
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    # optional non-zero starting guess, driven by the parameterization
    x0 = (
        np.random.normal(0, 10, par["nx"])
        + par["imag"] * np.random.normal(0, 10, par["nx"])
        if par["x0"]
        else None
    )
    y = Gop * x
    # regularized inversion with regularization
    xinv = regularized_inversion(
        Gop, y, [Reg], epsRs=[1e-8], x0=x0, **dict(damp=0, iter_lim=200, show=0)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
    # regularized inversion with weight
    xinv = regularized_inversion(
        Gop, y, None, Weight=Weigth, x0=x0, **dict(damp=0, iter_lim=200, show=0)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
    # regularized inversion with both weight and regularization
    xinv = regularized_inversion(
        Gop,
        y,
        [Reg],
        Weight=Weigth,
        epsRs=[1e-8],
        x0=x0,
        **dict(damp=0, iter_lim=200, show=0)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_WeightedInversion(par):
    """Compare results for normal equations and regularized inversion
    when used to solve weighted least square inversion
    """
    np.random.seed(10)
    Greal = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gimag = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(Greal + par["imag"] * Gimag, dtype=par["dtype"])
    # normal equations take the weight w, regularized inversion sqrt(w)
    w = np.arange(par["ny"])
    Wop = Diagonal(w, dtype=par["dtype"])
    Wsqrtop = Diagonal(np.sqrt(w), dtype=par["dtype"])
    x = (1 + par["imag"]) * np.ones(par["nx"])
    y = Gop * x
    xne = normal_equations_inversion(Gop, y, None, Weight=Wop, maxiter=5, tol=1e-10)[0]
    xreg = regularized_inversion(
        Gop, y, None, Weight=Wsqrtop, damp=0, iter_lim=5, show=0
    )[0]
    # both formulations must converge to the same solution
    assert_array_almost_equal(xne, xreg, decimal=3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_PreconditionedInversion(par):
    """Solve regularized inversion in least squares sense"""
    np.random.seed(10)
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    # smoothing preconditioner: the true model lies in its range by design
    Pre = Smoothing1D(nsmooth=5, dims=[par["nx"]], dtype=par["dtype"])
    p = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    x = Pre * p
    x0 = (
        np.random.normal(0, 1, par["nx"])
        + par["imag"] * np.random.normal(0, 1, par["nx"])
        if par["x0"]
        else None
    )
    y = Gop * x
    xinv = preconditioned_inversion(
        Gop, y, Pre, x0=x0, **dict(damp=0, iter_lim=800, show=0)
    )[0]
    assert_array_almost_equal(x, xinv, decimal=2)
@pytest.mark.parametrize("par", [(par1)])
def test_skinnyregularization(par):
    """Solve inversion with a skinny regularization (rows are smaller than
    the number of elements in the model vector)
    """
    np.random.seed(10)
    d = np.arange(par["nx"] - 1).astype(par["dtype"]) + 1.0
    Dop = Diagonal(d, dtype=par["dtype"])
    # regularization of shape (nx // 2, nx - 1): fewer rows than model size
    Regop = HStack([Identity(par["nx"] // 2), Identity(par["nx"] // 2)])
    x = np.arange(par["nx"] - 1)
    y = Dop * x
    xinv = normal_equations_inversion(Dop, y, [Regop], epsRs=[1e-4])[0]
    assert_array_almost_equal(x, xinv, decimal=2)
    xinv = regularized_inversion(Dop, y, [Regop], epsRs=[1e-4])[0]
    assert_array_almost_equal(x, xinv, decimal=2)
pylops | pylops-master/pytests/test_basicoperators.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy.sparse import rand
from scipy.sparse.linalg import lsqr
from pylops.basicoperators import (
Conj,
Flip,
Identity,
Imag,
LinearRegression,
MatrixMult,
Real,
Regression,
Roll,
Sum,
Symmetrize,
Zero,
)
from pylops.utils import dottest
# Parameterizations: square/overdetermined/underdetermined, real/complex
par1 = {"ny": 11, "nx": 11, "imag": 0, "dtype": "float64"}  # square real
par2 = {"ny": 21, "nx": 11, "imag": 0, "dtype": "float64"}  # overdetermined real
par1j = {"ny": 11, "nx": 11, "imag": 1j, "dtype": "complex128"}  # square complex
par2j = {
    "ny": 21,
    "nx": 11,
    "imag": 1j,
    "dtype": "complex128",
}  # overdetermined complex
par3 = {"ny": 11, "nx": 21, "imag": 0, "dtype": "float64"}  # underdetermined real
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Regression(par):
    """Dot-test, inversion and apply for Regression operator"""
    np.random.seed(10)
    order = 4
    taxis = np.arange(par["ny"], dtype=np.float32)
    LRop = Regression(taxis, order=order, dtype=par["dtype"])
    assert dottest(LRop, par["ny"], order + 1)
    coeffs = np.array([1.0, 2.0, 0.0, 3.0, -1.0], dtype=np.float32)
    data = LRop * coeffs
    # invert for the polynomial coefficients
    coeffs_inv = lsqr(
        LRop, data, damp=1e-10, iter_lim=300, atol=1e-8, btol=1e-8, show=0
    )[0]
    assert_array_almost_equal(coeffs, coeffs_inv, decimal=3)
    # apply() must match the forward operator
    assert_array_almost_equal(data, LRop.apply(taxis, coeffs), decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_LinearRegression(par):
    """Dot-test and inversion for LinearRegression operator"""
    np.random.seed(10)
    taxis = np.arange(par["ny"], dtype=np.float32)
    LRop = LinearRegression(taxis, dtype=par["dtype"])
    assert dottest(LRop, par["ny"], 2)
    coeffs = np.array([1.0, 2.0], dtype=np.float32)
    data = LRop * coeffs
    # invert for intercept and slope
    coeffs_inv = lsqr(
        LRop, data, damp=1e-10, iter_lim=300, atol=1e-8, btol=1e-8, show=0
    )[0]
    assert_array_almost_equal(coeffs, coeffs_inv, decimal=3)
    # apply() must match the forward operator
    assert_array_almost_equal(data, LRop.apply(taxis, coeffs), decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_MatrixMult(par):
    """Dot-test and inversion for MatrixMult operator"""
    np.random.seed(10)
    Greal = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gimag = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(Greal + par["imag"] * Gimag, dtype=par["dtype"])
    # complex vectors are required in the dot test for complex operators
    assert dottest(Gop, par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    x = (1 + par["imag"]) * np.ones(par["nx"])
    xlsqr = lsqr(
        Gop, Gop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0
    )[0]
    assert_array_almost_equal(x, xlsqr, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_MatrixMult_sparse(par):
    """Dot-test and inversion for MatrixMult operator using sparse
    matrix
    """
    np.random.seed(10)
    G = rand(par["ny"], par["nx"], density=0.75).astype("float32") + par["imag"] * rand(
        par["ny"], par["nx"], density=0.75
    ).astype("float32")
    Gop = MatrixMult(G, dtype=par["dtype"])
    # bugfix: par["imag"] is 0 or 1j, never 1, so the previous `== 1` check
    # always selected complexflag=3; mirror test_MatrixMult's `== 0` check
    assert dottest(Gop, par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    xlsqr = lsqr(Gop, Gop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=4)
@pytest.mark.parametrize("par", [(par1j), (par2j)])
def test_MatrixMult_complexcast(par):
    """Automatic upcasting of MatrixMult operator dtype based on complex
    matrix
    """
    np.random.seed(10)
    Greal = rand(par["ny"], par["nx"], density=0.75).astype("float32")
    Gimag = rand(par["ny"], par["nx"], density=0.75).astype("float32")
    # a complex matrix with a float dtype request must be upcast to complex
    Gop = MatrixMult(Greal + par["imag"] * Gimag, dtype="float32")
    assert Gop.dtype == "complex64"
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_MatrixMult_repeated(par):
    """Dot-test and inversion for test_MatrixMult operator repeated
    along another dimension
    """
    np.random.seed(10)
    G = np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32") + par[
        "imag"
    ] * np.random.normal(0, 10, (par["ny"], par["nx"])).astype("float32")
    Gop = MatrixMult(G, otherdims=5, dtype=par["dtype"])
    # bugfix: par["imag"] is 0 or 1j, never 1, so the previous `== 1` check
    # always selected complexflag=3; mirror test_MatrixMult's `== 0` check
    assert dottest(
        Gop, par["ny"] * 5, par["nx"] * 5, complexflag=0 if par["imag"] == 0 else 3
    )
    x = (np.ones((par["nx"], 5)) + par["imag"] * np.ones((par["nx"], 5))).ravel()
    xlsqr = lsqr(Gop, Gop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Identity_inplace(par):
    """Dot-test, forward and adjoint for Identity operator"""
    np.random.seed(10)
    Iop = Identity(par["ny"], par["nx"], dtype=par["dtype"], inplace=True)
    assert dottest(Iop, par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    x = (1 + par["imag"]) * np.ones(par["nx"])
    y = Iop * x
    x1 = Iop.H * y
    # for rectangular shapes only the first min(ny, nx) entries are preserved
    n = min(par["ny"], par["nx"])
    assert_array_almost_equal(x[:n], y[:n], decimal=4)
    assert_array_almost_equal(x[:n], x1[:n], decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Identity_noinplace(par):
    """Dot-test, forward and adjoint for Identity operator (not in place)"""
    np.random.seed(10)
    Iop = Identity(par["ny"], par["nx"], dtype=par["dtype"], inplace=False)
    assert dottest(Iop, par["ny"], par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    x = (1 + par["imag"]) * np.ones(par["nx"])
    y = Iop * x
    x1 = Iop.H * y
    # for rectangular shapes only the first min(ny, nx) entries are preserved
    n = min(par["ny"], par["nx"])
    assert_array_almost_equal(x[:n], y[:n], decimal=4)
    assert_array_almost_equal(x[:n], x1[:n], decimal=4)
    # out-of-place: mutating x must not be reflected in y
    x[0] = 10
    assert x[0] != y[0]
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Zero(par):
    """Dot-test, forward and adjoint for Zero operator"""
    np.random.seed(10)
    Zop = Zero(par["ny"], par["nx"], dtype=par["dtype"])
    assert dottest(Zop, par["ny"], par["nx"])
    x = (1 + par["imag"]) * np.ones(par["nx"])
    y = Zop * x
    # forward and adjoint both map any input to the zero vector
    assert_array_almost_equal(y, np.zeros(par["ny"]))
    assert_array_almost_equal(Zop.H * y, np.zeros(par["nx"]))
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Flip1D(par):
    """Dot-test, forward and adjoint for Flip operator on 1d signal"""
    np.random.seed(10)
    Fop = Flip(par["ny"], dtype=par["dtype"])
    assert dottest(Fop, par["ny"], par["ny"])
    x = (1 + par["imag"]) * np.arange(par["ny"])
    # flipping twice (the adjoint of a flip is a flip) recovers the input
    assert_array_equal(x, Fop.H * (Fop * x))
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Flip2D(par):
    """Dot-test, forward and adjoint for Flip operator on 2d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis that will be flipped
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"])) + par[
        "imag"
    ] * np.outer(np.arange(par["ny"]), np.ones(par["nx"]))
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"])) + par[
        "imag"
    ] * np.outer(np.ones(par["ny"]), np.arange(par["nx"]))
    for axis in [0, 1]:
        Fop = Flip(
            (par["ny"], par["nx"]),
            axis=axis,
            dtype=par["dtype"],
        )
        assert dottest(Fop, par["ny"] * par["nx"], par["ny"] * par["nx"])
        y = Fop * x[str(axis)].ravel()
        # flip is self-adjoint and involutive: adjoint recovers the input
        xadj = Fop.H * y.ravel()
        xadj = xadj.reshape(par["ny"], par["nx"])
        assert_array_equal(x[str(axis)], xadj)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Flip3D(par):
    """Dot-test, forward and adjoint for Flip operator on 3d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis that will be flipped
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.arange(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.arange(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["2"] = np.outer(np.ones(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.arange(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.arange(
        par["nx"]
    )
    for axis in [0, 1, 2]:
        Fop = Flip(
            (par["ny"], par["nx"], par["nx"]),
            axis=axis,
            dtype=par["dtype"],
        )
        assert dottest(
            Fop, par["ny"] * par["nx"] * par["nx"], par["ny"] * par["nx"] * par["nx"]
        )
        y = Fop * x[str(axis)].ravel()
        # flip is self-adjoint and involutive: adjoint recovers the input
        xadj = Fop.H * y.ravel()
        xadj = xadj.reshape(par["ny"], par["nx"], par["nx"])
        assert_array_equal(x[str(axis)], xadj)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Symmetrize1D(par):
    """Dot-test, forward and inverse for Symmetrize operator on 1d signal"""
    np.random.seed(10)
    Sop = Symmetrize(par["ny"], dtype=par["dtype"])
    # symmetrized output has 2 * ny - 1 samples
    dottest(Sop, par["ny"] * 2 - 1, par["ny"], verb=True)
    x = (1 + par["imag"]) * np.arange(par["ny"])
    # least-squares inverse recovers the original half signal
    xinv = Sop / (Sop * x)
    assert_array_almost_equal(x, xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Symmetrize2D(par):
    """Dot-test, forward and inverse for Symmetrize operator on 2d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis to be symmetrized
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"])) + par[
        "imag"
    ] * np.outer(np.arange(par["ny"]), np.ones(par["nx"]))
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"])) + par[
        "imag"
    ] * np.outer(np.ones(par["ny"]), np.arange(par["nx"]))
    for axis in [0, 1]:
        Sop = Symmetrize(
            (par["ny"], par["nx"]),
            axis=axis,
            dtype=par["dtype"],
        )
        y = Sop * x[str(axis)].ravel()
        assert dottest(Sop, y.size, par["ny"] * par["nx"])
        # least-squares inverse recovers the original signal
        xinv = Sop / y
        assert_array_almost_equal(x[str(axis)].ravel(), xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Symmetrize3D(par):
    """Dot-test, forward and adjoint for Symmetrize operator on 3d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis to be symmetrized
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.arange(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.arange(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["2"] = np.outer(np.ones(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.arange(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.arange(
        par["nx"]
    )
    for axis in [0, 1, 2]:
        Sop = Symmetrize(
            (par["ny"], par["nx"], par["nx"]),
            axis=axis,
            dtype=par["dtype"],
        )
        y = Sop * x[str(axis)].ravel()
        assert dottest(Sop, y.size, par["ny"] * par["nx"] * par["nx"])
        # least-squares inverse recovers the original signal
        xinv = Sop / y
        assert_array_almost_equal(x[str(axis)].ravel(), xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Roll1D(par):
    """Dot-test, forward and adjoint for Roll operator on 1d signal"""
    np.random.seed(10)
    Rop = Roll(par["ny"], shift=2, dtype=par["dtype"])
    assert dottest(Rop, par["ny"], par["ny"])
    x = (1 + par["imag"]) * np.arange(par["ny"])
    # roll is orthogonal: the adjoint undoes the forward shift
    assert_array_almost_equal(x, Rop.H * (Rop * x), decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Roll2D(par):
    """Dot-test, forward and inverse for Roll operator on 2d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis to be rolled
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"])) + par[
        "imag"
    ] * np.outer(np.arange(par["ny"]), np.ones(par["nx"]))
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"])) + par[
        "imag"
    ] * np.outer(np.ones(par["ny"]), np.arange(par["nx"]))
    for axis in [0, 1]:
        Rop = Roll(
            (par["ny"], par["nx"]),
            axis=axis,
            shift=-2,
            dtype=par["dtype"],
        )
        y = Rop * x[str(axis)].ravel()
        assert dottest(Rop, par["ny"] * par["nx"], par["ny"] * par["nx"])
        # roll is orthogonal: the adjoint undoes the forward shift
        xadj = Rop.H * y
        assert_array_almost_equal(x[str(axis)].ravel(), xadj, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Roll3D(par):
    """Dot-test, forward and adjoint for Roll operator on 3d signal"""
    np.random.seed(10)
    # one input per axis, varying along the axis to be rolled
    x = {}
    x["0"] = np.outer(np.arange(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.arange(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["1"] = np.outer(np.ones(par["ny"]), np.arange(par["nx"]))[
        :, :, np.newaxis
    ] * np.ones(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.arange(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.ones(
        par["nx"]
    )
    x["2"] = np.outer(np.ones(par["ny"]), np.ones(par["nx"]))[
        :, :, np.newaxis
    ] * np.arange(par["nx"]) + par["imag"] * np.outer(
        np.ones(par["ny"]), np.ones(par["nx"])
    )[
        :, :, np.newaxis
    ] * np.arange(
        par["nx"]
    )
    for axis in [0, 1, 2]:
        Rop = Roll(
            (par["ny"], par["nx"], par["nx"]),
            axis=axis,
            shift=3,
            dtype=par["dtype"],
        )
        y = Rop * x[str(axis)].ravel()
        assert dottest(
            Rop, par["ny"] * par["nx"] * par["nx"], par["ny"] * par["nx"] * par["nx"]
        )
        # roll is orthogonal: the adjoint undoes the forward shift
        xinv = Rop.H * y
        assert_array_almost_equal(x[str(axis)].ravel(), xinv, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Sum2D(par):
    """Dot-test for Sum operator on 2d signal"""
    dims = (par["ny"], par["nx"])
    for axis in range(2):
        # data size is the product of the dims left after summing over axis
        dimsd = [d for iax, d in enumerate(dims) if iax != axis]
        Sop = Sum(dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Sop, np.prod(dimsd), par["ny"] * par["nx"])
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Sum3D(par):
    """Dot-test, forward and adjoint for Sum operator on 3d signal"""
    dims = (par["ny"], par["nx"], par["nx"])
    for axis in range(3):
        # data size is the product of the dims left after summing over axis
        dimsd = [d for iax, d in enumerate(dims) if iax != axis]
        Sop = Sum(dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Sop, np.prod(dimsd), par["ny"] * par["nx"] * par["nx"])
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Real(par):
    """Dot-test, forward and adjoint for Real operator"""
    Rop = Real(dims=(par["ny"], par["nx"]), dtype=par["dtype"])
    # complex dot-test vectors only for complex dtypes
    if np.dtype(par["dtype"]).kind == "c":
        complexflag = 3
    else:
        complexflag = 0
    assert dottest(
        Rop, par["ny"] * par["nx"], par["ny"] * par["nx"], complexflag=complexflag
    )
    np.random.seed(10)
    x = np.random.randn(par["nx"] * par["ny"]) + par["imag"] * np.random.randn(
        par["nx"] * par["ny"]
    )
    # forward keeps the real part only
    y = Rop * x
    assert_array_equal(y, np.real(x))
    y = np.random.randn(par["nx"] * par["ny"]) + par["imag"] * np.random.randn(
        par["nx"] * par["ny"]
    )
    # adjoint embeds the real part back with zero imaginary component
    x = Rop.H * y
    assert_array_equal(x, np.real(y) + 0j)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Imag(par):
    """Dot-test, forward and adjoint for Imag operator"""
    n = par["ny"] * par["nx"]
    Iop = Imag(dims=(par["ny"], par["nx"]), dtype=par["dtype"])
    iscomplex = np.dtype(par["dtype"]).kind == "c"
    # complex dtypes need conjugation on both sides of the dot-test
    assert dottest(Iop, n, n, complexflag=3 if iscomplex else 0)

    np.random.seed(10)
    # forward extracts the imaginary part
    x = np.random.randn(n) + par["imag"] * np.random.randn(n)
    assert_array_equal(Iop * x, np.imag(x))

    y = np.random.randn(n) + par["imag"] * np.random.randn(n)
    xadj = Iop.H * y
    if iscomplex:
        # adjoint places the (real) data into the imaginary component
        assert_array_equal(xadj, 0 + 1j * np.real(y))
    else:
        # for real dtypes the adjoint is identically zero
        assert_array_equal(xadj, 0)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j), (par3)])
def test_Conj(par):
    """Dot-test, forward and adjoint for Conj operator"""
    n = par["ny"] * par["nx"]
    Cop = Conj(dims=(par["ny"], par["nx"]), dtype=par["dtype"])
    # complex dtypes need conjugation on both sides of the dot-test
    cflag = 3 if np.dtype(par["dtype"]).kind == "c" else 0
    assert dottest(Cop, n, n, complexflag=cflag)

    np.random.seed(10)
    x = np.random.randn(n) + par["imag"] * np.random.randn(n)
    y = Cop * x
    xadj = Cop.H * y
    # conjugation is an involution: adjoint of forward recovers the input
    assert_array_equal(x, xadj)
    assert_array_equal(y, np.conj(x))
    assert_array_equal(xadj, np.conj(y))
| 18,151 | 31.588869 | 91 | py |
pylops | pylops-master/pytests/test_kronecker.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from scipy.sparse.linalg import lsqr
from pylops.basicoperators import FirstDerivative, Identity, Kronecker, MatrixMult
from pylops.utils import dottest
# Parameter sets: ny/nx control the factor-matrix shapes; imag switches the
# model between real (0) and complex (1j) entries.
par1 = {"ny": 11, "nx": 11, "imag": 0, "dtype": "float64"} # square real
par2 = {"ny": 21, "nx": 11, "imag": 0, "dtype": "float64"} # overdetermined real
par1j = {"ny": 11, "nx": 11, "imag": 1j, "dtype": "complex128"} # square imag
par2j = {"ny": 21, "nx": 11, "imag": 1j, "dtype": "complex128"} # overdetermined imag
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Kroneker(par):
    """Dot-test, inversion and comparison with np.kron for Kronecker operator"""
    np.random.seed(10)
    A = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    B = np.random.normal(0, 10, (par["ny"], par["nx"])).astype(par["dtype"])
    nx2 = par["nx"] ** 2
    x = np.ones(nx2) + par["imag"] * np.ones(nx2)

    Kop = Kronecker(
        MatrixMult(A, dtype=par["dtype"]),
        MatrixMult(B, dtype=par["dtype"]),
        dtype=par["dtype"],
    )
    assert dottest(Kop, par["ny"] ** 2, nx2, complexflag=0 if par["imag"] == 0 else 3)

    # invert synthetic data and check the model is recovered
    xlsqr = lsqr(Kop, Kop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=2)

    # applying the operator to the identity must reproduce the dense np.kron
    assert_array_almost_equal(np.kron(A, B), Kop * np.eye(nx2), decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Kroneker_Derivative(par):
    """Use Kronecker operator to apply the Derivative operator over one axis
    and compare with FirstDerivative(... axis=axis)
    """
    D1op = FirstDerivative(par["ny"], sampling=1, edge=True, dtype="float32")
    Dfullop = FirstDerivative(
        (par["ny"], par["nx"]), axis=0, sampling=1, edge=True, dtype="float32"
    )
    # D (x) I applies the 1d derivative along the first axis of a 2d model
    Kop = Kronecker(D1op, Identity(par["nx"], dtype=par["dtype"]), dtype=par["dtype"])

    # spike in the middle of the model
    x = np.zeros((par["ny"], par["nx"])) + par["imag"] * np.zeros(
        (par["ny"], par["nx"])
    )
    x[par["ny"] // 2, par["nx"] // 2] = 1

    assert_array_equal(Dfullop * x.ravel(), Kop * x.ravel())
| 2,270 | 37.491525 | 89 | py |
pylops | pylops-master/pytests/test_interpolation.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.signalprocessing import Bilinear, Interp
from pylops.utils import dottest
# Parameter sets covering every interpolation kind ("nearest", "linear",
# "sinc") for both real (imag=0) and complex (imag=1j) signals.
par1 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 0,
    "dtype": "float64",
    "kind": "nearest",
} # real, nearest
par2 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 1j,
    "dtype": "complex128",
    "kind": "nearest",
} # complex, nearest
par3 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 0,
    "dtype": "float64",
    "kind": "linear",
} # real, linear
par4 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 1j,
    "dtype": "complex128",
    "kind": "linear",
} # complex, linear
par5 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 0,
    "dtype": "float64",
    "kind": "sinc",
} # real, sinc
par6 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 1j,
    "dtype": "complex128",
    "kind": "sinc",
} # complex, sinc
# subsampling factor
perc_subsampling = 0.4
def test_sincinterp():
    """Check accuracy of sinc interpolation of subsampled version of input
    signal
    """
    nt = 81
    dt = 0.004
    ntsub = 10
    dtsub = dt / ntsub
    t = np.arange(nt) * dt
    # finely sampled axis, trimmed to end exactly at the last coarse sample
    tsub = np.arange(nt * ntsub) * dtsub
    tsub = tsub[: np.where(tsub == t[-1])[0][0] + 1]

    def signal(tax):
        # three-tone reference signal evaluated on a given time axis
        return (
            np.sin(2 * np.pi * 10 * tax)
            + 0.4 * np.sin(2 * np.pi * 20 * tax)
            - 2 * np.sin(2 * np.pi * 5 * tax)
        )

    x = signal(t)
    xsub = signal(tsub)

    iava = tsub[20:-20] / (dtsub * ntsub)  # exclude edges
    SI1op, iava = Interp(nt, iava, kind="sinc", dtype="float64")
    y = SI1op * x
    assert_array_almost_equal(xsub[20:-20], y, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_Interp_1dsignal(par):
    """Dot-test and forward for Interp operator for 1d signal"""
    np.random.seed(1)
    x = np.random.normal(0, 1, par["nx"]) + par["imag"] * np.random.normal(
        0, 1, par["nx"]
    )
    Nsub = int(np.round(par["nx"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
    # fixed (integer) indices
    Iop, _ = Interp(par["nx"], iava, kind=par["kind"], dtype=par["dtype"])
    assert dottest(Iop, Nsub, par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    # decimal indices (bugfix: this dot-test previously ran on Iop again
    # instead of Idecop, leaving the decimal operator untested)
    Idecop, _ = Interp(par["nx"], iava + 0.3, kind=par["kind"], dtype=par["dtype"])
    assert dottest(Idecop, Nsub, par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    # repeated indices must raise
    with pytest.raises(ValueError):
        iava_rep = iava.copy()
        iava_rep[-2] = 0
        iava_rep[-1] = 0
        _, _ = Interp(par["nx"], iava_rep + 0.3, kind=par["kind"], dtype=par["dtype"])
    # forward with integer indices extracts exactly the sampled values;
    # for "nearest", decimal indices (+0.3) round down to the same samples
    y = Iop * x
    ydec = Idecop * x
    assert_array_almost_equal(y, x[iava])
    if par["kind"] == "nearest":
        assert_array_almost_equal(ydec, x[iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_Interp_2dsignal(par):
    """Dot-test and forward for Interp operator for 2d signal"""
    np.random.seed(1)
    x = np.random.normal(0, 1, (par["nx"], par["nt"])) + par["imag"] * np.random.normal(
        0, 1, (par["nx"], par["nt"])
    )

    # 1st direction
    Nsub = int(np.round(par["nx"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
    # fixed (integer) indices
    Iop, _ = Interp(
        (par["nx"], par["nt"]),
        iava,
        axis=0,
        kind=par["kind"],
        dtype=par["dtype"],
    )
    assert dottest(
        Iop,
        Nsub * par["nt"],
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # decimal indices (fix: dot-test was missing here, unlike the 2nd
    # direction below; added for consistency)
    Idecop, _ = Interp(
        (par["nx"], par["nt"]),
        iava + 0.3,
        axis=0,
        kind=par["kind"],
        dtype=par["dtype"],
    )
    assert dottest(
        Idecop,
        Nsub * par["nt"],
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # repeated indices must raise
    with pytest.raises(ValueError):
        iava_rep = iava.copy()
        iava_rep[-2] = 0
        iava_rep[-1] = 0
        _, _ = Interp(
            (par["nx"], par["nt"]),
            iava_rep + 0.3,
            axis=0,
            kind=par["kind"],
            dtype=par["dtype"],
        )
    # forward: integer indices extract exactly the selected rows
    y = (Iop * x.ravel()).reshape(Nsub, par["nt"])
    ydec = (Idecop * x.ravel()).reshape(Nsub, par["nt"])
    assert_array_almost_equal(y, x[iava])
    if par["kind"] == "nearest":
        assert_array_almost_equal(ydec, x[iava])

    # 2nd direction
    Nsub = int(np.round(par["nt"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nt"]))[:Nsub])
    # fixed (integer) indices
    Iop, _ = Interp(
        (par["nx"], par["nt"]),
        iava,
        axis=1,
        kind=par["kind"],
        dtype=par["dtype"],
    )
    assert dottest(
        Iop,
        par["nx"] * Nsub,
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # decimal indices
    Idecop, _ = Interp(
        (par["nx"], par["nt"]),
        iava + 0.3,
        axis=1,
        kind=par["kind"],
        dtype=par["dtype"],
    )
    assert dottest(
        Idecop,
        par["nx"] * Nsub,
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # forward: integer indices extract exactly the selected columns
    y = (Iop * x.ravel()).reshape(par["nx"], Nsub)
    ydec = (Idecop * x.ravel()).reshape(par["nx"], Nsub)
    assert_array_almost_equal(y, x[:, iava])
    if par["kind"] == "nearest":
        assert_array_almost_equal(ydec, x[:, iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_Interp_3dsignal(par):
    """Dot-test and forward for Interp operator for 3d signal"""
    np.random.seed(1)
    dims = (par["ny"], par["nx"], par["nt"])
    x = np.random.normal(0, 1, dims) + par["imag"] * np.random.normal(0, 1, dims)
    nfull = np.prod(dims)
    cflag = 0 if par["imag"] == 0 else 3

    # exercise interpolation along each of the three axes in turn
    for axis in range(3):
        Nsub = int(np.round(dims[axis] * perc_subsampling))
        iava = np.sort(np.random.permutation(np.arange(dims[axis]))[:Nsub])
        subdims = list(dims)
        subdims[axis] = Nsub

        # fixed (integer) indices
        Iop, _ = Interp(
            dims,
            iava,
            axis=axis,
            kind=par["kind"],
            dtype=par["dtype"],
        )
        assert dottest(Iop, np.prod(subdims), nfull, complexflag=cflag)

        # decimal indices
        Idecop, _ = Interp(
            dims,
            iava + 0.3,
            axis=axis,
            kind=par["kind"],
            dtype=par["dtype"],
        )
        assert dottest(Idecop, np.prod(subdims), nfull, complexflag=cflag)

        if axis == 0:
            # repeated indices must raise (checked once, on the first axis)
            with pytest.raises(ValueError):
                iava_rep = iava.copy()
                iava_rep[-2] = 0
                iava_rep[-1] = 0
                _, _ = Interp(
                    dims,
                    iava_rep + 0.3,
                    axis=axis,
                    kind=par["kind"],
                    dtype=par["dtype"],
                )

        # forward: integer indices extract exactly the sampled slices
        y = (Iop * x.ravel()).reshape(subdims)
        ydec = (Idecop * x.ravel()).reshape(subdims)
        xsub = np.take(x, iava, axis=axis)
        assert_array_almost_equal(y, xsub)
        if par["kind"] == "nearest":
            assert_array_almost_equal(ydec, xsub)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Bilinear_2dsignal(par):
    """Dot-test and forward for Bilinear operator for 2d signal"""
    np.random.seed(1)
    x = np.random.normal(0, 1, (par["nx"], par["nt"])) + par["imag"] * np.random.normal(
        0, 1, (par["nx"], par["nt"])
    )
    # fixed (integer) indices: 2 x npoints array of sampling points along
    # the main diagonal of the grid
    iava = np.vstack((np.arange(0, 10), np.arange(0, 10)))
    Iop = Bilinear(iava, dims=(par["nx"], par["nt"]), dtype=par["dtype"])
    assert dottest(
        Iop, 10, par["nx"] * par["nt"], complexflag=0 if par["imag"] == 0 else 3
    )
    # decimal indices
    Nsub = int(np.round(par["nx"] * par["nt"] * perc_subsampling))
    iavadec = np.vstack(
        (
            np.random.uniform(0, par["nx"] - 1, Nsub),
            np.random.uniform(0, par["nt"] - 1, Nsub),
        )
    )
    Idecop = Bilinear(iavadec, dims=(par["nx"], par["nt"]), dtype=par["dtype"])
    assert dottest(
        Idecop, Nsub, par["nx"] * par["nt"], complexflag=0 if par["imag"] == 0 else 3
    )
    # repeated indices must raise. Bugfix: points live in *columns* of iava,
    # so duplicates must be created column-wise; the previous row-wise
    # assignment (iava_rep[-2] = [0, 0]) raised a numpy broadcast ValueError
    # before ever reaching the operator's own repeated-point check.
    with pytest.raises(ValueError):
        iava_rep = iava.copy()
        iava_rep[:, -2] = 0
        iava_rep[:, -1] = 0
        _ = Bilinear(iava_rep, dims=(par["nx"], par["nt"]), dtype=par["dtype"])
    # forward with integer indices extracts exactly the sampled grid values
    y = Iop * x.ravel()
    assert_array_almost_equal(y, x[iava[0], iava[1]])
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Bilinear_3dsignal(par):
    """Dot-test and forward for Bilinear operator for 3d signal"""
    np.random.seed(1)
    x = np.random.normal(0, 1, (par["ny"], par["nx"], par["nt"])) + par[
        "imag"
    ] * np.random.normal(0, 1, (par["ny"], par["nx"], par["nt"]))
    # fixed (integer) indices over the first two axes; the operator acts on
    # every sample of the remaining (time) axis
    iava = np.vstack((np.arange(0, 10), np.arange(0, 10)))
    Iop = Bilinear(iava, dims=(par["ny"], par["nx"], par["nt"]), dtype=par["dtype"])
    assert dottest(
        Iop,
        10 * par["nt"],
        par["ny"] * par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # decimal indices
    Nsub = int(np.round(par["ny"] * par["nt"] * perc_subsampling))
    iavadec = np.vstack(
        (
            np.random.uniform(0, par["ny"] - 1, Nsub),
            np.random.uniform(0, par["nx"] - 1, Nsub),
        )
    )
    Idecop = Bilinear(
        iavadec, dims=(par["ny"], par["nx"], par["nt"]), dtype=par["dtype"]
    )
    assert dottest(
        Idecop,
        Nsub * par["nt"],
        par["ny"] * par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    # repeated indices must raise. Bugfix: duplicates must be created
    # column-wise (points are columns of iava); the previous row-wise
    # assignment raised a numpy broadcast ValueError instead of exercising
    # the operator's own repeated-point check.
    with pytest.raises(ValueError):
        iava_rep = iava.copy()
        iava_rep[:, -2] = 0
        iava_rep[:, -1] = 0
        _ = Bilinear(
            iava_rep, dims=(par["ny"], par["nx"], par["nt"]), dtype=par["dtype"]
        )
    # forward with integer indices extracts exactly the sampled traces
    y = Iop * x.ravel()
    assert_array_almost_equal(y, x[iava[0], iava[1]].ravel())
| 12,425 | 26.30989 | 88 | py |
pylops | pylops-master/pytests/test_lsm.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.lsm import LSM
# Geometry/axes sizes and samplings shared by all LSM tests
PAR = {
    "ny": 10,
    "nx": 12,
    "nz": 20,
    "nt": 50,
    "dy": 3,
    "dx": 1,
    "dz": 2,
    "dt": 0.004,
    "nsy": 4,
    "nry": 8,
    "nsx": 6,
    "nrx": 4,
}
# Check if skfmm is available and by-pass tests using it otherwise. This is
# currently required for Travis as since we moved to Python3.8 it has
# stopped working
try:
    import skfmm # noqa: F401
    skfmm_enabled = True
except ImportError:
    skfmm_enabled = False
# constant background velocity
v0 = 500
# spatial and temporal axes
y = np.arange(PAR["ny"]) * PAR["dy"]
x = np.arange(PAR["nx"]) * PAR["dx"]
z = np.arange(PAR["nz"]) * PAR["dz"]
t = np.arange(PAR["nt"]) * PAR["dt"]
# sources: 2d as (x, z) rows, 3d as (y, x, z) rows, all at depth z=2
sy = np.linspace(y.min(), y.max(), PAR["nsy"])
sx = np.linspace(x.min(), x.max(), PAR["nsx"])
syy, sxx = np.meshgrid(sy, sx, indexing="ij")
s2d = np.vstack((sx, 2 * np.ones(PAR["nsx"])))
s3d = np.vstack((syy.ravel(), sxx.ravel(), 2 * np.ones(PAR["nsx"] * PAR["nsy"])))
# receivers, laid out like the sources
ry = np.linspace(y.min(), y.max(), PAR["nry"])
rx = np.linspace(x.min(), x.max(), PAR["nrx"])
ryy, rxx = np.meshgrid(ry, rx, indexing="ij")
r2d = np.vstack((rx, 2 * np.ones(PAR["nrx"])))
r3d = np.vstack((ryy.ravel(), rxx.ravel(), 2 * np.ones(PAR["nrx"] * PAR["nry"])))
# source wavelet and index of its peak
wav, _, wavc = ricker(t[:41], f0=40)
# traveltime mode / dynamic weighting combinations under test
par1 = {"mode": "analytic", "dynamic": False}
par2 = {"mode": "eikonal", "dynamic": False}
par1d = {"mode": "analytic", "dynamic": True}
par2d = {"mode": "eikonal", "dynamic": True}
def test_unknown_mode():
    """Check error is raised if unknown mode is passed"""
    with pytest.raises(NotImplementedError):
        # "foo" is not a supported traveltime computation mode
        LSM(z, x, t, s2d, r2d, 0, np.ones(3), 1, mode="foo")
@pytest.mark.parametrize("par", [(par1), (par2), (par1d), (par2d)])
def test_lsm2d(par):
    """Dot-test and inverse for LSM operator"""
    # eikonal mode requires skfmm; skip silently when unavailable
    if skfmm_enabled or par["mode"] != "eikonal":
        vel = v0 * np.ones((PAR["nx"], PAR["nz"]))
        # reflectivity with two horizontal reflectors
        refl = np.zeros((PAR["nx"], PAR["nz"]))
        refl[:, PAR["nz"] // 2] = 1
        refl[:, 3 * PAR["nz"] // 4] = 1
        lsm = LSM(
            z,
            x,
            t,
            s2d,
            r2d,
            vel if par["mode"] == "eikonal" else v0,
            wav,
            wavc,
            mode=par["mode"],
            dynamic=par["dynamic"],
            dottest=True,
        )
        # model data, invert, and remodel from the inverted reflectivity
        d = lsm.Demop * refl.ravel()
        d = d.reshape(PAR["nsx"], PAR["nrx"], PAR["nt"])
        # fix: solver verbosity disabled (was show=True, spamming test logs;
        # all sibling tests run their solvers quietly)
        minv = lsm.solve(d.ravel(), **dict(iter_lim=100, show=False))
        minv = minv.reshape(PAR["nx"], PAR["nz"])
        dinv = lsm.Demop * minv.ravel()
        dinv = dinv.reshape(PAR["nsx"], PAR["nrx"], PAR["nt"])
        # data must be matched tightly, model recovered more loosely
        assert_array_almost_equal(d / d.max(), dinv / d.max(), decimal=2)
        assert_array_almost_equal(refl / refl.max(), minv / refl.max(), decimal=1)
| 2,867 | 27.969697 | 82 | py |
pylops | pylops-master/pytests/test_marchenko.py | import numpy as np
import pytest
from scipy.signal import convolve
from pylops.waveeqprocessing.marchenko import Marchenko
# Test data
inputfile = "testdata/marchenko/input.npz"
# Parameters
vel = 2400.0 # velocity
toff = 0.045 # direct arrival time shift
nsmooth = 10 # time window smoothing
nfmax = 1000 # max frequency for MDC (#samples)
# Input data (pre-modelled arrays; see the npz file for their provenance)
inputdata = np.load(inputfile)
# Receivers
r = inputdata["r"]
nr = r.shape[1]
dr = r[0, 1] - r[0, 0]
# Sources
s = inputdata["s"]
ns = s.shape[1]
# Virtual points
vs = inputdata["vs"]
# Multiple virtual points (three points 100 apart laterally, same depth)
vs_multi = [np.arange(-1, 2) * 100 + vs[0], np.ones(3) * vs[1]]
# Density model
rho = inputdata["rho"]
z, x = inputdata["z"], inputdata["x"]
# Reflection data and subsurface fields
R = inputdata["R"]
R = np.swapaxes(R, 0, 1)
gsub = inputdata["Gsub"]
g0sub = inputdata["G0sub"]
wav = inputdata["wav"]
wav_c = np.argmax(wav)
t = inputdata["t"]
ot, dt, nt = t[0], t[1] - t[0], len(t)
# convolve reference Green's functions with the wavelet and keep nt samples
# from the wavelet peak onwards
gsub = np.apply_along_axis(convolve, 0, gsub, wav, mode="full")
gsub = gsub[wav_c:][:nt]
g0sub = np.apply_along_axis(convolve, 0, g0sub, wav, mode="full")
g0sub = g0sub[wav_c:][:nt]
# Direct arrival window (straight-ray traveltimes in the constant velocity)
trav = np.sqrt((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2) / vel
trav_multi = (
    np.sqrt(
        (vs_multi[0] - r[0][:, np.newaxis]) ** 2
        + (vs_multi[1] - r[1][:, np.newaxis]) ** 2
    )
    / vel
)
# Create Rs in frequency domain (two-sided in time, then rfft truncated to
# the first nfmax frequency samples)
Rtwosided = np.concatenate((np.zeros((nr, ns, nt - 1)), R), axis=-1)
R1twosided = np.concatenate((np.flip(R, axis=-1), np.zeros((nr, ns, nt - 1))), axis=-1)
Rtwosided_fft = np.fft.rfft(Rtwosided, 2 * nt - 1, axis=-1) / np.sqrt(2 * nt - 1)
Rtwosided_fft = Rtwosided_fft[..., :nfmax]
R1twosided_fft = np.fft.rfft(R1twosided, 2 * nt - 1, axis=-1) / np.sqrt(2 * nt - 1)
R1twosided_fft = R1twosided_fft[..., :nfmax]
# solver iterations / input scaling / fft backend combinations under test
par1 = {"niter": 10, "prescaled": False, "fftengine": "numpy"}
par2 = {"niter": 10, "prescaled": True, "fftengine": "numpy"}
par3 = {"niter": 10, "prescaled": False, "fftengine": "scipy"}
par4 = {"niter": 10, "prescaled": False, "fftengine": "fftw"}
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Marchenko_freq(par):
    """Solve marchenko equations using input Rs in frequency domain"""
    # when the operator expects pre-scaled input, apply sqrt(2nt-1)*dt*dr
    if par["prescaled"]:
        Rfft = np.sqrt(2 * nt - 1) * dt * dr * Rtwosided_fft
    else:
        Rfft = Rtwosided_fft

    MarchenkoWM = Marchenko(
        Rfft,
        nt=nt,
        dt=dt,
        dr=dr,
        nfmax=nfmax,
        wav=wav,
        toff=toff,
        nsmooth=nsmooth,
        prescaled=par["prescaled"],
        fftengine=par["fftengine"],
    )
    _, _, _, g_minus, g_plus = MarchenkoWM.apply_onepoint(
        trav,
        G0=g0sub.T,
        rtm=True,
        greens=True,
        dottest=True,
        **dict(iter_lim=par["niter"], show=0)
    )
    # compare causal part of the estimated Green's function with reference,
    # both peak-normalized
    gest = (g_minus + g_plus)[:, nt - 1 :].T
    gest_norm = gest / gest.max()
    gref_norm = gsub / gsub.max()
    assert np.linalg.norm(gref_norm - gest_norm) / np.linalg.norm(gref_norm) < 1e-1
@pytest.mark.parametrize("par", [(par1)])
def test_Marchenko_time(par):
    """Solve marchenko equations using input Rs in time domain"""
    MarchenkoWM = Marchenko(
        R, dt=dt, dr=dr, nfmax=nfmax, wav=wav, toff=toff, nsmooth=nsmooth
    )
    _, _, _, g_minus, g_plus = MarchenkoWM.apply_onepoint(
        trav,
        G0=g0sub.T,
        rtm=True,
        greens=True,
        dottest=True,
        **dict(iter_lim=par["niter"], show=0)
    )
    # compare causal part of the estimated Green's function with reference,
    # both peak-normalized
    gest = (g_minus + g_plus)[:, nt - 1 :].T
    gest_norm = gest / gest.max()
    gref_norm = gsub / gsub.max()
    assert np.linalg.norm(gref_norm - gest_norm) / np.linalg.norm(gref_norm) < 1e-1
@pytest.mark.parametrize("par", [(par1)])
def test_Marchenko_time_ana(par):
    """Solve marchenko equations using input Rs in time domain and analytical
    direct wave
    """
    MarchenkoWM = Marchenko(
        R, dt=dt, dr=dr, nfmax=nfmax, wav=wav, toff=toff, nsmooth=nsmooth
    )
    # rtm=False: no G0 passed, the direct wave is computed analytically
    _, _, g_minus, g_plus = MarchenkoWM.apply_onepoint(
        trav,
        nfft=2**11,
        rtm=False,
        greens=True,
        dottest=True,
        **dict(iter_lim=par["niter"], show=0)
    )
    # compare causal part of the estimated Green's function with reference,
    # both peak-normalized
    gest = (g_minus + g_plus)[:, nt - 1 :].T
    gest_norm = gest / gest.max()
    gref_norm = gsub / gsub.max()
    assert np.linalg.norm(gref_norm - gest_norm) / np.linalg.norm(gref_norm) < 1e-1
@pytest.mark.parametrize("par", [(par1)])
def test_Marchenko_timemulti_ana(par):
    """Solve marchenko equations using input Rs in time domain with multiple
    points
    """
    MarchenkoWM = Marchenko(
        R, dt=dt, dr=dr, nfmax=nfmax, wav=wav, toff=toff, nsmooth=nsmooth
    )
    _, _, g_minus, g_plus = MarchenkoWM.apply_multiplepoints(
        trav_multi,
        nfft=2**11,
        rtm=False,
        greens=True,
        dottest=True,
        **dict(iter_lim=par["niter"], show=0)
    )
    # compare the central virtual point (index 1) against the single-point
    # reference: causal part only, both peak-normalized
    gest = (g_minus + g_plus)[:, 1, nt - 1 :].T
    gest_norm = gest / gest.max()
    gref_norm = gsub / gsub.max()
    assert np.linalg.norm(gref_norm - gest_norm) / np.linalg.norm(gref_norm) < 1e-1
| 5,231 | 28.066667 | 87 | py |
pylops | pylops-master/pytests/test_directwave.py | import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.signal import convolve
from pylops.waveeqprocessing.marchenko import directwave
# Test data (pre-modelled finite-difference references)
inputfile2d = "testdata/marchenko/input.npz"
inputfile3d = "testdata/marchenko/direct3D.npz"
# Parameters
vel = 2400.0 # velocity
def test_direct2D():
    """Check consistency of analytical 2D Green's function with FD modelling"""
    inputdata = np.load(inputfile2d)

    # geometry: receivers and virtual source
    r = inputdata["r"]
    nr = r.shape[1]
    vs = inputdata["vs"]

    # time axis
    t = inputdata["t"]
    dt, nt = t[1] - t[0], len(t)

    # FD reference, convolved with the wavelet and trimmed at its peak
    wav = inputdata["wav"]
    wav_c = np.argmax(wav)
    G0FD = np.apply_along_axis(convolve, 0, inputdata["G0sub"], wav, mode="full")
    G0FD = G0FD[wav_c:][:nt]

    # analytical Green's function from straight-ray traveltimes
    trav = np.sqrt((vs[0] - r[0]) ** 2 + (vs[1] - r[1]) ** 2) / vel
    G0ana = directwave(wav, trav, nt, dt, nfft=nt, derivative=False)
    # differentiate (and re-pad to nt samples) to match the FD response
    G0ana = np.vstack([np.diff(G0ana, axis=0), np.zeros(nr)])

    assert_array_almost_equal(
        G0FD / np.max(np.abs(G0FD)), G0ana / np.max(np.abs(G0ana)), decimal=1
    )
def test_direct3D():
    """Check consistency of analytical 3D Green's function with FD modelling"""
    inputdata = np.load(inputfile3d)

    # geometry: receivers and virtual source
    r = inputdata["r"]
    nr = r.shape[0]
    vs = inputdata["vs"]

    # time axis
    t = inputdata["t"]
    dt, nt = t[1] - t[0], len(t)

    # FD reference, convolved with the wavelet and trimmed at its peak
    wav = inputdata["wav"]
    wav_c = np.argmax(wav)
    G0FD = np.apply_along_axis(convolve, 0, inputdata["G0"][:, :nr], wav, mode="full")
    G0FD = G0FD[wav_c:][:nt]

    # analytical Green's function from straight-ray distances/traveltimes
    dist = np.sqrt(
        (vs[0] - r[:, 0]) ** 2 + (vs[1] - r[:, 1]) ** 2 + (vs[2] - r[:, 2]) ** 2
    )
    trav = dist / vel
    G0ana = directwave(
        wav, trav, nt, dt, nfft=nt, dist=dist, kind="3d", derivative=False
    )
    # differentiate (and re-pad to nt samples) to match the FD response
    G0ana = np.vstack([np.diff(G0ana, axis=0), np.zeros(nr)])

    assert_array_almost_equal(
        G0FD / np.max(np.abs(G0FD)), G0ana / np.max(np.abs(G0ana)), decimal=1
    )
| 2,266 | 24.188889 | 80 | py |
pylops | pylops-master/pytests/test_waveeqprocessing.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.utils import dottest
from pylops.utils.seismicevents import linear2d, linear3d, makeaxis
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.mdd import MDC, MDD
# Base geometry/axes parameters; the par1..par8 variants below toggle odd/even
# nt, single/double-sided operators and full/truncated fft axes
PAR = {
    "ox": 0,
    "dx": 2,
    "nx": 10,
    "oy": 0,
    "dy": 2,
    "ny": 20,
    "ot": 0,
    "dt": 0.004,
    "nt": 401,
    "f0": 20,
}
# nt odd, single-sided, full fft
par1 = PAR.copy()
par1["twosided"] = False
par1["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2))
# nt odd, double-sided, full fft
par2 = PAR.copy()
par2["twosided"] = True
par2["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2))
# nt odd, single-sided, truncated fft
par3 = PAR.copy()
par3["twosided"] = False
par3["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2)) - 30
# nt odd, double-sided, truncated fft
par4 = PAR.copy()
par4["twosided"] = True
par4["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2)) - 30
# nt even, single-sided, full fft
par5 = PAR.copy()
par5["nt"] -= 1
par5["twosided"] = False
par5["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2))
# nt even, double-sided, full fft
par6 = PAR.copy()
par6["nt"] -= 1
par6["twosided"] = True
par6["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2))
# nt even, single-sided, truncated fft
par7 = PAR.copy()
par7["nt"] -= 1
par7["twosided"] = False
par7["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2)) - 30
# nt even, double-sided, truncated fft
par8 = PAR.copy()
par8["nt"] -= 1
par8["twosided"] = True
par8["nfmax"] = int(np.ceil((PAR["nt"] + 1.0) / 2)) - 30
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7), (par8)]
)
def test_MDC_1virtualsource(par):
    """Dot-test and inversion for MDC operator of 1 virtual source"""
    # NOTE(review): this writes "nt2" into the module-level par dict shared
    # across parametrized cases — confirm the mutation is intended
    if par["twosided"]:
        par["nt2"] = 2 * par["nt"] - 1
    else:
        par["nt2"] = par["nt"]
    # model: one linear event; operator: three linear events
    v = 1500
    it0_m = 25
    t0_m = it0_m * par["dt"]
    theta_m = 0
    amp_m = 1.0
    it0_G = np.array([25, 50, 75])
    t0_G = it0_G * par["dt"]
    theta_G = (0, 0, 0)
    phi_G = (0, 0, 0)
    amp_G = (1.0, 0.6, 2.0)
    # Create axis
    t, _, x, y = makeaxis(par)
    # Create wavelet
    wav = ricker(t[:41], f0=par["f0"])[0]
    # Generate model
    _, mwav = linear2d(x, t, v, t0_m, theta_m, amp_m, wav)
    # Generate operator
    _, Gwav = linear3d(x, y, t, v, t0_G, theta_G, phi_G, amp_G, wav)
    # Add negative part to data and model
    if par["twosided"]:
        mwav = np.concatenate((np.zeros((par["nx"], par["nt"] - 1)), mwav), axis=-1)
        Gwav = np.concatenate(
            (np.zeros((par["ny"], par["nx"], par["nt"] - 1)), Gwav), axis=-1
        )
    # Define MDC linear operator (kernel passed as nfmax freq slices first)
    Gwav_fft = np.fft.fft(Gwav, par["nt2"], axis=-1)
    Gwav_fft = Gwav_fft[..., : par["nfmax"]]
    MDCop = MDC(
        Gwav_fft.transpose(2, 0, 1),
        nt=par["nt2"],
        nv=1,
        dt=par["dt"],
        dr=par["dx"],
        twosided=par["twosided"],
    )
    dottest(MDCop, par["nt2"] * par["ny"], par["nt2"] * par["nx"])
    mwav = mwav.T
    d = MDCop * mwav.ravel()
    d = d.reshape(par["nt2"], par["ny"])
    # peak of each event in the modelled data should equal the theoretical
    # convolution amplitude: wavelet energy x amplitudes x dx*dt integration
    # x sqrt(nt2) — presumably derived from MDC's internal fft scaling;
    # verify against the pylops MDC documentation if this ever drifts
    for it, amp in zip(it0_G, amp_G):
        ittot = it0_m + it
        if par["twosided"]:
            ittot += par["nt"] - 1
        assert (
            np.abs(
                d[ittot, par["ny"] // 2]
                - np.abs(wav**2).sum()
                * amp_m
                * amp
                * par["nx"]
                * par["dx"]
                * par["dt"]
                * np.sqrt(par["nt2"])
            )
            < 1e-2
        )
    # multidimensional deconvolution should recover the model
    minv = MDD(
        Gwav[:, :, par["nt"] - 1 :] if par["twosided"] else Gwav,
        d[par["nt"] - 1 :].T if par["twosided"] else d.T,
        dt=par["dt"],
        dr=par["dx"],
        nfmax=par["nfmax"],
        twosided=par["twosided"],
        add_negative=True,
        adjoint=False,
        psf=False,
        dottest=False,
        **dict(damp=1e-10, iter_lim=50, show=0)
    )
    assert_array_almost_equal(mwav, minv.T, decimal=2)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7), (par8)]
)
def test_MDC_Nvirtualsources(par):
    """Dot-test and inversion for MDC operator of N virtual source"""
    # NOTE(review): this writes "nt2" into the module-level par dict shared
    # across parametrized cases — confirm the mutation is intended
    if par["twosided"]:
        par["nt2"] = 2 * par["nt"] - 1
    else:
        par["nt2"] = par["nt"]
    # model: one linear event over nx virtual sources; operator: three events
    v = 1500
    it0_m = 25
    t0_m = it0_m * par["dt"]
    theta_m = 0
    phi_m = 0
    amp_m = 1.0
    it0_G = np.array([25, 50, 75])
    t0_G = it0_G * par["dt"]
    theta_G = (0, 0, 0)
    phi_G = (0, 0, 0)
    amp_G = (1.0, 0.6, 2.0)
    # Create axis
    t, _, x, y = makeaxis(par)
    # Create wavelet
    wav = ricker(t[:41], f0=par["f0"])[0]
    # Generate model
    _, mwav = linear3d(x, x, t, v, t0_m, theta_m, phi_m, amp_m, wav)
    # Generate operator
    _, Gwav = linear3d(x, y, t, v, t0_G, theta_G, phi_G, amp_G, wav)
    # Add negative part to data and model
    if par["twosided"]:
        mwav = np.concatenate(
            (np.zeros((par["nx"], par["nx"], par["nt"] - 1)), mwav), axis=-1
        )
        Gwav = np.concatenate(
            (np.zeros((par["ny"], par["nx"], par["nt"] - 1)), Gwav), axis=-1
        )
    # Define MDC linear operator (kernel passed as nfmax freq slices first)
    Gwav_fft = np.fft.fft(Gwav, par["nt2"], axis=-1)
    Gwav_fft = Gwav_fft[..., : par["nfmax"]]
    MDCop = MDC(
        Gwav_fft.transpose(2, 0, 1),
        nt=par["nt2"],
        nv=par["nx"],
        dt=par["dt"],
        dr=par["dx"],
        twosided=par["twosided"],
    )
    dottest(
        MDCop, par["nt2"] * par["ny"] * par["nx"], par["nt2"] * par["nx"] * par["nx"]
    )
    mwav = mwav.transpose(2, 0, 1)
    d = MDCop * mwav.ravel()
    d = d.reshape(par["nt2"], par["ny"], par["nx"])
    # each event should produce a local maximum in time at the expected
    # arrival sample of the central trace
    for it, amp in zip(it0_G, amp_G):
        ittot = it0_m + it
        if par["twosided"]:
            ittot += par["nt"] - 1
        assert (
            d[ittot, par["ny"] // 2, par["nx"] // 2]
            > d[ittot - 1, par["ny"] // 2, par["nx"] // 2]
        )
        assert (
            d[ittot, par["ny"] // 2, par["nx"] // 2]
            > d[ittot + 1, par["ny"] // 2, par["nx"] // 2]
        )
    # multidimensional deconvolution should recover the model
    minv = MDD(
        Gwav[:, :, par["nt"] - 1 :] if par["twosided"] else Gwav,
        d[par["nt"] - 1 :].transpose(1, 2, 0)
        if par["twosided"]
        else d.transpose(1, 2, 0),
        dt=par["dt"],
        dr=par["dx"],
        nfmax=par["nfmax"],
        twosided=par["twosided"],
        add_negative=True,
        adjoint=False,
        psf=False,
        dottest=False,
        **dict(damp=1e-10, iter_lim=50, show=0)
    )
    assert_array_almost_equal(mwav, minv.transpose(2, 0, 1), decimal=2)
| 6,650 | 25.604 | 85 | py |
pylops | pylops-master/pytests/test_convolve.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.signal.windows import triang
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import Convolve1D, Convolve2D, ConvolveND
from pylops.utils import dottest
# filters: 1d/2d/3d triangular kernels built from the same per-axis tapers
nfilt = (5, 6, 5)
h1 = triang(nfilt[0], sym=True)
h2 = np.outer(triang(nfilt[0], sym=True), triang(nfilt[1], sym=True))
h3 = np.outer(
    np.outer(triang(nfilt[0], sym=True), triang(nfilt[1], sym=True)),
    triang(nfilt[2], sym=True),
).reshape(nfilt)
# Parameter sets: offset controls the filter phase (centred = zero phase),
# axis selects the convolution direction
par1_1d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": nfilt[0] // 2,
    "axis": 0,
} # zero phase, first direction
par2_1d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": 0,
    "axis": 0,
} # non-zero phase, first direction
par3_1d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": nfilt[0] // 2,
    "axis": 1,
} # zero phase, second direction
par4_1d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": nfilt[0] // 2 - 1,
    "axis": 1,
} # non-zero phase, second direction
par5_1d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": nfilt[0] // 2,
    "axis": 2,
} # zero phase, third direction
par6_1d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": nfilt[0] // 2 - 1,
    "axis": 2,
} # non-zero phase, third direction
par1_2d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": (nfilt[0] // 2, nfilt[1] // 2),
    "axis": 0,
} # zero phase, first direction
par2_2d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": (nfilt[0] // 2 - 1, nfilt[1] // 2 + 1),
    "axis": 0,
} # non-zero phase, first direction
par3_2d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": (nfilt[0] // 2, nfilt[1] // 2),
    "axis": 1,
} # zero phase, second direction
par4_2d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": (nfilt[0] // 2 - 1, nfilt[1] // 2 + 1),
    "axis": 1,
} # non-zero phase, second direction
par5_2d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "offset": (nfilt[0] // 2, nfilt[1] // 2),
    "axis": 2,
} # zero phase, third direction
par6_2d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "offset": (nfilt[0] // 2 - 1, nfilt[1] // 2 + 1),
    "axis": 2,
} # non-zero phase, third direction
par1_3d = {
    "nz": 21,
    "ny": 51,
    "nx": 31,
    "nt": 5,
    "offset": (nfilt[0] // 2, nfilt[1] // 2, nfilt[2] // 2),
    "axis": 0,
} # zero phase, all directions
par2_3d = {
    "nz": 21,
    "ny": 61,
    "nx": 31,
    "nt": 5,
    "offset": (nfilt[0] // 2 - 1, nfilt[1] // 2 + 1, nfilt[2] // 2 + 1),
    "axis": 0,
} # non-zero phase, first direction
@pytest.mark.parametrize(
    "par", [(par1_1d), (par2_1d), (par3_1d), (par4_1d), (par5_1d), (par6_1d)]
)
def test_Convolve1D(par):
    """Dot-test and inversion for Convolve1D operator"""
    # fix the seed so the lsqr inversion below is reproducible
    np.random.seed(10)
    # 1D: convolve a single spike and check lsqr recovers it
    if par["axis"] == 0:
        Cop = Convolve1D(par["nx"], h=h1, offset=par["offset"], dtype="float64")
        assert dottest(Cop, par["nx"], par["nx"])
        x = np.zeros((par["nx"]))
        x[par["nx"] // 2] = 1.0
        xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=1)
    # 1D filter applied along one axis of a 2D model (only for axis 0 or 1)
    if par["axis"] < 2:
        Cop = Convolve1D(
            (par["ny"], par["nx"]),
            h=h1,
            offset=par["offset"],
            axis=par["axis"],
            dtype="float64",
        )
        assert dottest(Cop, par["ny"] * par["nx"], par["ny"] * par["nx"])
        # box-shaped model centered in the grid
        x = np.zeros((par["ny"], par["nx"]))
        x[
            int(par["ny"] / 2 - 3) : int(par["ny"] / 2 + 3),
            int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
        ] = 1.0
        x = x.ravel()
        xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=1)
    # 1D filter applied along one axis of a 3D model (all fixtures)
    Cop = Convolve1D(
        (par["nz"], par["ny"], par["nx"]),
        h=h1,
        offset=par["offset"],
        axis=par["axis"],
        dtype="float64",
    )
    assert dottest(
        Cop, par["nz"] * par["ny"] * par["nx"], par["nz"] * par["ny"] * par["nx"]
    )
    x = np.zeros((par["nz"], par["ny"], par["nx"]))
    x[
        int(par["nz"] / 2 - 3) : int(par["nz"] / 2 + 3),
        int(par["ny"] / 2 - 3) : int(par["ny"] / 2 + 3),
        int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
    ] = 1.0
    x = x.ravel()
    xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=1)
@pytest.mark.parametrize(
    "par", [(par1_2d), (par2_2d), (par3_2d), (par4_2d), (par5_2d), (par6_2d)]
)
def test_Convolve2D(par):
    """Dot-test and inversion for Convolve2D operator"""
    # 2D filter on a 2D model
    # NOTE(review): this branch only runs for the axis==2 fixtures (par5/par6)
    if par["axis"] == 2:
        Cop = Convolve2D(
            (par["ny"], par["nx"]),
            h=h2,
            offset=par["offset"],
            dtype="float64",
        )
        assert dottest(Cop, par["ny"] * par["nx"], par["ny"] * par["nx"])
        x = np.zeros((par["ny"], par["nx"]))
        x[
            int(par["ny"] / 2 - 3) : int(par["ny"] / 2 + 3),
            int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
        ] = 1.0
        x = x.ravel()
        xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=1)
    # 2D filter applied over the two axes of a 3D model not equal to par["axis"]
    axes = list(range(3))
    axes.remove(par["axis"])
    Cop = Convolve2D(
        (par["nz"], par["ny"], par["nx"]),
        h=h2,
        offset=par["offset"],
        axes=axes,
        dtype="float64",
    )
    assert dottest(
        Cop, par["nz"] * par["ny"] * par["nx"], par["nz"] * par["ny"] * par["nx"]
    )
    x = np.zeros((par["nz"], par["ny"], par["nx"]))
    x[
        int(par["nz"] / 2 - 3) : int(par["nz"] / 2 + 3),
        int(par["ny"] / 2 - 3) : int(par["ny"] / 2 + 3),
        int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
    ] = 1.0
    x = x.ravel()
    xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
    # due to ringing in solution we cannot use assert_array_almost_equal
    assert np.linalg.norm(xlsqr - x) / np.linalg.norm(xlsqr) < 2e-1
@pytest.mark.parametrize("par", [(par1_3d), (par2_3d)])
def test_Convolve3D(par):
    """Dot-test and inversion for ConvolveND operator"""
    # 3D filter on a 3D model: dot-test and lsqr inversion of a box model
    Cop = ConvolveND(
        (par["nz"], par["ny"], par["nx"]),
        h=h3,
        offset=par["offset"],
        dtype="float64",
    )
    assert dottest(
        Cop, par["nz"] * par["ny"] * par["nx"], par["nz"] * par["ny"] * par["nx"]
    )
    x = np.zeros((par["nz"], par["ny"], par["nx"]))
    x[
        int(par["nz"] / 2 - 3) : int(par["nz"] / 2 + 3),
        int(par["ny"] / 2 - 3) : int(par["ny"] / 2 + 3),
        int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
    ] = 1.0
    x = x.ravel()
    y = Cop * x
    xlsqr = lsqr(Cop, y, damp=1e-20, iter_lim=400, atol=1e-8, btol=1e-8, show=0)[0]
    # due to ringing in solution we cannot use assert_array_almost_equal
    assert np.linalg.norm(xlsqr - x) / np.linalg.norm(xlsqr) < 2e-1
    # 3D filter applied over the first three axes of a 4D model
    # (only modelling: dot-test, no inversion)
    Cop = ConvolveND(
        (par["nz"], par["ny"], par["nx"], par["nt"]),
        h=h3,
        offset=par["offset"],
        axes=[0, 1, 2],
        dtype="float64",
    )
    assert dottest(
        Cop,
        par["nz"] * par["ny"] * par["nx"] * par["nt"],
        par["nz"] * par["ny"] * par["nx"] * par["nt"],
    )
| 7,491 | 26.645756 | 93 | py |
pylops | pylops-master/pytests/test_wavelets.py | import numpy as np
import pytest
from pylops.utils.wavelets import gaussian, klauder, ormsby, ricker
# Fixtures exercising both parities of the time axis length
par1 = {"nt": 21, "dt": 0.004}  # odd samples
par2 = {"nt": 20, "dt": 0.004}  # even samples
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_gaussian(par):
    """Gaussian wavelet: check two-sided size and unit amplitude at its centre."""
    taxis = np.arange(par["nt"]) * par["dt"]
    wav, twav, wcenter = gaussian(taxis, std=10)
    # expected two-sided length: an even-length axis contributes nt - 1 samples,
    # an odd-length one nt; the wavelet is then mirrored around its centre
    nside = par["nt"] - 1 if par["nt"] % 2 == 0 else par["nt"]
    nexpected = 2 * nside - 1
    assert twav.size == nexpected
    assert wav.shape[0] == nexpected
    assert wav[wcenter] == 1
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_klauder(par):
    """Klauder wavelet: check two-sided size and unit amplitude at its centre."""
    taxis = np.arange(par["nt"]) * par["dt"]
    wav, twav, wcenter = klauder(taxis, f=(10, 20))
    # expected two-sided length (see test_gaussian for the even/odd convention)
    nside = par["nt"] - 1 if par["nt"] % 2 == 0 else par["nt"]
    nexpected = 2 * nside - 1
    assert twav.size == nexpected
    assert wav.shape[0] == nexpected
    assert wav[wcenter] == 1
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_ormsby(par):
    """Ormsby wavelet: check two-sided size and unit amplitude at its centre."""
    taxis = np.arange(par["nt"]) * par["dt"]
    wav, twav, wcenter = ormsby(taxis, f=(5, 10, 25, 30))
    # expected two-sided length (same even/odd convention as the other wavelets)
    nside = par["nt"] - 1 if par["nt"] % 2 == 0 else par["nt"]
    nexpected = 2 * nside - 1
    assert twav.size == nexpected
    assert wav.shape[0] == nexpected
    assert wav[wcenter] == 1
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_ricker(par):
    """Ricker wavelet: check two-sided size and unit amplitude at its centre."""
    taxis = np.arange(par["nt"]) * par["dt"]
    wav, twav, wcenter = ricker(taxis, f0=20)
    # expected two-sided length (same even/odd convention as the other wavelets)
    nside = par["nt"] - 1 if par["nt"] % 2 == 0 else par["nt"]
    nexpected = 2 * nside - 1
    assert twav.size == nexpected
    assert wav.shape[0] == nexpected
    assert wav[wcenter] == 1
| 1,923 | 36 | 87 | py |
pylops | pylops-master/pytests/test_avo.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.signal import filtfilt
from scipy.sparse.linalg import lsqr
from pylops.avo.avo import (
akirichards,
approx_zoeppritz_pp,
fatti,
zoeppritz_element,
zoeppritz_pp,
zoeppritz_scattering,
)
from pylops.avo.prestack import AVOLinearModelling
from pylops.utils import dottest
# Module-level fixtures shared by all AVO tests: fix the seed so the
# filtered random perturbations below are reproducible.
np.random.seed(0)
# Create medium parameters for single contrast
vp1, vs1, rho1 = 2200.0, 1300.0, 2000  # upper medium
vp0, vs0, rho0 = 2300.0, 1400.0, 2100  # lower medium
# Create medium parameters for multiple contrasts (smooth random 1D profiles)
nt0 = 201
dt0 = 0.004
t0 = np.arange(nt0) * dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 80, nt0))
vs = 600 + vp / 2 + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 20, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))
# model vector of log-parameters, interleaved as (log vp, log vs, log rho)
m = (np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)).ravel()
# Angles
ntheta = 21
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)
# Parameters
par1 = {"vsvp": 0.5, "linearization": "akirich"}  # constant vsvp
par2 = {"vsvp": 0.5, "linearization": "fatti"}  # constant vsvp
par3 = {"vsvp": vs / vp, "linearization": "akirich"}  # time-variant vsvp
par4 = {"vsvp": vs / vp, "linearization": "fatti"}  # time-variant vsvp
def test_zoeppritz():
    """Validate zoeppritz using `CREWES Zoeppritz Explorer
    `<https://www.crewes.org/ResearchLinks/ExplorerPrograms/ZE/index.html>`_
    as benchmark
    """
    # full scattering matrix, single PP element, and direct PP reflectivity
    r_zoep = zoeppritz_scattering(vp1, vs1, rho1, vp0, vs0, rho0, theta[0])
    rpp_zoep = zoeppritz_element(
        vp1, vs1, rho1, vp0, vs0, rho0, theta[0], element="PdPu"
    )
    rpp_zoep1 = zoeppritz_pp(vp1, vs1, rho1, vp0, vs0, rho0, theta[0])
    # 0.04658 is the reference value from the CREWES explorer for this contrast
    assert r_zoep.shape == (4, 4, 1)
    assert r_zoep[0, 0] == pytest.approx(0.04658, rel=1e-3)
    assert rpp_zoep == pytest.approx(0.04658, rel=1e-3)
    assert rpp_zoep1 == pytest.approx(0.04658, rel=1e-3)
def test_zoeppritz_and_approx_zeroangle():
    """Validate zoeppritz and approximations at zero incident angle"""
    # Create composite parameters (acoustic and shear impedances)
    ai0, si0, _ = vp0 * rho0, vs0 * rho0, vp0 / vs0
    ai1, si1, _ = vp1 * rho1, vs1 * rho1, vp1 / vs1
    # Zoeppritz (exact and approximate)
    rpp_zoep = zoeppritz_pp(vp1, vs1, rho1, vp0, vs0, rho0, theta[0])
    rpp_zoep_approx = approx_zoeppritz_pp(vp1, vs1, rho1, vp0, vs0, rho0, theta[0])
    # Aki Richards: linearized reflectivity from log-contrasts
    rvp = np.log(vp0) - np.log(vp1)
    rvs = np.log(vs0) - np.log(vs1)
    rrho = np.log(rho0) - np.log(rho1)
    G1, G2, G3 = akirichards(theta[0], vs1 / vp1)
    rpp_aki = G1 * rvp + G2 * rvs + G3 * rrho
    # Fatti: linearized reflectivity from impedance log-contrasts
    rai = np.log(ai0) - np.log(ai1)
    rsi = np.log(si0) - np.log(si1)
    G1, G2, G3 = fatti(theta[0], vs1 / vp1)
    rpp_fatti = G1 * rai + G2 * rsi + G3 * rrho
    # at zero angle all approximations must agree with exact Zoeppritz
    assert_array_almost_equal(rpp_zoep, rpp_zoep_approx, decimal=3)
    assert_array_almost_equal(rpp_zoep, rpp_aki, decimal=3)
    assert_array_almost_equal(rpp_zoep, rpp_fatti, decimal=3)
def test_zoeppritz_and_approx_multipleangles():
    """Exact Zoeppritz vs its approximations over the 0-40 degree angle range."""
    # acoustic/shear impedances of the two media
    ai0, si0 = vp0 * rho0, vs0 * rho0
    ai1, si1 = vp1 * rho1, vs1 * rho1
    # log-contrasts across the interface
    rvp = np.log(vp0 / vp1)
    rvs = np.log(vs0 / vs1)
    rrho = np.log(rho0 / rho1)
    rai = np.log(ai0 / ai1)
    rsi = np.log(si0 / si1)
    # exact and approximate Zoeppritz PP reflectivity
    rpp_zoep = zoeppritz_pp(vp1, vs1, rho1, vp0, vs0, rho0, theta)
    rpp_zoep_approx = approx_zoeppritz_pp(vp1, vs1, rho1, vp0, vs0, rho0, theta)
    # Aki-Richards linearization
    Ga1, Ga2, Ga3 = akirichards(theta, vs1 / vp1)
    rpp_aki = Ga1 * rvp + Ga2 * rvs + Ga3 * rrho
    # Fatti linearization
    Gf1, Gf2, Gf3 = fatti(theta, vs1 / vp1)
    rpp_fatti = Gf1 * rai + Gf2 * rsi + Gf3 * rrho
    # all approximations must match exact Zoeppritz for this small contrast
    assert_array_almost_equal(rpp_zoep, rpp_zoep_approx, decimal=3)
    assert_array_almost_equal(rpp_zoep, rpp_aki, decimal=3)
    assert_array_almost_equal(rpp_zoep, rpp_fatti, decimal=3)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_AVOLinearModelling(par):
    """Dot-test and inversion for AVOLinearModelling"""
    AVOop = AVOLinearModelling(
        theta, vsvp=par["vsvp"], nt0=nt0, linearization=par["linearization"]
    )
    # maps 3 log-parameter profiles (nt0 samples each) to ntheta angle gathers
    assert dottest(AVOop, ntheta * nt0, 3 * nt0)
    # invert noise-free synthetic data and expect near-exact model recovery
    minv = lsqr(
        AVOop, AVOop * m, damp=1e-20, iter_lim=1000, atol=1e-8, btol=1e-8, show=0
    )[0]
    assert_array_almost_equal(m, minv, decimal=3)
| 4,526 | 32.533333 | 88 | py |
pylops | pylops-master/pytests/test_seismicevents.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from pylops.utils.seismicevents import (
hyperbolic2d,
hyperbolic3d,
linear2d,
linear3d,
makeaxis,
parabolic2d,
)
from pylops.utils.wavelets import ricker
# Wavelet shared by all seismic-event tests
wav = ricker(np.arange(41) * 0.004, f0=10)[0]
# Axis fixtures: even-length axes starting at the origin, and odd-length
# axes centered around zero
par1 = {
    "ot": 0,
    "dt": 1,
    "nt": 300,
    "ox": 0,
    "dx": 2,
    "nx": 200,
    "oy": 0,
    "dy": 2,
    "ny": 100,
}  # even axis
par2 = {
    "ot": 0,
    "dt": 1,
    "nt": 301,
    "ox": -200,
    "dx": 2,
    "nx": 201,
    "oy": -100,
    "dy": 2,
    "ny": 101,
}  # odd axis, centered to 0
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_makeaxis(par):
    """Check lengths and endpoints of the axes produced by makeaxis."""
    taxis, _, xaxis, yaxis = makeaxis(par)
    # each axis must have the requested number of samples
    assert len(taxis) == par["nt"]
    assert len(xaxis) == par["nx"]
    assert len(yaxis) == par["ny"]
    # each axis must start at its origin and end at origin + (n - 1) * step
    for axis, okey, dkey, nkey in (
        (taxis, "ot", "dt", "nt"),
        (xaxis, "ox", "dx", "nx"),
        (yaxis, "oy", "dy", "ny"),
    ):
        assert axis[0] == par[okey]
        assert axis[-1] == par[okey] + par[dkey] * (par[nkey] - 1)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_linear2d(par):
    """Create small dataset with an horizontal event and check that output
    contains the event at correct time and correct amplitude
    """
    # Data creation: a single flat (theta=0) event at sample t0
    v = 1
    t0 = 50
    theta = 0.0
    amp = 0.6
    # Create axes
    t, _, x, _ = makeaxis(par)
    # Create data (spike version and wavelet-convolved version)
    d, dwav = linear2d(x, t, v, t0, theta, amp, wav)
    # Assert shape
    assert d.shape[0] == par["nx"]
    assert d.shape[1] == par["nt"]
    assert dwav.shape[0] == par["nx"]
    assert dwav.shape[1] == par["nt"]
    # Assert correct position of event: constant amplitude along all traces
    assert_array_equal(d[:, t0], amp * np.ones(par["nx"]))
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_multilinear2d(par):
    """Create small dataset with several horizontal events and check that output
    contains the events at correct time and correct amplitude
    """
    # Data creation: two flat events with different times and amplitudes
    v = 1
    t0 = (50, 130)
    theta = (0.0, 0.0)
    amp = (0.6, 1)
    # Create axes
    t, _, x, _ = makeaxis(par)
    # Create data
    d, dwav = linear2d(x, t, v, t0, theta, amp, wav)
    # Assert shape
    assert d.shape[0] == par["nx"]
    assert d.shape[1] == par["nt"]
    assert dwav.shape[0] == par["nx"]
    assert dwav.shape[1] == par["nt"]
    # Assert correct position of each event along all traces
    assert_array_equal(d[:, t0[0]], amp[0] * np.ones(par["nx"]))
    assert_array_equal(d[:, t0[1]], amp[1] * np.ones(par["nx"]))
@pytest.mark.parametrize("par", [(par2)])
def test_parabolic2d(par):
    """Parabolic event: the apex must sit at (x=0, t=t0) with the given amplitude.

    Only par2 is used since its spatial axis is centered at zero, placing the
    apex on the middle trace.
    """
    # event parameters: apex time, linear and quadratic moveout, amplitude
    t0, px, pxx, amp = 50, 0, 1e-1, 0.6
    taxis, _, xaxis, _ = makeaxis(par)
    # unit "wavelet" so the raw event amplitude is preserved
    d, dwav = parabolic2d(xaxis, taxis, t0, px, pxx, amp, np.ones(1))
    # both outputs are (nx, nt)
    expected_shape = (par["nx"], par["nt"])
    assert d.shape == expected_shape
    assert dwav.shape == expected_shape
    # apex of the parabola is on the central trace at t0
    assert_array_equal(d[par["nx"] // 2, t0], amp)
@pytest.mark.parametrize("par", [(par2)])
def test_hyperbolic2d(par):
    """Hyperbolic event: the apex must sit at (x=0, t=t0) with the given amplitude.

    Only par2 is used since its spatial axis is centered at zero, placing the
    apex on the middle trace.
    """
    # event parameters: apex time, rms velocity, amplitude
    t0, vrms, amp = 50, 1, 0.6
    taxis, _, xaxis, _ = makeaxis(par)
    d, dwav = hyperbolic2d(xaxis, taxis, t0, vrms, amp, wav)
    # both outputs are (nx, nt)
    expected_shape = (par["nx"], par["nt"])
    assert d.shape == expected_shape
    assert dwav.shape == expected_shape
    # apex of the hyperbola is on the central trace at t0, in both the spike
    # and the wavelet-convolved data
    assert_array_equal(d[par["nx"] // 2, t0], amp)
    assert_array_equal(dwav[par["nx"] // 2, t0], amp)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_linear3d(par):
    """Create small dataset with an horizontal event and check output
    contains event at correct time and correct amplitude
    """
    # Data creation: single flat event (theta=phi=0) at sample t0
    v = 1
    t0 = 50
    theta = 0.0
    phi = 0.0
    amp = 0.6
    # Create axes
    t, _, x, y = makeaxis(par)
    # Create data
    d, dwav = linear3d(x, y, t, v, t0, theta, phi, amp, wav)
    # Assert shape: (ny, nx, nt)
    assert d.shape[0] == par["ny"]
    assert d.shape[1] == par["nx"]
    assert d.shape[2] == par["nt"]
    assert dwav.shape[0] == par["ny"]
    assert dwav.shape[1] == par["nx"]
    assert dwav.shape[2] == par["nt"]
    # Assert correct position of event: constant amplitude over the whole slice
    assert_array_equal(d[:, :, t0], amp * np.ones((par["ny"], par["nx"])))
    assert_array_equal(dwav[:, :, t0], amp * np.ones((par["ny"], par["nx"])))
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_multilinear3d(par):
    """Create small dataset with several linear events and check output
    contains the events at correct time and correct amplitude
    """
    # Data creation: two flat events with different times and amplitudes
    v = 1
    t0 = (50, 130)
    theta = (0.0, 0.0)
    phi = (0.0, 0.0)
    amp = (0.6, 1)
    # Create axes
    t, _, x, y = makeaxis(par)
    # Create data
    d, dwav = linear3d(x, y, t, v, t0, theta, phi, amp, wav)
    # Assert shape: (ny, nx, nt)
    assert d.shape[0] == par["ny"]
    assert d.shape[1] == par["nx"]
    assert d.shape[2] == par["nt"]
    assert dwav.shape[0] == par["ny"]
    assert dwav.shape[1] == par["nx"]
    assert dwav.shape[2] == par["nt"]
    # Assert correct position of each event over the whole time slice
    assert_array_equal(d[:, :, t0[0]], amp[0] * np.ones((par["ny"], par["nx"])))
    assert_array_equal(d[:, :, t0[1]], amp[1] * np.ones((par["ny"], par["nx"])))
@pytest.mark.parametrize("par", [(par2)])
def test_hyperbolic3d(par):
    """Create small dataset with several hyperbolic events and check output
    contains the events at correct time and correct amplitude
    """
    # Data creation: apex time, rms velocities along x/y, amplitude
    # (par2 only, whose spatial axes are centered at zero)
    t0 = 50
    vrms_x = 1.0
    vrms_y = 1.0
    amp = 0.6
    # Create axes
    t, _, x, y = makeaxis(par)
    # Create data
    d, dwav = hyperbolic3d(x, y, t, t0, vrms_x, vrms_y, amp, wav)
    # Assert shape: (ny, nx, nt)
    assert d.shape[0] == par["ny"]
    assert d.shape[1] == par["nx"]
    assert d.shape[2] == par["nt"]
    assert dwav.shape[0] == par["ny"]
    assert dwav.shape[1] == par["nx"]
    assert dwav.shape[2] == par["nt"]
    # Assert correct position of event: apex at the central trace
    assert_array_equal(d[par["ny"] // 2, par["nx"] // 2, t0], amp)
| 6,630 | 23.742537 | 80 | py |
pylops | pylops-master/pytests/test_dct.py | import numpy as np
import pytest
from pylops.signalprocessing import DCT
from pylops.utils import dottest
# Fixtures covering square and rectangular grids of different sizes
par1 = {"ny": 11, "nx": 11, "imag": 0, "dtype": "float64"}
par2 = {"ny": 11, "nx": 21, "imag": 0, "dtype": "float64"}
par3 = {"ny": 21, "nx": 21, "imag": 0, "dtype": "float64"}
@pytest.mark.parametrize("par", [(par1), (par3)])
def test_DCT1D(par):
    """Dot test and adjoint round-trip for the 1D Discrete Cosine Transform.

    Exercises all four DCT types; for each one the operator must pass the
    dot-test and ``Dct.H * Dct`` must return the input unchanged.
    """
    t = np.arange(par["ny"]) + 1
    # renamed loop variable from `type` to avoid shadowing the builtin
    for dct_type in [1, 2, 3, 4]:
        Dct = DCT(dims=(par["ny"],), type=dct_type, dtype=par["dtype"])
        assert dottest(Dct, par["ny"], par["ny"], rtol=1e-6, complexflag=0, verb=True)
        y = Dct.H * (Dct * t)
        np.testing.assert_allclose(t, y)
@pytest.mark.parametrize("par", [(par1), (par2), (par3)])
def test_DCT2D(par):
    """Dot test and adjoint round-trip for the 2D Discrete Cosine Transform.

    Exercises all four DCT types applied along each of the two axes in turn.
    """
    t = np.outer(np.arange(par["ny"]) + 1, np.arange(par["nx"]) + 1)
    # renamed loop variable from `type` to avoid shadowing the builtin
    for dct_type in [1, 2, 3, 4]:
        for axes in [0, 1]:
            Dct = DCT(dims=t.shape, type=dct_type, axes=axes, dtype=par["dtype"])
            assert dottest(
                Dct,
                par["nx"] * par["ny"],
                par["nx"] * par["ny"],
                rtol=1e-6,
                complexflag=0,
                verb=True,
            )
            y = Dct.H * (Dct * t)
            np.testing.assert_allclose(t, y)
@pytest.mark.parametrize("par", [(par1), (par2), (par3)])
def test_DCT3D(par):
    """Dot test and adjoint round-trip for the 3D Discrete Cosine Transform.

    Exercises all four DCT types applied along each of the three axes in turn
    on a random cubic volume of side ``nx``.
    """
    t = np.random.rand(par["nx"], par["nx"], par["nx"])
    # renamed loop variable from `type` to avoid shadowing the builtin
    for dct_type in [1, 2, 3, 4]:
        for axes in [0, 1, 2]:
            Dct = DCT(dims=t.shape, type=dct_type, axes=axes, dtype=par["dtype"])
            assert dottest(
                Dct,
                par["nx"] * par["nx"] * par["nx"],
                par["nx"] * par["nx"] * par["nx"],
                rtol=1e-6,
                complexflag=0,
                verb=True,
            )
            y = Dct.H * (Dct * t)
            np.testing.assert_allclose(t, y)
@pytest.mark.parametrize("par", [(par1), (par3)])
def test_DCT_workers(par):
    """DCT with multiple workers: dot-test and adjoint round-trip still hold."""
    signal = np.arange(par["ny"]) + 1
    n = par["ny"]
    Dct = DCT(dims=(n,), type=1, dtype=par["dtype"], workers=2)
    assert dottest(Dct, n, n, rtol=1e-6, complexflag=0, verb=True)
    roundtrip = Dct.H * (Dct * signal)
    np.testing.assert_allclose(signal, roundtrip)
| 2,438 | 29.4875 | 86 | py |
pylops | pylops-master/pytests/test_seismicinterpolation.py | # import multiprocessing
import numpy as np
import pytest
from pylops.basicoperators import Restriction
from pylops.utils.seismicevents import linear2d, linear3d, makeaxis
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.seismicinterpolation import SeismicInterpolation
# Module-level fixtures: seed fixed so the random subsampling mask is reproducible
np.random.seed(5)
# avoid timeout in travis and azure-pipeline(linux) for numba
# if multiprocessing.cpu_count() >= 4:
#    engine = 'numba'
# else:
engine = "numpy"
# params
par = {
    "oy": 0,
    "dy": 2,
    "ny": 30,
    "ox": 0,
    "dx": 2,
    "nx": 10,
    "ot": 0,
    "dt": 0.004,
    "nt": 40,
    "f0": 25,
}
# three linear events with different dips and amplitudes
v = 1500
t0 = [0.05, 0.1, 0.12]
theta = [0, 30, -60]
phi = [0, 50, 30]
amp = [1.0, -2, 0.5]
# random subsampling: keep 70% of the receivers along y
perc_subsampling = 0.7
nysub = int(np.round(par["ny"] * perc_subsampling))
iava = np.sort(np.random.permutation(np.arange(par["ny"]))[:nysub])
taxis, taxis2, xaxis, yaxis = makeaxis(par)
wav = ricker(taxis[:41], f0=par["f0"])[0]
# 2d and 3d reference models (wavelet-convolved versions)
_, x2d = linear2d(yaxis, taxis, v, t0, theta, amp, wav)
_, x3d = linear3d(xaxis, yaxis, taxis, v, t0, theta, phi, amp, wav)
# Create restriction operator and decimated data for the 2d case
Rop2d = Restriction((par["ny"], par["nt"]), iava, axis=0, dtype="float64")
y2d = Rop2d * x2d.ravel()
y2d = y2d.reshape(nysub, par["nt"])
# same for the 3d case
Rop3d = Restriction((par["ny"], par["nx"], par["nt"]), iava, axis=0, dtype="float64")
y3d = Rop3d * x3d.ravel()
y3d = y3d.reshape(nysub, par["nx"], par["nt"])
# Interpolation fixtures: one per `kind` supported by SeismicInterpolation,
# each carrying the solver kwargs appropriate for that kind
par1_2d = {
    "kind": "spatial",
    "kwargs": dict(epsRs=[np.sqrt(0.1)], damp=np.sqrt(1e-4), iter_lim=20, show=0),
}
par2_2d = {
    "kind": "fk",
    "kwargs": dict(
        nffts=(2**9, 2**9),
        sampling=(par["dy"], par["dt"]),
        niter=20,
        eps=1e-2,
        eigsdict={"niter": 4},
    ),
}
par3_2d = {
    "kind": "radon-linear",
    "kwargs": dict(
        paxis=np.linspace(-1e-3, 1e-3, 50),
        centeredh=True,
        niter=20,
        eps=1e-1,
        eigsdict={"niter": 4},
    ),
}
par4_2d = {
    "kind": "sliding",
    "kwargs": dict(
        paxis=np.linspace(-1e-3, 1e-3, 50),
        nwin=12,
        nwins=3,
        nover=3,
        niter=20,
        eps=1e-1,
        eigsdict={"niter": 4},
    ),
}
# merge the axis/geometry parameters into each fixture
par1_2d.update(par)
par2_2d.update(par)
par3_2d.update(par)
par4_2d.update(par)
par1_3d = par1_2d
par2_3d = {
    "kind": "fk",
    "kwargs": dict(
        nffts=(2**7, 2**7, 2**8),
        sampling=(par["dy"], par["dx"], par["dt"]),
        niter=20,
        eps=5e-2,
        alpha=1e0,
        show=False,
    ),
}
par3_3d = {
    "kind": "radon-linear",
    "kwargs": dict(
        paxis=np.linspace(-1e-3, 1e-3, 21),
        p1axis=np.linspace(-1e-3, 1e-3, 50),
        centeredh=True,
        niter=20,
        eps=1e-3,
        alpha=1.3e-6,
        show=False,
    ),
}
par4_3d = {
    "kind": "sliding",
    "kwargs": dict(
        paxis=np.linspace(-1e-3, 1e-3, 21),
        p1axis=np.linspace(-1e-3, 1e-3, 21),
        nwin=(12, 5),
        nwins=(3, 2),
        nover=(3, 2),
        niter=20,
        eps=1e-2,
        alpha=1.3e-4,
        show=False,
    ),
}
par1_3d.update(par)
par2_3d.update(par)
par3_3d.update(par)
par4_3d.update(par)
@pytest.mark.parametrize("par", [(par1_2d), (par2_2d), (par3_2d), (par4_2d)])
def test_SeismicInterpolation2d(par):
    """Dot-test and inversion for SeismicInterpolation in 2d"""
    # reconstruct the full 2d gather from the decimated data y2d and compare
    # against the reference model x2d (dottest=True also runs the dot-test)
    xinv, _, _ = SeismicInterpolation(
        y2d,
        par["ny"],
        iava,
        kind=par["kind"],
        spataxis=yaxis,
        taxis=taxis,
        engine=engine,
        dottest=True,
        **par["kwargs"]
    )
    assert np.linalg.norm(x2d - xinv) / np.linalg.norm(xinv) < 2e-1
# radon-linear and sliding 3d fixtures currently disabled:
# , (par3_3d), (par4_3d)])
@pytest.mark.parametrize("par", [(par1_3d), (par2_3d)])
def test_SeismicInterpolation3d(par):
    """Dot-test and inversion for SeismicInterpolation in 3d"""
    xinv, _, _ = SeismicInterpolation(
        y3d,
        (par["ny"], par["nx"]),
        iava,
        kind=par["kind"],
        spataxis=yaxis,
        spat1axis=xaxis,
        taxis=taxis,
        engine=engine,
        dottest=True,
        **par["kwargs"]
    )
    # remove edges before checking inversion if using sliding windows
    if par["kind"] == "sliding":
        win0in = par["kwargs"]["nover"][0]
        win0end = (par["kwargs"]["nwin"][0] - par["kwargs"]["nover"][0]) * par[
            "kwargs"
        ]["nwins"][0] - par["kwargs"]["nover"][0]
        win1in = par["kwargs"]["nover"][1]
        win1end = (par["kwargs"]["nwin"][1] - par["kwargs"]["nover"][1]) * par[
            "kwargs"
        ]["nwins"][1] - par["kwargs"]["nover"][1]
        x3dwin = x3d[win0in:win0end, win1in:win1end]
        xinvwin = xinv[win0in:win0end, win1in:win1end]
    else:
        x3dwin = x3d.copy()
        xinvwin = xinv.copy()
    # relative reconstruction error within 30%
    assert np.linalg.norm(x3dwin - xinvwin) / np.linalg.norm(xinvwin) < 3e-1
| 4,790 | 23.823834 | 85 | py |
pylops | pylops-master/pytests/test_blending.py | import numpy as np
import pytest
from pylops.utils import dottest
from pylops.waveeqprocessing import BlendingContinuous, BlendingGroup, BlendingHalf
# Shared geometry for the blending tests
par = {"nt": 101, "ns": 50, "nr": 20, "dtype": "float64"}
# NOTE(review): `d` is not referenced by the tests below — candidate for removal
d = np.random.normal(0, 1, (par["ns"], par["nr"], par["nt"]))
dt = 0.004
@pytest.mark.parametrize("par", [(par)])
def test_Blending_continuous(par):
    """Dot-test for continuous Blending operator"""
    np.random.seed(0)
    # ignition times: random jitter in [-1, 1) s on top of a regular schedule
    # with 50% overlap between consecutive shots; first shot fires at t=0
    overlap = 0.5
    ignition_times = 2.0 * np.random.rand(par["ns"]) - 1.0
    ignition_times += (
        np.arange(0, overlap * par["nt"] * par["ns"], overlap * par["nt"]) * dt
    )
    ignition_times[0] = 0.0
    Bop = BlendingContinuous(
        par["nt"],
        par["nr"],
        par["ns"],
        dt,
        ignition_times,
        dtype=par["dtype"],
    )
    # blended record length (Bop.nttot) is computed by the operator itself
    assert dottest(
        Bop,
        Bop.nttot * par["nr"],
        par["nt"] * par["ns"] * par["nr"],
    )
@pytest.mark.parametrize("par", [(par)])
def test_Blending_group(par):
    """Dot-test for the group Blending operator (sources fired in groups)."""
    np.random.seed(0)
    nsrc_per_group = 2
    ngroups = par["ns"] // nsrc_per_group
    # random firing times in [0, 0.8) s, one per source
    times = 0.8 * np.random.rand(par["ns"])
    Bop = BlendingGroup(
        par["nt"],
        par["nr"],
        par["ns"],
        dt,
        times.reshape(nsrc_per_group, ngroups),
        n_groups=ngroups,
        group_size=nsrc_per_group,
        dtype=par["dtype"],
    )
    # blended data: one record per group; unblended: one record per source
    nblended = par["nt"] * ngroups * par["nr"]
    nunblended = par["nt"] * par["ns"] * par["nr"]
    assert dottest(Bop, nblended, nunblended)
@pytest.mark.parametrize("par", [(par)])
def test_Blending_half(par):
    """Dot-test for the half Blending operator."""
    np.random.seed(0)
    nsrc_per_group = 2
    ngroups = par["ns"] // nsrc_per_group
    # random firing times in [0, 0.8) s, one per source
    times = 0.8 * np.random.rand(par["ns"])
    Bop = BlendingHalf(
        par["nt"],
        par["nr"],
        par["ns"],
        dt,
        times.reshape(nsrc_per_group, ngroups),
        n_groups=ngroups,
        group_size=nsrc_per_group,
        dtype=par["dtype"],
    )
    # blended data: one record per group; unblended: one record per source
    nblended = par["nt"] * ngroups * par["nr"]
    nunblended = par["nt"] * par["ns"] * par["nr"]
    assert dottest(Bop, nblended, nunblended)
| 2,175 | 24.011494 | 83 | py |
pylops | pylops-master/pytests/test_prestack.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.signal import filtfilt
from pylops.avo.prestack import (
PrestackInversion,
PrestackLinearModelling,
PrestackWaveletModelling,
_linearizations,
)
from pylops.utils import dottest
from pylops.utils.wavelets import ricker
# Module-level fixtures: seed fixed so the random model perturbations are
# reproducible across test runs
np.random.seed(10)
# params
dt0 = 0.004
ntwav = 41
ntheta = 7
nsmooth = 50
# angles
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)
# 1d model: smooth random elastic profiles and their log-parameter stack
nt0 = 184
t0 = np.arange(nt0) * dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 40, nt0))
vs = 600 + vp / 2 + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 20, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)
# smooth background model used as starting guess in the inversions
mback = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m, axis=0)
# 2d model loaded from file and decimated
inputfile = "testdata/avo/poststack_model.npz"
model = np.load(inputfile)
z, x, model = (
    model["z"][::3] / 1000.0,
    model["x"][::5] / 1000.0,
    1000 * model["model"][::3, ::5],
)
nx, nz = len(x), len(z)
# derive vs and rho sections from the vp section
mvp = model.copy()
mvs = model / 2
mrho = model / 3 + 300
m2d = np.log(np.stack((mvp, mvs, mrho), axis=1))
# smooth background (in both z and x)
mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m2d, axis=0)
mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, mback2d, axis=2)
# wavelet
wav, twav, wavc = ricker(t0[: ntwav // 2 + 1], 20)
# shifted wavelet (used to test non-zero-phase wavelet estimation)
wavoff = 10
wav_phase = np.hstack((wav[wavoff:], np.zeros(wavoff)))
# Inversion fixtures. Each dict selects: vsvp (scalar or time-variant),
# linearization ("akirich"/"fatti"), Tikhonov weights (epsR/epsI), blocky
# regularization (epsRL1), trace-by-trace vs simultaneous inversion, and the
# derivative kind ("centered"/"forward").
# constant vsvp, aki-richards approx, unregularized, trace-by-trace
par1 = {
    "vsvp": 0.5,
    "linearization": "akirich",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": False,
    "kind": "centered",
}
# constant vsvp, fatti approx, unregularized, trace-by-trace
par2 = {
    "vsvp": 0.5,
    "linearization": "fatti",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": False,
    "kind": "forward",
}
# time-variant vsvp, aki-richards approx, unregularized, trace-by-trace
par3 = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "akirich",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": False,
    "kind": "centered",
}
# time-variant vsvp, fatti approx, unregularized, trace-by-trace
par4 = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "fatti",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": False,
    "kind": "forward",
}
# constant vsvp, aki-richards approx, unregularized, simultaneous
par1s = {
    "vsvp": 0.5,
    "linearization": "akirich",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": True,
    "kind": "centered",
}
# constant vsvp, fatti approx, unregularized, simultaneous
par2s = {
    "vsvp": 0.5,
    "linearization": "fatti",
    "epsR": None,
    "epsRL1": None,
    "epsI": None,
    "simultaneous": True,
    "kind": "forward",
}
# time-variant vsvp, aki-richards approx, unregularized, simultaneous
par3s = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "akirich",
    "epsR": None,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "centered",
}
# time-variant vsvp, fatti approx, unregularized, simultaneous
par4s = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "fatti",
    "epsR": None,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "forward",
}
# constant vsvp, aki-richards approx, regularized, simultaneous
par1r = {
    "vsvp": 0.5,
    "linearization": "akirich",
    "epsR": 1e-4,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "centered",
}
# constant vsvp, fatti approx, regularized, simultaneous
par2r = {
    "vsvp": 0.5,
    "linearization": "fatti",
    "epsR": 1e-4,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "forward",
}
# time-variant vsvp, aki-richards approx, regularized, simultaneous
par3r = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "akirich",
    "epsR": 1e-4,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "centered",
}
# time-variant vsvp, fatti approx, regularized, simultaneous
par4r = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "fatti",
    "epsR": 1e-4,
    "epsRL1": None,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "forward",
}
# constant vsvp, aki-richards approx, blocky, simultaneous
par1b = {
    "vsvp": 0.5,
    "linearization": "akirich",
    "epsR": 1e-4,
    "epsRL1": 1e-2,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "centered",
}
# time-variant vsvp, aki-richards approx, regularized, simultaneous
par3b = {
    "vsvp": np.linspace(0.4, 0.6, nt0),
    "linearization": "akirich",
    "epsR": 1e-4,
    "epsRL1": 1e-2,
    "epsI": 1e-6,
    "simultaneous": True,
    "kind": "forward",
}
@pytest.mark.parametrize(
    "par",
    [
        (par1),
        (par2),
        (par3),
        (par4),
        (par1s),
        (par2s),
        (par3s),
        (par4s),
        (par1r),
        (par2r),
        (par3r),
        (par4r),
        (par1b),
        (par3b),
    ],
)
def test_PrestackLinearModelling(par):
    """Dot-test, comparison of dense vs lop implementation and
    inversion for PrestackLinearModelling
    """
    # Dense (explicit matrix) version of the operator
    PPop_dense = PrestackLinearModelling(
        wav,
        theta,
        vsvp=par["vsvp"],
        nt0=nt0,
        linearization=par["linearization"],
        explicit=True,
        kind=par["kind"],
    )
    assert dottest(
        PPop_dense, nt0 * ntheta, nt0 * _linearizations[par["linearization"]]
    )
    # Linear operator (matrix-free) version
    PPop = PrestackLinearModelling(
        wav,
        theta,
        vsvp=par["vsvp"],
        nt0=nt0,
        linearization=par["linearization"],
        explicit=False,
        kind=par["kind"],
    )
    assert dottest(PPop, nt0 * ntheta, nt0 * _linearizations[par["linearization"]])
    # Compare data: note the two implementations expect differently ordered
    # model vectors (m.ravel() vs m.T.ravel()), hence the reshapes below
    d = PPop * m.ravel()
    d = d.reshape(nt0, ntheta)
    d_dense = PPop_dense * m.T.ravel()
    d_dense = d_dense.reshape(ntheta, nt0).T
    assert_array_almost_equal(d, d_dense, decimal=4)
    # Inversion: solver kwargs depend on the fixture (trace-by-trace,
    # simultaneous, or blocky split-Bregman)
    for explicit in [True, False]:
        dict_inv = dict(iter_lim=10)
        if not par["simultaneous"]:
            dict_inv = {}
        if not explicit:
            dict_inv = dict(iter_lim=10)
        if par["epsRL1"] is not None:
            dict_inv = dict(mu=0.1, niter_outer=5, niter_inner=5, iter_lim=8)
        minv = PrestackInversion(
            d,
            theta,
            wav,
            m0=mback,
            explicit=explicit,
            epsI=par["epsI"],
            epsR=par["epsR"],
            epsRL1=par["epsRL1"],
            simultaneous=par["simultaneous"],
            kind=par["kind"],
            **dict_inv
        )
        assert np.linalg.norm(m - minv) / np.linalg.norm(minv) < 4e-2
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_PrestackWaveletModelling(par):
    """Dot-test and inversion for PrestackWaveletModelling"""
    # Operators (two identical instances: one fed a zero-phase wavelet, the
    # other a shifted, non-zero-phase one)
    Wavestop = PrestackWaveletModelling(
        m,
        theta,
        nwav=ntwav,
        wavc=wavc,
        vsvp=par["vsvp"],
        linearization=par["linearization"],
    )
    assert dottest(Wavestop, nt0 * ntheta, ntwav)
    Wavestop_phase = PrestackWaveletModelling(
        m,
        theta,
        nwav=ntwav,
        wavc=wavc,
        vsvp=par["vsvp"],
        linearization=par["linearization"],
    )
    assert dottest(Wavestop_phase, nt0 * ntheta, ntwav)
    # Create data for both wavelets
    d = (Wavestop * wav).reshape(ntheta, nt0).T
    d_phase = (Wavestop_phase * wav_phase).reshape(ntheta, nt0).T
    # Estimate wavelet via least-squares division and check recovery
    wav_est = Wavestop / d.T.ravel()
    wav_phase_est = Wavestop_phase / d_phase.T.ravel()
    assert_array_almost_equal(wav, wav_est, decimal=3)
    assert_array_almost_equal(wav_phase, wav_phase_est, decimal=3)
@pytest.mark.parametrize(
    "par", [(par1), (par3), (par2s), (par4s), (par1r), (par3r), (par1b), (par3b)]
)
def test_PrestackLinearModelling2d(par):
    """Dot-test and inversion for PoststackLinearModelling in 2d"""
    # number of elastic parameters for the chosen linearization
    nm = _linearizations[par["linearization"]]
    # Dense (explicit matrix) version of the operator
    PPop_dense = PrestackLinearModelling(
        wav,
        theta,
        vsvp=par["vsvp"],
        nt0=nz,
        spatdims=(nx,),
        linearization=par["linearization"],
        explicit=True,
    )
    assert dottest(PPop_dense, nz * ntheta * nx, nz * nm * nx)
    # Linear operator (matrix-free) version
    PPop = PrestackLinearModelling(
        wav,
        theta,
        vsvp=par["vsvp"],
        nt0=nz,
        spatdims=(nx,),
        linearization=par["linearization"],
        explicit=False,
    )
    assert dottest(PPop_dense, nz * ntheta * nx, nz * nm * nx)
    # Compare data: the dense operator expects the model axes swapped,
    # hence the swapaxes on input and output
    d = (PPop * m2d.ravel()).reshape(nz, ntheta, nx)
    d_dense = (
        (PPop_dense * m2d.swapaxes(0, 1).ravel()).reshape(ntheta, nz, nx).swapaxes(0, 1)
    )
    assert_array_almost_equal(d, d_dense, decimal=4)
    # Inversion with fixture-dependent solver kwargs
    for explicit in [True, False]:
        dict_inv = dict(iter_lim=10)
        if not par["simultaneous"]:
            dict_inv = {}
        if not explicit:
            dict_inv = dict(iter_lim=10)
        if par["epsRL1"] is not None:
            dict_inv = dict(mu=0.1, niter_outer=3, niter_inner=3, iter_lim=5)
        minv2d, dinv2d = PrestackInversion(
            d,
            theta,
            wav,
            m0=mback2d,
            explicit=explicit,
            epsI=par["epsI"],
            epsR=par["epsR"],
            epsRL1=par["epsRL1"],
            simultaneous=par["simultaneous"],
            returnres=True,
            **dict_inv
        )
        assert np.linalg.norm(m2d - minv2d) / np.linalg.norm(minv2d) < 2e-1
| 9,847 | 24.512953 | 88 | py |
pylops | pylops-master/pytests/test_seislet.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import FunctionOperator
from pylops.signalprocessing import Seislet
from pylops.signalprocessing.seislet import _predict_haar, _predict_lin, _predict_trace
from pylops.utils import dottest
par1 = {
"nx": 16,
"nt": 30,
"dx": 10,
"dt": 0.004,
"level": None,
"dtype": "float32",
} # nx power of 2, max level
par2 = {
"nx": 16,
"nt": 30,
"dx": 10,
"dt": 0.004,
"level": 2,
"dtype": "float32",
} # nx power of 2, smaller level
par3 = {
"nx": 13,
"nt": 30,
"dx": 10,
"dt": 0.004,
"level": 2,
"dtype": "float32",
} # nx not power of 2, max level
np.random.seed(10)
@pytest.mark.parametrize("par", [(par1)])
def test_predict_trace(par):
    """Dot-test for _predict_trace operator"""
    nt, dt, dx = par["nt"], par["dt"], par["dx"]
    taxis = np.arange(nt) * dt
    # check adjointness for negative, zero and positive slopes
    for slope in [-0.2, 0.0, 0.3]:
        forward = lambda x: _predict_trace(x, taxis, dt, dx, slope)
        adjoint = lambda x: _predict_trace(x, taxis, dt, dx, slope, adj=True)
        Fop = FunctionOperator(forward, adjoint, nt, nt)
        dottest(Fop, nt, nt)
@pytest.mark.parametrize("par", [(par1)])
def test_predict(par):
    """Dot-test for _predict operator"""
    def _predict_reshape(
        predictor, traces, nt, nx, dt, dx, slopes, repeat=0, backward=False, adj=False
    ):
        # Helper: reshape the flat input to (nx, nt) before calling the
        # 2d predictor, so it can be wrapped as a 1d-in/1d-out operator
        return predictor(
            traces.reshape(nx, nt),
            dt,
            dx,
            slopes,
            repeat=repeat,
            backward=backward,
            adj=adj,
        )
    # Exercise both haar and linear predictors, with 0/1/2 levels of trace
    # interleaving (repeat) and both prediction directions
    for predictor in (_predict_haar, _predict_lin):
        for repeat in (0, 1, 2):
            # slope field must cover the 2**(repeat+1) * nx interleaved traces
            slope = np.random.normal(0, 0.1, (2 ** (repeat + 1) * par["nx"], par["nt"]))
            for backward in (False, True):
                Fop = FunctionOperator(
                    lambda x: _predict_reshape(
                        predictor,
                        x,
                        par["nt"],
                        par["nx"],
                        par["dt"],
                        par["dx"],
                        slope,
                        backward=backward,
                    ),
                    lambda x: _predict_reshape(
                        predictor,
                        x,
                        par["nt"],
                        par["nx"],
                        par["dt"],
                        par["dx"],
                        slope,
                        backward=backward,
                        adj=True,
                    ),
                    par["nt"] * par["nx"],
                    par["nt"] * par["nx"],
                )
                dottest(Fop, par["nt"] * par["nx"], par["nt"] * par["nx"])
@pytest.mark.parametrize("par", [(par1), (par2), (par3)])
def test_Seislet(par):
    """Dot-test and forward-inverse for Seislet"""
    nx, nt = par["nx"], par["nt"]
    slopes = np.random.normal(0, 0.1, (nx, nt))
    for kind in ("haar", "linear"):
        Sop = Seislet(
            slopes,
            sampling=(par["dx"], par["dt"]),
            level=par["level"],
            kind=kind,
            dtype=par["dtype"],
        )
        # adjoint consistency
        dottest(Sop, Sop.shape[0], nx * nt)
        # forward followed by inverse must recover the model
        model = np.random.normal(0, 0.1, nx * nt)
        data = Sop * model
        assert_array_almost_equal(model, Sop.inverse(data))
| 3,456 | 27.808333 | 88 | py |
pylops | pylops-master/pytests/test_poststack.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.signal import filtfilt
from pylops.avo.poststack import PoststackInversion, PoststackLinearModelling
from pylops.utils import dottest
from pylops.utils.wavelets import ricker
np.random.seed(10)
# params
dt0 = 0.004  # time sampling [s]
ntwav = 41  # number of wavelet samples
nsmooth = 50  # smoothing-filter length for background models
# 1d model: random-perturbed velocity/density profiles
nt0 = 201
t0 = np.arange(nt0) * dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 80, nt0))
rho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))
# acoustic impedance in log domain
m = np.log(vp * rho)
# smooth background model used as starting guess for inversion
mback = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m)
# 2d model loaded from file and decimated by 3 in both directions
inputfile = "testdata/avo/poststack_model.npz"
model = np.load(inputfile)
x, z, m2d = model["x"][::3], model["z"][::3], np.log(model["model"][::3, ::3])
nx, nz = len(x), len(z)
mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m2d, axis=0)
mback2d = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, mback2d, axis=1)
# stationary wavelet
wav = ricker(t0[: ntwav // 2 + 1], 20)[0]
# non-stationary wavelet: one Ricker per time sample, frequency decreasing with time
f0s = np.flip(np.arange(nt0) * 0.05 + 3)
wavs = np.array([ricker(t0[:ntwav], f0)[0] for f0 in f0s])
wavc = np.argmax(wavs[0])
par1 = {
"epsR": None,
"epsRL1": None,
"epsI": None,
"simultaneous": False,
} # unregularized
par2 = {
"epsR": 1e-4,
"epsRL1": None,
"epsI": 1e-6,
"simultaneous": False,
"kind": "centered",
} # regularized, centered
par3 = {
"epsR": 1e-4,
"epsRL1": None,
"epsI": 1e-6,
"simultaneous": False,
"kind": "forward",
} # regularized, forward
par4 = {
"epsR": None,
"epsRL1": None,
"epsI": None,
"simultaneous": True,
} # unregularized, simultaneous
par5 = {
"epsR": 1e-4,
"epsRL1": None,
"epsI": 1e-6,
"simultaneous": True,
"kind": "centered",
} # regularized, simultaneous, centered
par6 = {
"epsR": 1e-4,
"epsRL1": None,
"epsI": 1e-6,
"simultaneous": True,
"kind": "forward",
} # regularized, simultaneous, forward
par7 = {
"epsR": 1e-4,
"epsRL1": 1e-1,
"epsI": 1e-6,
"simultaneous": True,
} # blocky, simultaneous
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_PoststackLinearModelling1d(par):
    """Dot-test, comparison of dense vs lop implementation and
    inversion for PoststackLinearModelling in 1d with stationary wavelet
    """
    # Dense
    PPop_dense = PoststackLinearModelling(wav, nt0=nt0, explicit=True)
    assert dottest(PPop_dense, nt0, nt0, rtol=1e-4)
    # Linear operator
    PPop = PoststackLinearModelling(wav, nt0=nt0, explicit=False)
    assert dottest(PPop, nt0, nt0, rtol=1e-4)
    # Compare data: both implementations must produce the same seismic trace
    d = PPop * m.ravel()
    d_dense = PPop_dense * m.T.ravel()
    assert_array_almost_equal(d, d_dense, decimal=4)
    # Inversion: unregularized case is solved directly, regularized case
    # via an iterative solver with damping
    for explicit in [True, False]:
        if par["epsR"] is None:
            dict_inv = {}
        else:
            dict_inv = dict(damp=0 if par["epsI"] is None else par["epsI"], iter_lim=80)
        minv = PoststackInversion(
            d,
            wav,
            m0=mback,
            explicit=explicit,
            epsR=par["epsR"],
            epsI=par["epsI"],
            simultaneous=par["simultaneous"],
            **dict_inv
        )[0]
        # recovered impedance must be close to the true model
        assert np.linalg.norm(m - minv) / np.linalg.norm(minv) < 1e-2
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_PoststackLinearModelling1d_nonstationary(par):
    """Dot-test, comparison of dense vs lop implementation and
    inversion for PoststackLinearModelling in 1d with nonstationary wavelet
    """
    # Dense (wavs: one wavelet per time sample)
    PPop_dense = PoststackLinearModelling(wavs, nt0=nt0, explicit=True)
    assert dottest(PPop_dense, nt0, nt0, rtol=1e-4)
    # Linear operator
    PPop = PoststackLinearModelling(wavs, nt0=nt0, explicit=False)
    assert dottest(PPop, nt0, nt0, rtol=1e-4)
    # Compare data: both implementations must produce the same seismic trace
    d = PPop * m.ravel()
    d_dense = PPop_dense * m.T.ravel()
    assert_array_almost_equal(d, d_dense, decimal=4)
    # Inversion: unregularized case is solved directly, regularized case
    # via an iterative solver with damping
    for explicit in [True, False]:
        if par["epsR"] is None:
            dict_inv = {}
        else:
            dict_inv = dict(damp=0 if par["epsI"] is None else par["epsI"], iter_lim=80)
        minv = PoststackInversion(
            d,
            wavs,
            m0=mback,
            explicit=explicit,
            epsR=par["epsR"],
            epsI=par["epsI"],
            simultaneous=par["simultaneous"],
            **dict_inv
        )[0]
        # recovered impedance must be close to the true model
        assert np.linalg.norm(m - minv) / np.linalg.norm(minv) < 1e-2
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7)]
)
def test_PoststackLinearModelling2d(par):
    """Dot-test and inversion for PoststackLinearModelling in 2d"""
    # NOTE(review): par2/par3/par5/par6 carry a "kind" key that is never
    # forwarded to PoststackInversion — confirm whether it should be passed.
    # Dense
    PPop_dense = PoststackLinearModelling(wav, nt0=nz, spatdims=nx, explicit=True)
    assert dottest(PPop_dense, nz * nx, nz * nx, rtol=1e-4)
    # Linear operator
    PPop = PoststackLinearModelling(wav, nt0=nz, spatdims=nx, explicit=False)
    assert dottest(PPop, nz * nx, nz * nx, rtol=1e-4)
    # Compare data produced by the dense and on-the-fly implementations
    d = (PPop * m2d.ravel()).reshape(nz, nx)
    d_dense = (PPop_dense * m2d.ravel()).reshape(nz, nx)
    assert_array_almost_equal(d, d_dense, decimal=4)
    # Inversion
    for explicit in [True, False]:
        # Only the explicit, non-simultaneous, unregularized case is solved
        # directly; all other combinations use an iterative solver with
        # damping (the original had two byte-identical elif/else branches
        # here, collapsed into a single else).
        if explicit and not par["simultaneous"] and par["epsR"] is None:
            dict_inv = {}
        else:
            dict_inv = dict(damp=0 if par["epsI"] is None else par["epsI"], iter_lim=10)
        minv2d = PoststackInversion(
            d,
            wav,
            m0=mback2d,
            explicit=explicit,
            epsI=par["epsI"],
            epsR=par["epsR"],
            epsRL1=par["epsRL1"],
            simultaneous=par["simultaneous"],
            **dict_inv
        )[0]
        assert np.linalg.norm(m2d - minv2d) / np.linalg.norm(m2d) < 1e-1
| 6,020 | 28.370732 | 88 | py |
pylops | pylops-master/pytests/test_causalintegration.py | import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import CausalIntegration, FirstDerivative
from pylops.utils import dottest
par1 = {
"nt": 20,
"nx": 101,
"dt": 1.0,
"imag": 0,
"dtype": "float64",
} # even samples, real, unitary step
par2 = {
"nt": 21,
"nx": 101,
"dt": 1.0,
"imag": 0,
"dtype": "float64",
} # odd samples, real, unitary step
par3 = {
"nt": 20,
"nx": 101,
"dt": 0.3,
"imag": 0,
"dtype": "float64",
} # even samples, real, non-unitary step
par4 = {
"nt": 21,
"nx": 101,
"dt": 0.3,
"imag": 0,
"dtype": "float64",
} # odd samples, real, non-unitary step
par1j = {
"nt": 20,
"nx": 101,
"dt": 1.0,
"imag": 1j,
"dtype": "complex128",
} # even samples, complex, unitary step
par2j = {
"nt": 21,
"nx": 101,
"dt": 1.0,
"imag": 1j,
"dtype": "complex128",
} # odd samples, complex, unitary step
par3j = {
"nt": 20,
"nx": 101,
"dt": 0.3,
"imag": 1j,
"dtype": "complex128",
} # even samples, complex, non-unitary step
par4j = {
"nt": 21,
"nx": 101,
"dt": 0.3,
"imag": 1j,
"dtype": "complex128",
} # odd samples, complex, non-unitary step
np.random.seed(0)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_CausalIntegration1d(par):
    """Dot-test and inversion for CausalIntegration operator for 1d signals"""
    t = np.arange(par["nt"]) * par["dt"]
    # linear ramp (real and, when imag != 0, complex): its integral is t**2/2
    x = t + par["imag"] * t
    for kind, rf in itertools.product(("full", "half", "trapezoidal"), (False, True)):
        Cop = CausalIntegration(
            par["nt"],
            sampling=par["dt"],
            kind=kind,
            removefirst=rf,
            dtype=par["dtype"],
        )
        # removefirst drops one output sample
        rf1 = 1 if rf else 0
        assert dottest(
            Cop, par["nt"] - rf1, par["nt"], complexflag=0 if par["imag"] == 0 else 3
        )
        # test analytical integration and derivative inversion only for
        # cases where a zero c is required
        if kind != "full" and not rf:
            # numerical integration
            y = Cop * x
            # analytical integration
            yana = (
                t**2 / 2.0
                - t[0] ** 2 / 2.0
                + par["imag"] * (t**2 / 2.0 - t[0] ** 2 / 2.0)
            )
            assert_array_almost_equal(y, yana[rf1:], decimal=4)
            # numerical derivative
            Dop = FirstDerivative(
                par["nt"] - rf1, sampling=par["dt"], dtype=par["dtype"]
            )
            xder = Dop * y.ravel()
            # derivative by inversion: integration operator inverted on its
            # own output must recover the original ramp
            xinv = Cop / y
            assert_array_almost_equal(x[:-1], xder[:-1], decimal=4)
            assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1j), (par2j), (par3j), (par4j)]
)
def test_CausalIntegration2d(par):
    """Dot-test and inversion for CausalIntegration operator for 2d signals"""
    dt = 0.2 * par["dt"]  # need lower frequency in sinusoids for stability
    t = np.arange(par["nt"]) * dt
    # sinusoid replicated along the spatial axis: its integral is -cos(t)+cos(t0)
    x = np.outer(np.sin(t), np.ones(par["nx"])) + par["imag"] * np.outer(
        np.sin(t), np.ones(par["nx"])
    )
    for kind, rf in itertools.product(("full", "half", "trapezoidal"), (False, True)):
        Cop = CausalIntegration(
            (par["nt"], par["nx"]),
            sampling=dt,
            axis=0,
            kind=kind,
            removefirst=rf,
            dtype=par["dtype"],
        )
        # removefirst drops one sample along the integration axis
        rf1 = 1 if rf else 0
        assert dottest(
            Cop,
            (par["nt"] - rf1) * par["nx"],
            par["nt"] * par["nx"],
            complexflag=0 if par["imag"] == 0 else 3,
        )
        # test analytical integration and derivative inversion only for
        # cases where a zero c is required
        if kind != "full" and not rf:
            # numerical integration
            y = Cop * x.ravel()
            y = y.reshape(par["nt"], par["nx"])
            # analytical integration
            yana = (
                np.outer(-np.cos(t), np.ones(par["nx"]))
                + np.cos(t[0])
                + par["imag"]
                * (np.outer(-np.cos(t), np.ones(par["nx"])) + np.cos(t[0]))
            )
            yana = yana.reshape(par["nt"], par["nx"])
            assert_array_almost_equal(y, yana, decimal=2)
            # numerical derivative
            Dop = FirstDerivative(
                (par["nt"], par["nx"]), axis=0, sampling=dt, dtype=par["dtype"]
            )
            xder = Dop * y.ravel()
            xder = xder.reshape(par["nt"], par["nx"])
            # derivative by inversion
            xinv = Cop / y.ravel()
            xinv = xinv.reshape(par["nt"], par["nx"])
            assert_array_almost_equal(x[:-1], xder[:-1], decimal=2)
            assert_array_almost_equal(x, xinv, decimal=2)
| 4,999 | 27.248588 | 86 | py |
pylops | pylops-master/pytests/test_ffts.py | import itertools
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import FFT, FFT2D, FFTND
from pylops.utils import dottest
# Utility function
def _choose_random_axes(ndim, n_choices=2):
"""Chooses `n_choices` random axes given an array of `ndim` dimensions.
Examples:
_choose_random_axes(2, 1) may return any of [0], [1], [-2] or [-1]
_choose_random_axes(3, 2) may return any of [0, 1], [1, 0], [-2, -1],
[-1, -2], [-2, 1], [1, -2], [0, -1] or [-1, 0].
"""
if ndim < n_choices:
raise ValueError("ndim < n_choices")
axes_choices = list(range(-ndim, ndim))
axes = []
for _ in range(n_choices):
axis_chosen = np.random.choice(axes_choices)
# Remove chosen and its symmetrical counterpart
axes_choices.remove(axis_chosen)
axes_choices.remove(axis_chosen - (1 if axis_chosen >= 0 else -1) * ndim)
axes += [axis_chosen]
return axes
par1 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": None,
"real": False,
"engine": "numpy",
"ifftshift_before": False,
"dtype": np.complex128,
} # nfft=nt, complex input, numpy engine
par2 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 64,
"real": False,
"engine": "numpy",
"ifftshift_before": False,
"dtype": np.complex64,
} # nfft>nt, complex input, numpy engine
par3 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": None,
"real": True,
"engine": "numpy",
"ifftshift_before": False,
"dtype": np.float64,
} # nfft=nt, real input, numpy engine
par4 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 64,
"real": True,
"engine": "numpy",
"ifftshift_before": False,
"dtype": np.float64,
} # nfft>nt, real input, numpy engine
par5 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 64,
"real": True,
"engine": "numpy",
"ifftshift_before": True,
"dtype": np.float32,
} # nfft>nt, real input and ifftshift_before, numpy engine
par6 = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 16,
"real": False,
"engine": "numpy",
"ifftshift_before": False,
"dtype": np.complex128,
} # nfft<nt, complex input, numpy engine
par1w = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": None,
"real": False,
"engine": "fftw",
"ifftshift_before": False,
"dtype": np.complex128,
} # nfft=nt, complex input, fftw engine
par2w = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 64,
"real": False,
"engine": "fftw",
"ifftshift_before": False,
"dtype": np.complex128,
} # nfft>nt, complex input, fftw engine
par3w = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": None,
"real": True,
"engine": "fftw",
"ifftshift_before": False,
"dtype": np.float64,
} # nfft=nt, real input, fftw engine
par4w = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 64,
"real": True,
"engine": "fftw",
"ifftshift_before": False,
"dtype": np.float32,
} # nfft>nt, real input, fftw engine
par5w = {
"nt": 41,
"nx": 31,
"ny": 10,
"nfft": 16,
"real": False,
"engine": "fftw",
"ifftshift_before": False,
"dtype": np.complex128,
} # nfft<nt, complex input, fftw engine
np.random.seed(5)
@pytest.mark.parametrize("par", [(par1)])
def test_unknown_engine(par):
    """Check error is raised if unknown engine is passed"""
    # An unsupported backend name must be rejected at construction time
    kwargs = dict(
        dims=[par["nt"]],
        nfft=par["nfft"],
        sampling=0.005,
        real=par["real"],
        engine="foo",
    )
    with pytest.raises(NotImplementedError):
        FFT(**kwargs)
par_lists_fft_small_real = dict(
dtype_precision=[
(np.float16, 1),
(np.float32, 4),
(np.float64, 11),
(np.float128, 11),
],
norm=["ortho", "none", "1/n"],
ifftshift_before=[False, True],
engine=["numpy", "fftw", "scipy"],
)
# Generate all combinations of the above parameters
pars_fft_small_real = [
dict(zip(par_lists_fft_small_real.keys(), value))
for value in itertools.product(*par_lists_fft_small_real.values())
]
@pytest.mark.parametrize("par", pars_fft_small_real)
def test_FFT_small_real(par):
    """Real FFT of a tiny fixed signal checked against hand-computed spectra."""
    dtype, decimal = par["dtype_precision"]
    norm = par["norm"]
    ifftshift_before = par["ifftshift_before"]
    engine = par["engine"]
    x = np.array([1, 0, -1, 1], dtype=dtype)
    FFTop = FFT(
        dims=x.shape,
        axis=0,
        norm=norm,
        real=True,
        ifftshift_before=ifftshift_before,
        dtype=dtype,
        engine=engine,
    )
    y = FFTop * x.ravel()
    # expected one-sided spectrum for each normalization mode
    if norm == "ortho":
        y_true = np.array([0.5, 1 + 0.5j, -0.5], dtype=FFTop.cdtype)
    elif norm == "none":
        y_true = np.array([1, 2 + 1j, -1], dtype=FFTop.cdtype)
    elif norm == "1/n":
        y_true = np.array([0.25, 0.5 + 0.25j, -0.25], dtype=FFTop.cdtype)
    # scale all bins except the first (DC/zero) and last (Nyquist): the
    # one-sided real FFT compensates their double-counted energy
    y_true[1:-1] *= np.sqrt(2)  # Zero and Nyquist
    if ifftshift_before:
        # `ifftshift_before`` is useful when the time-axis is centered around zero as
        # it ensures the time axis to starts at zero:
        # [-2, -1, 0, 1] ---ifftshift--> [0, 1, -2, -1]
        # This does not alter the amplitude of the FFT, but does alter the phase. To
        # match the results without ifftshift, we need to add a phase shift opposite to
        # the one introduced by FFT as given below. See "An FFT Primer for physicists",
        # by Thomas Kaiser.
        # https://www.iap.uni-jena.de/iapmedia/de/Lecture/Computational+Photonics/CoPho19_supp_FFT_primer.pdf
        x0 = -np.ceil(len(x) / 2)
        y_true *= np.exp(2 * np.pi * 1j * FFTop.f * x0)
    assert_array_almost_equal(y, y_true, decimal=decimal)
    assert dottest(FFTop, len(y), len(x), complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, len(y), len(x), complexflag=2, rtol=10 ** (-decimal))
    # inverse must recover the input
    x_inv = FFTop / y
    x_inv = x_inv.reshape(x.shape)
    assert_array_almost_equal(x_inv, x, decimal=decimal)
par_lists_fft_random_real = dict(
shape=[
np.random.randint(1, 20, size=(1,)),
np.random.randint(1, 20, size=(2,)),
np.random.randint(1, 20, size=(3,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
],
ifftshift_before=[False, True],
engine=["numpy", "fftw", "scipy"],
)
pars_fft_random_real = [
dict(zip(par_lists_fft_random_real.keys(), value))
for value in itertools.product(*par_lists_fft_random_real.values())
]
@pytest.mark.parametrize("par", pars_fft_random_real)
def test_FFT_random_real(par):
    """Real FFT over a random axis of a random array: adjoint/inverse and dot-tests."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    ifftshift_before = par["ifftshift_before"]
    x = np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    axis = _choose_random_axes(x.ndim, n_choices=1)[0]
    FFTop = FFT(
        dims=x.shape,
        axis=axis,
        ifftshift_before=ifftshift_before,
        real=True,
        dtype=dtype,
    )
    x = x.ravel()
    y = FFTop * x
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
par_lists_fft_small_cpx = dict(
dtype_precision=[(np.complex64, 4), (np.complex128, 11), (np.complex256, 11)],
norm=["ortho", "none", "1/n"],
ifftshift_before=[False, True],
fftshift_after=[False, True],
engine=["numpy", "fftw", "scipy"],
)
pars_fft_small_cpx = [
dict(zip(par_lists_fft_small_cpx.keys(), value))
for value in itertools.product(*par_lists_fft_small_cpx.values())
]
@pytest.mark.parametrize("par", pars_fft_small_cpx)
def test_FFT_small_complex(par):
    """Complex FFT of a tiny fixed signal checked against hand-computed spectra."""
    dtype, decimal = par["dtype_precision"]
    norm = par["norm"]
    ifftshift_before = par["ifftshift_before"]
    fftshift_after = par["fftshift_after"]
    x = np.array([1, 2 - 1j, -1j, -1 + 2j], dtype=dtype)
    FFTop = FFT(
        dims=x.shape,
        axis=0,
        norm=norm,
        ifftshift_before=ifftshift_before,
        fftshift_after=fftshift_after,
        dtype=dtype,
    )
    # Compute FFT of x independently (expected two-sided spectrum for each norm)
    if norm == "ortho":
        y_true = np.array([1, -1 - 1j, -1j, 2 + 2j], dtype=FFTop.cdtype)
    elif norm == "none":
        y_true = np.array([2, -2 - 2j, -2j, 4 + 4j], dtype=FFTop.cdtype)
    elif norm == "1/n":
        y_true = np.array([0.5, -0.5 - 0.5j, -0.5j, 1 + 1j], dtype=FFTop.cdtype)
    if fftshift_after:
        y_true = np.fft.fftshift(y_true)
    if ifftshift_before:
        # pre-shift only changes the phase; apply the compensating phase ramp
        x0 = -np.ceil(x.shape[0] / 2)
        y_true *= np.exp(2 * np.pi * 1j * FFTop.f * x0)
    # Compute FFT with FFTop and compare with y_true
    y = FFTop * x.ravel()
    assert_array_almost_equal(y, y_true, decimal=decimal)
    assert dottest(FFTop, *FFTop.shape, complexflag=3, rtol=10 ** (-decimal))
    # inverse must recover the input
    x_inv = FFTop / y
    x_inv = x_inv.reshape(x.shape)
    assert_array_almost_equal(x_inv, x, decimal=decimal)
par_lists_fft_random_cpx = dict(
shape=[
np.random.randint(1, 20, size=(1,)),
np.random.randint(1, 20, size=(2,)),
np.random.randint(1, 20, size=(3,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
(np.complex64, 3),
(np.complex128, 11),
(np.complex256, 11),
],
ifftshift_before=[False, True],
fftshift_after=[False, True],
engine=["numpy", "fftw", "scipy"],
)
pars_fft_random_cpx = [
dict(zip(par_lists_fft_random_cpx.keys(), value))
for value in itertools.product(*par_lists_fft_random_cpx.values())
]
@pytest.mark.parametrize("par", pars_fft_random_cpx)
def test_FFT_random_complex(par):
    """FFT over a random axis of a random (real or complex) array vs numpy reference."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    ifftshift_before = par["ifftshift_before"]
    fftshift_after = par["fftshift_after"]
    engine = par["engine"]
    x = np.random.randn(*shape).astype(dtype)
    if np.issubdtype(dtype, np.complexfloating):
        x += 1j * np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    axis = _choose_random_axes(x.ndim, n_choices=1)[0]
    FFTop = FFT(
        dims=x.shape,
        axis=axis,
        ifftshift_before=ifftshift_before,
        fftshift_after=fftshift_after,
        dtype=dtype,
        engine=engine,
    )
    # Compute FFT of x independently
    y_true = np.fft.fft(x, axis=axis, norm="ortho")
    if fftshift_after:
        y_true = np.fft.fftshift(y_true, axes=axis)
    if ifftshift_before:
        # move the transformed axis last, apply the compensating phase ramp,
        # then move it back
        y_true = np.swapaxes(y_true, axis, -1)
        x0 = -np.ceil(x.shape[axis] / 2)
        phase_correction = np.exp(2 * np.pi * 1j * FFTop.f * x0)
        y_true *= phase_correction
        y_true = np.swapaxes(y_true, -1, axis)
    y_true = y_true.ravel()
    # Compute FFT with FFTop and compare with y_true
    x = x.ravel()
    y = FFTop * x
    assert_array_almost_equal(y, y_true, decimal=decimal)
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests (complex-input flags only make sense for complex dtypes)
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
    if np.issubdtype(dtype, np.complexfloating):
        assert dottest(FFTop, nr, nc, complexflag=1, rtol=10 ** (-decimal))
        assert dottest(FFTop, nr, nc, complexflag=3, rtol=10 ** (-decimal))
par_lists_fft2d_random_real = dict(
shape=[
np.random.randint(1, 5, size=(2,)),
np.random.randint(1, 5, size=(3,)),
np.random.randint(1, 5, size=(4,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
],
ifftshift_before=[False, True],
engine=["numpy", "scipy"],
)
pars_fft2d_random_real = [
dict(zip(par_lists_fft2d_random_real.keys(), value))
for value in itertools.product(*par_lists_fft2d_random_real.values())
]
@pytest.mark.parametrize("par", pars_fft2d_random_real)
def test_FFT2D_random_real(par):
    """Real 2D FFT over two random axes: adjoint/inverse recovery and dot-tests."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    ifftshift_before = par["ifftshift_before"]
    engine = par["engine"]
    x = np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    # However, dimensions cannot be repeated
    axes = _choose_random_axes(x.ndim, n_choices=2)
    FFTop = FFT2D(
        dims=x.shape,
        axes=axes,
        ifftshift_before=ifftshift_before,
        real=True,
        dtype=dtype,
        engine=engine,
    )
    x = x.ravel()
    y = FFTop * x
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
par_lists_fft2d_random_cpx = dict(
shape=[
np.random.randint(1, 5, size=(2,)),
np.random.randint(1, 5, size=(3,)),
np.random.randint(1, 5, size=(5,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
(np.complex64, 3),
(np.complex128, 11),
(np.complex256, 11),
],
ifftshift_before=itertools.product([False, True], [False, True]),
fftshift_after=itertools.product([False, True], [False, True]),
engine=["numpy", "scipy"],
)
# Generate all combinations of the above parameters
pars_fft2d_random_cpx = [
dict(zip(par_lists_fft2d_random_cpx.keys(), value))
for value in itertools.product(*par_lists_fft2d_random_cpx.values())
]
@pytest.mark.parametrize("par", pars_fft2d_random_cpx)
def test_FFT2D_random_complex(par):
    """2D FFT over two random axes of a random array vs numpy fft2 reference."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    # per-axis (2-tuple) shift flags
    ifftshift_before = par["ifftshift_before"]
    fftshift_after = par["fftshift_after"]
    engine = par["engine"]
    x = np.random.randn(*shape).astype(dtype)
    if np.issubdtype(dtype, np.complexfloating):
        x += 1j * np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    # However, dimensions cannot be repeated
    axes = _choose_random_axes(x.ndim, n_choices=2)
    FFTop = FFT2D(
        dims=x.shape,
        axes=axes,
        ifftshift_before=ifftshift_before,
        fftshift_after=fftshift_after,
        dtype=dtype,
        engine=engine,
    )
    # Compute FFT of x independently, applying the same per-axis shifts
    x_ishift = x.copy()
    for axis, ishift in zip(axes, ifftshift_before):
        if ishift:
            x_ishift = np.fft.ifftshift(x_ishift, axes=axis)
    y_true = np.fft.fft2(x_ishift, axes=axes, norm="ortho")
    for axis, fshift in zip(axes, fftshift_after):
        if fshift:
            y_true = np.fft.fftshift(y_true, axes=axis)
    y_true = y_true.ravel()
    # Compute FFT with FFTop and compare with y_true
    x = x.ravel()
    y = FFTop * x
    assert_array_almost_equal(y, y_true, decimal=decimal)
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests (complex-input flags only make sense for complex dtypes)
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
    if np.issubdtype(dtype, np.complexfloating):
        assert dottest(FFTop, nr, nc, complexflag=1, rtol=10 ** (-decimal))
        assert dottest(FFTop, nr, nc, complexflag=3, rtol=10 ** (-decimal))
par_lists_fftnd_random_real = dict(
shape=[
np.random.randint(1, 5, size=(3,)),
np.random.randint(1, 5, size=(4,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
],
engine=["numpy", "scipy"],
)
pars_fftnd_random_real = [
dict(zip(par_lists_fftnd_random_real.keys(), value))
for value in itertools.product(*par_lists_fftnd_random_real.values())
]
@pytest.mark.parametrize("par", pars_fftnd_random_real)
def test_FFTND_random_real(par):
    """Real N-D FFT over 3+ random axes: adjoint/inverse recovery and dot-tests."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    engine = par["engine"]
    x = np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    # However, dimensions cannot be repeated
    n_choices = np.random.randint(3, x.ndim + 1)
    axes = _choose_random_axes(x.ndim, n_choices=n_choices)
    # Trying out all posibilities is very cumbersome, let's select some shifts randomly
    ifftshift_before = np.random.choice([False, True], size=n_choices)
    FFTop = FFTND(
        dims=x.shape,
        axes=axes,
        ifftshift_before=ifftshift_before,
        real=True,
        dtype=dtype,
        engine=engine,
    )
    x = x.ravel()
    y = FFTop * x
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
par_lists_fftnd_random_cpx = dict(
shape=[
np.random.randint(1, 5, size=(3,)),
np.random.randint(1, 5, size=(5,)),
],
dtype_precision=[
(np.float16, 1),
(np.float32, 3),
(np.float64, 11),
(np.float128, 11),
(np.complex64, 3),
(np.complex128, 11),
(np.complex256, 11),
],
engine=["numpy", "scipy"],
)
# Generate all combinations of the above parameters
pars_fftnd_random_cpx = [
dict(zip(par_lists_fftnd_random_cpx.keys(), value))
for value in itertools.product(*par_lists_fftnd_random_cpx.values())
]
@pytest.mark.parametrize("par", pars_fftnd_random_cpx)
def test_FFTND_random_complex(par):
    """N-D FFT over 3+ random axes of a random array vs numpy fftn reference."""
    shape = par["shape"]
    dtype, decimal = par["dtype_precision"]
    engine = par["engine"]
    x = np.random.randn(*shape).astype(dtype)
    if np.issubdtype(dtype, np.complexfloating):
        x += 1j * np.random.randn(*shape).astype(dtype)
    # Select an axis to apply FFT on. It can be any integer
    # in [0,..., ndim-1] but also in [-ndim, ..., -1]
    # However, dimensions cannot be repeated
    n_choices = np.random.randint(3, x.ndim + 1)
    axes = _choose_random_axes(x.ndim, n_choices=n_choices)
    # Trying out all posibilities is very cumbersome, let's select some shifts randomly
    ifftshift_before = np.random.choice([False, True], size=n_choices)
    fftshift_after = np.random.choice([True, False], size=n_choices)
    FFTop = FFTND(
        dims=x.shape,
        axes=axes,
        ifftshift_before=ifftshift_before,
        fftshift_after=fftshift_after,
        dtype=dtype,
        engine=engine,
    )
    # Compute FFT of x independently, applying the same per-axis shifts
    x_ishift = x.copy()
    for axis, ishift in zip(axes, ifftshift_before):
        if ishift:
            x_ishift = np.fft.ifftshift(x_ishift, axes=axis)
    # Bug fix: use fftn (not fft2) as the reference — this test transforms
    # 3 or more axes, and fft2 is only defined for 2-dimensional transforms
    # (passing more axes to it is deprecated in modern numpy).
    y_true = np.fft.fftn(x_ishift, axes=axes, norm="ortho")
    for axis, fshift in zip(axes, fftshift_after):
        if fshift:
            y_true = np.fft.fftshift(y_true, axes=axis)
    y_true = y_true.ravel()
    # Compute FFT with FFTop and compare with y_true
    x = x.ravel()
    y = FFTop * x
    assert_array_almost_equal(y, y_true, decimal=decimal)
    # Ensure inverse and adjoint recover x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=0, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xadj, decimal=decimal)
    assert_array_almost_equal(x, xinv, decimal=decimal)
    # Dot tests (complex-input flags only make sense for complex dtypes)
    nr, nc = FFTop.shape
    assert dottest(FFTop, nr, nc, complexflag=0, rtol=10 ** (-decimal))
    assert dottest(FFTop, nr, nc, complexflag=2, rtol=10 ** (-decimal))
    if np.issubdtype(dtype, np.complexfloating):
        assert dottest(FFTop, nr, nc, complexflag=1, rtol=10 ** (-decimal))
        assert dottest(FFTop, nr, nc, complexflag=3, rtol=10 ** (-decimal))
par_lists_fft2dnd_small_cpx = dict(
dtype_precision=[(np.complex64, 5), (np.complex128, 11), (np.complex256, 11)],
norm=["ortho", "none", "1/n"],
engine=["numpy", "scipy"],
)
pars_fft2dnd_small_cpx = [
dict(zip(par_lists_fft2dnd_small_cpx.keys(), value))
for value in itertools.product(*par_lists_fft2dnd_small_cpx.values())
]
@pytest.mark.parametrize("par", pars_fft2dnd_small_cpx)
def test_FFT2D_small_complex(par):
    """2D FFT of a tiny fixed matrix checked against a hand-computed spectrum."""
    dtype, decimal = par["dtype_precision"]
    norm = par["norm"]
    x = np.array(
        [
            [1, 2 - 1j, -1j, -1 + 2j],
            [2 - 1j, -1j, -1 - 2j, 1],
            [-1j, -1 - 2j, 1, 2 - 1j],
            [-1 - 2j, 1, 2 - 1j, -1j],
        ]
    )
    FFTop = FFT2D(
        dims=x.shape,
        axes=(0, 1),
        norm=norm,
        dtype=dtype,
    )
    # Compute FFT of x independently (unnormalized/backward values)
    y_true = np.array(
        [
            [8 - 12j, -4, -4j, 4],
            [4j, 4 - 8j, -4j, 4],
            [4j, -4, 4j, 4],
            [4j, -4, -4j, 4 + 16j],
        ],
        dtype=FFTop.cdtype,
    )  # Backward
    # rescale for the requested normalization (4x4 input: sqrt(n)=4, n=16)
    if norm == "ortho":
        y_true /= 4
    elif norm == "1/n":
        y_true /= 16
    # Compute FFT with FFTop and compare with y_true
    y = FFTop * x.ravel()
    y = y.reshape(FFTop.dimsd)
    assert_array_almost_equal(y, y_true, decimal=decimal)
    assert dottest(FFTop, *FFTop.shape, complexflag=3, rtol=10 ** (-decimal))
    # inverse must recover the input
    x_inv = FFTop / y.ravel()
    x_inv = x_inv.reshape(x.shape)
    assert_array_almost_equal(x_inv, x, decimal=decimal)
@pytest.mark.parametrize("par", pars_fft2dnd_small_cpx)
def test_FFTND_small_complex(par):
    """FFTND (restricted to 2 axes) of a tiny fixed matrix vs hand-computed spectrum.

    Mirrors test_FFT2D_small_complex so FFTND and FFT2D agree on the same input.
    """
    dtype, decimal = par["dtype_precision"]
    norm = par["norm"]
    x = np.array(
        [
            [1, 2 - 1j, -1j, -1 + 2j],
            [2 - 1j, -1j, -1 - 2j, 1],
            [-1j, -1 - 2j, 1, 2 - 1j],
            [-1 - 2j, 1, 2 - 1j, -1j],
        ]
    )
    FFTop = FFTND(
        dims=x.shape,
        axes=(0, 1),
        norm=norm,
        dtype=dtype,
    )
    # Compute FFT of x independently (unnormalized/backward values)
    y_true = np.array(
        [
            [8 - 12j, -4, -4j, 4],
            [4j, 4 - 8j, -4j, 4],
            [4j, -4, 4j, 4],
            [4j, -4, -4j, 4 + 16j],
        ],
        dtype=FFTop.cdtype,
    )  # Backward
    # rescale for the requested normalization (4x4 input: sqrt(n)=4, n=16)
    if norm == "ortho":
        y_true /= 4
    elif norm == "1/n":
        y_true /= 16
    # Compute FFT with FFTop and compare with y_true
    y = FFTop * x.ravel()
    y = y.reshape(FFTop.dimsd)
    assert_array_almost_equal(y, y_true, decimal=decimal)
    assert dottest(FFTop, *FFTop.shape, complexflag=3, rtol=10 ** (-decimal))
    # inverse must recover the input
    x_inv = FFTop / y.ravel()
    x_inv = x_inv.reshape(x.shape)
    assert_array_almost_equal(x_inv, x, decimal=decimal)
@pytest.mark.parametrize(
    "par",
    [
        (par1),
        (par2),
        (par3),
        (par4),
        (par5),
        (par6),
        (par1w),
        (par2w),
        (par3w),
        (par4w),
        (par5w),
    ],
)
def test_FFT_1dsignal(par):
    """Dot-test and inversion for FFT operator for 1d signal.

    A sinusoid is transformed forward; adjoint and LSQR-based inversion must
    both recover it. The fftshift_after variant is checked for complex FFTs.
    """
    # single-precision dtypes only warrant ~3 decimals of agreement
    decimal = 3 if np.real(np.ones(1, par["dtype"])).dtype == np.float32 else 8
    dt = 0.005
    t = np.arange(par["nt"]) * dt
    f0 = 10
    x = np.sin(2 * np.pi * f0 * t)
    x = x.astype(par["dtype"])
    nfft = par["nt"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=[par["nt"]],
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        # real FFT keeps only the non-negative frequencies: nfft // 2 + 1 samples
        assert dottest(
            FFTop, nfft // 2 + 1, par["nt"], complexflag=2, rtol=10 ** (-decimal)
        )
    else:
        assert dottest(FFTop, nfft, par["nt"], complexflag=2, rtol=10 ** (-decimal))
        assert dottest(FFTop, nfft, par["nt"], complexflag=3, rtol=10 ** (-decimal))
    y = FFTop * x
    xadj = FFTop.H * y  # adjoint is same as inverse for fft
    xinv = lsqr(FFTop, y, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    imax = par["nt"] if par["nfft"] is None else min([par["nt"], par["nfft"]])
    assert_array_almost_equal(x[:imax], xadj[:imax], decimal=decimal)
    assert_array_almost_equal(x[:imax], xinv[:imax], decimal=decimal)
    if not par["real"]:
        # fftshift_after must reorder the output exactly like np.fft.fftshift
        FFTop_fftshift = FFT(
            dims=[par["nt"]],
            nfft=nfft,
            sampling=dt,
            real=par["real"],
            ifftshift_before=par["ifftshift_before"],
            fftshift_after=True,
            engine=par["engine"],
            dtype=par["dtype"],
        )
        assert_array_almost_equal(FFTop_fftshift.f, np.fft.fftshift(FFTop.f))
        y_fftshift = FFTop_fftshift * x
        assert_array_almost_equal(y_fftshift, np.fft.fftshift(y))
        xadj = FFTop_fftshift.H * y_fftshift  # adjoint is same as inverse for fft
        xinv = lsqr(FFTop_fftshift, y_fftshift, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x[:imax], xadj[:imax], decimal=decimal)
        assert_array_almost_equal(x[:imax], xinv[:imax], decimal=decimal)
@pytest.mark.parametrize(
    "par",
    [
        (par1),
        (par2),
        (par3),
        (par4),
        (par5),
        (par6),
        (par1w),
        (par2w),
        (par3w),
        (par4w),
        (par5w),
    ],
)
def test_FFT_2dsignal(par):
    """Dot-test and inversion for fft operator for 2d signal
    (fft on single dimension)

    The same checks are repeated with the FFT applied along axis 0 and
    along axis 1, including the fftshift_after variant for complex FFTs.
    """
    # single-precision dtypes only warrant ~3 decimals of agreement
    decimal = 3 if np.real(np.ones(1, par["dtype"])).dtype == np.float32 else 8
    dt = 0.005
    nt, nx = par["nt"], par["nx"]
    t = np.arange(nt) * dt
    f0 = 10
    d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(nx) + 1)
    d = d.astype(par["dtype"])
    # 1st dimension
    nfft = par["nt"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=(nt, nx),
        axis=0,
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        # real FFT halves the transformed axis: nfft // 2 + 1 samples
        assert dottest(
            FFTop, (nfft // 2 + 1) * nx, nt * nx, complexflag=2, rtol=10 ** (-decimal)
        )
    else:
        assert dottest(FFTop, nfft * nx, nt * nx, complexflag=2, rtol=10 ** (-decimal))
        assert dottest(FFTop, nfft * nx, nt * nx, complexflag=3, rtol=10 ** (-decimal))
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is same as inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj.reshape(nt, nx))
    dinv = np.real(dinv.reshape(nt, nx))
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    imax = par["nt"] if par["nfft"] is None else min([par["nt"], par["nfft"]])
    assert_array_almost_equal(d[:imax], dadj[:imax], decimal=decimal)
    assert_array_almost_equal(d[:imax], dinv[:imax], decimal=decimal)
    if not par["real"]:
        FFTop_fftshift = FFT(
            dims=(nt, nx),
            axis=0,
            nfft=nfft,
            sampling=dt,
            real=par["real"],
            fftshift_after=True,
            engine=par["engine"],
            dtype=par["dtype"],
        )
        assert_array_almost_equal(FFTop_fftshift.f, np.fft.fftshift(FFTop.f))
        D_fftshift = FFTop_fftshift * d.flatten()
        D2 = np.fft.fftshift(D.reshape(nfft, nx), axes=0).flatten()
        assert_array_almost_equal(D_fftshift, D2)
        dadj = FFTop_fftshift.H * D_fftshift  # adjoint is same as inverse for fft
        dinv = lsqr(FFTop_fftshift, D_fftshift, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        dadj = np.real(dadj.reshape(nt, nx))
        dinv = np.real(dinv.reshape(nt, nx))
        assert_array_almost_equal(d[:imax], dadj[:imax], decimal=decimal)
        assert_array_almost_equal(d[:imax], dinv[:imax], decimal=decimal)
    # 2nd dimension
    nfft = par["nx"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=(nt, nx),
        axis=1,
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        assert dottest(
            FFTop, nt * (nfft // 2 + 1), nt * nx, complexflag=2, rtol=10 ** (-decimal)
        )
    else:
        assert dottest(FFTop, nt * nfft, nt * nx, complexflag=2, rtol=10 ** (-decimal))
        assert dottest(FFTop, nt * nfft, nt * nx, complexflag=3, rtol=10 ** (-decimal))
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj.reshape(nt, nx))
    dinv = np.real(dinv.reshape(nt, nx))
    # check all signal if nx>nfft and only up to nfft if nfft<nx
    imax = par["nx"] if par["nfft"] is None else min([par["nx"], par["nfft"]])
    assert_array_almost_equal(d[:, :imax], dadj[:, :imax], decimal=decimal)
    assert_array_almost_equal(d[:, :imax], dinv[:, :imax], decimal=decimal)
    if not par["real"]:
        FFTop_fftshift = FFT(
            dims=(nt, nx),
            axis=1,
            nfft=nfft,
            sampling=dt,
            real=par["real"],
            fftshift_after=True,
            engine=par["engine"],
            dtype=par["dtype"],
        )
        assert_array_almost_equal(FFTop_fftshift.f, np.fft.fftshift(FFTop.f))
        D_fftshift = FFTop_fftshift * d.flatten()
        D2 = np.fft.fftshift(D.reshape(nt, nfft), axes=1).flatten()
        assert_array_almost_equal(D_fftshift, D2)
        dadj = FFTop_fftshift.H * D_fftshift  # adjoint is same as inverse for fft
        dinv = lsqr(FFTop_fftshift, D_fftshift, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        dadj = np.real(dadj.reshape(nt, nx))
        dinv = np.real(dinv.reshape(nt, nx))
        assert_array_almost_equal(d[:, :imax], dadj[:, :imax], decimal=decimal)
        assert_array_almost_equal(d[:, :imax], dinv[:, :imax], decimal=decimal)
@pytest.mark.parametrize(
    "par",
    [
        (par1),
        (par2),
        (par3),
        (par4),
        (par5),
        (par6),
        (par1w),
        (par2w),
        (par3w),
        (par4w),
        (par5w),
    ],
)
def test_FFT_3dsignal(par):
    """Dot-test and inversion for fft operator for 3d signal
    (fft on single dimension)

    The same checks are repeated with the FFT applied along each of the
    three axes; the fftshift_after variant is checked on the last axis only.
    """
    # single-precision dtypes only warrant ~3 decimals of agreement
    decimal = 3 if np.real(np.ones(1, par["dtype"])).dtype == np.float32 else 8
    dt = 0.005
    nt, nx, ny = par["nt"], par["nx"], par["ny"]
    t = np.arange(nt) * dt
    f0 = 10
    d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(nx) + 1)
    d = np.tile(d[:, :, np.newaxis], [1, 1, ny])
    d = d.astype(par["dtype"])
    # 1st dimension
    nfft = par["nt"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=(nt, nx, ny),
        axis=0,
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        # real FFT halves the transformed axis: nfft // 2 + 1 samples
        assert dottest(
            FFTop,
            (nfft // 2 + 1) * nx * ny,
            nt * nx * ny,
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop, nfft * nx * ny, nt * nx * ny, complexflag=2, rtol=10 ** (-decimal)
        )
        assert dottest(
            FFTop, nfft * nx * ny, nt * nx * ny, complexflag=3, rtol=10 ** (-decimal)
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is same as inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj.reshape(nt, nx, ny))
    dinv = np.real(dinv.reshape(nt, nx, ny))
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    imax = nt if nfft is None else min([nt, nfft])
    assert_array_almost_equal(d[:imax], dadj[:imax], decimal=decimal)
    assert_array_almost_equal(d[:imax], dinv[:imax], decimal=decimal)
    # 2nd dimension
    nfft = par["nx"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=(nt, nx, ny),
        axis=1,
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        assert dottest(
            FFTop,
            nt * (nfft // 2 + 1) * ny,
            nt * nx * ny,
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop, nt * nfft * ny, nt * nx * ny, complexflag=2, rtol=10 ** (-decimal)
        )
        assert dottest(
            FFTop, nt * nfft * ny, nt * nx * ny, complexflag=3, rtol=10 ** (-decimal)
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj.reshape(nt, nx, ny))
    dinv = np.real(dinv.reshape(nt, nx, ny))
    # check all signal if nx>nfft and only up to nfft if nfft<nx
    imax = nx if nfft is None else min([nx, nfft])
    assert_array_almost_equal(d[:, :imax], dadj[:, :imax], decimal=decimal)
    assert_array_almost_equal(d[:, :imax], dinv[:, :imax], decimal=decimal)
    # 3rd dimension
    nfft = par["ny"] if par["nfft"] is None else par["nfft"]
    FFTop = FFT(
        dims=(nt, nx, ny),
        axis=2,
        nfft=nfft,
        sampling=dt,
        real=par["real"],
        engine=par["engine"],
        dtype=par["dtype"],
    )
    if par["real"]:
        assert dottest(
            FFTop,
            nt * nx * (nfft // 2 + 1),
            nt * nx * ny,
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop, nt * nx * nfft, nt * nx * ny, complexflag=2, rtol=10 ** (-decimal)
        )
        assert dottest(
            FFTop, nt * nx * nfft, nt * nx * ny, complexflag=3, rtol=10 ** (-decimal)
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj.reshape(nt, nx, ny))
    dinv = np.real(dinv.reshape(nt, nx, ny))
    # check all signal if ny>nfft and only up to nfft if nfft<ny
    imax = ny if nfft is None else min([ny, nfft])
    assert_array_almost_equal(d[..., :imax], dadj[..., :imax], decimal=decimal)
    assert_array_almost_equal(d[..., :imax], dinv[..., :imax], decimal=decimal)
    if not par["real"]:
        # fftshift_after must reorder the output exactly like np.fft.fftshift
        FFTop_fftshift = FFT(
            dims=(nt, nx, ny),
            axis=2,
            nfft=nfft,
            sampling=dt,
            real=par["real"],
            fftshift_after=True,
            engine=par["engine"],
            dtype=par["dtype"],
        )
        assert_array_almost_equal(FFTop_fftshift.f, np.fft.fftshift(FFTop.f))
        D_fftshift = FFTop_fftshift * d.flatten()
        D2 = np.fft.fftshift(D.reshape(nt, nx, nfft), axes=2).flatten()
        assert_array_almost_equal(D_fftshift, D2)
        dadj = FFTop_fftshift.H * D_fftshift  # adjoint is same as inverse for fft
        dinv = lsqr(FFTop_fftshift, D_fftshift, damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=0)[0]
        dadj = np.real(dadj.reshape(nt, nx, ny))
        dinv = np.real(dinv.reshape(nt, nx, ny))
        assert_array_almost_equal(d[..., :imax], dadj[..., :imax], decimal=decimal)
        assert_array_almost_equal(d[..., :imax], dinv[..., :imax], decimal=decimal)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par6)])
def test_FFT2D(par):
    """Dot-test and inversion for FFT2D operator for 2d signal.

    Both axis orderings (0, 1) and (1, 0) are exercised; with ``real=True``
    the halved dimension is the last axis listed (see the dottest sizes).
    """
    # single-precision dtypes only warrant ~3 decimals of agreement
    decimal = 3 if np.real(np.ones(1, par["dtype"])).dtype == np.float32 else 8
    dt, dx = 0.005, 5
    t = np.arange(par["nt"]) * dt
    f0 = 10
    nfft1 = par["nt"] if par["nfft"] is None else par["nfft"]
    nfft2 = par["nx"] if par["nfft"] is None else par["nfft"]
    d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(par["nx"]) + 1)
    d = d.astype(par["dtype"])
    # first fft on axis 1
    FFTop = FFT2D(
        dims=(par["nt"], par["nx"]),
        nffts=(nfft1, nfft2),
        sampling=(dt, dx),
        real=par["real"],
        axes=(0, 1),
    )
    if par["real"]:
        # real case halves nfft2, i.e. the last axis in axes=(0, 1)
        assert dottest(
            FFTop,
            nfft1 * (nfft2 // 2 + 1),
            par["nt"] * par["nx"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop,
            nfft1 * nfft2,
            par["nt"] * par["nx"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
        assert dottest(
            FFTop,
            nfft1 * nfft2,
            par["nt"] * par["nx"],
            complexflag=3,
            rtol=10 ** (-decimal),
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj).reshape(par["nt"], par["nx"])
    dinv = np.real(dinv).reshape(par["nt"], par["nx"])
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    # NOTE(review): nfft1/nfft2 are never None at this point (set above), so
    # the `is None` guards always take the min(...) branch
    imax1 = par["nt"] if nfft1 is None else min([par["nt"], nfft1])
    imax2 = par["nx"] if nfft2 is None else min([par["nx"], nfft2])
    assert_array_almost_equal(d[:imax1, :imax2], dadj[:imax1, :imax2], decimal=decimal)
    assert_array_almost_equal(d[:imax1, :imax2], dinv[:imax1, :imax2], decimal=decimal)
    # first fft on axis 0
    FFTop = FFT2D(
        dims=(par["nt"], par["nx"]),
        nffts=(nfft2, nfft1),
        sampling=(dx, dt),
        real=par["real"],
        axes=(1, 0),
    )
    if par["real"]:
        # real case halves nfft1, i.e. the last axis in axes=(1, 0)
        assert dottest(
            FFTop,
            nfft2 * (nfft1 // 2 + 1),
            par["nt"] * par["nx"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop,
            nfft1 * nfft2,
            par["nt"] * par["nx"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
        assert dottest(
            FFTop,
            nfft1 * nfft2,
            par["nt"] * par["nx"],
            complexflag=3,
            rtol=10 ** (-decimal),
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj).reshape(par["nt"], par["nx"])
    dinv = np.real(dinv).reshape(par["nt"], par["nx"])
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    assert_array_almost_equal(d[:imax1, :imax2], dadj[:imax1, :imax2], decimal=decimal)
    assert_array_almost_equal(d[:imax1, :imax2], dinv[:imax1, :imax2], decimal=decimal)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par6)])
def test_FFT3D(par):
    """Dot-test and inversion for FFTND operator for 3d signal.

    Three axis orderings are exercised; with ``real=True`` the halved
    dimension is the last axis listed in ``axes`` (see the dottest sizes).
    """
    # single-precision dtypes only warrant ~3 decimals of agreement
    decimal = 3 if np.real(np.ones(1, par["dtype"])).dtype == np.float32 else 8
    dt, dx, dy = 0.005, 5, 2
    t = np.arange(par["nt"]) * dt
    f0 = 10
    nfft1 = par["nt"] if par["nfft"] is None else par["nfft"]
    nfft2 = par["nx"] if par["nfft"] is None else par["nfft"]
    nfft3 = par["ny"] if par["nfft"] is None else par["nfft"]
    d = np.outer(np.sin(2 * np.pi * f0 * t), np.arange(par["nx"]) + 1)
    d = np.tile(d[:, :, np.newaxis], [1, 1, par["ny"]])
    d = d.astype(par["dtype"])
    # first fft on axis 2
    FFTop = FFTND(
        dims=(par["nt"], par["nx"], par["ny"]),
        nffts=(nfft1, nfft2, nfft3),
        axes=(0, 1, 2),
        sampling=(dt, dx, dy),
        real=par["real"],
    )
    if par["real"]:
        # real case halves nfft3, i.e. the last axis in axes=(0, 1, 2)
        assert dottest(
            FFTop,
            nfft1 * nfft2 * (nfft3 // 2 + 1),
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=3,
            rtol=10 ** (-decimal),
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj).reshape(par["nt"], par["nx"], par["ny"])
    dinv = np.real(dinv).reshape(par["nt"], par["nx"], par["ny"])
    # check all signal if nt>nfft and only up to nfft if nfft<nt
    # NOTE(review): nfft1/2/3 are never None at this point (set above), so the
    # `is None` guards always take the min(...) branch
    imax1 = par["nt"] if nfft1 is None else min([par["nt"], nfft1])
    imax2 = par["nx"] if nfft2 is None else min([par["nx"], nfft2])
    imax3 = par["ny"] if nfft3 is None else min([par["ny"], nfft3])
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dadj[:imax1, :imax2, :imax3], decimal=decimal
    )
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dinv[:imax1, :imax2, :imax3], decimal=decimal
    )
    # first fft on axis 1
    FFTop = FFTND(
        dims=(par["nt"], par["nx"], par["ny"]),
        nffts=(nfft1, nfft3, nfft2),
        axes=(0, 2, 1),
        sampling=(dt, dy, dx),
        real=par["real"],
    )
    if par["real"]:
        # real case halves nfft2, i.e. the last axis in axes=(0, 2, 1)
        assert dottest(
            FFTop,
            nfft1 * nfft3 * (nfft2 // 2 + 1),
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=3,
            rtol=10 ** (-decimal),
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj).reshape(par["nt"], par["nx"], par["ny"])
    dinv = np.real(dinv).reshape(par["nt"], par["nx"], par["ny"])
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dadj[:imax1, :imax2, :imax3], decimal=decimal
    )
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dinv[:imax1, :imax2, :imax3], decimal=decimal
    )
    # first fft on axis 0
    FFTop = FFTND(
        dims=(par["nt"], par["nx"], par["ny"]),
        nffts=(nfft2, nfft3, nfft1),
        axes=(1, 2, 0),
        sampling=(dx, dy, dt),
        real=par["real"],
    )
    if par["real"]:
        # real case halves nfft1, i.e. the last axis in axes=(1, 2, 0)
        assert dottest(
            FFTop,
            nfft2 * nfft3 * (nfft1 // 2 + 1),
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
    else:
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=2,
            rtol=10 ** (-decimal),
        )
        assert dottest(
            FFTop,
            nfft1 * nfft2 * nfft3,
            par["nt"] * par["nx"] * par["ny"],
            complexflag=3,
            rtol=10 ** (-decimal),
        )
    D = FFTop * d.ravel()
    dadj = FFTop.H * D  # adjoint is inverse for fft
    dinv = lsqr(FFTop, D, damp=1e-10, iter_lim=100, atol=1e-8, btol=1e-8, show=0)[0]
    dadj = np.real(dadj).reshape(par["nt"], par["nx"], par["ny"])
    dinv = np.real(dinv).reshape(par["nt"], par["nx"], par["ny"])
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dadj[:imax1, :imax2, :imax3], decimal=decimal
    )
    assert_array_almost_equal(
        d[:imax1, :imax2, :imax3], dinv[:imax1, :imax2, :imax3], decimal=decimal
    )
| 45,216 | 29.801771 | 109 | py |
pylops | pylops-master/pytests/test_eigs.py | import numpy as np
import pytest
from pylops.basicoperators import MatrixMult
from pylops.optimization.eigs import power_iteration
# parameter sets: square random matrix, real and complex variants
par1 = {"n": 21, "imag": 0, "dtype": "float32"}  # square, real
par2 = {"n": 21, "imag": 1j, "dtype": "complex64"}  # square, complex
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_power_iteration(par):
    """Compare the dominant eigenvalue from power iteration against numpy's
    dense eigensolver, for a generic and a symmetrized random matrix.
    """
    np.random.seed(10)
    n = par["n"]
    mat = np.random.randn(n, n) + par["imag"] * np.random.randn(n, n)
    # first the non-symmetric matrix, then its symmetrized counterpart
    for m in (mat, np.conj(mat.T) @ mat):
        est = power_iteration(MatrixMult(m), niter=200, tol=0)[0]
        ref = np.max(np.abs(np.linalg.eig(m)[0]))
        assert np.abs(np.abs(est) - ref) < 1e-3
| 981 | 27.882353 | 82 | py |
pylops | pylops-master/pytests/test_oneway.py | import numpy as np
import pytest
from pylops.basicoperators import Identity
from pylops.utils import dottest
from pylops.utils.seismicevents import hyperbolic2d, makeaxis
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.oneway import Deghosting, PhaseShift
np.random.seed(10)

# modelling parameters for the synthetic 2d dataset
parmod = {
    "ox": -400,
    "dx": 10,
    "nx": 81,
    "oy": -50,
    "dy": 10,
    "ny": 11,
    "ot": 0,
    "dt": 0.004,
    "nt": 50,
    "f0": 40,
}

# operator-size parameter sets
par1 = {"ny": 8, "nx": 10, "nt": 20, "dtype": "float32"}  # even
par2 = {"ny": 9, "nx": 11, "nt": 21, "dtype": "float32"}  # odd

# deghosting params
vel_sep = 1000.0  # velocity at separation level
zrec = 20.0  # depth of receivers

# axes and wavelet
t, t2, x, y = makeaxis(parmod)
wav = ricker(t[:41], f0=parmod["f0"])[0]
@pytest.fixture(scope="module")
def create_data2D():
    """Create 2d dataset.

    Builds an up-going wavefield of hyperbolic events (``p2d_minus``) and the
    total wavefield ``p2d`` obtained by adding its ghost, modelled by phase
    shifting the up-going data over twice the receiver depth.
    Returns the pair ``(p2d, p2d_minus)``.
    """
    t0_plus = np.array([0.02, 0.08])
    t0_minus = t0_plus + 0.04
    vrms = np.array([1400.0, 1800.0])
    amp = np.array([1.0, -0.6])
    p2d_minus = hyperbolic2d(x, t, t0_minus, vrms, amp, wav)[1].T
    kx = np.fft.ifftshift(np.fft.fftfreq(parmod["nx"], parmod["dx"]))
    freq = np.fft.rfftfreq(parmod["nt"], parmod["dt"])
    # ghost operator: phase shift over the two-way path to the free surface
    Pop = -PhaseShift(vel_sep, 2 * zrec, parmod["nt"], freq, kx)
    # Decomposition operator
    Dupop = Identity(parmod["nt"] * parmod["nx"]) + Pop
    p2d = Dupop * p2d_minus.ravel()
    p2d = p2d.reshape(parmod["nt"], parmod["nx"])
    return p2d, p2d_minus
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_PhaseShift_2dsignal(par):
    """Adjoint (dot) test for PhaseShift applied to a 2d signal"""
    nt, nx = par["nt"], par["nx"]
    frequencies = np.fft.rfftfreq(nt, 1.0)
    wavenumbers = np.fft.fftshift(np.fft.fftfreq(nx, 1.0))
    op = PhaseShift(1500.0, 200, nt, frequencies, wavenumbers, dtype=par["dtype"])
    assert dottest(op, nt * nx, nt * nx, rtol=1e-4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_PhaseShift_3dsignal(par):
    """Adjoint (dot) test for PhaseShift applied to a 3d signal"""
    nt, nx, ny = par["nt"], par["nx"], par["ny"]
    frequencies = np.fft.rfftfreq(nt, 1.0)
    kx_axis = np.fft.fftshift(np.fft.fftfreq(nx, 1.0))
    ky_axis = np.fft.fftshift(np.fft.fftfreq(ny, 1.0))
    op = PhaseShift(
        1500.0, 200, nt, frequencies, kx_axis, ky_axis, dtype=par["dtype"]
    )
    nmodel = nt * nx * ny
    assert dottest(op, nmodel, nmodel, rtol=1e-4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Deghosting_2dsignal(par, create_data2D):
    """Deghosting of 2d data.

    The inverted up-going wavefield must match the reference up-going data
    (created by the ``create_data2D`` fixture) within 30% relative error.
    """
    p2d, p2d_minus = create_data2D

    p2d_minus_inv, p2d_plus_inv = Deghosting(
        p2d,
        parmod["nt"],
        parmod["nx"],
        parmod["dt"],
        parmod["dx"],
        vel_sep,
        zrec,
        win=np.ones_like(p2d),
        npad=0,
        ntaper=0,
        dtype=par["dtype"],
        **dict(damp=1e-10, iter_lim=60)
    )
    # loose relative-error bound: deghosting is an inverse problem
    assert np.linalg.norm(p2d_minus_inv - p2d_minus) / np.linalg.norm(p2d_minus) < 3e-1
| 3,008 | 26.108108 | 87 | py |
pylops | pylops-master/pytests/test_transpose.py | import numpy as np
import pytest
from numpy.testing import assert_equal
from pylops.basicoperators import Transpose
from pylops.utils import dottest
# parameter sets: real and complex input signals
par1 = {"ny": 21, "nx": 11, "nt": 20, "imag": 0, "dtype": "float64"}  # real
par2 = {"ny": 21, "nx": 11, "nt": 20, "imag": 1j, "dtype": "complex128"}  # complex

np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Transpose_2dsignal(par):
    """Adjoint (dot) test and round trip for Transpose on 2d signals"""
    ny, nx = par["ny"], par["nx"]
    shape = (ny, nx)
    ramp = np.arange(ny * nx)
    x = ramp.reshape(shape) + par["imag"] * ramp.reshape(shape)

    Top = Transpose(dims=shape, axes=(1, 0), dtype=par["dtype"])
    cflag = 0 if par["imag"] == 0 else 3
    assert dottest(Top, np.prod(shape), np.prod(shape), complexflag=cflag)

    y = (Top * x.ravel()).reshape(Top.dimsd)
    xadj = (Top.H * y.ravel()).reshape(Top.dims)
    # applying the adjoint (the inverse permutation) recovers the input,
    # while the forward output is the plain matrix transpose
    assert_equal(x, xadj)
    assert_equal(y, x.T)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Transpose_3dsignal(par):
    """Adjoint (dot) test and round trip for Transpose on 3d signals"""
    ny, nx, nt = par["ny"], par["nx"], par["nt"]
    shape = (ny, nx, nt)
    ramp = np.arange(ny * nx * nt)
    x = ramp.reshape(shape) + par["imag"] * ramp.reshape(shape)

    Top = Transpose(dims=shape, axes=(2, 1, 0))
    cflag = 0 if par["imag"] == 0 else 3
    assert dottest(Top, np.prod(shape), np.prod(shape), complexflag=cflag)

    y = (Top * x.ravel()).reshape(Top.dimsd)
    xadj = (Top.H * y.ravel()).reshape(Top.dims)
    # the adjoint applies the inverse permutation, recovering the input
    assert_equal(x, xadj)
| 1,640 | 28.303571 | 83 | py |
pylops | pylops-master/pytests/test_metrics.py | import numpy as np
import pytest
from pylops.utils.metrics import mae, mse, psnr, snr
# parameter sets: same vector length, different floating-point precision
par1 = {"nx": 11, "dtype": "float64"}  # float64
par2 = {"nx": 11, "dtype": "float32"}  # float32
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_mae(par):
    """MAE must be zero against itself and one against an all-zero vector"""
    ones = np.ones(par["nx"])
    zeros = np.zeros(par["nx"])
    assert mae(ones, ones) == 0.0
    assert mae(ones, zeros) == 1.0
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_mse(par):
    """MSE must be zero against itself and one against an all-zero vector"""
    ones = np.ones(par["nx"])
    zeros = np.zeros(par["nx"])
    assert mse(ones, ones) == 0.0
    assert mse(ones, zeros) == 1.0
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_snr(par):
    """SNR must be infinite against itself and zero against an all-zero vector"""
    signal = np.random.normal(0, 1, par["nx"])
    silence = np.zeros(par["nx"])
    assert snr(signal, signal) == np.inf
    assert snr(signal, silence) == 0.0
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_psnr(par):
    """PSNR must be infinite against itself and zero against an all-zero vector"""
    ones = np.ones(par["nx"])
    zeros = np.zeros(par["nx"])
    assert psnr(ones, ones, xmax=1.0) == np.inf
    assert psnr(ones, zeros, xmax=1.0) == 0.0
| 1,435 | 24.642857 | 57 | py |
pylops | pylops-master/pytests/test_tapers.py | import numpy as np
import pytest
from numpy.testing import assert_array_equal
from pylops.utils.tapers import taper2d, taper3d
# parameter sets: each taper type is tested with both odd and even
# sample/taper counts
par1 = {
    "nt": 21,
    "nspat": (11, 13),
    "ntap": (3, 5),
    "tapertype": "hanning",
}  # hanning, odd samples and taper
par2 = {
    "nt": 20,
    "nspat": (12, 16),
    "ntap": (4, 6),
    "tapertype": "hanning",
}  # hanning, even samples and taper
par3 = {
    "nt": 21,
    "nspat": (11, 13),
    "ntap": (3, 5),
    "tapertype": "cosine",
}  # cosine, odd samples and taper
par4 = {
    "nt": 20,
    "nspat": (12, 16),
    "ntap": (4, 6),
    "tapertype": "cosine",
}  # cosine, even samples and taper
par5 = {
    "nt": 21,
    "nspat": (11, 13),
    "ntap": (3, 5),
    "tapertype": "cosinesquare",
}  # cosinesquare, odd samples and taper
par6 = {
    "nt": 20,
    "nspat": (12, 16),
    "ntap": (4, 6),
    "tapertype": "cosinesquare",
}  # cosinesquare, even samples and taper
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_taper2d(par):
    """Build a 2d taper and verify its shape, zeroed edges and flat interior"""
    nt, nsp, ntap = par["nt"], par["nspat"][0], par["ntap"][0]
    tap = taper2d(nt, nsp, ntap, par["tapertype"])
    assert tap.shape == (nsp, nt)
    zero_row, one_row = np.zeros(nt), np.ones(nt)
    assert_array_equal(tap[0], zero_row)
    assert_array_equal(tap[-1], zero_row)
    assert_array_equal(tap[ntap + 1], one_row)
    assert_array_equal(tap[nsp // 2], one_row)
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4), (par5), (par6)])
def test_taper3d(par):
    """Build a 3d taper and verify its shape, zeroed edges and flat interior"""
    nt, nspat, ntap = par["nt"], par["nspat"], par["ntap"]
    tap = taper3d(nt, nspat, ntap, par["tapertype"])
    assert tap.shape == (nspat[0], nspat[1], nt)
    zero_row, one_row = np.zeros(nt), np.ones(nt)
    assert_array_equal(tap[0][0], zero_row)
    assert_array_equal(tap[-1][-1], zero_row)
    assert_array_equal(tap[ntap[0], ntap[1]], one_row)
    assert_array_equal(tap[nspat[0] // 2, nspat[1] // 2], one_row)
| 2,076 | 29.101449 | 81 | py |
pylops | pylops-master/pytests/test_radon.py | import multiprocessing
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.optimization.sparsity import fista
from pylops.signalprocessing import Radon2D, Radon3D
from pylops.utils import dottest
# parameter sets: every Radon kind (linear/parabolic/hyperbolic) combined with
# centered/uncentered offsets, with/without interpolation, numpy/numba engines
par1 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "centeredh": True,
    "kind": "linear",
    "interp": True,
    "engine": "numpy",
}  # linear, centered, linear interp, numpy
par2 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "centeredh": False,
    "kind": "linear",
    "interp": True,
    "engine": "numpy",
}  # linear, uncentered, linear interp, numpy
par3 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "centeredh": True,
    "kind": "linear",
    "interp": True,
    "engine": "numba",
}  # linear, centered, linear interp, numba
par4 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 1e-2,
    "pxmax": 2e-2,
    "centeredh": False,
    "kind": "linear",
    "interp": False,
    "engine": "numba",
}  # linear, uncentered, linear interp, numba
par5 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 8e-3,
    "pxmax": 7e-3,
    "centeredh": True,
    "kind": "parabolic",
    "interp": False,
    "engine": "numpy",
}  # parabolic, centered, no interp, numpy
par6 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 8e-3,
    "pxmax": 7e-3,
    "centeredh": False,
    "kind": "parabolic",
    "interp": True,
    "engine": "numba",
}  # parabolic, uncentered, interp, numba
par7 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 9e-2,
    "pxmax": 8e-2,
    "centeredh": True,
    "kind": "hyperbolic",
    "interp": True,
    "engine": "numpy",
}  # hyperbolic, centered, interp, numpy
par8 = {
    "nt": 11,
    "nhx": 21,
    "nhy": 10,
    "npx": 21,
    "npy": 17,
    "pymax": 7e-2,
    "pxmax": 8e-2,
    "centeredh": False,
    "kind": "hyperbolic",
    "interp": False,
    "engine": "numba",
}  # hyperbolic, uncentered, interp, numba
def test_unknown_engine():
    """An unsupported engine name must raise a KeyError"""
    for ctor, args in ((Radon2D, (None,) * 3), (Radon3D, (None,) * 5)):
        with pytest.raises(KeyError):
            ctor(*args, engine="foo")
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7), (par8)]
)
def test_Radon2D(par):
    """Dot-test, forward and adjoint consistency check
    (for onthefly parameter), and sparse inverse for Radon2D operator
    """
    dt, dh = 0.005, 1
    t = np.arange(par["nt"]) * dt
    h = np.arange(par["nhx"]) * dh
    px = np.linspace(0, par["pxmax"], par["npx"])
    # model: a single unit spike in (p, t) space
    x = np.zeros((par["npx"], par["nt"]))
    x[2, par["nt"] // 2] = 1

    # precomputed-table operator
    Rop = Radon2D(
        t,
        h,
        px,
        centeredh=par["centeredh"],
        interp=par["interp"],
        kind=par["kind"],
        onthefly=False,
        engine=par["engine"],
        dtype="float64",
    )
    # on-the-fly operator, must produce identical results
    R1op = Radon2D(
        t,
        h,
        px,
        centeredh=par["centeredh"],
        interp=par["interp"],
        kind=par["kind"],
        onthefly=True,
        engine=par["engine"],
        dtype="float64",
    )

    assert dottest(Rop, par["nhx"] * par["nt"], par["npx"] * par["nt"], rtol=1e-3)

    y = Rop * x.ravel()
    y1 = R1op * x.ravel()
    assert_array_almost_equal(y, y1, decimal=4)

    xadj = Rop.H * y
    xadj1 = R1op.H * y
    assert_array_almost_equal(xadj, xadj1, decimal=4)

    # sparse inversion should recover the spike (coarse tolerance)
    xinv, _, _ = fista(Rop, y, niter=30, eps=1e0)
    assert_array_almost_equal(x.ravel(), xinv, decimal=1)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par5), (par6), (par7), (par8)]
)
def test_Radon3D(par):
    """Dot-test, forward and adjoint consistency check
    (for onthefly parameter), and sparse inverse for Radon3D operator
    """
    if (
        par["engine"] == "numpy" or multiprocessing.cpu_count() >= 4
    ):  # avoid timeout in travis for numba

        dt, dhy, dhx = 0.005, 1, 1
        t = np.arange(par["nt"]) * dt
        hy = np.arange(par["nhy"]) * dhy
        hx = np.arange(par["nhx"]) * dhx
        py = np.linspace(0, par["pymax"], par["npy"])
        px = np.linspace(0, par["pxmax"], par["npx"])
        # model: a single unit spike in (py, px, t) space
        x = np.zeros((par["npy"], par["npx"], par["nt"]))
        x[3, 2, par["nt"] // 2] = 1

        # precomputed-table operator
        Rop = Radon3D(
            t,
            hy,
            hx,
            py,
            px,
            centeredh=par["centeredh"],
            interp=par["interp"],
            kind=par["kind"],
            onthefly=False,
            engine=par["engine"],
            dtype="float64",
        )
        # on-the-fly operator, must produce identical results
        R1op = Radon3D(
            t,
            hy,
            hx,
            py,
            px,
            centeredh=par["centeredh"],
            interp=par["interp"],
            kind=par["kind"],
            onthefly=True,
            engine=par["engine"],
            dtype="float64",
        )

        assert dottest(
            Rop,
            par["nhy"] * par["nhx"] * par["nt"],
            par["npy"] * par["npx"] * par["nt"],
            rtol=1e-3,
        )
        y = Rop * x.ravel()
        y1 = R1op * x.ravel()
        assert_array_almost_equal(y, y1, decimal=4)

        xadj = Rop.H * y
        xadj1 = R1op.H * y
        assert_array_almost_equal(xadj, xadj1, decimal=4)

        if Rop.engine == "numba":  # as numpy is too slow here...
            xinv, _, _ = fista(Rop, y, niter=200, eps=3e0)
            assert_array_almost_equal(x.ravel(), xinv, decimal=1)
| 5,828 | 23.389121 | 82 | py |
pylops | pylops-master/pytests/test_describe.py | import numpy as np
from pylops.basicoperators import BlockDiag, Diagonal, HStack, MatrixMult
from pylops.utils.describe import describe
def test_describe():
    """Smoke-test for ``describe``: since its textual output is hard to
    validate, simply check that it runs without error on a variety of
    basic and composite operators.
    """
    A = MatrixMult(np.ones((10, 5)))
    A.name = "A"
    B = Diagonal(np.ones(5))
    B.name = "A"  # intentionally the same name as A
    C = MatrixMult(np.ones((10, 5)))
    C.name = "C"
    # build the composites in the same order as before describing them
    AT, AH, A3 = A.T, A.H, 3 * A
    D = A + C
    E = D * B
    F = (A + C) * B + A
    G = HStack((A * B, C * B))
    H = BlockDiag((F, G))
    for op in (A, AT, AH, A3, D, E, F, G, H):
        describe(op)
| 827 | 21.378378 | 77 | py |
pylops | pylops-master/pytests/test_estimators.py | import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from pylops.basicoperators import MatrixMult
SAMPLERS = ["gaussian", "rayleigh", "rademacher", "unitvector"]
DTYPES = ["float32", "float64"]
pars_hutchinson = [
{"n": 100, "dtype": dtype, "sampler": sampler}
for dtype in DTYPES
for sampler in SAMPLERS
]
pars_hutchpp = [
{"n": 100, "dtype": dtype, "sampler": sampler}
for dtype in DTYPES
for sampler in SAMPLERS[:-1]
]
@pytest.mark.parametrize("par", pars_hutchinson)
def test_trace_hutchison(par):
    """Hutchinson trace estimate against the exact trace."""
    np.random.seed(10)
    n, dtype, sampler = par["n"], par["dtype"], par["sampler"]
    mat = np.random.randn(n, n).astype(dtype)
    op = MatrixMult(mat, dtype=dtype)

    reference = np.trace(mat)
    assert type(reference) == np.dtype(dtype)

    explicit = op.trace()
    assert type(explicit) == np.dtype(dtype)
    assert_almost_equal(reference, explicit)

    # Hutchinson estimator
    estimate = op.trace(
        neval=10 * n,
        batch_size=n + 1,
        method="hutchinson",
        sampler=sampler,
    )
    assert type(estimate) == np.dtype(dtype)
    # only the deterministic unitvector sampler is expected to be exact
    precision = 7 if sampler == "unitvector" else -1
    assert_almost_equal(reference, estimate, decimal=precision)
@pytest.mark.parametrize("par", pars_hutchpp)
def test_trace_hutchpp(par):
    """Test Hutch++ estimator."""
    np.random.seed(10)
    n, dtype, sampler = par["n"], par["dtype"], par["sampler"]

    mat = np.random.randn(n, n).astype(dtype)
    Aop = MatrixMult(mat, dtype=dtype)

    # reference value and explicit trace; both must preserve the dtype
    trace_ref = np.trace(mat)
    assert type(trace_ref) == np.dtype(dtype)
    trace_expl = Aop.trace()
    assert type(trace_expl) == np.dtype(dtype)
    assert_almost_equal(trace_ref, trace_expl)

    # stochastic Hutch++ estimate
    trace_est = Aop.trace(neval=10 * n, method="hutch++", sampler=sampler)
    assert type(trace_est) == np.dtype(dtype)
    assert_almost_equal(trace_ref, trace_est, decimal=5)
@pytest.mark.parametrize("par", pars_hutchpp)
def test_trace_nahutchpp(par):
    """Test NA-Hutch++ estimator."""
    np.random.seed(10)
    n, dtype, sampler = par["n"], par["dtype"], par["sampler"]

    mat = np.random.randn(n, n).astype(dtype)
    Aop = MatrixMult(mat, dtype=dtype)

    # reference value and explicit trace; both must preserve the dtype
    trace_ref = np.trace(mat)
    assert type(trace_ref) == np.dtype(dtype)
    trace_expl = Aop.trace()
    assert type(trace_expl) == np.dtype(dtype)
    assert_almost_equal(trace_ref, trace_expl)

    # stochastic NA-Hutch++ estimate (loose tolerance: non-adaptive variant)
    trace_est = Aop.trace(neval=10 * n, method="na-hutch++", sampler=sampler)
    assert type(trace_est) == np.dtype(dtype)
    assert_almost_equal(trace_ref, trace_est, decimal=-1)
| 2,702 | 26.03 | 63 | py |
pylops | pylops-master/pytests/test_nonstatconvolve.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.signal.windows import triang
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import (
Convolve1D,
Convolve2D,
NonStationaryConvolve1D,
NonStationaryConvolve2D,
NonStationaryFilters1D,
NonStationaryFilters2D,
)
from pylops.utils import dottest
# filter definitions: a 1D triangular filter and its 2D outer-product version
nfilts = (5, 7)
h1 = triang(nfilts[0], sym=True)
h2 = np.outer(triang(nfilts[0], sym=True), triang(nfilts[1], sym=True))

# stationary banks repeat the same filter; non-stationary banks scale/flip it
h1stat = np.tile(h1, (3, 1))
h1ns = np.stack([h1, -h1, 2 * h1])
h2stat = np.tile(h2, (3, 2, 1, 1))
# scale factors laid out on the 3x2 filter grid: [[2, 1], [1, 1], [-1, 2]]
h2ns = np.array([2.0, 1.0, 1.0, 1.0, -1.0, 2.0]).reshape(3, 2, 1, 1) * h2

par1_1d = {
    "nz": 21,
    "nx": 31,
    "axis": 0,
}  # first direction
par2_1d = {
    "nz": 21,
    "nx": 31,
    "axis": 1,
}  # second direction
par_2d = {
    "nz": 21,
    "nx": 31,
}
@pytest.mark.parametrize("par", [(par_2d)])
def test_even_filter(par):
    """Check error is raised if filter has even size"""
    nx, nz = par["nx"], par["nz"]
    ihx = (nx // 4, nx // 2, 3 * nx // 4)
    ihz = (nz // 4, 3 * nz // 4)

    # chopping one sample off (or subtracting one from hsize/hshape)
    # makes each filter even-sized and must be rejected
    with pytest.raises(ValueError):
        _ = NonStationaryConvolve1D(dims=nx, hs=h1ns[..., :-1], ih=ihx)
    with pytest.raises(ValueError):
        _ = NonStationaryConvolve2D(
            dims=(nx, nz), hs=h2ns[..., :-1], ihx=ihx, ihz=ihz
        )
    with pytest.raises(ValueError):
        _ = NonStationaryFilters1D(inp=np.arange(nx), hsize=nfilts[0] - 1, ih=ihx)
    with pytest.raises(ValueError):
        _ = NonStationaryFilters2D(
            inp=np.ones((nx, nz)),
            hshape=(nfilts[0] - 1, nfilts[1] - 1),
            ihx=ihx,
            ihz=ihz,
        )
@pytest.mark.parametrize("par", [(par_2d)])
def test_ih_irregular(par):
    """Check error is raised if ih (or ihx/ihz) are irregularly sampled"""
    nx, nz = par["nx"], par["nz"]
    irregular = (10, 11, 15)  # non-equispaced filter locations

    with pytest.raises(ValueError):
        _ = NonStationaryConvolve1D(dims=nx, hs=h1ns, ih=irregular)
    with pytest.raises(ValueError):
        _ = NonStationaryConvolve2D(
            dims=(nx, nz),
            hs=h2ns,
            ihx=irregular,
            ihz=(nz // 4, 3 * nz // 4),
        )
@pytest.mark.parametrize("par", [(par_2d)])
def test_unknown_engine_2d(par):
    """Check error is raised if unknown engine is passed

    The filter arguments are kept valid (odd-sized ``nfilts``) so that the
    only offending argument is ``engine``. Previously ``NonStationaryFilters2D``
    was handed an even ``hshape``, which could mask the engine
    ``NotImplementedError`` with a size-validation ``ValueError`` depending on
    the order of checks in the operator.
    """
    with pytest.raises(NotImplementedError):
        _ = NonStationaryConvolve2D(
            dims=(par["nx"], par["nz"]),
            hs=h2ns,
            ihx=(int(par["nx"] // 3), int(2 * par["nx"] // 3)),
            ihz=(int(par["nz"] // 3), int(2 * par["nz"] // 3)),
            engine="foo",
        )
    with pytest.raises(NotImplementedError):
        _ = NonStationaryFilters2D(
            inp=np.ones((par["nx"], par["nz"])),
            hshape=nfilts,  # valid odd shape: error must come from engine only
            ihx=(int(par["nx"] // 3), int(2 * par["nx"] // 3)),
            ihz=(int(par["nz"] // 3), int(2 * par["nz"] // 3)),
            engine="foo",
        )
@pytest.mark.parametrize("par", [(par1_1d), (par2_1d)])
def test_NonStationaryConvolve1D(par):
    """Dot-test and inversion for NonStationaryConvolve1D operator"""
    # 1D model: three filters spread regularly along the axis
    if par["axis"] == 0:
        Cop = NonStationaryConvolve1D(
            dims=par["nx"],
            hs=h1ns,
            ih=(int(par["nx"] // 4), int(2 * par["nx"] // 4), int(3 * par["nx"] // 4)),
            dtype="float64",
        )
        assert dottest(Cop, par["nx"], par["nx"])

        # invert for a single spike placed in the middle of the model
        x = np.zeros((par["nx"]))
        x[par["nx"] // 2] = 1.0
        xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xlsqr, decimal=1)

    # 1D convolution applied along one axis of a 2D model
    nfilt = par["nx"] if par["axis"] == 0 else par["nz"]
    Cop = NonStationaryConvolve1D(
        dims=(par["nx"], par["nz"]),
        hs=h1ns,
        ih=(int(nfilt // 4), int(2 * nfilt // 4), int(3 * nfilt // 4)),
        axis=par["axis"],
        dtype="float64",
    )
    assert dottest(Cop, par["nx"] * par["nz"], par["nx"] * par["nz"])

    # invert for a 6x6 block of ones placed in the middle of the model
    x = np.zeros((par["nx"], par["nz"]))
    x[
        int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
        int(par["nz"] / 2 - 3) : int(par["nz"] / 2 + 3),
    ] = 1.0
    x = x.ravel()
    xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=400, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=1)
@pytest.mark.parametrize("par", [(par1_1d)])
def test_StationaryConvolve1D(par):
    """Check that Convolve1D and NonStationaryConvolve1D return same result for
    stationary filter"""
    np.random.seed(10)
    nx = par["nx"]

    # non-stationary operator fed with three identical filters ...
    Cop_ns = NonStationaryConvolve1D(
        dims=nx,
        hs=h1stat,
        ih=(nx // 4, nx // 2, 3 * nx // 4),
        dtype="float64",
    )
    # ... must behave exactly like the stationary convolution
    Cop_stat = Convolve1D(dims=nx, h=h1, offset=nfilts[0] // 2, dtype="float64")

    x = np.random.normal(0, 1, nx)
    assert_array_almost_equal(Cop_stat * x, Cop_ns * x, decimal=10)
@pytest.mark.parametrize("par", [(par_2d)])
def test_NonStationaryConvolve2D(par):
    """Dot-test and inversion for NonStationaryConvolve2D operator"""
    # 3x2 grid of filters spread regularly over the (nx, nz) model
    Cop = NonStationaryConvolve2D(
        dims=(par["nx"], par["nz"]),
        hs=h2ns,
        ihx=(int(par["nx"] // 4), int(2 * par["nx"] // 4), int(3 * par["nx"] // 4)),
        ihz=(int(par["nz"] // 4), int(3 * par["nz"] // 4)),
        dtype="float64",
    )
    assert dottest(Cop, par["nx"] * par["nz"], par["nx"] * par["nz"])

    # invert for a 6x6 block of ones placed in the middle of the model
    x = np.zeros((par["nx"], par["nz"]))
    x[
        int(par["nx"] / 2 - 3) : int(par["nx"] / 2 + 3),
        int(par["nz"] / 2 - 3) : int(par["nz"] / 2 + 3),
    ] = 1.0
    x = x.ravel()
    xlsqr = lsqr(Cop, Cop * x, damp=1e-20, iter_lim=400, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=1)
@pytest.mark.parametrize("par", [(par_2d)])
def test_StationaryConvolve2D(par):
    """Check that Convolve2D and NonStationaryConvolve2D return same result for
    stationary filter"""
    nx, nz = par["nx"], par["nz"]

    # non-stationary operator fed with six identical filters ...
    Cop_ns = NonStationaryConvolve2D(
        dims=(nx, nz),
        hs=h2stat,
        ihx=(nx // 4, nx // 2, 3 * nx // 4),
        ihz=(nz // 4, 3 * nz // 4),
        dtype="float64",
    )
    # ... must behave exactly like the stationary convolution
    Cop_stat = Convolve2D(
        dims=(nx, nz),
        h=h2,
        offset=(nfilts[0] // 2, nfilts[1] // 2),
        dtype="float64",
    )

    x = np.random.normal(0, 1, (nx, nz))
    assert_array_almost_equal(Cop_stat * x, Cop_ns * x, decimal=10)
@pytest.mark.parametrize(
    "par",
    [
        (par1_1d),
    ],
)
def test_NonStationaryFilters1D(par):
    """Dot-test and inversion for NonStationaryFilters1D operator

    Note: this test was previously misnamed ``test_NonStationaryFilters2D``,
    which made it shadowed by the homonymous 2D test defined below, so pytest
    silently never collected it.
    """
    # input signal with three unit spikes at the filter locations
    x = np.zeros((par["nx"]))
    x[par["nx"] // 4], x[par["nx"] // 2], x[3 * par["nx"] // 4] = 1.0, 1.0, 1.0

    Cop = NonStationaryFilters1D(
        inp=x,
        hsize=nfilts[0],
        ih=(int(par["nx"] // 4), int(2 * par["nx"] // 4), int(3 * par["nx"] // 4)),
        dtype="float64",
    )
    # model space holds the three stacked filters of length nfilts[0]
    assert dottest(Cop, par["nx"], 3 * nfilts[0])

    # invert for the filters and compare with the true non-stationary ones
    h1lsqr = lsqr(Cop, Cop * h1ns, damp=1e-20, iter_lim=200, show=0)[0]
    assert_array_almost_equal(h1ns.ravel(), h1lsqr, decimal=1)
@pytest.mark.parametrize("par", [(par_2d)])
def test_NonStationaryFilters2D(par):
    """Dot-test and inversion for NonStationaryFilters2D operator"""
    # input with three full rows of ones at the horizontal filter locations
    x = np.zeros((par["nx"], par["nz"]))
    x[int(par["nx"] // 4)] = 1.0
    x[int(par["nx"] // 2)] = 1.0
    x[int(3 * par["nx"] // 4)] = 1.0

    Cop = NonStationaryFilters2D(
        inp=x,
        hshape=nfilts,
        ihx=(int(par["nx"] // 4), int(2 * par["nx"] // 4), int(3 * par["nx"] // 4)),
        ihz=(int(par["nz"] // 4), int(3 * par["nz"] // 4)),
        dtype="float64",
    )
    # model space holds the 3x2 grid of (nfilts[0], nfilts[1]) filters
    assert dottest(Cop, par["nx"] * par["nz"], 6 * nfilts[0] * nfilts[1])

    # invert for the filters and compare with the true non-stationary ones
    h2lsqr = lsqr(Cop, Cop * h2ns.ravel(), damp=1e-20, iter_lim=400, show=0)[0]
    assert_array_almost_equal(h2ns.ravel(), h2lsqr, decimal=1)
| 8,753 | 30.489209 | 93 | py |
pylops | pylops-master/pytests/test_derivative.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal, assert_array_equal
from pylops.basicoperators import (
FirstDerivative,
FirstDirectionalDerivative,
Gradient,
Laplacian,
SecondDerivative,
SecondDirectionalDerivative,
)
from pylops.utils import dottest
def _par(nz, ny, nx, dz, dy, dx, edge):
    """Build one parameter dictionary for the derivative tests."""
    return {"nz": nz, "ny": ny, "nx": nx, "dz": dz, "dy": dy, "dx": dx, "edge": edge}


# even grids, without edge handling
par1 = _par(10, 30, 40, 1.0, 1.0, 1.0, False)  # even with unitary sampling
par2 = _par(10, 30, 40, 0.4, 2.0, 0.5, False)  # even with non-unitary sampling
# odd grids, without edge handling
par3 = _par(11, 51, 61, 1.0, 1.0, 1.0, False)  # odd with unitary sampling
par4 = _par(11, 51, 61, 0.4, 2.0, 0.5, False)  # odd with non-unitary sampling
# same grids, with edge handling enabled
par1e = _par(10, 30, 40, 1.0, 1.0, 1.0, True)  # even with unitary sampling
par2e = _par(10, 30, 40, 0.4, 2.0, 0.5, True)  # even with non-unitary sampling
par3e = _par(11, 51, 61, 1.0, 1.0, 1.0, True)  # odd with unitary sampling
par4e = _par(11, 51, 61, 0.4, 2.0, 0.5, True)  # odd with non-unitary sampling

np.random.seed(10)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_FirstDerivative_centered(par):
    """Dot-test and forward for FirstDerivative operator (centered stencil)

    A quadratic profile f = t**2 is differentiated along the chosen axis and
    compared with the analytical derivative 2*t; the outermost ``order // 2``
    samples are excluded from the comparison, as the centered stencil is not
    applied at the edges.
    """
    for order in (3, 5):
        # 1d
        D1op = FirstDerivative(
            par["nx"],
            sampling=par["dx"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(D1op, par["nx"], par["nx"], rtol=1e-3)

        x = (par["dx"] * np.arange(par["nx"])) ** 2
        yana = 2 * par["dx"] * np.arange(par["nx"])
        y = D1op * x
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )

        # 2d - derivative on 1st direction
        D1op = FirstDerivative(
            (par["ny"], par["nx"]),
            axis=0,
            sampling=par["dy"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(D1op, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

        x = np.outer((par["dy"] * np.arange(par["ny"])) ** 2, np.ones(par["nx"]))
        yana = np.outer(2 * par["dy"] * np.arange(par["ny"]), np.ones(par["nx"]))
        y = D1op * x.ravel()
        y = y.reshape(par["ny"], par["nx"])
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )

        # 2d - derivative on 2nd direction
        D1op = FirstDerivative(
            (par["ny"], par["nx"]),
            axis=1,
            sampling=par["dx"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(D1op, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

        # x varies only along axis 0, so its derivative along axis 1 is zero
        x = np.outer((par["dy"] * np.arange(par["ny"])) ** 2, np.ones(par["nx"]))
        yana = np.zeros((par["ny"], par["nx"]))
        y = D1op * x.ravel()
        y = y.reshape(par["ny"], par["nx"])
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )

        # 3d - derivative on 1st direction
        D1op = FirstDerivative(
            (par["nz"], par["ny"], par["nx"]),
            axis=0,
            sampling=par["dz"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(
            D1op,
            par["nz"] * par["ny"] * par["nx"],
            par["nz"] * par["ny"] * par["nx"],
            rtol=1e-3,
        )

        x = np.outer(
            (par["dz"] * np.arange(par["nz"])) ** 2, np.ones((par["ny"], par["nx"]))
        ).reshape(par["nz"], par["ny"], par["nx"])
        yana = np.outer(
            2 * par["dz"] * np.arange(par["nz"]), np.ones((par["ny"], par["nx"]))
        ).reshape(par["nz"], par["ny"], par["nx"])
        y = D1op * x.ravel()
        y = y.reshape(par["nz"], par["ny"], par["nx"])
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )

        # 3d - derivative on 2nd direction
        D1op = FirstDerivative(
            (par["nz"], par["ny"], par["nx"]),
            axis=1,
            sampling=par["dy"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(
            D1op,
            par["nz"] * par["ny"] * par["nx"],
            par["nz"] * par["ny"] * par["nx"],
            rtol=1e-3,
        )

        # x varies only along axis 0, so its derivative along axis 1 is zero
        x = np.outer(
            (par["dz"] * np.arange(par["nz"])) ** 2, np.ones((par["ny"], par["nx"]))
        ).reshape(par["nz"], par["ny"], par["nx"])
        yana = np.zeros((par["nz"], par["ny"], par["nx"]))
        y = D1op * x.ravel()
        y = y.reshape(par["nz"], par["ny"], par["nx"])
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )

        # 3d - derivative on 3rd direction (reuses x from the previous case,
        # which is constant along axis 2, so the derivative is again zero)
        D1op = FirstDerivative(
            (par["nz"], par["ny"], par["nx"]),
            axis=2,
            sampling=par["dx"],
            edge=par["edge"],
            order=order,
            dtype="float32",
        )
        assert dottest(
            D1op,
            par["nz"] * par["ny"] * par["nx"],
            par["nz"] * par["ny"] * par["nx"],
            rtol=1e-3,
        )

        yana = np.zeros((par["nz"], par["ny"], par["nx"]))
        y = D1op * x.ravel()
        y = y.reshape(par["nz"], par["ny"], par["nx"])
        assert_array_almost_equal(
            y[order // 2 : -order // 2], yana[order // 2 : -order // 2], decimal=1
        )
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_FirstDerivative_forwaback(par):
    """Dot-test for FirstDerivative operator (forward and backward
    stencils). Note that the analytical expression cannot be validated in this
    case
    """
    # (dims, axis, sampling) covering 1d, 2d and 3d model spaces
    configs = [
        (par["nx"], -1, par["dx"]),
        ((par["ny"], par["nx"]), 0, par["dy"]),
        ((par["ny"], par["nx"]), 1, par["dx"]),
        ((par["nz"], par["ny"], par["nx"]), 0, par["dz"]),
        ((par["nz"], par["ny"], par["nx"]), 1, par["dy"]),
        ((par["nz"], par["ny"], par["nx"]), 2, par["dx"]),
    ]
    for kind in ("forward", "backward"):
        for dims, axis, sampling in configs:
            D1op = FirstDerivative(
                dims,
                axis=axis,
                sampling=sampling,
                edge=par["edge"],
                kind=kind,
                dtype="float32",
            )
            nmod = int(np.prod(dims))
            assert dottest(D1op, nmod, nmod, rtol=1e-3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_SecondDerivative_centered(par):
    """Dot-test and forward for SecondDerivative operator (centered stencil)

    The test is based on the fact that the central stencil is exact for
    polynomials of degree 3: each cubic profile is differentiated twice and
    compared with its analytical second derivative, excluding the edge
    samples where the centered stencil is not applied.
    """
    x = par["dx"] * np.arange(par["nx"])
    y = par["dy"] * np.arange(par["ny"])
    z = par["dz"] * np.arange(par["nz"])
    xx, yy = np.meshgrid(x, y)  # produces arrays of size (ny,nx)
    xxx, yyy, zzz = np.meshgrid(x, y, z)  # produces arrays of size (ny,nx,nz)

    # 1d
    D2op = SecondDerivative(
        par["nx"], sampling=par["dx"], edge=par["edge"], dtype="float32"
    )
    assert dottest(D2op, par["nx"], par["nx"], rtol=1e-3)

    # polynomial f(x) = x^3, f''(x) = 6x
    f = x**3
    dfana = 6 * x
    df = D2op * f
    assert_array_almost_equal(df[1:-1], dfana[1:-1], decimal=1)

    # 2d - derivative on 1st direction
    D2op = SecondDerivative(
        (par["ny"], par["nx"]),
        axis=0,
        sampling=par["dy"],
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(D2op, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

    # polynomial f(x,y) = y^3, f_{yy}(x,y) = 6y
    f = yy**3
    dfana = 6 * yy
    df = D2op * f.ravel()
    df = df.reshape(par["ny"], par["nx"])
    assert_array_almost_equal(df[1:-1, :], dfana[1:-1, :], decimal=1)

    # 2d - derivative on 2nd direction
    D2op = SecondDerivative(
        (par["ny"], par["nx"]),
        axis=1,
        sampling=par["dx"],
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(D2op, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

    # polynomial f(x,y) = x^3, f_{xx}(x,y) = 6x
    f = xx**3
    dfana = 6 * xx
    df = D2op * f.ravel()
    df = df.reshape(par["ny"], par["nx"])
    assert_array_almost_equal(df[:, 1:-1], dfana[:, 1:-1], decimal=1)

    # 3d - derivative on 1st direction
    D2op = SecondDerivative(
        (par["ny"], par["nx"], par["nz"]),
        axis=0,
        sampling=par["dy"],
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        D2op,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )

    # polynomial f(x,y,z) = y^3, f_{yy}(x,y,z) = 6y
    f = yyy**3
    dfana = 6 * yyy
    df = D2op * f.ravel()
    df = df.reshape(par["ny"], par["nx"], par["nz"])
    assert_array_almost_equal(df[1:-1, :, :], dfana[1:-1, :, :], decimal=1)

    # 3d - derivative on 2nd direction
    D2op = SecondDerivative(
        (par["ny"], par["nx"], par["nz"]),
        axis=1,
        sampling=par["dx"],
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        D2op,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )

    # polynomial f(x,y,z) = x^3, f_{xx}(x,y,z) = 6x
    f = xxx**3
    dfana = 6 * xxx
    df = D2op * f.ravel()
    df = df.reshape(par["ny"], par["nx"], par["nz"])
    assert_array_almost_equal(df[:, 1:-1, :], dfana[:, 1:-1, :], decimal=1)

    # 3d - derivative on 3rd direction
    D2op = SecondDerivative(
        (par["ny"], par["nx"], par["nz"]),
        axis=2,
        sampling=par["dz"],
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        D2op,
        par["nz"] * par["ny"] * par["nx"],
        par["ny"] * par["nx"] * par["nz"],
        rtol=1e-3,
    )

    # polynomial f(x,y,z) = z^3, f_{zz}(x,y,z) = 6z
    f = zzz**3
    dfana = 6 * zzz
    df = D2op * f.ravel()
    df = df.reshape(par["ny"], par["nx"], par["nz"])
    assert_array_almost_equal(df[:, :, 1:-1], dfana[:, :, 1:-1], decimal=1)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_SecondDerivative_forwaback(par):
    """Dot-test for SecondDerivative operator (forward and backward stencils).

    Note that the analytical expression cannot be validated in this case.

    Bug fix: the loop variable ``kind`` was previously ignored (every operator
    was built with a hard-coded ``kind="forward"``), so the backward stencil
    was never exercised; ``kind=kind`` is now passed through. The unused
    meshgrid setup has also been removed.
    """
    # (dims, axis, sampling) covering 1d, 2d and 3d model spaces
    configs = [
        (par["nx"], -1, par["dx"]),
        ((par["ny"], par["nx"]), 0, par["dy"]),
        ((par["ny"], par["nx"]), 1, par["dx"]),
        ((par["ny"], par["nx"], par["nz"]), 0, par["dy"]),
        ((par["ny"], par["nx"], par["nz"]), 1, par["dx"]),
        ((par["ny"], par["nx"], par["nz"]), 2, par["dz"]),
    ]
    for kind in ("forward", "backward"):
        for dims, axis, sampling in configs:
            D2op = SecondDerivative(
                dims,
                axis=axis,
                sampling=sampling,
                edge=par["edge"],
                kind=kind,  # previously hard-coded to "forward"
                dtype="float32",
            )
            nmod = int(np.prod(dims))
            assert dottest(D2op, nmod, nmod, rtol=1e-3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_Laplacian(par):
    """Dot-test for Laplacian operator"""
    # 2d - symmetrical
    Dlapop = Laplacian(
        (par["ny"], par["nx"]),
        axes=(0, 1),
        weights=(1, 1),
        sampling=(par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(Dlapop, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

    # 2d - asymmetrical
    Dlapop = Laplacian(
        (par["ny"], par["nx"]),
        axes=(0, 1),
        weights=(1, 2),
        sampling=(par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(Dlapop, par["ny"] * par["nx"], par["ny"] * par["nx"], rtol=1e-3)

    # 3d - symmetrical on 1st and 2nd direction
    Dlapop = Laplacian(
        (par["nz"], par["ny"], par["nx"]),
        axes=(0, 1),
        weights=(1, 1),
        sampling=(par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        Dlapop,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )

    # NOTE(review): this case is identical to the previous one (same axes,
    # weights and sampling); it was possibly meant to cover a different
    # axes pair, e.g. (1, 2) -- confirm intent before changing
    Dlapop = Laplacian(
        (par["nz"], par["ny"], par["nx"]),
        axes=(0, 1),
        weights=(1, 1),
        sampling=(par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        Dlapop,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )

    # 3d - symmetrical on all directions
    Dlapop = Laplacian(
        (par["nz"], par["ny"], par["nx"]),
        axes=(0, 1, 2),
        weights=(1, 1, 1),
        sampling=(par["dz"], par["dx"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(
        Dlapop,
        par["nz"] * par["ny"] * par["nx"],
        par["nz"] * par["ny"] * par["nx"],
        rtol=1e-3,
    )
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_Gradient(par):
    """Dot-test for Gradient operator"""
    dims2 = (par["ny"], par["nx"])
    dims3 = (par["nz"], par["ny"], par["nx"])
    n2 = par["ny"] * par["nx"]
    n3 = par["nz"] * n2

    for kind in ("forward", "centered", "backward"):
        # 2d: two derivative components stacked in the data space
        Gop2 = Gradient(
            dims2,
            sampling=(par["dy"], par["dx"]),
            edge=par["edge"],
            kind=kind,
            dtype="float32",
        )
        assert dottest(Gop2, 2 * n2, n2, rtol=1e-3)

        # 3d: three derivative components stacked in the data space
        Gop3 = Gradient(
            dims3,
            sampling=(par["dz"], par["dy"], par["dx"]),
            edge=par["edge"],
            kind=kind,
            dtype="float32",
        )
        assert dottest(Gop3, 3 * n3, n3, rtol=1e-3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_FirstDirectionalDerivative(par):
    """Dot-test for FirstDirectionalDerivative operator"""
    n2 = par["ny"] * par["nx"]
    n3 = par["nz"] * n2

    for kind in ("forward", "centered", "backward"):
        # 2d: unit direction at 45 degrees
        Fdop2 = FirstDirectionalDerivative(
            (par["ny"], par["nx"]),
            v=np.sqrt(2.0) / 2.0 * np.ones(2),
            sampling=(par["dy"], par["dx"]),
            edge=par["edge"],
            kind=kind,
            dtype="float32",
        )
        assert dottest(Fdop2, n2, n2, rtol=1e-3)

        # 3d: unit direction along the main diagonal
        Fdop3 = FirstDirectionalDerivative(
            (par["nz"], par["ny"], par["nx"]),
            v=np.ones(3) / np.sqrt(3),
            sampling=(par["dz"], par["dy"], par["dx"]),
            edge=par["edge"],
            kind=kind,
            dtype="float32",
        )
        assert dottest(Fdop3, n3, n3, rtol=1e-3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_SecondDirectionalDerivative(par):
    """Dot-test for test_SecondDirectionalDerivative operator"""
    n2 = par["ny"] * par["nx"]
    n3 = par["nz"] * n2

    # 2d: unit direction at 45 degrees
    Sdop2 = SecondDirectionalDerivative(
        (par["ny"], par["nx"]),
        v=np.sqrt(2.0) / 2.0 * np.ones(2),
        sampling=(par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(Sdop2, n2, n2, rtol=1e-3)

    # 3d: unit direction along the main diagonal
    Sdop3 = SecondDirectionalDerivative(
        (par["nz"], par["ny"], par["nx"]),
        v=np.ones(3) / np.sqrt(3),
        sampling=(par["dz"], par["dy"], par["dx"]),
        edge=par["edge"],
        dtype="float32",
    )
    assert dottest(Sdop3, n3, n3, rtol=1e-3)
@pytest.mark.parametrize(
    "par", [(par1), (par2), (par3), (par4), (par1e), (par2e), (par3e), (par4e)]
)
def test_SecondDirectionalDerivative_verticalderivative(par):
    """Compare vertical derivative for SecondDirectionalDerivative operator
    and SecondDerivative
    """
    dims = (par["ny"], par["nx"])
    # -(D1^H D1) must match the second directional derivative along v = (1, 0)
    D1op = FirstDerivative(dims, axis=0, edge=par["edge"], dtype="float32")
    D2op = D1op.H * D1op
    D2dirop = SecondDirectionalDerivative(
        dims, v=np.array([1, 0]), edge=par["edge"], dtype="float32"
    )

    xflat = np.random.normal(0.0, 1.0, dims).ravel()
    assert_array_equal(-D2op * xflat, D2dirop * xflat)
| 21,044 | 27.248322 | 88 | py |
pylops | pylops-master/pytests/test_torchoperator.py | import numpy as np
import pytest
import torch
from numpy.testing import assert_array_equal
from pylops import MatrixMult, TorchOperator
# real-valued test configurations: square and overdetermined operators
par1 = dict(ny=11, nx=11, dtype=np.float32)  # square
par2 = dict(ny=21, nx=11, dtype=np.float32)  # overdetermined

np.random.seed(0)
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator(par):
    """Apply forward and gradient. As for linear operators the gradient
    must equal the adjoint of operator applied to the same vector, the two
    results are also checked to be the same.
    """
    Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])))
    Top = TorchOperator(Dop, batch=False)

    x = np.random.normal(0.0, 1.0, par["nx"])
    xt = torch.from_numpy(x).view(-1)
    xt.requires_grad = True
    v = torch.randn(par["ny"])

    # pylops operator: forward and adjoint references
    y = Dop * x
    xadj = Dop.H * v

    # torch operator: forward, then backprop of v through the graph, which
    # should accumulate the adjoint Dop.H @ v into xt.grad
    yt = Top.apply(xt)
    yt.backward(v, retain_graph=True)

    assert_array_equal(y, yt.detach().cpu().numpy())
    assert_array_equal(xadj, xt.grad.cpu().numpy())
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator_batch(par):
    """Apply forward for input with multiple samples (= batch) and flattened arrays"""
    Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])))
    Top = TorchOperator(Dop, batch=True)

    batch = np.random.normal(0.0, 1.0, (4, par["nx"]))
    batch_t = torch.from_numpy(batch)
    batch_t.requires_grad = True

    # reference: apply the operator column-wise via matmat
    expected = Dop.matmat(batch.T).T
    obtained = Top.apply(batch_t)
    assert_array_equal(expected, obtained.detach().cpu().numpy())
@pytest.mark.parametrize("par", [(par1)])
def test_TorchOperator_batch_nd(par):
    """Apply forward for input with multiple samples (= batch) and nd-arrays"""
    Dop = MatrixMult(np.random.normal(0.0, 1.0, (par["ny"], par["nx"])), otherdims=(2,))
    Top = TorchOperator(Dop, batch=True, flatten=False)

    batch = np.random.normal(0.0, 1.0, (4, par["nx"], 2))
    batch_t = torch.from_numpy(batch)
    batch_t.requires_grad = True

    # reference: move the batch axis last for pylops, then back to the front
    expected = (Dop @ batch.transpose(1, 2, 0)).transpose(2, 0, 1)
    obtained = Top.apply(batch_t)
    assert_array_equal(expected, obtained.detach().cpu().numpy())
| 2,110 | 29.157143 | 88 | py |
pylops | pylops-master/pytests/test_diagonal.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr as sp_lsqr
from pylops.basicoperators import Diagonal
from pylops.optimization.basic import lsqr
from pylops.utils import dottest
# real and complex test configurations
par1 = dict(ny=21, nx=11, nt=20, imag=0, dtype="float32")  # real
par2 = dict(ny=21, nx=11, nt=20, imag=1j, dtype="complex64")  # complex

np.random.seed(10)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Diagonal_1dsignal(par):
    """Dot-test and inversion for Diagonal operator for 1d signal"""
    for ddim in (par["nx"], par["nt"]):
        # strictly non-zero (possibly complex) diagonal, so inversion is exact
        diag = np.arange(ddim) + 1.0 + par["imag"] * (np.arange(ddim) + 1.0)
        Dop = Diagonal(diag, dtype=par["dtype"])
        assert dottest(Dop, ddim, ddim, complexflag=0 if par["imag"] == 0 else 3)

        x = np.ones(ddim) + par["imag"] * np.ones(ddim)
        xinv = sp_lsqr(
            Dop, Dop * x, damp=1e-20, iter_lim=300, atol=1e-8, btol=1e-8, show=0
        )[0]
        assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Diagonal_2dsignal(par):
    """Dot-test and inversion for Diagonal operator for 2d signal"""
    dims = (par["nx"], par["nt"])
    nmod = par["nx"] * par["nt"]
    for axis, ddim in enumerate(dims):
        # strictly non-zero diagonal broadcast along the chosen axis
        diag = np.arange(ddim) + 1.0 + par["imag"] * (np.arange(ddim) + 1.0)
        Dop = Diagonal(diag, dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Dop, nmod, nmod, complexflag=0 if par["imag"] == 0 else 3)

        x = np.ones(dims) + par["imag"] * np.ones(dims)
        xinv = sp_lsqr(
            Dop, Dop * x.ravel(), damp=1e-20, iter_lim=300, atol=1e-8,
            btol=1e-8, show=0,
        )[0]
        assert_array_almost_equal(x.ravel(), xinv.ravel(), decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Diagonal_3dsignal(par):
    """Dot-test and inversion for Diagonal operator for 3d signal"""
    dims = (par["ny"], par["nx"], par["nt"])
    nmod = par["ny"] * par["nx"] * par["nt"]
    for axis, ddim in enumerate(dims):
        # strictly non-zero diagonal broadcast along the chosen axis
        diag = np.arange(ddim) + 1.0 + par["imag"] * (np.arange(ddim) + 1.0)
        Dop = Diagonal(diag, dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Dop, nmod, nmod, complexflag=0 if par["imag"] == 0 else 3)

        x = np.ones(dims) + par["imag"] * np.ones(dims)
        xinv = sp_lsqr(
            Dop, Dop * x.ravel(), damp=1e-20, iter_lim=300, atol=1e-8,
            btol=1e-8, show=0,
        )[0]
        assert_array_almost_equal(x.ravel(), xinv.ravel(), decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Diagonal_2dsignal_unflattened(par):
    """Dot-test and inversion for Diagonal operator for unflattened 2d signal (v2 behaviour)"""
    dims = (par["nx"], par["nt"])
    nmod = par["nx"] * par["nt"]
    for axis, ddim in enumerate(dims):
        diag = np.arange(ddim) + 1.0 + par["imag"] * (np.arange(ddim) + 1.0)
        Dop = Diagonal(diag, dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Dop, nmod, nmod, complexflag=0 if par["imag"] == 0 else 3)

        x = np.ones(dims) + par["imag"] * np.ones(dims)
        # pylops' own lsqr keeps the model unflattened
        xinv = lsqr(Dop, Dop * x, damp=1e-20, niter=300, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xinv, decimal=4)
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_Diagonal_3dsignal_unflattened(par):
    """Dot-test and inversion for Diagonal operator unflattened 3d signal (v2 behaviour)"""
    dims = (par["ny"], par["nx"], par["nt"])
    nmod = par["ny"] * par["nx"] * par["nt"]
    for axis, ddim in enumerate(dims):
        diag = np.arange(ddim) + 1.0 + par["imag"] * (np.arange(ddim) + 1.0)
        Dop = Diagonal(diag, dims=dims, axis=axis, dtype=par["dtype"])
        assert dottest(Dop, nmod, nmod, complexflag=0 if par["imag"] == 0 else 3)

        x = np.ones(dims) + par["imag"] * np.ones(dims)
        # pylops' own lsqr keeps the model unflattened
        xinv = lsqr(Dop, Dop * x, damp=1e-20, niter=300, atol=1e-8, btol=1e-8, show=0)[0]
        assert_array_almost_equal(x, xinv, decimal=4)
| 4,591 | 36.950413 | 104 | py |
pylops | pylops-master/pytests/test_wavedecomposition.py | import numpy as np
import pytest
from pylops.utils.seismicevents import hyperbolic2d, hyperbolic3d, makeaxis
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.wavedecomposition import (
PressureToVelocity,
UpDownComposition2D,
UpDownComposition3D,
WavefieldDecomposition,
)
# params
# modelling parameters shared by all tests
PAR = {
    "ox": -100,
    "dx": 10,
    "nx": 21,
    "oy": -50,
    "dy": 10,
    "ny": 11,
    "ot": 0,
    "dt": 0.004,
    "nt": 30,
    "f0": 40,
}

# separation flavours under test
par1 = {**PAR, "kind": "analytical"}
par2 = {**PAR, "kind": "inverse"}

# separation params
vel_sep = 1000.0  # velocity at separation level
rho_sep = 1000.0  # density at separation level
critical = 0.99
ntaper = 5
nfftf = 2**6
nfftk = 2**6

# axes and wavelet
t, t2, x, y = makeaxis(PAR)
wav = ricker(t[:41], f0=PAR["f0"])[0]
@pytest.fixture(scope="module")
def create_data2D():
    """Create 2d dataset

    Models two up-going (``minus``) and two down-going (``plus``) hyperbolic
    events and composes them into pressure (``p2d``) and vertical particle
    velocity (``vz2d``) data via :class:`UpDownComposition2D`. Returns
    ``(p2d, vz2d, p2d_minus, p2d_plus)``.
    """
    t0_plus = np.array([0.05, 0.12])
    t0_minus = t0_plus + 0.04
    vrms = np.array([1400.0, 1800.0])
    amp = np.array([1.0, -0.6])

    _, p2d_minus = hyperbolic2d(x, t, t0_minus, vrms, amp, wav)
    _, p2d_plus = hyperbolic2d(x, t, t0_plus, vrms, amp, wav)
    UPop = UpDownComposition2D(
        PAR["nt"],
        PAR["nx"],
        PAR["dt"],
        PAR["dx"],
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftf),
        critical=critical * 100.0,
        ntaper=ntaper,
        dtype="complex128",
    )
    # compose (p, vz) from the separated wavefields; keep only the real part
    d2d = UPop * np.concatenate((p2d_plus.ravel(), p2d_minus.ravel())).ravel()
    d2d = np.real(d2d.reshape(2 * PAR["nx"], PAR["nt"]))
    # first nx traces are pressure, the remaining nx are vertical velocity
    p2d, vz2d = d2d[: PAR["nx"]], d2d[PAR["nx"] :]
    return p2d, vz2d, p2d_minus, p2d_plus
@pytest.fixture(scope="module")
def create_data3D():
    """Create 3d dataset.

    Same construction as ``create_data2D`` but with 3d hyperbolic events
    and ``UpDownComposition3D``.
    """
    t0_plus = np.array([0.05, 0.12])
    # minus events are delayed by 0.04 s with respect to the plus events
    t0_minus = t0_plus + 0.04
    vrms = np.array([1400.0, 1800.0])
    amp = np.array([1.0, -0.6])
    _, p3d_minus = hyperbolic3d(x, y, t, t0_minus, vrms, vrms, amp, wav)
    _, p3d_plus = hyperbolic3d(x, y, t, t0_plus, vrms, vrms, amp, wav)
    UPop = UpDownComposition3D(
        PAR["nt"],
        (PAR["ny"], PAR["nx"]),
        PAR["dt"],
        (PAR["dy"], PAR["dx"]),
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftk, nfftf),
        critical=critical * 100.0,
        ntaper=ntaper,
        dtype="complex128",
    )
    # compose: operator input is the stacked (plus, minus) wavefields
    d3d = UPop * np.concatenate((p3d_plus.ravel(), p3d_minus.ravel())).ravel()
    d3d = np.real(d3d.reshape(2 * PAR["ny"], PAR["nx"], PAR["nt"]))
    # first ny slices are pressure, last ny are vertical velocity
    p3d, vz3d = d3d[: PAR["ny"]], d3d[PAR["ny"] :]
    return p3d, vz3d, p3d_minus, p3d_plus
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_WavefieldDecomposition2D(par, create_data2D):
    """WavefieldDecomposition and PressureToVelocity reconstruction of 2d data"""
    p2d, vz2d, p2d_minus, p2d_plus = create_data2D
    # decomposition: separate pressure/velocity data back into up/down parts
    p2d_minus_est, p2d_plus_est = WavefieldDecomposition(
        p2d,
        vz2d,
        par["nt"],
        par["nx"],
        par["dt"],
        par["dx"],
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftf),
        kind=par["kind"],
        critical=critical * 100,
        ntaper=ntaper,
        dottest=True,
        dtype="complex128",
        # extra kwargs are forwarded to the inner solver — TODO confirm
        **dict(damp=1e-10, atol=1e-8, btol=1e-8, iter_lim=10)
    )
    # relative error of each separated wavefield must stay below 20%
    assert np.linalg.norm(p2d_minus_est - p2d_minus) / np.linalg.norm(p2d_minus) < 2e-1
    assert np.linalg.norm(p2d_plus_est - p2d_plus) / np.linalg.norm(p2d_plus) < 2e-1
    # reconstruction: map separated pressure parts back to vertical velocity
    PtoVop = PressureToVelocity(
        par["nt"],
        par["nx"],
        par["dt"],
        par["dx"],
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftf),
        critical=critical * 100.0,
        ntaper=ntaper,
        topressure=False,
    )
    vz2d_plus_est = (PtoVop * p2d_plus_est.ravel()).reshape(par["nx"], par["nt"])
    vz2d_minus_est = (PtoVop * p2d_minus_est.ravel()).reshape(par["nx"], par["nt"])
    # velocity is the difference of down- and up-going contributions
    vz2d_est = np.real(vz2d_plus_est - vz2d_minus_est)
    assert np.linalg.norm(vz2d_est - vz2d) / np.linalg.norm(vz2d) < 2e-1
@pytest.mark.parametrize("par", [(par1), (par2)])
def test_WavefieldDecomposition3D(par, create_data3D):
    """WavefieldDecomposition and PressureToVelocity reconstruction of 3d data"""
    p3d, vz3d, p3d_minus, p3d_plus = create_data3D
    # decomposition: separate pressure/velocity data back into up/down parts
    p3d_minus_est, p3d_plus_est = WavefieldDecomposition(
        p3d,
        vz3d,
        par["nt"],
        (par["ny"], par["nx"]),
        par["dt"],
        (par["dy"], par["dx"]),
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftk, nfftf),
        kind=par["kind"],
        critical=critical * 100,
        ntaper=ntaper,
        dottest=True,
        dtype="complex128",
        # extra kwargs are forwarded to the inner solver — TODO confirm
        **dict(damp=1e-10, iter_lim=10, atol=1e-8, btol=1e-8, show=2)
    )
    # looser 30% relative-error threshold than the 2d test
    assert np.linalg.norm(p3d_minus_est - p3d_minus) / np.linalg.norm(p3d_minus) < 3e-1
    assert np.linalg.norm(p3d_plus_est - p3d_plus) / np.linalg.norm(p3d_plus) < 3e-1
    # reconstruction: map separated pressure parts back to vertical velocity
    PtoVop = PressureToVelocity(
        par["nt"],
        (par["ny"], par["nx"]),
        par["dt"],
        (par["dy"], par["dx"]),
        rho_sep,
        vel_sep,
        nffts=(nfftk, nfftk, nfftf),
        critical=critical * 100.0,
        ntaper=ntaper,
        topressure=False,
    )
    vz3d_plus_est = (PtoVop * p3d_plus_est.ravel()).reshape(
        par["ny"], par["nx"], par["nt"]
    )
    vz3d_minus_est = (PtoVop * p3d_minus_est.ravel()).reshape(
        par["ny"], par["nx"], par["nt"]
    )
    # velocity is the difference of down- and up-going contributions
    vz3d_est = np.real(vz3d_plus_est - vz3d_minus_est)
    assert np.linalg.norm(vz3d_est - vz3d) / np.linalg.norm(vz3d) < 3e-1
| 5,596 | 27.125628 | 87 | py |
pylops | pylops-master/pytests/test_shift.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from scipy.sparse.linalg import lsqr
from pylops.signalprocessing import Shift
from pylops.utils import dottest
from pylops.utils.wavelets import gaussian
# parameter sets: nt/nx/ny sizes; imag selects real (0) vs complex (1j) data
par1 = {"nt": 41, "nx": 41, "ny": 11, "imag": 0, "dtype": "float64"} # square real
par2 = {
    "nt": 41,
    "nx": 21,
    "ny": 11,
    "imag": 0,
    "dtype": "float64",
} # overdetermined real
par1j = {
    "nt": 41,
    "nx": 41,
    "ny": 11,
    "imag": 1j,
    "dtype": "complex128",
} # square complex
par2j = {
    "nt": 41,
    "nx": 21,
    "ny": 11,
    "imag": 1j,
    "dtype": "complex128",
} # overdetermined complex
@pytest.mark.parametrize("par", [(par1), (par1j)])
def test_Shift1D(par):
    """Dot-test and inversion for Shift operator on 1d data"""
    np.random.seed(0)
    shift = 5.5
    isreal = par["imag"] == 0
    # gaussian pulse, optionally made complex-valued
    pulse = gaussian(np.arange(par["nt"] // 2 + 1), 2.0)[0]
    x = pulse + par["imag"] * pulse
    Sop = Shift(par["nt"], shift, real=isreal, dtype=par["dtype"])
    assert dottest(Sop, par["nt"], par["nt"], complexflag=0 if isreal else 3)
    # shift then invert: lsqr should recover the original pulse
    xlsqr = lsqr(Sop, Sop * x, damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0)[0]
    assert_array_almost_equal(x, xlsqr, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Shift2D(par):
    """Dot-test and inversion for Shift operator on 2d data"""
    np.random.seed(0)
    shift = 5.5
    isreal = par["imag"] == 0
    ntot = par["nt"] * par["nx"]
    # gaussian pulse, optionally made complex-valued
    pulse = gaussian(np.arange(par["nt"] // 2 + 1), 2.0)[0]
    pulse = pulse + par["imag"] * pulse
    # apply the shift along each of the two axes in turn
    for axis in (0, 1):
        if axis == 0:
            data = np.outer(pulse, np.ones(par["nx"]))
            dims = (par["nt"], par["nx"])
        else:
            data = np.outer(pulse, np.ones(par["nx"])).T
            dims = (par["nx"], par["nt"])
        Sop = Shift(dims, shift, axis=axis, real=isreal, dtype=par["dtype"])
        assert dottest(Sop, ntot, ntot, complexflag=0 if isreal else 3)
        # shift then invert: lsqr should recover the original data
        xlsqr = lsqr(
            Sop, Sop * data.ravel(), damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0
        )[0]
        assert_array_almost_equal(data.ravel(), xlsqr, decimal=1)
@pytest.mark.parametrize("par", [(par1), (par2), (par1j), (par2j)])
def test_Shift2Dvariable(par):
    """Dot-test and inversion for Shift operator on 2d data with variable shift"""
    np.random.seed(0)
    # one shift value per trace
    shift = np.arange(par["nx"])
    isreal = par["imag"] == 0
    ntot = par["nt"] * par["nx"]
    # gaussian pulse, optionally made complex-valued
    pulse = gaussian(np.arange(par["nt"] // 2 + 1), 2.0)[0]
    pulse = pulse + par["imag"] * pulse
    # apply the shift along each of the two axes in turn
    for axis in (0, 1):
        if axis == 0:
            data = np.outer(pulse, np.ones(par["nx"]))
            dims = (par["nt"], par["nx"])
        else:
            data = np.outer(pulse, np.ones(par["nx"])).T
            dims = (par["nx"], par["nt"])
        Sop = Shift(dims, shift, axis=axis, real=isreal, dtype=par["dtype"])
        assert dottest(Sop, ntot, ntot, complexflag=0 if isreal else 3)
        # shift then invert: lsqr should recover the original data
        xlsqr = lsqr(
            Sop, Sop * data.ravel(), damp=1e-20, iter_lim=200, atol=1e-8, btol=1e-8, show=0
        )[0]
        assert_array_almost_equal(data.ravel(), xlsqr, decimal=1)
| 4,466 | 28.196078 | 97 | py |
pylops | pylops-master/pytests/test_restriction.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.basicoperators import Restriction
from pylops.utils import dottest
# parameter sets: ny/nx/nt sizes; imag selects real (0) vs complex (1j) data;
# inplace selects the Restriction in-place behaviour.
# fix: inplace was stored as the strings "True"/"False" — "False" is truthy,
# so the flag could never actually be disabled; use real booleans.
par1 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 0,
    "dtype": "float64",
    "inplace": True,
}  # real, inplace
par2 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 1j,
    "dtype": "complex128",
    "inplace": True,
}  # complex, inplace
par3 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 0,
    "dtype": "float64",
    "inplace": False,
}  # real, out of place
par4 = {
    "ny": 21,
    "nx": 11,
    "nt": 20,
    "imag": 1j,
    "dtype": "complex128",
    "inplace": False,
}  # complex, out of place
# subsampling factor
perc_subsampling = 0.4
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Restriction_1dsignal(par):
    """Dot-test, forward and adjoint for Restriction operator for 1d signal"""
    np.random.seed(10)
    # randomly subsample 40% of the available samples
    Nsub = int(np.round(par["nx"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
    # fix: inplace was mistakenly fed par["dtype"]; it must follow par["inplace"]
    Rop = Restriction(par["nx"], iava, inplace=par["inplace"], dtype=par["dtype"])
    assert dottest(Rop, Nsub, par["nx"], complexflag=0 if par["imag"] == 0 else 3)
    x = np.ones(par["nx"]) + par["imag"] * np.ones(par["nx"])
    y = Rop * x
    x1 = Rop.H * y
    # mask returns the full-length masked array: compare at sampled indices only
    y1 = Rop.mask(x)
    assert_array_almost_equal(y, y1[iava])
    assert_array_almost_equal(x[iava], x1[iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Restriction_2dsignal(par):
    """Dot-test, forward and adjoint for Restriction operator for 2d signal"""
    np.random.seed(10)
    x = np.ones((par["nx"], par["nt"])) + par["imag"] * np.ones((par["nx"], par["nt"]))
    # 1st direction
    Nsub = int(np.round(par["nx"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
    # fix: inplace was mistakenly fed par["dtype"]; it must follow par["inplace"]
    Rop = Restriction(
        (par["nx"], par["nt"]), iava, axis=0, inplace=par["inplace"], dtype=par["dtype"]
    )
    assert dottest(
        Rop,
        Nsub * par["nt"],
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    y = (Rop * x.ravel()).reshape(Nsub, par["nt"])
    x1 = (Rop.H * y.ravel()).reshape(par["nx"], par["nt"])
    # mask accepts both flattened and unflattened inputs
    y1_fromflat = Rop.mask(x.ravel())
    y1 = Rop.mask(x)
    assert_array_almost_equal(y, y1_fromflat.reshape(par["nx"], par["nt"])[iava])
    assert_array_almost_equal(y, y1[iava])
    assert_array_almost_equal(x[iava], x1[iava])
    # 2nd direction
    Nsub = int(np.round(par["nt"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nt"]))[:Nsub])
    # fix: same inplace/dtype mix-up as above
    Rop = Restriction(
        (par["nx"], par["nt"]), iava, axis=1, inplace=par["inplace"], dtype=par["dtype"]
    )
    assert dottest(
        Rop,
        par["nx"] * Nsub,
        par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    y = (Rop * x.ravel()).reshape(par["nx"], Nsub)
    x1 = (Rop.H * y.ravel()).reshape(par["nx"], par["nt"])
    y1_fromflat = Rop.mask(x.ravel())
    y1 = Rop.mask(x)
    assert_array_almost_equal(y, y1_fromflat[:, iava])
    assert_array_almost_equal(y, y1[:, iava])
    assert_array_almost_equal(x[:, iava], x1[:, iava])
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par4)])
def test_Restriction_3dsignal(par):
    """Dot-test, forward and adjoint for Restriction operator for 3d signal"""
    np.random.seed(10)
    x = np.ones((par["ny"], par["nx"], par["nt"])) + par["imag"] * np.ones(
        (par["ny"], par["nx"], par["nt"])
    )
    # 1st direction
    Nsub = int(np.round(par["ny"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["ny"]))[:Nsub])
    # fix: inplace was mistakenly fed par["dtype"]; it must follow par["inplace"]
    Rop = Restriction(
        (par["ny"], par["nx"], par["nt"]),
        iava,
        axis=0,
        inplace=par["inplace"],
        dtype=par["dtype"],
    )
    assert dottest(
        Rop,
        Nsub * par["nx"] * par["nt"],
        par["ny"] * par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    y = (Rop * x.ravel()).reshape(Nsub, par["nx"], par["nt"])
    x1 = (Rop.H * y.ravel()).reshape(par["ny"], par["nx"], par["nt"])
    # mask accepts both flattened and unflattened inputs
    y1_fromflat = Rop.mask(x.ravel())
    y1 = Rop.mask(x)
    assert_array_almost_equal(
        y, y1_fromflat.reshape(par["ny"], par["nx"], par["nt"])[iava]
    )
    assert_array_almost_equal(y, y1[iava])
    assert_array_almost_equal(x[iava], x1[iava])
    # 2nd direction
    Nsub = int(np.round(par["nx"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nx"]))[:Nsub])
    # fix: same inplace/dtype mix-up as above
    Rop = Restriction(
        (par["ny"], par["nx"], par["nt"]),
        iava,
        axis=1,
        inplace=par["inplace"],
        dtype=par["dtype"],
    )
    assert dottest(
        Rop,
        par["ny"] * Nsub * par["nt"],
        par["ny"] * par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    y = (Rop * x.ravel()).reshape(par["ny"], Nsub, par["nt"])
    x1 = (Rop.H * y.ravel()).reshape(par["ny"], par["nx"], par["nt"])
    y1_fromflat = Rop.mask(x.ravel())
    y1 = Rop.mask(x)
    assert_array_almost_equal(y, y1_fromflat[:, iava])
    assert_array_almost_equal(y, y1[:, iava])
    assert_array_almost_equal(x[:, iava], x1[:, iava])
    # 3rd direction
    Nsub = int(np.round(par["nt"] * perc_subsampling))
    iava = np.sort(np.random.permutation(np.arange(par["nt"]))[:Nsub])
    # fix: same inplace/dtype mix-up as above
    Rop = Restriction(
        (par["ny"], par["nx"], par["nt"]),
        iava,
        axis=2,
        inplace=par["inplace"],
        dtype=par["dtype"],
    )
    assert dottest(
        Rop,
        par["ny"] * par["nx"] * Nsub,
        par["ny"] * par["nx"] * par["nt"],
        complexflag=0 if par["imag"] == 0 else 3,
    )
    y = (Rop * x.ravel()).reshape(par["ny"], par["nx"], Nsub)
    x1 = (Rop.H * y.ravel()).reshape(par["ny"], par["nx"], par["nt"])
    y1_fromflat = Rop.mask(x.ravel())
    y1 = Rop.mask(x)
    assert_array_almost_equal(y, y1_fromflat[:, :, iava])
    assert_array_almost_equal(y, y1[:, :, iava])
    assert_array_almost_equal(x[:, :, iava], x1[:, :, iava])
| 6,129 | 28.190476 | 87 | py |
pylops | pylops-master/pytests/test_kirchhoff.py | import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from pylops.utils import dottest
from pylops.utils.wavelets import ricker
from pylops.waveeqprocessing.kirchhoff import Kirchhoff
# model grid (ny, nx, nz), time axis (nt, dt) and source/receiver counts
PAR = {
    "ny": 3,
    "nx": 12,
    "nz": 20,
    "nt": 50,
    "dy": 3,
    "dx": 1,
    "dz": 2,
    "dt": 0.004,
    "nsy": 4,
    "nry": 3,
    "nsx": 6,
    "nrx": 2,
}
# Check if skfmm is available and by-pass tests using it otherwise. This is
# currently required for Travis as since we moved to Python3.8 it has
# stopped working
try:
    import skfmm # noqa: F401
    skfmm_enabled = True
except ImportError:
    skfmm_enabled = False
# constant background velocity
v0 = 500
y = np.arange(PAR["ny"]) * PAR["dy"]
x = np.arange(PAR["nx"]) * PAR["dx"]
z = np.arange(PAR["nz"]) * PAR["dz"]
t = np.arange(PAR["nt"]) * PAR["dt"]
# sources/receivers regularly spaced along y/x, all placed at depth 2
sy = np.linspace(y.min(), y.max(), PAR["nsy"])
sx = np.linspace(x.min(), x.max(), PAR["nsx"])
syy, sxx = np.meshgrid(sy, sx, indexing="ij")
s2d = np.vstack((sx, 2 * np.ones(PAR["nsx"])))
s3d = np.vstack((syy.ravel(), sxx.ravel(), 2 * np.ones(PAR["nsx"] * PAR["nsy"])))
ry = np.linspace(y.min(), y.max(), PAR["nry"])
rx = np.linspace(x.min(), x.max(), PAR["nrx"])
ryy, rxx = np.meshgrid(ry, rx, indexing="ij")
r2d = np.vstack((rx, 2 * np.ones(PAR["nrx"])))
r3d = np.vstack((ryy.ravel(), rxx.ravel(), 2 * np.ones(PAR["nrx"] * PAR["nry"])))
wav, _, wavc = ricker(t[:41], f0=40)
# parameter sets: traveltime computation mode x dynamic (amplitude) weighting
par1 = {"mode": "analytic", "dynamic": False}
par2 = {"mode": "eikonal", "dynamic": False}
par3 = {"mode": "byot", "dynamic": False}
par1d = {"mode": "analytic", "dynamic": True}
par2d = {"mode": "eikonal", "dynamic": True}
par3d = {"mode": "byot", "dynamic": True}
def test_identify_geometry():
    """Identify geometry, check expected outputs"""
    # 2d: no y axis supplied, so ndims=2 and no dimension shift
    (
        ndims,
        shiftdim,
        dims,
        ny,
        nx,
        nz,
        ns,
        nr,
        dy,
        dx,
        dz,
        dsamp,
        origin,
    ) = Kirchhoff._identify_geometry(z, x, s2d, r2d)
    assert ndims == 2
    assert shiftdim == 0
    # fix: the original `assert [1, 2] == [1, 2]` was a tautology with no
    # coverage; check the inferred axis samplings instead
    assert dx == PAR["dx"]
    assert dz == PAR["dz"]
    assert list(dims) == [PAR["nx"], PAR["nz"]]
    assert ny == 1
    assert nx == PAR["nx"]
    assert nz == PAR["nz"]
    assert ns == PAR["nsx"]
    assert nr == PAR["nrx"]
    # dsamp/origin must be consistent with the returned scalar samplings
    assert list(dsamp) == [dx, dz]
    assert list(origin) == [0, 0]
    # 3d: y axis supplied, dims/ samplings gain the leading y entry
    (
        ndims,
        shiftdim,
        dims,
        ny,
        nx,
        nz,
        ns,
        nr,
        dy,
        dx,
        dz,
        dsamp,
        origin,
    ) = Kirchhoff._identify_geometry(z, x, s3d, r3d, y=y)
    assert ndims == 3
    assert shiftdim == 1
    assert list(dims) == [PAR["ny"], PAR["nx"], PAR["nz"]]
    assert ny == PAR["ny"]
    assert nx == PAR["nx"]
    assert nz == PAR["nz"]
    assert ns == PAR["nsy"] * PAR["nsx"]
    assert nr == PAR["nry"] * PAR["nrx"]
    assert list(dsamp) == [dy, dx, dz]
    assert list(origin) == [0, 0, 0]
def test_traveltime_ana():
    """Check analytical traveltimes in homogenous medium for horizontal and
    vertical paths
    """
    # single source at (x=100, z=0), reused as the receiver; on a 200x200
    # unit-sampled grid the path via grid point (0, 0) has total length 200
    src = np.array([100, 0])[:, np.newaxis]
    (
        trav_srcs_ana,
        trav_recs_ana,
        dist_srcs_ana,
        dist_recs_ana,
        _,
        _,
    ) = Kirchhoff._traveltime_table(
        np.arange(0, 200, 1), np.arange(0, 200, 1), src, src, v0, mode="analytic"
    )
    # source-side and receiver-side distances to grid point [0, 0] sum to 200
    assert dist_srcs_ana[0, 0] + dist_recs_ana[0, 0] == 200
    # one-way traveltime over a 100-long straight path at constant v0
    assert trav_srcs_ana[0, 0] == 100 / v0
    assert trav_recs_ana[0, 0] == 100 / v0
def test_traveltime_table():
    """Compare analytical and eikonal traveltimes in homogenous medium"""
    if skfmm_enabled:
        # 2d
        # fix: unpack with the same (trav_srcs, trav_recs, dist_srcs,
        # dist_recs, _, _) ordering used in test_traveltime_ana; the original
        # names (trav_ana, trav_srcs_ana, ...) mislabelled the returns
        (
            trav_srcs_ana,
            trav_recs_ana,
            dist_srcs_ana,
            dist_recs_ana,
            _,
            _,
        ) = Kirchhoff._traveltime_table(z, x, s2d, r2d, v0, mode="analytic")
        (
            trav_srcs_eik,
            trav_recs_eik,
            dist_srcs_eik,
            dist_recs_eik,
            _,
            _,
        ) = Kirchhoff._traveltime_table(
            z, x, s2d, r2d, v0 * np.ones((PAR["nx"], PAR["nz"])), mode="eikonal"
        )
        assert_array_almost_equal(trav_srcs_ana, trav_srcs_eik, decimal=2)
        # fix: the original compared trav_recs_ana with itself (always true)
        assert_array_almost_equal(trav_recs_ana, trav_recs_eik, decimal=2)
        # 3d
        (
            trav_srcs_ana,
            trav_recs_ana,
            dist_srcs_ana,
            dist_recs_ana,
            _,
            _,
        ) = Kirchhoff._traveltime_table(z, x, s3d, r3d, v0, y=y, mode="analytic")
        (
            trav_srcs_eik,
            trav_recs_eik,
            dist_srcs_eik,
            dist_recs_eik,
            _,
            _,
        ) = Kirchhoff._traveltime_table(
            z,
            x,
            s3d,
            r3d,
            v0 * np.ones((PAR["ny"], PAR["nx"], PAR["nz"])),
            y=y,
            mode="eikonal",
        )
        assert_array_almost_equal(trav_srcs_ana, trav_srcs_eik, decimal=2)
        assert_array_almost_equal(trav_recs_ana, trav_recs_eik, decimal=2)
        # fix: the original also asserted trav_ana vs trav_eik here, which were
        # stale variables left over from the 2d section
@pytest.mark.parametrize("par", [(par1), (par2), (par3), (par1d), (par2d), (par3d)])
def test_kirchhoff2d(par):
    """Dot-test for Kirchhoff operator"""
    vel = v0 * np.ones((PAR["nx"], PAR["nz"]))
    if par["mode"] == "byot":
        # byot (bring-your-own-table): precompute total traveltimes by
        # summing source-side and receiver-side analytic tables
        trav_srcs, trav_recs, _, _, _, _ = Kirchhoff._traveltime_table(
            z, x, s2d, r2d, v0, mode="analytic"
        )
        trav = trav_srcs.reshape(
            PAR["nx"] * PAR["nz"], PAR["nsx"], 1
        ) + trav_recs.reshape(PAR["nx"] * PAR["nz"], 1, PAR["nrx"])
        trav = trav.reshape(PAR["nx"] * PAR["nz"], PAR["nsx"] * PAR["nrx"])
        amp = None
        # amp = 1 / (dist + 1e-2 * dist.max())
    else:
        trav = None
        amp = None
    # eikonal mode needs skfmm; skip silently when it is unavailable
    if skfmm_enabled or par["mode"] != "eikonal":
        Dop = Kirchhoff(
            z,
            x,
            t,
            s2d,
            r2d,
            vel if par["mode"] == "eikonal" else v0,
            wav,
            wavc,
            y=None,
            trav=trav,
            amp=amp,
            mode=par["mode"],
        )
        # data space: (nsx * nrx * nt); model space: (nx * nz)
        assert dottest(Dop, PAR["nsx"] * PAR["nrx"] * PAR["nt"], PAR["nz"] * PAR["nx"])
@pytest.mark.parametrize("par", [(par1), (par2), (par3)])
def test_kirchhoff3d(par):
    """Dot-test for Kirchhoff operator"""
    vel = v0 * np.ones((PAR["ny"], PAR["nx"], PAR["nz"]))
    if par["mode"] == "byot":
        # byot (bring-your-own-table): precompute total traveltimes by
        # summing source-side and receiver-side analytic tables
        trav_srcs, trav_recs, _, _, _, _ = Kirchhoff._traveltime_table(
            z, x, s3d, r3d, v0, y=y, mode="analytic"
        )
        trav = trav_srcs.reshape(
            PAR["ny"] * PAR["nx"] * PAR["nz"], PAR["nsy"] * PAR["nsx"], 1
        ) + trav_recs.reshape(
            PAR["ny"] * PAR["nx"] * PAR["nz"], 1, PAR["nry"] * PAR["nrx"]
        )
        trav = trav.reshape(
            PAR["ny"] * PAR["nx"] * PAR["nz"],
            PAR["nsy"] * PAR["nry"] * PAR["nsx"] * PAR["nrx"],
        )
    else:
        trav = None
    # eikonal mode needs skfmm; skip silently when it is unavailable
    if skfmm_enabled or par["mode"] != "eikonal":
        Dop = Kirchhoff(
            z,
            x,
            t,
            s3d,
            r3d,
            vel if par["mode"] == "eikonal" else v0,
            wav,
            wavc,
            y=y,
            trav=trav,
            mode=par["mode"],
        )
        # data space: (ns * nr * nt); model space: (ny * nx * nz)
        assert dottest(
            Dop,
            PAR["nsx"] * PAR["nrx"] * PAR["nsy"] * PAR["nry"] * PAR["nt"],
            PAR["nz"] * PAR["nx"] * PAR["ny"],
        )
@pytest.mark.parametrize(
    "par",
    [
        (par1),
        (par1d),
    ],
)
def test_kirchhoff2d_trav_vs_travsrcrec(par):
    """Compare 2D Kirchhoff operator forward and adjoint when using trav (original behavior)
    or trav_src and trav_rec (new recommended behaviour)"""
    # new behaviour: operator computes its own src/rec traveltime tables
    Dop = Kirchhoff(
        z,
        x,
        t,
        s2d,
        r2d,
        v0,
        wav,
        wavc,
        y=None,
        mode=par["mode"],
        dynamic=par["dynamic"],
        angleaperture=None,
    )
    # old behaviour: rebuild the combined (src + rec) traveltime table from
    # the tables the new operator exposes, and pass it in explicitly
    trav = Dop.trav_srcs.reshape(
        PAR["nx"] * PAR["nz"], PAR["nsx"], 1
    ) + Dop.trav_recs.reshape(PAR["nx"] * PAR["nz"], 1, PAR["nrx"])
    trav = trav.reshape(PAR["nx"] * PAR["nz"], PAR["nsx"] * PAR["nrx"])
    if par["dynamic"]:
        # dynamic weighting: amplitude built from combined distances and
        # incidence angles — presumably mirroring the operator's internal
        # weighting; TODO confirm against Kirchhoff implementation
        dist = Dop.dist_srcs.reshape(
            PAR["nx"] * PAR["nz"], PAR["nsx"], 1
        ) + Dop.dist_recs.reshape(PAR["nx"] * PAR["nz"], 1, PAR["nrx"])
        dist = dist.reshape(PAR["nx"] * PAR["nz"], PAR["nsx"] * PAR["nrx"])
        cosangle = np.cos(Dop.angle_srcs).reshape(
            PAR["nx"] * PAR["nz"], PAR["nsx"], 1
        ) + np.cos(Dop.angle_recs).reshape(PAR["nx"] * PAR["nz"], 1, PAR["nrx"])
        cosangle = cosangle.reshape(PAR["nx"] * PAR["nz"], PAR["nsx"] * PAR["nrx"])
        # epsdist stabilizes the 1/dist weight near zero offset
        epsdist = 1e-2
        amp = 1 / (dist + epsdist * np.max(dist))
        amp *= np.abs(cosangle)
        amp /= v0
    D1op = Kirchhoff(
        z,
        x,
        t,
        s2d,
        r2d,
        v0,
        wav,
        wavc,
        y=None,
        trav=trav,
        amp=amp if par["dynamic"] else None,
        mode=par["mode"],
        dynamic=par["dynamic"],
        angleaperture=None,
    )
    # forward: both operators must produce (almost) identical data
    xx = np.random.normal(0, 1, PAR["nx"] * PAR["nz"])
    assert_array_almost_equal(Dop @ xx, D1op @ xx, decimal=2)
    # adjoint: both operators must produce (almost) identical images
    yy = np.random.normal(0, 1, PAR["nrx"] * PAR["nsx"] * PAR["nt"])
    assert_array_almost_equal(Dop.H @ yy, D1op.H @ yy, decimal=2)
@pytest.mark.parametrize(
    "par",
    [
        (par1),
    ],
)
def test_kirchhoff3d_trav_vs_travsrcrec(par):
    """Compare 3D Kirchhoff operator forward and adjoint when using trav (original behavior)
    or trav_src and trav_rec (new recommended behaviour)"""
    # new behaviour: operator computes its own src/rec traveltime tables
    Dop = Kirchhoff(
        z,
        x,
        t,
        s3d,
        r3d,
        v0,
        wav,
        wavc,
        y=y,
        mode=par["mode"],
    )
    # old behaviour: rebuild the combined (src + rec) traveltime table from
    # the tables the new operator exposes, and pass it in explicitly
    trav = Dop.trav_srcs.reshape(
        PAR["ny"] * PAR["nx"] * PAR["nz"], PAR["nsy"] * PAR["nsx"], 1
    ) + Dop.trav_recs.reshape(
        PAR["ny"] * PAR["nx"] * PAR["nz"], 1, PAR["nry"] * PAR["nrx"]
    )
    trav = trav.reshape(
        PAR["ny"] * PAR["nx"] * PAR["nz"],
        PAR["nsy"] * PAR["nsx"] * PAR["nry"] * PAR["nrx"],
    )
    D1op = Kirchhoff(
        z,
        x,
        t,
        s3d,
        r3d,
        v0,
        wav,
        wavc,
        y=y,
        trav=trav,
        mode=par["mode"],
    )
    # forward: both operators must produce (almost) identical data
    xx = np.random.normal(0, 1, PAR["ny"] * PAR["nx"] * PAR["nz"])
    assert_array_almost_equal(Dop @ xx, D1op @ xx, decimal=2)
    # adjoint: both operators must produce (almost) identical images
    yy = np.random.normal(
        0, 1, PAR["nry"] * PAR["nrx"] * PAR["nsy"] * PAR["nsx"] * PAR["nt"]
    )
    assert_array_almost_equal(Dop.H @ yy, D1op.H @ yy, decimal=2)
| 10,879 | 25.280193 | 92 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.