repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_maximum.py | import numpy as np
def entropy_maximum(signal):
    """**Maximum Entropy (MaxEn)**

    Returns an upper bound for the entropy of a random variable, so that an empirical entropy
    (e.g., obtained with :func:`entropy_shannon`) is guaranteed to lie between 0 and this value.
    It is therefore useful for normalizing empirical entropy indices (some algorithms do this by
    default).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    --------
    maxen : float
        The maximum entropy of the signal.
    info : dict
        An empty dictionary returned for consistency with the other complexity functions.

    See Also
    --------
    entropy_shannon

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = [1, 1, 5, 5, 2, 8, 1]
      maxen, _ = nk.entropy_maximum(signal)
      maxen

    """
    # Maximum entropy corresponds to a uniform distribution over the
    # distinct states observed in the signal.
    n_states = np.unique(signal).size
    return np.log2(n_states), {}
| 1,037 | 23.714286 | 96 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_density.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from .entropy_shannon import entropy_shannon
from .optim_complexity_tolerance import complexity_tolerance
from .utils_complexity_embedding import complexity_embedding
def fractal_density(signal, delay=1, tolerance="sd", bins=None, show=False, **kwargs):
    """**Density Fractal Dimension (DFD)**

    This is a **Work in Progress (WIP)**. The idea is to find a way of, essentially, averaging
    attractors. Because one can not directly average the trajectories, one way is to convert the
    attractor to a 2D density matrix that we can use similarly to a time-frequency heatmap. However,
    it is very unclear how to then derive meaningful indices from this density plot. Also, how many
    bins, or smoothing, should one use?

    Basically, this index is exploratory and should not be used in its state. However, if you're
    interested in the problem of "average" attractors (e.g., from multiple epochs / trials), and
    you want to think about it with us, feel free to let us know!

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    bins : int
        If not ``None`` but an integer, will use this value for the number of bins instead of a
        value based on the ``tolerance`` parameter.
    show : bool
        Plot the density matrix. Defaults to ``False``.
    **kwargs
        Other arguments to be passed to :func:`_fractal_density` (e.g., ``method``).

    Returns
    ---------
    dfd : float
        The density fractal dimension.
    info : dict
        A dictionary containing additional information (the per-epoch density maps and their
        grand average).

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=[5, 9], noise=0.01)

      @savefig p_fractal_density1.png scale=100%
      dfd, _ = nk.fractal_density(signal, delay=20, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      signal = nk.signal_simulate(duration=4, frequency=[5, 10, 11], noise=0.01)
      epochs = nk.epochs_create(signal, events=20)

      @savefig p_fractal_density2.png scale=100%
      dfd, info1 = nk.fractal_density(epochs, delay=20, bins=20, show=True)
      @suppress
      plt.close()

    Compare the complexity of two signals.

    .. warning::

        Help is needed to find a way to make statistics and comparing two density maps.

    .. ipython:: python

      import matplotlib.pyplot as plt

      sig2 = nk.signal_simulate(duration=4, frequency=[4, 12, 14], noise=0.01)
      epochs2 = nk.epochs_create(sig2, events=20)
      dfd, info2 = nk.fractal_density(epochs2, delay=20, bins=20)

      # Difference between two density maps
      D = info1["Average"] - info2["Average"]

      @savefig p_fractal_density3.png scale=100%
      plt.imshow(nk.standardize(D), cmap='RdBu')
      @suppress
      plt.close()

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    if isinstance(signal, (np.ndarray, list)):
        # This index is made to work on epochs (dict of DataFrames), so if the input is a plain
        # vector, wrap it as a single pseudo-epoch.
        signal = {"1": pd.DataFrame({"Signal": signal})}
    # Get edges and tolerance from first epoch. Imperfect but what to do?
    # NOTE(review): assumes the epochs dict always contains a key "1" (presumably the
    # epochs_create() convention) — verify against callers.
    edges = np.percentile(signal["1"]["Signal"].values, [1, 99])
    if bins is None:
        tolerance, _ = complexity_tolerance(signal["1"]["Signal"].values, method="sd")
        # Compute number of "bins" so that each bin is roughly one tolerance-radius wide
        bins = int((edges[1] - edges[0]) / tolerance)
    # Prepare the container for the 2D density matrix (one slice per epoch)
    X = np.empty((bins, bins, len(signal)))
    for i, (k, epoch) in enumerate(signal.items()):
        X[:, :, i] = _fractal_density(
            epoch["Signal"].dropna().values, edges, bins=bins, delay=delay, **kwargs
        )
    # Compute grand average across epochs
    grand_av = np.mean(X, axis=-1)
    # Compute DFD: Shannon entropy of the histogram of the averaged density values
    freq, x = np.histogram(grand_av, bins=bins)
    dfd, _ = entropy_shannon(freq=freq)
    if show is True:
        # Left: the averaged density map; right: the histogram used for the entropy
        fig, ax = plt.subplots(1, 2)
        ax[0].imshow(grand_av)
        ax[1].bar(x[1::] - np.diff(x) / 2, height=freq, width=np.diff(x))
    return dfd, {"Density": X, "Average": grand_av}
# =============================================================================
# Utilities
# =============================================================================
def _fractal_density(signal, edges, bins, delay=1, method="histogram"):
    """Return the 2D log-density map of one epoch's time-delay attractor.

    The signal is embedded in 2 dimensions, then its trajectory is rasterized
    either with a 2D histogram or a Gaussian KDE evaluated on a regular grid.
    """
    trajectory = complexity_embedding(signal, delay=delay, dimension=2)
    if method == "histogram":
        # Identical bin edges on both axes of the embedding space
        breaks = np.linspace(edges[0], edges[1], bins + 1)
        density, _ = np.histogramdd(
            trajectory,
            bins=(breaks, breaks),
            density=False,
        )
    else:
        # Kernel density estimate, bandwidth scaled to one bin width
        kde = scipy.stats.gaussian_kde(trajectory.T)
        kde.set_bandwidth(bw_method=(edges[1] - edges[0]) / bins)
        # Evaluate the KDE on a regular (bins x bins) grid
        axis_vals = np.linspace(edges[0], edges[1], bins)
        xx, yy = np.meshgrid(axis_vals, axis_vals)
        grid_points = np.column_stack((xx.reshape(-1), yy.reshape(-1)))
        density = np.reshape(kde(grid_points.T), (bins, bins))
    # Log-compress so that sparse regions remain visible
    return np.log(1 + density)
| 5,842 | 34.628049 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/information_fishershannon.py | import numpy as np
import scipy.integrate
from .entropy_power import entropy_power
def fishershannon_information(signal, **kwargs):
    """**Fisher-Shannon Information (FSI)**

    The :func:`Shannon Entropy Power <entropy_power>` is closely related to the Fisher
    Information Measure (FIM); their product yields the Fisher-Shannon Information index.

    .. warning::

        We are not sure at all about the correct implementation of this function. Please consider
        helping us by double-checking the code against the formulas in the references.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    **kwargs
        Other arguments to be passed to :func:`density_bandwidth`.

    Returns
    -------
    fsi : float
        The computed FSI.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_power

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, frequency=[10, 12], noise=0.1)

      fsi, info = nk.fishershannon_information(signal, method=0.01)
      fsi

    References
    ----------
    * Guignard, F., Laib, M., Amato, F., & Kanevski, M. (2020). Advanced analysis of temporal data
      using Fisher-Shannon information: theoretical development and application in geosciences.
      Frontiers in Earth Science, 8, 255.
    * Vignat, C., & Bercher, J. F. (2003). Analysis of signals in the Fisher-Shannon information
      plane. Physics Letters A, 312(1-2), 27-33.

    """
    # Shannon Power Entropy (also provides the estimated density and its support)
    powen, info = entropy_power(signal, **kwargs)
    values = info["Values"]
    density = info["Density"]

    # Fisher Information Measure: integral of f'(x)^2 / f(x) over the support
    slope = np.gradient(density)
    fim = scipy.integrate.simpson(slope ** 2 / density, x=values)
    info["FI"] = fim

    # Fisher-Shannon Complexity (theoretically >= 1; small values may indicate
    # issues with the density estimation / bandwidth / sample size)
    return powen * fim, info
| 2,282 | 27.898734 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_spectral.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..signal.signal_psd import signal_psd
from .entropy_shannon import entropy_shannon
def entropy_spectral(signal, bins=None, show=False, **kwargs):
    """**Spectral Entropy (SpEn)**

    Treats the signal's normalized power spectrum density (PSD) as a probability distribution and
    computes its Shannon entropy:

    .. math:: H(x, sf) = -\\sum P(f) \\log_2[P(f)]

    A pure sinusoid (single frequency component) yields the smallest entropy, whereas white noise
    (all frequency components with equal power) yields the greatest.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    bins : int
        If an integer is passed, will cut the PSD into a number of bins of frequency.
    show : bool
        Display the power spectrum.
    **kwargs : optional
        Keyword arguments to be passed to ``signal_psd()``.

    Returns
    -------
    SpEn : float
        Spectral Entropy (normalized between 0 and 1).
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, entropy_wiener, .signal_psd

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6, 10], noise=0.1)
      SpEn, info = nk.entropy_spectral(signal)
      SpEn

    References
    ----------
    * Crepeau, J. C., & Isaacson, L. K. (1991). Spectral Entropy Measurements of Coherent
      Structures in an Evolving Shear Layer. Journal of Non-Equilibrium Thermodynamics, 16(2).
      doi:10.1515/jnet.1991.16.2.137

    """
    # Only univariate signals are supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Power-spectrum density (PSD) (actual sampling rate does not matter)
    psd = signal_psd(signal, sampling_rate=1000, **kwargs)

    # Optionally aggregate power within frequency bins
    if isinstance(bins, int):
        psd = psd.groupby(pd.cut(psd["Frequency"], bins=bins)).agg("sum")
        labels = psd.index.values.astype(str)
    else:
        labels = psd["Frequency"].values

    # Normalize so the spectrum sums to 1 (like a probability distribution)
    psd["Power"] = psd["Power"] / psd["Power"].sum()

    if show is True:
        plt.bar(labels, psd["Power"])
        # Interval labels (binned case) are long strings; rotate for readability
        if not np.issubdtype(labels.dtype, np.floating):
            plt.xticks(rotation=90)
        plt.title("Normalized Power Spectrum")
        plt.xlabel("Frequency (Hz)")
        plt.ylabel("Normalized Power")

    # Shannon entropy of the normalized spectrum, rescaled to [0, 1]
    spen, _ = entropy_shannon(freq=psd["Power"].values)
    spen = spen / np.log2(len(psd))
    return spen, {"PSD": psd}
| 3,458 | 29.078261 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_rqa.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import find_groups
from .entropy_shannon import entropy_shannon
from .optim_complexity_tolerance import complexity_tolerance
from .utils_recurrence_matrix import recurrence_matrix
def complexity_rqa(
    signal, dimension=3, delay=1, tolerance="sd", min_linelength=2, method="python", show=False
):
    """**Recurrence Quantification Analysis (RQA)**

    A :func:`recurrence plot <recurrence_matrix>` is based on a time-delay embedding representation
    of a signal and is a 2D depiction of when a system revisits a state that is has been in the
    past.

    Recurrence quantification analysis (RQA) is a method of complexity analysis
    for the investigation of dynamical systems. It quantifies the number and duration
    of recurrences of a dynamical system presented by its phase space trajectory.

    .. figure:: ../img/douglas2022c.png
       :alt: Illustration of RQA (Douglas et al., 2022).

    Features include:

    * **Recurrence rate (RR)**: Proportion of points that are labelled as recurrences. Depends on
      the radius *r*.
    * **Determinism (DET)**: Proportion of recurrence points which form diagonal lines. Indicates
      autocorrelation.
    * **Divergence (DIV)**: The inverse of the longest diagonal line length (*LMax*).
    * **Laminarity (LAM)**: Proportion of recurrence points which form vertical lines. Indicates the
      amount of laminar phases (intermittency).
    * **Trapping Time (TT)**: Average length of vertical black lines.
    * **L**: Average length of diagonal black lines. Average duration that a system is staying in
      the same state.
    * **LEn**: Entropy of diagonal lines lengths.
    * **VMax**: Longest vertical line length.
    * **VEn**: Entropy of vertical lines lengths.
    * **W**: Average white vertical line length.
    * **WMax**: Longest white vertical line length.
    * **WEn**: Entropy of white vertical lines lengths.
    * **DeteRec**: The ratio of determinism / recurrence rate.
    * **LamiDet**: The ratio of laminarity / determinism.
    * **DiagRec**: Diagonal Recurrence Rates, capturing the magnitude of autocorrelation at
      different lags, which is related to fractal fluctuations. See Tomashin et al. (2022),
      approach 3.

    .. note::

        More feature exist for RQA, such as the `trend <https://juliadynamics.github.io/
        DynamicalSystems.jl/dev/rqa/quantification/#RecurrenceAnalysis.trend>`_. We would like to
        add them, but we need help. Get in touch if you're interested!

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    min_linelength : int
        Minimum length of diagonal and vertical lines. Default to 2.
    method : str
        Can be ``"pyrqa"`` to use the *PyRQA* package (requires to install it first).
    show : bool
        Visualise recurrence matrix.

    Returns
    ----------
    rqa : DataFrame
        The RQA results.
    info : dict
        A dictionary containing additional information regarding the parameters used to compute
        RQA (including the recurrence matrix and, for the Python method, the distance matrix).

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=5, sampling_rate=100, frequency=[5, 6, 7], noise=0.2)

      # RQA
      @savefig p_complexity_rqa1.png scale=100%
      results, info = nk.complexity_rqa(signal, tolerance=1, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      results

      # Compare to PyRQA
      # results1, info = nk.complexity_rqa(signal, tolerance=1, show=True, method = "pyrqa")

    References
    ----------
    * Rawald, T., Sips, M., Marwan, N., & Dransch, D. (2014). Fast computation of recurrences in
      long time series. In Translational Recurrences (pp. 17-29). Springer, Cham.
    * Tomashin, A., Leonardi, G., & Wallot, S. (2022). Four Methods to Distinguish between Fractal
      Dimensions in Time Series through Recurrence Quantification Analysis. Entropy, 24(9), 1314.

    """
    # Resolve the tolerance (r) once; it is reused by both back-ends and reported to the user
    info = {
        "Tolerance": complexity_tolerance(
            signal, method=tolerance, delay=delay, dimension=dimension
        )[0]
    }
    if method == "pyrqa":
        # Optional PyRQA backend (raises ImportError with instructions if not installed)
        data = _complexity_rqa_pyrqa(
            signal,
            delay=delay,
            dimension=dimension,
            tolerance=info["Tolerance"],
            linelength=min_linelength,
        )
        # PyRQA returns the matrix reversed; flip it back for a consistent orientation
        rc = np.flip(data.pop("Recurrence_Matrix"), axis=0)
        info["Recurrence_Matrix"] = rc
    else:
        # Get recurrence matrix (rc) and the underlying distance matrix (dm)
        rc, dm = recurrence_matrix(
            signal,
            delay=delay,
            dimension=dimension,
            tolerance=info["Tolerance"],
        )
        info["Recurrence_Matrix"] = rc
        info["Distance_Matrix"] = dm
        # Compute features with the pure-Python implementation
        data = _complexity_rqa_features(rc, min_linelength=min_linelength)
    data = pd.DataFrame(data, index=[0])
    if show is True:
        try:
            plt.imshow(rc, cmap="Greys")
            # Flip the matrix to match traditional RQA representation
            plt.gca().invert_yaxis()
            plt.title("Recurrence Matrix")
            plt.ylabel("Time")
            plt.xlabel("Time")
        except MemoryError as e:
            raise MemoryError(
                "NeuroKit error: complexity_rqa(): the recurrence plot is too large to display. ",
                "You can recover the matrix from the parameters and try to display parts of it.",
            ) from e
    return data, info
def _complexity_rqa_features(rc, min_linelength=2):
    """Compute RQA features from a binary recurrence matrix ``rc``.

    Parameters
    ----------
    rc : np.ndarray
        Square binary recurrence matrix (1 = recurrence).
    min_linelength : int
        Minimum length for a run of 1s (or 0s for white lines) to count as a line.

    Returns
    -------
    dict
        RQA features (RecurrenceRate, Determinism, Laminarity, line-length
        statistics, and derived ratios such as DeteRec and LamiDet).
    """
    width = len(rc)

    # Recurrence Rate (RR)
    # --------------------------------------------------
    # Indices of the lower triangular (without the diagonal); the matrix is
    # symmetric so only one half is needed
    idx = np.tril_indices(width, k=-1)
    # Compute percentage of recurrent points
    data = {"RecurrenceRate": (rc[idx].sum()) / len(rc[idx])}

    # Find diagonal lines
    # --------------------------------------------------
    diag_lines = []
    recdiag = np.zeros(width)
    # All diagonals except the main one (0)
    for i in range(1, width):
        diag = np.diagonal(rc, offset=i)  # Get diagonal
        recdiag[i - 1] = np.sum(diag) / len(diag)
        # Split into consecutive runs and keep the runs of 1s
        diag_lines.extend([grp for grp in find_groups(diag) if grp[0] == 1])

    # Diagonal Recurrence Rates (Diag %REC), Tomashin et al. (2022)
    distance = np.arange(1, width + 1)[recdiag > 0]
    recdiag = recdiag[recdiag > 0]
    if len(recdiag) > 2:
        # Slope of the log-log relation between lag and recurrence rate
        data["DiagRec"] = np.polyfit(np.log2(distance), np.log2(recdiag), 1)[0]
    else:
        data["DiagRec"] = np.nan

    # Get lengths and exclude lines shorter than min_linelength
    diag_lengths = np.array([len(line) for line in diag_lines])
    diag_lengths = diag_lengths[diag_lengths >= min_linelength]

    # Compute diagonal-based features
    if data["RecurrenceRate"] == 0:
        data["Determinism"] = np.nan
        data["DeteRec"] = np.nan
    else:
        data["Determinism"] = diag_lengths.sum() / rc[idx].sum()
        data["DeteRec"] = data["Determinism"] / data["RecurrenceRate"]
    data["L"] = 0 if len(diag_lengths) == 0 else np.mean(diag_lengths)
    data["Divergence"] = np.nan if len(diag_lengths) == 0 else 1 / np.max(diag_lengths)
    data["LEn"] = entropy_shannon(
        freq=np.unique(diag_lengths, return_counts=True)[1],
        base=np.e,
    )[0]

    # Find vertical lines
    # --------------------------------------------------
    black_lines = []
    white_lines = []
    for i in range(width - 1):
        verti = find_groups(rc[i, i + 1 :])
        black_lines.extend([grp for grp in verti if grp[0] == 1])
        white_lines.extend([grp for grp in verti if grp[0] == 0])

    # Get lengths and exclude lines shorter than min_linelength
    black_lengths = np.array([len(line) for line in black_lines])
    white_lengths = np.array([len(line) for line in white_lines])
    black_lengths = black_lengths[black_lengths >= min_linelength]
    white_lengths = white_lengths[white_lengths >= min_linelength]

    # Compute vertical-based features
    if rc[idx].sum() == 0:
        data["Laminarity"] = np.nan
    else:
        data["Laminarity"] = black_lengths.sum() / rc[idx].sum()
    if data["Determinism"] == 0 or np.isnan(data["Determinism"]):
        data["LamiDet"] = np.nan
    else:
        # BUG FIX: the ratio was previously computed but never assigned, so the
        # "LamiDet" key was missing from the output (the pyrqa backend does
        # return it, so the two methods had inconsistent output schemas).
        data["LamiDet"] = data["Laminarity"] / data["Determinism"]
    data["TrappingTime"] = 0 if len(black_lengths) == 0 else np.nanmean(black_lengths)
    data["VMax"] = 0 if len(black_lengths) == 0 else np.nanmax(black_lengths)
    data["VEn"] = entropy_shannon(
        freq=np.unique(black_lengths, return_counts=True)[1],
        base=np.e,
    )[0]
    data["W"] = 0 if len(white_lengths) == 0 else np.nanmean(white_lengths)
    data["WMax"] = 0 if len(white_lengths) == 0 else np.nanmax(white_lengths)
    data["WEn"] = entropy_shannon(
        freq=np.unique(white_lengths, return_counts=True)[1],
        base=np.e,
    )[0]
    return data
# =============================================================================
# PyRQA
# =============================================================================
def _complexity_rqa_pyrqa(signal, dimension=3, delay=1, tolerance=0.1, linelength=2):
    """Compute RQA features using the optional *PyRQA* package (used by complexity_rqa)."""
    # PyRQA is an optional dependency; fail with installation instructions
    try:
        import pyrqa.analysis_type
        import pyrqa.computation
        import pyrqa.image_generator
        import pyrqa.metric
        import pyrqa.neighbourhood
        import pyrqa.settings
        import pyrqa.time_series
    except (ModuleNotFoundError, ImportError) as e:
        raise ImportError(
            "NeuroKit error: complexity_rqa(): the 'pyrqa' module is required for this function to run. ",
            "Please install it first (`pip install PyRQA`).",
        ) from e

    # Fixed-radius neighbourhood and time-delay embedding of the signal
    neighbourhood = pyrqa.neighbourhood.FixedRadius(tolerance)
    series = pyrqa.time_series.TimeSeries(
        signal, embedding_dimension=dimension, time_delay=delay
    )
    settings = pyrqa.settings.Settings(
        series,
        analysis_type=pyrqa.analysis_type.Classic,
        neighbourhood=neighbourhood,
        similarity_measure=pyrqa.metric.EuclideanMetric,
        theiler_corrector=1,
    )

    # Run the RQA computation, then set the minimum line lengths
    rqa = pyrqa.computation.RQAComputation.create(settings, verbose=False).run()
    rqa.min_diagonal_line_length = linelength
    rqa.min_vertical_line_length = linelength
    rqa.min_white_vertical_line_length = linelength

    # Recurrence plot (matrix) itself
    rp = pyrqa.computation.RPComputation.create(settings, verbose=False).run()

    features = {
        "RecurrenceRate": rqa.recurrence_rate,
        "Determinism": rqa.determinism,
        "Divergence": rqa.divergence,
        "Laminarity": rqa.laminarity,
        "TrappingTime": rqa.trapping_time,
        "DeteRec": rqa.determinism / rqa.recurrence_rate,
        "LamiDet": rqa.laminarity / rqa.determinism,
        "L": rqa.average_diagonal_line,
        "LEn": rqa.entropy_diagonal_lines,
        "VMax": rqa.longest_vertical_line,
        "VEn": rqa.entropy_vertical_lines,
        "W": rqa.average_white_vertical_line,
        "WMax": rqa.longest_white_vertical_line,
        "W_div": rqa.longest_white_vertical_line_inverse,
        "WEn": rqa.entropy_white_vertical_lines,
        "Recurrence_Matrix": rp.recurrence_matrix_reverse,  # recurrence_matrix_reverse_normalized
    }
    return features
| 12,320 | 37.503125 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_approximate.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .optim_complexity_tolerance import _entropy_apen, complexity_tolerance
from .utils import _get_count
def entropy_approximate(signal, delay=1, dimension=2, tolerance="sd", corrected=False, **kwargs):
    """**Approximate entropy (ApEn) and its corrected version (cApEn)**

    Quantifies the amount of regularity — and, conversely, the unpredictability — of fluctuations
    in a time series. ApEn is computationally cheap (it can be used on small samples, < 50 data
    points, and in real time) and is relatively insensitive to noise, but it depends heavily on
    the record length and lacks relative consistency.

    This function can be called either via ``entropy_approximate()`` or ``complexity_apen()``, and
    the corrected version via ``complexity_capen()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    corrected : bool
        If true, will compute corrected ApEn (cApEn), see Porta (2007).
    **kwargs
        Other arguments.

    See Also
    --------
    entropy_shannon, entropy_sample, entropy_fuzzy

    Returns
    ----------
    apen : float
        The approximate entropy of the single time series.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute approximate entropy.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)

      apen, parameters = nk.entropy_approximate(signal)
      apen

      capen, parameters = nk.entropy_approximate(signal, corrected=True)
      capen

    References
    -----------
    * Sabeti, M., Katebi, S., & Boostani, R. (2009). Entropy and complexity measures for EEG signal
      classification of schizophrenic and control participants. Artificial intelligence in
      medicine, 47(3), 263-274.
    * Shi, B., Zhang, Y., Yuan, C., Wang, S., & Li, P. (2017). Entropy analysis of short-term
      heartbeat interval time series during regular walking. Entropy, 19(10), 568.

    """
    # Only univariate signals are supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Resolve the tolerance (r) once and report all parameters back to the user
    r = complexity_tolerance(
        signal,
        method=tolerance,
        dimension=dimension,
        show=False,
    )[0]
    info = {
        "Dimension": dimension,
        "Delay": delay,
        "Tolerance": r,
        "Corrected": corrected,
    }

    # Compute index
    if corrected is False:
        # ApEn is implemented in 'optim_complexity_tolerance()' to avoid circular imports,
        # as one of the methods for optimizing tolerance relies on ApEn
        apen, _ = _entropy_apen(signal, delay, dimension, info["Tolerance"], **kwargs)
    else:
        apen = _entropy_capen(signal, delay, dimension, info["Tolerance"], **kwargs)
    return apen, info
# =============================================================================
# Utils
# =============================================================================
def _entropy_capen(signal, delay, dimension, tolerance, **kwargs):
    """Corrected Approximate Entropy (cApEn).

    Compares neighbour counts at embedding dimensions ``m`` and ``m + 1``; when
    either count equals 1 (the vector only matches itself within the tolerance),
    the log-ratio is replaced by a correction term ``log(1 / N_upper)``.

    Notes
    -----
    Performance fix: the original implementation called ``count.astype(int)``
    inside a Python loop over every vector, re-converting the full array on each
    iteration (accidentally O(n^2)). The computation is now fully vectorized.
    """
    __, count1, _ = _get_count(
        signal,
        delay=delay,
        dimension=dimension,
        tolerance=tolerance,
        approximate=True,
        **kwargs,
    )
    __, count2, _ = _get_count(
        signal,
        delay=delay,
        dimension=dimension + 1,
        tolerance=tolerance,
        approximate=True,
        **kwargs,
    )

    # Limit the number of vectors to N - (dimension + 1) * delay
    upper_limit = len(signal) - (dimension + 1) * delay

    # Correction to replace the ratio of count1 and count2 when either is equal to 1,
    # as when count = 1 only the vector itself is within r distance
    correction = 1 / upper_limit

    c1 = count1[:upper_limit]
    c2 = count2[:upper_limit]
    # Vectorized equivalent of the per-vector loop: use the log-ratio where both
    # vectors have at least one true neighbour, otherwise the correction term
    both_matched = (c1.astype(int) != 1) & (c2.astype(int) != 1)
    vector_similarity = np.where(both_matched, np.log(c2 / c1), np.log(correction))

    return -np.mean(vector_similarity)
| 5,130 | 33.904762 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_attractor.py | import matplotlib.pyplot as plt
import numpy as np
import scipy
def complexity_attractor(
    embedded="lorenz", alpha="time", color="last_dim", shadows=True, linewidth=1, **kwargs
):
    """**Attractor Graph**

    Create an attractor graph from an :func:`embedded <complexity_embedding>` time series.

    Parameters
    ----------
    embedded : Union[str, np.ndarray]
        Output of ``complexity_embedding()``. Can also be a string, such as ``"lorenz"`` (Lorenz
        attractor) or ``"rossler"`` (Rössler attractor).
    alpha : Union[str, float]
        Transparency of the lines. If ``"time"``, the lines will be transparent as a function of
        time (slow).
    color : str
        Color of the plot. If ``"last_dim"``, the last dimension (max 4th) of the embedded data
        will be used when the dimensions are higher than 2. Useful to visualize the depth (for
        3-dimensions embedding), or the fourth dimension, but it is slow.
    shadows : bool
        If ``True``, 2D projections will be added to the sides of the 3D attractor.
    linewidth : float
        Set the line width in points.
    **kwargs
        Additional keyword arguments are passed to the color palette (e.g., ``name="plasma"``), or
        to the Lorenz system simulator, such as ``duration`` (default = 100), ``sampling_rate``
        (default = 10), ``sigma`` (default = 10), ``beta`` (default = 8/3), ``rho`` (default = 28).

    Returns
    -------
    matplotlib.figure.Figure
        The attractor figure.

    See Also
    ------------
    complexity_embedding

    Examples
    ---------
    **Lorenz attractors**

    .. ipython:: python

      import neurokit2 as nk

      @savefig p_complexity_attractor1.png scale=100%
      fig = nk.complexity_attractor(color = "last_dim", alpha="time", duration=1)
      @suppress
      plt.close()

    .. ipython:: python

      # Fast result (fixed alpha and color)
      @savefig p_complexity_attractor2.png scale=100%
      fig = nk.complexity_attractor(color = "red", alpha=1, sampling_rate=5000, linewidth=0.2)
      @suppress
      plt.close()

    **Rössler attractors**

    .. ipython:: python

      @savefig p_complexity_attractor3.png scale=100%
      nk.complexity_attractor("rossler", color = "blue", alpha=1, sampling_rate=5000)
      @suppress
      plt.close()

    **2D Attractors using a signal**

    .. ipython:: python

      # Simulate Signal
      signal = nk.signal_simulate(duration=10, sampling_rate=100, frequency = [0.1, 5, 7, 10])

      # 2D Attractor
      embedded = nk.complexity_embedding(signal, delay = 3, dimension = 2)

      # Fast (fixed alpha and color)
      @savefig p_complexity_attractor4.png scale=100%
      nk.complexity_attractor(embedded, color = "red", alpha = 1)
      @suppress
      plt.close()

    .. ipython:: python

      # Slow
      @savefig p_complexity_attractor5.png scale=100%
      nk.complexity_attractor(embedded, color = "last_dim", alpha = "time")
      @suppress
      plt.close()

    **3D Attractors using a signal**

    .. ipython:: python

      # 3D Attractor
      embedded = nk.complexity_embedding(signal, delay = 3, dimension = 3)

      # Fast (fixed alpha and color)
      @savefig p_complexity_attractor6.png scale=100%
      nk.complexity_attractor(embedded, color = "red", alpha = 1)
      @suppress
      plt.close()

    .. ipython:: python

      # Slow
      @savefig p_complexity_attractor7.png scale=100%
      nk.complexity_attractor(embedded, color = "last_dim", alpha = "time")
      @suppress
      plt.close()

    **Animated Rotation**

    .. ipython:: python

      import matplotlib.animation as animation
      import IPython

      fig = nk.complexity_attractor(embedded, color = "black", alpha = 0.5, shadows=False)
      ax = fig.get_axes()[0]

      def rotate(angle):
          ax.view_init(azim=angle)

      anim = animation.FuncAnimation(fig, rotate, frames=np.arange(0, 361, 10), interval=10)
      IPython.display.HTML(anim.to_jshtml())

    """
    # A string selects one of the simulated attractor systems (e.g., "lorenz", "rossler")
    if isinstance(embedded, str):
        embedded = _attractor_equation(embedded, **kwargs)

    # Parameters -----------------------------
    # Color
    if color == "last_dim":
        # Get data
        last_dim = min(3, embedded.shape[1] - 1)  # Find last dim with max = 3
        color = embedded[:, last_dim]
        # Create color palette (defaults to "plasma" unless 'name' is passed)
        palette = kwargs["name"] if "name" in kwargs else "plasma"
        cmap = plt.get_cmap(palette)
        colors = cmap(plt.Normalize(color.min(), color.max())(color))
    else:
        # One fixed color per point
        colors = [color] * len(embedded[:, 0])

    # Alpha: either fading along time or one fixed value per point
    if alpha == "time":
        alpha = np.linspace(0.01, 1, len(embedded[:, 0]))
    else:
        alpha = [alpha] * len(embedded[:, 0])

    # Plot ------------------------------------
    fig = plt.figure()
    # 2D
    if embedded.shape[1] == 2:
        ax = plt.axes(projection=None)
        # Fast path: uniform color and alpha allows a single plot() call
        if len(np.unique(colors)) == 1 and len(np.unique(alpha)) == 1:
            ax.plot(
                embedded[:, 0], embedded[:, 1], color=colors[0], alpha=alpha[0], linewidth=linewidth
            )
        # Slow path (per-segment color and/or alpha)
        else:
            ax = _attractor_2D(ax, embedded, colors, alpha, linewidth)
    # 3D
    else:
        ax = plt.axes(projection="3d")
        # Fast path: one call for the whole trajectory
        if len(np.unique(colors)) == 1 and len(np.unique(alpha)) == 1:
            ax = _attractor_3D_fast(ax, embedded, embedded, 0, colors, alpha, shadows, linewidth)
        else:
            ax = _attractor_3D(ax, embedded, colors, alpha, shadows, linewidth)
    return fig
# =============================================================================
# 2D Attractors
# =============================================================================
def _attractor_2D(ax, embedded, colors, alpha=0.8, linewidth=1.5):
    """Draw a 2D trajectory piece by piece so that every segment can carry its
    own color and transparency (slower than a single `plot` call, but needed
    for gradient coloring)."""
    xy = np.column_stack((embedded[:, 0], embedded[:, 1]))
    # Pair each point with its successor to obtain the individual segments
    for i, (start, end) in enumerate(zip(xy[:-1], xy[1:])):
        ax.plot(
            [start[0], end[0]],
            [start[1], end[1]],
            color=colors[i],
            alpha=alpha[i],
            linewidth=linewidth,
            solid_capstyle="round",
        )
    return ax
# =============================================================================
# Slow plots
# =============================================================================
def _attractor_3D_fast(ax, embedded, seg, i, colors, alpha, shadows, linewidth):
    """Plot one 3D segment, optionally preceded by its grey 2D projections
    ("shadows") on the three walls of the bounding box.

    `i` indexes into `colors`/`alpha` and offsets the z-order so that later
    segments are drawn on top of earlier ones, and the 3D line on top of all
    shadows.
    """
    n = len(embedded)
    if shadows is True:
        # (x data, y data, wall position, wall axis, z-order offset)
        walls = [
            (seg[:, 0], seg[:, 2], np.max(embedded[:, 1]), "y", 0),
            (seg[:, 1], seg[:, 2], np.min(embedded[:, 0]), "x", n),
            (seg[:, 0], seg[:, 1], np.min(embedded[:, 2]), "z", n * 2),
        ]
        for xdata, ydata, position, axis, offset in walls:
            ax.plot(
                xdata,
                ydata,
                zs=position,
                zdir=axis,
                color="lightgrey",
                alpha=alpha[i],
                linewidth=linewidth,
                zorder=i + 1 + offset,
                solid_capstyle="round",
            )
    # The actual 3D segment, always above every shadow
    ax.plot(
        seg[:, 0],
        seg[:, 1],
        seg[:, 2],
        color=colors[i],
        alpha=alpha[i],
        linewidth=linewidth,
        zorder=i + 1 + n * 3,
    )
    return ax
def _attractor_3D(ax, embedded, colors, alpha=0.8, shadows=True, linewidth=1.5):
    """Draw a 3D trajectory segment by segment so each piece can have its own
    color and transparency."""
    coords = np.stack((embedded[:, 0], embedded[:, 1], embedded[:, 2]), axis=-1)
    # Shape (n - 1, 2, 3): each entry is a (start, end) pair of 3D points
    pieces = np.stack((coords[:-1], coords[1:]), axis=1)
    for idx, piece in enumerate(pieces):
        ax = _attractor_3D_fast(ax, embedded, piece, idx, colors, alpha, shadows, linewidth)
    return ax
# =============================================================================
# Equations (must be located here to avoid circular imports from complexity_embedding)
# =============================================================================
def _attractor_equation(name, **kwargs):
    """Dispatch a named dynamical system to its simulation routine.

    Any name other than "lorenz" or "clifford" falls back to the Rössler
    system (matching the historical behavior).
    """
    simulators = {
        "lorenz": _attractor_lorenz,
        "clifford": _attractor_clifford,
    }
    return simulators.get(name, _attractor_rossler)(**kwargs)
def _attractor_lorenz(duration=1, sampling_rate=1000, sigma=10.0, beta=8.0 / 3, rho=28.0):
"""Simulate Data from Lorenz System"""
def lorenz_equation(coord, t0, sigma, beta, rho):
return [
sigma * (coord[1] - coord[0]),
coord[0] * (rho - coord[2]) - coord[1],
coord[0] * coord[1] - beta * coord[2],
]
x0 = [1, 1, 1] # starting vector
t = np.linspace(0, duration * 20, int(duration * sampling_rate))
return scipy.integrate.odeint(lorenz_equation, x0, t, args=(sigma, beta, rho))
def _attractor_rossler(duration=1, sampling_rate=1000, a=0.1, b=0.1, c=14):
"""Simulate Data from Rössler System"""
def rossler_equation(coord, t0, a, b, c):
return [-coord[1] - coord[2], coord[0] + a * coord[1], b + coord[2] * (coord[0] - c)]
x0 = [0.1, 0.0, 0.1] # starting vector
t = np.linspace(0, duration * 500, int(duration * sampling_rate))
return scipy.integrate.odeint(rossler_equation, x0, t, args=(a, b, c))
def _attractor_clifford(duration=1, sampling_rate=1000, a=-1.4, b=1.6, c=1.0, d=0.7, x0=0, y0=0):
"""Simulate Data from Clifford System
>>> import neurokit2 as nk
>>>
>>> emb = nk.complexity_embedding("clifford", sampling_rate=100000)
>>> plt.plot(emb[:, 0], emb[:, 1], '.', alpha=0.2, markersize=0.5) #doctest: +ELLIPSIS
[...
>>> emb = nk.complexity_embedding("clifford", sampling_rate=100000, a=1.9, b=1.0, c=1.9, d=-1.1)
>>> plt.plot(emb[:, 0], emb[:, 1], '.', alpha=0.2, markersize=0.5) #doctest: +ELLIPSIS
[...
"""
def clifford_equation(coord, a, b, c, d):
return [
np.sin(a * coord[1]) + c * np.cos(a * coord[0]),
np.sin(b * coord[0]) + d * np.cos(b * coord[1]),
]
emb = np.tile([x0, y0], (int(duration * sampling_rate), 1)).astype(float)
for i in range(len(emb) - 1):
emb[i + 1, :] = clifford_equation(emb[i, :], a, b, c, d)
return emb
| 10,479 | 30.95122 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_coalition.py | import numpy as np
import pandas as pd
import scipy.signal
from ..signal.signal_binarize import _signal_binarize_threshold
from ..signal.signal_detrend import signal_detrend
from .entropy_shannon import entropy_shannon
def entropy_coalition(signal, method="amplitude"):
    """**Amplitude Coalition Entropy (ACE) and Synchrony Coalition Entropy (SCE)**

    Amplitude Coalition Entropy (ACE) reflects the entropy over time of the constitution of the set
    of most active channels (Shanahan, 2010), and is similar to Lempel-Ziv complexity, in the sense
    that it quantifies variability in space and time of the activity. ACE is normalized by dividing
    the raw by the value obtained for the same binary input but randomly shuffled. The
    implementation used here is that of Schartner et al.'s (2015), which modified Shanahan's (2010)
    original version of coalition entropy so that it is applicable to real EEG data.

    Synchrony Coalition Entropy (SCE) reflects the entropy over time of the constitution of
    the set of synchronous channels, introduced and implemented by Schartner et al. (2015).
    SCE quantifies variability in the relationships between pairs of channel, i.e., the uncertainty
    over time of the constitution of the set of channels in synchrony (rather than active).
    The overall SCE is the mean value of SCE across channels.

    Parameters
    ----------
    signal : DataFrame
        The DataFrame containing all the respective signals (n_samples x n_channels).
    method : str
        Can be ``"amplitude"`` (or ``"ace"``) for ACE or ``"synchrony"`` (or ``"sce"``) for SCE.
        Case-insensitive.

    Returns
    ----------
    ce : float
        The coalition entropy.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute coalition entropy.

    Raises
    ------
    ValueError
        If ``signal`` is one-dimensional, or if ``method`` is not recognized.

    References
    ----------
    * Shanahan, M. (2010). Metastable chimera states in community-structured oscillator networks.
      Chaos: An Interdisciplinary Journal of Nonlinear Science, 20(1), 013108.
    * Schartner, M., Seth, A., Noirhomme, Q., Boly, M., Bruno, M. A., Laureys, S., &
      Barrett, A. (2015). Complexity of multi-dimensional spontaneous EEG decreases
      during propofol induced general anaesthesia. PloS one, 10(8), e0133532.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      raw = nk.mne_data("raw")
      signal = nk.mne_to_df(raw)[["EEG 001", "EEG 002", "EEG 003"]]

      # ACE
      ace, info = nk.entropy_coalition(signal, method="amplitude")
      ace

      # SCE
      sce, info = nk.entropy_coalition(signal, method="synchrony")
      sce

    """
    # Sanity checks
    if isinstance(signal, pd.DataFrame):
        # return signal in (len(channels), len(samples)) format
        signal = signal.values.transpose()
    elif (isinstance(signal, np.ndarray) and len(signal.shape) == 1) or isinstance(
        signal, (list, pd.Series)
    ):
        raise ValueError(
            "entropy_coalition(): The input must be a dataframe containing multiple signals.",
        )

    # Detrend and normalize (remove each channel's mean and linear trend)
    signal = np.array([signal_detrend(i - np.mean(i)) for i in signal])

    # Method (case-insensitive)
    # BUG FIX: the names were previously compared *after* lowercasing against the
    # uppercase strings "ACE"/"SCE", which could never match; an unrecognized
    # method also fell through both branches and raised an UnboundLocalError.
    method = method.lower()
    if method in ["ace", "amplitude"]:
        info = {"Method": "ACE"}
        entropy = _entropy_coalition_amplitude(signal)
    elif method in ["sce", "synchrony"]:
        info = {"Method": "SCE"}
        entropy, info["Values"] = _entropy_coalition_synchrony(signal)
    else:
        raise ValueError(
            "entropy_coalition(): `method` must be one of 'amplitude' (ACE) or 'synchrony' (SCE)."
        )
    return entropy, info
# =============================================================================
# Methods
# =============================================================================
def _entropy_coalition_synchrony(signal):
    """Synchrony Coalition Entropy (SCE) of an (n_channels, n_samples) array.

    For every channel, a binary "synchrony series" is built against each other
    channel (1 when their instantaneous phases are close, 0 otherwise), and the
    Shannon entropy of the resulting coalitions over time is computed. Values
    are normalized by the entropy obtained for a random binary matrix of the
    same shape.

    Returns the mean normalized entropy across channels, and the per-channel
    normalized entropies.

    NOTE(review): the normalization matrix is drawn from ``np.random.rand``
    without a fixed seed, so repeated calls give slightly different values —
    confirm whether this non-determinism is intended.
    """
    n_channels, n_samples = np.shape(signal)
    # Get binary matrices of synchrony for each series
    # (instantaneous phase obtained from the analytic signal via Hilbert transform)
    transformed = np.angle(scipy.signal.hilbert(signal))
    matrix = np.zeros(
        (n_channels, n_channels - 1, n_samples)
    )  # store array of synchrony series for each channel
    for i in range(n_channels):
        index = 0
        for j in range(n_channels):
            if i != j:
                matrix[i, index] = _entropy_coalition_synchrony_phase(
                    transformed[i], transformed[j]
                )
                index += 1
    # Create random binary matrix for normalization
    y = np.random.rand(n_channels - 1, n_samples)
    random_binarized = np.array([_signal_binarize_threshold(i, threshold=0.5) for i in y])
    norm = entropy_shannon(_entropy_coalition_map(random_binarized))[0]
    # Compute shannon entropy of each channel's coalition series
    entropy = np.zeros(n_channels)
    for i in range(n_channels):
        c = _entropy_coalition_map(matrix[i])
        entropy[i] = entropy_shannon(c)[0]
    return np.mean(entropy) / norm, entropy / norm
def _entropy_coalition_amplitude(signal):
    """Amplitude Coalition Entropy (ACE) of an (n_channels, n_samples) array.

    Channels are binarized on their amplitude envelope (1 = "active"), and the
    Shannon entropy of the coalition of active channels over time is computed.
    The result is normalized by the entropy of the same binary matrix after
    each channel has been shuffled in time, which destroys temporal structure
    while preserving each channel's overall activity level.

    NOTE(review): seeding is done via the global ``np.random.seed(30)``, which
    mutates global RNG state for the rest of the process — confirm this side
    effect is acceptable to callers.
    """
    # Hilbert transform to determine the amplitude envelope
    env = np.array([np.abs(scipy.signal.hilbert(i)) for i in signal])
    # Binarize (similar to LZC), mean of absolute of signal as threshold
    binarized = np.array([_signal_binarize_threshold(i, threshold="mean") for i in env])
    # Compute Shannon Entropy of the observed coalition series
    e1 = entropy_shannon(_entropy_coalition_map(binarized))[0]
    # Shuffle each channel in-place (fixed seed for reproducible results)
    np.random.seed(30)  # set random seed to get reproducible results
    for seq in binarized:
        np.random.shuffle(seq)
    # Shuffled result as normalization
    e2 = entropy_shannon(_entropy_coalition_map(binarized))[0]
    return e1 / e2
# =============================================================================
# Utilities
# =============================================================================
def _entropy_coalition_synchrony_phase(phase1, phase2):
"""Compute synchrony of two series of phases"""
diff = np.abs(phase1 - phase2)
d2 = np.zeros(len(diff))
for i in range(len(d2)):
if diff[i] > np.pi:
diff[i] = 2 * np.pi - diff[i]
if diff[i] < 0.8:
d2[i] = 1
return d2
def _entropy_coalition_map(binary_sequence):
"""Map each binary column of binary matrix psi onto an integer"""
n_channels, n_samples = binary_sequence.shape[0], binary_sequence.shape[1]
mapped = np.zeros(n_samples)
for t in range(n_samples):
for j in range(n_channels):
mapped[t] += binary_sequence[j, t] * (2 ** j)
return mapped
| 6,380 | 34.45 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/information_fisher.py | import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
def fisher_information(signal, delay=1, dimension=2):
    """**Fisher Information (FI)**

    The Fisher information was introduced by R. A. Fisher in 1925, as a measure of "intrinsic
    accuracy" in statistical estimation theory. It measures the amount of information that an
    observable random variable carries about an unknown parameter. In complexity analysis, the
    amount of information that a system carries "about itself" is measured. Similarly to
    :func:`SVDEn <entropy_svd>`, it is based on the Singular Value Decomposition (SVD) of the
    :func:`time-delay embedded <complexity_embedding>` signal. The value of FI is usually
    anti-correlated with other measures of complexity (the more information a system withholds
    about itself, the more predictable and thus, less complex it is).

    See Also
    --------
    entropy_svd, information_mutual, complexity_embedding, complexity_delay, complexity_dimension

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.

    Returns
    -------
    fi : float
        The computed fisher information measure.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute fisher information.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)

      fi, info = nk.fisher_information(signal, delay=10, dimension=3)
      fi

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)

    # Singular values of the embedding matrix, normalized to sum to 1
    singular = np.linalg.svd(embedded, compute_uv=False)
    singular = singular / np.sum(singular)

    # Fisher information: squared increments relative to the preceding value
    values = np.diff(singular) ** 2 / singular[:-1]

    return np.sum(values), {"Dimension": dimension, "Delay": delay, "Values": values}
| 2,660 | 39.318182 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_hurst.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.special
from .fractal_dfa import _fractal_dfa_findscales
def fractal_hurst(signal, scale="default", corrected=True, show=False):
    """**Hurst Exponent (H)**

    This function estimates the Hurst exponent via the standard rescaled range (R/S) approach, but
    other methods exist, such as Detrended Fluctuation Analysis (DFA, see :func:`fractal_dfa`).

    The Hurst exponent is a measure for the "long-term memory" of a signal. It can be used to
    determine whether the time series is more, less, or equally likely to increase if it has
    increased in previous steps. This property makes the Hurst exponent especially interesting for
    the analysis of stock data. It typically ranges from 0 to 1, with 0.5 corresponding to a
    Brownian motion. If H < 0.5, the time-series covers less "distance" than a random walk (the
    memory of the signal decays faster than at random), and vice versa.

    The R/S approach first splits the time series into non-overlapping subseries of length n. R and
    S (sigma) are then calculated for each subseries and the mean is taken over all subseries
    yielding (R/S)_n. This process is repeated for several lengths *n*. The final exponent is then
    derived from fitting a straight line to the plot of :math:`log((R/S)_n)` vs :math:`log(n)`.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : list
        A list containing the lengths of the windows (number of data points in each subseries) that
        the signal is divided into. See :func:`fractal_dfa` for more information.
    corrected : boolean
        if ``True``, the Anis-Lloyd-Peters correction factor will be applied to the
        output according to the expected value for the individual (R/S) values.
    show : bool
        If ``True``, returns a plot.

    See Also
    --------
    fractal_dfa

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate Signal with duration of 2s
      signal = nk.signal_simulate(duration=2, frequency=5)

      # Compute Hurst Exponent
      h, info = nk.fractal_hurst(signal, corrected=True, show=True)
      h

    References
    ----------
    * Brandi, G., & Di Matteo, T. (2021). On the statistics of scaling exponents and the
      Multiscaling Value at Risk. The European Journal of Finance, 1-22.
    * Annis, A. A., & Lloyd, E. H. (1976). The expected value of the adjusted rescaled Hurst range
      of independent normal summands. Biometrika, 63(1), 111-116.
    * https://github.com/CSchoel/nolds

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    scale = _fractal_dfa_findscales(len(signal), scale)

    # One (R/S) estimate per window length
    rs_vals = np.array([_fractal_hurst_rs(signal, window) for window in scale])

    # Drop windows for which no R/S value could be computed
    # (zeros are impossible: if R is 0 then S is also 0 and the value is NaN)
    keep = ~np.isnan(rs_vals)
    rs_vals = rs_vals[keep]
    n_vals = np.asarray(scale)[keep]

    # It may happen that no rs vals are left (if all values of data are the same)
    if len(rs_vals) == 0:
        raise ValueError(
            "All (R/S) values are NaN. Check your data, or try a different window length."
        )

    # Fit log10(R/S) against log10(n); when corrected, subtract the
    # Anis-Lloyd-Peters expectation first and add 0.5 back to the slope.
    rs_vals = np.log10(rs_vals)
    if corrected:
        rs_vals -= np.log10([expected_rs(n) for n in n_vals])
    n_vals = np.log10(n_vals)

    poly = np.polyfit(n_vals, rs_vals, 1)
    h = poly[0] + 0.5 if corrected else poly[0]

    if show:
        _fractal_hurst_plot(poly, n_vals, rs_vals, corrected=corrected, ax=None)

    return h, {"Values": n_vals, "Scores": rs_vals, "Corrected": corrected, "Intercept": poly[1]}
# =============================================================================
# Utilities
# =============================================================================
def expected_rs(n):
    """Expected (R/S)_n of white noise for a given n (Anis-Lloyd-Peters formula).

    Used as a small-sample correction factor in `fractal_hurst`.
    https://en.wikipedia.org/wiki/Hurst_exponent#cite_note-:2-17
    """
    indices = np.arange(1, n)
    tail = np.sum(np.sqrt((n - indices) / indices))
    # The gamma-ratio prefactor overflows for large n; switch to its
    # asymptotic form beyond n = 340.
    if n > 340:
        prefactor = 1.0 / np.sqrt(n * np.pi * 0.5)
    else:
        prefactor = scipy.special.gamma((n - 1) * 0.5) / np.sqrt(np.pi) / scipy.special.gamma(n * 0.5)
    return prefactor * tail
def _fractal_hurst_rs(signal, window):
"""Calculates an individual R/S value in the rescaled range approach for
a given window size (the size of the subseries in which data should be split).
"""
n = len(signal)
m = n // window # number of sequences
# cut values at the end of data to make the array divisible by n
signal = signal[: n - (n % window)]
# split remaining data into subsequences of length n
seqs = np.reshape(signal, (m, window))
# calculate means of subsequences
means = np.mean(seqs, axis=1)
# normalize subsequences by substracting mean
y = seqs - means.reshape((m, 1))
# build cumulative sum of subsequences
y = np.cumsum(y, axis=1)
# find ranges
r = np.max(y, axis=1) - np.min(y, axis=1)
# find standard deviation
# we should use the unbiased estimator, since we do not know the true mean
s = np.std(seqs, ddof=1, axis=1)
# some ranges may be zero and have to be excluded from the analysis
idx = np.where(r != 0)
r = r[idx]
s = s[idx]
# it may happen that all ranges are zero (if all values in data are equal)
if len(r) == 0:
return np.nan
# return mean of r/s along subsequence index
return np.mean(r / s)
def _fractal_hurst_plot(poly, n_vals, rs_vals, corrected=False, ax=None):
    """Scatter log10((R/S)_n) against log10(n) together with the fitted line."""
    if ax is None:  # ax option in case more plots need to be added later
        fig, ax = plt.subplots()
        fig.suptitle("Hurst Exponent via Rescaled Range (R/S) Analysis")
    else:
        fig = None

    ax.set_ylabel(r"$log$((R/S)_n)")
    ax.set_xlabel(r"$log$(n)")

    # Observed values
    ax.scatter(n_vals, rs_vals, marker="o", zorder=1, label="_no_legend_")

    # Regression line; the legend shows the (possibly corrected) exponent
    slope, intercept = poly[0], poly[1]
    fitted = [slope * x + intercept for x in n_vals]
    if corrected:
        label = "corrected h = {}".format(round(slope + 0.5, 2))
    else:
        label = "h = {}".format(round(slope, 2))
    ax.plot(n_vals, fitted, color="#E91E63", zorder=2, linewidth=3, label=label)
    ax.legend(loc="lower right")
    return fig
# =============================================================================
# Generalized Hurst Exponent
# =============================================================================
def _fractal_hurst_generalized(signal, q=2):
    """Generalized Hurst exponent (work in progress; not publicly exposed).

    Assesses the scaling properties of the time series directly via the
    q-th order moments of the distribution of increments. Different exponents
    `q` are associated with different characterizations of the multi-scaling
    complexity of the signal. In contrast to the popular R/S statistics
    approach, it does not deal with max and min functions, and is thus less
    sensitive to outliers.

    Adapted from https://github.com/PTRRupprecht/GenHurst

    NOTE(review): marked "TO BE DONE" by the original authors — the averaging
    over Tmax in range(5, 20) and the final division by q follow the GenHurst
    reference implementation; verify against it before exposing publicly.
    """
    n = len(signal)
    # One slope estimate per maximum lag Tmax
    H = np.zeros((len(range(5, 20)), 1))
    k = 0

    for Tmax in range(5, 20):
        x = np.arange(1, Tmax + 1, 1)
        mcord = np.zeros((Tmax, 1))

        for tt in range(1, Tmax + 1):
            # Lag-tt increments and the corresponding signal values
            dV = signal[np.arange(tt, n, tt)] - signal[np.arange(tt, n, tt) - tt]
            VV = signal[np.arange(tt, n + tt, tt) - tt]
            N = len(dV) + 1
            X = np.arange(1, N + 1, dtype=np.float64)
            Y = VV
            # Hand-rolled least-squares fit of VV against the sample index
            # (cc1 = slope, cc2 = intercept)
            mx = np.sum(X) / N
            SSxx = np.sum(X ** 2) - N * mx ** 2
            my = np.sum(Y) / N
            SSxy = np.sum(np.multiply(X, Y)) - N * mx * my
            cc1 = SSxy / SSxx
            cc2 = my - cc1 * mx
            # Detrend increments and values, then take the ratio of their
            # q-th absolute moments
            ddVd = dV - cc1
            VVVd = VV - np.multiply(cc1, np.arange(1, N + 1, dtype=np.float64)) - cc2
            mcord[tt - 1] = np.mean(np.abs(ddVd) ** q) / np.mean(np.abs(VVVd) ** q)

        # Slope of log10(mcord) vs log10(lag), again by hand-rolled least squares
        mx = np.mean(np.log10(x))
        SSxx = np.sum(np.log10(x) ** 2) - Tmax * mx ** 2
        my = np.mean(np.log10(mcord))
        SSxy = np.sum(np.multiply(np.log10(x), np.transpose(np.log10(mcord)))) - Tmax * mx * my
        H[k] = SSxy / SSxx
        k = k + 1

    # Average over all Tmax values and normalize by the moment order
    mH = np.mean(H) / q
    return mH
| 8,916 | 35.247967 | 124 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_petrosian.py | import numpy as np
import pandas as pd
from .utils_complexity_symbolize import complexity_symbolize
def fractal_petrosian(signal, symbolize="C", show=False):
    """**Petrosian fractal dimension (PFD)**

    Petrosian (1995) proposed a fast method to estimate the fractal dimension by converting the
    signal into a binary sequence from which the fractal dimension is estimated. Several variations
    of the algorithm exist (e.g., ``"A"``, ``"B"``, ``"C"`` or ``"D"``), primarily differing in the
    way the discrete (symbolic) sequence is created (see func:`complexity_symbolize` for details).
    The most common method (``"C"``, by default) binarizes the signal by the sign of consecutive
    differences.

    .. math::

      \\frac{log(N)}{log(N) + log(\\frac{N}{N+0.4N_{\\delta}})}

    Most of these methods assume that the signal is periodic (without a linear trend). Linear
    detrending might be useful to eliminate linear trends (see :func:`.signal_detrend`).

    See Also
    --------
    information_mutual, entropy_svd

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. By default,
        assigns 0 and 1 to values below and above the mean. Can be ``None`` to skip the process (in
        case the input is already discrete). See :func:`complexity_symbolize` for details.
    show : bool
        If ``True``, will show the discrete the signal.

    Returns
    -------
    pfd : float
        The petrosian fractal dimension (PFD).
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute PFD.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=[5, 12])

      @savefig p_fractal_petrosian1.png scale=100%
      pfd, info = nk.fractal_petrosian(signal, symbolize = "C", show=True)
      @suppress
      plt.close()

    .. ipython:: python

      pfd
      info

    References
    ----------
    * Esteller, R., Vachtsevanos, G., Echauz, J., & Litt, B. (2001). A comparison of waveform
      fractal dimension algorithms. IEEE Transactions on Circuits and Systems I: Fundamental Theory
      and Applications, 48(2), 177-183.
    * Petrosian, A. (1995, June). Kolmogorov complexity of finite sequences and recognition of
      different preictal EEG patterns. In Proceedings eighth IEEE symposium on computer-based
      medical systems (pp. 212-217). IEEE.
    * Kumar, D. K., Arjunan, S. P., & Aliahmad, B. (2017). Fractals: applications in biological
      Signalling and image processing. CRC Press.
    * Goh, C., Hamadicharef, B., Henderson, G., & Ifeachor, E. (2005, June). Comparison of fractal
      dimension algorithms for the computation of EEG biomarkers for dementia. In 2nd International
      Conference on Computational Intelligence in Medicine and Healthcare (CIMED2005).

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Make the sequence discrete
    symbolic = complexity_symbolize(signal, method=symbolize, show=show)

    # Number of "inversions" (N_delta): sign changes between consecutive symbols.
    # Comparing shifted copies also handles sequences like [0, -1, 1].
    n = len(symbolic)
    n_inversions = np.count_nonzero(symbolic[1:] != symbolic[:-1])

    log_n = np.log10(n)
    pfd = log_n / (log_n + np.log10(n / (n + 0.4 * n_inversions)))
    return pfd, {"Symbolization": symbolize}
| 3,975 | 38.366337 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_embedding.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning
from ..signal import signal_sanitize
from .utils_complexity_attractor import (_attractor_equation,
complexity_attractor)
def complexity_embedding(signal, delay=1, dimension=3, show=False, **kwargs):
    """**Time-delay Embedding of a Signal**

    Time-delay embedding is one of the key concept of complexity science. It is based on the idea
    that a dynamical system can be described by a vector of numbers, called its *'state'*, that
    aims to provide a complete description of the system at some point in time. The set of all
    possible states is called the *'state space'*.

    Takens's (1981) embedding theorem suggests that a sequence of measurements of a dynamic system
    includes in itself all the information required to completely reconstruct the state space.
    Time-delay embedding attempts to identify the state *s* of the system at some time *t* by
    searching the past history of observations for similar states, and, by studying the evolution
    of similar states, infer information about the future of the system.

    **Attractors**

    How to visualize the dynamics of a system? A sequence of state values over time is called a
    trajectory. Depending on the system, different trajectories can evolve to a common subset of
    state space called an attractor. The presence and behavior of attractors gives intuition about
    the underlying dynamical system. We can visualize the system and its attractors by plotting the
    trajectory of many different initial state values and numerically integrating them to
    approximate their continuous time evolution on discrete computers.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values. Can also be a string,
        such as ``"lorenz"`` (Lorenz attractor), ``"rossler"`` (Rössler attractor), or
        ``"clifford"`` (Clifford attractor) to obtain a pre-defined attractor.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    show : bool
        Plot the reconstructed attractor. See :func:`complexity_attractor` for details.
    **kwargs
        Other arguments to be passed to :func:`complexity_attractor`.

    Returns
    -------
    array
        Embedded time-series, of shape ``length - (dimension - 1) * delay``

    Raises
    ------
    ValueError
        If ``delay`` is smaller than 1, or if ``dimension * delay`` exceeds the signal length.

    See Also
    ------------
    complexity_delay, complexity_dimension, complexity_attractor

    Examples
    ---------
    **Example 1**: Understanding the output

    .. ipython:: python

      import neurokit2 as nk

      # Basic example
      signal = [1, 2, 3, 2.5, 2.0, 1.5]
      embedded = nk.complexity_embedding(signal, delay = 2, dimension = 2)
      embedded

    The first columns contains the beginning of the signal, and the second column contains the
    values at *t+2*.

    **Example 2**: 2D, 3D, and "4D" Attractors. Note that 3D attractors are slow to plot.

    .. ipython:: python

      # Artifical example
      signal = nk.signal_simulate(duration=4, sampling_rate=200, frequency=5, noise=0.01)

      @savefig p_complexity_embedding1.png scale=100%
      embedded = nk.complexity_embedding(signal, delay=50, dimension=2, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_embedding2.png scale=100%
      embedded = nk.complexity_embedding(signal, delay=50, dimension=3, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      @savefig p_complexity_embedding3.png scale=100%
      embedded = nk.complexity_embedding(signal, delay=50, dimension=4, show=True)
      @suppress
      plt.close()

    In the last 3D-attractor, the 4th dimension is represented by the color.

    **Example 3**: Attractor of heart rate

    .. ipython:: python

      ecg = nk.ecg_simulate(duration=60*4, sampling_rate=200)
      peaks, _ = nk.ecg_peaks(ecg, sampling_rate=200)
      signal = nk.ecg_rate(peaks, sampling_rate=200, desired_length=len(ecg))

      @savefig p_complexity_embedding4.png scale=100%
      embedded = nk.complexity_embedding(signal, delay=250, dimension=2, show=True)
      @suppress
      plt.close()

    References
    -----------
    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based
      method for determining the optimal embedding parameters of a signal. In 2003 IEEE
      International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
      (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.
    * Takens, F. (1981). Detecting strange attractors in turbulence. In Dynamical systems and
      turbulence, Warwick 1980 (pp. 366-381). Springer, Berlin, Heidelberg.

    """
    # Pre-defined attractors can be requested by name
    if isinstance(signal, str):
        return _attractor_equation(signal, **kwargs)

    N = len(signal)
    signal = signal_sanitize(signal)

    # Sanity checks: coerce float parameters to integers (with a warning)
    if isinstance(delay, float):
        warn("`delay` must be an integer. Running `int(delay)`", category=NeuroKitWarning)
        delay = int(delay)
    if isinstance(dimension, float):
        warn("`dimension` must be an integer. Running `int(dimension)`", category=NeuroKitWarning)
        dimension = int(dimension)

    if dimension * delay > N:
        # BUG FIX: the message was previously passed as two separate arguments,
        # which made ValueError display a tuple instead of a single sentence.
        raise ValueError(
            "NeuroKit error: complexity_embedding(): dimension * delay should be lower than"
            " the length of the signal."
        )
    if delay < 1:
        raise ValueError("NeuroKit error: complexity_embedding(): 'delay' has to be at least 1.")

    # Each row i of Y is the signal shifted by i * delay samples; transposing
    # yields one embedded state vector per row.
    Y = np.zeros((dimension, N - (dimension - 1) * delay))
    for i in range(dimension):
        Y[i] = signal[i * delay : i * delay + Y.shape[1]]
    embedded = Y.T

    if show is True:
        complexity_attractor(embedded, **kwargs)

    return embedded
| 6,158 | 38.229299 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_rate.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import find_knee
from .entropy_shannon import entropy_shannon
from .utils_complexity_embedding import complexity_embedding
from .utils_complexity_symbolize import complexity_symbolize
def entropy_rate(signal, kmax=10, symbolize="mean", show=False):
"""**Entropy Rate (RatEn)**
The Entropy Rate (RatEn or ER) quantifies the amount of information needed to describe the
signal given observations of signal(k). In other words, it is the entropy of the time series
conditioned on the *k*-histories.
It quantifies how much uncertainty or randomness the process produces at each new time step,
given knowledge about the past states of the process. The entropy rate is estimated as the
slope of the linear fit between the history length *k* and the joint Shannon entropies. The
entropy at k = 1 is called **Excess Entropy** (ExEn).
We adapted the algorithm to include a knee-point detection (beyond which the self-Entropy
reaches a plateau), and if it exists, we additionally re-compute the Entropy Rate up until that
point. This **Maximum Entropy Rate** (MaxRatEn) can be retrieved from the dictionary.
Parameters
----------
signal : Union[list, np.array, pd.Series]
A :func:`symbolic <complexity_symbolize>` sequence in the form of a vector of values.
kmax : int
The max history length to consider. If an integer is passed, will generate a range from 1
to kmax.
symbolize : str
Method to convert a continuous signal input into a symbolic (discrete) signal. By default,
assigns 0 and 1 to values below and above the mean. Can be ``None`` to skip the process (in
case the input is already discrete). See :func:`complexity_symbolize` for details.
show : bool
Plot the Entropy Rate line.
See Also
--------
entropy_shannon
Examples
----------
**Example 1**: A simple discrete signal. We have to specify ``symbolize=None`` as the signal is
already discrete.
.. ipython:: python
import neurokit2 as nk
signal = [1, 1, 2, 1, 2, 1, 1, 1, 2, 2, 1, 1, 1, 3, 2, 2, 1, 3, 2]
@savefig p_entropy_rate1.png scale=100%
raten, info = nk.entropy_rate(signal, kmax=10, symbolize=None, show=True)
@suppress
plt.close()
Here we can see that *kmax* is likely to big to provide an accurate estimation of entropy rate.
.. ipython:: python
@savefig p_entropy_rate2.png scale=100%
raten, info = nk.entropy_rate(signal, kmax=3, symbolize=None, show=True)
@suppress
plt.close()
**Example 2**: A continuous signal.
.. ipython:: python
signal = nk.signal_simulate(duration=2, frequency=[5, 12, 40, 60], sampling_rate=1000)
@savefig p_entropy_rate3.png scale=100%
raten, info = nk.entropy_rate(signal, kmax=60, show=True)
@suppress
plt.close()
.. ipython:: python
raten
info["Excess_Entropy"]
info["MaxRatEn"]
References
----------
* Mediano, P. A., Rosas, F. E., Timmermann, C., Roseman, L., Nutt, D. J., Feilding, A., ... &
Carhart-Harris, R. L. (2020). Effects of external stimulation on psychedelic state
neurodynamics. Biorxiv.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Force to array
if not isinstance(signal, np.ndarray):
signal = np.array(signal)
# Make discrete
if np.isscalar(signal) is False:
signal = complexity_symbolize(signal, method=symbolize)
# Convert into range if integer
if np.isscalar(kmax) is True:
kmax = np.arange(1, kmax + 1)
# Compute self-entropy
info = {
"Entropy": [_selfentropy(signal, k) for k in kmax],
"k": kmax,
}
# Traditional Entropy Rate (on all the values)
raten, intercept1 = np.polyfit(info["k"], info["Entropy"], 1)
# Excess Entropy
info["Excess_Entropy"] = info["Entropy"][0]
# Max Entropy Rate
# Detect knee
try:
knee = find_knee(info["Entropy"], verbose=False)
except ValueError:
knee = len(info["k"]) - 1
if knee == len(info["k"]) - 1:
info["MaxRatEn"], intercept2 = raten, np.nan
else:
info["MaxRatEn"], intercept2 = np.polyfit(info["k"][0:knee], info["Entropy"][0:knee], 1)
# Store knee
info["Knee"] = knee
# Plot
if show:
plt.figure(figsize=(6, 6))
plt.plot(info["k"], info["Entropy"], "o-", color="black")
y = raten * info["k"] + intercept1
plt.plot(
info["k"],
y,
color="red",
label=f"Entropy Rate = {raten:.2f}",
)
plt.plot(
(np.min(info["k"]), np.min(info["k"])),
(0, info["Entropy"][0]),
"--",
color="blue",
label=f"Excess Entropy = {info['Excess_Entropy']:.2f}",
)
if not np.isnan(intercept2):
y2 = info["MaxRatEn"] * info["k"] + intercept2
plt.plot(
info["k"][y2 <= np.max(y)],
y2[y2 <= np.max(y)],
color="purple",
label=f"Max Entropy Rate = {info['MaxRatEn']:.2f}",
)
plt.plot(
(info["k"][knee], info["k"][knee]),
(0, info["Entropy"][knee]),
"--",
color="purple",
label=f"Knee = {info['k'][knee]}",
)
plt.legend(loc="lower right")
plt.xlabel("History Length $k$")
plt.ylabel("Entropy")
plt.title("Entropy Rate")
return raten, info
def _selfentropy(x, k=3):
    """Shannon joint (self-)entropy of the k-histories of a symbolic sequence.

    The sequence is embedded into overlapping vectors of length ``k`` (delay 1),
    and the Shannon entropy (base 2) of the empirical distribution of the
    unique embedded patterns is returned.
    """
    histories = complexity_embedding(x, dimension=int(k), delay=1)
    counts = np.unique(histories, return_counts=True, axis=0)[1]
    probabilities = counts / counts.sum()
    return entropy_shannon(freq=probabilities, base=2)[0]
| 6,145 | 32.043011 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_fractal_mandelbrot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
def fractal_mandelbrot(
    size=1000,
    real_range=(-2, 2),
    imaginary_range=(-2, 2),
    threshold=4,
    iterations=25,
    buddha=False,
    show=False,
):
    """**Mandelbrot (or a Buddhabrot) Fractal**

    Vectorized function to efficiently generate an array containing values corresponding to a
    Mandelbrot fractal (or a Buddhabrot when ``buddha=True``).

    Parameters
    -----------
    size : int
        The size in pixels (corresponding to the width of the figure).
    real_range : tuple
        The mandelbrot set is defined within the -2, 2 complex space (the real being the x-axis
        and the imaginary the y-axis). Adjusting these ranges can be used to pan, zoom and crop
        the figure.
    imaginary_range : tuple
        The mandelbrot set is defined within the -2, 2 complex space (the real being the x-axis
        and the imaginary the y-axis). Adjusting these ranges can be used to pan, zoom and crop
        the figure.
    iterations : int
        Number of iterations.
    threshold : int
        The threshold used, increasing it will increase the sharpness (not used for buddhabrots).
    buddha : bool
        Whether to return a buddhabrot.
    show : bool
        Visualize the fractal.

    Returns
    -------
    ndarray
        Array of values.

    Examples
    ---------
    Create the Mandelbrot fractal

    .. ipython:: python

      import neurokit2 as nk

      @savefig p_fractal_mandelbrot1.png scale=100%
      m = nk.fractal_mandelbrot(show=True)
      @suppress
      plt.close()

    Zoom at the Seahorse Valley

    .. ipython:: python

      @savefig p_fractal_mandelbrot2.png scale=100%
      m = nk.fractal_mandelbrot(real_range=(-0.76, -0.74), imaginary_range=(0.09, 0.11),
                                iterations=100, show=True)
      @suppress
      plt.close()

    Draw manually

    .. ipython:: python

      import matplotlib.pyplot as plt

      m = nk.fractal_mandelbrot(real_range=(-2, 0.75), imaginary_range=(-1.25, 1.25))
      @savefig p_fractal_mandelbrot3.png scale=100%
      plt.imshow(m.T, cmap="viridis")
      plt.axis("off")
      @suppress
      plt.close()

    Generate a Buddhabrot fractal

    .. ipython:: python

      b = nk.fractal_mandelbrot(size=1500, real_range=(-2, 0.75), imaginary_range=(-1.25, 1.25),
                                buddha=True, iterations=200)
      @savefig p_fractal_mandelbrot4.png scale=100%
      plt.imshow(b.T, cmap="gray")
      plt.axis("off")
      @suppress
      plt.close()

    Mixed MandelBuddha

    .. ipython:: python

      m = nk.fractal_mandelbrot()
      b = nk.fractal_mandelbrot(buddha=True, iterations=200)
      mixed = m - b
      @savefig p_fractal_mandelbrot5.png scale=100%
      plt.imshow(mixed.T, cmap="gray")
      plt.axis("off")
      @suppress
      plt.close()

    """
    # Dispatch to the relevant renderer
    if buddha is not False:
        img = _buddhabrot(
            size=size,
            real_range=real_range,
            imaginary_range=imaginary_range,
            iterations=iterations,
        )
    else:
        img = _mandelbrot(
            size=size,
            real_range=real_range,
            imaginary_range=imaginary_range,
            threshold=threshold,
            iterations=iterations,
        )

    # Optionally display the image
    if show is True:
        plt.imshow(img, cmap="rainbow")
        plt.axis("off")

    return img
# =============================================================================
# Internals
# =============================================================================
def _mandelbrot(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2), iterations=25, threshold=4):
    """Compute escape-time counts for the Mandelbrot set.

    Each pixel of the returned integer array holds the number of iterations
    during which the corresponding point ``c`` stayed below the divergence
    ``threshold`` under the iteration ``z -> z**2 + c``.
    """
    img, c = _mandelbrot_initialize(
        size=size, real_range=real_range, imaginary_range=imaginary_range
    )
    # Points inside the main cardioid / period-2 bulb never escape; exclude them upfront
    optim = _mandelbrot_optimize(c)

    z = np.copy(c)
    for _ in range(1, iterations + 1):
        # Continue only where |z|^2 is smaller than threshold and outside the
        # pre-excluded (known-interior) regions
        mask = (z * z.conjugate()).real < threshold
        mask = np.logical_and(mask, optim)
        # Bug fix: the original `if np.all(~mask) is True:` compared a numpy
        # bool to the Python `True` singleton by identity, which is always
        # False, so the early exit never triggered. Use a plain truth test.
        if not np.any(mask):
            break
        # Count one more bounded iteration for the surviving points
        img[mask] += 1
        # Iterate based on the Mandelbrot equation
        z[mask] = z[mask] ** 2 + c[mask]

    # Fill optimized (known-interior) area with the maximum count
    img[~optim] = np.max(img)
    return img
def _mandelbrot_initialize(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2)):
    """Allocate the counter image and the grid of complex starting points ``c``."""
    # Pixel space: width is given, height follows from the window's aspect ratio
    width = size
    height = _mandelbrot_width2height(width, real_range, imaginary_range)
    counts = np.full((height, width), 0)
    # Complex plane: real part varies along columns, imaginary part along rows
    re = np.tile(np.linspace(*real_range, width), (height, 1))
    im = np.tile(np.linspace(*imaginary_range, height), (width, 1)).T
    c = re + 1j * im
    return counts, c
# =============================================================================
# Buddhabrot
# =============================================================================
def _buddhabrot(size=1000, iterations=100, real_range=(-2, 2), imaginary_range=(-2, 2)):
    """Accumulate a Buddhabrot density map.

    Escaping candidate points (sampled by ``_buddhabrot_initialize``) are
    iterated under ``z -> z**2 + c``, and every pixel their orbits visit is
    incremented. Accumulation happens over the full (-2, 2) complex square,
    and the result is finally cropped to the requested window.
    """
    # Target width and height of the output, so that the cropped image has the
    # same size as the Mandelbrot image produced for the same ranges
    width = size
    height = _mandelbrot_width2height(width, real_range, imaginary_range)
    # Inflate the working size so the full (-2, 2) square is covered at the
    # resolution implied by the requested (possibly zoomed) window
    x = np.array((np.array(real_range) + 2) / 4 * size, int)
    size = int(size * (size / (x[1] - x[0])))
    img = np.zeros([size, size], int)
    c = _buddhabrot_initialize(
        size=img.size, iterations=iterations, real_range=real_range, imaginary_range=imaginary_range
    )
    # Use these c-points as the initial 'z' points
    z = np.copy(c)
    while len(z) > 0:
        # Translate z points into image coordinates
        x = np.array((z.real + 2) / 4 * size, int)
        y = np.array((z.imag + 2) / 4 * size, int)
        # Add value to all occupied pixels
        img[y, x] += 1
        # Apply the Mandelbrot dynamic
        z = z ** 2 + c
        # Shed the points that have escaped (|z| >= 2 guarantees divergence)
        mask = np.abs(z) < 2
        c = c[mask]
        z = z[mask]
    # Crop to the requested real/imaginary window
    xrange = np.array((np.array(real_range) + 2) / 4 * size).astype(int)
    yrange = np.array((np.array(imaginary_range) + 2) / 4 * size).astype(int)
    img = img[yrange[0] : yrange[0] + height, xrange[0] : xrange[0] + width]
    return img
def _buddhabrot_initialize(size=1000, iterations=100, real_range=(-2, 2), imaginary_range=(-2, 2)):
    """Sample random c-points and return those that escape the Mandelbrot set.

    Only escaping points contribute to a Buddhabrot. Note the sampling uses
    ``np.random`` without a fixed seed, so the output is non-deterministic.
    """
    # Allocate an array to store our non-mset (escaping) points as we find them
    sets = np.zeros(size, dtype=np.complex128)
    sets_found = 0
    # Create an array of random complex numbers (our 'c' points), excluding
    # points inside the main cardioid / period-2 bulb (which never escape)
    c = np.random.uniform(*real_range, size) + (np.random.uniform(*imaginary_range, size) * 1j)
    c = c[_mandelbrot_optimize(c)]
    z = np.copy(c)
    for i in range(iterations):  # pylint: disable=W0612
        # Apply the Mandelbrot dynamic
        z = z ** 2 + c
        # Collect the c points that have escaped (|z| >= 2)
        mask = np.abs(z) < 2
        new_sets = c[~mask]
        sets[sets_found : sets_found + len(new_sets)] = new_sets
        sets_found += len(new_sets)
        # Then shed those points from our test set before continuing
        c = c[mask]
        z = z[mask]
    # Return only the points that are not in the mset
    return sets[:sets_found]
# =============================================================================
# Utils
# =============================================================================
def _mandelbrot_optimize(c):
# Optimizations: most of the mset points lie within the
# within the cardioid or in the period-2 bulb. (The two most
# prominent shapes in the mandelbrot set. We can eliminate these
# from our search straight away and save alot of time.
# see: http://en.wikipedia.org/wiki/Mandelbrot_set#Optimizations
# First eliminate points within the cardioid
p = (((c.real - 0.25) ** 2) + (c.imag ** 2)) ** 0.5
mask1 = c.real > p - (2 * p ** 2) + 0.25
# Next eliminate points within the period-2 bulb
mask2 = ((c.real + 1) ** 2) + (c.imag ** 2) > 0.0625
# Combine masks
mask = np.logical_and(mask1, mask2)
return mask
def _mandelbrot_width2height(size=1000, real_range=(-2, 2), imaginary_range=(-2, 2)):
return int(
np.rint((imaginary_range[1] - imaginary_range[0]) / (real_range[1] - real_range[0]) * size)
)
| 8,324 | 28.416961 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_symbolicdynamic.py | import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
from .utils_complexity_symbolize import complexity_symbolize
def entropy_symbolicdynamic(signal, dimension=3, symbolize="MEP", c=6, **kwargs):
    """**Symbolic Dynamic Entropy (SyDyEn) and its Multiscale variants (MSSyDyEn)**

    Symbolic Dynamic Entropy (SyDyEn) combines the merits of symbolic dynamic and information
    theory.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. Can be one
        of ``"MEP"`` (default), ``"NCDF"``, ``"linear"``, ``"uniform"``, ``"kmeans"``, ``"equal"``,
        or others. See :func:`complexity_symbolize` for details.
    c : int
        Number of symbols *c*.
    **kwargs : optional
        Other keyword arguments (currently not used).

    Returns
    -------
    SyDyEn : float
        Symbolic Dynamic Entropy (SyDyEn) of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, entropy_multiscale, entropy_dispersion

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = [2, -7, -12, 5, -1, 9, 14]

      # Simulate a Signal
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # Compute Symbolic Dynamic Entropy
      sydyen, info = nk.entropy_symbolicdynamic(signal, c=3, symbolize="MEP")
      sydyen

      sydyen, info = nk.entropy_symbolicdynamic(signal, c=3, symbolize="kmeans")
      sydyen

      # Compute Multiscale Symbolic Dynamic Entropy (MSSyDyEn)
      @savefig p_entropy_symbolicdynamic1.png scale=100%
      mssydyen, info = nk.entropy_multiscale(signal, method="MSSyDyEn", show=True)
      @suppress
      plt.close()

      # Compute Modified Multiscale Symbolic Dynamic Entropy (MMSyDyEn)
      @savefig p_entropy_symbolicdynamic2.png scale=100%
      mmsydyen, info = nk.entropy_multiscale(signal, method="MMSyDyEn", show=True)
      @suppress
      plt.close()

    References
    ----------
    * Matilla-García, M., Morales, I., Rodríguez, J. M., & Marín, M. R. (2021). Selection of
      embedding dimension and delay time in phase space reconstruction via symbolic dynamics.
      Entropy, 23(2), 221.
    * Li, Y., Yang, Y., Li, G., Xu, M., & Huang, W. (2017). A fault diagnosis scheme for planetary
      gearboxes using modified multi-scale symbolic dynamic entropy and mRMR feature selection.
      Mechanical Systems and Signal Processing, 91, 295-312.
    * Rajagopalan, V., & Ray, A. (2006). Symbolic time series analysis via wavelet-based
      partitioning. Signal processing, 86(11), 3309-3320.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Store parameters
    info = {"Dimension": dimension, "c": c, "Symbolization": symbolize}
    # We could technically expose the Delay, but the paper is about consecutive differences so...
    if "delay" in kwargs.keys():
        delay = kwargs["delay"]
        kwargs.pop("delay")
    else:
        delay = 1
    n = len(signal)
    # There are four main steps of SDE algorithm
    # 1. Convert the time series into the symbol time series (called symbolization).
    symbolic = complexity_symbolize(signal, method=symbolize, c=c)
    # 2. Construct the embedding vectors based on the symbol time series and compute the potential
    # state patterns probability
    embedded = complexity_embedding(symbolic, dimension=dimension, delay=delay)
    # 3. Construct the state transitions and compute the probability of state transitions.
    # counter1[i]: probability of the i-th unique state pattern
    # counter2[i, j]: probability of transitioning from pattern i to symbol j
    unique = np.unique(embedded, axis=0)
    counter1 = np.zeros(len(unique))
    counter2 = np.zeros((len(unique), c))
    # Bin edges centered on integer symbols 1..c
    # NOTE(review): assumes the symbolization produces 1-based integer symbols -- TODO confirm
    Bins = np.arange(0.5, c + 1.5, 1)
    for i in range(len(unique)):
        # Boolean mask of embedded vectors identical to the i-th unique pattern
        Ordx = np.any(embedded - unique[i, :], axis=1) == 0
        counter1[i] = sum(Ordx) / (n - ((dimension - 1) * delay))
        # Symbols observed (dimension * delay samples later) after each occurrence of pattern i
        Temp = embedded[
            np.hstack((np.zeros(dimension * delay, dtype=bool), Ordx[: -(dimension * delay)])), 0
        ]
        counter2[i, :], _ = np.histogram(Temp, Bins)
    # Normalize each row of transition counts into probabilities
    # (rows with no observed transitions are left at zero)
    Temp = np.sum(counter2, axis=1)
    counter2[Temp > 0, :] = counter2[Temp > 0, :] / np.tile(Temp[Temp > 0], (c, 1)).transpose()
    counter2[np.isnan(counter2)] = 0
    # 4. Based on the Shannon entropy [39], we define the SDE as the sum of the state entropy and
    # the state transition entropy
    with np.errstate(divide="ignore"):
        # State entropy (Shannon entropy of the state-pattern probabilities)
        P1 = -sum(counter1 * np.log(counter1))
        # Log joint (state x transition) probabilities; -inf entries (log 0) are zeroed below
        P2 = np.log(np.tile(counter1, (c, 1)).transpose() * counter2)
    P2[~np.isfinite(P2)] = 0
    sydyen = P1 - sum(counter1 * np.sum(P2, axis=1))
    # Normalize by the maximum possible entropy, log(c ** (dimension + 1))
    sydyen = sydyen / np.log(c ** (dimension + 1))
    return sydyen, info
| 5,300 | 37.413043 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_lyapunov.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.metrics.pairwise
from ..misc import NeuroKitWarning
from ..signal.signal_psd import signal_psd
from .utils_complexity_embedding import complexity_embedding
def complexity_lyapunov(
    signal,
    delay=1,
    dimension=2,
    method="rosenstein1993",
    len_trajectory=20,
    matrix_dim=4,
    min_neighbors="default",
    **kwargs,
):
    """**(Largest) Lyapunov Exponent (LLE)**

    Lyapunov exponents (LE) describe the rate of exponential separation (convergence or divergence)
    of nearby trajectories of a dynamical system. It is a measure of sensitive dependence on
    initial conditions, i.e. how quickly two nearby states diverge. A system can have multiple LEs,
    equal to the number of the dimensionality of the phase space, and the largest LE value, "LLE" is
    often used to determine the overall predictability of the dynamical system.

    Different algorithms exist to estimate these indices:

    * **Rosenstein et al.'s (1993)** algorithm was designed for calculating LLEs from small
      datasets. The time series is first reconstructed using a delay-embedding method, and the
      closest neighbour of each vector is computed using the euclidean distance. These two
      neighbouring points are then tracked along their distance trajectories for a number of data
      points. The slope of the line using a least-squares fit of the mean log trajectory of the
      distances gives the final LLE.
    * **Eckmann et al. (1996)** computes LEs by first reconstructing the time series using a
      delay-embedding method, and obtains the tangent that maps to the reconstructed dynamics using
      a least-squares fit, where the LEs are deduced from the tangent maps.

    .. warning::

      The **Eckman (1996)** method currently does not work. Please help us fixing it by double
      checking the code, the paper and helping us figuring out what's wrong. Overall, we would like
      to improve this function to return for instance all the exponents (Lyapunov spectrum),
      implement newer and faster methods (e.g., Balcerzak, 2018, 2020), etc. If you're interested
      in helping out with this, please get in touch!

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter. If method
        is ``"eckmann1996"``, larger values for dimension are recommended.
    method : str
        The method that defines the algorithm for computing LE. Can be one of ``"rosenstein1993"``
        or ``"eckmann1996"``.
    len_trajectory : int
        Applies when method is ``"rosenstein1993"``. The number of data points in which
        neighboring trajectories are followed.
    matrix_dim : int
        Applies when method is ``"eckmann1996"``. Corresponds to the number of LEs to return.
    min_neighbors : int, str
        Applies when method is ``"eckmann1996"``. Minimum number of neighbors. If ``"default"``,
        ``min(2 * matrix_dim, matrix_dim + 4)`` is used.
    **kwargs : optional
        Other arguments to be passed to ``signal_psd()`` for calculating the minimum temporal
        separation of two neighbors.

    Returns
    --------
    lle : float
        An estimate of the largest Lyapunov exponent (LLE) if method is ``"rosenstein1993"``, and
        an array of LEs if ``"eckmann1996"``.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute LLE.

    Raises
    ------
    ValueError
        If the input is multidimensional or if ``method`` is not one of the supported names.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=3, sampling_rate=100, frequency=[5, 8], noise=0.5)

      lle, info = nk.complexity_lyapunov(signal, method="rosenstein1993", show=True)
      lle

      # Eckman's method is broken. Please help us fix-it!
      # lle, info = nk.complexity_lyapunov(signal, dimension=2, method="eckmann1996")

    References
    ----------
    * Rosenstein, M. T., Collins, J. J., & De Luca, C. J. (1993). A practical method
      for calculating largest Lyapunov exponents from small data sets.
      Physica D: Nonlinear Phenomena, 65(1-2), 117-134.
    * Eckmann, J. P., Kamphorst, S. O., Ruelle, D., & Ciliberto, S. (1986). Liapunov
      exponents from time series. Physical Review A, 34(6), 4971.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Compute Minimum temporal separation between two neighbors
    # -----------------------------------------------------------
    # Rosenstein (1993) finds a suitable value by calculating the mean period of the data,
    # obtained by the reciprocal of the mean frequency of the power spectrum.

    # "We impose the additional constraint that nearest neighbors have a temporal separation
    # greater than the mean period of the time series: This allows us to consider each pair of
    # neighbors as nearby initial conditions for different trajectories."

    # "We estimated the mean period as the reciprocal of the mean frequency of the power spectrum,
    # although we expect any comparable estimate, e.g., using the median frequency of the magnitude
    # spectrum, to yield equivalent results."

    # Actual sampling rate does not matter
    psd = signal_psd(signal, sampling_rate=1000, method="fft", normalize=False, show=False)
    mean_freq = np.sum(psd["Power"] * psd["Frequency"]) / np.sum(psd["Power"])

    # 1 / mean_freq = seconds per cycle
    separation = int(np.ceil(1 / mean_freq * 1000))

    # Run algorithm
    # ----------------
    # Method
    method = method.lower()
    if method in ["rosenstein", "rosenstein1993"]:
        le, parameters = _complexity_lyapunov_rosenstein(
            signal, delay, dimension, separation, len_trajectory, **kwargs
        )
    elif method in ["eckmann", "eckmann1996"]:
        le, parameters = _complexity_lyapunov_eckmann(
            signal,
            dimension=dimension,
            separation=separation,
            matrix_dim=matrix_dim,
            min_neighbors=min_neighbors,
        )
    else:
        # Bug fix: an unrecognized method previously fell through both branches
        # and crashed with an UnboundLocalError on `le`. Raise an informative
        # error instead.
        raise ValueError(
            "NeuroKit error: complexity_lyapunov(): 'method' should be one of "
            "'rosenstein1993' or 'eckmann1996'."
        )

    # Store params
    info = {
        "Dimension": dimension,
        "Delay": delay,
        "Separation": separation,
        "Method": method,
    }
    info.update(parameters)

    return le, info
# =============================================================================
# Methods
# =============================================================================
def _complexity_lyapunov_rosenstein(
    signal, delay=1, dimension=2, separation=1, len_trajectory=20, show=False, **kwargs
):
    """Largest Lyapunov exponent via Rosenstein et al. (1993).

    Embeds the signal, pairs every orbit vector with its nearest (temporally
    separated) neighbour, follows the log-distance of each pair for
    ``len_trajectory`` steps, and returns the slope of a least-squares fit to
    the mean divergence curve as the LLE estimate.
    """
    # 1. Check that sufficient data points are available
    # Minimum length required to find single orbit vector
    min_len = (dimension - 1) * delay + 1
    # We need len_trajectory orbit vectors to follow a complete trajectory
    min_len += len_trajectory - 1
    # we need tolerance * 2 + 1 orbit vectors to find neighbors for each
    min_len += separation * 2 + 1
    # Sanity check (warn but proceed: the estimate may simply be unreliable)
    if len(signal) < min_len:
        warn(
            f"for dimension={dimension}, delay={delay}, separation={separation} and "
            f"len_trajectory={len_trajectory}, you need at least {min_len} datapoints in your"
            " time series.",
            category=NeuroKitWarning,
        )
    # Embedding (time-delay reconstruction of the phase space)
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    m = len(embedded)
    # Construct matrix with pairwise distances between vectors in orbit
    dists = sklearn.metrics.pairwise.euclidean_distances(embedded)
    for i in range(m):
        # Exclude indices within separation: temporally close points are not
        # valid neighbours, as they lie on the same trajectory segment
        dists[i, max(0, i - separation) : i + separation + 1] = np.inf
    # Find indices of nearest neighbours
    ntraj = m - len_trajectory + 1
    min_dist_indices = np.argmin(dists[:ntraj, :ntraj], axis=1)  # exclude last few indices
    min_dist_indices = min_dist_indices.astype(int)
    # Follow trajectories of neighbour pairs for len_trajectory data points
    trajectories = np.zeros(len_trajectory)
    for k in range(len_trajectory):
        divergence = dists[(np.arange(ntraj) + k, min_dist_indices + k)]
        dist_nonzero = np.where(divergence != 0)[0]
        if len(dist_nonzero) == 0:
            # No usable pair at this step; mark as -inf and drop it below
            trajectories[k] = -np.inf
        else:
            # Get average log-distances of neighbour pairs along the trajectory
            trajectories[k] = np.mean(np.log(divergence[dist_nonzero]))
    divergence_rate = trajectories[np.isfinite(trajectories)]
    # LLE obtained by least-squares fit to average line
    slope, intercept = np.polyfit(np.arange(1, len(divergence_rate) + 1), divergence_rate, 1)
    # Optional visualization of the divergence curve and its linear fit
    if show is True:
        plt.plot(np.arange(1, len(divergence_rate) + 1), divergence_rate)
        plt.axline((0, intercept), slope=slope, color="orange", label="Least-squares Fit")
        plt.ylabel("Divergence Rate")
        plt.legend()
    parameters = {"Trajectory_Length": len_trajectory}
    return slope, parameters
def _complexity_lyapunov_eckmann(
    signal, dimension=2, separation=None, matrix_dim=4, min_neighbors="default", tau=1
):
    """Lyapunov spectrum via Eckmann et al. (1986).

    TODO: check implementation -- this method is known to be broken (see the
    warning in ``complexity_lyapunov``). Adapted from
    https://github.com/CSchoel/nolds

    Estimates ``matrix_dim`` exponents by least-squares fitting the tangent
    maps that send each orbit point's neighbourhood onto its successor's, and
    accumulating the log-diagonals of their successive QR decompositions.
    """
    # Prepare parameters
    if min_neighbors == "default":
        min_neighbors = min(2 * matrix_dim, matrix_dim + 4)
    # Step between successive embedding coordinates
    # NOTE(review): m = 0 whenever dimension < matrix_dim, which makes the
    # final `lexp /= m` divide by zero -- likely part of why this is broken
    m = (dimension - 1) // (matrix_dim - 1)
    # minimum length required to find single orbit vector
    min_len = dimension
    # we need to follow each starting point of an orbit vector for m more steps
    min_len += m
    # we need separation * 2 + 1 orbit vectors to find neighbors for each
    min_len += separation * 2
    # we need at least min_nb neighbors for each orbit vector
    min_len += min_neighbors
    # Sanity check (warn but proceed)
    if len(signal) < min_len:
        warn(
            f"for dimension={dimension}, separation={separation}, "
            f"matrix_dim={matrix_dim} and min_neighbors={min_neighbors}, "
            f"you need at least {min_len} datapoints in your time series.",
            category=NeuroKitWarning,
        )
    # Storing of LEs (running sums and how many terms contributed to each)
    lexp = np.zeros(matrix_dim)
    lexp_counts = np.zeros(matrix_dim)
    old_Q = np.identity(matrix_dim)
    # We need to be able to step m points further for the beta vector
    vec = signal if m == 0 else signal[:-m]  # If m==0, return full signal
    # Reconstruction using time-delay method
    embedded = complexity_embedding(vec, delay=1, dimension=dimension)
    distances = sklearn.metrics.pairwise_distances(embedded, metric="chebyshev")
    for i in range(len(embedded)):
        # exclude difference of vector to itself and those too close in time
        distances[i, max(0, i - separation) : i + separation + 1] = np.inf
        # index of furthest nearest neighbour
        neighbour_furthest = np.argsort(distances[i])[min_neighbors - 1]
        # get neighbors within the radius
        r = distances[i][neighbour_furthest]
        neighbors = np.where(distances[i] <= r)[0]  # should have length = min_neighbours
        # Find matrix T_i (matrix_dim * matrix_dim) that sends points from neighbourhood of x(i) to x(i+1)
        vec_beta = signal[neighbors + matrix_dim * m] - signal[i + matrix_dim * m]
        matrix = np.array([signal[j : j + dimension : m] for j in neighbors])  # x(j)
        matrix -= signal[i : i + dimension : m]  # x(j) - x(i)
        # form matrix T_i (companion form: shifted identity + fitted last row)
        t_i = np.zeros((matrix_dim, matrix_dim))
        t_i[:-1, 1:] = np.identity(matrix_dim - 1)
        t_i[-1] = np.linalg.lstsq(matrix, vec_beta, rcond=-1)[0]  # least squares solution
        # QR-decomposition of T * old_Q
        mat_Q, mat_R = np.linalg.qr(np.dot(t_i, old_Q))
        # force diagonal of R to be positive
        sign_diag = np.sign(np.diag(mat_R))
        sign_diag[np.where(sign_diag == 0)] = 1
        sign_diag = np.diag(sign_diag)
        mat_Q = np.dot(mat_Q, sign_diag)
        mat_R = np.dot(sign_diag, mat_R)
        old_Q = mat_Q
        # successively build sum for Lyapunov exponents
        diag_R = np.diag(mat_R)
        # filter zeros in mat_R (would lead to -infs)
        positive_elements = np.where(diag_R > 0)
        lexp_i = np.zeros(len(diag_R))
        lexp_i[positive_elements] = np.log(diag_R[positive_elements])
        lexp_i[np.where(diag_R == 0)] = np.inf
        lexp[positive_elements] += lexp_i[positive_elements]
        lexp_counts[positive_elements] += 1
    # normalize exponents over number of individual mat_Rs
    idx = np.where(lexp_counts > 0)
    lexp[idx] /= lexp_counts[idx]
    lexp[np.where(lexp_counts == 0)] = np.inf
    # normalize with respect to tau
    lexp /= tau
    # take m into account
    lexp /= m
    parameters = {"Minimum_Neighbors": min_neighbors}
    return lexp, parameters
| 13,377 | 39.786585 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_multiscale.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import copyfunction
from .complexity_lempelziv import complexity_lempelziv
from .entropy_approximate import entropy_approximate
from .entropy_cosinesimilarity import entropy_cosinesimilarity
from .entropy_increment import entropy_increment
from .entropy_permutation import entropy_permutation
from .entropy_sample import entropy_sample
from .entropy_slope import entropy_slope
from .entropy_symbolicdynamic import entropy_symbolicdynamic
from .optim_complexity_tolerance import complexity_tolerance
from .utils import _phi, _phi_divide
from .utils_complexity_coarsegraining import _get_scales, complexity_coarsegraining
def entropy_multiscale(
signal,
scale="default",
dimension=3,
tolerance="sd",
method="MSEn",
show=False,
**kwargs,
):
"""**Multiscale entropy (MSEn) and its Composite (CMSEn), Refined (RCMSEn) or fuzzy versions**
One of the limitation of :func:`SampEn <entropy_sample>` is that it characterizes
complexity strictly on the time scale defined by the sampling procedure (via the ``delay``
argument). To address this, Costa et al. (2002) proposed the multiscale entropy (MSEn),
which computes sample entropies at multiple scales.
The conventional MSEn algorithm consists of two steps:
1. A :func:`coarse-graining <complexity_coarsegraining>` procedure is used to represent the
signal at different time scales.
2. :func:`Sample entropy <entropy_sample>` (or other function) is used to quantify the
regularity of a coarse-grained time series at each time scale factor.
However, in the traditional coarse-graining procedure, the larger the scale factor is, the
shorter the coarse-grained time series is. As such, the variance of the entropy of the
coarse-grained series estimated by SampEn increases as the time scale factor increases, making
it problematic for shorter signals.
* **CMSEn**: In order to reduce the variance of estimated entropy values at large scales, Wu et
al. (2013) introduced the **Composite Multiscale Entropy** algorithm, which computes
multiple coarse-grained series for each scale factor (via the **time-shift** method for
:func:`coarse-graining <complexity_coarsegraining>`).
* **RCMSEn**: Wu et al. (2014) further **Refined** their CMSEn by averaging not the entropy
values of each subcoarsed vector, but its components at a lower level.
* **MMSEn**: Wu et al. (2013) also introduced the **Modified Multiscale Entropy**
algorithm, which is based on rolling-average :func:`coarse-graining <complexity_coarsegraining>`.
* **IMSEn**: Liu et al. (2012) introduced an adaptive-resampling procedure to resample the
coarse-grained series. We implement a generalization of this via interpolation that can be
referred to as **Interpolated Multiscale Entropy**.
.. warning::
Interpolated Multiscale variants don't work as expected. Help is needed to fix this
procedure.
Their :func:`Fuzzy <entropy_fuzzy>` version can be obtained by setting ``fuzzy=True``.
This function can be called either via ``entropy_multiscale()`` or ``complexity_mse()``.
Moreover, variants can be directly accessed via ``complexity_cmse()``, `complexity_rcmse()``,
``complexity_fuzzymse()``, ``complexity_fuzzycmse()`` and ``complexity_fuzzyrcmse()``.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
or dataframe.
scale : str or int or list
A list of scale factors used for coarse graining the time series. If 'default', will use
``range(len(signal) / (dimension + 10))`` (see discussion
`here <https://github.com/neuropsychology/NeuroKit/issues/75#issuecomment-583884426>`_).
If 'max', will use all scales until half the length of the signal. If an integer, will
create a range until the specified int. See :func:`complexity_coarsegraining` for details.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
tolerance : float
Tolerance (often denoted as *r*), distance to consider two data points as similar. If
``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
:func:`complexity_tolerance` to estimate the optimal value for this parameter.
method : str
What version of multiscale entropy to compute. Can be one of ``"MSEn"``, ``"CMSEn"``,
``"RCMSEn"``, ``"MMSEn"``, ``"IMSEn"``, ``"MSApEn"``, ``"MSPEn"``, ``"CMSPEn"``,
``"MMSPEn"``, ``"IMSPEn"``, ``"MSWPEn"``, ``"CMSWPEn"``, ``"MMSWPEn"``, ``"IMSWPEn"``
(case sensitive).
show : bool
Show the entropy values for each scale factor.
**kwargs
Optional arguments.
Returns
----------
float
The point-estimate of multiscale entropy (MSEn) of the single time series corresponding to
the area under the MSEn values curve, which is essentially the sum of sample entropy values
over the range of scale factors.
dict
A dictionary containing additional information regarding the parameters used
to compute multiscale entropy. The entropy values corresponding to each ``"Scale"``
factor are stored under the ``"Value"`` key.
See Also
--------
complexity_coarsegraining, entropy_sample, entropy_fuzzy, entropy_permutation
Examples
----------
**MSEn** (basic coarse-graining)
.. ipython:: python
import neurokit2 as nk
signal = nk.signal_simulate(duration=2, frequency=[5, 12, 40])
@savefig p_entropy_multiscale1.png scale=100%
msen, info = nk.entropy_multiscale(signal, show=True)
@suppress
plt.close()
**CMSEn** (time-shifted coarse-graining)
.. ipython:: python
@savefig p_entropy_multiscale2.png scale=100%
cmsen, info = nk.entropy_multiscale(signal, method="CMSEn", show=True)
@suppress
plt.close()
**RCMSEn** (refined composite MSEn)
.. ipython:: python
@savefig p_entropy_multiscale3.png scale=100%
rcmsen, info = nk.entropy_multiscale(signal, method="RCMSEn", show=True)
@suppress
plt.close()
**MMSEn** (rolling-window coarse-graining)
.. ipython:: python
@savefig p_entropy_multiscale4.png scale=100%
mmsen, info = nk.entropy_multiscale(signal, method="MMSEn", show=True)
@suppress
plt.close()
**IMSEn** (interpolated coarse-graining)
.. ipython:: python
@savefig p_entropy_multiscale5.png scale=100%
imsen, info = nk.entropy_multiscale(signal, method="IMSEn", show=True)
@suppress
plt.close()
**MSApEn** (based on ApEn instead of SampEn)
.. ipython:: python
@savefig p_entropy_multiscale6.png scale=100%
msapen, info = nk.entropy_multiscale(signal, method="MSApEn", show=True)
@suppress
plt.close()
**MSPEn** (based on PEn), **CMSPEn**, **MMSPEn** and **IMSPEn**
.. ipython:: python
@savefig p_entropy_multiscale7.png scale=100%
mspen, info = nk.entropy_multiscale(signal, method="MSPEn", show=True)
@suppress
plt.close()
.. ipython:: python
cmspen, info = nk.entropy_multiscale(signal, method="CMSPEn")
cmspen
mmspen, info = nk.entropy_multiscale(signal, method="MMSPEn")
mmspen
imspen, info = nk.entropy_multiscale(signal, method="IMSPEn")
imspen
**MSWPEn** (based on WPEn), **CMSWPEn**, **MMSWPEn** and **IMSWPEn**
.. ipython:: python
mswpen, info = nk.entropy_multiscale(signal, method="MSWPEn")
cmswpen, info = nk.entropy_multiscale(signal, method="CMSWPEn")
mmswpen, info = nk.entropy_multiscale(signal, method="MMSWPEn")
imswpen, info = nk.entropy_multiscale(signal, method="IMSWPEn")
**FuzzyMSEn**, **FuzzyCMSEn** and **FuzzyRCMSEn**
.. ipython:: python
@savefig p_entropy_multiscale8.png scale=100%
fuzzymsen, info = nk.entropy_multiscale(signal, method="MSEn", fuzzy=True, show=True)
@suppress
plt.close()
.. ipython:: python
fuzzycmsen, info = nk.entropy_multiscale(signal, method="CMSEn", fuzzy=True)
fuzzycmsen
fuzzyrcmsen, info = nk.entropy_multiscale(signal, method="RCMSEn", fuzzy=True)
fuzzycmsen
References
-----------
* Costa, M., Goldberger, A. L., & Peng, C. K. (2002). Multiscale entropy analysis of complex
physiologic time series. Physical review letters, 89(6), 068102.
* Costa, M., Goldberger, A. L., & Peng, C. K. (2005). Multiscale entropy analysis of biological
signals. Physical review E, 71(2), 021906.
* Wu, S. D., Wu, C. W., Lee, K. Y., & Lin, S. G. (2013). Modified multiscale entropy for
short-term time series analysis. Physica A: Statistical Mechanics and its Applications, 392
(23), 5865-5873.
* Wu, S. D., Wu, C. W., Lin, S. G., Wang, C. C., & Lee, K. Y. (2013). Time series analysis
using composite multiscale entropy. Entropy, 15(3), 1069-1084.
* Wu, S. D., Wu, C. W., Lin, S. G., Lee, K. Y., & Peng, C. K. (2014). Analysis of complex time
series using refined composite multiscale entropy. Physics Letters A, 378(20), 1369-1374.
* Gow, B. J., Peng, C. K., Wayne, P. M., & Ahn, A. C. (2015). Multiscale entropy analysis of
center-of-pressure dynamics in human postural control: methodological considerations. Entropy,
17(12), 7926-7947.
* Norris, P. R., Anderson, S. M., Jenkins, J. M., Williams, A. E., & Morris Jr, J. A. (2008).
Heart rate multiscale entropy at three hours predicts hospital mortality in 3,154 trauma
patients. Shock, 30(1), 17-22.
* Liu, Q., Wei, Q., Fan, S. Z., Lu, C. W., Lin, T. Y., Abbod, M. F., & Shieh, J. S. (2012).
Adaptive computation of multiscale entropy and its application in EEG signals for monitoring
depth of anesthesia during surgery. Entropy, 14(6), 978-992.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Prevent multiple arguments error in case 'delay' is passed in kwargs
if "delay" in kwargs:
kwargs.pop("delay")
# Parameters selection
algorithm = entropy_sample
refined = False
coarsegraining = "nonoverlapping"
if method in ["MSEn", "SampEn"]:
pass # The default arguments are good
elif method in ["MSApEn", "ApEn", "MSPEn", "PEn", "MSWPEn", "WPEn"]:
if method in ["MSApEn", "ApEn"]:
algorithm = entropy_approximate
if method in ["MSPEn", "PEn"]:
algorithm = entropy_permutation
if method in ["MSWPEn", "WPEn"]:
algorithm = copyfunction(entropy_permutation, weighted=True)
elif method in ["MMSEn", "MMSPEn", "MMSWPEn"]:
coarsegraining = "rolling"
if method in ["MMSPEn"]:
algorithm = entropy_permutation
if method in ["MMSWPEn"]:
algorithm = copyfunction(entropy_permutation, weighted=True)
elif method in ["IMSEn", "IMSPEn", "IMSWPEn"]:
coarsegraining = "interpolate"
if method in ["IMSPEn"]:
algorithm = entropy_permutation
if method in ["IMSWPEn"]:
algorithm = copyfunction(entropy_permutation, weighted=True)
elif method in ["CMSEn", "RCMSEn", "CMSPEn", "CMSWPEn"]:
coarsegraining = "timeshift"
if method in ["CMSPEn"]:
algorithm = entropy_permutation
if method in ["CMSWPEn"]:
algorithm = copyfunction(entropy_permutation, weighted=True)
if method in ["RCMSEn"]:
refined = True
elif method in ["MSCoSiEn", "CoSiEn"]:
algorithm = entropy_cosinesimilarity
elif method in ["MSIncrEn", "IncrEn"]:
algorithm = entropy_increment
elif method in ["MSSlopEn", "SlopEn"]:
algorithm = entropy_slope
elif method in ["MSLZC", "LZC"]:
algorithm = complexity_lempelziv
elif method in ["MSPLZC", "PLZC"]:
algorithm = copyfunction(complexity_lempelziv, permutation=True)
elif method in ["MSSyDyEn", "SyDyEn", "MMSyDyEn"]:
algorithm = entropy_symbolicdynamic
if method in ["MMSyDyEn"]:
coarsegraining = "rolling"
else:
raise ValueError(
"Method '{method}' is not supported. Please use "
"'MSEn', 'CMSEn', 'RCMSEn', 'MMSEn', 'IMSPEn',"
"'MSPEn', 'CMSPEn', 'MMSPEn', 'IMSPEn',"
"'MSWPEn', 'CMSWPEn', 'MMSWPEn', 'IMSWPEn',"
"'MSCoSiEn', 'MSIncrEn', 'MSSlopEn', 'MSSyDyEn'"
"'MSLZC', 'MSPLZC'"
" or 'MSApEn' (case sensitive)."
)
# Store parameters
info = {
"Method": method,
"Algorithm": algorithm.__name__,
"Coarsegraining": coarsegraining,
"Dimension": dimension,
"Scale": _get_scales(signal, scale=scale, dimension=dimension),
"Tolerance": complexity_tolerance(
signal,
method=tolerance,
dimension=dimension,
show=False,
)[0],
}
# Compute entropy for each coarsegrained segment
info["Value"] = np.array(
[
_entropy_multiscale(
coarse=complexity_coarsegraining(
signal,
scale=scale,
method=coarsegraining,
show=False,
**kwargs,
),
algorithm=algorithm,
dimension=dimension,
tolerance=info["Tolerance"],
refined=refined,
**kwargs,
)
for scale in info["Scale"]
]
)
# Remove inf, nan and 0
mse = info["Value"][np.isfinite(info["Value"])]
# The MSE index is quantified as the area under the curve (AUC),
# which is like the sum normalized by the number of values. It's similar to the mean.
mse = np.trapz(mse) / len(mse)
# Plot overlay
if show is True:
_entropy_multiscale_plot(mse, info)
return mse, info
# =============================================================================
# Internal
# =============================================================================
def _entropy_multiscale_plot(mse, info):
    """Plot the entropy value obtained at each scale factor.

    Non-finite entropy values (NaN/Inf, e.g. from scales too short to embed) are
    excluded from the curve. Returns the matplotlib figure.
    """
    fig = plt.figure(constrained_layout=False)
    fig.suptitle("Entropy values across scale factors")
    plt.title(f"(Total {info['Method']} = {np.round(mse, 3)})")
    plt.ylabel("Entropy values")
    plt.xlabel("Scale")
    # Mask out non-finite values once and reuse it for both axes.
    finite = np.isfinite(info["Value"])
    plt.plot(info["Scale"][finite], info["Value"][finite], color="#FF9800")
    return fig
# =============================================================================
# Methods
# =============================================================================
def _entropy_multiscale(coarse, algorithm, dimension, tolerance, refined=False, **kwargs):
    """Compute entropy of a coarse-grained series.

    Accepts either a 1D array (plain coarse-graining) or a 2D array with one row
    per time shift (composite coarse-graining). For 2D input, either the entropy
    values (CMSE) or the phi counts (RCMSE, when ``refined=True``) are averaged
    across shifts.
    """
    # Plain (1D) coarse-graining: a single entropy estimate.
    if coarse.ndim == 1:
        return algorithm(
            coarse,
            delay=1,
            dimension=dimension,
            tolerance=tolerance,
            **kwargs,
        )[0]
    # 2D coarse-graining (one row per time shift, used in composite variants).
    if refined is False:
        # CMSE: average the entropy of each shifted series (robust to NaN/Inf).
        values = []
        for shifted in coarse:
            values.append(
                algorithm(
                    shifted,
                    delay=1,
                    dimension=dimension,
                    tolerance=tolerance,
                    **kwargs,
                )[0]
            )
        return _validmean(values)
    # RCMSE: average the phi counts of each shift per dimension first, then
    # take the ratio and the log.
    phis = np.array(
        [
            _phi(
                shifted,
                delay=1,
                dimension=dimension,
                tolerance=tolerance,
                approximate=False,
            )[0]
            for shifted in coarse
        ]
    )
    return _phi_divide([_validmean(phis[:, 0]), _validmean(phis[:, 1])])
def _validmean(x):
"""Mean that is robust to NaN and Inf."""
x = np.array(x)[np.isfinite(x)]
if len(x) == 0:
return np.nan
else:
return np.mean(x)
| 16,848 | 37.822581 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_permutation.py | import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
from .utils_complexity_ordinalpatterns import complexity_ordinalpatterns
def entropy_permutation(
    signal, delay=1, dimension=3, corrected=True, weighted=False, conditional=False, **kwargs
):
    """**Permutation Entropy (PEn), its Weighted (WPEn) and Conditional (CPEn) forms**

    Permutation Entropy (PEn) is a robust measure of the complexity of a dynamic system by
    capturing the order relations between values of a time series and extracting a probability
    distribution of the ordinal patterns (see Henry and Judge, 2019). Using ordinal descriptors
    increases robustness to large artifacts occurring with low frequencies. PEn is applicable
    for regular, chaotic, noisy, or real-world time series and has been employed in the context of
    EEG, ECG, and stock market time series.

    Mathematically, it corresponds to the :func:`Shannon entropy <entropy_shannon>` after the
    signal has been made :func:`discrete <complexity_symbolize>` by analyzing the permutations in
    the time-embedded space.

    However, the main shortcoming of traditional PEn is that no information besides the order
    structure is retained when extracting the ordinal patterns, which leads to several possible
    issues (Fadlallah et al., 2013). The **Weighted PEn** was developed to address these
    limitations by incorporating significant information (regarding the amplitude) from the
    original time series into the ordinal patterns.

    The **Conditional Permutation Entropy (CPEn)** was originally defined by Bandt & Pompe as
    *Sorting Entropy*, but recently gained in popularity as conditional through the work of
    Unakafov et al. (2014). It describes the average diversity of the ordinal patterns succeeding
    a given ordinal pattern (dimension+1 vs. dimension).

    This function can be called either via ``entropy_permutation()`` or ``complexity_pe()``.
    Moreover, variants can be directly accessed via ``complexity_wpe()`` and
    ``complexity_mspe()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    corrected : bool
        If ``True``, divide by log2(factorial(m)) to normalize the entropy between 0 and 1.
        Otherwise, return the permutation entropy in bit.
    weighted : bool
        If True, compute the weighted permutation entropy (WPE).
    conditional : bool
        If True, return the conditional permutation entropy, i.e., the difference between the
        entropy at dimension+1 and the entropy at dimension.
    **kwargs
        Optional arguments, such as a function to compute Entropy (:func:`nk.entropy_shannon`
        (default), :func:`nk.entropy_tsallis` or :func:`nk.entropy_reyni`).

    Returns
    -------
    PEn : float
        Permutation Entropy
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    complexity_ordinalpatterns, entropy_shannon, entropy_multiscale

    Examples
    ----------
    .. ipython:: python

      signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6], noise=0.5)

      # Permutation Entropy (uncorrected)
      pen, info = nk.entropy_permutation(signal, corrected=False)
      pen

      # Weighted Permutation Entropy (WPEn)
      wpen, info = nk.entropy_permutation(signal, weighted=True)
      wpen

      # Conditional Permutation Entropy (CPEn)
      cpen, info = nk.entropy_permutation(signal, conditional=True)
      cpen

      # Conditional Weighted Permutation Entropy (CWPEn)
      cwpen, info = nk.entropy_permutation(signal, weighted=True, conditional=True)
      cwpen

      # Conditional Renyi Permutation Entropy (CRPEn)
      crpen, info = nk.entropy_permutation(signal, conditional=True, algorithm=nk.entropy_renyi, alpha=2)
      crpen

    References
    ----------
    * Henry, M., & Judge, G. (2019). Permutation entropy and information recovery in nonlinear
      dynamic economic time series. Econometrics, 7(1), 10.
    * Fadlallah, B., Chen, B., Keil, A., & Principe, J. (2013). Weighted-permutation entropy: A
      complexity measure for time series incorporating amplitude information. Physical Review E, 87
      (2), 022911.
    * Zanin, M., Zunino, L., Rosso, O. A., & Papo, D. (2012). Permutation entropy and its main
      biomedical and econophysics applications: a review. Entropy, 14(8), 1553-1577.
    * Bandt, C., & Pompe, B. (2002). Permutation entropy: a natural complexity measure for time
      series. Physical review letters, 88(17), 174102.
    * Unakafov, A. M., & Keller, K. (2014). Conditional entropy of ordinal patterns. Physica D:
      Nonlinear Phenomena, 269, 94-102.

    """
    # Local import: the `np.math` alias was deprecated in NumPy 1.25 and removed in
    # NumPy 2.0, so the standard-library `math.factorial` is used for normalization.
    import math

    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    info = {"Corrected": corrected, "Weighted": weighted, "Dimension": dimension, "Delay": delay}

    pen = _entropy_permutation(
        signal,
        dimension=dimension,
        delay=delay,
        corrected=corrected,
        weighted=weighted,
        **kwargs,
    )

    if conditional is True:
        # Compute PEn at m+1, then take the difference with PEn at m.
        pen_m1 = _entropy_permutation(
            signal,
            dimension=dimension + 1,
            delay=delay,
            corrected=corrected,
            weighted=weighted,
            **kwargs,
        )
        pen = pen_m1 - pen
        if corrected:
            # Normalize by the maximum entropy at dimension m+1, log2((m+1)!).
            pen = pen / np.log2(math.factorial(dimension + 1))
    else:
        if corrected:
            # Normalize by the maximum entropy at dimension m, log2(m!).
            pen = pen / np.log2(math.factorial(dimension))

    return pen, info
# =============================================================================
# Permutation Entropy
# =============================================================================
def _entropy_permutation(
    signal,
    dimension=3,
    delay=1,
    corrected=True,
    weighted=False,
    algorithm=entropy_shannon,
    sorting="quicksort",
    **kwargs
):
    """Core PEn computation: extract ordinal-pattern frequencies, then apply an
    entropy measure (Shannon by default) to that distribution."""
    patterns, info = complexity_ordinalpatterns(
        signal,
        dimension=dimension,
        delay=delay,
        algorithm=sorting,
    )

    # Weighted permutation entropy ----------------------------------------------
    if weighted is True:
        # Each embedded vector contributes the variance of its values as a weight.
        weights = np.var(info["Embedded"], axis=1)
        info["Weights"] = weights
        # Sum the weights of all embedded vectors matching each unique pattern.
        totals = []
        for pattern in patterns:
            matches = np.all(info["Permutations"] == pattern, axis=1)
            totals.append(weights[matches].sum())
        # Normalize the weighted counts into a probability distribution.
        freq = np.array(totals) / weights.sum()
    else:
        freq = info["Frequencies"]

    # Compute entropy of the (possibly weighted) pattern distribution ------------
    pe, _ = algorithm(freq=freq, **kwargs)
    return pe
| 7,257 | 37.606383 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_tsallis.py | import numpy as np
from .entropy_shannon import _entropy_freq
def entropy_tsallis(signal=None, q=1, symbolize=None, show=False, freq=None, **kwargs):
    """**Tsallis entropy (TSEn)**

    Tsallis Entropy generalizes :func:`Shannon entropy <entropy_shannon>` to the nonextensive
    case. It is computed from a vector of probabilities of the different states. Because it
    operates on discrete inputs (e.g., [A, B, B, A, B]), continuous signals must first be made
    discrete.

    .. math::

      TSEn = \\frac{1}{q - 1} \\left( 1 - \\sum_{x \\in \\mathcal{X}} p(x)^q \\right)

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    q : float
        Tsallis's *q* parameter, sometimes referred to as the entropic-index (default to 1).
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. ``None``
        by default, which skips the process (and assumes the input is already discrete). See
        :func:`complexity_symbolize` for details.
    show : bool
        If ``True``, will show the discrete the signal.
    freq : np.array
        Instead of a signal, a vector of probabilities can be provided.
    **kwargs
        Optional arguments. Not used for now.

    Returns
    --------
    tsen : float
        The Tsallis entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, fractal_petrosian, entropy_renyi

    References
    -----------
    * Tsallis, C. (2009). Introduction to nonextensive statistical mechanics: approaching a
      complex world. Springer, 1(1), 2-1.

    """
    # Derive state frequencies from the signal unless probabilities were supplied directly.
    if freq is None:
        _, freq = _entropy_freq(signal, symbolize=symbolize, show=show)
    probs = freq / np.sum(freq)

    # The q-logarithm ln_q(1/p) reduces to the natural logarithm as q -> 1
    # (i.e., Tsallis entropy converges to Shannon entropy in that limit).
    if np.isclose(q, 1):
        q_log = np.log(1 / probs)
    else:
        q_log = ((1 / probs) ** (1 - q) - 1) / (1 - q)

    contributions = probs * q_log
    return np.sum(contributions), {"Symbolization": symbolize, "Values": contributions}
| 2,443 | 29.936709 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/bio/bio_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..ecg import ecg_process
from ..eda import eda_process
from ..emg import emg_process
from ..eog import eog_process
from ..hrv import hrv_rsa
from ..misc import as_vector
from ..ppg import ppg_process
from ..rsp import rsp_process
def bio_process(
    ecg=None, rsp=None, eda=None, emg=None, ppg=None, eog=None, keep=None, sampling_rate=1000
):
    """**Automated processing of bio signals**

    Wrapper for other bio processing functions of
    electrocardiography signals (ECG), respiration signals (RSP),
    electrodermal activity (EDA) and electromyography signals (EMG).

    Parameters
    ----------
    ecg : Union[list, np.array, pd.Series, pd.DataFrame]
        The raw ECG channel. Alternatively, a DataFrame containing all the respective signals
        (e.g., ecg, rsp, Photosensor etc.) can be passed as this first argument; in that case
        there is no need to fill in the other channel arguments, as the relevant columns
        ("ECG"/"EKG", "RSP", "EDA", "EMG", "PPG", "EOG") are extracted automatically and any
        remaining columns are treated as ``keep``.
    rsp : Union[list, np.array, pd.Series]
        The raw RSP channel (as measured, for instance, by a respiration belt).
    eda : Union[list, np.array, pd.Series]
        The raw EDA channel.
    emg : Union[list, np.array, pd.Series]
        The raw EMG channel.
    ppg : Union[list, np.array, pd.Series]
        The raw PPG channel.
    eog : Union[list, np.array, pd.Series]
        The raw EOG channel, extracted from :func:`.mne_channel_extract().`
    keep : DataFrame
        Dataframe or channels to add by concatenation
        to the processed dataframe (for instance, the Photosensor channel).
    sampling_rate : int
        The sampling frequency of the signals (in Hz, i.e., samples/second).
        Defaults to ``1000``.

    Returns
    ----------
    bio_df : DataFrame
        DataFrames of the following processed bio features:

        * *"ECG"*: the raw signal, the cleaned signal, the heart rate, and the R peaks indexes.
          Also generated by :func:`.ecg_process()`.
        * *"RSP"*: the raw signal, the cleaned signal, the rate, and the amplitude. Also generated
          by :func:`.rsp_process()`.
        * *"EDA"*: the raw signal, the cleaned signal, the tonic component, the phasic component,
          indexes of the SCR onsets, peaks, amplitudes, and half-recovery times. Also generated by
          :func:`.eda_process()`.
        * *"EMG"*: the raw signal, the cleaned signal, and amplitudes. Also generated by :func:`.
          emg_process()`.
        * *"PPG"*: the raw signal, the cleaned signal, rate and peaks. Also generated by :func:`.
          ppg_process()`.
        * *"RSA"*: Respiratory Sinus Arrhythmia features generated by :func:`.ecg_rsa()`, if both
          ECG and RSP are provided.
        * *"EOG"*: the raw signal, the cleaned signal, the indexes of EOG blinks, and the blink
          rate.
    bio_info : dict
        A dictionary containing the samples of peaks, troughs, amplitudes, onsets, offsets, periods
        of activation, recovery times of the respective processed signals,
        as well as the signals' sampling rate.

    See Also
    ----------
    .ecg_process, .rsp_process, .eda_process, .emg_process, .ppg_process, .eog_process

    Example
    ----------
    **Example 1**: Using synthetic data

    .. ipython:: python

      import neurokit2 as nk

      # With Simulated Data
      ecg = nk.ecg_simulate(duration=40, sampling_rate=250)
      rsp = nk.rsp_simulate(duration=40, sampling_rate=250)
      eda = nk.eda_simulate(duration=40, sampling_rate=250, scr_number=3)
      emg = nk.emg_simulate(duration=40, sampling_rate=250, burst_number=5)

      bio_df, bio_info = nk.bio_process(ecg=ecg, rsp=rsp, eda=eda, emg=emg, sampling_rate=250)
      bio_df.head()

    .. ipython:: python

      # Visualize a subset of signals
      @savefig p_bio_process1.png scale=100%
      bio_df.iloc[:, 0:16].plot(subplots=True)
      @suppress
      plt.close()

    **Example 2**: Using a real dataset

    .. ipython:: python

      # Download EOG signal separately
      eog = nk.data('eog_100hz')

      # Download data but crop with same length as eog signal
      data = nk.data('bio_eventrelated_100hz')[:len(eog)]

      bio_df, bio_info = nk.bio_process(ecg=data['ECG'], rsp=data['RSP'], eda=data['EDA'],
                                        emg=None, eog=eog, keep=data['Photosensor'], sampling_rate=100)

      # Visualize all signals
      @savefig p_bio_process2.png scale=100%
      bio_df.iloc[:, 0:16].plot(subplots=True)
      @suppress
      plt.close()

    """
    # Accumulators: each modality's processed columns are concatenated into bio_df,
    # and its peak/onset/offset info dicts are merged into bio_info.
    bio_info = {}
    bio_df = pd.DataFrame({})

    # If the first argument is a DataFrame, unpack its columns into the individual
    # signal arguments (overwriting whatever was passed for rsp/eda/emg/ppg/eog).
    # Columns not matching a known signal name are kept aside (see `keep` below).
    if ecg is not None:
        if isinstance(ecg, pd.DataFrame):
            data = ecg.copy()
            if "RSP" in data.keys():
                rsp = data["RSP"]
            else:
                rsp = None
            if "EDA" in data.keys():
                eda = data["EDA"]
            else:
                eda = None
            if "EMG" in data.keys():
                emg = data["EMG"]
            else:
                emg = None
            # ECG accepts either an "ECG" or an "EKG" column name.
            if "ECG" in data.keys():
                ecg = data["ECG"]
            elif "EKG" in data.keys():
                ecg = data["EKG"]
            else:
                ecg = None
            if "PPG" in data.keys():
                ppg = data["PPG"]
            else:
                ppg = None
            if "EOG" in data.keys():
                eog = data["EOG"]
            else:
                eog = None
            # Any remaining columns (e.g., Photosensor) are appended untouched.
            cols = ["ECG", "EKG", "RSP", "EDA", "EMG", "PPG", "EOG"]
            keep_keys = [key for key in data.keys() if key not in cols]
            if len(keep_keys) != 0:
                keep = data[keep_keys]
            else:
                keep = None

    # ECG
    if ecg is not None:
        ecg = as_vector(ecg)
        ecg_signals, ecg_info = ecg_process(ecg, sampling_rate=sampling_rate)
        bio_info.update(ecg_info)
        bio_df = pd.concat([bio_df, ecg_signals], axis=1)

    # RSP
    if rsp is not None:
        rsp = as_vector(rsp)
        rsp_signals, rsp_info = rsp_process(rsp, sampling_rate=sampling_rate)
        bio_info.update(rsp_info)
        bio_df = pd.concat([bio_df, rsp_signals], axis=1)

    # EDA
    if eda is not None:
        eda = as_vector(eda)
        eda_signals, eda_info = eda_process(eda, sampling_rate=sampling_rate)
        bio_info.update(eda_info)
        bio_df = pd.concat([bio_df, eda_signals], axis=1)

    # EMG
    if emg is not None:
        emg = as_vector(emg)
        emg_signals, emg_info = emg_process(emg, sampling_rate=sampling_rate)
        bio_info.update(emg_info)
        bio_df = pd.concat([bio_df, emg_signals], axis=1)

    # PPG
    if ppg is not None:
        ppg = as_vector(ppg)
        ppg_signals, ppg_info = ppg_process(ppg, sampling_rate=sampling_rate)
        bio_info.update(ppg_info)
        bio_df = pd.concat([bio_df, ppg_signals], axis=1)

    # EOG
    if eog is not None:
        eog = as_vector(eog)
        eog_signals, eog_info = eog_process(eog, sampling_rate=sampling_rate)
        bio_info.update(eog_info)
        bio_df = pd.concat([bio_df, eog_signals], axis=1)

    # Additional channels to keep: reset the index so concatenation aligns by position.
    if keep is not None:
        keep = keep.reset_index(drop=True)
        bio_df = pd.concat([bio_df, keep], axis=1)

    # RSA: requires both the processed ECG and RSP signals computed above.
    if ecg is not None and rsp is not None:
        rsa = hrv_rsa(
            ecg_signals, rsp_signals, rpeaks=None, sampling_rate=sampling_rate, continuous=True
        )
        bio_df = pd.concat([bio_df, rsa], axis=1)

    # Add sampling rate in dict info
    bio_info["sampling_rate"] = sampling_rate

    return bio_df, bio_info
| 7,646 | 33.60181 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/bio/bio_analyze.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..ecg import ecg_analyze
from ..eda import eda_analyze
from ..emg import emg_analyze
from ..eog import eog_analyze
from ..hrv import hrv_rsa
from ..ppg import ppg_analyze
from ..rsp import rsp_analyze
def bio_analyze(data, sampling_rate=1000, method="auto", window_lengths="constant"):
    """**Automated analysis of physiological signals**

    Wrapper for other bio analyze functions of electrocardiography signals (ECG), respiration
    signals (RSP), electrodermal activity (EDA), electromyography signals (EMG) and
    electrooculography signals (EOG).

    Parameters
    ----------
    data : DataFrame
        The DataFrame containing all the processed signals, typically
        produced by :func:`.bio_process`, :func:`.ecg_process`, :func:`.rsp_process`,
        :func:`.eda_process`, :func:`.emg_process` or :func:`.eog_process`. Can also be an
        epochs object (a dict of DataFrames).
    sampling_rate : int
        The sampling frequency of the signals (in Hz, i.e., samples/second).
        Defaults to 1000.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs,
        or ``"interval-related"`` for analysis on longer periods of data. Defaults
        to ``auto`` where the right method will be chosen based on the
        mean duration of the data (event-related for duration under 10s).
    window_lengths : dict
        If ``constant`` (default), will use the full epoch for all the signals. Can also
        be a dictionary with the epoch start and end times for different
        types of signals, e.g., ``window_lengths = {"ECG": [0.5, 1.5], "EDA": [0.5, 3.5]}``

    Returns
    ----------
    DataFrame
        DataFrame of the analyzed bio features. See docstrings of :func:`.ecg_analyze()`,
        :func:`.rsp_analyze()`, :func:`.eda_analyze()`, :func:`.emg_analyze()` and
        :func:`.eog_analyze()` for more details. Also returns Respiratory Sinus Arrhythmia
        features produced by :func:`.hrv_rsa()` if interval-related analysis is carried out.

    See Also
    ----------
    .ecg_analyze, .rsp_analyze, .eda_analyze, .emg_analyze, .eog_analyze

    Examples
    ----------
    **Example 1**: Event-related analysis

    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_eventrelated_100hz")

      # Process the data
      df, info = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"],
                                keep=data["Photosensor"], sampling_rate=100)

      # Build epochs around photosensor-marked events
      events = nk.events_find(data["Photosensor"], threshold_keep="below",
                              event_conditions=["Negative", "Neutral",
                                                "Neutral", "Negative"])
      epochs = nk.epochs_create(df, events, sampling_rate=100, epochs_start=-0.1,
                                epochs_end=1.9)

      # Analyze
      nk.bio_analyze(epochs, sampling_rate=100)

    **Example 2**: Interval-related analysis

    .. ipython:: python

      # Download data
      data = nk.data("bio_resting_5min_100hz")

      # Process the data
      df, info = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], ppg=data["PPG"], sampling_rate=100)

      # Analyze
      nk.bio_analyze(df, sampling_rate=100)

    """
    features = pd.DataFrame()
    method = method.lower()

    # Sanitize input: collect the column names of each signal type present, which
    # determines below which per-modality analysis functions are run.
    if isinstance(data, pd.DataFrame):
        ecg_cols = [col for col in data.columns if "ECG" in col]
        rsp_cols = [col for col in data.columns if "RSP" in col]
        eda_cols = [col for col in data.columns if "EDA" in col]
        emg_cols = [col for col in data.columns if "EMG" in col]
        ppg_cols = [col for col in data.columns if "PPG" in col]
        eog_cols = [col for col in data.columns if "EOG" in col]
        ecg_rate_col = [col for col in data.columns if "ECG_Rate" in col]
        rsp_phase_col = [col for col in data.columns if "RSP_Phase" in col]
    elif isinstance(data, dict):
        # NOTE(review): the lists are re-assigned on every iteration, so they end up
        # reflecting the last epoch only — epochs are presumably homogeneous in
        # columns; confirm upstream.
        for i in data:
            ecg_cols = [col for col in data[i].columns if "ECG" in col]
            rsp_cols = [col for col in data[i].columns if "RSP" in col]
            eda_cols = [col for col in data[i].columns if "EDA" in col]
            emg_cols = [col for col in data[i].columns if "EMG" in col]
            ppg_cols = [col for col in data[i].columns if "PPG" in col]
            eog_cols = [col for col in data[i].columns if "EOG" in col]
            ecg_rate_col = [col for col in data[i].columns if "ECG_Rate" in col]
            rsp_phase_col = [col for col in data[i].columns if "RSP_Phase" in col]
    else:
        raise ValueError(
            "NeuroKit error: bio_analyze(): Wrong input, please make sure you enter a DataFrame or a dictionary. "
        )

    # For each modality: optionally trim epochs to the modality-specific window,
    # run the dedicated analysis function, and append its features.

    # ECG
    if len(ecg_cols) != 0:
        ecg_data = data.copy()
        if window_lengths != "constant":
            if "ECG" in window_lengths.keys():  # only for epochs
                ecg_data = _bio_analyze_slicewindow(ecg_data, window_lengths, signal="ECG")
        ecg_analyzed = ecg_analyze(ecg_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, ecg_analyzed], axis=1, sort=False)

    # RSP
    if len(rsp_cols) != 0:
        rsp_data = data.copy()
        if window_lengths != "constant":
            if "RSP" in window_lengths.keys():  # only for epochs
                rsp_data = _bio_analyze_slicewindow(rsp_data, window_lengths, signal="RSP")
        rsp_analyzed = rsp_analyze(rsp_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, rsp_analyzed], axis=1, sort=False)

    # EDA
    if len(eda_cols) != 0:
        eda_data = data.copy()
        if window_lengths != "constant":
            if "EDA" in window_lengths.keys():  # only for epochs
                eda_data = _bio_analyze_slicewindow(eda_data, window_lengths, signal="EDA")
        eda_analyzed = eda_analyze(eda_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, eda_analyzed], axis=1, sort=False)

    # EMG
    if len(emg_cols) != 0:
        emg_data = data.copy()
        if window_lengths != "constant":
            if "EMG" in window_lengths.keys():  # only for epochs
                emg_data = _bio_analyze_slicewindow(emg_data, window_lengths, signal="EMG")
        emg_analyzed = emg_analyze(emg_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, emg_analyzed], axis=1, sort=False)

    # PPG
    if len(ppg_cols) != 0:
        ppg_data = data.copy()
        if window_lengths != "constant":
            if "PPG" in window_lengths.keys():  # only for epochs
                ppg_data = _bio_analyze_slicewindow(ppg_data, window_lengths, signal="PPG")
        ppg_analyzed = ppg_analyze(ppg_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, ppg_analyzed], axis=1, sort=False)

    # EOG
    if len(eog_cols) != 0:
        eog_data = data.copy()
        if window_lengths != "constant":
            if "EOG" in window_lengths.keys():  # only for epochs
                eog_data = _bio_analyze_slicewindow(eog_data, window_lengths, signal="EOG")
        eog_analyzed = eog_analyze(eog_data, sampling_rate=sampling_rate, method=method)
        features = pd.concat([features, eog_analyzed], axis=1, sort=False)

    # RSA: computed only when at least 3 heart-rate/respiratory-phase columns are
    # present in total (ECG_Rate* plus RSP_Phase* columns).
    if len(ecg_rate_col + rsp_phase_col) >= 3:
        if method == "auto":
            # Pick the analysis mode from the mean segment duration: >= 10 s is
            # treated as interval-related, shorter as event-related.
            duration = _bio_analyze_findduration(data, sampling_rate=sampling_rate)
            if duration >= 10:
                method = "interval"
            else:
                method = "event"

        # Event-related
        if method in ["event-related", "event", "epoch"]:
            rsa = _bio_analyze_rsa_event(data.copy())

        # Interval-related
        elif method in ["interval-related", "interval", "resting-state"]:
            rsa = _bio_analyze_rsa_interval(data.copy(), sampling_rate=sampling_rate)

        # Auto
        else:
            raise ValueError("Wrong `method` argument.")

        features = pd.concat([features, rsa], axis=1, sort=False)

    # Remove duplicate columns of Label and Condition
    if "Label" in features.columns.values:
        features = features.loc[:, ~features.columns.duplicated()]

    return features
# =============================================================================
# Internals
# =============================================================================
def _bio_analyze_slicewindow(data, window_lengths, signal="ECG"):
if signal in window_lengths.keys():
start = window_lengths[signal][0]
end = window_lengths[signal][1]
epochs = {}
for _, label in enumerate(data):
# Slice window
epoch = data[label].loc[(data[label].index > start) & (data[label].index < end)]
epochs[label] = epoch
return epochs
def _bio_analyze_findduration(data, sampling_rate=1000):
# If DataFrame
if isinstance(data, pd.DataFrame):
if "Label" in data.columns:
labels = data["Label"].unique()
durations = [len(data[data["Label"] == label]) / sampling_rate for label in labels]
else:
durations = [len(data) / sampling_rate]
# If dictionary
if isinstance(data, dict):
durations = [len(data[i]) / sampling_rate for i in data]
return np.nanmean(durations)
def _bio_analyze_rsa_interval(data, sampling_rate=1000):
    """Compute RSA features for interval-related analysis.

    Parameters
    ----------
    data : pd.DataFrame or dict
        Processed signals: a single DataFrame, or a dict of DataFrames each
        carrying "Index" and "Label" columns.
    sampling_rate : int
        The sampling frequency of the signals (in Hz).

    Returns
    -------
    pd.DataFrame
        One row per period with the RSA indices returned by `hrv_rsa`.
    """
    if isinstance(data, pd.DataFrame):
        rsa = hrv_rsa(data, sampling_rate=sampling_rate, continuous=False)
        rsa = pd.DataFrame.from_dict(rsa, orient="index").T
    elif isinstance(data, dict):
        # Fix: `rsa` was referenced before assignment in this branch,
        # raising a NameError. Initialize the accumulator first.
        rsa = {}
        for index in data:
            data[index] = data[index].set_index("Index").drop(["Label"], axis=1)
            rsa[index] = hrv_rsa(data[index], sampling_rate=sampling_rate, continuous=False)
        rsa = pd.DataFrame.from_dict(rsa, orient="index")
    return rsa
def _bio_analyze_rsa_event(data):
    """Compute RSA features for event-related analysis.

    Parameters
    ----------
    data : dict or pd.DataFrame
        Either a dict of epochs (label -> DataFrame) or a single DataFrame of
        concatenated epochs carrying "Label" and "Time" columns.

    Returns
    -------
    pd.DataFrame
        One row per epoch with the features from `_bio_analyze_rsa_epoch`.
    """
    rsa = {}
    if isinstance(data, dict):
        for i in data:
            rsa[i] = _bio_analyze_rsa_epoch(data[i])
        rsa = pd.DataFrame.from_dict(rsa, orient="index")
    elif isinstance(data, pd.DataFrame):
        # Convert back to a dict of epochs keyed by label.
        for label, df in data.groupby("Label"):
            epoch = df.set_index("Time")
            # Fix: `_bio_analyze_rsa_epoch` takes a single argument; the old
            # call passed the accumulator as a second positional argument,
            # which raised a TypeError on this code path.
            rsa[label] = _bio_analyze_rsa_epoch(epoch)
        rsa = pd.DataFrame.from_dict(rsa, orient="index")
        # Sort labels numerically so rows align with the features dataframe.
        rsa.index = rsa.index.astype(int)
        rsa = rsa.sort_index().rename_axis(None)
        rsa.index = rsa.index.astype(str)
    return rsa
def _bio_analyze_rsa_epoch(epoch):
# RSA features for event-related analysis: epoching
output = {}
# To remove baseline
if np.min(epoch.index.values) <= 0:
baseline = epoch["RSA_P2T"][epoch.index <= 0].values
signal = epoch["RSA_P2T"][epoch.index > 0].values
output["RSA_P2T"] = np.mean(signal) - np.mean(baseline)
baseline = epoch["RSA_Gates"][epoch.index <= 0].values
signal = epoch["RSA_Gates"][epoch.index > 0].values
output["RSA_Gates"] = np.nanmean(signal) - np.nanmean(baseline)
else:
signal = epoch["RSA_P2T"].values
output["RSA_P2T"] = np.mean(signal)
signal = epoch["RSA_Gates"].values
output["RSA_Gates"] = np.nanmean(signal)
return output
| 11,712 | 37.029221 | 114 | py |
NeuroKit | NeuroKit-master/neurokit2/bio/__init__.py | """Submodule for NeuroKit."""
from .bio_analyze import bio_analyze
from .bio_process import bio_process
__all__ = ["bio_process", "bio_analyze"]
| 148 | 17.625 | 40 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_simulate.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state, check_random_state_children
from ..signal import signal_distort, signal_merge
def eda_simulate(
    duration=10,
    length=None,
    sampling_rate=1000,
    noise=0.01,
    scr_number=1,
    drift=-0.01,
    random_state=None,
    random_state_distort="spawn",
):
    """Generate an artificial (synthetic) EDA signal.

    The signal is built as a baseline of 1 with a linear drift, on which
    ``scr_number`` canonical skin conductance responses (SCRs) are
    superimposed at evenly spaced onsets, each with a randomly drawn
    time-to-peak. Laplace noise is optionally added on top.

    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    length : int
        Desired signal length in samples (defaults to
        ``duration * sampling_rate``).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    noise : float
        Amplitude of the laplace noise (0 disables the noise step).
    scr_number : int
        Desired number of skin conductance responses (SCRs), i.e., peaks.
    drift : float or list
        The slope of a linear drift of the signal.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator (see ``misc.check_random_state``).
    random_state_distort : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        Random state used for the distortion step; ``"spawn"`` (default)
        derives an independent child generator from ``random_state``.

    Returns
    -------
    array
        Vector containing the simulated EDA signal.

    See Also
    --------
    ecg_simulate, rsp_simulate, emg_simulate, ppg_simulate

    References
    ----------
    * Bach, D. R., Flandin, G., Friston, K. J., & Dolan, R. J. (2010). Modelling event-related skin
      conductance responses. International Journal of Psychophysiology, 75(3), 349-356.
    """
    # Seed the generators for reproducible results.
    rng = check_random_state(random_state)
    random_state_distort = check_random_state_children(
        random_state, random_state_distort, n_children=1
    )

    if length is None:
        length = duration * sampling_rate

    # Baseline level of 1 plus a linear drift across the recording.
    eda = np.full(length, 1.0)
    eda += drift * np.linspace(0, duration, length)

    signal_span = [0, duration]
    onsets = np.linspace(0, duration, scr_number, endpoint=False)
    for onset in onsets:
        # One random time-to-peak draw per SCR.
        time_to_peak = np.abs(rng.normal(0, 5, size=1)) + 3.0745
        scr = _eda_simulate_scr(sampling_rate=sampling_rate, time_peak=time_to_peak)

        scr_span = [onset, onset + 9]
        # Clip the SCR so it fits inside the recording window.
        if scr_span[0] < 0:
            scr = scr[int(np.round(np.abs(scr_span[0]) * sampling_rate)) :]
            scr_span[0] = 0
        if scr_span[1] > duration:
            scr = scr[: int(np.round((duration - scr_span[0]) * sampling_rate))]
            scr_span[1] = duration

        eda = signal_merge(signal1=eda, signal2=scr, time1=signal_span, time2=scr_span)

    # Optionally corrupt the signal with laplace noise.
    if noise > 0:
        eda = signal_distort(
            eda,
            sampling_rate=sampling_rate,
            noise_amplitude=noise,
            noise_frequency=[5, 10, 100],
            noise_shape="laplace",
            silent=True,
            random_state=random_state_distort[0],
        )

    return eda
def _eda_simulate_scr(sampling_rate=1000, length=None, time_peak=3.0745, rise=0.7013, decay=[3.1487, 14.1257]):
"""Simulate a canonical skin conductance response (SCR)
Based on `Bach (2010)
<https://sourceforge.net/p/scralyze/code/HEAD/tree/branches/version_b2.1.8/scr_bf_crf.m#l24>`_
Parameters
-----------
sampling_rate : int
The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz.
length : int
The desired length of the signal (in samples). Defaults to None.
time_peak : float
Time to peak.
rise : float
Variance of rise defining gaussian.
decay : list
Decay constants.
Returns
----------
array
Vector containing the SCR signal.
Examples
--------
# scr1 = _eda_simulate_scr(time_peak=3.0745)
# scr2 = _eda_simulate_scr(time_peak=10)
# pd.DataFrame({"SCR1": scr1, "SCR2": scr2}).plot()
"""
if length is None:
length = 9 * sampling_rate
t = np.linspace(sampling_rate / 10000, 90, length)
gt = np.exp(-((t - time_peak) ** 2) / (2 * rise ** 2))
ht = np.exp(-t / decay[0]) + np.exp(-t / decay[1]) # pylint: disable=E1130
ft = np.convolve(gt, ht)
ft = ft[0 : len(t)]
ft = ft / np.max(ft)
return ft
def _eda_simulate_bateman(sampling_rate=1000, t1=0.75, t2=2):
"""Generates the bateman function:
:math:`b = e^{-t/T1} - e^{-t/T2}`
Parameters
----------
sampling_rate : float
Sampling frequency
t1 : float
Defaults to 0.75.
t2 : float
Defaults to 2.
Parameters of the bateman function
Returns
-------
bateman : array
The bateman function
Examples
----------
# bateman = _eda_simulate_bateman()
# nk.signal_plot(bateman)
"""
idx_T1 = t1 * sampling_rate
idx_T2 = t2 * sampling_rate
len_bat = idx_T2 * 10
idx_bat = np.arange(len_bat)
bateman = np.exp(-idx_bat / idx_T2) - np.exp(-idx_bat / idx_T1)
# normalize
bateman = sampling_rate * bateman / np.sum(bateman)
return bateman
| 6,198 | 30.467005 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_fixpeaks.py | # -*- coding: utf-8 -*-
from ..signal.signal_formatpeaks import _signal_formatpeaks_sanitize
def eda_fixpeaks(peaks, onsets=None, height=None):
    """Correct Skin Conductance Response (SCR) peaks.

    Low-level companion of ``eda_peaks()``: sanitizes the output of
    ``eda_findpeaks()``. No actual correction is applied to EDA for now —
    the inputs are repackaged unchanged.

    Parameters
    ----------
    peaks : list or array or DataFrame or Series or dict
        The samples at which the SCR peaks occur, as returned by
        ``eda_findpeaks()``.
    onsets : list or array or DataFrame or Series or dict
        The samples at which the SCR onsets occur. If None (default),
        extracted from `peaks`.
    height : list or array or DataFrame or Series or dict
        The amplitudes of the SCR peaks. If None (default), extracted from
        `peaks`.

    Returns
    -------
    info : dict
        Dictionary with the keys ``"SCR_Onsets"``, ``"SCR_Peaks"`` and
        ``"SCR_Height"``.

    See Also
    --------
    eda_simulate, eda_clean, eda_phasic, eda_findpeaks, eda_peaks, eda_process, eda_plot
    """
    # Sanitize the various accepted input containers.
    peaks, onsets, height = _eda_fixpeaks_retrieve(peaks, onsets, height)

    # No correction is currently required for EDA; simply repackage.
    return {"SCR_Onsets": onsets, "SCR_Peaks": peaks, "SCR_Height": height}
# =============================================================================
# Internals
# =============================================================================
def _eda_fixpeaks_retrieve(peaks, onsets=None, height=None):
    """Extract peaks, onsets and height arrays from the accepted input formats."""
    container = peaks
    peaks = _signal_formatpeaks_sanitize(container, key="Peaks")
    # Onsets and height fall back to whatever the original container holds.
    if onsets is None:
        onsets = _signal_formatpeaks_sanitize(container, key="Onsets")
    if height is None:
        height = _signal_formatpeaks_sanitize(container, key="Height")
    return peaks, onsets, height
| 3,002 | 35.621951 | 111 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .eda_eventrelated import eda_eventrelated
from .eda_intervalrelated import eda_intervalrelated
def eda_analyze(data, sampling_rate=1000, method="auto"):
    """**EDA Analysis**

    Perform EDA analysis on either epochs (event-related analysis) or on longer periods of data
    such as resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        `epochs_create`, or a DataFrame containing all epochs, usually obtained via
        `epochs_to_df`. Can also take a DataFrame of processed signals from a longer period of
        data, typically generated by `eda_process` or `bio_process`. Can also take a dict
        containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
        Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``"auto"``
        where the right method is chosen based on the mean duration of the data
        (``"event-related"`` for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EDA features. If event-related analysis is conducted,
        each epoch is indicated by the `Label` column. See :func:`eda_eventrelated` and
        :func:`eda_intervalrelated` docstrings for details.

    See Also
    --------
    .bio_process, eda_process, .epochs_create, eda_eventrelated, eda_intervalrelated
    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity check: epochs must carry a "Label" column.
        # NOTE(review): for dict input only the last epoch's columns are
        # inspected — assumes all epochs share the same columns.
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values

        if len([i for i in colnames if "Label" in i]) == 0:
            raise ValueError(
                "NeuroKit error: eda_analyze(): Wrong input or method, we couldn't extract epochs features."
            )
        else:
            features = eda_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = eda_intervalrelated(data)

    # Auto
    elif method in ["auto"]:
        if isinstance(data, dict):
            # NOTE(review): only the last epoch's duration decides the method
            # — assumes all epochs have (roughly) the same duration.
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = eda_intervalrelated(data)
            else:
                features = eda_eventrelated(data)

        if isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # Epoch length = count of the most frequent label.
                # Fix: `value_counts()[0]` indexes by *label* (KeyError or the
                # wrong value with integer labels); `.iloc[0]` takes the first
                # (largest) count positionally, as intended.
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = eda_intervalrelated(data)
            else:
                features = eda_eventrelated(data)

    return features
| 4,310 | 33.488 | 108 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..misc.report import create_report
from ..signal import signal_sanitize
from .eda_clean import eda_clean
from .eda_peaks import eda_peaks
from .eda_phasic import eda_phasic
from .eda_methods import eda_methods
from .eda_plot import eda_plot
def eda_process(eda_signal, sampling_rate=1000, method="neurokit", report=None, **kwargs):
    """**Process Electrodermal Activity (EDA)**

    Convenience function running the full default EDA pipeline: cleaning,
    tonic/phasic decomposition and SCR peak detection.

    Parameters
    ----------
    eda_signal : Union[list, np.array, pd.Series]
        The raw EDA signal.
    sampling_rate : int
        The sampling frequency of ``eda_signal`` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"biosppy"`` or
        ``"neurokit"`` (default).
    report : str
        Filename of a report of the processing (e.g. ``"myreport.html"``),
        ``"text"`` to only print to the console, or ``None`` (default) for no
        report.
    **kwargs
        Other arguments passed to the method-specific functions (see
        :func:`.eda_methods`).

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as ``eda_signal`` with the columns
        ``"EDA_Raw"``, ``"EDA_Clean"``, ``"EDA_Tonic"``, ``"EDA_Phasic"``,
        ``"SCR_Onsets"``, ``"SCR_Peaks"``, ``"SCR_Height"``,
        ``"SCR_Amplitude"``, ``"SCR_RiseTime"`` and ``"SCR_Recovery"``.
    info : dict
        SCR peak information (see :func:`eda_findpeaks`) plus the signal's
        ``"sampling_rate"``.

    See Also
    --------
    eda_simulate, eda_clean, eda_phasic, eda_findpeaks, eda_plot
    """
    eda_signal = signal_sanitize(eda_signal)
    methods = eda_methods(sampling_rate=sampling_rate, method=method, **kwargs)

    # 1. Cleaning
    cleaned = eda_clean(
        eda_signal,
        sampling_rate=sampling_rate,
        method=methods["method_cleaning"],
        **methods["kwargs_cleaning"],
    )

    # 2. Tonic/phasic decomposition (optionally skipped)
    phasic_method = methods["method_phasic"]
    if phasic_method is None or phasic_method.lower() == "none":
        decomposed = pd.DataFrame({"EDA_Phasic": cleaned})
    else:
        decomposed = eda_phasic(
            cleaned,
            sampling_rate=sampling_rate,
            method=phasic_method,
            **methods["kwargs_phasic"],
        )

    # 3. SCR peak detection on the phasic component
    peak_signal, info = eda_peaks(
        decomposed["EDA_Phasic"].values,
        sampling_rate=sampling_rate,
        method=methods["method_peaks"],
        amplitude_min=0.1,
        **methods["kwargs_peaks"],
    )
    info["sampling_rate"] = sampling_rate  # Add sampling rate in dict info

    # 4. Assemble the output dataframe
    signals = pd.concat(
        [pd.DataFrame({"EDA_Raw": eda_signal, "EDA_Clean": cleaned}), decomposed, peak_signal],
        axis=1,
    )

    # 5. Optional report (an interactive figure is only embedded in HTML reports)
    if report is not None:
        if ".html" in str(report):
            fig = eda_plot(signals, sampling_rate=sampling_rate, static=False)
        else:
            fig = None
        create_report(file=report, signals=signals, info=methods, fig=fig)

    return signals, info
| 4,800 | 35.930769 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_changepoints.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_changepoints
def eda_changepoints(eda_cleaned, penalty=10000, show=False):
    """**Calculate Number of Change Points**

    Count the change points of a cleaned skin conductance signal in terms of
    mean and variance, using :func:`.signal_changepoints` with the
    ``"meanvar"`` criterion.

    .. note::

        This function is somewhat experimental, and improvements or
        discussions about it are welcome.

    Parameters
    ----------
    eda_cleaned : Union[list, np.array, pd.Series]
        The cleaned EDA signal (a DataFrame with an ``"EDA_Clean"`` column is
        also accepted).
    penalty : int
        Algorithm penalty, 10000 by default as recommended by Halem et al.
        (2020). See :func:`.signal_changepoints`.
    show : bool
        Show the signal with the change points.

    Returns
    -------
    float
        Number of change points in the signal.

    See Also
    --------
    eda_simulate, .signal_changepoints

    References
    ----------
    * van Halem, S., Van Roekel, E., Kroencke, L., Kuper, N., & Denissen, J. (2020). Moments that
      matter? On the complexity of using triggers based on skin conductance to sample arousing
      events within an experience sampling framework. European Journal of Personality, 34(5),
      794-807.
    """
    signal = eda_cleaned
    # Coerce DataFrame/Series input into a plain numpy array.
    if not isinstance(signal, np.ndarray):
        if isinstance(signal, pd.DataFrame):
            if not any("EDA_Clean" in col for col in signal.columns.values):
                raise ValueError(
                    "NeuroKit error: eda_changepoints(): Your input does not contain the cleaned EDA signal."
                )
            signal = signal["EDA_Clean"]
        signal = np.array(signal)

    # Change points based on both mean and variance.
    changepoints = signal_changepoints(signal, change="meanvar", show=show, penalty=penalty)
    return len(changepoints)
| 2,404 | 28.691358 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_eventrelated.py | # -** coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (_eventrelated_addinfo,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput)
from ..misc import NeuroKitWarning
def eda_eventrelated(epochs, silent=False):
    """**Performs event-related EDA analysis on epochs**

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial, usually obtained via
        ``epochs_create()``, or a DataFrame containing all epochs, usually
        obtained via ``epochs_to_df()``.
    silent : bool
        If True, silence possible warnings.

    Returns
    -------
    DataFrame
        One row per epoch (indicated by the `Label` column, or `Index` if
        absent) with the features:

        * ``"EDA_SCR"``: 1 if a Skin Conductance Response (both onset and
          peak) follows the event, 0 otherwise. When 0, the SCR features
          below are NaN.
        * ``"EDA_Peak_Amplitude"``: maximum amplitude of the phasic component.
        * ``"SCR_Peak_Amplitude"``: peak amplitude of the first SCR.
        * ``"SCR_Peak_Amplitude_Time"``: timepoint of that peak.
        * ``"SCR_RiseTime"``: time from SCR onset to peak amplitude.
        * ``"SCR_RecoveryTime"``: half-recovery time of the first SCR.

    See Also
    --------
    .events_find, .epochs_create, .bio_process
    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="eda", silent=silent)

    data = {}
    for key in epochs.keys():
        epoch = epochs[key]
        features = {}

        # Maximum amplitude of the phasic component.
        features = _eda_eventrelated_eda(epoch, features)

        # An SCR "follows the event" when both a peak and an onset occur after t=0.
        after = epoch.index > 0
        if np.any(epoch["SCR_Peaks"][after] == 1) and np.any(epoch["SCR_Onsets"][after] == 1):
            features["EDA_SCR"] = 1
        else:
            features["EDA_SCR"] = 0

        # SCR features are only meaningful when an SCR is present.
        if features["EDA_SCR"] != 0:
            features = _eda_eventrelated_scr(epoch, features)
        else:
            features["SCR_Peak_Amplitude"] = np.nan
            features["SCR_Peak_Amplitude_Time"] = np.nan
            features["SCR_RiseTime"] = np.nan
            features["SCR_RecoveryTime"] = np.nan

        # Fill with more info
        data[key] = _eventrelated_addinfo(epoch, features)

    return _eventrelated_sanitizeoutput(data)
# =============================================================================
# Internals
# =============================================================================
def _eda_eventrelated_eda(epoch, output={}):
# Sanitize input
if "EDA_Phasic" not in epoch:
warn(
"Input does not have an `EDA_Phasic` column."
" Will skip computation of maximum amplitude of phasic EDA component.",
category=NeuroKitWarning
)
return output
output["EDA_Peak_Amplitude"] = epoch["EDA_Phasic"].max()
return output
def _eda_eventrelated_scr(epoch, output={}):
# Sanitize input
if "SCR_Amplitude" not in epoch:
warn(
"Input does not have an `SCR_Amplitude` column."
" Will skip computation of SCR peak amplitude.",
category=NeuroKitWarning
)
return output
if "SCR_RecoveryTime" not in epoch:
warn(
"Input does not have an `SCR_RecoveryTime` column."
" Will skip computation of SCR half-recovery times.",
category=NeuroKitWarning
)
return output
if "SCR_RiseTime" not in epoch:
warn(
"Input does not have an `SCR_RiseTime` column."
" Will skip computation of SCR rise times.",
category=NeuroKitWarning
)
return output
# Peak amplitude and Time of peak
first_activation = np.where(epoch["SCR_Amplitude"][epoch.index > 0] != 0)[0][0]
peak_amplitude = epoch["SCR_Amplitude"][epoch.index > 0].iloc[first_activation]
output["SCR_Peak_Amplitude"] = peak_amplitude
output["SCR_Peak_Amplitude_Time"] = epoch["SCR_Amplitude"][epoch.index > 0].index[first_activation]
# Rise Time
rise_time = epoch["SCR_RiseTime"][epoch.index > 0].iloc[first_activation]
output["SCR_RiseTime"] = rise_time
# Recovery Time
if any(epoch["SCR_RecoveryTime"][epoch.index > 0] != 0):
recovery_time = np.where(epoch["SCR_RecoveryTime"][epoch.index > 0] != 0)[0][0]
output["SCR_RecoveryTime"] = recovery_time
else:
output["SCR_RecoveryTime"] = np.nan
return output
| 6,444 | 33.100529 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_phasic.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.linalg
import scipy.signal
from ..signal import signal_filter, signal_resample, signal_smooth
def eda_phasic(eda_signal, sampling_rate=1000, method="highpass", **kwargs):
    """**Electrodermal Activity (EDA) Decomposition into Phasic and Tonic Components**

    Decompose the EDA signal into a **Phasic** and a **Tonic** component.
    Available methods:

    * ``"highpass"`` (default): Biopac Acqknowledge-style high-pass filtering
      with a 0.05 Hz cutoff (adjustable via the ``cutoff`` keyword).
    * ``"smoothmedian"``: Biopac Acqknowledge-style median-value smoothing;
      the phasic part is the residual of the smoothed (tonic) signal. The
      window is controlled by ``smoothing_factor`` (4 s by default); this
      method is computationally intensive.
    * ``"cvxeda"``: convex-optimization decomposition (Greco, 2016).
      Requires the ``cvxopt`` package (> 1.3.0.1).
    * ``"sparseda"``: sparse non-negative deconvolution
      (Hernando-Gallego et al., 2017). Experimental — help with validating
      the implementation is welcome.

    Parameters
    ----------
    eda_signal : Union[list, np.array, pd.Series]
        The raw EDA signal.
    sampling_rate : int
        The sampling frequency of the raw EDA signal (in Hz). Defaults to
        1000 Hz.
    method : str
        One of ``"highpass"`` (default), ``"smoothmedian"``, ``"cvxeda"`` or
        ``"sparseda"``.
    **kwargs : dict
        Additional arguments forwarded to the method-specific implementation.

    Returns
    -------
    DataFrame
        DataFrame with the columns ``"EDA_Tonic"`` and ``"EDA_Phasic"``.

    See Also
    --------
    eda_simulate, eda_clean, eda_peaks, eda_process, eda_plot

    References
    ----------
    * Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex
      optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical
      Engineering, 63(4), 797-804.
    * Hernando-Gallego, F., Luengo, D., & Artés-Rodríguez, A. (2017). Feature extraction of
      galvanic skin responses by nonnegative sparse deconvolution. IEEE journal of biomedical and
      health informatics, 22(5), 1385-1394.
    """
    method = method.lower()  # remove capitalised letters

    # Dispatch to the selected decomposition backend.
    if method in ("cvxeda", "convex"):
        tonic, phasic = _eda_phasic_cvxeda(eda_signal, sampling_rate)
    elif method in ("sparse", "sparseda"):
        tonic, phasic = _eda_phasic_sparsEDA(eda_signal, sampling_rate)
    elif method in ("median", "smoothmedian"):
        tonic, phasic = _eda_phasic_mediansmooth(eda_signal, sampling_rate, **kwargs)
    elif method in ("neurokit", "highpass", "biopac", "acqknowledge"):
        tonic, phasic = _eda_phasic_highpass(eda_signal, sampling_rate, **kwargs)
    else:
        raise ValueError(
            "NeuroKit error: eda_phasic(): 'method' should be one of "
            "'cvxeda', 'median', 'smoothmedian', 'neurokit', 'highpass', "
            "'biopac', 'acqknowledge'."
        )

    return pd.DataFrame({"EDA_Tonic": tonic, "EDA_Phasic": phasic})
# =============================================================================
# Acqknowledge
# =============================================================================
def _eda_phasic_mediansmooth(eda_signal, sampling_rate=1000, smoothing_factor=4):
    """Median-smoothing decomposition, one of the two methods available in
    Biopac's Acqknowledge (https://www.biopac.com/knowledge-base/phasic-eda-issue/).

    The median-filtered signal is the tonic component; the residual is the
    phasic component.
    """
    window = smoothing_factor * sampling_rate
    tonic = signal_smooth(eda_signal, kernel="median", size=window)
    return np.array(tonic), np.array(eda_signal - tonic)
def _eda_phasic_highpass(eda_signal, sampling_rate=1000, cutoff=0.05):
    """High-pass decomposition, one of the two methods available in Biopac's
    Acqknowledge (https://www.biopac.com/knowledge-base/phasic-eda-issue/).

    Frequencies below `cutoff` form the tonic component, those above it the
    phasic component (Butterworth filters).
    """
    baseline = signal_filter(eda_signal, sampling_rate=sampling_rate, highcut=cutoff, method="butter")
    fluctuations = signal_filter(eda_signal, sampling_rate=sampling_rate, lowcut=cutoff, method="butter")
    return baseline, fluctuations
# =============================================================================
# cvxEDA (Greco et al., 2016)
# =============================================================================
def _eda_phasic_cvxeda(
    eda_signal,
    sampling_rate=1000,
    tau0=2.0,
    tau1=0.7,
    delta_knot=10.0,
    alpha=8e-4,
    gamma=1e-2,
    solver=None,
    reltol=1e-9,
):
    """A convex optimization approach to electrodermal activity processing (CVXEDA).

    This function implements the cvxEDA algorithm described in "cvxEDA: a
    Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015).
    The signal is modelled as a sparse phasic driver convolved with a Bateman impulse
    response, plus a slow tonic component (spline basis + linear trend); the
    decomposition is obtained by solving a quadratic (or cone) program with cvxopt.

    Parameters
    ----------
    eda_signal : list or array
        raw EDA signal array.
    sampling_rate : int
        Sampling rate (samples/second).
    tau0 : float
        Slow time constant of the Bateman function.
    tau1 : float
        Fast time constant of the Bateman function.
    delta_knot : float
        Time between knots of the tonic spline function (in seconds).
    alpha : float
        Penalization for the sparse SMNA driver.
    gamma : float
        Penalization for the tonic spline coefficients.
    solver : str or None
        Sparse solver to be used; if ``"conelp"`` the cone program solver is used,
        otherwise cvxopt.solvers.qp.
    reltol : float
        Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters

    Returns
    -------
    tuple
        Two 1D numpy arrays: the tonic component and the phasic component.
    """
    # Try loading cvx
    try:
        import cvxopt
    except ImportError:
        raise ImportError(
            "NeuroKit error: eda_decompose(): the 'cvxopt' module is required for this method to run. ",
            "Please install it first (`pip install cvxopt`).",
        )
    # Internal functions
    def _cvx(m, n):
        # Empty (all-zero) sparse matrix of shape (m, n), used as padding blocks below.
        return cvxopt.spmatrix([], [], [], (m, n))
    frequency = 1 / sampling_rate  # sampling interval in seconds
    n = len(eda_signal)
    eda = cvxopt.matrix(eda_signal)
    # bateman ARMA model
    a1 = 1.0 / min(tau1, tau0)  # a1 > a0
    a0 = 1.0 / max(tau1, tau0)
    # Discrete-time AR coefficients of the Bateman response (bilinear transform)
    ar = np.array(
        [
            (a1 * frequency + 2.0) * (a0 * frequency + 2.0),
            2.0 * a1 * a0 * frequency**2 - 8.0,
            (a1 * frequency - 2.0) * (a0 * frequency - 2.0),
        ]
    ) / ((a1 - a0) * frequency**2)
    ma = np.array([1.0, 2.0, 1.0])
    # matrices for ARMA model: banded AR (A) and MA (M) operators over the signal
    i = np.arange(2, n)
    A = cvxopt.spmatrix(np.tile(ar, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n))
    M = cvxopt.spmatrix(np.tile(ma, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n))
    # spline basis for the tonic component
    delta_knot_s = int(round(delta_knot / frequency))  # knot spacing in samples
    spl = np.r_[np.arange(1.0, delta_knot_s), np.arange(delta_knot_s, 0.0, -1.0)]  # order 1
    spl = np.convolve(spl, spl, "full")  # self-convolution yields a smoother basis function
    spl /= max(spl)
    # matrix of spline regressors (one shifted copy of `spl` per knot)
    i = (
        np.c_[np.arange(-(len(spl) // 2), (len(spl) + 1) // 2)]
        + np.r_[np.arange(0, n, delta_knot_s)]
    )
    nB = i.shape[1]  # number of spline basis functions
    j = np.tile(np.arange(nB), (len(spl), 1))
    p = np.tile(spl, (nB, 1)).T
    valid = (i >= 0) & (i < n)  # drop basis samples that fall outside the signal
    B = cvxopt.spmatrix(p[valid], i[valid], j[valid])
    # trend: constant offset + linear drift
    C = cvxopt.matrix(np.c_[np.ones(n), np.arange(1.0, n + 1.0) / n])
    nC = C.size[1]
    # Solve the problem:
    # .5*(M*q + B*l + C*d - eda)^2 + alpha*sum(A, 1)*p + .5*gamma*l'*l
    # s.t. A*q >= 0
    cvxopt.solvers.options.update({"reltol": reltol, "show_progress": False})
    if solver == "conelp":
        # Use conelp
        G = cvxopt.sparse(
            [
                [-A, _cvx(2, n), M, _cvx(nB + 2, n)],
                [_cvx(n + 2, nC), C, _cvx(nB + 2, nC)],
                [_cvx(n, 1), -1, 1, _cvx(n + nB + 2, 1)],
                [_cvx(2 * n + 2, 1), -1, 1, _cvx(nB, 1)],
                [_cvx(n + 2, nB), B, _cvx(2, nB), cvxopt.spmatrix(1.0, range(nB), range(nB))],
            ]
        )
        h = cvxopt.matrix([_cvx(n, 1), 0.5, 0.5, eda, 0.5, 0.5, _cvx(nB, 1)])
        c = cvxopt.matrix(
            [(cvxopt.matrix(alpha, (1, n)) * A).T, _cvx(nC, 1), 1, gamma, _cvx(nB, 1)]
        )
        res = cvxopt.solvers.conelp(c, G, h, dims={"l": n, "q": [n + 2, nB + 2], "s": []})
    else:
        # Use qp (default): build the normal-equation blocks for variables [q, d, l]
        Mt, Ct, Bt = M.T, C.T, B.T
        H = cvxopt.sparse(
            [
                [Mt * M, Ct * M, Bt * M],
                [Mt * C, Ct * C, Bt * C],
                [Mt * B, Ct * B, Bt * B + gamma * cvxopt.spmatrix(1.0, range(nB), range(nB))],
            ]
        )
        f = cvxopt.matrix(
            [(cvxopt.matrix(alpha, (1, n)) * A).T - Mt * eda, -(Ct * eda), -(Bt * eda)]
        )
        res = cvxopt.solvers.qp(
            H,
            f,
            cvxopt.spmatrix(-A.V, A.I, A.J, (n, len(f))),
            cvxopt.matrix(0.0, (n, 1)),
            kktsolver="chol2",
        )
    cvxopt.solvers.options.clear()
    # Solution vector layout is [q (n), d (nC), l (nB)]
    tonic_splines = res["x"][-nB:]
    drift = res["x"][n : n + nC]
    tonic = B * tonic_splines + C * drift
    q = res["x"][:n]  # sparse SMNA driver
    phasic = M * q
    # Return tonic and phasic components
    return np.array(tonic)[:, 0], np.array(phasic)[:, 0]
# =============================================================================
# sparsEDA (Hernando-Gallego et al., 2017)
# =============================================================================
def _eda_phasic_sparsEDA(
    eda_signal, sampling_rate=8, epsilon=0.0001, Kmax=40, Nmin=5 / 4, rho=0.025
):
    """Sparse deconvolution of the EDA signal (sparsEDA).

    Credits go to:
    - https://github.com/fhernandogallego/sparsEDA (Matlab original implementation)
    - https://github.com/yskong224/SparsEDA-python (Python implementation)

    Parameters
    ----------
    epsilon
        step remainder (residual tolerance passed to the LARS solver)
    Kmax
        maximum number of LARS iterations
    Nmin
        minimum distance between sparse reactions, in seconds
    rho
        minimun threshold of sparse reactions (relative to the largest SCR)

    Returns
    -------
    driver
        driver responses (sparse SCR events)
    SCL
        low (tonic) component
    MSE
        reminder of the signal fitting
    """
    dmin = Nmin * sampling_rate  # minimum distance between SCRs, in samples
    original_length = len(eda_signal)  # Used for resampling at the end
    # Exceptions
    # if len(eda_signal) / sampling_rate < 80:
    #     raise AssertionError("Signal not enough large. longer than 80 seconds")
    if np.sum(np.isnan(eda_signal)) > 0:
        raise AssertionError("Signal contains NaN")
    # Preprocessing: pad 20 s of the first sample before and 60 s of the last sample after
    signalAdd = np.zeros(len(eda_signal) + (20 * sampling_rate) + (60 * sampling_rate))
    signalAdd[0 : 20 * sampling_rate] = eda_signal[0]
    signalAdd[20 * sampling_rate : 20 * sampling_rate + len(eda_signal)] = eda_signal
    signalAdd[20 * sampling_rate + len(eda_signal) :] = eda_signal[-1]
    # Resample to 8 Hz
    # NOTE(review): `signalAdd` is built at the original sampling rate, but the loop below
    # indexes it with 8 Hz pointers and mixes `sampling_rate` with `new_sr`. This looks
    # like it assumes sampling_rate == 8 (the default) — confirm before calling with
    # other rates.
    eda_signal = signal_resample(eda_signal, sampling_rate=sampling_rate, desired_sampling_rate=8)
    new_sr = 8
    Nss = len(eda_signal)
    Ns = len(signalAdd)
    b0 = 0
    # Pointers delimiting the original (unpadded) signal inside the padded one
    pointerS = 20 * new_sr
    pointerE = pointerS + Nss
    # signalRs = signalAdd[pointerS:pointerE]
    # overlap Save
    durationR = 70  # window duration in seconds
    Lreg = int(20 * new_sr * 3)
    L = 10 * new_sr
    N = durationR * new_sr
    T = 6  # number of tonic (SCL) regressors
    Rzeros = np.zeros([N + L, Lreg * 5])
    # Five candidate SCR template rates around the working sampling rate
    srF = new_sr * np.array([0.5, 0.75, 1, 1.25, 1.5])
    for j in range(0, len(srF)):
        t_rf = np.arange(0, 10 + 1e-10, 1 / srF[j])  # 10 sec
        taus = np.array([0.5, 2, 1])
        # Bi-exponential (Bateman-like) SCR template, normalized to unit energy
        rf_biexp = np.exp(-t_rf / taus[1]) - np.exp(-t_rf / taus[0])
        rf_est = taus[2] * rf_biexp
        rf_est = rf_est / np.sqrt(np.sum(rf_est**2))
        rf_est_zeropad = np.zeros(len(rf_est) + (N - len(rf_est)))
        rf_est_zeropad[: len(rf_est)] = rf_est
        # Convolution dictionary: each column is a shifted copy of the template
        Rzeros[0:N, j * Lreg : (j + 1) * Lreg] = scipy.linalg.toeplitz(
            rf_est_zeropad, np.zeros(Lreg)
        )
    R0 = Rzeros[0:N, 0 : 5 * Lreg]
    R = np.zeros([N, T + Lreg * 5])
    R[0:N, T:] = R0
    # SCL regressors: piecewise-linear ramps (and their negatives) starting at 0, 1/3 and 2/3
    R[0:Lreg, 0] = np.linspace(0, 1, Lreg)
    R[0:Lreg, 1] = -np.linspace(0, 1, Lreg)
    R[int(Lreg / 3) : Lreg, 2] = np.linspace(0, 2 / 3, int((2 * Lreg) / 3))
    R[int(Lreg / 3) : Lreg, 3] = -np.linspace(0, 2 / 3, int((2 * Lreg) / 3))
    R[int(2 * Lreg / 3) : Lreg, 4] = np.linspace(0, 1 / 3, int(Lreg / 3))
    R[int(2 * Lreg / 3) : Lreg, 5] = -np.linspace(0, 1 / 3, int(Lreg / 3))
    Cte = np.sum(R[:, 0] ** 2)
    R[:, 0:6] = R[:, 0:6] / np.sqrt(Cte)  # normalize the SCL regressors
    # Loop: overlap-save sliding window over the padded signal
    cutS = 0
    cutE = N
    slcAux = np.zeros(Ns)
    driverAux = np.zeros(Ns)
    resAux = np.zeros(Ns)
    aux = 0
    while cutE < Ns:
        aux = aux + 1
        signalCut = signalAdd[cutS:cutE]
        if b0 == 0:
            b0 = signalCut[0]
        signalCutIn = signalCut - b0  # remove the running baseline before fitting
        beta, _, _, _, _, _ = lasso(R, signalCutIn, sampling_rate, Kmax, epsilon)
        signalEst = (np.matmul(R, beta) + b0).reshape(-1)
        # remAout = (signalCut - signalEst).^2;
        # res2 = sum(remAout(20*sampling_rate+1:(40*sampling_rate)));
        # res3 = sum(remAout(40*sampling_rate+1:(60*sampling_rate)));
        remAout = (signalCut - signalEst) ** 2
        res2 = np.sum(remAout[20 * sampling_rate : 40 * sampling_rate])
        res3 = np.sum(remAout[40 * sampling_rate : 60 * sampling_rate])
        # Advance by 1, 2 or 3 sub-windows of 20 s depending on where the fit is good
        jump = 1
        if res2 < 1:
            jump = 2
            if res3 < 1:
                jump = 3
        SCL = np.matmul(R[:, 0:6], beta[0:6, :]) + b0
        SCRline = beta[6:, :]
        SCRaux = np.zeros([Lreg, 5])
        SCRaux[:] = SCRline.reshape([5, Lreg]).transpose()
        driver = SCRaux.sum(axis=1)  # merge the five template rates into one driver
        b0 = np.matmul(R[jump * 20 * sampling_rate - 1, 0:6], beta[0:6, :]) + b0
        driverAux[cutS : cutS + (jump * 20 * sampling_rate)] = driver[0 : jump * sampling_rate * 20]
        slcAux[cutS : cutS + (jump * 20 * sampling_rate)] = SCL[
            0 : jump * sampling_rate * 20
        ].reshape(-1)
        resAux[cutS : cutS + (jump * 20 * sampling_rate)] = remAout[0 : jump * sampling_rate * 20]
        cutS = cutS + jump * 20 * sampling_rate
        cutE = cutS + N
    # Strip the padding back off
    SCRaux = driverAux[pointerS:pointerE]
    SCL = slcAux[pointerS:pointerE]
    MSE = resAux[pointerS:pointerE]
    # PP: keep only SCR events separated by at least `dmin` samples, strongest first
    ind = np.argwhere(SCRaux > 0).reshape(-1)
    scr_temp = SCRaux[ind]
    ind2 = np.argsort(scr_temp)[::-1]
    scr_ord = scr_temp[ind2]
    scr_fin = [scr_ord[0]]
    ind_fin = [ind[ind2[0]]]
    for i in range(1, len(ind2)):
        if np.all(np.abs(ind[ind2[i]] - ind_fin) >= dmin):
            scr_fin.append(scr_ord[i])
            ind_fin.append(ind[ind2[i]])
    driver = np.zeros(len(SCRaux))
    driver[np.array(ind_fin)] = np.array(scr_fin)
    # Drop SCRs smaller than `rho` times the largest one
    scr_max = scr_fin[0]
    threshold = rho * scr_max
    driver[driver < threshold] = 0
    # Resample back to the original signal length
    SCL = signal_resample(SCL, desired_length=original_length)
    MSE = signal_resample(MSE, desired_length=original_length)
    return driver, SCL, MSE
def lasso(R, s, sampling_rate, maxIters, epsilon):
    """Non-negative LARS/lasso solver used by the sparsEDA decomposition.

    Ported from the MATLAB implementation (see _eda_phasic_sparsEDA credits).
    Solves for a sparse non-negative coefficient vector `beta` such that
    R @ beta approximates `s`, stopping after `maxIters` iterations or when
    the residual norm drops below `epsilon`.

    Returns (beta, numIters, activationHist, duals, lmbda, res).
    """
    N = len(s)
    W = R.shape[1]
    OptTol = -10          # optimality tolerance on lambda
    solFreq = 0           # if > 0, record intermediate solutions every solFreq iterations
    resStop2 = 0.0005     # early-stop threshold on per-sub-window residual norms
    lmbdaStop = 0
    zeroTol = 1e-5
    x = np.zeros(W)
    x_old = np.zeros(W)
    iter = 0
    # Initial correlations of each dictionary column with the target
    c = np.matmul(R.transpose(), s.reshape(-1, 1)).reshape(-1)
    lmbda = np.max(c)
    if lmbda < 0:
        raise Exception(
            "y is not expressible as a non-negative linear combination of the columns of X"
        )
    newIndices = np.argwhere(np.abs(c - lmbda) < zeroTol).flatten()
    collinearIndices = []
    beta = []
    duals = []
    res = s
    # NOTE(review): this branch only initializes bookkeeping; `numIters` is
    # reassigned unconditionally at the end of the function.
    if (lmbdaStop > 0 and lmbda < lmbdaStop) or ((epsilon > 0) and (np.linalg.norm(res) < epsilon)):
        activationHist = []
        numIters = 0
    # Seed the active set with the most correlated columns, growing the Cholesky factor
    R_I = []
    activeSet = []
    for j in range(0, len(newIndices)):
        iter = iter + 1
        R_I, flag = updateChol(R_I, N, W, R, 1, activeSet, newIndices[j], zeroTol)
        activeSet.append(newIndices[j])
    activationHist = activeSet.copy()
    # Loop
    done = 0
    while done == 0:
        if len(activationHist) == 4:
            # Re-seed the active set once the first four activations are in
            lmbda = np.max(c)
            newIndices = np.argwhere(np.abs(c - lmbda) < zeroTol).flatten()
            activeSet = []
            for j in range(0, len(newIndices)):
                iter = iter + 1
                R_I, flag = updateChol(R_I, N, W, R, 1, activeSet, newIndices[j], zeroTol)
                activeSet.append(newIndices[j])
            [activationHist.append(ele) for ele in activeSet]
        else:
            lmbda = c[activeSet[0]]
        # Compute the LARS step direction dx on the active set by solving
        # (R_I' R_I) dx = sign(c) via two triangular solves
        dx = np.zeros(W)
        if len(np.array([R_I]).flatten()) == 1:
            z = scipy.linalg.solve(
                R_I.reshape([-1, 1]),
                np.sign(c[np.array(activeSet).flatten()].reshape(-1, 1)),
                transposed=True,
                lower=False,
            )
        else:
            z = scipy.linalg.solve(
                R_I,
                np.sign(c[np.array(activeSet).flatten()].reshape(-1, 1)),
                transposed=True,
                lower=False,
            )
        if len(np.array([R_I]).flatten()) == 1:
            dx[np.array(activeSet).flatten()] = scipy.linalg.solve(
                R_I.reshape([-1, 1]), z, transposed=False, lower=False
            )
        else:
            dx[np.array(activeSet).flatten()] = scipy.linalg.solve(
                R_I, z, transposed=False, lower=False
            ).flatten()
        # Direction in signal space and its correlation with every column
        v = np.matmul(
            R[:, np.array(activeSet).flatten()], dx[np.array(activeSet).flatten()].reshape(-1, 1)
        )
        ATv = np.matmul(R.transpose(), v).flatten()
        # NOTE(review): np.Inf was removed in NumPy 2.0 (use np.inf there).
        gammaI = np.Inf
        removeIndices = []
        # Candidate columns: everything not active and not flagged collinear
        inactiveSet = np.arange(0, W)
        if len(np.array(activeSet).flatten()) > 0:
            inactiveSet[np.array(activeSet).flatten()] = -1
        if len(np.array(collinearIndices).flatten()) > 0:
            inactiveSet[np.array(collinearIndices).flatten()] = -1
        inactiveSet = np.argwhere(inactiveSet >= 0).flatten()
        if len(inactiveSet) == 0:
            gammaIc = 1
            newIndices = []
        else:
            # Smallest step that brings an inactive column into equi-correlation
            epsilon = 1e-12
            gammaArr = (lmbda - c[inactiveSet]) / (1 - ATv[inactiveSet] + epsilon)
            gammaArr[gammaArr < zeroTol] = np.Inf
            gammaIc = np.min(gammaArr)
            # Imin = np.argmin(gammaArr)
            newIndices = inactiveSet[(np.abs(gammaArr - gammaIc) < zeroTol)]
        gammaMin = min(gammaIc, gammaI)
        # Take the step; update solution, residual and correlations
        x = x + gammaMin * dx
        res = res - gammaMin * v.flatten()
        c = c - gammaMin * ATv
        # Stopping conditions on lambda / residual norm
        if (
            ((lmbda - gammaMin) < OptTol)
            or ((lmbdaStop > 0) and (lmbda <= lmbdaStop))
            or ((epsilon > 0) and (np.linalg.norm(res) <= epsilon))
        ):
            newIndices = []
            removeIndices = []
            done = 1
            if (lmbda - gammaMin) < OptTol:
                # print(lmbda-gammaMin)
                pass
        # Early stop when any 20 s sub-window is already well fitted
        if np.linalg.norm(res[0 : sampling_rate * 20]) <= resStop2:
            done = 1
        if np.linalg.norm(res[sampling_rate * 20 : sampling_rate * 40]) <= resStop2:
            done = 1
        if np.linalg.norm(res[sampling_rate * 40 : sampling_rate * 60]) <= resStop2:
            done = 1
        # Add the columns that just reached equi-correlation (or flag as collinear)
        if gammaIc <= gammaI and len(newIndices) > 0:
            for j in range(0, len(newIndices)):
                iter = iter + 1
                R_I, flag = updateChol(
                    R_I, N, W, R, 1, np.array(activeSet).flatten(), newIndices[j], zeroTol
                )
                if flag:
                    collinearIndices.append(newIndices[j])
                else:
                    activeSet.append(newIndices[j])
                    activationHist.append(newIndices[j])
        # Remove columns whose coefficient hit zero
        if gammaI <= gammaIc:
            for j in range(0, len(removeIndices)):
                iter = iter + 1
                col = np.argwhere(np.array(activeSet).flatten() == removeIndices[j]).flatten()
                R_I = downdateChol(R_I, col)
                activeSet.pop(col)
                collinearIndices = []
            x[np.array(removeIndices).flatten()] = 0
            activationHist.append(-removeIndices)
        if iter >= maxIters:
            done = 1
        # Enforce non-negativity: back out the last step if any coefficient went negative
        if len(np.argwhere(x < 0).flatten()) > 0:
            x = x_old.copy()
            done = 1
        else:
            x_old = x.copy()
        if done or ((solFreq > 0) and not (iter % solFreq)):
            beta.append(x)
            duals.append(v)
    numIters = iter
    return np.array(beta).reshape(-1, 1), numIters, activationHist, duals, lmbda, res
def updateChol(R_I, n, N, R, explicitA, activeSet, newIndex, zeroTol):
    """Grow the upper-triangular Cholesky factor R_I of (R_active' R_active)
    by one column (`newIndex`).

    Returns (R_I0, flag) where flag == 1 signals that the new column is
    (numerically) collinear with the active set and was NOT added.
    NOTE(review): the caller passes (R_I, N, W, ...), so here `n` is the number
    of samples and `N` the number of dictionary columns — names are swapped
    relative to the caller; `N` is unused.
    """
    # global opts_tr, zeroTol
    flag = 0
    newVec = R[:, newIndex]
    if len(activeSet) == 0:
        # First column: the factor is just the column norm (a scalar)
        R_I0 = np.sqrt(np.sum(newVec**2))
    else:
        if explicitA:
            # Solve R_I' p = R_active' r_new (triangular solve against the transpose)
            if len(np.array([R_I]).flatten()) == 1:
                p = scipy.linalg.solve(
                    np.array(R_I).reshape(-1, 1),
                    np.matmul(R[:, activeSet].transpose(), R[:, newIndex]),
                    transposed=True,
                    lower=False,
                )
            else:
                p = scipy.linalg.solve(
                    R_I,
                    np.matmul(R[:, activeSet].transpose(), R[:, newIndex]),
                    transposed=True,
                    lower=False,
                )
        else:
            # Original matlab code:
            # Global var for linsolve functions..
            # optsUT = True
            # opts_trUT = True
            # opts_trTRANSA = True
            # AnewVec = feval(R,2,n,length(activeSet),newVec,activeSet,N);
            # p = linsolve(R_I,AnewVec,opts_tr);
            # Translation by chatGPT-3, might be wrong
            AnewVec = np.zeros((n, 1))
            for i in range(len(activeSet)):
                AnewVec += R[2, :, activeSet[i]] * newVec[i]
            p = scipy.linalg.solve(R_I, AnewVec, transposed=True, lower=False)
        # Squared norm of the component of the new column orthogonal to the active set
        q = np.sum(newVec**2) - np.sum(p**2)
        if q <= zeroTol:
            # Collinear: keep the factor unchanged and flag the column
            flag = 1
            R_I0 = R_I.copy()
        else:
            # Append [p; sqrt(q)] as a new last column of the factor
            if len(np.array([R_I]).shape) == 1:
                R_I = np.array([R_I]).reshape(-1, 1)
            # print(R_I)
            R_I0 = np.zeros([np.array(R_I).shape[0] + 1, R_I.shape[1] + 1])
            R_I0[0 : R_I.shape[0], 0 : R_I.shape[1]] = R_I
            R_I0[0 : R_I.shape[0], -1] = p
            R_I0[-1, -1] = np.sqrt(q)
    return R_I0, flag
def downdateChol(R, j):
    """Remove column `j` from the Cholesky factor `R` and re-triangularize.

    Port of the MATLAB downdating routine using Givens plane rotations.
    NOTE(review): this translation looks suspect — `planerot` returns a flat
    length-4 vector in one branch and a 2x2 identity in the other, `G * R[...]`
    is elementwise (not the MATLAB matrix product), and `R1` is built but the
    rotation loop and return use `R` instead. Verify against the MATLAB
    original before relying on the removal path.
    """
    # global opts_tr, zeroTol
    def planerot(x):
        # Givens rotation zeroing x[1]
        # http://statweb.stanford.edu/~susan/courses/b494/index/node30.html
        if x[1] != 0:
            r = np.linalg.norm(x)
            G = np.zeros(len(x) + 2)
            G[: len(x)] = x / r
            G[-2] = -x[1] / r
            G[-1] = x[0] / r
        else:
            G = np.eye(2)
        return G, x
    # Drop column j
    R1 = np.zeros([R.shape[0], R.shape[1] - 1])
    R1[:, :j] = R[:, :j]
    R1[:, j:] = R[:, j + 1 :]
    # m = R1.shape[0]
    n = R1.shape[1]
    # Rotate away the sub-diagonal entries introduced by the column removal
    for k in range(j, n):
        p = np.array([k, k + 1])
        G, R[p, k] = planerot(R[p, k])
        if k < n:
            R[p, k + 1 : n] = G * R[p, k + 1 : n]
    return R[:n, :]
| 25,798 | 32.856955 | 115 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_autocor.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..signal import signal_autocor
def eda_autocor(eda_cleaned, sampling_rate=1000, lag=4):
    """**EDA Autocorrelation**

    Compute the autocorrelation of the cleaned EDA signal, i.e., its correlation
    with a copy of itself shifted by ``lag`` seconds.

    Parameters
    ----------
    eda_cleaned : Union[list, np.array, pd.Series]
        The cleaned EDA signal. A DataFrame containing an ``"EDA_Clean"`` column
        is also accepted.
    sampling_rate : int
        The sampling frequency of the cleaned EDA signal (in Hz, i.e.,
        samples/second). Defaults to 1000 Hz.
    lag : int
        Time lag in seconds. Defaults to 4 seconds to avoid autoregressive
        correlations approaching 1, as recommended by Halem et al. (2020).

    Returns
    -------
    float
        Autocorrelation index of the EDA signal.

    Raises
    ------
    ValueError
        If a DataFrame without a cleaned EDA column is passed, or if the lag
        exceeds the signal duration.

    See Also
    --------
    eda_simulate, eda_clean

    References
    -----------
    * van Halem, S., Van Roekel, E., Kroencke, L., Kuper, N., & Denissen, J. (2020). Moments that
      matter? On the complexity of using triggers based on skin conductance to sample arousing
      events within an experience sampling framework. European Journal of Personality, 34(5),
      794-807.
    """
    # Accept the output of eda_process() and pull out the cleaned channel
    if isinstance(eda_cleaned, pd.DataFrame):
        has_clean_column = any("EDA_Clean" in name for name in eda_cleaned.columns.values)
        if not has_clean_column:
            raise ValueError(
                "NeuroKit error: eda_autocor(): Your input does not contain the cleaned EDA signal."
            )
        eda_cleaned = eda_cleaned["EDA_Clean"]
    if isinstance(eda_cleaned, pd.Series):
        eda_cleaned = eda_cleaned.values

    # Convert the lag from seconds to samples and make sure it fits in the signal
    lag_samples = lag * sampling_rate
    if lag_samples > len(eda_cleaned):
        raise ValueError(
            "NeuroKit error: eda_autocor(): The time lag "
            "exceeds the duration of the EDA signal. "
            "Consider using a longer duration of the EDA signal."
        )

    cor, _ = signal_autocor(eda_cleaned, lag=lag_samples)
    return cor
| 2,357 | 29.230769 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_methods.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc.report import get_kwargs
from .eda_clean import eda_clean
from .eda_peaks import eda_peaks
from .eda_phasic import eda_phasic
def eda_methods(
    sampling_rate=1000,
    method="default",
    method_cleaning="default",
    method_peaks="default",
    method_phasic="default",
    **kwargs,
):
    """**EDA Preprocessing Methods**

    This function analyzes and specifies the methods used in the preprocessing, and create a
    textual description of the methods used. It is used by :func:`eda_process()` to dispatch the
    correct methods to each subroutine of the pipeline and :func:`eda_report()` to create a
    preprocessing report.

    Parameters
    ----------
    sampling_rate : int
        The sampling frequency of the raw EDA signal (in Hz, i.e., samples/second).
    method : str
        The method used for cleaning and peak finding if ``"method_cleaning"``
        and ``"method_peaks"`` are set to ``"default"``. Can be one of ``"default"``, ``"biosppy"``.
        Defaults to ``"default"``.
    method_cleaning: str
        The method used to clean the raw EDA signal. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.eda_clean`.
    method_peaks: str
        The method used to find peaks. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.eda_peaks`.
    method_phasic: str
        The method used to decompose the EDA signal into phasic and tonic components. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.eda_phasic`.
    **kwargs
        Other arguments to be passed to :func:`.eda_clean`,
        :func:`.eda_peaks`, and :func:`.eda_phasic`.

    Returns
    -------
    report_info : dict
        A dictionary containing the keyword arguments passed to the cleaning
        and peak finding functions, text describing the methods, and the corresponding
        references.

    See Also
    --------
    eda_process, eda_clean, eda_peaks
    """
    # Sanitize inputs: "default" falls back to the global `method`; everything is lowercased
    method_cleaning = str(method).lower() if method_cleaning == "default" else str(method_cleaning).lower()
    method_phasic = str(method).lower() if method_phasic == "default" else str(method_phasic).lower()
    method_peaks = str(method).lower() if method_peaks == "default" else str(method_peaks).lower()

    # Create dictionary with all inputs
    report_info = {
        "sampling_rate": sampling_rate,
        "method_cleaning": method_cleaning,
        "method_phasic": method_phasic,
        "method_peaks": method_peaks,
        "kwargs": kwargs,
    }

    # Get arguments to be passed to underlying functions
    kwargs_cleaning, report_info = get_kwargs(report_info, eda_clean)
    kwargs_phasic, report_info = get_kwargs(report_info, eda_phasic)
    kwargs_peaks, report_info = get_kwargs(report_info, eda_peaks)

    # Save keyword arguments in dictionary
    report_info["kwargs_cleaning"] = kwargs_cleaning
    report_info["kwargs_phasic"] = kwargs_phasic
    report_info["kwargs_peaks"] = kwargs_peaks

    # Initialize refs list
    refs = []

    # 1. Cleaning
    # ------------
    report_info["text_cleaning"] = f"The raw signal, sampled at {sampling_rate} Hz,"
    if method_cleaning == "biosppy":
        report_info["text_cleaning"] += " was cleaned using the biosppy package."
    elif method_cleaning in ["default", "neurokit", "nk"]:
        report_info["text_cleaning"] += " was cleaned using the default method of the neurokit2 package."
    elif method_cleaning in ["none"]:
        # Fixed: the string previously lacked a leading space, producing "Hz,was directly..."
        report_info["text_cleaning"] += " was directly used without cleaning."
    else:
        report_info["text_cleaning"] += " was cleaned using the method described in " + method_cleaning + "."

    # 2. Phasic decomposition
    # -----------------------
    # TODO: add descriptions of individual methods
    report_info["text_phasic"] = "The signal was decomposed into phasic and tonic components using"
    if method_phasic is None or method_phasic in ["none"]:
        report_info["text_phasic"] = "There was no phasic decomposition carried out."
    else:
        report_info["text_phasic"] += " the method described in " + method_phasic + "."

    # 3. Peak detection
    # -----------------
    report_info["text_peaks"] = "The cleaned signal was used to detect peaks using"
    if method_peaks in ["gamboa2008", "gamboa"]:
        report_info["text_peaks"] += " the method described in Gamboa et al. (2008)."
        refs.append("""Gamboa, H. (2008). Multi-modal behavioral biometrics based on hci
            and electrophysiology. PhD ThesisUniversidade.""")
    elif method_peaks in ["kim", "kbk", "kim2004", "biosppy"]:
        report_info["text_peaks"] += " the method described in Kim et al. (2004)."
        refs.append("""Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term
            monitoring of physiological signals. Medical and biological engineering and computing, 42(3),
            419-427.""")
    elif method_peaks in ["nk", "nk2", "neurokit", "neurokit2"]:
        report_info["text_peaks"] += " the default method of the `neurokit2` package."
        refs.append("https://doi.org/10.21105/joss.01667")
    elif method_peaks in ["vanhalem2020", "vanhalem", "halem2020"]:
        report_info["text_peaks"] += " the method described in Vanhalem et al. (2020)."
        refs.append("""van Halem, S., Van Roekel, E., Kroencke, L., Kuper, N., & Denissen, J. (2020).
            Moments That Matter? On the Complexity of Using Triggers Based on Skin Conductance to Sample
            Arousing Events Within an Experience Sampling Framework. European Journal of Personality.""")
    elif method_peaks in ["nabian2018", "nabian"]:
        report_info["text_peaks"] += " the method described in Nabian et al. (2018)."
        refs.append("""Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., & Ostadabbas, S. (2018). An
            Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data. IEEE
            journal of translational engineering in health and medicine, 6, 2800711.""")
    else:
        report_info[
            "text_peaks"
        ] = f"The peak detection was carried out using the method {method_peaks}."

    # References
    report_info["references"] = list(np.unique(refs))
    return report_info
| 6,621 | 45.307692 | 117 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_filter, signal_smooth
def eda_clean(eda_signal, sampling_rate=1000, method="neurokit"):
    """**Preprocess Electrodermal Activity (EDA) signal**

    This function cleans the EDA signal by removing noise and smoothing the signal with different methods.

    * **NeuroKit**: Default method. Low-pass filter with a 3 Hz cutoff frequency and a 4th order
      Butterworth filter. Note that if the sampling rate is lower than 7 Hz (as it is the case
      with some signals recorded by wearables such as Empatica), the filtering is skipped (as there
      is no high enough frequency to remove).
    * **BioSPPy**: More aggressive filtering than NeuroKit's default method. Low-pass filter with a
      5 Hz cutoff frequency and a 4th order Butterworth filter.

    Parameters
    ----------
    eda_signal : Union[list, np.array, pd.Series]
        The raw EDA signal.
    sampling_rate : int
        The sampling frequency of `rsp_signal` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"neurokit"`` (default), ``"biosppy"``, or
        ``"none"``.

    Returns
    -------
    array
        Vector containing the cleaned EDA signal.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported pipelines.

    See Also
    --------
    eda_simulate, eda_findpeaks, eda_process, eda_plot

    Examples
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      # Simulate raw signal
      eda = nk.eda_simulate(duration=15, sampling_rate=100, scr_number=10, noise=0.01, drift=0.02)

      # Clean
      eda_clean1 = nk.eda_clean(eda, sampling_rate=100, method='neurokit')
      eda_clean2 = nk.eda_clean(eda, sampling_rate=100, method='biosppy')

      @savefig p_eda_clean.png scale=100%
      nk.signal_plot([eda, eda_clean1, eda_clean2], labels=["Raw", "NeuroKit", "BioSPPy"])
      @suppress
      plt.close()

    """
    eda_signal = as_vector(eda_signal)

    # Missing data: forward-fill NaNs before filtering
    n_missing = np.sum(np.isnan(eda_signal))
    if n_missing > 0:
        warn(
            "There are " + str(n_missing) + " missing data points in your signal."
            " Filling missing values by using the forward filling method.",
            category=NeuroKitWarning,
        )
        eda_signal = _eda_clean_missing(eda_signal)

    # Sanitize method: previously, passing None crashed on `.lower()` before the
    # `method is None` check could ever run; treat None as "none" up front.
    method = "none" if method is None else method.lower()
    if method == "biosppy":
        clean = _eda_clean_biosppy(eda_signal, sampling_rate)
    elif method in ["default", "neurokit", "nk"]:
        clean = _eda_clean_neurokit(eda_signal, sampling_rate)
    elif method == "none":
        clean = eda_signal
    else:
        # Fixed: the error message previously only mentioned 'biosppy'
        raise ValueError(
            "NeuroKit error: eda_clean(): 'method' should be one of "
            "'neurokit', 'biosppy' or 'none'."
        )
    return clean
# =============================================================================
# Handle missing data
# =============================================================================
def _eda_clean_missing(eda_signal):
eda_signal = pd.DataFrame.pad(pd.Series(eda_signal))
return eda_signal
# =============================================================================
# NK
# =============================================================================
def _eda_clean_neurokit(eda_signal, sampling_rate=1000):
    """Low-pass filter the signal (3 Hz cutoff, 4th-order Butterworth).

    Filtering is skipped (the input is returned unchanged) when the sampling
    rate is 6 Hz or lower, since a 3 Hz cutoff would not be meaningful there.
    """
    if sampling_rate > 6:
        # 4th-order Butterworth low-pass at 3 Hz
        return signal_filter(
            eda_signal, sampling_rate=sampling_rate, highcut=3, method="butterworth", order=4
        )

    warn(
        "EDA signal is sampled at very low frequency. Skipping filtering.",
        category=NeuroKitWarning,
    )
    return eda_signal
# =============================================================================
# BioSPPy
# =============================================================================
def _eda_clean_biosppy(eda_signal, sampling_rate=1000):
    """Clean the signal with the same defaults as `BioSPPy
    <https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/eda.py>`_:
    a 4th-order Butterworth low-pass filter at 5 Hz followed by boxzen smoothing.
    """
    # 5 Hz cutoff normalized to the Nyquist frequency (Fs/2)
    normalized_cutoff = 2 * np.array(5) / sampling_rate

    # Zero-phase Butterworth low-pass filtering
    b, a = scipy.signal.butter(
        N=4, Wn=normalized_cutoff, btype="lowpass", analog=False, output="ba"
    )
    filtered = scipy.signal.filtfilt(b, a, eda_signal)

    # Smooth with a boxzen kernel spanning 0.75 s
    return signal_smooth(
        filtered, method="convolution", kernel="boxzen", size=int(0.75 * sampling_rate)
    )
| 4,638 | 30.993103 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_peaks.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..misc import find_closest
from ..signal import signal_formatpeaks
from .eda_findpeaks import eda_findpeaks
from .eda_fixpeaks import eda_fixpeaks
def eda_peaks(eda_phasic, sampling_rate=1000, method="neurokit", amplitude_min=0.1):
    """**Find Skin Conductance Responses (SCR) in Electrodermal Activity (EDA)**

    Identify Skin Conductance Response (SCR) peaks in the phasic component of an
    EDA signal (as returned by :func:`eda_phasic()`), then compute for each SCR
    its amplitude, rise time and half-recovery time.

    Parameters
    ----------
    eda_phasic : Union[list, np.array, pd.Series]
        The phasic component of the EDA signal (from :func:`eda_phasic()`). A
        DataFrame containing an ``"EDA_Phasic"`` column is also accepted.
    sampling_rate : int
        The sampling frequency of the EDA signal (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"neurokit"`` (default),
        ``"gamboa2008"``, ``"kim2004"`` (the default in BioSPPy), ``"vanhalem2020"``
        or ``"nabian2018"``.
    amplitude_min : float
        Only used if ``method`` is ``"neurokit"`` or ``"kim2004"``. Minimum
        threshold by which to exclude SCRs (peaks) as relative to the largest
        amplitude in the signal.

    Returns
    -------
    signals : DataFrame
        A DataFrame of the same length as the input signal in which occurrences of
        SCR peaks are marked as "1" in lists of zeros. Accessible with the key
        ``"SCR_Peaks"``.
    info : dict
        A dictionary containing additional information: the amplitude of each SCR
        (``"SCR_Amplitude"``), the samples at which SCR onsets (``"SCR_Onsets"``)
        and peaks (``"SCR_Peaks"``) occur, and the signal's ``"sampling_rate"``.

    See Also
    --------
    eda_simulate, eda_clean, eda_phasic, eda_process, eda_plot

    References
    ----------
    * Gamboa, H. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology.
      PhD ThesisUniversidade.
    * Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term
      monitoring of physiological signals. Medical and biological engineering and computing,
      42(3), 419-427.
    * van Halem, S., Van Roekel, E., Kroencke, L., Kuper, N., & Denissen, J. (2020).
      Moments That Matter? On the Complexity of Using Triggers Based on Skin Conductance to Sample
      Arousing Events Within an Experience Sampling Framework. European Journal of Personality.
    * Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., & Ostadabbas, S. (2018).
      An Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data.
      IEEE journal of translational engineering in health and medicine, 6, 2800711.
    """
    # Extract the phasic column when a DataFrame/Series is passed in
    if isinstance(eda_phasic, (pd.DataFrame, pd.Series)):
        try:
            eda_phasic = eda_phasic["EDA_Phasic"]
        except KeyError:
            eda_phasic = eda_phasic.values

    # Detect candidate peaks with the requested method, then sanitize them
    scr_info = eda_findpeaks(
        eda_phasic,
        sampling_rate=sampling_rate,
        method=method,
        amplitude_min=amplitude_min,
    )
    scr_info = eda_fixpeaks(scr_info)

    # Enrich with amplitude, rise time and half-recovery time
    scr_info = _eda_peaks_getfeatures(
        scr_info, eda_phasic, sampling_rate, recovery_percentage=0.5
    )

    # Mark peak occurrences in a DataFrame of the same length as the signal
    peak_signal = signal_formatpeaks(
        scr_info,
        desired_length=len(eda_phasic),
        peak_indices=scr_info["SCR_Peaks"],
        other_indices=scr_info["SCR_Recovery"],
    )

    # Record the sampling rate alongside the other info
    scr_info["sampling_rate"] = sampling_rate

    return peak_signal, scr_info
# =============================================================================
# Utility
# =============================================================================
def _eda_peaks_getfeatures(info, eda_phasic, sampling_rate=1000, recovery_percentage=0.5):
    """Augment an SCR `info` dict with amplitude, rise-time and recovery features.

    Parameters
    ----------
    info : dict
        Output of ``eda_findpeaks()`` / ``eda_fixpeaks()``. Must contain the arrays
        ``"SCR_Peaks"``, ``"SCR_Onsets"`` (may contain NaNs) and ``"SCR_Height"``.
    eda_phasic : array
        Phasic EDA component, indexable by integer sample indices.
    sampling_rate : int
        Sampling frequency (Hz), used to convert sample counts into seconds.
    recovery_percentage : float
        Fraction of the SCR amplitude at which "recovery" is measured
        (0.5 = half-recovery).

    Returns
    -------
    dict
        The same `info` dict (mutated in place) with ``"SCR_Amplitude"``,
        ``"SCR_RiseTime"``, ``"SCR_Recovery"`` and ``"SCR_RecoveryTime"`` added;
        entries are NaN where a feature could not be computed.
    """
    # Sanity checks -----------------------------------------------------------
    # Peaks (remove peaks before first onset)
    valid_peaks = np.logical_and(
        info["SCR_Peaks"] > np.nanmin(info["SCR_Onsets"]), ~np.isnan(info["SCR_Onsets"])
    )  # pylint: disable=E1111
    peaks = info["SCR_Peaks"][valid_peaks]
    # Onsets (remove onsets with after last peak)
    valid_onsets = ~np.isnan(info["SCR_Onsets"])
    valid_onsets[valid_onsets] = info["SCR_Onsets"][valid_onsets] < np.nanmax(info["SCR_Peaks"])
    onsets = info["SCR_Onsets"][valid_onsets].astype(int)
    if len(onsets) != len(peaks):
        # NOTE(review): two arguments are passed to ValueError, so the message
        # prints as a tuple; consider joining into one string in a later fix.
        raise ValueError(
            "NeuroKit error: eda_peaks(): Peaks and onsets don't ",
            "match, so cannot get amplitude safely. Check why using `find_peaks()`.",
        )
    # Amplitude and Rise Time -------------------------------------------------
    # Amplitudes: peak height relative to the signal value at the matching onset.
    amplitude = np.full(len(info["SCR_Height"]), np.nan)
    amplitude[valid_peaks] = info["SCR_Height"][valid_peaks] - eda_phasic[onsets]
    # Rise times (in seconds)
    risetime = np.full(len(info["SCR_Peaks"]), np.nan)
    risetime[valid_peaks] = (peaks - onsets) / sampling_rate
    # Save info
    info["SCR_Amplitude"] = amplitude
    info["SCR_RiseTime"] = risetime
    # Recovery time -----------------------------------------------------------
    # (Half) Recovery times: sample at which the signal falls back to
    # onset-level + recovery_percentage * amplitude.
    recovery = np.full(len(info["SCR_Peaks"]), np.nan)
    recovery_time = np.full(len(info["SCR_Peaks"]), np.nan)
    recovery_values = eda_phasic[onsets] + (amplitude[valid_peaks] * recovery_percentage)
    for i, peak_index in enumerate(peaks):
        # Get segment between peak and next peak (IndexError on the last peak
        # deliberately falls back to "until end of signal").
        try:
            segment = eda_phasic[peak_index : peaks[i + 1]]
        except IndexError:
            segment = eda_phasic[peak_index::]
        # Adjust segment (cut when it reaches minimum to avoid picking out values on the rise of the next peak)
        segment = segment[0 : np.argmin(segment)]
        # Find recovery time: closest sample value not exceeding the target.
        recovery_value = find_closest(
            recovery_values[i], segment, direction="smaller", strictly=False
        )
        # Detect recovery points only if there are datapoints below recovery value
        if np.min(segment) < recovery_value:
            segment_index = np.where(segment == recovery_value)[0][0]
            # Map back to the position of this peak in the *unfiltered* arrays.
            recovery[np.where(valid_peaks)[0][i]] = peak_index + segment_index
            recovery_time[np.where(valid_peaks)[0][i]] = segment_index / sampling_rate
    # Save ouput
    info["SCR_Recovery"] = recovery
    info["SCR_RecoveryTime"] = recovery_time
    return info
| 7,635 | 39.402116 | 125 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_findpeaks.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_filter, signal_findpeaks, signal_smooth, signal_zerocrossings
def eda_findpeaks(eda_phasic, sampling_rate=1000, method="neurokit", amplitude_min=0.1):
    """**Find Skin Conductance Responses (SCR) in Electrodermal Activity (EDA)**

    Low-level SCR peak detection on the phasic EDA component, used by
    :func:`eda_peaks`. Dispatches to one of several published delineation
    methods; see :func:`eda_peaks` for the higher-level interface and details.

    Parameters
    ----------
    eda_phasic : Union[list, np.array, pd.Series]
        The phasic component of the EDA signal (from :func:`eda_phasic`). A
        DataFrame is accepted if it carries an ``"EDA_Phasic"`` column.
    sampling_rate : int
        The sampling frequency of the EDA signal (in Hz, i.e., samples/second).
    method : str
        One of ``"neurokit"`` (default), ``"gamboa2008"``, ``"kim2004"`` (the
        default in BioSPPy), ``"vanhalem2020"`` or ``"nabian2018"``.
        Case-insensitive; several aliases are accepted.
    amplitude_min : float
        Only used when ``method`` is ``"neurokit"`` or ``"kim2004"``: SCRs whose
        amplitude is below this fraction of the largest amplitude are discarded.

    Returns
    -------
    info : dict
        Dictionary with the sample indices of SCR onsets and peaks and the peak
        heights, under the keys ``"SCR_Onsets"``, ``"SCR_Peaks"`` and
        ``"SCR_Height"``.

    Raises
    ------
    KeyError
        If a DataFrame without an ``"EDA_Phasic"`` column is passed.
    ValueError
        If ``method`` is not one of the supported names.

    See Also
    --------
    eda_simulate, eda_clean, eda_phasic, eda_fixpeaks, eda_peaks, eda_process, eda_plot

    References
    ----------
    * Gamboa (2008); Kim, Bang & Kim (2004); van Halem et al. (2020);
      Nabian et al. (2018). See the individual ``_eda_findpeaks_*`` helpers.
    """
    # A DataFrame is fine as long as it holds the phasic component.
    if isinstance(eda_phasic, pd.DataFrame):
        try:
            eda_phasic = eda_phasic["EDA_Phasic"]
        except KeyError:
            raise KeyError(
                "NeuroKit error: eda_findpeaks(): Please provide an array as the input signal."
            )

    selected = method.lower()  # accept any capitalisation
    if selected in ("gamboa2008", "gamboa"):
        return _eda_findpeaks_gamboa2008(eda_phasic)
    if selected in ("kim", "kbk", "kim2004", "biosppy"):
        return _eda_findpeaks_kim2004(
            eda_phasic, sampling_rate=sampling_rate, amplitude_min=amplitude_min
        )
    if selected in ("nk", "nk2", "neurokit", "neurokit2"):
        return _eda_findpeaks_neurokit(eda_phasic, amplitude_min=amplitude_min)
    if selected in ("vanhalem2020", "vanhalem", "halem2020"):
        return _eda_findpeaks_vanhalem2020(eda_phasic, sampling_rate=sampling_rate)
    if selected in ("nabian2018", "nabian"):
        return _eda_findpeaks_nabian2018(eda_phasic)
    raise ValueError(
        "NeuroKit error: eda_findpeaks(): 'method' should be one of 'neurokit', 'gamboa2008', 'kim2004'"
        " 'vanhalem2020' or 'nabian2018'."
    )
# =============================================================================
# Methods
# =============================================================================
def _eda_findpeaks_neurokit(eda_phasic, amplitude_min=0.1):
    """Detect SCRs with NeuroKit's generic `signal_findpeaks`, filtering out
    peaks whose relative height is below `amplitude_min`."""
    detected = signal_findpeaks(
        eda_phasic, relative_height_min=amplitude_min, relative_max=True
    )
    return {
        "SCR_Onsets": detected["Onsets"],
        "SCR_Peaks": detected["Peaks"],
        "SCR_Height": eda_phasic[detected["Peaks"]],
    }
def _eda_findpeaks_vanhalem2020(eda_phasic, sampling_rate=1000):
    """Follows approach of van Halem et al. (2020).

    A peak is considered when there is a consistent increase of 0.5 seconds following a consistent
    decrease of 0.5 seconds.

    * van Halem, S., Van Roekel, E., Kroencke, L., Kuper, N., & Denissen, J. (2020).
      Moments That Matter? On the Complexity of Using Triggers Based on Skin Conductance to Sample
      Arousing Events Within an Experience Sampling Framework. European Journal of Personality.
    """
    # smooth (Savitzky-Golay; window_size=501 samples regardless of sampling rate)
    eda_phasic = signal_filter(
        eda_phasic,
        sampling_rate=sampling_rate,
        lowcut=None,
        highcut=None,
        method="savgol",
        window_size=501,
    )
    info = signal_findpeaks(eda_phasic)
    peaks = info["Peaks"]
    threshold = 0.5 * sampling_rate  # 0.5 s expressed in samples
    # Define each peak as a consistent increase of 0.5s
    increase = info["Peaks"] - info["Onsets"]
    peaks = peaks[increase > threshold]
    # Map the surviving peaks back onto positions in the original peak array.
    idx = np.where(peaks[:, None] == info["Peaks"][None, :])[1]
    # Check if each peak is followed by consistent decrease of 0.5s
    decrease = info["Offsets"][idx] - peaks
    if any(np.isnan(decrease)):
        # NOTE(review): `False` is written into a float array, i.e. NaNs become
        # 0.0, which then fail the `> threshold` test below and get dropped.
        decrease[np.isnan(decrease)] = False
    # NOTE(review): the filter is only applied when at least one value is below
    # the threshold, and it keeps strictly-greater values — values exactly equal
    # to the threshold are treated inconsistently between the two branches.
    if any(decrease < threshold):
        keep = np.where(decrease > threshold)[0]
        idx = idx[keep]  # Update index
    info = {
        "SCR_Onsets": info["Onsets"][idx],
        "SCR_Peaks": info["Peaks"][idx],
        "SCR_Height": eda_phasic[info["Peaks"][idx]],
    }
    return info
def _eda_findpeaks_gamboa2008(eda_phasic):
"""Basic method to extract Skin Conductivity Responses (SCR) from an EDA signal following the
approach in the thesis by Gamboa (2008).
* Gamboa, H. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology.
PhD Thesis Universidade.
"""
derivative = np.diff(np.sign(np.diff(eda_phasic)))
# find extrema
pi = np.nonzero(derivative < 0)[0] + 1
ni = np.nonzero(derivative > 0)[0] + 1
# sanity check
if len(pi) == 0 or len(ni) == 0:
raise ValueError(
"NeuroKit error: eda_findpeaks(): Could not find enough SCR peaks. Try another method."
)
# pair vectors
if ni[0] < pi[0]:
ni = ni[1:]
if pi[-1] > ni[-1]:
pi = pi[:-1]
if len(pi) > len(ni):
pi = pi[:-1]
li = min(len(pi), len(ni))
peaks = pi[:li]
onsets = ni[:li]
# indices
i0 = peaks - (onsets - peaks) / 2.0
if i0[0] < 0:
i0[0] = 0
# amplitude
amplitudes = np.array([np.max(eda_phasic[peaks[i] : onsets[i]]) for i in range(li)])
# output
info = {"SCR_Onsets": onsets, "SCR_Peaks": peaks, "SCR_Height": amplitudes}
return info
def _eda_findpeaks_kim2004(eda_phasic, sampling_rate=1000, amplitude_min=0.1):
    """KBK method to extract Skin Conductivity Responses (SCR) from an EDA signal following the approach by Kim et
    al.(2004).

    Each SCR is delimited by a consecutive pair of zero-crossings of the smoothed
    first derivative; its peak is the maximum inside that window. SCRs with an
    amplitude below ``amplitude_min`` times the largest amplitude are discarded.
    Assumes `eda_phasic` supports `.max()` on slices (numpy array or Series).

    * Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term
      monitoring of physiological signals. Medical and biological engineering and computing, 42(3),
      419-427.
    """
    # differentiation
    df = np.diff(eda_phasic)
    # smooth (Bartlett kernel, one second wide)
    df = signal_smooth(signal=df, kernel="bartlett", size=int(sampling_rate))
    # zero crosses; trim so that crossings come in (rising, falling) pairs
    zeros = signal_zerocrossings(df)
    if np.all(df[: zeros[0]] > 0):
        zeros = zeros[1:]
    if np.all(df[zeros[-1] :] > 0):
        zeros = zeros[:-1]
    scrs, amps, ZC, pks = [], [], [], []
    for i in range(0, len(zeros) - 1, 2):
        # window between one crossing pair = one candidate SCR
        scrs += [eda_phasic[zeros[i] : zeros[i + 1]]]
        aux = scrs[-1].max()
        if aux > 0:
            amps += [aux]
            ZC += [zeros[i]]
            ZC += [zeros[i + 1]]
            pks += [zeros[i] + np.argmax(eda_phasic[zeros[i] : zeros[i + 1]])]
    amps = np.array(amps)
    ZC = np.array(ZC)
    pks = np.array(pks)
    onsets = ZC[::2]  # every first crossing of each accepted pair
    # exclude SCRs with small amplitude
    masked = amps > (amplitude_min * np.nanmax(amps))  # threshold
    amps = amps[masked]
    pks = pks[masked]
    onsets = onsets[masked]
    # output
    info = {"SCR_Onsets": onsets, "SCR_Peaks": pks, "SCR_Height": amps}
    return info
def _eda_findpeaks_nabian2018(eda_phasic):
    """Basic method to extract Skin Conductivity Responses (SCR) from an EDA signal following the
    approach by Nabian et al. (2018).

    Zero-crossings of the smoothed first derivative delimit each SCR. The
    amplitude of the SCR is the maximum value between the two zero-crossings;
    the paper-defined amplitude (maximum minus signal at the initial crossing)
    is used for rejection: SCRs smaller than 10 percent of the maximum SCR
    amplitude already accepted are eliminated. It is crucial that artifacts are
    removed before finding peaks.

    Bug fix vs. previous revision: the candidate-peak filter iterated over
    ``[peaks]`` (a one-element list holding the whole index array) instead of
    ``peaks``, which raised "truth value of an array is ambiguous" whenever
    several samples shared the window maximum. The filter now iterates the
    indices themselves, and the duplicated accept/reject logic for the first vs.
    subsequent SCRs is merged into a single branch with identical behavior.

    Raises
    ------
    IndexError
        If the window maximum sits exactly on the initial crossing (no strictly
        interior peak index exists) — same behavior as the previous revision.

    * Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., & Ostadabbas, S. (2018). An
      Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data. IEEE
      journal of translational engineering in health and medicine, 6, 2800711.
      https://doi.org/10.1109/JTEHM.2018.2878000
    """
    # differentiation
    eda_phasic_diff = np.diff(eda_phasic)
    # smooth
    eda_phasic_smoothed = signal_smooth(eda_phasic_diff, kernel="bartlett", size=20)
    # zero crossings
    pos_crossings = signal_zerocrossings(eda_phasic_smoothed, direction="positive")
    neg_crossings = signal_zerocrossings(eda_phasic_smoothed, direction="negative")
    # if negative crossing happens before the positive crossing,
    # delete the first negative crossing because we want to identify peaks
    if neg_crossings[0] < pos_crossings[0]:
        neg_crossings = neg_crossings[1:]
    # Sanitize consecutive crossings (truncate to equal lengths)
    if len(pos_crossings) > len(neg_crossings):
        pos_crossings = pos_crossings[0 : len(neg_crossings)]
    elif len(pos_crossings) < len(neg_crossings):
        neg_crossings = neg_crossings[0 : len(pos_crossings)]
    peaks_list = []
    onsets_list = []
    amps_list = []
    for i, j in zip(pos_crossings, neg_crossings):
        window = eda_phasic[i:j]
        # Window maximum: the SCR peak value for this crossing pair.
        amp = np.nanmax(window)
        # Paper-defined amplitude (relative to the initial crossing) is used for
        # rejection; the first SCR is always accepted.
        if len(amps_list) > 0 and (amp - eda_phasic[i]) < (0.1 * max(amps_list)):
            continue
        # Locate the peak sample strictly inside the window. Multiple samples
        # may share the maximum value; keep the first one within (i, j).
        candidates = [peak for peak in np.where(eda_phasic == amp)[0] if i < peak < j]
        peaks_list.append(candidates[0])
        onsets_list.append(i)
        amps_list.append(amp)
    # output
    info = {
        "SCR_Onsets": np.array(onsets_list),
        "SCR_Peaks": np.array(peaks_list),
        "SCR_Height": np.array(amps_list),
    }
    return info
| 12,988 | 36.432277 | 114 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_intervalrelated.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
from .eda_autocor import eda_autocor
from .eda_sympathetic import eda_sympathetic
def eda_intervalrelated(data, sampling_rate=1000, **kwargs):
    """**EDA Analysis on Interval-Related Data**

    Compute summary EDA features over longer recordings (typically > 10 s),
    such as resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A DataFrame of processed signals (e.g., from :func:`eda_process` or
        :func:`bio_process`), or a dict of such DataFrames (e.g., epochs from
        :func:`epochs_create`, each carrying a ``"Label"`` column).
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
        Defaults to 1000.
    **kwargs
        Other arguments passed to the underlying feature functions.

    Returns
    -------
    DataFrame
        One row per input (epoch), with columns including ``"SCR_Peaks_N"``,
        ``"SCR_Peaks_Amplitude_Mean"``, ``"EDA_Tonic_SD"``,
        ``"EDA_Sympathetic"`` (only for signals > 64 s) and
        ``"EDA_Autocorrelation"`` (only for signals > 30 s).

    See Also
    --------
    .bio_process, eda_eventrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      data = nk.data("bio_resting_8min_100hz")
      df, info = nk.eda_process(data["EDA"], sampling_rate=100)

      nk.eda_intervalrelated(df, sampling_rate=100)

      epochs = nk.epochs_create(df, events=[0, 25300], sampling_rate=100, epochs_end=20)
      nk.eda_intervalrelated(epochs, sampling_rate=100)

    """
    if isinstance(data, pd.DataFrame):
        # Single recording: one row of features.
        summary = _eda_intervalrelated(data, sampling_rate=sampling_rate, **kwargs)
        results = pd.DataFrame.from_dict(summary, orient="index").T
    elif isinstance(data, dict):
        # One feature dict per epoch, each seeded with its label.
        summaries = {}
        for key in data:
            epoch_summary = {"Label": data[key]["Label"].iloc[0]}
            summaries[key] = _eda_intervalrelated(
                data[key], epoch_summary, sampling_rate=sampling_rate, **kwargs
            )
        results = pd.DataFrame.from_dict(summaries, orient="index")
    return results
# =============================================================================
# Internals
# =============================================================================
def _eda_intervalrelated(
data, output={}, sampling_rate=1000, method_sympathetic="posada", **kwargs
):
"""Format input for dictionary."""
# Sanitize input
colnames = data.columns.values
# SCR Peaks
if "SCR_Peaks" not in colnames:
warn(
"We couldn't find an `SCR_Peaks` column. Returning NaN for N peaks.",
category=NeuroKitWarning,
)
output["SCR_Peaks_N"] = np.nan
else:
output["SCR_Peaks_N"] = np.nansum(data["SCR_Peaks"].values)
# Peak amplitude
if "SCR_Amplitude" not in colnames:
warn(
"We couldn't find an `SCR_Amplitude` column. Returning NaN for peak amplitude.",
category=NeuroKitWarning,
)
output["SCR_Peaks_Amplitude_Mean"] = np.nan
else:
output["SCR_Peaks_Amplitude_Mean"] = np.nanmean(data["SCR_Amplitude"].values)
# Get variability of tonic
if "EDA_Tonic" in colnames:
output["EDA_Tonic_SD"] = np.nanstd(data["EDA_Tonic"].values)
# EDA Sympathetic
output.update({"EDA_Sympathetic": np.nan, "EDA_SympatheticN": np.nan}) # Default values
if len(data) > sampling_rate * 64:
if "EDA_Clean" in colnames:
output.update(
eda_sympathetic(
data["EDA_Clean"], sampling_rate=sampling_rate, method=method_sympathetic
)
)
elif "EDA_Raw" in colnames:
# If not clean signal, use raw
output.update(
eda_sympathetic(
data["EDA_Raw"], sampling_rate=sampling_rate, method=method_sympathetic
)
)
# EDA autocorrelation
output.update({"EDA_Autocorrelation": np.nan}) # Default values
if len(data) > sampling_rate * 30: # 30 seconds minimum (NOTE: somewhat arbitrary)
if "EDA_Clean" in colnames:
output["EDA_Autocorrelation"] = eda_autocor(
data["EDA_Clean"], sampling_rate=sampling_rate, **kwargs
)
elif "EDA_Raw" in colnames:
# If not clean signal, use raw
output["EDA_Autocorrelation"] = eda_autocor(
data["EDA_Raw"], sampling_rate=sampling_rate, **kwargs
)
return output
| 5,246 | 33.071429 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/__init__.py | """Submodule for NeuroKit."""
from .eda_analyze import eda_analyze
from .eda_autocor import eda_autocor
from .eda_changepoints import eda_changepoints
from .eda_clean import eda_clean
from .eda_eventrelated import eda_eventrelated
from .eda_findpeaks import eda_findpeaks
from .eda_fixpeaks import eda_fixpeaks
from .eda_intervalrelated import eda_intervalrelated
from .eda_peaks import eda_peaks
from .eda_phasic import eda_phasic
from .eda_plot import eda_plot
from .eda_process import eda_process
from .eda_simulate import eda_simulate
from .eda_sympathetic import eda_sympathetic
__all__ = [
"eda_simulate",
"eda_clean",
"eda_phasic",
"eda_findpeaks",
"eda_fixpeaks",
"eda_peaks",
"eda_process",
"eda_plot",
"eda_eventrelated",
"eda_intervalrelated",
"eda_analyze",
"eda_autocor",
"eda_changepoints",
"eda_sympathetic",
]
| 885 | 24.314286 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_sympathetic.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy
from ..misc import NeuroKitWarning
from ..signal import signal_filter, signal_resample, signal_timefrequency
from ..signal.signal_power import _signal_power_instant_compute
from ..signal.signal_psd import _signal_psd_welch
from ..stats import standardize
def eda_sympathetic(
    eda_signal, sampling_rate=1000, frequency_band=[0.045, 0.25], method="posada", show=False
):
    """**Sympathetic Nervous System Index from Electrodermal activity (EDA)**

    Estimate sympathetic activity from the spectral content of the EDA signal.
    Following Posada-Quintero et al. (2016), the sympathetic component is taken
    to lie in the 0.045-0.25 Hz band. Note that the ``"posada"`` method needs a
    signal of at least 64 seconds.

    Parameters
    ----------
    eda_signal : Union[list, np.array, pd.Series]
        The EDA signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    frequency_band : list
        Lower and upper frequency (Hz) of the band used to compute the power
        spectral density. Defaults to [0.045, 0.25].
    method : str
        Can be one of ``"ghiasi"`` or ``"posada"``.
    show : bool
        If True, plot the power spectrum of the EDA signal within the specified
        frequency band.

    Returns
    -------
    dict
        Dictionary with keys ``"EDA_Sympathetic"`` and ``"EDA_SympatheticN"``
        (the latter normalized by total power).

    Raises
    ------
    ValueError
        If ``method`` is not recognized.

    See Also
    --------
    .signal_filter, .signal_power, .signal_psd

    References
    ----------
    * Ghiasi et al. (2018), doi:10.1109/embc.2018.8512932
    * Posada-Quintero et al. (2016), Annals of biomedical engineering, 44(10), 3124-3135.
    """
    normalized = method.lower()
    if normalized in ("ghiasi", "ghiasi2018"):
        return _eda_sympathetic_ghiasi(
            eda_signal, sampling_rate=sampling_rate, frequency_band=frequency_band, show=show
        )
    if normalized in ("posada", "posada-quintero", "quintero", "posada2016"):
        return _eda_sympathetic_posada(
            eda_signal, sampling_rate=sampling_rate, frequency_band=frequency_band, show=show
        )
    raise ValueError(
        "NeuroKit error: eda_sympathetic(): 'method' should be one of 'ghiasi', 'posada'."
    )
# =============================================================================
# Methods
# =============================================================================
def _eda_sympathetic_posada(
    eda_signal, frequency_band=[0.045, 0.25], sampling_rate=1000, show=True, out={}
):
    # Posada-Quintero et al. (2016): sympathetic tone estimated from spectral
    # power of EDA in `frequency_band` after resampling to 2 Hz.
    # NOTE(review): the mutable defaults `frequency_band=[...]` and `out={}` are
    # never mutated in place here (`out` is rebound at the end), so they are
    # harmless, but `out` is effectively an unused parameter.
    # This method assumes signal longer than 64 s
    if len(eda_signal) <= sampling_rate * 64:
        warn(
            "The 'posada2016' method requires a signal of length > 60 s. Try with"
            + " `method='ghiasi2018'`. Returning NaN values for now.",
            category=NeuroKitWarning,
        )
        return {"EDA_Sympathetic": np.nan, "EDA_SympatheticN": np.nan}
    # Resample the eda signal before calculate the synpathetic index based on Posada (2016)
    eda_signal_400hz = signal_resample(
        eda_signal, sampling_rate=sampling_rate, desired_sampling_rate=400
    )
    # 8-th order Chebyshev Type I low-pass filter (anti-aliasing before decimation)
    sos = scipy.signal.cheby1(8, 1, 0.8, "lowpass", fs=400, output="sos")
    eda_signal_filtered = scipy.signal.sosfilt(sos, eda_signal_400hz)
    # Two-stage decimation: 400 Hz -> 40 Hz -> 2 Hz
    downsampled_1 = scipy.signal.decimate(eda_signal_filtered, q=10, n=8)  # Keep every 10th sample
    downsampled_2 = scipy.signal.decimate(downsampled_1, q=20, n=8)  # Keep every 20th sample
    # High pass filter (removes the very-low-frequency drift below 0.01 Hz)
    eda_filtered = signal_filter(
        downsampled_2, sampling_rate=2, lowcut=0.01, highcut=None, method="butterworth", order=8
    )
    nperseg = 128
    overlap = nperseg // 2  # 50 % data overlap
    # Compute psd (Welch, Blackman window, at the 2 Hz rate)
    frequency, power = _signal_psd_welch(
        eda_filtered, sampling_rate=2, nperseg=nperseg, window_type="blackman", noverlap=overlap
    )
    psd = pd.DataFrame({"Frequency": frequency, "Power": power})
    # Get sympathetic nervous system indexes: band power in `frequency_band`
    eda_symp = _signal_power_instant_compute(psd, (frequency_band[0], frequency_band[1]))
    # Compute normalized psd (power divided by the spectrum's maximum)
    psd["Power"] /= np.max(psd["Power"])
    eda_symp_normalized = _signal_power_instant_compute(psd, (frequency_band[0], frequency_band[1]))
    psd_plot = psd.loc[
        np.logical_and(psd["Frequency"] >= frequency_band[0], psd["Frequency"] <= frequency_band[1])
    ]
    if show is True:
        ax = psd_plot.plot(x="Frequency", y="Power", title="EDA Power Spectral Density (us^2/Hz)")
        ax.set(xlabel="Frequency (Hz)", ylabel="Spectrum")
    out = {"EDA_Sympathetic": eda_symp, "EDA_SympatheticN": eda_symp_normalized}
    return out
def _eda_sympathetic_ghiasi(
    eda_signal, sampling_rate=1000, frequency_band=[0.045, 0.25], show=True, out={}
):
    # Ghiasi et al. (2018): sympathetic index as the mean time-frequency (STFT)
    # power of the standardized, band-filtered EDA signal in `frequency_band`.
    # NOTE(review): `out={}` is a mutable default but is rebound (not mutated)
    # before returning, so it is harmless, although effectively unused.
    min_frequency = frequency_band[0]
    max_frequency = frequency_band[1]
    # Downsample, normalize, filter (0.01-0.5 Hz Butterworth at 50 Hz)
    desired_sampling_rate = 50
    downsampled = signal_resample(
        eda_signal, sampling_rate=sampling_rate, desired_sampling_rate=desired_sampling_rate
    )
    normalized = standardize(downsampled)
    filtered = signal_filter(
        normalized,
        sampling_rate=desired_sampling_rate,
        lowcut=0.01,
        highcut=0.5,
        method="butterworth",
    )
    # Divide the signal into segments and obtain the timefrequency representation
    # (60 s Blackman windows, advancing 1 s at a time)
    overlap = 59 * 50  # overlap of 59s in samples
    # TODO: the plot should be improved for this specific case
    _, _, bins = signal_timefrequency(
        filtered,
        sampling_rate=desired_sampling_rate,
        min_frequency=min_frequency,
        max_frequency=max_frequency,
        method="stft",
        window=60,
        window_type="blackman",
        overlap=overlap,
        show=show,
    )
    # Mean STFT magnitude over the band = sympathetic index; normalized by the
    # maximum bin value.
    eda_symp = np.mean(bins)
    eda_symp_normalized = eda_symp / np.max(bins)
    out = {"EDA_Sympathetic": eda_symp, "EDA_SympatheticN": eda_symp_normalized}
    return out
| 7,230 | 34.446078 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/eda/eda_plot.py | # -*- coding: utf-8 -*-
import matplotlib.collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def eda_plot(eda_signals, sampling_rate=None, static=True):
    """**Visualize electrodermal activity (EDA) data**

    Parameters
    ----------
    eda_signals : DataFrame
        DataFrame obtained from :func:`eda_process()`.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second). Defaults to None.
        If provided, the x-axis is labelled in seconds; otherwise in samples.
    static : bool
        If True, a static plot will be generated with matplotlib.
        If False, an interactive plot will be generated with plotly.
        Defaults to True.

    Returns
    -------
    fig
        Figure representing a plot of the processed EDA signals.

    Raises
    ------
    ImportError
        If ``static=False`` and the optional ``plotly`` package is not installed.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      eda_signal = nk.eda_simulate(duration=30, scr_number=5, drift=0.1, noise=0, sampling_rate=250)
      eda_signals, info = nk.eda_process(eda_signal, sampling_rate=250)
      @savefig p_eda_plot1.png scale=100%
      nk.eda_plot(eda_signals)
      @suppress
      plt.close()

    See Also
    --------
    eda_process
    """
    # Determine peaks, onsets, and half recovery (sample indices where the
    # corresponding marker columns equal 1).
    peaks = np.where(eda_signals["SCR_Peaks"] == 1)[0]
    onsets = np.where(eda_signals["SCR_Onsets"] == 1)[0]
    half_recovery = np.where(eda_signals["SCR_Recovery"] == 1)[0]
    # Determine unit of x-axis.
    if sampling_rate is not None:
        x_label = "Seconds"
        x_axis = np.linspace(0, len(eda_signals) / sampling_rate, len(eda_signals))
    else:
        x_label = "Samples"
        x_axis = np.arange(0, len(eda_signals))
    if static:
        # Three stacked panels sharing the x-axis: raw/clean, phasic, tonic.
        fig, (ax0, ax1, ax2) = plt.subplots(nrows=3, ncols=1, sharex=True)
        last_ax = fig.get_axes()[-1]
        last_ax.set_xlabel(x_label)
        plt.tight_layout(h_pad=0.2)
        # Plot cleaned and raw electrodermal activity.
        ax0.set_title("Raw and Cleaned Signal")
        fig.suptitle("Electrodermal Activity (EDA)", fontweight="bold")
        ax0.plot(x_axis, eda_signals["EDA_Raw"], color="#B0BEC5", label="Raw", zorder=1)
        ax0.plot(
            x_axis,
            eda_signals["EDA_Clean"],
            color="#9C27B0",
            label="Cleaned",
            linewidth=1.5,
            zorder=1,
        )
        ax0.legend(loc="upper right")
        # Plot skin conductance response.
        ax1.set_title("Skin Conductance Response (SCR)")
        # Plot Phasic.
        ax1.plot(
            x_axis,
            eda_signals["EDA_Phasic"],
            color="#E91E63",
            label="Phasic Component",
            linewidth=1.5,
            zorder=1,
        )
        # Mark segments: dashed rise-time, solid amplitude, dashed half-recovery.
        risetime_coord, amplitude_coord, halfr_coord = _eda_plot_dashedsegments(
            eda_signals, ax1, x_axis, onsets, peaks, half_recovery
        )
        risetime = matplotlib.collections.LineCollection(
            risetime_coord, colors="#FFA726", linewidths=1, linestyle="dashed"
        )
        ax1.add_collection(risetime)
        amplitude = matplotlib.collections.LineCollection(
            amplitude_coord, colors="#1976D2", linewidths=1, linestyle="solid"
        )
        ax1.add_collection(amplitude)
        halfr = matplotlib.collections.LineCollection(
            halfr_coord, colors="#FDD835", linewidths=1, linestyle="dashed"
        )
        ax1.add_collection(halfr)
        ax1.legend(loc="upper right")
        # Plot Tonic.
        ax2.set_title("Skin Conductance Level (SCL)")
        ax2.plot(
            x_axis,
            eda_signals["EDA_Tonic"],
            color="#673AB7",
            label="Tonic Component",
            linewidth=1.5,
        )
        ax2.legend(loc="upper right")
        return fig
    else:
        # Create interactive plot with plotly.
        try:
            import plotly.graph_objects as go
            from plotly.subplots import make_subplots
        except ImportError as e:
            # NOTE(review): the message says "ppg_plot()" — looks like a
            # copy/paste from the PPG module; fix the string separately.
            raise ImportError(
                "NeuroKit error: ppg_plot(): the 'plotly'",
                " module is required when 'static' is False.",
                " Please install it first (`pip install plotly`).",
            ) from e
        fig = make_subplots(
            rows=3,
            cols=1,
            shared_xaxes=True,
            vertical_spacing=0.05,
            subplot_titles=(
                "Raw and Cleaned Signal",
                "Skin Conductance Response (SCR)",
                "Skin Conductance Level (SCL)",
            ),
        )
        # Plot cleaned and raw electrodermal activity.
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=eda_signals["EDA_Raw"],
                mode="lines",
                name="Raw",
                line=dict(color="#B0BEC5"),
                showlegend=True,
            ),
            row=1,
            col=1,
        )
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=eda_signals["EDA_Clean"],
                mode="lines",
                name="Cleaned",
                line=dict(color="#9C27B0"),
                showlegend=True,
            ),
            row=1,
            col=1,
        )
        # Plot skin conductance response.
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=eda_signals["EDA_Phasic"],
                mode="lines",
                name="Phasic Component",
                line=dict(color="#E91E63"),
                showlegend=True,
            ),
            row=2,
            col=1,
        )
        # Mark segments (scatter markers only; return values unused here).
        _, _, _ = _eda_plot_dashedsegments(
            eda_signals, fig, x_axis, onsets, peaks, half_recovery, static=static
        )
        # TODO add dashed segments to plotly version
        # Plot skin conductance level.
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=eda_signals["EDA_Tonic"],
                mode="lines",
                name="Tonic Component",
                line=dict(color="#673AB7"),
                showlegend=True,
            ),
            row=3,
            col=1,
        )
        # Add title to entire figure.
        fig.update_layout(title_text="Electrodermal Activity (EDA)", title_x=0.5)
        return fig
# =============================================================================
# Internals
# =============================================================================
def _eda_plot_dashedsegments(
    eda_signals, ax, x_axis, onsets, peaks, half_recovery, static=True
):
    """Scatter SCR landmarks on `ax` and build segment coordinates for eda_plot.

    Parameters
    ----------
    eda_signals : DataFrame
        Must contain an ``"EDA_Phasic"`` column (pandas Series).
    ax : matplotlib Axes (static=True) or plotly Figure (static=False)
        Target to draw the onset/peak/half-recovery markers on.
    x_axis : array
        X values (samples or seconds), same length as `eda_signals`.
    onsets, peaks, half_recovery : array
        Sample indices of the corresponding SCR landmarks.
    static : bool
        Selects the matplotlib (True) or plotly (False) drawing backend.

    Returns
    -------
    tuple of lists
        ``(risetime_coord, amplitude_coord, halfr_coord)`` — each a list of
        ``((x0, y0), (x1, y1))`` pairs suitable for a LineCollection. Only used
        by the matplotlib backend.
    """
    # Mark onsets, peaks, and half-recovery.
    onset_x_values = x_axis[onsets]
    onset_y_values = eda_signals["EDA_Phasic"][onsets].values
    peak_x_values = x_axis[peaks]
    peak_y_values = eda_signals["EDA_Phasic"][peaks].values
    halfr_x_values = x_axis[half_recovery]
    halfr_y_values = eda_signals["EDA_Phasic"][half_recovery].values
    # Invisible scatter at (peak index, onset value) used to extend axis limits.
    end_onset = pd.Series(
        eda_signals["EDA_Phasic"][onsets].values, eda_signals["EDA_Phasic"][peaks].index
    )
    risetime_coord = []
    amplitude_coord = []
    halfr_coord = []
    for i in range(len(onsets)):
        # Rise time: horizontal segment from onset to its peak, at onset height.
        start = (onset_x_values[i], onset_y_values[i])
        end = (peak_x_values[i], onset_y_values[i])
        risetime_coord.append((start, end))
    for i in range(len(peaks)):
        # SCR Amplitude: vertical segment from onset height up to the peak.
        start = (peak_x_values[i], onset_y_values[i])
        end = (peak_x_values[i], peak_y_values[i])
        amplitude_coord.append((start, end))
    for i in range(len(half_recovery)):
        # Half recovery: horizontal segment from the preceding peak to the
        # recovery point, at recovery height.
        end = (halfr_x_values[i], halfr_y_values[i])
        peak_x_idx = np.where(peak_x_values < halfr_x_values[i])[0][-1]
        start = (peak_x_values[peak_x_idx], halfr_y_values[i])
        halfr_coord.append((start, end))
    if static:
        # Plot with matplotlib.
        # Mark onsets, peaks, and half-recovery.
        ax.scatter(
            x_axis[onsets],
            eda_signals["EDA_Phasic"][onsets],
            color="#FFA726",
            label="SCR - Onsets",
            zorder=2,
        )
        ax.scatter(
            x_axis[peaks],
            eda_signals["EDA_Phasic"][peaks],
            color="#1976D2",
            label="SCR - Peaks",
            zorder=2,
        )
        ax.scatter(
            x_axis[half_recovery],
            eda_signals["EDA_Phasic"][half_recovery],
            color="#FDD835",
            label="SCR - Half recovery",
            zorder=2,
        )
        # Fully transparent points — only there to widen the autoscaled limits.
        ax.scatter(x_axis[end_onset.index], end_onset.values, alpha=0)
    else:
        # Create interactive plot with plotly.
        try:
            import plotly.graph_objects as go
        except ImportError as e:
            # NOTE(review): message says "ppg_plot()" — copy/paste from the PPG
            # module; fix the string separately.
            raise ImportError(
                "NeuroKit error: ppg_plot(): the 'plotly'",
                " module is required when 'static' is False.",
                " Please install it first (`pip install plotly`).",
            ) from e
        # Plot with plotly.
        # Mark onsets, peaks, and half-recovery.
        ax.add_trace(
            go.Scatter(
                x=x_axis[onsets],
                y=eda_signals["EDA_Phasic"][onsets],
                mode="markers",
                name="SCR - Onsets",
                marker=dict(color="#FFA726"),
                showlegend=True,
            ),
            row=2,
            col=1,
        )
        ax.add_trace(
            go.Scatter(
                x=x_axis[peaks],
                y=eda_signals["EDA_Phasic"][peaks],
                mode="markers",
                name="SCR - Peaks",
                marker=dict(color="#1976D2"),
                showlegend=True,
            ),
            row=2,
            col=1,
        )
        ax.add_trace(
            go.Scatter(
                x=x_axis[half_recovery],
                y=eda_signals["EDA_Phasic"][half_recovery],
                mode="markers",
                name="SCR - Half recovery",
                marker=dict(color="#FDD835"),
                showlegend=True,
            ),
            row=2,
            col=1,
        )
        ax.add_trace(
            go.Scatter(
                x=x_axis[end_onset.index],
                y=end_onset.values,
                mode="markers",
                marker=dict(color="#FDD835", opacity=0),
                showlegend=False,
            ),
            row=2,
            col=1,
        )
    return risetime_coord, amplitude_coord, halfr_coord
# =============================================================================
# File boundary: neurokit2/ecg/ecg_delineate.py
# =============================================================================
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal
from ..epochs import epochs_create, epochs_to_df
from ..signal import (
signal_findpeaks,
signal_formatpeaks,
signal_rate,
signal_resample,
signal_smooth,
signal_zerocrossings,
)
from ..stats import standardize
from .ecg_peaks import ecg_peaks
from .ecg_segment import ecg_segment
def ecg_delineate(
    ecg_cleaned,
    rpeaks=None,
    sampling_rate=1000,
    method="dwt",
    show=False,
    show_type="peaks",
    check=False,
    **kwargs
):
    """**Delineate QRS complex**

    Function to delineate the QRS complex, i.e., the different waves of the cardiac cycles. A
    typical ECG heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents
    the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex
    reflects the rapid depolarization of the right and left ventricles. Since the ventricles are
    the largest part of the heart, in terms of mass, the QRS complex usually has a much larger
    amplitude than the P-wave. The T wave represents the ventricular repolarization of the
    ventricles.On rare occasions, a U wave can be seen following the T wave. The U wave is believed
    to be related to the last remnants of ventricular repolarization.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        The cleaned ECG channel as returned by ``ecg_clean()``.
    rpeaks : Union[list, np.array, pd.Series]
        The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info
        dictionary returned by ``ecg_findpeaks()``.
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
    method : str
        Can be one of ``"peak"`` for a peak-based method, ``"cwt"`` for continuous wavelet transform
        or ``"dwt"`` (default) for discrete wavelet transform.
    show : bool
        If ``True``, will return a plot to visualizing the delineated waves information.
    show_type: str
        The type of delineated waves information showed in the plot.
        Can be ``"peaks"``, ``"bounds_R"``, ``"bounds_T"``, ``"bounds_P"`` or ``"all"``.
    check : bool
        Defaults to ``False``. If ``True``, replaces the delineated features with ``np.nan`` if its
        standardized distance from R-peaks is more than 3.
    **kwargs
        Other optional arguments.

    Returns
    -------
    waves : dict
        A dictionary containing additional information.
        For derivative method, the dictionary contains the samples at which P-peaks, Q-peaks,
        S-peaks, T-peaks, P-onsets and T-offsets occur, accessible with the keys ``"ECG_P_Peaks"``,
        ``"ECG_Q_Peaks"``, ``"ECG_S_Peaks"``, ``"ECG_T_Peaks"``, ``"ECG_P_Onsets"``,
        ``"ECG_T_Offsets"``, respectively.

        For wavelet methods, in addition to the above information, the dictionary contains the
        samples at which QRS-onsets and QRS-offsets occur, accessible with the key
        ``"ECG_P_Peaks"``, ``"ECG_T_Peaks"``, ``"ECG_P_Onsets"``, ``"ECG_P_Offsets"``,
        ``"ECG_Q_Peaks"``, ``"ECG_S_Peaks"``, ``"ECG_T_Onsets"``, ``"ECG_T_Offsets"``,
        ``"ECG_R_Onsets"``, ``"ECG_R_Offsets"``, respectively.

    signals : DataFrame
        A DataFrame of same length as the input signal in which occurrences of
        peaks, onsets and offsets marked as "1" in a list of zeros.

    See Also
    --------
    ecg_clean, .signal_fixpeaks, ecg_peaks, .signal_rate, ecg_process, ecg_plot

    Examples
    --------
    * Step 1. Delineate

    .. ipython:: python

      import neurokit2 as nk

      # Simulate ECG signal
      ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
      # Get R-peaks location
      _, rpeaks = nk.ecg_peaks(ecg, sampling_rate=1000)
      # Delineate cardiac cycle
      signals, waves = nk.ecg_delineate(ecg, rpeaks, sampling_rate=1000)

    * Step 2. Plot P-Peaks and T-Peaks

    .. ipython:: python

      @savefig p_ecg_delineate1.png scale=100%
      nk.events_plot([waves["ECG_P_Peaks"], waves["ECG_T_Peaks"]], ecg)
      @suppress
      plt.close()

    References
    --------------
    - Martínez, J. P., Almeida, R., Olmos, S., Rocha, A. P., & Laguna, P. (2004). A wavelet-based
      ECG delineator: evaluation on standard databases. IEEE Transactions on biomedical engineering,
      51(4), 570-581.

    """
    # Sanitize input for ecg_cleaned
    if isinstance(ecg_cleaned, pd.DataFrame):
        # Processed DataFrame: extract the first "ECG_Clean*" column.
        cols = [col for col in ecg_cleaned.columns if "ECG_Clean" in col]
        if cols:
            ecg_cleaned = ecg_cleaned[cols[0]].values
        else:
            raise ValueError(
                "NeuroKit error: ecg_delineate(): Wrong input, we couldn't extract"
                "cleaned signal."
            )
    elif isinstance(ecg_cleaned, dict):
        # Epochs dict (of DataFrames): flatten to a single continuous signal.
        # NOTE(review): the loop rebinds ``ecg_cleaned`` to an ndarray on the
        # first key, so a second iteration would index an array with an epoch
        # key — verify behavior for multi-epoch dicts.
        for i in ecg_cleaned:
            cols = [col for col in ecg_cleaned[i].columns if "ECG_Clean" in col]
            if cols:
                signals = epochs_to_df(ecg_cleaned)
                ecg_cleaned = signals[cols[0]].values
            else:
                raise ValueError(
                    "NeuroKit error: ecg_delineate(): Wrong input, we couldn't extract"
                    "cleaned signal."
                )
    elif isinstance(ecg_cleaned, pd.Series):
        ecg_cleaned = ecg_cleaned.values

    # Sanitize input for rpeaks: detect them if absent, unwrap info dicts.
    if rpeaks is None:
        _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
        rpeaks = rpeaks["ECG_R_Peaks"]

    if isinstance(rpeaks, dict):
        rpeaks = rpeaks["ECG_R_Peaks"]

    method = method.lower()  # remove capitalised letters
    if method in ["peak", "peaks", "derivative", "gradient"]:
        waves = _ecg_delineator_peak(
            ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate
        )
    elif method in ["cwt", "continuous wavelet transform"]:
        waves = _ecg_delineator_cwt(
            ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate
        )
    elif method in ["dwt", "discrete wavelet transform"]:
        waves = _dwt_ecg_delineator(ecg_cleaned, rpeaks, sampling_rate=sampling_rate)
    else:
        raise ValueError(
            "NeuroKit error: ecg_delineate(): 'method' should be one of 'peak',"
            "'cwt' or 'dwt'."
        )

    # Ensure that all indices are not larger than ECG signal indices
    # NOTE(review): only the LAST sample of each wave list is bounds-checked
    # here; earlier out-of-range entries (if any) would pass through.
    for _, value in waves.items():
        if value[-1] >= len(ecg_cleaned):
            value[-1] = np.nan

    # Remove NaN in Peaks, Onsets, and Offsets before building marker columns.
    waves_noNA = waves.copy()
    for feature in waves_noNA.keys():
        waves_noNA[feature] = [
            int(x) for x in waves_noNA[feature] if ~np.isnan(x) and x > 0
        ]

    instant_peaks = signal_formatpeaks(waves_noNA, desired_length=len(ecg_cleaned))
    signals = instant_peaks

    # Keep NaN placeholders (but drop non-positive indices) in the returned
    # info dict. NOTE(review): ``x is np.nan`` is an identity check — it only
    # keeps NaNs that are the exact ``np.nan`` singleton.
    waves_sanitized = {}
    for feature, values in waves.items():
        waves_sanitized[feature] = [x for x in values if x > 0 or x is np.nan]

    if show is True:
        _ecg_delineate_plot(
            ecg_cleaned,
            rpeaks=rpeaks,
            signals=signals,
            signal_features_type=show_type,
            sampling_rate=sampling_rate,
            **kwargs
        )

    if check is True:
        waves_sanitized = _ecg_delineate_check(waves_sanitized, rpeaks)

    return signals, waves_sanitized
# =============================================================================
# WAVELET METHOD (DWT)
# =============================================================================
def _dwt_resample_points(peaks, sampling_rate, desired_sampling_rate):
"""Resample given points to a different sampling rate."""
if isinstance(
peaks, np.ndarray
): # peaks are passed in from previous processing steps
# Prevent overflow by converting to np.int64 (peaks might be passed in containing np.int32).
peaks = peaks.astype(dtype=np.int64)
elif isinstance(peaks, list): # peaks returned from internal functions
# Cannot be converted to int since list might contain np.nan. Automatically cast to np.float64 if list contains np.nan.
peaks = np.array(peaks)
peaks_resample = peaks * desired_sampling_rate / sampling_rate
peaks_resample = [
np.nan if np.isnan(x) else int(x) for x in peaks_resample.tolist()
]
return peaks_resample
def _dwt_ecg_delineator(ecg, rpeaks, sampling_rate, analysis_sampling_rate=2000):
    """Delinate ecg signal using discrete wavelet transforms.

    Q and S peaks are found per-heartbeat with the peak-based helpers; all
    other landmarks (P/T peaks, their onsets/offsets, QRS onsets/offsets) are
    found on the signal resampled to ``analysis_sampling_rate`` and then
    mapped back to the original sampling rate.

    Parameters
    ----------
    ecg : Union[list, np.array, pd.Series]
        The cleaned ECG channel as returned by `ecg_clean()`.
    rpeaks : Union[list, np.array, pd.Series]
        The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info dictionary
        returned by `ecg_findpeaks()`.
    sampling_rate : int
        The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
    analysis_sampling_rate : int
        The sampling frequency for analysis (in Hz, i.e., samples/second).

    Returns
    --------
    dict
        Dictionary of the points.

    """
    # No dwt defined method for Q and S peak
    # Adopting manual method from "peak" method
    qpeaks = []
    speaks = []
    heartbeats = ecg_segment(ecg, rpeaks, sampling_rate=sampling_rate)
    for i, rpeak in enumerate(rpeaks):
        heartbeat = heartbeats[str(i + 1)]
        # Get index of R peaks (first strictly positive time of the segment)
        R = heartbeat.index.get_loc(
            np.min(heartbeat.index.values[heartbeat.index.values > 0])
        )
        # Q wave
        Q_index, Q = _ecg_delineator_peak_Q(rpeak, heartbeat, R)
        qpeaks.append(Q_index)
        # S wave
        S_index, S = _ecg_delineator_peak_S(rpeak, heartbeat)
        speaks.append(S_index)

    # dwt to delineate tp waves, onsets, offsets and qrs ontsets and offsets
    # (performed at the fixed analysis rate; results are resampled back below)
    ecg = signal_resample(
        ecg, sampling_rate=sampling_rate, desired_sampling_rate=analysis_sampling_rate
    )
    dwtmatr = _dwt_compute_multiscales(ecg, 9)

    # # only for debugging
    # for idx in [0, 1, 2, 3]:
    #     plt.plot(dwtmatr[idx + 3], label=f'W[{idx}]')
    # plt.plot(ecg, '--')
    # plt.legend()
    # plt.grid(True)
    # plt.show()

    rpeaks_resampled = _dwt_resample_points(
        rpeaks, sampling_rate, analysis_sampling_rate
    )
    qpeaks_resampled = _dwt_resample_points(
        qpeaks, sampling_rate, analysis_sampling_rate
    )

    tpeaks, ppeaks = _dwt_delineate_tp_peaks(
        ecg, rpeaks_resampled, dwtmatr, sampling_rate=analysis_sampling_rate
    )
    qrs_onsets, qrs_offsets = _dwt_delineate_qrs_bounds(
        rpeaks_resampled,
        dwtmatr,
        ppeaks,
        tpeaks,
        qpeaks_resampled,
        sampling_rate=analysis_sampling_rate,
    )
    ponsets, poffsets = _dwt_delineate_tp_onsets_offsets(
        ppeaks, rpeaks_resampled, dwtmatr, sampling_rate=analysis_sampling_rate
    )
    # T-wave boundaries use wider/stronger onset parameters than P-wave ones.
    tonsets, toffsets = _dwt_delineate_tp_onsets_offsets(
        tpeaks,
        rpeaks_resampled,
        dwtmatr,
        sampling_rate=analysis_sampling_rate,
        onset_weight=0.6,
        duration_onset=0.6,
    )

    # Map every analysis-rate landmark back to the caller's sampling rate
    # (Q and S peaks were computed at the original rate already).
    return dict(
        ECG_P_Peaks=_dwt_resample_points(
            ppeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_P_Onsets=_dwt_resample_points(
            ponsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_P_Offsets=_dwt_resample_points(
            poffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_Q_Peaks=qpeaks,
        ECG_R_Onsets=_dwt_resample_points(
            qrs_onsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_R_Offsets=_dwt_resample_points(
            qrs_offsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_S_Peaks=speaks,
        ECG_T_Peaks=_dwt_resample_points(
            tpeaks, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_T_Onsets=_dwt_resample_points(
            tonsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
        ECG_T_Offsets=_dwt_resample_points(
            toffsets, analysis_sampling_rate, desired_sampling_rate=sampling_rate
        ),
    )
def _dwt_adjust_parameters(rpeaks, sampling_rate, duration=None, target=None):
    """Adapt DWT parameters to the recording's heart rate and sampling rate.

    With ``target="degree"``, return an integer wavelet-scale offset; with
    ``target="duration"``, return ``duration`` stretched by the beat period
    (rounded to 3 decimals). Any other target returns ``None``.
    """
    median_rate = np.median(signal_rate(peaks=rpeaks, sampling_rate=sampling_rate))
    if target == "degree":
        # Higher sampling rates and slower heart rates shift the relevant
        # wavelet scale upwards (reference: 250 Hz, 60 bpm).
        ratio = (sampling_rate / 250) / (median_rate / 60)
        return int(np.log2(ratio))
    if target == "duration":
        # Stretch/shrink the search window proportionally to the beat period.
        return np.round(duration * (60 / median_rate), 3)
def _dwt_delineate_tp_peaks(
    ecg,
    rpeaks,
    dwtmatr,
    sampling_rate=250,
    qrs_width=0.13,
    p2r_duration=0.2,
    rt_duration=0.25,
    degree_tpeak=3,
    degree_ppeak=2,
    epsilon_T_weight=0.25,
    epsilon_P_weight=0.02,
):
    """Delineate T peaks (after each R peak) and P peaks (before each R peak)
    from the multiscale DWT coefficients.

    Parameters
    ----------
    ecg : Union[list, np.array, pd.Series]
        The cleaned ECG channel as returned by `ecg_clean()`.
    rpeaks : Union[list, np.array, pd.Series]
        The samples at which R-peaks occur. Accessible with the key "ECG_R_Peaks" in the info dictionary
        returned by `ecg_findpeaks()`.
    dwtmatr : np.array
        Output of `_dwt_compute_multiscales()`. Multiscales of wavelet transform.
    sampling_rate : int
        The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
    qrs_width : int
        Approximate duration of qrs in seconds. Default to 0.13 seconds.
    p2r_duration : int
        Approximate duration from P peaks to R peaks in seconds.
    rt_duration : int
        Approximate duration from R peaks to T peaks in seconds.
    degree_tpeak : int
        Wavelet transform of scales 2**3 (base scale used for T peaks).
    degree_ppeak : int
        Wavelet transform of scales 2**2 (base scale used for P peaks).
    epsilon_T_weight : int
        Epsilon of RMS value of wavelet transform. Appendix (A.3).
    epsilon_P_weight : int
        Epsilon of RMS value of wavelet transform. Appendix (A.4).

    Returns
    -------
    tpeaks, ppeaks : list
        One entry per R peak; np.nan where no peak could be delineated.

    """
    srch_bndry = int(0.5 * qrs_width * sampling_rate)
    degree_add = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
    # sanitize search duration by HR
    p2r_duration = _dwt_adjust_parameters(
        rpeaks, sampling_rate, duration=p2r_duration, target="duration"
    )
    rt_duration = _dwt_adjust_parameters(
        rpeaks, sampling_rate, duration=rt_duration, target="duration"
    )

    tpeaks = []
    for rpeak_ in rpeaks:
        if np.isnan(rpeak_):
            tpeaks.append(np.nan)
            continue
        # search for T peaks from R peaks
        srch_idx_start = rpeak_ + srch_bndry
        srch_idx_end = rpeak_ + 2 * int(rt_duration * sampling_rate)
        dwt_local = dwtmatr[degree_tpeak + degree_add, srch_idx_start:srch_idx_end]
        if len(dwt_local) == 0:
            tpeaks.append(np.nan)
            continue
        # Threshold proportional to the local RMS of the coefficients.
        height = epsilon_T_weight * np.sqrt(np.mean(np.square(dwt_local)))
        ecg_local = ecg[srch_idx_start:srch_idx_end]
        peaks, __ = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
        peaks = list(
            filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks)
        )  # pylint: disable=W0640
        if dwt_local[0] > 0:  # just append
            peaks = [0] + peaks

        # detect morphology: a positive-to-negative pair of wavelet extrema
        # brackets a physiological peak at its zero-crossing.
        candidate_peaks = []
        candidate_peaks_scores = []
        for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
            correct_sign = (
                dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
            )  # pylint: disable=R1716
            if correct_sign:
                idx_zero = (
                    signal_zerocrossings(dwt_local[idx_peak : idx_peak_nxt + 1])[0]
                    + idx_peak
                )
                # This is the score assigned to each peak. The peak with the highest score will be
                # selected.
                score = ecg_local[idx_zero] - (
                    float(idx_zero) / sampling_rate - (rt_duration - 0.5 * qrs_width)
                )
                candidate_peaks.append(idx_zero)
                candidate_peaks_scores.append(score)

        if not candidate_peaks:
            tpeaks.append(np.nan)
            continue

        tpeaks.append(
            candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start
        )

    ppeaks = []
    for rpeak in rpeaks:
        if np.isnan(rpeak):
            ppeaks.append(np.nan)
            continue

        # search for P peaks from Rpeaks
        srch_idx_start = rpeak - 2 * int(p2r_duration * sampling_rate)
        srch_idx_end = rpeak - srch_bndry
        dwt_local = dwtmatr[degree_ppeak + degree_add, srch_idx_start:srch_idx_end]
        if len(dwt_local) == 0:
            ppeaks.append(np.nan)
            continue
        height = epsilon_P_weight * np.sqrt(np.mean(np.square(dwt_local)))
        ecg_local = ecg[srch_idx_start:srch_idx_end]
        peaks, __ = scipy.signal.find_peaks(np.abs(dwt_local), height=height)
        peaks = list(
            filter(lambda p: np.abs(dwt_local[p]) > 0.025 * max(dwt_local), peaks)
        )
        if dwt_local[0] > 0:  # just append
            peaks = [0] + peaks

        # detect morphology (same min/max pair + zero-crossing logic as above)
        candidate_peaks = []
        candidate_peaks_scores = []
        for idx_peak, idx_peak_nxt in zip(peaks[:-1], peaks[1:]):
            correct_sign = (
                dwt_local[idx_peak] > 0 and dwt_local[idx_peak_nxt] < 0
            )  # pylint: disable=R1716
            if correct_sign:
                idx_zero = (
                    signal_zerocrossings(dwt_local[idx_peak : idx_peak_nxt + 1])[0]
                    + idx_peak
                )
                # This is the score assigned to each peak. The peak with the highest score will be
                # selected.
                score = ecg_local[idx_zero] - abs(
                    float(idx_zero) / sampling_rate - p2r_duration
                )  # Minus p2r because of the srch_idx_start
                candidate_peaks.append(idx_zero)
                candidate_peaks_scores.append(score)

        if not candidate_peaks:
            ppeaks.append(np.nan)
            continue

        ppeaks.append(
            candidate_peaks[np.argmax(candidate_peaks_scores)] + srch_idx_start
        )

    return tpeaks, ppeaks
def _dwt_delineate_tp_onsets_offsets(
    peaks,
    rpeaks,
    dwtmatr,
    sampling_rate=250,
    duration_onset=0.3,
    duration_offset=0.3,
    onset_weight=0.4,
    offset_weight=0.4,
    degree_onset=2,
    degree_offset=2,
):
    """Delineate onsets and offsets of T/P waves from the DWT coefficients.

    Parameters
    ----------
    peaks : list
        T-peak or P-peak sample indices; may contain ``np.nan`` for beats
        where no peak was found.
    rpeaks : Union[list, np.array]
        R-peak locations, used to scale the search windows by heart rate.
    dwtmatr : np.array
        Multiscale wavelet transforms from ``_dwt_compute_multiscales()``.
    sampling_rate : int
        Sampling frequency (in Hz).
    duration_onset, duration_offset : float
        Base search-window durations (seconds) before/after each peak.
    onset_weight, offset_weight : float
        Fractions of the boundary slope-peak magnitude used as thresholds.
    degree_onset, degree_offset : int
        Base wavelet degree (scale 2**degree) used for the slope search.

    Returns
    -------
    onsets, offsets : list
        One entry per peak; ``np.nan`` when no boundary could be found.
    """
    # sanitize search duration by HR
    duration_onset = _dwt_adjust_parameters(
        rpeaks, sampling_rate, duration=duration_onset, target="duration"
    )
    duration_offset = _dwt_adjust_parameters(
        rpeaks, sampling_rate, duration=duration_offset, target="duration"
    )
    degree = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
    onsets = []
    offsets = []
    for i in range(len(peaks)):  # pylint: disable=C0200
        # look for onsets
        # Robustness fix: the previous ``srch_idx_start is np.nan`` identity
        # check only catches the exact float NaN singleton; a NaN produced by
        # arithmetic (or an np.float64 NaN) slipped through and crashed the
        # slice below. Test the peak itself with np.isnan() instead.
        if np.isnan(peaks[i]):
            onsets.append(np.nan)
            continue
        srch_idx_start = peaks[i] - int(duration_onset * sampling_rate)
        srch_idx_end = peaks[i]
        dwt_local = dwtmatr[degree_onset + degree, srch_idx_start:srch_idx_end]
        onset_slope_peaks, __ = scipy.signal.find_peaks(dwt_local)
        if len(onset_slope_peaks) == 0:
            onsets.append(np.nan)
            continue
        # Onset = last point before the slope maximum where the coefficient
        # drops below a fraction of that maximum.
        epsilon_onset = onset_weight * dwt_local[onset_slope_peaks[-1]]
        if not (dwt_local[: onset_slope_peaks[-1]] < epsilon_onset).any():
            onsets.append(np.nan)
            continue
        candidate_onsets = np.where(dwt_local[: onset_slope_peaks[-1]] < epsilon_onset)[
            0
        ]
        onsets.append(candidate_onsets[-1] + srch_idx_start)

        # # only for debugging
        # events_plot([candidate_onsets, onset_slope_peaks], dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.show()

    for i in range(len(peaks)):  # pylint: disable=C0200
        # look for offsets (mirror image of the onset search)
        if np.isnan(peaks[i]):
            offsets.append(np.nan)
            continue
        srch_idx_start = peaks[i]
        srch_idx_end = peaks[i] + int(duration_offset * sampling_rate)
        dwt_local = dwtmatr[degree_offset + degree, srch_idx_start:srch_idx_end]
        offset_slope_peaks, __ = scipy.signal.find_peaks(-dwt_local)
        if len(offset_slope_peaks) == 0:
            offsets.append(np.nan)
            continue
        epsilon_offset = -offset_weight * dwt_local[offset_slope_peaks[0]]
        if not (-dwt_local[offset_slope_peaks[0] :] < epsilon_offset).any():
            offsets.append(np.nan)
            continue
        candidate_offsets = (
            np.where(-dwt_local[offset_slope_peaks[0] :] < epsilon_offset)[0]
            + offset_slope_peaks[0]
        )
        offsets.append(candidate_offsets[0] + srch_idx_start)

        # # only for debugging
        # events_plot([candidate_offsets, offset_slope_peaks], dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.show()

    return onsets, offsets
def _dwt_delineate_qrs_bounds(
    rpeaks, dwtmatr, ppeaks, tpeaks, qpeaks, sampling_rate=250
):
    """Delineate QRS onsets (searched between P and Q peaks) and QRS offsets
    (searched between R and T peaks) from the DWT coefficients.

    Parameters
    ----------
    rpeaks, ppeaks, tpeaks, qpeaks : list
        Per-beat sample indices; entries may be ``np.nan``.
    dwtmatr : np.array
        Multiscale wavelet transforms from ``_dwt_compute_multiscales()``.
    sampling_rate : int
        Sampling frequency (in Hz), used to adapt the wavelet degree.

    Returns
    -------
    onsets, offsets : list
        One entry per beat; ``np.nan`` when no boundary could be found.
    """
    degree = _dwt_adjust_parameters(rpeaks, sampling_rate, target="degree")
    onsets = []
    for i in range(len(qpeaks)):  # pylint: disable=C0200
        # look for onsets
        srch_idx_start = ppeaks[i]
        srch_idx_end = qpeaks[i]
        # Robustness fix: ``x is np.nan`` is an identity check and only
        # catches the exact float NaN singleton; NaNs of other types
        # (e.g. np.float64) slipped through and crashed the slice below.
        if np.isnan(srch_idx_start) or np.isnan(srch_idx_end):
            onsets.append(np.nan)
            continue
        dwt_local = dwtmatr[2 + degree, srch_idx_start:srch_idx_end]
        onset_slope_peaks, __ = scipy.signal.find_peaks(-dwt_local)
        if len(onset_slope_peaks) == 0:
            onsets.append(np.nan)
            continue
        # Onset = last point before the (negative) slope extremum where the
        # coefficient magnitude falls below half that extremum.
        epsilon_onset = 0.5 * -dwt_local[onset_slope_peaks[-1]]
        if not (-dwt_local[: onset_slope_peaks[-1]] < epsilon_onset).any():
            onsets.append(np.nan)
            continue
        candidate_onsets = np.where(
            -dwt_local[: onset_slope_peaks[-1]] < epsilon_onset
        )[0]
        onsets.append(candidate_onsets[-1] + srch_idx_start)

        # only for debugging
        # import neurokit as nk
        # events_plot(candidate_onsets, -dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.legend()
        # plt.show()

    offsets = []
    for i in range(len(rpeaks)):  # pylint: disable=C0200
        # look for offsets
        srch_idx_start = rpeaks[i]
        srch_idx_end = tpeaks[i]
        # Same NaN-safety fix as the onset loop above.
        if np.isnan(srch_idx_start) or np.isnan(srch_idx_end):
            offsets.append(np.nan)
            continue
        dwt_local = dwtmatr[2 + degree, srch_idx_start:srch_idx_end]
        onset_slope_peaks, __ = scipy.signal.find_peaks(dwt_local)
        if len(onset_slope_peaks) == 0:
            offsets.append(np.nan)
            continue
        epsilon_offset = 0.5 * dwt_local[onset_slope_peaks[0]]
        if not (dwt_local[onset_slope_peaks[0] :] < epsilon_offset).any():
            offsets.append(np.nan)
            continue
        candidate_offsets = (
            np.where(dwt_local[onset_slope_peaks[0] :] < epsilon_offset)[0]
            + onset_slope_peaks[0]
        )
        offsets.append(candidate_offsets[0] + srch_idx_start)

        # # only for debugging
        # events_plot(candidate_offsets, dwt_local)
        # plt.plot(ecg[srch_idx_start: srch_idx_end], '--', label='ecg')
        # plt.legend()
        # plt.show()

    return onsets, offsets
def _dwt_compute_multiscales(ecg: np.ndarray, max_degree):
"""Return multiscales wavelet transforms."""
def _apply_H_filter(signal_i, power=0):
zeros = np.zeros(2**power - 1)
timedelay = 2**power
banks = np.r_[
1.0 / 8,
zeros,
3.0 / 8,
zeros,
3.0 / 8,
zeros,
1.0 / 8,
]
signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 2 steps
return signal_f
def _apply_G_filter(signal_i, power=0):
zeros = np.zeros(2**power - 1)
timedelay = 2**power
banks = np.r_[2, zeros, -2]
signal_f = scipy.signal.convolve(signal_i, banks, mode="full")
signal_f[:-timedelay] = signal_f[timedelay:] # timeshift: 1 step
return signal_f
dwtmatr = []
intermediate_ret = np.array(ecg)
for deg in range(max_degree):
S_deg = _apply_G_filter(intermediate_ret, power=deg)
T_deg = _apply_H_filter(intermediate_ret, power=deg)
dwtmatr.append(S_deg)
intermediate_ret = np.array(T_deg)
dwtmatr = [
arr[: len(ecg)] for arr in dwtmatr
] # rescale transforms to the same length
return np.array(dwtmatr)
# =============================================================================
# WAVELET METHOD (CWT)
# =============================================================================
def _ecg_delineator_cwt(ecg, rpeaks=None, sampling_rate=1000):
    """Delineate P/QRS/T landmarks with the continuous wavelet transform.

    T/P peaks and all onsets/offsets come from the CWT helpers; Q and S peaks
    reuse the peak-based delineator on each segmented heartbeat.
    """
    # Wavelet-based landmarks.
    tpeaks, ppeaks = _peaks_delineator(ecg, rpeaks, sampling_rate=sampling_rate)
    qrs_onsets, qrs_offsets = _onset_offset_delineator(
        ecg, rpeaks, peak_type="rpeaks", sampling_rate=sampling_rate
    )
    p_onsets, p_offsets = _onset_offset_delineator(
        ecg, ppeaks, peak_type="ppeaks", sampling_rate=sampling_rate
    )
    t_onsets, t_offsets = _onset_offset_delineator(
        ecg, tpeaks, peak_type="tpeaks", sampling_rate=sampling_rate
    )

    # No CWT-defined method for Q and S peaks: fall back to the manual
    # "peak" method applied per heartbeat.
    heartbeats = ecg_segment(ecg, rpeaks, sampling_rate=sampling_rate)
    q_peaks = []
    s_peaks = []
    for i, rpeak in enumerate(rpeaks):
        heartbeat = heartbeats[str(i + 1)]
        # Index of the R peak within this heartbeat (first positive time).
        R = heartbeat.index.get_loc(
            np.min(heartbeat.index.values[heartbeat.index.values > 0])
        )
        q_peaks.append(_ecg_delineator_peak_Q(rpeak, heartbeat, R)[0])
        s_peaks.append(_ecg_delineator_peak_S(rpeak, heartbeat)[0])

    # Return info dictionary
    return {
        "ECG_P_Onsets": p_onsets,
        "ECG_P_Peaks": ppeaks,
        "ECG_P_Offsets": p_offsets,
        "ECG_Q_Peaks": q_peaks,
        "ECG_R_Onsets": qrs_onsets,
        "ECG_R_Offsets": qrs_offsets,
        "ECG_S_Peaks": s_peaks,
        "ECG_T_Onsets": t_onsets,
        "ECG_T_Peaks": tpeaks,
        "ECG_T_Offsets": t_offsets,
    }
# Internals
# ---------------------
def _onset_offset_delineator(ecg, peaks, peak_type="rpeaks", sampling_rate=1000):
    """Find the onset and offset of each wave with the CWT (Martinez, 2004).

    Parameters
    ----------
    ecg : array
        The cleaned ECG signal.
    peaks : array
        Per-beat peak sample indices (may contain np.nan).
    peak_type : str
        One of "rpeaks", "ppeaks", "tpeaks"; selects the wavelet scale and
        the thresholds.
    sampling_rate : int
        Sampling frequency (in Hz).

    Returns
    -------
    onsets, offsets : np.array (dtype=object)
        One entry per peak; np.nan when no boundary could be found.
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError:
        raise ImportError(
            "NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this",
            "method to run. ",
            "Please install it first (`pip install PyWavelets`).",
        )
    # first derivative of the Gaussian signal
    scales = np.array([1, 2, 4, 8, 16])
    cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)

    half_wave_width = int(0.1 * sampling_rate)  # NEED TO CHECK
    onsets = []
    offsets = []
    for index_peak in peaks:
        # find onset
        if np.isnan(index_peak):
            onsets.append(np.nan)
            offsets.append(np.nan)
            continue
        if peak_type == "rpeaks":
            search_window = cwtmatr[2, index_peak - half_wave_width : index_peak]
            prominence = 0.20 * max(search_window)
            height = 0.0
            wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
                search_window, height=height, prominence=prominence
            )

        elif peak_type in ["tpeaks", "ppeaks"]:
            search_window = -cwtmatr[4, index_peak - half_wave_width : index_peak]

            prominence = 0.10 * max(search_window)
            height = 0.0
            wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
                search_window, height=height, prominence=prominence
            )

        if len(wt_peaks) == 0:
            # print("Fail to find onset at index: %d", index_peak)
            onsets.append(np.nan)
        else:
            # The last peak is nfirst in (Martinez, 2004)
            nfirst = wt_peaks[-1] + index_peak - half_wave_width
            # Bug fix: ``epsilon_onset`` was left undefined when peak_type is
            # "rpeaks" and the last wavelet peak height is not positive,
            # raising UnboundLocalError below. Default to 0, mirroring the
            # offset search.
            epsilon_onset = 0
            if peak_type == "rpeaks":
                if wt_peaks_data["peak_heights"][-1] > 0:
                    epsilon_onset = 0.05 * wt_peaks_data["peak_heights"][-1]
            elif peak_type == "ppeaks":
                epsilon_onset = 0.50 * wt_peaks_data["peak_heights"][-1]
            elif peak_type == "tpeaks":
                epsilon_onset = 0.25 * wt_peaks_data["peak_heights"][-1]
            leftbase = wt_peaks_data["left_bases"][-1] + index_peak - half_wave_width
            if peak_type == "rpeaks":
                candidate_onsets = (
                    np.where(cwtmatr[2, nfirst - 100 : nfirst] < epsilon_onset)[0]
                    + nfirst
                    - 100
                )
            elif peak_type in ["tpeaks", "ppeaks"]:
                candidate_onsets = (
                    np.where(-cwtmatr[4, nfirst - 100 : nfirst] < epsilon_onset)[0]
                    + nfirst
                    - 100
                )
            candidate_onsets = candidate_onsets.tolist() + [leftbase]

            if len(candidate_onsets) == 0:
                onsets.append(np.nan)
            else:
                onsets.append(max(candidate_onsets))

        # find offset
        if peak_type == "rpeaks":
            search_window = -cwtmatr[2, index_peak : index_peak + half_wave_width]
            prominence = 0.50 * max(search_window)
            wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
                search_window, height=height, prominence=prominence
            )

        elif peak_type in ["tpeaks", "ppeaks"]:
            search_window = cwtmatr[4, index_peak : index_peak + half_wave_width]
            prominence = 0.10 * max(search_window)
            wt_peaks, wt_peaks_data = scipy.signal.find_peaks(
                search_window, height=height, prominence=prominence
            )

        if len(wt_peaks) == 0:
            # print("Fail to find offsets at index: %d", index_peak)
            offsets.append(np.nan)
        else:
            nlast = wt_peaks[0] + index_peak
            epsilon_offset = 0  # Default value
            if peak_type == "rpeaks":
                if wt_peaks_data["peak_heights"][0] > 0:
                    epsilon_offset = 0.125 * wt_peaks_data["peak_heights"][0]
            elif peak_type == "ppeaks":
                epsilon_offset = 0.9 * wt_peaks_data["peak_heights"][0]
            elif peak_type == "tpeaks":
                epsilon_offset = 0.4 * wt_peaks_data["peak_heights"][0]
            rightbase = wt_peaks_data["right_bases"][0] + index_peak
            if peak_type == "rpeaks":
                candidate_offsets = (
                    np.where((-cwtmatr[2, nlast : nlast + 100]) < epsilon_offset)[0]
                    + nlast
                )
            elif peak_type in ["tpeaks", "ppeaks"]:
                candidate_offsets = (
                    np.where((cwtmatr[4, nlast : nlast + 100]) < epsilon_offset)[0]
                    + nlast
                )
            candidate_offsets = candidate_offsets.tolist() + [rightbase]

            if len(candidate_offsets) == 0:
                offsets.append(np.nan)
            else:
                offsets.append(min(candidate_offsets))

    onsets = np.array(onsets, dtype="object")
    offsets = np.array(offsets, dtype="object")
    return onsets, offsets
def _peaks_delineator(ecg, rpeaks, sampling_rate=1000):
    """Locate T and P peaks between consecutive R peaks using the CWT.

    For each R-R interval, significant peaks of the scale-16 wavelet
    coefficients are collected and refined by ``_find_tppeaks()``; the first
    element of each refined group is taken as the T peak and the last as the
    P peak.

    Returns
    -------
    tpeaks, ppeaks : np.array (dtype=object)
        One entry per R-R interval; entries may be np.nan.

    NOTE(review): raises ValueError if fewer than two R peaks are given
    (``zip(*[])`` cannot unpack an empty group list).
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError:
        raise ImportError(
            "NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this method to run. ",
            "Please install it first (`pip install PyWavelets`).",
        )
    # first derivative of the Gaussian signal
    scales = np.array([1, 2, 4, 8, 16])
    cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)

    qrs_duration = 0.1

    search_boundary = int(0.9 * qrs_duration * sampling_rate / 2)
    significant_peaks_groups = []
    for i in range(len(rpeaks) - 1):
        # search for T peaks and P peaks from R peaks
        start = rpeaks[i] + search_boundary
        end = rpeaks[i + 1] - search_boundary
        search_window = cwtmatr[4, start:end]
        # Threshold proportional to the local RMS of the coefficients.
        height = 0.25 * np.sqrt(np.mean(np.square(search_window)))
        peaks_tp, heights_tp = scipy.signal.find_peaks(
            np.abs(search_window), height=height
        )
        peaks_tp = peaks_tp + rpeaks[i] + search_boundary
        # set threshold for heights of peaks to find significant peaks in wavelet
        threshold = 0.125 * max(search_window)
        significant_peaks_tp = []
        significant_peaks_tp = [
            peaks_tp[j]
            for j in range(len(peaks_tp))
            if heights_tp["peak_heights"][j] > threshold
        ]

        significant_peaks_groups.append(
            _find_tppeaks(ecg, significant_peaks_tp, sampling_rate=sampling_rate)
        )

    # First refined peak of each interval = T peak; last = P peak.
    tpeaks, ppeaks = zip(*[(g[0], g[-1]) for g in significant_peaks_groups])

    tpeaks = np.array(tpeaks, dtype="object")
    ppeaks = np.array(ppeaks, dtype="object")
    return tpeaks, ppeaks
def _find_tppeaks(ecg, keep_tp, sampling_rate=1000):
    """Refine candidate T/P peak positions from significant wavelet peaks.

    A physiological peak lies near the zero-crossing of the scale-16 CWT
    between a negative wavelet extremum and the following positive one; the
    final position is the ECG maximum in a small window around that crossing.
    Returns ``[np.nan]`` when no candidate survives.
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError:
        raise ImportError(
            "NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for this method to run. ",
            "Please install it first (`pip install PyWavelets`).",
        )
    # first derivative of the Gaussian signal
    scales = np.array([1, 2, 4, 8, 16])
    cwtmatr, __ = pywt.cwt(ecg, scales, "gaus1", sampling_period=1.0 / sampling_rate)

    max_search_duration = 0.05
    half_window = int(max_search_duration * sampling_rate)
    tppeaks = []
    for idx_cur, idx_nxt in zip(keep_tp[:-1], keep_tp[1:]):
        # A minimum-to-maximum transition of the wavelet marks a candidate.
        if not (cwtmatr[4, idx_cur] < 0 and cwtmatr[4, idx_nxt] > 0):
            continue
        idx_zero = (
            signal_zerocrossings(cwtmatr[4, :][idx_cur : idx_nxt + 1])[0] + idx_cur
        )
        # Refine: take the ECG maximum around the zero-crossing.
        window_start = idx_zero - half_window
        local_max = np.argmax(ecg[window_start : idx_zero + half_window])
        tppeaks.append(local_max + window_start)
    if len(tppeaks) == 0:
        tppeaks = [np.nan]
    return tppeaks
# =============================================================================
# PEAK METHOD
# =============================================================================
def _ecg_delineator_peak(ecg, rpeaks=None, sampling_rate=1000):
    """Peak-based delineation: derive P, Q, S, T peaks plus P-onsets and
    T-offsets from each segmented heartbeat."""
    heartbeats = ecg_segment(ecg, rpeaks, sampling_rate)

    info = {
        "ECG_P_Peaks": [],
        "ECG_Q_Peaks": [],
        "ECG_S_Peaks": [],
        "ECG_T_Peaks": [],
        "ECG_P_Onsets": [],
        "ECG_T_Offsets": [],
    }
    for i, rpeak in enumerate(rpeaks):
        heartbeat = heartbeats[str(i + 1)]

        # Index of the R peak inside this heartbeat (first positive time).
        R = heartbeat.index.get_loc(
            np.min(heartbeat.index.values[heartbeat.index.values > 0])
        )

        # Peaks (each helper depends on the previous landmark).
        Q_index, Q = _ecg_delineator_peak_Q(rpeak, heartbeat, R)
        P_index, P = _ecg_delineator_peak_P(rpeak, heartbeat, R, Q)
        S_index, S = _ecg_delineator_peak_S(rpeak, heartbeat)
        T_index, T = _ecg_delineator_peak_T(rpeak, heartbeat, R, S)

        info["ECG_P_Peaks"].append(P_index)
        info["ECG_Q_Peaks"].append(Q_index)
        info["ECG_S_Peaks"].append(S_index)
        info["ECG_T_Peaks"].append(T_index)

        # Onsets/Offsets.
        info["ECG_P_Onsets"].append(
            _ecg_delineator_peak_P_onset(rpeak, heartbeat, R, P)
        )
        info["ECG_T_Offsets"].append(
            _ecg_delineator_peak_T_offset(rpeak, heartbeat, R, T)
        )

    # Return info dictionary
    return info
# Internal
# --------------------------
def _ecg_delineator_peak_Q(rpeak, heartbeat, R):
    """Find the Q peak: the trough closest to R on the left-hand side.

    Returns ``(absolute_sample_index, within-segment index)`` or
    ``(np.nan, None)`` when no trough is found.
    """
    left = heartbeat[:0]  # samples before the R peak (negative times)
    troughs = signal_findpeaks(
        -1 * left["Signal"],
        height_min=0.05 * (left["Signal"].max() - left["Signal"].min()),
    )
    if len(troughs["Peaks"]) == 0:
        return np.nan, None
    Q = troughs["Peaks"][-1]  # the trough nearest to R
    return rpeak - (R - Q), Q
def _ecg_delineator_peak_P(rpeak, heartbeat, R, Q):
    """Find the P peak: the highest bump left of the Q wave.

    Returns ``(absolute_sample_index, within-segment index)`` or
    ``(np.nan, None)`` when Q is missing or no bump is found.
    """
    if Q is None:
        return np.nan, None

    left = heartbeat.iloc[:Q]  # everything before the Q wave
    bumps = signal_findpeaks(
        left["Signal"],
        height_min=0.05 * (left["Signal"].max() - left["Signal"].min()),
    )
    if len(bumps["Peaks"]) == 0:
        return np.nan, None
    P = bumps["Peaks"][np.argmax(bumps["Height"])]  # highest bump
    return rpeak - (R - P), P
def _ecg_delineator_peak_S(rpeak, heartbeat):
    """Find the S-wave: the first trough following the R-peak."""
    right_side = heartbeat[0:]  # Samples from the R-peak onwards
    candidates = signal_findpeaks(
        -right_side["Signal"],
        height_min=0.05 * (right_side["Signal"].max() - right_side["Signal"].min()),
    )
    if len(candidates["Peaks"]) == 0:
        return np.nan, None
    S = candidates["Peaks"][0]  # Most left-hand trough, i.e. closest to R
    return rpeak + S, S
def _ecg_delineator_peak_T(rpeak, heartbeat, R, S):
    """Find the T-wave: the highest peak located right of the S-wave."""
    if S is None:
        return np.nan, None
    right_of_S = heartbeat.iloc[R + S :]
    candidates = signal_findpeaks(
        right_of_S["Signal"],
        height_min=0.05 * (right_of_S["Signal"].max() - right_of_S["Signal"].min()),
    )
    if len(candidates["Peaks"]) == 0:
        return np.nan, None
    T = S + candidates["Peaks"][np.argmax(candidates["Height"])]  # Highest candidate
    return rpeak + T, T
def _ecg_delineator_peak_P_onset(rpeak, heartbeat, R, P):
    """Estimate the P-wave onset: the point of maximal curvature left of the P-peak."""
    if P is None:
        return np.nan
    left_of_P = heartbeat.iloc[:P]
    try:
        # NOTE(review): size=R / 10 is a float; signal_smooth may raise TypeError,
        # in which case the raw segment is used instead — kept as in the original.
        signal = signal_smooth(left_of_P["Signal"].values, size=R / 10)
    except TypeError:
        signal = left_of_P["Signal"]
    if len(signal) < 2:
        return np.nan
    # Onset taken at the maximum of the second derivative (curvature)
    P_onset = np.argmax(np.gradient(np.gradient(signal)))
    return rpeak - (R - P_onset)
def _ecg_delineator_peak_T_offset(rpeak, heartbeat, R, T):
    """Estimate the T-wave offset: the point of maximal curvature right of the T-peak."""
    if T is None:
        return np.nan
    right_of_T = heartbeat.iloc[R + T :]
    try:
        # NOTE(review): size=R / 10 is a float; signal_smooth may raise TypeError,
        # in which case the raw segment is used instead — kept as in the original.
        signal = signal_smooth(right_of_T["Signal"].values, size=R / 10)
    except TypeError:
        signal = right_of_T["Signal"]
    if len(signal) < 2:
        return np.nan
    # Offset taken at the maximum of the second derivative (curvature)
    T_offset = np.argmax(np.gradient(np.gradient(signal)))
    return rpeak + T + T_offset
# =============================================================================
# Internals
# =============================================================================
def _ecg_delineate_plot(
    ecg_signal,
    rpeaks=None,
    signals=None,
    signal_features_type="all",
    sampling_rate=1000,
    window_start=-0.35,
    window_end=0.55,
):
    """Plot overlapping heartbeats with the delineated features marked.

    Parameters
    ----------
    ecg_signal : array-like
        The ECG signal that was delineated.
    rpeaks : dict or array-like
        R-peak locations (in samples), or a dict containing an ``"ECG_R_Peaks"`` key.
    signals : DataFrame
        One-hot feature columns as returned by ``ecg_delineate()``.
    signal_features_type : str
        Which features to mark: ``"peaks"``, ``"bounds_R"``, ``"bounds_T"``,
        ``"bounds_P"`` or ``"all"``. (Fixed: the previous docstring advertised
        ``'bound_R'`` etc., which silently matched nothing and crashed on
        ``pd.concat`` of an empty list.)
    sampling_rate : int
        Sampling rate of ``ecg_signal`` (in Hz).
    window_start, window_end : float
        Epoch window (in seconds) around each R-peak.

    Returns
    -------
    matplotlib.figure.Figure

    Examples
    --------
    import neurokit2 as nk
    ecg_signal = nk.data("ecg_100hz")
    _, rpeaks = nk.ecg_peaks(ecg_signal, sampling_rate=1000)
    signals, waves = nk.ecg_delineate(ecg_signal, rpeaks, sampling_rate=1000)
    # Mark peaks only
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='peaks', sampling_rate=1000)
    # Mark boundaries of R / P / T waves, or everything
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='bounds_R', sampling_rate=1000)
    _ecg_delineate_plot(ecg_signal, rpeaks=rpeaks, signals=signals,
                        signal_features_type='all', sampling_rate=1000)
    """
    data = pd.DataFrame({"Signal": list(ecg_signal)})
    data = pd.concat([data, signals], axis=1)
    # Try retrieving right column
    if isinstance(rpeaks, dict):
        rpeaks = rpeaks["ECG_R_Peaks"]
    # Segment the signal around the R-peaks
    epochs = epochs_create(
        data,
        events=rpeaks,
        sampling_rate=sampling_rate,
        epochs_start=window_start,
        epochs_end=window_end,
    )
    data = epochs_to_df(epochs)
    # Substrings identifying the feature columns to display for each feature type
    keywords_by_type = {
        "peaks": ["Peak"],
        "bounds_R": ["ECG_R_Onsets", "ECG_R_Offsets"],
        "bounds_T": ["ECG_T_Onsets", "ECG_T_Offsets"],
        "bounds_P": ["ECG_P_Onsets", "ECG_P_Offsets"],
        "all": ["Peak", "Onset", "Offset"],
    }
    keywords = keywords_by_type.get(signal_features_type, [])
    dfs = [
        data[feature]
        for feature in data.columns.values
        if any(keyword in str(feature) for keyword in keywords)
    ]
    features = pd.concat(dfs, axis=1)
    # Plot all heartbeats faintly, then overlay the selected feature markers
    fig, ax = plt.subplots()
    data.Label = data.Label.astype(int)
    for label in data.Label.unique():
        epoch_data = data[data.Label == label]
        ax.plot(epoch_data.Time, epoch_data.Signal, color="grey", alpha=0.2)
    for feature_type in features.columns.values:
        event_data = data[data[feature_type] == 1.0]
        ax.scatter(
            event_data.Time, event_data.Signal, label=feature_type, alpha=0.5, s=200
        )
    ax.legend()
    return fig
def _ecg_delineate_check(waves, rpeaks):
    """Replace delineated features with np.nan when their standardized distance
    from the R-peak exceeds 3 (likely misdetections).

    Parameters
    ----------
    waves : dict
        Mapping of feature names to lists of sample indices.
    rpeaks : array-like
        R-peak locations (in samples).

    Returns
    -------
    dict
        Same structure as ``waves``, with outlying values replaced by np.nan.
    """
    df = pd.DataFrame.from_dict(waves)
    features_columns = df.columns
    df = pd.concat([df, pd.DataFrame({"ECG_R_Peaks": rpeaks})], axis=1)
    # Compute the absolute standardized distance of every feature to the R-peak.
    # (The original called this inside a loop over columns, redundantly
    # recomputing all distance columns on each iteration.)
    df = _calculate_abs_z(df, features_columns)
    # Invalidate features lying more than 3 SDs away from their R-peak.
    # Boolean-mask .loc assignment instead of chained indexing (df[col][i] = ...),
    # which pandas warns about and may silently fail on a copy.
    for col in features_columns:
        df.loc[df["Dist_R_" + col] > 3, col] = np.nan
    # Return df without distance columns
    return df[features_columns].to_dict("list")
def _calculate_abs_z(df, columns):
    """For every column in `columns`, add a "Dist_R_<col>" column holding the
    absolute standardized distance between that feature and the R-peaks."""
    for feature in columns:
        deviation = df[feature].sub(df["ECG_R_Peaks"], axis=0)
        df["Dist_R_" + feature] = np.abs(standardize(deviation))
    return df
| 44,881 | 34.9056 | 127 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_simulate.py | # -*- coding: utf-8 -*-
import math
import numpy as np
import pandas as pd
import scipy
from ..misc import check_random_state, check_random_state_children
from ..signal import signal_distort, signal_resample
def ecg_simulate(
    duration=10,
    length=None,
    sampling_rate=1000,
    noise=0.01,
    heart_rate=70,
    heart_rate_std=1,
    method="ecgsyn",
    random_state=None,
    random_state_distort="spawn",
    **kwargs,
):
    """**Simulate an ECG/EKG signal**
    Generate an artificial (synthetic) ECG signal of a given duration and sampling rate using either
    the ECGSYN dynamical model (McSharry et al., 2003) or a simpler model based on Daubechies
    wavelets to roughly approximate cardiac cycles.
    Parameters
    ----------
    duration : int
        Desired recording length in seconds.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    length : int
        The desired length of the signal (in samples).
    noise : float
        Noise level (amplitude of the laplace noise).
    heart_rate : int
        Desired simulated heart rate (in beats per minute). The default is 70. Note that for the
        ``"ECGSYN"`` method, random fluctuations are to be expected to mimick a real heart rate.
        These fluctuations can cause some slight discrepancies between the requested heart rate and
        the empirical heart rate, especially for shorter signals.
    heart_rate_std : int
        Desired heart rate standard deviation (beats per minute).
    method : str
        The model used to generate the signal. Can be ``"simple"`` for a simulation based on
        Daubechies wavelets that roughly approximates a single cardiac cycle. If ``"ecgsyn"``
        (default), will use the model desbribed `McSharry et al. (2003)
        <https://physionet.org/content/ecgsyn/>`_. If
        ``"multileads"``, will return a DataFrame containing 12-leads (see `12-leads ECG simulation
        <https://neuropsychology.github.io/NeuroKit/examples/ecg_generate_12leads/ecg_generate_12leads.html>`_).
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.
    random_state_distort : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        Random state to be used to distort the signal. If ``"legacy"``, use the same random state used to
        generate the signal (discouraged as it creates dependent random streams). If ``"spawn"``, spawn
        independent children random number generators from the random_state argument. If any of the other types,
        generate independent children random number generators from the random_state_distort provided (this
        allows generating multiple version of the same signal distorted by different random noise realizations).
    **kwargs
        Other keywords parameters for ECGSYN algorithm, such as ``"lfhfratio"``, ``"ti"``, ``"ai"``, ``"bi"``.
    Returns
    -------
    array
        Vector containing the ECG signal (or a DataFrame of 12 leads for ``"multileads"``).
    See Also
    --------
    .rsp_simulate, .eda_simulate, .ppg_simulate, .emg_simulate
    References
    -----------
    * McSharry, P. E., Clifford, G. D., Tarassenko, L., & Smith, L. A. (2003). A dynamical model for
      generating synthetic electrocardiogram signals. IEEE transactions on biomedical engineering,
      50 (3), 289-294.
    """
    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)
    # Generate number of samples automatically if length is unspecified
    if length is None:
        length = duration * sampling_rate
    # ... or derive the duration if only the length was provided
    if duration is None:
        duration = length / sampling_rate
    # Run appropriate method
    if method.lower() in ["simple", "daubechies"]:
        signals = _ecg_simulate_daubechies(
            duration=duration, length=length, sampling_rate=sampling_rate, heart_rate=heart_rate
        )
    else:
        # Approximate number of beats needed to cover the requested duration
        approx_number_beats = int(np.round(duration * (heart_rate / 60)))
        if method.lower() in ["multi", "multilead", "multileads", "multichannel"]:
            # Gamma, a (12,5) matrix to modify the five waves' amplitudes of 12 leads (P, Q, R, S, T)
            gamma = np.array(
                [
                    [1, 0.1, 1, 1.2, 1],
                    [2, 0.2, 0.2, 0.2, 3],
                    [1, -0.1, -0.8, -1.1, 2.5],
                    [-1, -0.05, -0.8, -0.5, -1.2],
                    [0.05, 0.05, 1, 1, 1],
                    [1, -0.05, -0.1, -0.1, 3],
                    [-0.5, 0.05, 0.2, 0.5, 1],
                    [0.05, 0.05, 1.3, 2.5, 2],
                    [1, 0.05, 1, 2, 1],
                    [1.2, 0.05, 1, 2, 2],
                    [1.5, 0.1, 0.8, 1, 2],
                    [1.8, 0.05, 0.5, 0.1, 2],
                ]
            )
            signals, results = _ecg_simulate_ecgsyn(
                sfecg=sampling_rate,
                N=approx_number_beats,
                hrmean=heart_rate,
                hrstd=heart_rate_std,
                sfint=sampling_rate,
                gamma=gamma,
                rng=rng,
                **kwargs,
            )
        else:
            # Single-lead ECGSYN: identity amplitude modifiers for the 5 waves
            signals, results = _ecg_simulate_ecgsyn(
                sfecg=sampling_rate,
                N=approx_number_beats,
                hrmean=heart_rate,
                hrstd=heart_rate_std,
                sfint=sampling_rate,
                gamma=np.ones((1, 5)),
                rng=rng,
                **kwargs,
            )
        # Cut to match expected length
        for i in range(len(signals)):
            signals[i] = signals[i][0:length]
    # Add random noise
    if noise > 0:
        # Seed for random noise (independent stream per lead)
        random_state_distort = check_random_state_children(random_state, random_state_distort, n_children=len(signals))
        # Call signal_distort on each signal
        for i in range(len(signals)):
            signals[i] = signal_distort(
                signals[i],
                sampling_rate=sampling_rate,
                noise_amplitude=noise,
                noise_frequency=[5, 10, 100],
                noise_shape="laplace",
                random_state=random_state_distort[i],
                silent=True,
            )
    # Format: single lead -> 1D array; multilead -> DataFrame with standard 12-lead names
    if len(signals) == 1:
        ecg = signals[0]
    else:
        ecg = pd.DataFrame(
            np.array(signals).T,
            columns=["I", "II", "III", "aVR", "aVL", "aVF", "V1", "V2", "V3", "V4", "V5", "V6"],
        )
    return ecg
# =============================================================================
# Daubechies
# =============================================================================
def _ecg_simulate_daubechies(duration=10, length=None, sampling_rate=1000, heart_rate=70):
    """Generate a synthetic ECG by tiling a Daubechies wavelet, which roughly
    approximates a single cardiac cycle.

    Based on `this script <https://github.com/diarmaidocualain/ecg_simulation>`_.
    """
    # One cardiac cycle: a Daubechies wavelet followed by a resting gap
    cardiac = np.concatenate([scipy.signal.daub(10), np.zeros(10)])
    # Number of beats needed to cover the capture period, tiled and amplified
    num_heart_beats = int(duration * heart_rate / 60)
    ecg = np.tile(cardiac, num_heart_beats) * 10
    # Resample to the requested length / sampling rate
    ecg = signal_resample(
        ecg,
        sampling_rate=int(len(ecg) / 10),
        desired_length=length,
        desired_sampling_rate=sampling_rate,
    )
    # Wrapped in a list for consistency with the multichannel ecgsyn output
    return [ecg]
# =============================================================================
# ECGSYN
# =============================================================================
def _ecg_simulate_ecgsyn(
    sfecg=256,
    N=256,
    hrmean=60,
    hrstd=1,
    lfhfratio=0.5,
    sfint=512,
    ti=(-70, -15, 0, 15, 100),
    ai=(1.2, -5, 30, -7.5, 0.75),
    bi=(0.25, 0.1, 0.1, 0.1, 0.4),
    gamma=np.ones((1, 5)),
    rng=None,
    **kwargs,
):
    """
    This function is a python translation of the matlab script by `McSharry & Clifford (2013)
    <https://physionet.org/content/ecgsyn>`_.
    Parameters
    ----------
    sfecg:
        ECG sampling frequency [256 Hertz]
    N:
        approximate number of heart beats [256]
    Anoise:
        Additive uniformly distributed measurement noise [0 mV]
    hrmean:
        Mean heart rate [60 beats per minute]
    hrstd:
        Standard deviation of heart rate [1 beat per minute]
    lfhfratio:
        LF/HF ratio [0.5]
    sfint:
        Internal sampling frequency [256 Hertz]
    ti
        angles of extrema (in degrees). Order of extrema is (P Q R S T).
    ai
        z-position of extrema.
    bi
        Gaussian width of peaks.
    gamma
        This determines the different leads (one row of 5 wave-amplitude
        modifiers per lead).
    rng
        Random number generator used for the RR-interval process.
    Returns
    -------
    (signals, results)
        `signals` is a list with one rescaled ECG trace (numpy array) per lead;
        `results` holds the corresponding `scipy.integrate.solve_ivp` outputs.
    """
    # Accept tuples/lists for the wave parameters
    if not isinstance(ti, np.ndarray):
        ti = np.array(ti)
    if not isinstance(ai, np.ndarray):
        ai = np.array(ai)
    if not isinstance(bi, np.ndarray):
        bi = np.array(bi)
    # Convert extrema angles from degrees to radians
    ti = ti * np.pi / 180
    # Adjust extrema parameters for mean heart rate
    hrfact = np.sqrt(hrmean / 60)
    hrfact2 = np.sqrt(hrfact)
    bi = hrfact * bi
    ti = np.array([hrfact2, hrfact, 1, hrfact, hrfact2]) * ti
    # Check that sfint is an integer multiple of sfecg
    q = np.round(sfint / sfecg)
    qd = sfint / sfecg
    if q != qd:
        raise ValueError(
            "Internal sampling frequency (sfint) must be an integer multiple of the ECG sampling frequency"
            " (sfecg). Your current choices are: sfecg = "
            + str(sfecg)
            + " and sfint = "
            + str(sfint)
            + "."
        )
    # Define frequency parameters for rr process
    # flo and fhi correspond to the Mayer waves and respiratory rate respectively
    flo = 0.1
    fhi = 0.25
    flostd = 0.01
    fhistd = 0.01
    # Calculate time scales for rr and total output
    sfrr = 1
    trr = 1 / sfrr
    rrmean = 60 / hrmean
    # Next power of two large enough to cover N beats (note: n is a float here)
    n = 2 ** (np.ceil(np.log2(N * rrmean / trr)))
    rr0 = _ecg_simulate_rrprocess(flo, fhi, flostd, fhistd, lfhfratio, hrmean, hrstd, sfrr, n, rng)
    # Upsample rr time series from 1 Hz to sfint Hz
    rr = signal_resample(rr0, sampling_rate=1, desired_sampling_rate=sfint)
    # Make the rrn time series: piecewise-constant RR value over each beat interval
    dt = 1 / sfint
    rrn = np.zeros(len(rr))
    tecg = 0
    i = 0
    while i < len(rr):
        tecg += rr[i]
        ip = int(np.round(tecg / dt))
        rrn[i:ip] = rr[i]
        i = ip
    Nt = ip
    # Integrate system using fourth order Runge-Kutta
    x0 = np.array([1, 0, 0.04])
    # tspan is a tuple of (min, max) which defines the lower and upper bound of t in ODE
    # t_eval is the list of desired t points for ODE
    # in Matlab, ode45 can accepts both tspan and t_eval in one argument
    Tspan = [0, (Nt - 1) * dt]
    t_eval = np.linspace(0, (Nt - 1) * dt, Nt)
    # Initialize results containers
    results = []
    signals = []
    # Multichannel modification (#625):
    # --------------------------------------------------
    # Loop over the twelve leads modifying ai in the loop to generate each lead's data
    # Because these are all starting at the same position, it may make sense to grab a random
    # segment within the series to simulate random phase and to forget the initial conditions
    for lead in range(len(gamma)):
        # as passing extra arguments to derivative function is not supported yet in solve_ivp
        # lambda function is used to serve the purpose
        # (`lead` is captured late but the lambda is only evaluated within this
        # iteration, so the capture is safe)
        result = scipy.integrate.solve_ivp(
            lambda t, x: _ecg_simulate_derivsecgsyn(t, x, rrn, ti, sfint, gamma[lead] * ai, bi),
            Tspan,
            x0,
            t_eval=t_eval,
        )
        results.append(result)  # store results
        X0 = result.y  # get signal
        # downsample to required sfecg
        X = X0[:, np.arange(0, X0.shape[1], q).astype(int)]
        # Scale signal to lie between -0.4 and 1.2 mV
        z = X[2, :].copy()
        zmin = np.min(z)
        zmax = np.max(z)
        zrange = zmax - zmin
        z = (z - zmin) * 1.6 / zrange - 0.4
        signals.append(z)
    return signals, results
def _ecg_simulate_derivsecgsyn(t, x, rr, ti, sfint, ai, bi):
ta = math.atan2(x[1], x[0])
r0 = 1
a0 = 1.0 - np.sqrt(x[0] ** 2 + x[1] ** 2) / r0
ip = np.floor(t * sfint).astype(int)
w0 = 2 * np.pi / rr[min(ip, len(rr) - 1)]
# w0 = 2*np.pi/rr[ip[ip <= np.max(rr)]]
fresp = 0.25
zbase = 0.005 * np.sin(2 * np.pi * fresp * t)
dx1dt = a0 * x[0] - w0 * x[1]
dx2dt = a0 * x[1] + w0 * x[0]
# matlab rem and numpy rem are different
# dti = np.remainder(ta - ti, 2*np.pi)
dti = (ta - ti) - np.round((ta - ti) / 2 / np.pi) * 2 * np.pi
dx3dt = -np.sum(ai * dti * np.exp(-0.5 * (dti / bi) ** 2)) - 1 * (x[2] - zbase)
dxdt = np.array([dx1dt, dx2dt, dx3dt])
return dxdt
def _ecg_simulate_rrprocess(
flo=0.1,
fhi=0.25,
flostd=0.01,
fhistd=0.01,
lfhfratio=0.5,
hrmean=60,
hrstd=1,
sfrr=1,
n=256,
rng=None,
):
w1 = 2 * np.pi * flo
w2 = 2 * np.pi * fhi
c1 = 2 * np.pi * flostd
c2 = 2 * np.pi * fhistd
sig2 = 1
sig1 = lfhfratio
rrmean = 60 / hrmean
rrstd = 60 * hrstd / (hrmean * hrmean)
df = sfrr / n
w = np.arange(n) * 2 * np.pi * df
dw1 = w - w1
dw2 = w - w2
Hw1 = sig1 * np.exp(-0.5 * (dw1 / c1) ** 2) / np.sqrt(2 * np.pi * c1 ** 2)
Hw2 = sig2 * np.exp(-0.5 * (dw2 / c2) ** 2) / np.sqrt(2 * np.pi * c2 ** 2)
Hw = Hw1 + Hw2
Hw0 = np.concatenate((Hw[0 : int(n / 2)], Hw[int(n / 2) - 1 :: -1]))
Sw = (sfrr / 2) * np.sqrt(Hw0)
ph0 = 2 * np.pi * rng.uniform(size=int(n / 2 - 1))
ph = np.concatenate([[0], ph0, [0], -np.flipud(ph0)])
SwC = Sw * np.exp(1j * ph)
x = (1 / n) * np.real(np.fft.ifft(SwC))
xstd = np.std(x)
ratio = rrstd / xstd
return rrmean + x * ratio # Return RR
| 15,524 | 31.891949 | 119 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_eventrelated.py | # -*- coding: utf-8 -*-
from warnings import warn
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning
def ecg_eventrelated(epochs, silent=False):
    """**Event-related analysis of ECG**

    Performs event-related ECG analysis on epochs containing ECG signals,
    computing per epoch: heart-rate descriptives (``ECG_Rate_Max``, ``_Min``,
    ``_Mean``, ``_SD``, the times of the extrema, and quadratic-trend
    parameters ``ECG_Rate_Trend_Linear/_Quadratic/_R2``), the cardiac phase at
    event onset (``ECG_Phase_Atrial``, ``ECG_Phase_Ventricular`` with their
    completion indices), and the mean signal quality (``ECG_Quality_Mean``).

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial, usually obtained via
        ``epochs_create()``, or a DataFrame containing all epochs, usually
        obtained via ``epochs_to_df()``.
    silent : bool
        If ``True``, silence possible warnings.

    Returns
    -------
    DataFrame
        One row per epoch, indicated by the `Label` column (or by the `Index`
        column if `Label` is absent), with the analyzed ECG features.

    See Also
    --------
    .events_find, .epochs_create, .bio_process
    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="ecg", silent=silent)
    # Extract the features of every epoch
    output = {}
    for key, epoch in epochs.items():
        features = {}
        features = _eventrelated_rate(epoch, features, var="ECG_Rate")  # Heart rate dynamics
        features = _ecg_eventrelated_phase(epoch, features)  # Cardiac phase at onset
        features = _ecg_eventrelated_quality(epoch, features)  # Signal quality
        features = _eventrelated_addinfo(epoch, features)  # Condition/label metadata
        output[key] = features
    # Format as a DataFrame
    return _eventrelated_sanitizeoutput(output)
# =============================================================================
# Internals
# =============================================================================
def _ecg_eventrelated_phase(epoch, output={}):
# Sanitize input
if "ECG_Phase_Atrial" not in epoch or "ECG_Phase_Ventricular" not in epoch:
warn(
"Input does not have an `ECG_Phase_Artrial` or `ECG_Phase_Ventricular` column."
" Will not indicate whether event onset concurs with cardiac phase.",
category=NeuroKitWarning,
)
return output
# Indication of atrial systole
output["ECG_Phase_Atrial"] = epoch["ECG_Phase_Atrial"][epoch.index > 0].iloc[0]
output["ECG_Phase_Completion_Atrial"] = epoch["ECG_Phase_Completion_Atrial"][
epoch.index > 0
].iloc[0]
# Indication of ventricular systole
output["ECG_Phase_Ventricular"] = epoch["ECG_Phase_Ventricular"][epoch.index > 0].iloc[0]
output["ECG_Phase_Completion_Ventricular"] = epoch["ECG_Phase_Completion_Ventricular"][
epoch.index > 0
].iloc[0]
return output
def _ecg_eventrelated_quality(epoch, output={}):
# Sanitize input
colnames = epoch.columns.values
if len([i for i in colnames if "ECG_Quality" in i]) == 0:
warn(
"Input does not have an `ECG_Quality` column."
" Quality of the signal is not computed.",
category=NeuroKitWarning,
)
return output
# Average signal quality over epochs
output["ECG_Quality_Mean"] = epoch["ECG_Quality"].mean()
return output
| 5,943 | 34.171598 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_rsp.py | from ..signal import signal_filter
def ecg_rsp(ecg_rate, sampling_rate=1000, method="vangent2019"):
    """**ECG-Derived Respiration (EDR)**

    Extract a proxy of the respiratory signal by band-pass filtering the heart
    rate. Available methods (and their filters):

    * ``"vangent2019"``: 0.1-0.4 Hz, order 2 (van Gent et al., 2019).
    * ``"soni2019"``: 0-0.5 Hz, order 6 (Soni & Muniyandi, 2019).
    * ``"charlton2016"``: 0.066-1 Hz, order 6 (Charlton et al., 2016).
    * ``"sarkar2015"``: 0.1-0.7 Hz, order 6 (Sarkar, Bhattacherjee & Pal, 2015).

    .. warning::

        Help is required to double-check whether the implementation match the papers.

    Parameters
    ----------
    ecg_rate : array
        The heart rate signal as obtained via ``ecg_rate()``.
    sampling_rate : int
        The sampling frequency of the signal that contains the R-peaks (in Hz,
        i.e., samples/second). Defaults to 1000Hz.
    method : str
        One of ``"vangent2019"`` (default), ``"soni2019"``, ``"charlton2016"``
        or ``"sarkar2015"``.

    Returns
    -------
    array
        A Numpy array containing the derived respiratory rate.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported methods.
    """
    # TODO: It would be interesting to run a study in which we modulate the different filtering
    # parameters and compute the difference with the real RSP signal, and then suggest the optimal
    # filtering parameters. If you're interested in helping out let us know!
    method = method.lower()
    # Filter cut-offs (Hz) and order per method, as described in each paper
    filter_params = {
        # https://www.researchgate.net/publication/304221962_Extraction_of_respiration_signal_from_ECG_for_respiratory_rate_estimation  # noqa: E501
        "sarkar2015": {"lowcut": 0.1, "highcut": 0.7, "order": 6},
        # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5390977/#__ffn_sectitle
        "charlton2016": {"lowcut": 4 / 60, "highcut": 60 / 60, "order": 6},
        # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6329220/
        "soni2019": {"highcut": 0.5, "order": 6},
        # https://github.com/paulvangentcom/heartrate_analysis_python/blob/1597e8c0b2602829428b22d8be88420cd335e939/heartpy/analysis.py#L541  # noqa: E501
        "vangent2019": {"lowcut": 0.1, "highcut": 0.4, "order": 2},
    }
    if method not in filter_params:
        raise ValueError(
            "`method` should be one of 'sarkar2015', 'charlton2016', 'soni2019' or "
            "'vangent2019'."
        )
    return signal_filter(ecg_rate, sampling_rate, **filter_params[method])
| 4,608 | 39.429825 | 153 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_findpeaks.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal
import scipy.stats
from ..signal import (signal_findpeaks, signal_plot, signal_sanitize,
signal_smooth, signal_zerocrossings)
def ecg_findpeaks(ecg_cleaned, sampling_rate=1000, method="neurokit", show=False, **kwargs):
    """**Locate R-peaks**

    Low-level function used by :func:`ecg_peaks` to identify R-peaks in an ECG signal using a
    different set of algorithms. Use the main function and see its documentation for details.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        See :func:`ecg_peaks()`.
    sampling_rate : int
        See :func:`ecg_peaks()`.
    method : string
        See :func:`ecg_peaks()`.
    show : bool
        If ``True``, will return a plot to visualizing the thresholds used in the algorithm.
        Useful for debugging.
    **kwargs
        Additional keyword arguments, usually specific for each ``method``.

    Returns
    -------
    info : dict
        A dictionary containing additional information, in this case the
        samples at which R-peaks occur, accessible with the key ``"ECG_R_Peaks"``.

    Raises
    ------
    ValueError
        If ``method`` is not one of the implemented algorithms.

    See Also
    --------
    ecg_peaks, .signal_fixpeaks
    """
    # If a DataFrame is passed, retrieve the relevant column, preferring the
    # cleaned signal, then the raw one. As before, a KeyError is raised if
    # neither "ECG_Clean", "ECG_Raw" nor "ECG" is present.
    if isinstance(ecg_cleaned, pd.DataFrame):
        for column in ("ECG_Clean", "ECG_Raw"):
            if column in ecg_cleaned:
                ecg_cleaned = ecg_cleaned[column]
                break
        else:
            ecg_cleaned = ecg_cleaned["ECG"]
    # Sanitize input
    ecg_cleaned = signal_sanitize(ecg_cleaned)
    method = method.lower()  # remove capitalised letters
    # Run peak detection algorithm. (The previous try/except that merely
    # re-raised the same ValueError was a no-op and has been removed.)
    func = _ecg_findpeaks_findmethod(method)
    rpeaks = func(ecg_cleaned, sampling_rate=sampling_rate, show=show, **kwargs)
    # Prepare output.
    return {"ECG_R_Peaks": rpeaks}
# Returns the peak detector function by name
def _ecg_findpeaks_findmethod(method):
if method in ["nk", "nk2", "neurokit", "neurokit2"]:
return _ecg_findpeaks_neurokit
elif method in ["pantompkins", "pantompkins1985"]:
return _ecg_findpeaks_pantompkins
elif method in ["nabian", "nabian2018"]:
return _ecg_findpeaks_nabian2018
elif method in ["gamboa2008", "gamboa"]:
return _ecg_findpeaks_gamboa
elif method in ["ssf", "slopesumfunction"]:
return _ecg_findpeaks_ssf
elif method in ["zong", "zong2003", "wqrs"]:
return _ecg_findpeaks_zong
elif method in ["hamilton", "hamilton2002"]:
return _ecg_findpeaks_hamilton
elif method in ["christov", "christov2004"]:
return _ecg_findpeaks_christov
elif method in ["engzee", "engzee2012", "engzeemod", "engzeemod2012"]:
return _ecg_findpeaks_engzee
elif method in ["elgendi", "elgendi2010"]:
return _ecg_findpeaks_elgendi
elif method in ["kalidas2017", "swt", "kalidas", "kalidastamil", "kalidastamil2017"]:
return _ecg_findpeaks_kalidas
elif method in ["martinez2004", "martinez"]:
return _ecg_findpeaks_WT
elif method in ["rodrigues2020", "rodrigues2021", "rodrigues", "asi"]:
return _ecg_findpeaks_rodrigues
elif method in ["vg", "vgraph", "koka2022"]:
return _ecg_findpeaks_vgraph
elif method in ["promac", "all"]:
return _ecg_findpeaks_promac
else:
raise ValueError(f"NeuroKit error: ecg_findpeaks(): '{method}' not implemented.")
# =============================================================================
# Probabilistic Methods-Agreement via Convolution (ProMAC)
# =============================================================================
def _ecg_findpeaks_promac(
    signal,
    sampling_rate=1000,
    show=False,
    promac_methods=[
        "neurokit",
        "gamboa",
        "ssf",
        "zong",
        "engzee",
        "elgendi",
        "kalidas",
        "martinez",
        "rodrigues",
    ],
    threshold=0.33,
    gaussian_sd=100,
    **kwargs,
):
    """Probabilistic Methods-Agreement via Convolution (ProMAC).

    Runs several R-peak detectors, represents each detected peak as a Gaussian
    probability bump, sums the bumps across methods, and keeps the locations
    where the (rescaled) agreement exceeds ``threshold``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The (cleaned) ECG channel, e.g. as returned by `ecg_clean()`.
    sampling_rate : int
        The sampling frequency of `signal` (in Hz, i.e., samples/second). Defaults to 1000.
    show : bool
        If True, plots the signal alongside the agreement curve. Useful for debugging.
    promac_methods : list of string
        The R-peak detection algorithms to combine. See the list of acceptable
        algorithms for the 'ecg_peaks' function.
    threshold : float
        Fraction of the maximum agreement below which candidate peaks are discarded.
    gaussian_sd : int
        Standard deviation (in milliseconds) of the Gaussian representing each
        peak's location probability, usually taken as the size of QRS complexes.
    """
    agreement = np.zeros(len(signal))
    failures = []  # Collects the methods that could not be run
    for name in [m.lower() for m in promac_methods]:
        try:
            detector = _ecg_findpeaks_findmethod(name)
            agreement = _ecg_findpeaks_promac_addconvolve(
                signal, sampling_rate, agreement, detector, gaussian_sd=gaussian_sd, **kwargs
            )
        except ValueError:
            failures.append(f"Method '{name}' is not valid.")
        except Exception as error:
            failures.append(f"{name} error: {error}")
    # Rescale the agreement curve to [0, 1]
    agreement = agreement / np.max(agreement)
    convoluted = agreement.copy()
    # Zero-out everything below threshold, then find peaks in what remains
    agreement[agreement < threshold] = 0
    peaks = signal_findpeaks(agreement, height_min=threshold)["Peaks"]
    if show is True:
        signal_plot(pd.DataFrame({"ECG": signal, "Convoluted": convoluted}), standardize=True)
        [
            plt.axvline(x=peak, color="red", linestyle="--") for peak in peaks
        ]  # pylint: disable=W0106
    # I am not sure if mandatory print the best option
    if failures:  # empty?
        print(failures)
    return peaks
# _ecg_findpeaks_promac_addmethod + _ecg_findpeaks_promac_convolve
# Joining them makes parameters exposition more consistent
def _ecg_findpeaks_promac_addconvolve(signal, sampling_rate, x, fun, gaussian_sd=100, **kwargs):
    """Run one detector and add its Gaussian-smoothed peak train onto `x`."""
    detected = fun(signal, sampling_rate=sampling_rate, **kwargs)
    # Binary impulse train: 1 at each detected peak sample.
    impulses = np.zeros(len(signal))
    impulses[detected] = 1
    # The Gaussian SD corresponds to a typical adult QRS width (~100 ms),
    # converted here from milliseconds to samples.
    sd_samples = sampling_rate * gaussian_sd / 1000
    support = np.linspace(-sd_samples * 4, sd_samples * 4, num=int(sd_samples * 8))
    kernel = scipy.stats.norm.pdf(support, loc=0, scale=sd_samples)
    x += np.convolve(impulses, kernel, "same")
    return x
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_findpeaks_neurokit(
    signal,
    sampling_rate=1000,
    smoothwindow=0.1,
    avgwindow=0.75,
    gradthreshweight=1.5,
    minlenweight=0.4,
    mindelay=0.3,
    show=False,
):
    """QRS detection based on the smoothed absolute gradient of the signal.

    All tune-able parameters are specified as keyword arguments.
    The `signal` must be the highpass-filtered raw ECG with a lowcut of .5 Hz.

    Parameters
    ----------
    signal : array
        The filtered ECG signal.
    sampling_rate : int
        Sampling frequency (Hz).
    smoothwindow : float
        Size (seconds) of the boxcar kernel used to smooth the absolute gradient.
    avgwindow : float
        Size (seconds) of the boxcar kernel used to compute the local average of
        the smoothed gradient (the basis of the adaptive threshold).
    gradthreshweight : float
        Multiplier applied to the local average gradient to obtain the threshold.
    minlenweight : float
        QRS candidates shorter than `minlenweight` times the mean QRS length are discarded.
    mindelay : float
        Minimum delay (seconds) enforced between consecutive R-peaks.
    show : bool
        If True, plot signal, smoothed gradient and threshold (debugging aid).
    """
    if show is True:
        __, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
    # Compute the ECG's gradient as well as the gradient threshold. Run with
    # show=True in order to get an idea of the threshold.
    grad = np.gradient(signal)
    absgrad = np.abs(grad)
    smooth_kernel = int(np.rint(smoothwindow * sampling_rate))
    avg_kernel = int(np.rint(avgwindow * sampling_rate))
    smoothgrad = signal_smooth(absgrad, kernel="boxcar", size=smooth_kernel)
    avggrad = signal_smooth(smoothgrad, kernel="boxcar", size=avg_kernel)
    gradthreshold = gradthreshweight * avggrad
    mindelay = int(np.rint(sampling_rate * mindelay))
    if show is True:
        ax1.plot(signal)
        ax2.plot(smoothgrad)
        ax2.plot(gradthreshold)
    # Identify start and end of QRS complexes: rising/falling edges of the
    # regions where the smoothed gradient exceeds the adaptive threshold.
    qrs = smoothgrad > gradthreshold
    beg_qrs = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0]
    end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0]
    # Throw out QRS-ends that precede first QRS-start.
    end_qrs = end_qrs[end_qrs > beg_qrs[0]]
    # Identify R-peaks within QRS (ignore QRS that are too short).
    num_qrs = min(beg_qrs.size, end_qrs.size)
    min_len = np.mean(end_qrs[:num_qrs] - beg_qrs[:num_qrs]) * minlenweight
    peaks = [0]  # seeded with 0 to simplify the min-delay check; removed below
    for i in range(num_qrs):
        beg = beg_qrs[i]
        end = end_qrs[i]
        len_qrs = end - beg
        if len_qrs < min_len:
            continue
        if show is True:
            ax2.axvspan(beg, end, facecolor="m", alpha=0.5)
        # Find local maxima and their prominence within QRS.
        data = signal[beg:end]
        locmax, props = scipy.signal.find_peaks(data, prominence=(None, None))
        if locmax.size > 0:
            # Identify most prominent local maximum.
            peak = beg + locmax[np.argmax(props["prominences"])]
            # Enforce minimum delay between peaks.
            if peak - peaks[-1] > mindelay:
                peaks.append(peak)
    peaks.pop(0)  # drop the seed value
    if show is True:
        ax1.scatter(peaks, signal[peaks], c="r")
    peaks = np.asarray(peaks).astype(int)  # Convert to int
    return peaks
# =============================================================================
# Pan & Tompkins (1985)
# =============================================================================
def _ecg_findpeaks_pantompkins(signal, sampling_rate=1000, **kwargs):
    """Pan & Tompkins (1985) R-peak detector.

    Adapted from https://github.com/berndporr/py-ecg-detectors/

    - Pan, J., & Tompkins, W. J. (1985). A real-time QRS detection algorithm. IEEE transactions
      on biomedical engineering, (3), 230-236.
    """
    # Derivative followed by squaring emphasizes the steep QRS slopes.
    gradient = np.diff(signal)
    energy = gradient * gradient
    # Moving-window integration over a 120 ms window (typical QRS width).
    window = int(0.12 * sampling_rate)
    integrated = _ecg_findpeaks_MWA(energy, window)
    # Blank the first 200 ms, where the moving average is not yet reliable.
    integrated[: int(0.2 * sampling_rate)] = 0
    detected = _ecg_findpeaks_peakdetect(integrated, sampling_rate)
    return np.array(detected, dtype="int")
# ===========================================================================
# Nabian et al. (2018)
# ===========================================================================
def _ecg_findpeaks_nabian2018(signal, sampling_rate=1000, **kwargs):
    """R peak detection method by Nabian et al. (2018) inspired by the Pan-Tompkins algorithm.

    A sample is marked as an R-peak when it is the maximum of the +/- 400 ms
    window centered on it.

    - Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., Ostadabbas, S. (2018).
      An Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data.
      IEEE Journal of Translational Engineering in Health and Medicine, 6, 1-11.
    """
    window_size = int(0.4 * sampling_rate)
    peaks = np.zeros(len(signal))
    for i in range(1 + window_size, len(signal) - window_size):
        ecg_window = signal[i - window_size : i + window_size]
        rpeak = np.argmax(ecg_window)
        # `rpeak` indexes into the window; the corresponding signal index is
        # `i - window_size + rpeak`. The previous condition carried an extra
        # `- 1`, which marked the sample *before* each local maximum
        # (off-by-one); the peak itself is marked here.
        if i == (i - window_size + rpeak):
            peaks[i] = 1
    rpeaks = np.where(peaks == 1)[0]
    # min_distance = 200
    return rpeaks
# =============================================================================
# Hamilton (2002)
# =============================================================================
def _ecg_findpeaks_hamilton(signal, sampling_rate=1000, **kwargs):
    """Adaptive-threshold QRS detector.

    From https://github.com/berndporr/py-ecg-detectors/

    - Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
    """
    # Rectified derivative smoothed by an 80 ms moving average.
    diff = abs(np.diff(signal))
    b = np.ones(int(0.08 * sampling_rate))
    b = b / int(0.08 * sampling_rate)
    a = [1]
    ma = scipy.signal.lfilter(b, a, diff)
    ma[0 : len(b) * 2] = 0  # blank the filter warm-up region
    n_pks = []  # recent noise-peak amplitudes
    n_pks_ave = 0.0
    s_pks = []  # recent signal (QRS) peak amplitudes
    s_pks_ave = 0.0
    QRS = [0]  # seeded with 0 to simplify distance checks; removed before returning
    RR = []  # recent R-R intervals
    RR_ave = 0.0
    th = 0.0  # detection threshold, derived from the noise/signal averages
    i = 0
    idx = []  # sample positions of accepted QRS peaks (used below to slice `peaks`)
    peaks = []  # all local maxima of `ma`
    for i in range(len(ma)):  # pylint: disable=C0200,R1702
        if (
            i > 0 and i < len(ma) - 1 and ma[i - 1] < ma[i] and ma[i + 1] < ma[i]
        ):  # pylint: disable=R1716
            peak = i
            peaks.append(peak)
            # Accept as QRS if above threshold and past a 300 ms refractory period.
            if ma[peak] > th and (peak - QRS[-1]) > 0.3 * sampling_rate:
                QRS.append(peak)
                idx.append(peak)
                s_pks.append(ma[peak])
                # NOTE(review): this checks len(n_pks) but pops from s_pks —
                # looks like a typo (probably should be len(s_pks) > 8);
                # confirm against the py-ecg-detectors upstream implementation.
                if len(n_pks) > 8:
                    s_pks.pop(0)
                s_pks_ave = np.mean(s_pks)
                # Search back for a missed beat when the current interval
                # exceeds 1.5 times the average R-R interval.
                if RR_ave != 0.0 and QRS[-1] - QRS[-2] > 1.5 * RR_ave:
                    missed_peaks = peaks[idx[-2] + 1 : idx[-1]]
                    for missed_peak in missed_peaks:
                        if (
                            missed_peak - peaks[idx[-2]] > int(0.36 * sampling_rate)
                            and ma[missed_peak] > 0.5 * th
                        ):
                            QRS.append(missed_peak)
                            QRS.sort()
                            break
                if len(QRS) > 2:
                    RR.append(QRS[-1] - QRS[-2])
                    if len(RR) > 8:
                        RR.pop(0)
                    RR_ave = int(np.mean(RR))
            else:
                # Otherwise treat it as a noise peak and update the noise average.
                n_pks.append(ma[peak])
                if len(n_pks) > 8:
                    n_pks.pop(0)
                n_pks_ave = np.mean(n_pks)
            # Threshold sits 45% of the way from the noise average to the signal average.
            th = n_pks_ave + 0.45 * (s_pks_ave - n_pks_ave)
            i += 1  # NOTE(review): no-op — `i` is reassigned by the for-loop
    QRS.pop(0)  # remove the seed value
    QRS = np.array(QRS, dtype="int")
    return QRS
# =============================================================================
# Slope Sum Function (SSF) - Zong et al. (2003)
# =============================================================================
def _ecg_findpeaks_ssf(signal, sampling_rate=1000, threshold=20, before=0.03, after=0.01, **kwargs):
    """Slope Sum Function (SSF) segmenter.

    From https://github.com/PIA-
    Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L448.
    """
    # TODO: Doesn't really seems to work
    # Convert the search windows (before/after each slope event) to samples.
    win_before = int(before * sampling_rate)
    win_after = int(after * sampling_rate)
    n_samples = len(signal)
    # Squared negative slope: zero out rising segments, square the rest.
    slope = np.diff(signal)
    slope[slope >= 0] = 0
    slope = slope**2
    # Samples where the squared down-slope exceeds the threshold.
    (candidates,) = np.nonzero(slope > threshold)
    # Keep only the first sample of each run of consecutive candidates.
    gaps = np.diff(np.hstack(([0], candidates)))
    onsets = candidates[gaps > 1]
    # Refine each onset to the local maximum of the raw signal around it.
    found = set()
    for onset in onsets:
        start = max(onset - win_before, 0)
        stop = onset + win_after
        if stop > n_samples:
            continue
        found.add(np.argmax(signal[start:stop]) + start)
    return np.array(sorted(found), dtype="int")
# =============================================================================
# Zong (2003) - WQRS
# =============================================================================
def _ecg_findpeaks_zong(signal, sampling_rate=1000, cutoff=16, window=0.13, **kwargs):
    """Zong et al. (2003) wqrs detector, based on the curve-length transform.

    From https://github.com/berndporr/py-ecg-detectors/

    - Zong, W., Moody, G. B., & Jiang, D. (2003, September). A robust open-source algorithm to
      detect onset and duration of QRS complexes. In Computers in Cardiology, 2003 (pp. 737-740).
      IEEE.

    Parameters
    ----------
    signal : array
        ECG signal.
    sampling_rate : int
        Sampling frequency (Hz).
    cutoff : float
        Low-pass cutoff frequency (Hz) of the pre-filter.
    window : float
        Length (seconds) of the curve-length window.
    """
    # 1. Filter signal
    # TODO: Should remove this step? It's technically part of cleaning,
    # Not sure it is integral to the peak-detection per se. Opinions are welcome.
    order = 2
    # Cutoff normalized by nyquist frequency
    b, a = scipy.signal.butter(order, cutoff / (0.5 * sampling_rate))
    y = scipy.signal.lfilter(b, a, signal)
    # 2. Curve length transformation: arc length of the signal over a sliding
    # window; steep QRS complexes produce large values.
    w = int(np.ceil(window * sampling_rate))
    tmp = np.zeros(len(y) - w)
    for i, j in enumerate(np.arange(w, len(y))):
        s = y[j - w : j]
        tmp[i] = np.sum(
            np.sqrt(np.power(1 / sampling_rate, 2) * np.ones(w - 1) + np.power(np.diff(s), 2))
        )
    # Pad with the first value
    clt = np.concatenate([[tmp[0]] * w, tmp])
    # 3. Adaptive threshold: causal moving average over the last 10 seconds,
    # computed with a fast 1D convolution.
    window_size = 10 * sampling_rate
    ret = np.pad(clt, (window_size - 1, 0), "constant", constant_values=(0, 0))
    ret = np.convolve(ret, np.ones(window_size), "valid")
    # Normalize: position k has only seen k+1 samples until the window is full.
    # (Vectorized; the previous per-index loop indexed ret[0 : window_size - 1]
    # unconditionally and raised IndexError for signals shorter than 10 s.)
    ret = ret / np.minimum(np.arange(1, len(ret) + 1), window_size)
    # 4. Peaks: threshold crossings separated by a 350 ms refractory period.
    refractory = sampling_rate * 0.35  # loop-invariant (was recomputed per sample)
    peaks = []
    for i in range(len(clt)):
        if (len(peaks) == 0 or i > peaks[-1] + refractory) and clt[i] > ret[i]:
            peaks.append(i)
    return np.array(peaks)
# =============================================================================
# Christov (2004)
# =============================================================================
def _ecg_findpeaks_christov(signal, sampling_rate=1000, **kwargs):
    """Combined adaptive threshold QRS detector.

    From https://github.com/berndporr/py-ecg-detectors/

    - Ivaylo I. Christov, Real time electrocardiogram QRS detection using combined adaptive
      threshold, BioMedical Engineering OnLine 2004, vol. 3:28, 2004.

    The detection threshold is the sum of three adaptive components (per the
    paper): M (steep-slope), F (integrating) and R (beat-expectation).
    """
    total_taps = 0
    # Two cascaded moving averages (20 ms and 28 ms) smooth the signal.
    b = np.ones(int(0.02 * sampling_rate))
    b = b / int(0.02 * sampling_rate)
    total_taps += len(b)
    a = [1]
    MA1 = scipy.signal.lfilter(b, a, signal)
    b = np.ones(int(0.028 * sampling_rate))
    b = b / int(0.028 * sampling_rate)
    total_taps += len(b)
    a = [1]
    MA2 = scipy.signal.lfilter(b, a, MA1)
    # Absolute two-sample difference ...
    Y = []
    for i in range(1, len(MA2) - 1):
        diff = abs(MA2[i + 1] - MA2[i - 1])
        Y.append(diff)
    # ... smoothed by a 40 ms moving average.
    b = np.ones(int(0.040 * sampling_rate))
    b = b / int(0.040 * sampling_rate)
    total_taps += len(b)
    a = [1]
    MA3 = scipy.signal.lfilter(b, a, Y)
    MA3[0:total_taps] = 0  # blank the filters' warm-up region
    ms50 = int(0.05 * sampling_rate)
    ms200 = int(0.2 * sampling_rate)
    ms1200 = int(1.2 * sampling_rate)
    ms350 = int(0.35 * sampling_rate)
    M = 0  # steep-slope threshold component
    newM5 = 0
    M_list = []
    MM = []  # buffer of the last 5 M values
    M_slope = np.linspace(1.0, 0.6, ms1200 - ms200)  # decay 200-1200 ms after a QRS
    F = 0  # integrating threshold component
    F_list = []
    R = 0  # beat-expectation threshold component
    RR = []  # buffer of the last 5 R-R intervals
    Rm = 0  # mean R-R interval
    R_list = []
    MFR = 0  # combined threshold M + F + R
    MFR_list = []
    QRS = []
    for i in range(len(MA3)):  # pylint: disable=C0200
        # M
        if i < 5 * sampling_rate:
            # Training phase (first 5 s): M tracks the running maximum.
            M = 0.6 * np.max(MA3[: i + 1])
            MM.append(M)
            if len(MM) > 5:
                MM.pop(0)
        elif QRS and i < QRS[-1] + ms200:
            # Within the 200 ms refractory period: prepare the next M value.
            newM5 = 0.6 * np.max(MA3[QRS[-1] : i])
            if newM5 > 1.5 * MM[-1]:
                newM5 = 1.1 * MM[-1]  # limit the increase between beats
        elif QRS and i == QRS[-1] + ms200:
            if newM5 == 0:
                newM5 = MM[-1]
            MM.append(newM5)
            if len(MM) > 5:
                MM.pop(0)
            M = np.mean(MM)
        elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
            # Linear decay of M from 100% to 60% between 200 ms and 1200 ms after the QRS.
            M = np.mean(MM) * M_slope[i - (QRS[-1] + ms200)]
        elif QRS and i > QRS[-1] + ms1200:
            M = 0.6 * np.mean(MM)
        # F
        if i > ms350:
            # F follows the difference between the newest and oldest 50 ms
            # maxima within a 350 ms window.
            F_section = MA3[i - ms350 : i]
            max_latest = np.max(F_section[-ms50:])
            max_earliest = np.max(F_section[:ms50])
            F += (max_latest - max_earliest) / 150.0
        # R
        if QRS and i < QRS[-1] + int((2.0 / 3.0 * Rm)):
            R = 0
        elif QRS and i > QRS[-1] + int((2.0 / 3.0 * Rm)) and i < QRS[-1] + Rm:
            # Lower the threshold when a beat is "due" (past 2/3 of the mean R-R).
            dec = (M - np.mean(MM)) / 1.4
            R = 0 + dec
        MFR = M + F + R
        M_list.append(M)
        F_list.append(F)
        R_list.append(R)
        MFR_list.append(MFR)
        # A QRS is detected where MA3 crosses the combined threshold
        # (outside the 200 ms refractory period).
        if not QRS and MA3[i] > MFR:
            QRS.append(i)
        elif QRS and i > QRS[-1] + ms200 and MA3[i] > MFR:
            QRS.append(i)
            if len(QRS) > 2:
                RR.append(QRS[-1] - QRS[-2])
                if len(RR) > 5:
                    RR.pop(0)
                Rm = int(np.mean(RR))
    QRS.pop(0)  # the first "detection" occurs during training and is discarded
    QRS = np.array(QRS, dtype="int")
    return QRS
# =============================================================================
# Gamboa (2008)
# =============================================================================
def _ecg_findpeaks_gamboa(signal, sampling_rate=1000, tol=0.002, **kwargs):
"""From https://github.com/PIA-
Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L834.
- Gamboa, H. (2008). Multi-modal behavioral biometrics based on HCI and electrophysiology
(Doctoral dissertation, Universidade Técnica de Lisboa).
"""
hist, edges = np.histogram(signal, 100, density=True)
TH = 0.01
F = np.cumsum(hist)
v0 = edges[np.nonzero(F > TH)[0][0]]
v1 = edges[np.nonzero(F < (1 - TH))[0][-1]]
nrm = max([abs(v0), abs(v1)])
norm_signal = signal / float(nrm)
d2 = np.diff(norm_signal, 2)
b = np.nonzero((np.diff(np.sign(np.diff(-d2)))) == -2)[0] + 2 # pylint: disable=E1130
b = np.intersect1d(b, np.nonzero(-d2 > tol)[0]) # pylint: disable=E1130
rpeaks = []
if len(b) >= 3:
b = b.astype("float")
previous = b[0]
# convert to samples
v_100ms = int(0.1 * sampling_rate)
v_300ms = int(0.3 * sampling_rate)
for i in b[1:]:
if i - previous > v_300ms:
previous = i
rpeaks.append(np.argmax(signal[int(i) : int(i + v_100ms)]) + i)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype="int")
return rpeaks
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_findpeaks_engzee(signal, sampling_rate=1000, **kwargs):
    """Engelse-Zeelenberg QRS detector with the Lourenco et al. (2012) modifications.

    From https://github.com/berndporr/py-ecg-detectors/

    - C. Zeelenberg, A single scan algorithm for QRS detection and feature extraction, IEEE Comp.
      in Cardiology, vol. 6, pp. 37-42, 1979
    - A. Lourenco, H. Silva, P. Leite, R. Lourenco and A. Fred, "Real Time Electrocardiogram
      Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
    """
    engzee_fake_delay = 0
    # Differentiator: diff[i] = signal[i] - signal[i - 4].
    diff = np.zeros(len(signal))
    for i in range(4, len(diff)):
        diff[i] = signal[i] - signal[i - 4]
    # Low-pass filter with integer (binomial) coefficients.
    ci = [1, 4, 6, 4, 1]
    low_pass = scipy.signal.lfilter(ci, 1, diff)
    low_pass[: int(0.2 * sampling_rate)] = 0  # blank the filter warm-up region
    ms200 = int(0.2 * sampling_rate)
    ms1200 = int(1.2 * sampling_rate)
    ms160 = int(0.16 * sampling_rate)
    neg_threshold = int(0.01 * sampling_rate)  # required duration below -M
    M = 0  # adaptive steep-slope threshold
    M_list = []
    neg_m = []
    MM = []  # buffer of the last 5 M values
    M_slope = np.linspace(1.0, 0.6, ms1200 - ms200)  # decay 200-1200 ms after a QRS
    QRS = []
    r_peaks = []
    counter = 0  # number of consecutive samples below -M
    thi_list = []
    thi = False  # True while inside a candidate QRS (signal crossed +M)
    thf_list = []
    thf = False  # True once the signal has also crossed -M
    newM5 = False
    for i in range(len(low_pass)):  # pylint: disable=C0200
        # M
        if i < 5 * sampling_rate:
            # Training phase (first 5 s): M tracks the running maximum.
            M = 0.6 * np.max(low_pass[: i + 1])
            MM.append(M)
            if len(MM) > 5:
                MM.pop(0)
        elif QRS and i < QRS[-1] + ms200:
            # Within the 200 ms refractory period: prepare the next M value.
            newM5 = 0.6 * np.max(low_pass[QRS[-1] : i])
            if newM5 > 1.5 * MM[-1]:
                newM5 = 1.1 * MM[-1]  # limit the increase between beats
        elif newM5 and QRS and i == QRS[-1] + ms200:
            MM.append(newM5)
            if len(MM) > 5:
                MM.pop(0)
            M = np.mean(MM)
        elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
            # Linear decay of M from 100% to 60% between 200 ms and 1200 ms after the QRS.
            M = np.mean(MM) * M_slope[i - (QRS[-1] + ms200)]
        elif QRS and i > QRS[-1] + ms1200:
            M = 0.6 * np.mean(MM)
        M_list.append(M)
        neg_m.append(-M)
        # Candidate QRS onset: upward crossing of +M (outside the refractory period).
        if not QRS and low_pass[i] > M:
            QRS.append(i)
            thi_list.append(i)
            thi = True
        elif QRS and i > QRS[-1] + ms200 and low_pass[i] > M:
            QRS.append(i)
            thi_list.append(i)
            thi = True
        # Within 160 ms of the onset, look for a downward crossing of -M and
        # count how long the signal stays below it.
        if thi and i < thi_list[-1] + ms160:
            if low_pass[i] < -M and low_pass[i - 1] > -M:
                # thf_list.append(i)
                thf = True
            if thf and low_pass[i] < -M:
                thf_list.append(i)
                counter += 1
            elif low_pass[i] > -M and thf:
                counter = 0
                thi = False
                thf = False
        elif thi and i > thi_list[-1] + ms160:
            counter = 0
            thi = False
            thf = False
        # QRS confirmed once the signal stayed below -M long enough; the R-peak
        # is the maximum of the raw signal within the candidate window.
        if counter > neg_threshold:
            unfiltered_section = signal[thi_list[-1] - int(0.01 * sampling_rate) : i]
            r_peaks.append(
                engzee_fake_delay
                + np.argmax(unfiltered_section)
                + thi_list[-1]
                - int(0.01 * sampling_rate)
            )
            counter = 0
            thi = False
            thf = False
    r_peaks.pop(
        0
    )  # removing the 1st detection as it 1st needs the QRS complex amplitude for the threshold
    r_peaks = np.array(r_peaks, dtype="int")
    return r_peaks
# =============================================================================
# Stationary Wavelet Transform (SWT) - Kalidas and Tamil (2017)
# =============================================================================
def _ecg_findpeaks_kalidas(signal, sampling_rate=1000, **kwargs):
    """Stationary Wavelet Transform (SWT) based detector.

    From https://github.com/berndporr/py-ecg-detectors/

    - Vignesh Kalidas and Lakshman Tamil (2017). Real-time QRS detector using Stationary Wavelet Transform
      for Automated ECG Analysis. In: 2017 IEEE 17th International Conference on Bioinformatics and
      Bioengineering (BIBE). Uses the Pan and Tompkins thresolding.
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError as import_error:
        raise ImportError(
            "NeuroKit error: ecg_findpeaks(): the 'PyWavelets' module is required for"
            " this method to run. Please install it first (`pip install PyWavelets`)."
        ) from import_error
    signal_length = len(signal)
    # SWT requires the signal length to be divisible by 2**level; find the
    # smallest edge-padding that achieves this.
    swt_level = 3
    padding = -1
    for i in range(1000):
        if (len(signal) + i) % 2**swt_level == 0:
            padding = i
            break
    if padding > 0:
        signal = np.pad(signal, (0, padding), "edge")
    elif padding == -1:
        # NOTE: with 2**swt_level == 8, a suitable padding is always found
        # within the first 8 iterations, so this branch is effectively unreachable.
        print("Padding greater than 1000 required\n")
    swt_ecg = pywt.swt(signal, "db3", level=swt_level)
    swt_ecg = np.array(swt_ecg)
    # Keep the detail coefficients at index [0, 1] of the returned stack.
    swt_ecg = swt_ecg[0, 1, :]
    squared = swt_ecg * swt_ecg
    # Band-pass the squared coefficients (0.01-10 Hz) before thresholding.
    f1 = 0.01 / (0.5 * sampling_rate)
    f2 = 10 / (0.5 * sampling_rate)
    sos = scipy.signal.butter(3, [f1, f2], btype="bandpass", output="sos")
    filtered_squared = scipy.signal.sosfilt(sos, squared)
    # Drop padding to avoid detecting peaks inside it (#456)
    filtered_squared = filtered_squared[:signal_length]
    filt_peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate)
    filt_peaks = np.array(filt_peaks, dtype="int")
    return filt_peaks
# =============================================================================
# Elgendi et al. (2010)
# =============================================================================
def _ecg_findpeaks_elgendi(signal, sampling_rate=1000, **kwargs):
    """Elgendi et al. (2010) two-moving-average QRS detector.

    From https://github.com/berndporr/py-ecg-detectors/

    - Elgendi, Mohamed & Jonkman, Mirjam & De Boer, Friso. (2010). Frequency Bands Effects on QRS
      Detection. The 3rd International Conference on Bio-inspired Systems and Signal Processing
      (BIOSIGNALS2010). 428-431.
    """
    # Moving average over a QRS-sized window (120 ms) ...
    window1 = int(0.12 * sampling_rate)
    mwa_qrs = _ecg_findpeaks_MWA(abs(signal), window1)
    # ... and over a beat-sized window (600 ms).
    window2 = int(0.6 * sampling_rate)
    mwa_beat = _ecg_findpeaks_MWA(abs(signal), window2)
    # Blocks of interest: regions where the QRS average exceeds the beat average.
    blocks = np.zeros(len(signal))
    block_height = np.max(signal)
    for i in range(len(mwa_qrs)):  # pylint: disable=C0200
        blocks[i] = block_height if mwa_qrs[i] > mwa_beat[i] else 0
    QRS = []
    # `start` must be initialized: if the very first sample is already inside a
    # block, the falling edge below is reached without a rising edge having set
    # it (previously a NameError).
    start = 0
    for i in range(1, len(blocks)):
        if blocks[i - 1] == 0 and blocks[i] == block_height:
            start = i
        elif blocks[i - 1] == block_height and blocks[i] == 0:
            end = i - 1
            # Ignore blocks shorter than 80 ms (too short to be a QRS).
            if end - start > int(0.08 * sampling_rate):
                detection = np.argmax(signal[start : end + 1]) + start
                if QRS:
                    # Enforce a 300 ms refractory period between detections.
                    if detection - QRS[-1] > int(0.3 * sampling_rate):
                        QRS.append(detection)
                else:
                    QRS.append(detection)
    return np.array(QRS, dtype="int")
# =============================================================================
# Continuous Wavelet Transform (CWT) - Martinez et al. (2004)
# =============================================================================
#
def _ecg_findpeaks_WT(signal, sampling_rate=1000, **kwargs):
    """Continuous wavelet transform (CWT) based detector (Martinez et al., 2004).

    R-peaks are localized coarse-to-fine: above-threshold maxima of the wavelet
    transform at scale 2**4 are matched to the nearest maxima at scales 2**3,
    2**2 and 2**1, and the R-peak is the zero-crossing between a
    negative-positive pair of scale-1 maxima.
    """
    # Try loading pywt
    try:
        import pywt
    except ImportError as import_error:
        raise ImportError(
            "NeuroKit error: ecg_delineator(): the 'PyWavelets' module is required for"
            " this method to run. Please install it first (`pip install PyWavelets`)."
        ) from import_error
    # first derivative of the Gaussian as the mother wavelet
    scales = np.array([1, 2, 4, 8, 16])
    cwtmatr, __ = pywt.cwt(signal, scales, "gaus1", sampling_period=1.0 / sampling_rate)
    # For wt of scale 2^4
    signal_4 = cwtmatr[4, :]
    epsilon_4 = np.sqrt(np.mean(np.square(signal_4)))  # RMS-based threshold
    peaks_4, _ = scipy.signal.find_peaks(np.abs(signal_4), height=epsilon_4)
    # For wt of scale 2^3
    signal_3 = cwtmatr[3, :]
    epsilon_3 = np.sqrt(np.mean(np.square(signal_3)))
    peaks_3, _ = scipy.signal.find_peaks(np.abs(signal_3), height=epsilon_3)
    # Keep only peaks_3 that are nearest to peaks_4
    peaks_3_keep = np.zeros_like(peaks_4)
    for i in range(len(peaks_4)):  # pylint: disable=C0200
        peaks_distance = abs(peaks_4[i] - peaks_3)
        peaks_3_keep[i] = peaks_3[np.argmin(peaks_distance)]
    # For wt of scale 2^2
    signal_2 = cwtmatr[2, :]
    epsilon_2 = np.sqrt(np.mean(np.square(signal_2)))
    peaks_2, _ = scipy.signal.find_peaks(np.abs(signal_2), height=epsilon_2)
    # Keep only peaks_2 that are nearest to peaks_3
    peaks_2_keep = np.zeros_like(peaks_4)
    for i in range(len(peaks_4)):
        peaks_distance = abs(peaks_3_keep[i] - peaks_2)
        peaks_2_keep[i] = peaks_2[np.argmin(peaks_distance)]
    # For wt of scale 2^1
    signal_1 = cwtmatr[1, :]
    epsilon_1 = np.sqrt(np.mean(np.square(signal_1)))
    peaks_1, _ = scipy.signal.find_peaks(np.abs(signal_1), height=epsilon_1)
    # Keep only peaks_1 that are nearest to peaks_2
    peaks_1_keep = np.zeros_like(peaks_4)
    for i in range(len(peaks_4)):
        peaks_distance = abs(peaks_2_keep[i] - peaks_1)
        peaks_1_keep[i] = peaks_1[np.argmin(peaks_distance)]
    # Find R peaks: a zero-crossing between a close negative-positive pair of
    # scale-1 maxima (within 100 ms of each other).
    max_R_peak_dist = int(0.1 * sampling_rate)
    rpeaks = []
    for index_cur, index_next in zip(peaks_1_keep[:-1], peaks_1_keep[1:]):
        correct_sign = signal_1[index_cur] < 0 and signal_1[index_next] > 0  # pylint: disable=R1716
        near = (index_next - index_cur) < max_R_peak_dist  # limit 2
        if near and correct_sign:
            rpeaks.append(signal_zerocrossings(signal_1[index_cur : index_next + 1])[0] + index_cur)
    rpeaks = np.array(rpeaks, dtype="int")
    return rpeaks
# =============================================================================
# ASI (FSM based 2020)
# =============================================================================
def _ecg_findpeaks_rodrigues(signal, sampling_rate=1000, **kwargs):
    """Segmenter by Tiago Rodrigues, inspired by on Gutierrez-Rivas (2015) and Sadhukhan (2012).

    Implemented as a three-state finite-state machine (FSM): (1) search for the
    maximum of the pre-processed signal within a minimum R-R window, (2) a short
    waiting state, (3) an exponentially decaying threshold that must be crossed
    before searching for the next beat.

    References
    ----------
    - Gutiérrez-Rivas, R., García, J. J., Marnane, W. P., & Hernández, A. (2015). Novel real-time
      low-complexity QRS complex detector based on adaptive thresholding. IEEE Sensors Journal,
      15(10), 6036-6043.
    - Sadhukhan, D., & Mitra, M. (2012). R-peak detection algorithm for ECG using double difference
      and RR interval processing. Procedia Technology, 4, 873-877.
    """
    N = int(np.round(3 * sampling_rate / 128))  # differencing lag, scaled from a 128 Hz reference
    Nd = N - 1
    # Threshold decay rate, scaled with the sampling rate.
    Pth = (0.7 * sampling_rate) / 128 + 2.7
    # Pth = 3, optimal for fs = 250 Hz
    Rmin = 0.26  # minimum R-R interval (seconds)
    rpeaks = []
    i = 1
    Ramptotal = 0  # exponentially weighted average of detected peak amplitudes
    # Double derivative squared
    diff_ecg = [signal[i] - signal[i - Nd] for i in range(Nd, len(signal))]
    ddiff_ecg = [diff_ecg[i] - diff_ecg[i - 1] for i in range(1, len(diff_ecg))]
    squar = np.square(ddiff_ecg)
    # Integrate moving window
    b = np.array(np.ones(N))
    a = [1]
    processed_ecg = scipy.signal.lfilter(b, a, squar)
    tf = len(processed_ecg)
    rpeakpos = 0
    # R-peak finder FSM
    while i < tf:  # ignore last sample of recording
        # State 1: looking for maximum
        tf1 = np.round(i + Rmin * sampling_rate)
        Rpeakamp = 0
        while i < tf1 and i < tf:
            # Rpeak amplitude and position
            if processed_ecg[i] > Rpeakamp:
                Rpeakamp = processed_ecg[i]
                rpeakpos = i + 1
            i += 1
        # Update the running amplitude average (1/20 weight for the new peak).
        Ramptotal = (19 / 20) * Ramptotal + (1 / 20) * Rpeakamp
        rpeaks.append(rpeakpos)
        # State 2: waiting state
        d = tf1 - rpeakpos
        # NOTE(review): "0.2 * 2" yields a fixed 0.4-sample wait, which makes
        # this state almost a no-op; it may have been intended to scale with
        # the sampling rate (e.g. 0.2 s) — confirm against the reference code.
        tf2 = i + np.round(0.2 * 2 - d)
        while i <= tf2:
            i += 1
        # State 3: decreasing threshold
        Thr = Ramptotal
        while i < tf and processed_ecg[i] < Thr:
            Thr *= np.exp(-Pth / sampling_rate)
            i += 1
    rpeaks = np.array(rpeaks, dtype="int")
    return rpeaks
# =============================================================================
# Visibility graph transformation - by Koka and Muma (2022)
# =============================================================================
def _ecg_findpeaks_vgraph(signal, sampling_rate=1000, lowcut=3, order=2, **kwargs):
    """R-Peak Detector Using Visibility Graphs by Taulant Koka and Michael Muma (2022).

    The signal is processed in 2-second segments with 50% overlap. Each segment
    is transformed into a directed visibility graph, whose adjacency matrix is
    iterated to compute per-sample weights; the weighted signal is then passed
    to the Pan-Tompkins style thresholding.

    References
    ----------
    - T. Koka and M. Muma (2022), Fast and Sample Accurate R-Peak Detection for Noisy ECG Using
      Visibility Graphs. In: 2022 44th Annual International Conference of the IEEE Engineering
      in Medicine & Biology Society (EMBC). Uses the Pan and Tompkins thresholding.
    """
    # Try loading ts2vg
    try:
        import ts2vg
    except ImportError as import_error:
        raise ImportError(
            "NeuroKit error: ecg_findpeaks(): the 'ts2vg' module is required for"
            " this method to run. Please install it first (`pip install ts2vg`)."
        ) from import_error
    N = len(signal)
    M = 2 * sampling_rate  # segment size (2 s)
    w = np.zeros(N)
    beta = 0.55  # stop iterating once fewer than this fraction of weights are non-zero
    gamma = 0.5  # overlap fraction between consecutive segments
    L = 0
    R = M
    # Compute number of segments
    deltaM = int(gamma * M)
    n_segments = int((N - deltaM) / (M - deltaM)) + 1
    for segment in range(n_segments):
        y = signal[L:R]
        # Compute the adjacency matrix to the directed visibility graph
        A = ts2vg.NaturalVG(directed="top_to_bottom").build(y).adjacency_matrix()
        _w = np.ones(len(y))
        # Computee the weights for the ecg using its VG transformation
        while np.count_nonzero(_w) / len(y) >= beta:
            _w = np.matmul(A, _w) / np.linalg.norm(_w)
        # Update the weight vector; overlapping regions are averaged with the
        # weights from the previous segment.
        if L == 0:
            w[L:R] = _w
        elif N - deltaM <= L < N:
            # BUGFIX: this condition used to read `N - deltaM <= L < L < N`;
            # the `L < L` comparison is always False, so this final-segment
            # branch was dead code and the generic branch below ran instead.
            w[L:] = 0.5 * (_w + w[L:])
        else:
            w[L: L + deltaM] = 0.5 * (_w[:deltaM] + w[L: L + deltaM])
            w[L + deltaM: R] = _w[deltaM:]
        # Update the boundary conditions
        L = L + (M - deltaM)
        if R + (M - deltaM) <= N:
            R = R + (M - deltaM)
        else:
            R = N
    # Multiply signal by its weights and apply thresholding algorithm
    weighted_signal = signal * w
    rpeaks = _ecg_findpeaks_peakdetect(weighted_signal, sampling_rate)
    return np.array(rpeaks, dtype="int")
# =============================================================================
# Utilities
# =============================================================================
def _ecg_findpeaks_MWA(signal, window_size, **kwargs):
    """Causal moving-window average, based on https://github.com/berndporr/py-ecg-detectors/

    Each output sample is the mean of the `window_size` input samples ending at
    (and including) that position; the earliest outputs average only the
    samples seen so far.
    """
    window_size = int(window_size)
    # uniform_filter1d computes fast centered moving averages; shifting the
    # filter origin by (window_size - 1) // 2 makes the window end at the
    # current sample, i.e. a causal average.
    averaged = scipy.ndimage.uniform_filter1d(signal, window_size, origin=(window_size - 1) // 2)
    # uniform_filter1d pads at the edges, so recompute the first
    # `window_size - 1` outputs as means of only the samples seen so far.
    n_head = min(window_size - 1, len(signal))
    averaged[:n_head] = np.cumsum(signal[:n_head]) / np.linspace(1, n_head, n_head)
    return averaged
def _ecg_findpeaks_peakdetect(detection, sampling_rate=1000, **kwargs):
    """Adaptive Pan-Tompkins-style thresholding.

    Based on https://github.com/berndporr/py-ecg-detectors/
    Optimized for vectorized computation.
    """
    refractory = int(0.3 * sampling_rate)
    missed_margin = int(0.25 * sampling_rate)
    accepted = []
    spki = 0.0  # running estimate of the signal-peak amplitude
    npki = 0.0  # running estimate of the noise-peak amplitude
    previous_peak = 0
    previous_index = -1
    # NOTE: Using plateau_size=(1,1) here avoids detecting flat peaks and
    # maintains original py-ecg-detectors behaviour. Flat peaks are typically
    # found in measurement artifacts where the signal saturates at maximum
    # recording amplitude. Such cases should not be detected as peaks. If we
    # do encounter recordings where even normal R peaks are flat, then changing
    # this to something like plateau_size=(1, sampling_rate // 10) might make
    # sense. See also https://github.com/neuropsychology/NeuroKit/pull/450.
    candidates, _ = scipy.signal.find_peaks(detection, plateau_size=(1, 1))
    for index, peak in enumerate(candidates):
        amplitude = detection[peak]
        threshold = npki + 0.25 * (spki - npki)
        if amplitude > threshold and peak > previous_peak + refractory:
            accepted.append(peak)
            # Search back for a missed beat whenever the current R-R interval
            # is much longer than the average of the previous eight intervals.
            if len(accepted) > 9:
                mean_rr = (accepted[-2] - accepted[-10]) // 8
                rr_missed = int(1.66 * mean_rr)
                if peak - previous_peak > rr_missed:
                    in_between = candidates[previous_index + 1 : index]
                    in_between = in_between[
                        (in_between > previous_peak + missed_margin)
                        & (in_between < peak - missed_margin)
                    ]
                    # Missed candidates must still clear half the threshold.
                    in_between = in_between[detection[in_between] > 0.5 * threshold]
                    if len(in_between) > 0:
                        # Insert the strongest missed candidate before the current peak.
                        accepted[-1] = in_between[np.argmax(detection[in_between])]
                        accepted.append(peak)
            previous_peak = peak
            previous_index = index
            spki = 0.125 * amplitude + 0.875 * spki
        else:
            npki = 0.125 * amplitude + 0.875 * npki
    return accepted
| 40,059 | 31.998353 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_invert.py | import matplotlib.pyplot as plt
import numpy as np
from .ecg_clean import ecg_clean
def ecg_invert(ecg_signal, sampling_rate=1000, force=False, show=False):
    """**ECG signal inversion**

    Checks whether an ECG signal is inverted and, if so, corrects for it.
    The automatic detection cleans the signal, subtracts its mean, and — over
    rolling 2-second windows — takes the original value at the maximum of the
    squared signal. If the median of those values is negative, the signal is
    assumed to be inverted.

    Parameters
    ----------
    ecg_signal : Union[list, np.array, pd.Series]
        The raw ECG channel.
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
    force : bool
        Whether to force inversion of the signal regardless of whether it is
        detected as inverted. The default is False.
    show : bool
        Shows a plot of the original and inverted signal.

    Returns
    -------
    array
        Vector containing the corrected ECG signal.
    bool
        Whether the inversion was performed.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      ecg = -1 * nk.ecg_simulate(duration=10, sampling_rate=200, heart_rate=70)
      # Invert if necessary
      @savefig p_ecg_invert1.png scale=100%
      ecg_fixed, is_inverted = nk.ecg_invert(ecg, sampling_rate=200, show=True)
      @suppress
      plt.close()

    """
    # Mirror the signal around its mean. This is always computed because it is
    # needed both for the comparison plot and for the corrected output.
    mirrored = np.array(ecg_signal) * -1 + 2 * np.nanmean(ecg_signal)
    if show is True:
        _, axes = plt.subplots(2, 1, sharex=True, sharey=True)
        axes[0].plot(ecg_signal)
        axes[0].set_title("Original ECG")
        axes[1].plot(mirrored)
        axes[1].set_title("Inverted ECG")
    # Invert when forced; otherwise only when the heuristic detects inversion.
    if force or _ecg_inverted(ecg_signal, sampling_rate=sampling_rate):
        return mirrored, True
    return ecg_signal, False
def _ecg_inverted(ecg_signal, sampling_rate=1000, window_time=2.0):
    """Heuristically decide whether an ECG signal is inverted.

    Cleans the signal, removes the mean, then — over rolling windows long
    enough to contain at least one heartbeat — takes the original value at the
    maximum of the squared signal. A negative median of those values indicates
    that the dominant deflections point downward, i.e. an inverted signal.
    """
    cleaned = ecg_clean(ecg_signal, sampling_rate=sampling_rate)
    # Filtering should already center the signal, but subtract the mean to be safe.
    centered = cleaned - np.nanmean(cleaned)
    window_len = int(window_time * sampling_rate)
    dominant_values = _roll_orig_max_squared(centered, window=window_len)
    # If the median dominant deflection is negative, assume inversion.
    return np.nanmedian(dominant_values) < 0
def _roll_orig_max_squared(x, window=2000):
    """With a rolling window, takes the original value corresponding to the maximum of the squared signal."""
    windows = np.lib.stride_tricks.sliding_window_view(x, window, axis=0)
    # https://stackoverflow.com/questions/61703879/in-numpy-how-to-select-elements-based-on-the-maximum-of-their-absolute-values
    # Index of the largest squared value within each window ...
    argmax_idx = np.square(windows).argmax(-1)
    # ... reshaped so it can gather the original values along the last axis.
    gather_shape = np.array(windows.shape)
    gather_shape[-1] = -1
    return np.take_along_axis(windows, argmax_idx.reshape(gather_shape), axis=-1)
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_plot.py | # -*- coding: utf-8 -*-
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..ecg import ecg_peaks
from ..epochs import epochs_to_df
from ..signal import signal_fixpeaks
from ..stats import rescale
from .ecg_segment import ecg_segment
def ecg_plot(ecg_signals, rpeaks=None, sampling_rate=None, show_type="default"):
    """**Visualize ECG data**

    Plot the raw and cleaned ECG signal, the detected R-peaks, the
    instantaneous heart rate and (optionally) the superimposed individual
    heartbeats and artifact-correction diagnostics.

    Parameters
    ----------
    ecg_signals : DataFrame
        DataFrame obtained from ``ecg_process()``.
    rpeaks : dict
        The samples at which the R-peaks occur, as returned by
        ``ecg_process()``. Defaults to ``None`` (recomputed when needed).
    sampling_rate : int
        The sampling frequency of the ECG (in Hz, i.e., samples/second).
        Needs to be supplied for a time axis in seconds; otherwise the data
        is plotted over samples. Must be specified to plot artifacts.
    show_type : str
        ``"default"`` for the signal/rate/heartbeat panels, ``"artifacts"``
        for the artifact thresholds produced by ``ecg_fixpeaks()``, or
        ``"full"`` for both.

    Returns
    -------
    None. Retrieve the figure afterwards with ``fig = plt.gcf()``.

    See Also
    --------
    ecg_process
    """
    # `ecg_signals` must be the processed DataFrame, not a raw vector.
    if not isinstance(ecg_signals, pd.DataFrame):
        raise ValueError(
            "NeuroKit error: ecg_plot(): The `ecg_signals` argument must be the "
            "DataFrame returned by `ecg_process()`."
        )

    # Sample indices where an R-peak was marked.
    peak_locs = np.where(ecg_signals["ECG_R_Peaks"] == 1)[0]

    if show_type in ["default", "full"]:
        n_samples = ecg_signals.shape[0]
        if sampling_rate is not None:
            # Time axis in seconds; reserve a right-hand column for the beats.
            time_axis = np.linspace(0, n_samples / sampling_rate, n_samples)
            grid = matplotlib.gridspec.GridSpec(2, 2, width_ratios=[1 - 1 / np.pi, 1 / np.pi])
            fig = plt.figure(constrained_layout=False)
            ax_signal = fig.add_subplot(grid[0, :-1])
            ax_rate = fig.add_subplot(grid[1, :-1])
            ax_beats = fig.add_subplot(grid[:, -1])
            ax_signal.set_xlabel("Time (seconds)")
            ax_rate.set_xlabel("Time (seconds)")
            ax_beats.set_xlabel("Time (seconds)")
        else:
            # Without a sampling rate the x-axis stays in samples and the
            # individual-heartbeats panel cannot be drawn.
            time_axis = np.arange(0, n_samples)
            fig, (ax_signal, ax_rate) = plt.subplots(nrows=2, ncols=1, sharex=True)
            ax_signal.set_xlabel("Samples")
            ax_rate.set_xlabel("Samples")
        fig.suptitle("Electrocardiogram (ECG)", fontweight="bold")
        plt.tight_layout(h_pad=0.3, w_pad=0.1)

        # --- Top panel: raw + cleaned signal, quality band, R-peaks ---
        ax_signal.set_title("Raw and Cleaned Signal")
        # Rescale the quality index to the amplitude range of the clean signal
        # so it can be drawn as a shaded band behind it.
        quality_band = rescale(
            ecg_signals["ECG_Quality"],
            to=[np.min(ecg_signals["ECG_Clean"]), np.max(ecg_signals["ECG_Clean"])],
        )
        quality_floor = np.full(len(time_axis), quality_band.min())
        # Quality area first so it sits behind the traces (zorder=0).
        ax_signal.fill_between(
            time_axis,
            quality_floor,
            quality_band,
            alpha=0.12,
            zorder=0,
            interpolate=True,
            facecolor="#4CAF50",
            label="Quality",
        )
        ax_signal.plot(time_axis, ecg_signals["ECG_Raw"], color="#B0BEC5", label="Raw", zorder=1)
        ax_signal.plot(
            time_axis,
            ecg_signals["ECG_Clean"],
            color="#E91E63",
            label="Cleaned",
            zorder=1,
            linewidth=1.5,
        )
        ax_signal.scatter(
            time_axis[peak_locs],
            ecg_signals["ECG_Clean"][peak_locs],
            color="#FFC107",
            label="R-peaks",
            zorder=2,
        )
        # Reorder the legend entries for readability.
        handles, labels = ax_signal.get_legend_handles_labels()
        order = [2, 0, 1, 3]
        ax_signal.legend(
            [handles[idx] for idx in order], [labels[idx] for idx in order], loc="upper right"
        )

        # --- Bottom panel: instantaneous heart rate with its mean ---
        ax_rate.set_title("Heart Rate")
        ax_rate.set_ylabel("Beats per minute (bpm)")
        ax_rate.plot(time_axis, ecg_signals["ECG_Rate"], color="#FF5722", label="Rate", linewidth=1.5)
        mean_rate = ecg_signals["ECG_Rate"].mean()
        ax_rate.axhline(y=mean_rate, label="Mean", linestyle="--", color="#FF9800")
        ax_rate.legend(loc="upper right")

        # --- Right panel: superimposed individual heartbeats ---
        if sampling_rate is not None:
            ax_beats.set_title("Individual Heart Beats")
            beats = ecg_segment(ecg_signals["ECG_Clean"], peak_locs, sampling_rate)
            beats = epochs_to_df(beats)
            beats_pivoted = beats.pivot(index="Time", columns="Label", values="Signal")
            ax_beats.plot(beats_pivoted)
            # Color each beat along a yellow-to-red gradient.
            beat_colors = iter(
                plt.cm.YlOrRd(
                    np.linspace(0, 1, num=int(beats["Label"].nunique()))
                )  # pylint: disable=E1101
            )
            beat_lines = []
            for label, color in zip(beats_pivoted, beat_colors):
                (beat_line,) = ax_beats.plot(beats_pivoted[label], color=color)
                beat_lines.append(beat_line)

    # --- Artifact visualization (requires a sampling rate) ---
    if show_type in ["artifacts", "full"]:
        if sampling_rate is None:
            raise ValueError(
                "NeuroKit error: ecg_plot(): Sampling rate must be specified for artifacts"
                " to be plotted."
            )
        if rpeaks is None:
            _, rpeaks = ecg_peaks(ecg_signals["ECG_Clean"], sampling_rate=sampling_rate)
        fig = signal_fixpeaks(
            rpeaks, sampling_rate=sampling_rate, iterative=True, show=True, method="Kubios"
        )
| 6,225 | 31.768421 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_segment.py | # - * - coding: utf-8 - * -
import matplotlib.pyplot as plt
import numpy as np
from ..epochs import epochs_create, epochs_to_df
from ..signal import signal_rate
from .ecg_peaks import ecg_peaks
def ecg_segment(ecg_cleaned, rpeaks=None, sampling_rate=1000, show=False):
    """**Segment an ECG signal into single heartbeats**

    Segment an ECG signal into single heartbeats. Convenient for visualizing all the heart beats.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        The cleaned ECG channel as returned by ``ecg_clean()``.
    rpeaks : dict
        The samples at which the R-peaks occur. Dict returned by ``ecg_peaks()``.
        Defaults to ``None`` (peaks are detected internally).
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
    show : bool
        If ``True``, will return a plot of heartbeats. Defaults to ``False``.

    Returns
    -------
    dict
        A dict containing DataFrames for all segmented heartbeats.

    See Also
    --------
    ecg_clean, ecg_plot
    """
    # Sanitize inputs: detect R-peaks if none were provided.
    if rpeaks is None:
        _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate, correct_artifacts=True)
        rpeaks = rpeaks["ECG_R_Peaks"]

    # Window around each R-peak, scaled by the average heart rate.
    epochs_start, epochs_end = _ecg_segment_window(
        rpeaks=rpeaks, sampling_rate=sampling_rate, desired_length=len(ecg_cleaned)
    )
    heartbeats = epochs_create(
        ecg_cleaned,
        rpeaks,
        sampling_rate=sampling_rate,
        epochs_start=epochs_start,
        epochs_end=epochs_end,
    )

    # Pad the last heartbeat with NaN so that all segments have equal length.
    # BUG FIX: the mask must select samples *at or beyond* the end of the
    # signal (>=), which is what ``after_last_index`` means. The previous
    # ``< len(ecg_cleaned)`` was inverted: it NaN-ed out the valid in-signal
    # samples of the last heartbeat and kept the out-of-range padding.
    last_heartbeat_key = str(np.max(np.array(list(heartbeats.keys()), dtype=int)))
    after_last_index = heartbeats[last_heartbeat_key]["Index"] >= len(ecg_cleaned)
    heartbeats[last_heartbeat_key].loc[after_last_index, "Signal"] = np.nan

    if show:
        heartbeats_plot = epochs_to_df(heartbeats)
        heartbeats_pivoted = heartbeats_plot.pivot(index="Time", columns="Label", values="Signal")
        plt.plot(heartbeats_pivoted)
        plt.xlabel("Time (s)")
        plt.title("Individual Heart Beats")
        # Color each beat along a yellow-to-red gradient.
        cmap = iter(
            plt.cm.YlOrRd(np.linspace(0, 1, num=int(heartbeats_plot["Label"].nunique())))
        )  # pylint: disable=no-member
        lines = []
        for x, color in zip(heartbeats_pivoted, cmap):
            (line,) = plt.plot(heartbeats_pivoted[x], color=color)
            lines.append(line)

    return heartbeats
def _ecg_segment_window(heart_rate=None, rpeaks=None, sampling_rate=1000, desired_length=None):
# Extract heart rate
if heart_rate is not None:
heart_rate = np.mean(heart_rate)
if rpeaks is not None:
heart_rate = np.mean(
signal_rate(rpeaks, sampling_rate=sampling_rate, desired_length=desired_length)
)
# Modulator
m = heart_rate / 60
# Window
epochs_start = -0.35 / m
epochs_end = 0.5 / m
# Adjust for high heart rates
if heart_rate >= 80:
c = 0.1
epochs_start = epochs_start - c
epochs_end = epochs_end + c
return epochs_start, epochs_end
| 3,456 | 31.009259 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_phase.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_phase
from .ecg_delineate import ecg_delineate
from .ecg_peaks import ecg_peaks
def ecg_phase(ecg_cleaned, rpeaks=None, delineate_info=None, sampling_rate=None):
    """**Find the Cardiac Phase**

    Compute cardiac phase (for both atrial and ventricular), labelled as 1 for systole and 0 for
    diastole.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        The cleaned ECG channel as returned by ``ecg_clean()``.
    rpeaks : list or array or DataFrame or Series or dict
        The samples at which the different ECG peaks occur. If a dict or a DataFrame is passed, it
        is assumed that these containers were obtained with ``ecg_findpeaks()`` or ``ecg_peaks()``.
    delineate_info : dict
        A dictionary containing additional information of ecg delineation, obtained with
        ``ecg_delineate()``. If ``None``, it is computed internally (requires ``sampling_rate``).
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to ``None``.

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as ``ecg_signal`` containing the columns
        ``"ECG_Phase_Atrial"``, ``"ECG_Phase_Completion_Atrial"``,
        ``"ECG_Phase_Ventricular"`` and ``"ECG_Phase_Completion_Ventricular"``.

    Raises
    ------
    ValueError
        If neither ``rpeaks`` nor ``sampling_rate`` is provided, or if
        ``delineate_info`` is not the dict returned by ``ecg_delineate()``.

    See Also
    --------
    ecg_clean, ecg_peaks, ecg_delineate, ecg_process, ecg_plot
    """
    # Sanitize inputs
    if rpeaks is None:
        if sampling_rate is not None:
            _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
        else:
            raise ValueError(
                "R-peaks will be obtained using `nk.ecg_peaks`. Please provide the sampling_rate of ecg_signal."
            )
    # Try retrieving right column
    if isinstance(rpeaks, dict):
        rpeaks = rpeaks["ECG_R_Peaks"]

    if delineate_info is None:
        __, delineate_info = ecg_delineate(ecg_cleaned, sampling_rate=sampling_rate)

    if isinstance(delineate_info, dict):
        # Boolean masks of T-offsets and P-peaks (NaNs in the delineation are skipped).
        toffsets = np.full(len(ecg_cleaned), False, dtype=bool)
        toffsets_idcs = [int(x) for x in delineate_info["ECG_T_Offsets"] if ~np.isnan(x)]
        toffsets[toffsets_idcs] = True

        ppeaks = np.full(len(ecg_cleaned), False, dtype=bool)
        ppeaks_idcs = [int(x) for x in delineate_info["ECG_P_Peaks"] if ~np.isnan(x)]
        ppeaks[ppeaks_idcs] = True
    else:
        # Previously this case fell through and caused a confusing NameError
        # further down (`toffsets`/`ppeaks` undefined); fail early instead.
        raise ValueError(
            "NeuroKit error: ecg_phase(): `delineate_info` must be the dict"
            " returned by `ecg_delineate()`."
        )

    # Atrial Phase: systole (1) starts at the P-peak, diastole (0) at the R-peak.
    atrial = np.full(len(ecg_cleaned), np.nan)
    atrial[rpeaks] = 0.0
    atrial[ppeaks] = 1.0
    last_element = np.where(~np.isnan(atrial))[0][-1]  # Avoid filling beyond the last peak/trough
    atrial[0:last_element] = pd.Series(atrial).fillna(method="ffill").values[0:last_element]

    # Atrial Phase Completion
    atrial_completion = signal_phase(atrial, method="percent")

    # Ventricular Phase: systole (1) starts at the R-peak, diastole (0) at the T-offset.
    ventricular = np.full(len(ecg_cleaned), np.nan)
    ventricular[toffsets] = 0.0
    ventricular[rpeaks] = 1.0
    last_element = np.where(~np.isnan(ventricular))[0][
        -1
    ]  # Avoid filling beyond the last peak/trough
    ventricular[0:last_element] = (
        pd.Series(ventricular).fillna(method="ffill").values[0:last_element]
    )

    # Ventricular Phase Completion
    ventricular_completion = signal_phase(ventricular, method="percent")

    return pd.DataFrame(
        {
            "ECG_Phase_Atrial": atrial,
            "ECG_Phase_Completion_Atrial": atrial_completion,
            "ECG_Phase_Ventricular": ventricular,
            "ECG_Phase_Completion_Ventricular": ventricular_completion,
        }
    )
| 4,697 | 36.584 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..signal import signal_rate, signal_sanitize
from .ecg_clean import ecg_clean
from .ecg_delineate import ecg_delineate
from .ecg_peaks import ecg_peaks
from .ecg_phase import ecg_phase
from .ecg_quality import ecg_quality
def ecg_process(ecg_signal, sampling_rate=1000, method="neurokit"):
    """**Automated pipeline for preprocessing an ECG signal**

    Runs cleaning, R-peak detection (with artifact correction), heart-rate
    computation, signal-quality estimation, wave delineation and cardiac-phase
    computation, and assembles everything into a single DataFrame.

    Parameters
    ----------
    ecg_signal : Union[list, np.array, pd.Series]
        The raw ECG channel.
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second).
        Defaults to 1000.
    method : str
        The processing pipeline to apply (passed to cleaning and peak
        detection). Defaults to ``"neurokit"``.

    Returns
    -------
    signals : DataFrame
        A DataFrame of the same length as ``ecg_signal`` containing (among
        others) the columns ``"ECG_Raw"``, ``"ECG_Clean"``, ``"ECG_Rate"``,
        ``"ECG_Quality"``, ``"ECG_R_Peaks"``, the delineated wave peaks/onsets/
        offsets (``"ECG_P_Peaks"``, ``"ECG_Q_Peaks"``, ``"ECG_S_Peaks"``,
        ``"ECG_T_Peaks"``, ...) and the cardiac-phase columns
        (``"ECG_Phase_Atrial"``, ``"ECG_Phase_Ventricular"``, ...).
    info : dict
        A dictionary containing the samples at which the R-peaks occur
        (key ``"ECG_R_Peaks"``) as well as the signal's sampling rate
        (key ``"sampling_rate"``).

    See Also
    --------
    ecg_clean, ecg_peaks, ecg_delineate, ecg_phase, ecg_plot, .signal_rate
    """
    # Sanitize and clean the raw signal.
    ecg_signal = signal_sanitize(ecg_signal)
    clean = ecg_clean(ecg_signal, sampling_rate=sampling_rate, method=method)

    # Detect R-peaks (with artifact correction).
    instant_peaks, rpeaks = ecg_peaks(
        ecg_cleaned=clean, sampling_rate=sampling_rate, method=method, correct_artifacts=True
    )

    # Instantaneous heart rate and continuous signal quality.
    heart_rate = signal_rate(rpeaks, sampling_rate=sampling_rate, desired_length=len(clean))
    quality = ecg_quality(clean, rpeaks=rpeaks["ECG_R_Peaks"], sampling_rate=sampling_rate)

    signals = pd.DataFrame(
        {"ECG_Raw": ecg_signal, "ECG_Clean": clean, "ECG_Rate": heart_rate, "ECG_Quality": quality}
    )

    # Delineate the P/QRS/T waves and derive the cardiac phase.
    delineate_signal, delineate_info = ecg_delineate(
        ecg_cleaned=clean, rpeaks=rpeaks, sampling_rate=sampling_rate
    )
    cardiac_phase = ecg_phase(ecg_cleaned=clean, rpeaks=rpeaks, delineate_info=delineate_info)

    signals = pd.concat([signals, instant_peaks, delineate_signal, cardiac_phase], axis=1)

    # R-peak locations plus sampling rate, for downstream functions.
    info = rpeaks
    info["sampling_rate"] = sampling_rate

    return signals, info
| 4,986 | 40.907563 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_quality.py | # - * - coding: utf-8 - * -
from warnings import warn
import numpy as np
import scipy
from ..epochs import epochs_to_df
from ..misc import NeuroKitWarning
from ..signal import signal_interpolate
from ..signal.signal_power import signal_power
from ..stats import distance, rescale
from .ecg_peaks import ecg_peaks
from .ecg_segment import ecg_segment
def ecg_quality(
    ecg_cleaned, rpeaks=None, sampling_rate=1000, method="averageQRS", approach=None, **kwargs
):
    """**ECG Signal Quality Assessment**

    Assess the quality of the ECG Signal using various methods:

    * The ``"averageQRS"`` method computes a continuous index of quality of the ECG signal, by
      interpolating the distance of each QRS segment from the average QRS segment present in the
      data. This index is therefore relative: 1 corresponds to heartbeats that are the closest to
      the average sample and 0 corresponds to the most distant heartbeat from that average sample.
      Note that 1 does not necessarily mean "good": if the majority of samples are bad, then being
      close to the average will likely mean bad as well. Use this index with care and plot it
      alongside your ECG signal to see if it makes sense.
    * The ``"zhao2018"`` method (Zhao et al., 2018) extracts several signal quality indexes (SQIs):
      QRS wave power spectrum distribution pSQI, kurtosis kSQI, and baseline relative power basSQI.
      An additional R peak detection match qSQI was originally computed in the paper but left out
      in this algorithm. The indices were originally weighted with a ratio of [0.4, 0.4, 0.1, 0.1]
      to generate the final classification outcome, but because qSQI was dropped, the weights have
      been rearranged to [0.6, 0.2, 0.2] for pSQI, kSQI and basSQI respectively.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        The cleaned ECG signal in the form of a vector of values.
    rpeaks : tuple or list
        The list of R-peak samples returned by ``ecg_peaks()``. If None, peaks is computed from
        the signal input.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    method : str
        The method for computing ECG signal quality, can be ``"averageQRS"`` (default) or
        ``"zhao2018"``. Case-insensitive (``"SQI"`` is accepted as an alias of ``"zhao2018"``).
    approach : str
        The data fusion approach as documented in Zhao et al. (2018). Can be ``"simple"``
        or ``"fuzzy"``. If ``None`` (default), simple heuristic fusion is used.
    **kwargs
        Keyword arguments passed to ``signal_power()`` in the computation of basSQI and pSQI
        (only used by the ``"zhao2018"`` method).

    Returns
    -------
    array or str
        Vector containing the quality index ranging from 0 to 1 for ``"averageQRS"`` method,
        returns string classification (``Unacceptable``, ``Barely Acceptable`` or ``Excellent``)
        of the signal for ``"zhao2018"`` method.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported methods.

    References
    ----------
    * Zhao, Z., & Zhang, Y. (2018). "SQI quality evaluation mechanism of single-lead ECG signal
      based on simple heuristic fusion and fuzzy comprehensive evaluation". Frontiers in
      Physiology, 9, 727.
    """
    method = method.lower()  # remove capitalised letters

    if method in ["averageqrs"]:
        quality = _ecg_quality_averageQRS(ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate)
    # BUG FIX: `method` has been lowercased above, so the comparison value must
    # be "sqi" (the previous "SQI" literal could never match).
    elif method in ["zhao2018", "zhao", "sqi"]:
        if approach is None:
            approach = "simple"
        elif approach not in ["simple", "fuzzy"]:
            warn(
                "Please enter a relevant input if using method='zhao2018',"
                " 'simple' for simple heuristic fusion approach or"
                " 'fuzzy' for fuzzy comprehensive evaluation.",
                category=NeuroKitWarning,
            )
        # Forward **kwargs (documented but previously not accepted) to signal_power().
        quality = _ecg_quality_zhao2018(
            ecg_cleaned, rpeaks=rpeaks, sampling_rate=sampling_rate, mode=approach, **kwargs
        )
    else:
        # Previously an unknown method fell through to an UnboundLocalError.
        raise ValueError(
            "NeuroKit error: ecg_quality(): `method` should be 'averageQRS' or 'zhao2018'."
        )

    return quality
# =============================================================================
# Average QRS method
# =============================================================================
def _ecg_quality_averageQRS(ecg_cleaned, rpeaks=None, sampling_rate=1000):
    """Continuous 0-1 quality index based on the distance of each heartbeat
    from the average heartbeat (1 = closest to the average beat)."""
    # Detect R-peaks if not supplied.
    if rpeaks is None:
        _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
        rpeaks = rpeaks["ECG_R_Peaks"]

    # One row per heartbeat, one column per time point within the beat.
    epochs = ecg_segment(ecg_cleaned, rpeaks, sampling_rate)
    beats = epochs_to_df(epochs).pivot(index="Label", columns="Time", values="Signal")
    beats.index = beats.index.astype(int)
    beats = beats.sort_index()

    # Keep only complete heartbeats (rows without missing samples).
    n_missing = beats.T.isnull().sum().values
    complete = np.where(n_missing == 0)[0]
    beats = beats.iloc[complete, :]

    # Distance from the mean beat, rescaled to [0, 1] and flipped so that
    # 1 means "closest to the average beat".
    dist = distance(beats, method="mean")
    dist = rescale(np.abs(dist), to=[0, 1])
    dist = np.abs(dist - 1)

    # Heartbeats with missing samples get a quality of 0.
    quality = np.zeros(len(epochs))
    quality[complete] = dist

    # Interpolate the per-beat quality to one value per sample.
    return signal_interpolate(
        rpeaks, quality, x_new=np.arange(len(ecg_cleaned)), method="quadratic"
    )
# =============================================================================
# Zhao (2018) method
# =============================================================================
def _ecg_quality_zhao2018(
    ecg_cleaned,
    rpeaks=None,
    sampling_rate=1000,
    window=1024,
    kurtosis_method="fisher",
    mode="simple",
    **kwargs
):
    """Return ECG quality classification based on Zhao et al. (2018),
    using three indices: pSQI, kSQI, basSQI (qSQI not included here).

    If "Excellent", the ECG signal quality is good.
    If "Unacceptable", analyze the SQIs. If kSQI and basSQI are unqualified, it means that
    noise artefacts are present, and de-noising the signal is important before reevaluating the
    ECG signal quality. If pSQI (or qSQI, not included here) are unqualified, recollect ECG data.
    If "Barely acceptable", ECG quality assessment should be performed again to determine if the
    signal is excellent or unacceptable.

    Parameters
    ----------
    ecg_cleaned : Union[list, np.array, pd.Series]
        The cleaned ECG signal in the form of a vector of values.
    rpeaks : tuple or list
        The list of R-peak samples returned by `ecg_peaks()`. If None, peaks is computed from
        the signal input.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    window : int
        Length of each window in seconds. See `signal_psd()`.
    kurtosis_method : str
        Compute kurtosis (kSQI) based on "fisher" (default) or "pearson" definition.
    mode : str
        The data fusion approach as documented in Zhao et al. (2018). Can be "simple" (default)
        or "fuzzy".
    **kwargs
        Keyword arguments to be passed to `signal_power()`.

    Returns
    -------
    str
        Quality classification: "Excellent", "Barely acceptable" or "Unacceptable".
    """
    # Sanitize inputs
    if rpeaks is None:
        _, rpeaks = ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate)
        rpeaks = rpeaks["ECG_R_Peaks"]

    # Compute the three signal quality indices.
    kSQI = _ecg_quality_kSQI(ecg_cleaned, method=kurtosis_method)
    pSQI = _ecg_quality_pSQI(ecg_cleaned, sampling_rate=sampling_rate, window=window, **kwargs)
    basSQI = _ecg_quality_basSQI(ecg_cleaned, sampling_rate=sampling_rate, window=window, **kwargs)

    # Classify indices based on simple heuristic fusion
    if mode == "simple":
        # First stage rules (0 = unqualified, 1 = suspicious, 2 = optimal)

        # Maximum instantaneous heart rate (bpm), from the shortest RR interval.
        if len(rpeaks) > 1:
            ecg_rate = 60000.0 / (1000.0 / sampling_rate * np.min(np.diff(rpeaks)))
        else:
            ecg_rate = 1

        # pSQI classification: thresholds depend on the maximum heart rate.
        if ecg_rate < 130:
            l1, l2, l3 = 0.5, 0.8, 0.4
        else:
            l1, l2, l3 = 0.4, 0.7, 0.3

        if pSQI > l1 and pSQI < l2:
            pSQI_class = 2
        elif pSQI > l3 and pSQI < l1:
            pSQI_class = 1
        else:
            pSQI_class = 0

        # kSQI classification
        if kSQI > 5:
            kSQI_class = 2
        else:
            kSQI_class = 0

        # basSQI classification
        if basSQI >= 0.95:
            basSQI_class = 2
        elif basSQI < 0.9:
            basSQI_class = 0
        else:
            basSQI_class = 1

        class_matrix = np.array([pSQI_class, kSQI_class, basSQI_class])
        n_optimal = len(np.where(class_matrix == 2)[0])
        n_suspicious = len(np.where(class_matrix == 1)[0])
        n_unqualified = len(np.where(class_matrix == 0)[0])
        if n_unqualified >= 2 or (n_unqualified == 1 and n_suspicious == 2):
            return "Unacceptable"
        elif n_optimal >= 2 and n_unqualified == 0:
            return "Excellent"
        else:
            return "Barely acceptable"

    # Classify indices based on fuzzy comprehensive evaluation
    elif mode == "fuzzy":
        # *R1 left out because of lack of qSQI

        # pSQI: fuzzy memberships for High / Intermediate / Low quality.
        # UpH
        if pSQI <= 0.25:
            UpH = 0
        elif pSQI >= 0.35:
            UpH = 1
        else:
            UpH = 0.1 * (pSQI - 0.25)
        # UpI
        if pSQI < 0.18:
            UpI = 0
        elif pSQI >= 0.32:
            UpI = 0
        elif pSQI >= 0.18 and pSQI < 0.22:
            UpI = 25 * (pSQI - 0.18)
        elif pSQI >= 0.22 and pSQI < 0.28:
            UpI = 1
        else:
            UpI = 25 * (0.32 - pSQI)
        # UpJ
        if pSQI < 0.15:
            UpJ = 1
        elif pSQI > 0.25:
            UpJ = 0
        else:
            UpJ = 0.1 * (0.25 - pSQI)
        # Get R2
        R2 = np.array([UpH, UpI, UpJ])

        # kSQI: crisp membership (all-or-nothing at kSQI = 5).
        if kSQI > 5:
            R3 = np.array([1, 0, 0])
        else:
            R3 = np.array([0, 0, 1])

        # basSQI memberships.
        # UbH
        if basSQI <= 90:
            UbH = 0
        elif basSQI >= 95:
            UbH = basSQI / 100.0
        else:
            UbH = 1.0 / (1 + (1 / np.power(0.8718 * (basSQI - 90), 2)))
        # UbJ
        if basSQI <= 85:
            UbJ = 1
        else:
            UbJ = 1.0 / (1 + np.power((basSQI - 85) / 5.0, 2))
        # UbI
        UbI = 1.0 / (1 + np.power((basSQI - 95) / 2.5, 2))
        # Get R4
        R4 = np.array([UbH, UbI, UbJ])

        # evaluation matrix R (remove R1 because of lack of qSQI)
        R = np.vstack([R2, R3, R4])

        # weight vector W (original [0.4, 0.4, 0.1, 0.1] rearranged without qSQI)
        W = np.array([0.6, 0.2, 0.2])

        S = np.array([np.sum((R[:, 0] * W)), np.sum((R[:, 1] * W)), np.sum((R[:, 2] * W))])

        # Classify by the weighted score V.
        V = np.sum(np.power(S, 2) * [1, 2, 3]) / np.sum(np.power(S, 2))

        if V < 1.5:
            return "Excellent"
        elif V >= 2.40:
            # BUG FIX: was misspelled "Unnacceptable", inconsistent with the
            # "Unacceptable" label returned by the "simple" mode.
            return "Unacceptable"
        else:
            return "Barely acceptable"
def _ecg_quality_kSQI(ecg_cleaned, method="fisher"):
"""Return the kurtosis of the signal, with Fisher's or Pearson's method."""
if method == "fisher":
return scipy.stats.kurtosis(ecg_cleaned, fisher=True)
elif method == "pearson":
return scipy.stats.kurtosis(ecg_cleaned, fisher=False)
def _ecg_quality_pSQI(
    ecg_cleaned,
    sampling_rate=1000,
    window=1024,
    num_spectrum=[5, 15],
    dem_spectrum=[5, 40],
    **kwargs
):
    """Power Spectrum Distribution of the QRS wave: power in the 5-15 Hz band
    relative to the 5-40 Hz band (Welch periodogram).

    NOTE: the list defaults are never mutated, so sharing them across calls is safe.
    """
    band_powers = signal_power(
        ecg_cleaned,
        sampling_rate=sampling_rate,
        frequency_band=[num_spectrum, dem_spectrum],
        method="welch",
        normalize=False,
        window=window,
        **kwargs
    )
    # First column: numerator band power; second column: denominator band power.
    qrs_power = band_powers.iloc[0][0]
    total_power = band_powers.iloc[0][1]
    return qrs_power / total_power
def _ecg_quality_basSQI(
    ecg_cleaned,
    sampling_rate=1000,
    window=1024,
    num_spectrum=[0, 1],
    dem_spectrum=[0, 40],
    **kwargs
):
    """Relative power in the baseline: complement of the 0-1 Hz band power,
    relative to the 0-40 Hz band (Welch periodogram).

    NOTE: the list defaults are never mutated, so sharing them across calls is safe.
    """
    band_powers = signal_power(
        ecg_cleaned,
        sampling_rate=sampling_rate,
        frequency_band=[num_spectrum, dem_spectrum],
        method="welch",
        normalize=False,
        window=window,
        **kwargs
    )
    # First column: baseline band power; second column: broadband power.
    baseline_power = band_powers.iloc[0][0]
    broadband_power = band_powers.iloc[0][1]
    return (1 - baseline_power) / broadband_power
| 13,378 | 31.631707 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/__init__.py | """Submodule for NeuroKit."""
# Aliases
from ..signal import signal_rate as ecg_rate
from .ecg_analyze import ecg_analyze
from .ecg_clean import ecg_clean
from .ecg_delineate import ecg_delineate
from .ecg_eventrelated import ecg_eventrelated
from .ecg_findpeaks import ecg_findpeaks
from .ecg_intervalrelated import ecg_intervalrelated
from .ecg_peaks import ecg_peaks
from .ecg_phase import ecg_phase
from .ecg_plot import ecg_plot
from .ecg_process import ecg_process
from .ecg_quality import ecg_quality
from .ecg_rsp import ecg_rsp
from .ecg_segment import ecg_segment
from .ecg_simulate import ecg_simulate
from .ecg_invert import ecg_invert
__all__ = [
"ecg_simulate",
"ecg_clean",
"ecg_findpeaks",
"ecg_peaks",
"ecg_segment",
"ecg_process",
"ecg_plot",
"ecg_delineate",
"ecg_rsp",
"ecg_phase",
"ecg_quality",
"ecg_eventrelated",
"ecg_intervalrelated",
"ecg_analyze",
"ecg_rate",
"ecg_invert",
]
| 969 | 23.871795 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .ecg_eventrelated import ecg_eventrelated
from .ecg_intervalrelated import ecg_intervalrelated
def ecg_analyze(data, sampling_rate=1000, method="auto"):
    """**Automated Analysis ECG**

    Performs ECG analysis by computing relevant features and indices on either epochs
    (event-related analysis) or on longer periods of data (interval-related analysis), such as
    resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        ``epochs_create()``, or a DataFrame containing all epochs, usually obtained via
        ``epochs_to_df()``. Can also take a DataFrame of processed signals from a longer period of
        data, typically generated by ``ecg_process()`` or ``bio_process()``. Can also take a dict
        containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``auto``,
        where the method will be chosen based on the mean duration of the data
        (``"event-related"`` for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed ECG features. If event-related analysis is conducted,
        each epoch is indicated by the ``Label`` column. See ``ecg_eventrelated()`` and
        ``ecg_intervalrelated()`` docstrings for details.

    See Also
    --------
    .bio_process, ecg_process, .epochs_create, ecg_eventrelated, ecg_intervalrelated
    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity checks: epochs must carry a "Label" column.
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values

        if not any("Label" in i for i in colnames):
            raise ValueError(
                "NeuroKit error: ecg_analyze(): Wrong input or method,"
                "we couldn't extract epochs features."
            )
        else:
            features = ecg_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = ecg_intervalrelated(data, sampling_rate=sampling_rate)

    # Auto: choose by mean duration (>= 10 s -> interval-related)
    elif method in ["auto"]:
        if isinstance(data, dict):
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = ecg_intervalrelated(data, sampling_rate=sampling_rate)
            else:
                features = ecg_eventrelated(data)

        if isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # Length of one epoch = number of samples sharing the most
                # frequent label.
                # BUG FIX: `value_counts()[0]` is label-based lookup in modern
                # pandas (KeyError for non-integer labels, deprecated
                # positional fallback otherwise); `.iloc[0]` is the intended
                # positional access to the largest count.
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = ecg_intervalrelated(data, sampling_rate=sampling_rate)
            else:
                features = ecg_eventrelated(data)

    return features
| 4,548 | 33.462121 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_intervalrelated.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..hrv import hrv
def ecg_intervalrelated(data, sampling_rate=1000):
    """**Interval-related analysis of ECG**

    Performs ECG analysis on longer periods of data (typically > 10 seconds), such as
    resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A DataFrame containing the different processed signal(s) as different columns, typically
        generated by ``ecg_process()`` or ``bio_process()``. Can also take a dict containing sets
        of separately processed DataFrames.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to 1000Hz.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed ECG features. The analyzed features consist of the
        following:

        * ``ECG_Rate_Mean``: the mean heart rate.
        * ``ECG_HRV``: the different heart rate variability metrices.

        See :func:`.hrv_summary` docstrings for details.

    Raises
    ------
    ValueError
        If ``data`` is neither a DataFrame nor a dict, or if the required ``ECG_Rate`` column
        cannot be found.

    See Also
    --------
    .bio_process, ecg_eventrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_resting_5min_100hz")

      # Process the data
      df, info = nk.ecg_process(data["ECG"], sampling_rate=100)

      # Single dataframe is passed
      nk.ecg_intervalrelated(df, sampling_rate=100)

      epochs = nk.epochs_create(df, events=[0, 15000], sampling_rate=100, epochs_end=150)
      nk.ecg_intervalrelated(epochs)

    """
    intervals = {}

    # Format input
    if isinstance(data, pd.DataFrame):
        rate_cols = [col for col in data.columns if "ECG_Rate" in col]
        if len(rate_cols) == 1:
            intervals.update(_ecg_intervalrelated_formatinput(data))
            intervals.update(_ecg_intervalrelated_hrv(data, sampling_rate))
        else:
            raise ValueError(
                "NeuroKit error: ecg_intervalrelated(): Wrong input,"
                "we couldn't extract heart rate. Please make sure"
                "your DataFrame contains an `ECG_Rate` column."
            )
        ecg_intervals = pd.DataFrame.from_dict(intervals, orient="index").T

    elif isinstance(data, dict):
        for index in data:
            intervals[index] = {}  # Initialize empty container

            # Add label info
            intervals[index]["Label"] = data[index]["Label"].iloc[0]

            # Rate
            intervals[index] = _ecg_intervalrelated_formatinput(data[index], intervals[index])

            # HRV
            intervals[index] = _ecg_intervalrelated_hrv(
                data[index], sampling_rate, intervals[index]
            )

        ecg_intervals = pd.DataFrame.from_dict(intervals, orient="index")

    else:
        # Previously an unsupported input type fell through both branches and crashed with a
        # confusing UnboundLocalError on `return ecg_intervals`. Fail fast with a clear message.
        raise ValueError(
            "NeuroKit error: ecg_intervalrelated(): Wrong input type, "
            "please pass either a DataFrame or a dict of DataFrames."
        )

    return ecg_intervals
# =============================================================================
# Internals
# =============================================================================
def _ecg_intervalrelated_formatinput(data, output={}):
# Sanitize input
colnames = data.columns.values
if len([i for i in colnames if "ECG_Rate" in i]) == 0:
raise ValueError(
"NeuroKit error: ecg_intervalrelated(): Wrong input,"
"we couldn't extract heart rate. Please make sure"
"your DataFrame contains an `ECG_Rate` column."
)
signal = data["ECG_Rate"].values
output["ECG_Rate_Mean"] = np.mean(signal)
return output
def _ecg_intervalrelated_hrv(data, sampling_rate, output={}):
# Sanitize input
colnames = data.columns.values
if len([i for i in colnames if "ECG_R_Peaks" in i]) == 0:
raise ValueError(
"NeuroKit error: ecg_intervalrelated(): Wrong input,"
"we couldn't extract R-peaks. Please make sure"
"your DataFrame contains an `ECG_R_Peaks` column."
)
# Transform rpeaks from "signal" format to "info" format.
rpeaks = np.where(data["ECG_R_Peaks"].values)[0]
rpeaks = {"ECG_R_Peaks": rpeaks}
results = hrv(rpeaks, sampling_rate=sampling_rate)
for column in results.columns:
# Add and convert to float
output[column] = results[[column]].values
return output
| 4,259 | 30.791045 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_peaks.py | from ..signal import signal_fixpeaks, signal_formatpeaks
from .ecg_findpeaks import ecg_findpeaks
def ecg_peaks(
ecg_cleaned, sampling_rate=1000, method="neurokit", correct_artifacts=False, **kwargs
):
"""**Find R-peaks in an ECG signal**
Find R-peaks in an ECG signal using the specified method. The method accepts unfiltered ECG
signals as input, although it is expected that a filtered (cleaned) ECG will result in better
results.
Different algorithms for peak-detection include:
* **neurokit** (default): QRS complexes are detected based on the steepness of the absolute
gradient of the ECG signal. Subsequently, R-peaks are detected as local maxima in
the QRS complexes. Unpublished, but see https://github.com/neuropsychology/NeuroKit/issues/476
* **pantompkins1985**: Algorithm by Pan & Tompkins (1985).
* **hamilton2002**: Algorithm by Hamilton (2002).
* **zong2003**: Algorithm by Zong et al. (2003).
* **martinez2004**: Algorithm by Martinez et al (2004).
* **christov2004**: Algorithm by Christov (2004).
* **gamboa2008**: Algorithm by Gamboa (2008).
* **elgendi2010**: Algorithm by Elgendi et al. (2010).
* **engzeemod2012**: Original algorithm by Engelse & Zeelenberg (1979) modified by Lourenço et
al. (2012).
* **kalidas2017**: Algorithm by Kalidas et al. (2017).
* **nabian2018**: Algorithm by Nabian et al. (2018) based on the Pan-Tompkins algorithm.
* **rodrigues2021**: Adaptation of the work by Sadhukhan & Mitra (2012) and Gutiérrez-Rivas et
al. (2015) by Rodrigues et al. (2021).
* **koka2022**: Algorithm by Koka et al. (2022) based on the visibility graphs.
* **promac**: ProMAC combines the result of several R-peak detectors in a probabilistic way.
For a given peak detector, the binary signal representing the peak locations is convolved
with a Gaussian distribution, resulting in a probabilistic representation of each peak
location. This procedure is repeated for all selected methods and the resulting
signals are accumulated. Finally, a threshold is used to accept or reject the peak locations.
See this discussion for more information on the origins of the method:
https://github.com/neuropsychology/NeuroKit/issues/222
.. note::
Please help us improve the methods' documentation by adding a small description.
Parameters
----------
ecg_cleaned : Union[list, np.array, pd.Series]
The cleaned ECG channel as returned by ``ecg_clean()``.
sampling_rate : int
The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
method : string
The algorithm to be used for R-peak detection.
correct_artifacts : bool
Whether or not to first identify and fix artifacts, using the method by
Lipponen & Tarvainen (2019).
**kwargs
Additional keyword arguments, usually specific for each method.
Returns
-------
signals : DataFrame
A DataFrame of same length as the input signal in which occurrences of R-peaks marked as
``1`` in a list of zeros with the same length as ``ecg_cleaned``. Accessible with the keys
``"ECG_R_Peaks"``.
info : dict
A dictionary containing additional information, in this case the samples at which R-peaks
occur, accessible with the key ``"ECG_R_Peaks"``, as well as the signals' sampling rate,
accessible with the key ``"sampling_rate"``.
See Also
--------
ecg_clean, ecg_findpeaks, .signal_fixpeaks
Examples
--------
* **Example 1**: Find R-peaks using the default method (``"neurokit"``).
.. ipython:: python
import neurokit2 as nk
ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
signals, info = nk.ecg_peaks(ecg, correct_artifacts=True)
@savefig p_ecg_peaks1.png scale=100%
nk.events_plot(info["ECG_R_Peaks"], ecg)
@suppress
plt.close()
* **Example 2**: Compare different methods
.. ipython:: python
# neurokit (default)
cleaned = nk.ecg_clean(ecg, method="neurokit")
_, neurokit = nk.ecg_peaks(cleaned, method="neurokit")
# pantompkins1985
cleaned = nk.ecg_clean(ecg, method="pantompkins1985")
_, pantompkins1985 = nk.ecg_peaks(cleaned, method="pantompkins1985")
# nabian2018
_, nabian2018 = nk.ecg_peaks(ecg, method="nabian2018")
# hamilton2002
cleaned = nk.ecg_clean(ecg, method="hamilton2002")
_, hamilton2002 = nk.ecg_peaks(cleaned, method="hamilton2002")
# martinez2004
_, martinez2004 = nk.ecg_peaks(ecg, method="martinez2004")
# zong2003
_, zong2003 = nk.ecg_peaks(ecg, method="zong2003")
# christov2004
_, christov2004 = nk.ecg_peaks(cleaned, method="christov2004")
# gamboa2008
cleaned = nk.ecg_clean(ecg, method="gamboa2008")
_, gamboa2008 = nk.ecg_peaks(cleaned, method="gamboa2008")
# elgendi2010
cleaned = nk.ecg_clean(ecg, method="elgendi2010")
_, elgendi2010 = nk.ecg_peaks(cleaned, method="elgendi2010")
# engzeemod2012
cleaned = nk.ecg_clean(ecg, method="engzeemod2012")
_, engzeemod2012 = nk.ecg_peaks(cleaned, method="engzeemod2012")
# kalidas2017
cleaned = nk.ecg_clean(ecg, method="kalidas2017")
_, kalidas2017 = nk.ecg_peaks(cleaned, method="kalidas2017")
# rodrigues2021
_, rodrigues2021 = nk.ecg_peaks(ecg, method="rodrigues2021")
# koka2022
_, koka2022 = nk.ecg_peaks(ecg, method="koka2022")
# Collect all R-peak lists by iterating through the result dicts
rpeaks = [
i["ECG_R_Peaks"]
for i in [
neurokit,
pantompkins1985,
nabian2018,
hamilton2002,
martinez2004,
christov2004,
gamboa2008,
elgendi2010,
engzeemod2012,
kalidas2017,
rodrigues2021,
koka2022
]
]
# Visualize results
@savefig p_ecg_peaks2.png scale=100%
nk.events_plot(rpeaks, ecg)
@suppress
plt.close()
* **Example 3**: Method-agreement procedure ('promac')
.. ipython:: python
ecg = nk.ecg_simulate(duration=10, sampling_rate=500)
ecg = nk.signal_distort(ecg,
sampling_rate=500,
noise_amplitude=0.05, noise_frequency=[25, 50],
artifacts_amplitude=0.05, artifacts_frequency=50)
@savefig p_ecg_peaks3.png scale=100%
info = nk.ecg_findpeaks(ecg, sampling_rate=500, method="promac", show=True)
@suppress
plt.close()
References
----------
* Pan, J., & Tompkins, W. J. (1985). A real-time QRS detection algorithm. IEEE transactions
on biomedical engineering, (3), 230-236.
* Hamilton, P. (2002). Open source ECG analysis. In Computers in cardiology (pp. 101-104).
IEEE.
* Zong, W., Heldt, T., Moody, G. B., & Mark, R. G. (2003). An open-source algorithm to
detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003 (pp.
259-262). IEEE.
* Zong, W., Moody, G. B., & Jiang, D. (2003, September). A robust open-source algorithm to
detect onset and duration of QRS complexes. In Computers in Cardiology, 2003 (pp.
737-740). IEEE.
* Martinez, J. P., Almeida, R., Olmos, S., Rocha, A. P., & Laguna, P. (2004) A wavelet-based
ECG delineator: evaluation on standard databases. IEEE Trans Biomed Eng, 51(4), 570–581.
* Christov, I. I. (2004). Real time electrocardiogram QRS detection using combined adaptive
threshold. Biomedical engineering online, 3(1), 1-9.
* Gamboa, H. (2008). Multi-modal behavioral biometrics based on HCI and electrophysiology
(Doctoral dissertation, Universidade Técnica de Lisboa).
* Elgendi, M., Jonkman, M., & De Boer, F. (2010). Frequency Bands Effects on QRS Detection.
Biosignals, Proceedings of the Third International Conference on Bio-inspired Systems and
Signal Processing, 428-431.
* Engelse, W. A., & Zeelenberg, C. (1979). A single scan algorithm for QRS-detection and
feature extraction. Computers in cardiology, 6(1979), 37-42.
* Lourenço, A., Silva, H., Leite, P., Lourenço, R., & Fred, A. L. (2012, February). Real
Time Electrocardiogram Segmentation for Finger based ECG Biometrics. In Biosignals (pp.
49-54).
* Kalidas, V., & Tamil, L. (2017, October). Real-time QRS detector using stationary wavelet
transform for automated ECG analysis. In 2017 IEEE 17th International Conference on
Bioinformatics and Bioengineering (BIBE) (pp. 457-461). IEEE.
* Nabian, M., Yin, Y., Wormwood, J., Quigley, K. S., Barrett, L. F., Ostadabbas, S. (2018).
An Open-Source Feature Extraction Tool for the Analysis of Peripheral Physiological Data.
IEEE Journal of Translational Engineering in Health and Medicine, 6, 1-11.
* Sadhukhan, D., & Mitra, M. (2012). R-peak detection algorithm for ECG using double
difference and RR interval processing. Procedia Technology, 4, 873-877.
* Gutiérrez-Rivas, R., García, J. J., Marnane, W. P., & Hernández, A. (2015). Novel
real-time low-complexity QRS complex detector based on adaptive thresholding. IEEE
Sensors Journal, 15(10), 6036-6043.
* Rodrigues, T., Samoutphonh, S., Silva, H., & Fred, A. (2021, January). A Low-Complexity
R-peak Detection Algorithm with Adaptive Thresholding for Wearable Devices. In 2020 25th
International Conference on Pattern Recognition (ICPR) (pp. 1-8). IEEE.
* T. Koka and M. Muma, "Fast and Sample Accurate R-Peak Detection for Noisy ECG Using
Visibility Graphs," 2022 44th Annual International Conference of the IEEE Engineering in
Medicine & Biology Society (EMBC), 2022, pp. 121-126.
* ``nabian2018``
* ``gamboa2008``
* ``hamilton2002``
* ``christov2004``
* ``engzeemod2012``
* ``elgendi2010``
* ``kalidas2017``
* ``rodrigues2021``
* ``koka2022``
* ``promac``
* **Unpublished.** It runs different methods and derives a probability index using
convolution. See this discussion for more information on the method:
https://github.com/neuropsychology/NeuroKit/issues/222
* Lipponen, J. A., & Tarvainen, M. P. (2019). A robust algorithm for heart rate variability
time series artefact correction using novel beat classification. Journal of medical
engineering & technology, 43(3), 173-181.
"""
rpeaks = ecg_findpeaks(ecg_cleaned, sampling_rate=sampling_rate, method=method, **kwargs)
if correct_artifacts:
_, rpeaks = signal_fixpeaks(
rpeaks, sampling_rate=sampling_rate, iterative=True, method="Kubios"
)
rpeaks = {"ECG_R_Peaks": rpeaks}
instant_peaks = signal_formatpeaks(rpeaks, desired_length=len(ecg_cleaned), peak_indices=rpeaks)
signals = instant_peaks
info = rpeaks
info["sampling_rate"] = sampling_rate # Add sampling rate in dict info
return signals, info
| 11,219 | 40.865672 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/ecg/ecg_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_filter
def ecg_clean(ecg_signal, sampling_rate=1000, method="neurokit", **kwargs):
    """**ECG Signal Cleaning**

    Clean an ECG signal to remove noise and improve peak-detection accuracy. Several cleaning
    pipelines are implemented:

    * ``'neurokit'`` (default): 0.5 Hz high-pass butterworth filter (order = 5), followed by
      powerline filtering (see ``signal_filter()``). By default, ``powerline = 50``.
    * ``'biosppy'``: Method used in the BioSPPy package. A FIR filter ([0.67, 45] Hz; order = 1.5 *
      SR). The 0.67 Hz cutoff value was selected based on the fact that there are no morphological
      features below the heartrate (assuming a minimum heart rate of 40 bpm).
    * ``'pantompkins1985'``: Method used in Pan & Tompkins (1985).
    * ``'hamilton2002'``: Method used in Hamilton (2002).
    * ``'elgendi2010'``: Method used in Elgendi et al. (2010).
    * ``'engzeemod2012'``: Method used in Engelse & Zeelenberg (1979), as modified by Lourenço
      et al. (2012).
    * ``'vgraph'`` / ``'koka2022'``: Filtering used by Koka & Muma (2022).
    * Several method names (e.g. ``'christov2004'``, ``'zong2003'``, ``'kalidas2017'``) apply no
      filtering here and return the signal unchanged, since the corresponding peak detectors
      operate on the raw signal.

    Parameters
    ----------
    ecg_signal : Union[list, np.array, pd.Series]
        The raw ECG channel.
    sampling_rate : int
        The sampling frequency of ``ecg_signal`` (in Hz, i.e., samples/second). Defaults to 1000.
    method : str
        The processing pipeline to apply. Can be one of ``"neurokit"`` (default),
        ``"biosppy"``, ``"pantompkins1985"``, ``"hamilton2002"``, ``"elgendi2010"``,
        ``"engzeemod2012"``.
    **kwargs
        Other arguments to be passed to specific methods.

    Returns
    -------
    array
        Vector containing the cleaned ECG signal.

    See Also
    --------
    ecg_peaks, ecg_process, ecg_plot, .signal_rate, .signal_filter

    References
    --------------
    * Engelse, W. A., & Zeelenberg, C. (1979). A single scan algorithm for QRS-detection and
      feature extraction. Computers in cardiology, 6(1979), 37-42.
    * Pan, J., & Tompkins, W. J. (1985). A real-time QRS detection algorithm. IEEE transactions
      on biomedical engineering, (3), 230-236.
    * Hamilton, P. (2002). Open source ECG analysis. In Computers in cardiology (pp. 101-104).
      IEEE.
    * Elgendi, M., Jonkman, M., & De Boer, F. (2010). Frequency Bands Effects on QRS Detection.
      Biosignals, Proceedings of the Third International Conference on Bio-inspired Systems and
      Signal Processing, 428-431.

    """
    ecg_signal = as_vector(ecg_signal)

    # Forward-fill missing samples before filtering, warning the user about it.
    n_missing = np.sum(np.isnan(ecg_signal))
    if n_missing > 0:
        warn(
            "There are " + str(n_missing) + " missing data points in your signal."
            " Filling missing values by using the forward filling method.",
            category=NeuroKitWarning,
        )
        ecg_signal = _ecg_clean_missing(ecg_signal)

    # Harmonize capitalization and dispatch to the matching pipeline (guard-clause style).
    method = method.lower()

    if method in ("nk", "nk2", "neurokit", "neurokit2"):
        return _ecg_clean_nk(ecg_signal, sampling_rate, **kwargs)
    if method in ("biosppy", "gamboa2008"):
        return _ecg_clean_biosppy(ecg_signal, sampling_rate)
    if method in ("pantompkins", "pantompkins1985"):
        return _ecg_clean_pantompkins(ecg_signal, sampling_rate)
    if method in ("hamilton", "hamilton2002"):
        return _ecg_clean_hamilton(ecg_signal, sampling_rate)
    if method in ("elgendi", "elgendi2010"):
        return _ecg_clean_elgendi(ecg_signal, sampling_rate)
    if method in ("engzee", "engzee2012", "engzeemod", "engzeemod2012"):
        return _ecg_clean_engzee(ecg_signal, sampling_rate)
    if method in ("vg", "vgraph", "koka2022"):
        return _ecg_clean_vgraph(ecg_signal, sampling_rate)
    if method in (
        "christov",
        "christov2004",
        "ssf",
        "slopesumfunction",
        "zong",
        "zong2003",
        "kalidas2017",
        "swt",
        "kalidas",
        "kalidastamil",
        "kalidastamil2017",
    ):
        # These detectors expect the raw signal: no cleaning is applied.
        return ecg_signal

    raise ValueError(
        "NeuroKit error: ecg_clean(): 'method' should be "
        "one of 'neurokit', 'biosppy', 'pantompkins1985',"
        " 'hamilton2002', 'elgendi2010', 'engzeemod2012'."
    )
# =============================================================================
# Handle missing data
# =============================================================================
def _ecg_clean_missing(ecg_signal):
ecg_signal = pd.DataFrame.pad(pd.Series(ecg_signal))
return ecg_signal
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_clean_nk(ecg_signal, sampling_rate=1000, **kwargs):
    """NeuroKit pipeline: 0.5 Hz Butterworth high-pass (order 5), then powerline removal."""
    # First remove slow drift and DC offset with a high-pass Butterworth filter...
    highpassed = signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=0.5,
        method="butterworth",
        order=5,
    )

    # ...then suppress powerline interference (50 Hz by default, configurable via kwargs).
    return signal_filter(
        signal=highpassed, sampling_rate=sampling_rate, method="powerline", **kwargs
    )
# =============================================================================
# Biosppy
# =============================================================================
def _ecg_clean_biosppy(ecg_signal, sampling_rate=1000):
"""Adapted from https://github.com/PIA-
Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L69.
"""
# The order and frequency was recently changed
# (see https://github.com/scientisst/BioSPPy/pull/12)
order = int(1.5 * sampling_rate)
if order % 2 == 0:
order += 1 # Enforce odd number
# -> filter_signal()
frequency = [0.67, 45]
# -> get_filter()
# -> _norm_freq()
frequency = (
2 * np.array(frequency) / sampling_rate
) # Normalize frequency to Nyquist Frequency (Fs/2).
# -> get coeffs
a = np.array([1])
b = scipy.signal.firwin(numtaps=order, cutoff=frequency, pass_zero=False)
# _filter_signal()
filtered = scipy.signal.filtfilt(b, a, ecg_signal)
# DC offset
filtered -= np.mean(filtered)
return filtered
# =============================================================================
# Pan & Tompkins (1985)
# =============================================================================
def _ecg_clean_pantompkins(ecg_signal, sampling_rate=1000):
    """Pan & Tompkins (1985) pre-processing: 5-15 Hz band-pass (Butterworth with initial
    conditions, order 1).

    Adapted from https://github.com/PIA-
    Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L69.
    """
    return signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=5,
        highcut=15,
        method="butterworth_zi",
        order=1,
    )
# =============================================================================
# Elgendi et al. (2010)
# =============================================================================
def _ecg_clean_elgendi(ecg_signal, sampling_rate=1000):
    """Elgendi et al. (2010) pre-processing: 8-20 Hz band-pass (Butterworth with initial
    conditions, order 2). From https://github.com/berndporr/py-ecg-detectors/

    - Elgendi, Mohamed & Jonkman, Mirjam & De Boer, Friso. (2010). Frequency Bands Effects on QRS
      Detection. The 3rd International Conference on Bio-inspired Systems and Signal Processing
      (BIOSIGNALS2010). 428-431.
    """
    return signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=8,
        highcut=20,
        method="butterworth_zi",
        order=2,
    )
# =============================================================================
# Hamilton (2002)
# =============================================================================
def _ecg_clean_hamilton(ecg_signal, sampling_rate=1000):
    """Hamilton (2002) pre-processing: 8-16 Hz band-pass (Butterworth with initial conditions,
    order 1).

    Adapted from https://github.com/PIA-
    Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L69.
    """
    return signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=8,
        highcut=16,
        method="butterworth_zi",
        order=1,
    )
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_clean_engzee(ecg_signal, sampling_rate=1000):
    """Engelse & Zeelenberg (1979) / Lourenço et al. (2012) pre-processing, with
    ``lowcut=52 > highcut=48`` (order 4, Butterworth with initial conditions).

    NOTE(review): lowcut being above highcut presumably makes ``signal_filter`` act as a
    48-52 Hz band-stop (powerline notch) — confirm against the signal_filter implementation.

    From https://github.com/berndporr/py-ecg-detectors/

    - C. Zeelenberg, A single scan algorithm for QRS detection and feature extraction, IEEE Comp.
      in Cardiology, vol. 6, pp. 37-42, 1979.
    - A. Lourenco, H. Silva, P. Leite, R. Lourenco and A. Fred, "Real Time Electrocardiogram
      Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
    """
    return signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=52,
        highcut=48,
        method="butterworth_zi",
        order=4,
    )
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_clean_vgraph(ecg_signal, sampling_rate=1000):
    """Koka & Muma (2022) pre-processing: 4 Hz Butterworth high-pass (order 2).

    References
    ----------
    - T. Koka and M. Muma (2022), Fast and Sample Accurate R-Peak Detection for Noisy ECG Using
      Visibility Graphs. In: 2022 44th Annual International Conference of the IEEE Engineering
      in Medicine & Biology Society (EMBC). Uses the Pan and Tompkins thresholding.
    """
    return signal_filter(
        signal=ecg_signal,
        sampling_rate=sampling_rate,
        lowcut=4,
        method="butterworth",
        order=2,
    )
| 11,249 | 34.046729 | 114 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_fixpeaks.py | # -*- coding: utf-8 -*-
from ..signal.signal_formatpeaks import _signal_formatpeaks_sanitize
def rsp_fixpeaks(peaks, troughs=None):
    """**Correct RSP peaks**

    Low-level function used by :func:`.rsp_peaks` to correct the extrema found by
    :func:`.rsp_findpeaks`. No actual correction is currently implemented for RSP: the function
    only standardizes the output format. See :func:`.rsp_peaks` for details.

    Parameters
    ----------
    peaks : list or array or DataFrame or Series or dict
        The samples at which respiration peaks (exhalation onsets) occur. If a dict or a DataFrame
        is passed, it is assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    troughs : list or array or DataFrame or Series or dict
        The samples at which respiration troughs (inhalation onsets) occur. Same format
        assumptions as ``peaks``.

    Returns
    -------
    info : dict
        A dictionary containing additional information, in this case the samples at which
        inhalation onsets and exhalation onsets occur, accessible with the keys ``"RSP_Troughs"``
        and ``"RSP_Peaks"``, respectively.

    See Also
    --------
    rsp_clean, rsp_findpeaks, rsp_peaks, rsp_amplitude, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)
      cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
      info = nk.rsp_findpeaks(cleaned)
      info = nk.rsp_fixpeaks(info)
      @savefig p_rsp_fixpeaks1.png scale=100%
      nk.events_plot([info["RSP_Peaks"], info["RSP_Troughs"]], cleaned)
      @suppress
      plt.close()

    """
    # Sanitize the input containers into plain index arrays.
    peaks, troughs = _rsp_fixpeaks_retrieve(peaks, troughs)

    # No fixing is performed for now: simply repackage into the standard dict format.
    return {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
# =============================================================================
# Internals
# =============================================================================
def _rsp_fixpeaks_retrieve(peaks, troughs=None):
    """Extract peak (and, when absent, trough) sample indices from array/dict/DataFrame input."""
    container = peaks  # keep a handle on the original container: troughs may live in it too
    peaks = _signal_formatpeaks_sanitize(container, key="Peaks")
    if troughs is None:
        troughs = _signal_formatpeaks_sanitize(container, key="Troughs")
    return peaks, troughs
| 2,410 | 32.957746 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_symmetry.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning, find_closest
from ..signal import signal_interpolate
from ..stats import rescale
from .rsp_fixpeaks import _rsp_fixpeaks_retrieve
def rsp_symmetry(
    rsp_cleaned,
    peaks,
    troughs=None,
    interpolation_method="monotone_cubic",
    show=False,
):
    """**Respiration Cycle Symmetry Features**

    Compute symmetry features of the respiration cycle, such as the Peak-Trough symmetry and the
    Rise-Decay symmetry (see Cole, 2019). Note that the values for each cycle are interpolated to
    the same length as the signal (and the first and last cycles, for which one cannot compute the
    symmetry characteristics, are padded).

    .. figure:: ../img/cole2019.png
       :alt: Figure from Cole and Voytek (2019).
       :target: https://journals.physiology.org/doi/full/10.1152/jn.00273.2019

    Parameters
    ----------
    rsp_cleaned : Union[list, np.array, pd.Series]
        The cleaned respiration channel as returned by :func:`.rsp_clean`.
    peaks : list or array or DataFrame or Series or dict
        The samples at which the inhalation peaks occur. If a dict or a DataFrame is passed, it is
        assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    troughs : list or array or DataFrame or Series or dict
        The samples at which the inhalation troughs occur. If a dict or a DataFrame is passed, it
        is assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    interpolation_method : str
        Method used to interpolate the amplitude between peaks. See :func:`.signal_interpolate`.
        ``"monotone_cubic"`` is chosen as the default interpolation method since it ensures
        monotone interpolation between data point (i.e., it prevents physiologically implausible
        "overshoots" or "undershoots" in the y-direction). In contrast, the widely used cubic
        spline interpolation does not ensure monotonicity.
    show : bool
        If True, show a plot of the symmetry features.

    Returns
    -------
    pd.DataFrame
        A DataFrame of same length as :func:`.rsp_signal` containing the following columns:

        * ``"RSP_Symmetry_PeakTrough"``
        * ``"RSP_Symmetry_RiseDecay"``

    See Also
    --------
    rsp_clean, rsp_peaks, rsp_amplitude, rsp_phase

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=45, respiratory_rate=15)
      cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
      peak_signal, info = nk.rsp_peaks(cleaned)

      @savefig p_rsp_symmetry1.png scale=100%
      symmetry = nk.rsp_symmetry(cleaned, peak_signal, show=True)
      @suppress
      plt.close()

    References
    ----------
    * Cole, S., & Voytek, B. (2019). Cycle-by-cycle analysis of neural oscillations. Journal of
      neurophysiology, 122(2), 849-861.

    """
    # Format input.
    peaks, troughs = _rsp_fixpeaks_retrieve(peaks, troughs)

    # Sanity checks -----------------------------------------------------------
    # Symmetry needs several complete cycles; with too few extrema the estimates are meaningless,
    # so an all-NaN frame is returned instead of raising.
    failed_checks = False
    if len(peaks) <= 4 or len(troughs) <= 4:
        warn(
            "Not enough peaks and troughs (signal too short?) to compute symmetry"
            + ", returning nan for symmetry.",
            category=NeuroKitWarning,
        )
        failed_checks = True
    # NOTE(review): `peaks - troughs` assumes both arrays have equal length and that each trough
    # precedes its paired peak — presumably guaranteed by rsp_findpeaks; confirm.
    if np.any(peaks - troughs < 0):
        warn(
            "Peaks and troughs are not correctly aligned (i.e., not consecutive)"
            + ", returning nan for symmetry.",
            category=NeuroKitWarning,
        )
        failed_checks = True
    if failed_checks:
        return pd.DataFrame(
            {
                "RSP_Symmetry_PeakTrough": np.full(len(rsp_cleaned), np.nan),
                "RSP_Symmetry_RiseDecay": np.full(len(rsp_cleaned), np.nan),
            }
        )

    # Compute symmetry features -----------------------------------------------
    # See https://twitter.com/bradleyvoytek/status/1591495571269124096/photo/1
    # Rise-decay symmetry: fraction of each cycle spent rising (trough -> peak) relative to the
    # whole cycle duration (rise + following decay).
    through_to_peak = peaks - troughs
    peak_to_through = troughs[1:] - peaks[:-1]
    risedecay_symmetry = through_to_peak[:-1] / (through_to_peak[:-1] + peak_to_through)

    # Find half-way points (trough to peak): the sample where the signal crosses the amplitude
    # midpoint on the ascending flank.
    halfway_values = (rsp_cleaned[peaks] - rsp_cleaned[troughs]) / 2
    halfway_values += rsp_cleaned[troughs]
    halfway_locations = np.zeros(len(halfway_values))
    for i in range(len(peaks)):
        segment = rsp_cleaned[troughs[i] : peaks[i]]
        # find_closest returns an index relative to the segment; re-offset to absolute samples.
        halfway_locations[i] = (
            find_closest(halfway_values[i], segment, return_index=True) + troughs[i]
        )

    # Find half-way points (peak to next through): same midpoint crossing on the descending flank.
    halfway_values2 = (rsp_cleaned[peaks[:-1]] - rsp_cleaned[troughs[1::]]) / 2
    halfway_values2 += rsp_cleaned[troughs[1::]]
    halfway_locations2 = np.zeros(len(halfway_values2))
    for i in range(len(peaks[:-1])):
        segment = rsp_cleaned[peaks[i] : troughs[i + 1]]
        halfway_locations2[i] = (
            find_closest(halfway_values2[i], segment, return_index=True) + peaks[i]
        )

    # Peak-trough symmetry: fraction of time spent in the descending-to-ascending portion
    # between consecutive midpoint crossings.
    asc_to_desc = halfway_locations2[1:] - halfway_locations[1:-1]
    desc_to_asc = halfway_locations[1:-1] - halfway_locations2[:-1]
    peaktrough_symmetry = desc_to_asc / (asc_to_desc + desc_to_asc)

    # Interpolate to length of rsp_cleaned.
    # risedecay has len(peaks) - 1 values anchored at peaks[:-1]; peaktrough has len(peaks) - 2
    # values anchored at peaks[1:-1]. Edge cycles are padded by the interpolation.
    risedecay_symmetry = signal_interpolate(
        peaks[:-1],
        risedecay_symmetry,
        x_new=np.arange(len(rsp_cleaned)),
        method=interpolation_method,
    )
    peaktrough_symmetry = signal_interpolate(
        peaks[1:-1],
        peaktrough_symmetry,
        x_new=np.arange(len(rsp_cleaned)),
        method=interpolation_method,
    )

    if show is True:
        normalized = rescale(rsp_cleaned)  # Rescale to 0-1
        plt.plot(normalized, color="grey", label="Respiration (normalized)")
        plt.scatter(peaks, normalized[peaks], color="red")
        plt.scatter(troughs, normalized[troughs], color="blue")
        plt.scatter(halfway_locations, normalized[halfway_locations.astype(int)], color="orange")
        plt.scatter(
            halfway_locations2, normalized[halfway_locations2.astype(int)], color="darkgreen"
        )
        plt.plot(risedecay_symmetry, color="purple", label="Rise-decay symmetry")
        plt.plot(peaktrough_symmetry, color="green", label="Peak-trough symmetry")
        plt.legend()

    return pd.DataFrame(
        {
            "RSP_Symmetry_PeakTrough": peaktrough_symmetry,
            "RSP_Symmetry_RiseDecay": risedecay_symmetry,
        }
    )
| 6,702 | 36.238889 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def rsp_plot(rsp_signals, sampling_rate=None, figsize=(10, 10), static=True):
    """**Visualize respiration (RSP) data**

    Parameters
    ----------
    rsp_signals : DataFrame
        DataFrame obtained from :func:`.rsp_process`.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    figsize : tuple
        The size of the figure (width, height) in inches.
    static : bool
        If True, a static plot will be generated with matplotlib.
        If False, an interactive plot will be generated with plotly.
        Defaults to True.

    See Also
    --------
    rsp_process

    Returns
    -------
    fig
        Figure representing a plot of the processed RSP signals.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate data
      rsp = nk.rsp_simulate(duration=90, respiratory_rate=15)

      # Process signal
      rsp_signals, info = nk.rsp_process(rsp, sampling_rate=1000)

      # Plot
      @savefig p_rsp_plot1.png scale=100%
      nk.rsp_plot(rsp_signals, sampling_rate=1000)
      @suppress
      plt.close()

    """
    # Mark peaks, troughs and phases.
    peaks = np.where(rsp_signals["RSP_Peaks"] == 1)[0]
    troughs = np.where(rsp_signals["RSP_Troughs"] == 1)[0]
    inhale = np.where(rsp_signals["RSP_Phase"] == 1)[0]
    exhale = np.where(rsp_signals["RSP_Phase"] == 0)[0]

    # Optional panels are appended below the signal and rate panels, in this
    # fixed order, whenever the corresponding column is present.
    has_amplitude = "RSP_Amplitude" in list(rsp_signals.columns)
    has_rvt = "RSP_RVT" in list(rsp_signals.columns)
    has_symmetry = "RSP_Symmetry_PeakTrough" in list(rsp_signals.columns)
    nrow = 2 + sum([has_amplitude, has_rvt, has_symmetry])

    # Determine mean rate (and means of the optional signals, if present).
    rate_mean = np.mean(rsp_signals["RSP_Rate"])
    if has_amplitude:
        amplitude_mean = np.mean(rsp_signals["RSP_Amplitude"])
    if has_rvt:
        rvt_mean = np.mean(rsp_signals["RSP_RVT"])

    # Get signals marking inspiration and expiration.
    exhale_signal, inhale_signal = _rsp_plot_phase(rsp_signals, troughs, peaks)

    # Determine unit of x-axis.
    if sampling_rate is not None:
        x_label = "Time (seconds)"
        x_axis = np.linspace(0, len(rsp_signals) / sampling_rate, len(rsp_signals))
    else:
        x_label = "Samples"
        x_axis = np.arange(0, len(rsp_signals))

    if static:
        fig, ax = plt.subplots(nrows=nrow, ncols=1, sharex=True, figsize=figsize)

        last_ax = fig.get_axes()[-1]
        last_ax.set_xlabel(x_label)

        # Plot cleaned and raw respiration as well as peaks and troughs.
        ax[0].set_title("Raw and Cleaned Signal")
        fig.suptitle("Respiration (RSP)", fontweight="bold")

        ax[0].plot(
            x_axis, rsp_signals["RSP_Raw"], color="#B0BEC5", label="Raw", zorder=1
        )
        ax[0].plot(
            x_axis,
            rsp_signals["RSP_Clean"],
            color="#2196F3",
            label="Cleaned",
            zorder=2,
            linewidth=1.5,
        )

        ax[0].scatter(
            x_axis[peaks],
            rsp_signals["RSP_Clean"][peaks],
            color="red",
            label="Exhalation Onsets",
            zorder=3,
        )
        ax[0].scatter(
            x_axis[troughs],
            rsp_signals["RSP_Clean"][troughs],
            color="orange",
            label="Inhalation Onsets",
            zorder=4,
        )

        # Shade region to mark inspiration and expiration.
        ax[0].fill_between(
            x_axis[exhale],
            exhale_signal[exhale],
            rsp_signals["RSP_Clean"][exhale],
            where=rsp_signals["RSP_Clean"][exhale] > exhale_signal[exhale],
            color="#CFD8DC",
            linestyle="None",
            label="exhalation",
        )
        ax[0].fill_between(
            x_axis[inhale],
            inhale_signal[inhale],
            rsp_signals["RSP_Clean"][inhale],
            where=rsp_signals["RSP_Clean"][inhale] > inhale_signal[inhale],
            color="#ECEFF1",
            linestyle="None",
            label="inhalation",
        )

        ax[0].legend(loc="upper right")

        # Plot breathing rate.
        ax[1].set_title("Breathing Rate")
        ax[1].plot(
            x_axis,
            rsp_signals["RSP_Rate"],
            color="#4CAF50",
            label="Rate",
            linewidth=1.5,
        )
        ax[1].axhline(y=rate_mean, label="Mean", linestyle="--", color="#4CAF50")
        ax[1].legend(loc="upper right")

        # BUGFIX: panel positions used to be hardcoded (ax[2], ax[3], ax[4]),
        # which indexed out of bounds (or drew into the wrong panel) whenever an
        # earlier optional column was missing (e.g. RVT without Amplitude).
        # Use a running panel index instead.
        row = 2

        if has_amplitude:
            ax[row].set_title("Breathing Amplitude")
            ax[row].plot(
                x_axis,
                rsp_signals["RSP_Amplitude"],
                color="#009688",
                label="Amplitude",
                linewidth=1.5,
            )
            ax[row].axhline(
                y=amplitude_mean, label="Mean", linestyle="--", color="#009688"
            )
            ax[row].legend(loc="upper right")
            row += 1

        if has_rvt:
            ax[row].set_title("Respiratory Volume per Time")
            ax[row].plot(
                x_axis,
                rsp_signals["RSP_RVT"],
                color="#00BCD4",
                label="RVT",
                linewidth=1.5,
            )
            # Mean line now matches the RVT trace color (was "#009688", a
            # copy-paste from the amplitude panel).
            ax[row].axhline(y=rvt_mean, label="Mean", linestyle="--", color="#00BCD4")
            ax[row].legend(loc="upper right")
            row += 1

        if has_symmetry:
            ax[row].set_title("Cycle Symmetry")
            ax[row].plot(
                x_axis,
                rsp_signals["RSP_Symmetry_PeakTrough"],
                color="green",
                label="Peak-Trough Symmetry",
                linewidth=1.5,
            )
            ax[row].plot(
                x_axis,
                rsp_signals["RSP_Symmetry_RiseDecay"],
                color="purple",
                label="Rise-Decay Symmetry",
                linewidth=1.5,
            )
            ax[row].legend(loc="upper right")

        return fig
    else:
        # Generate interactive plot with plotly.
        try:
            import plotly.graph_objects as go
            from plotly.subplots import make_subplots
        except ImportError as e:
            # Single string (previously multiple comma-separated fragments,
            # which produced a tuple-shaped error message).
            raise ImportError(
                "NeuroKit error: rsp_plot(): the 'plotly' module is required"
                " when 'static' is False."
                " Please install it first (`pip install plotly`)."
            ) from e

        subplot_titles = ["Raw and Cleaned Signal", "Breathing Rate"]
        if has_amplitude:
            subplot_titles.append("Breathing Amplitude")
        if has_rvt:
            subplot_titles.append("Respiratory Volume per Time")
        if has_symmetry:
            subplot_titles.append("Cycle Symmetry")

        fig = make_subplots(
            rows=nrow,
            cols=1,
            shared_xaxes=True,
            subplot_titles=tuple(subplot_titles),
        )

        # Plot cleaned and raw RSP.
        fig.add_trace(
            go.Scatter(
                x=x_axis, y=rsp_signals["RSP_Raw"], name="Raw", marker_color="#B0BEC5"
            ),
            row=1,
            col=1,
        )
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=rsp_signals["RSP_Clean"],
                name="Cleaned",
                marker_color="#2196F3",
            ),
            row=1,
            col=1,
        )

        # Plot peaks and troughs.
        fig.add_trace(
            go.Scatter(
                x=x_axis[peaks],
                y=rsp_signals["RSP_Clean"][peaks],
                name="Exhalation Onsets",
                marker_color="red",
                mode="markers",
            ),
            row=1,
            col=1,
        )
        fig.add_trace(
            go.Scatter(
                x=x_axis[troughs],
                y=rsp_signals["RSP_Clean"][troughs],
                name="Inhalation Onsets",
                marker_color="orange",
                mode="markers",
            ),
            row=1,
            col=1,
        )
        # TODO: Shade region to mark inspiration and expiration.

        # Plot rate.
        fig.add_trace(
            go.Scatter(
                x=x_axis, y=rsp_signals["RSP_Rate"], name="Rate", marker_color="#4CAF50"
            ),
            row=2,
            col=1,
        )
        fig.add_trace(
            go.Scatter(
                x=x_axis,
                y=[rate_mean] * len(x_axis),
                name="Mean Rate",
                marker_color="#4CAF50",
                line=dict(dash="dash"),
            ),
            row=2,
            col=1,
        )

        # BUGFIX: rows used to be hardcoded (3, 4, 5); use a running index so
        # that panels stay aligned when optional columns are missing.
        row = 3

        if has_amplitude:
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=rsp_signals["RSP_Amplitude"],
                    name="Amplitude",
                    marker_color="#009688",
                ),
                row=row,
                col=1,
            )
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=[amplitude_mean] * len(x_axis),
                    name="Mean Amplitude",
                    marker_color="#009688",
                    line=dict(dash="dash"),
                ),
                row=row,
                col=1,
            )
            row += 1

        if has_rvt:
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=rsp_signals["RSP_RVT"],
                    name="RVT",
                    marker_color="#00BCD4",
                ),
                row=row,
                col=1,
            )
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=[rvt_mean] * len(x_axis),
                    name="Mean RVT",
                    marker_color="#00BCD4",
                    line=dict(dash="dash"),
                ),
                row=row,
                col=1,
            )
            row += 1

        if has_symmetry:
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=rsp_signals["RSP_Symmetry_PeakTrough"],
                    name="Peak-Trough Symmetry",
                    marker_color="green",
                ),
                row=row,
                col=1,
            )
            fig.add_trace(
                go.Scatter(
                    x=x_axis,
                    y=rsp_signals["RSP_Symmetry_RiseDecay"],
                    name="Rise-Decay Symmetry",
                    marker_color="purple",
                ),
                row=row,
                col=1,
            )

        fig.update_layout(title_text="Respiration (RSP)", height=1250, width=750)
        for i in range(1, nrow + 1):
            fig.update_xaxes(title_text=x_label, row=i, col=1)
        return fig
# =============================================================================
# Internals
# =============================================================================
def _rsp_plot_phase(rsp_signals, troughs, peaks):
exhale_signal = pd.Series(np.full(len(rsp_signals), np.nan))
exhale_signal[troughs] = rsp_signals["RSP_Clean"][troughs].values
exhale_signal[peaks] = rsp_signals["RSP_Clean"][peaks].values
exhale_signal = exhale_signal.fillna(method="backfill")
inhale_signal = pd.Series(np.full(len(rsp_signals), np.nan))
inhale_signal[troughs] = rsp_signals["RSP_Clean"][troughs].values
inhale_signal[peaks] = rsp_signals["RSP_Clean"][peaks].values
inhale_signal = inhale_signal.fillna(method="ffill")
return exhale_signal, inhale_signal
| 11,984 | 30.12987 | 88 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_rate.py | # -*- coding: utf-8 -*-
import numpy as np
from ..signal import (signal_filter, signal_interpolate, signal_rate,
signal_resample)
from .rsp_peaks import rsp_peaks
def rsp_rate(
    rsp_cleaned,
    troughs=None,
    sampling_rate=1000,
    window=10,
    hop_size=1,
    method="trough",
    peak_method="khodadad2018",
    interpolation_method="monotone_cubic",
):
    """**Find respiration rate**

    Parameters
    ----------
    rsp_cleaned : Union[list, np.array, pd.Series]
        The cleaned respiration channel as returned by :func:`.rsp_clean`.
    troughs : Union[list, np.array, pd.Series, pd.DataFrame]
        The respiration troughs (inhalation onsets) as returned by
        :func:`.rsp_peaks`. If None (default), inhalation onsets will be
        automatically identified from the cleaned signal.
    sampling_rate : int
        The sampling frequency of ``rsp_cleaned`` (in Hz, i.e., samples/second).
    window : int
        The duration of the sliding window (in seconds). Defaults to 10.
    hop_size : int
        The number of samples between successive windows. Defaults to 1.
    method : str
        Either ``"trough"`` (rate derived from the periods between successive
        inhalation onsets) or ``"xcorr"`` (rate derived from
        cross-correlations between the signal's derivative and a bank of
        sinusoids of different frequencies).
    peak_method : str
        Method used to identify inhalation onsets; only relevant for the
        ``"trough"`` method. One of ``"khodadad2018"`` (default) or
        ``"biosppy"``.
    interpolation_method : str
        Interpolation method between inhalation onsets (see
        :func:`.signal_interpolate`). ``"monotone_cubic"`` is the default
        because it guarantees monotone interpolation between data points,
        preventing physiologically implausible overshoots/undershoots.

    Return
    ------
    rsp_rate : np.ndarray
        Instantaneous respiration rate.

    Example
    -------
    .. ipython:: python

      import neurokit2 as nk
      rsp_signal = nk.data("rsp_1000hz")
      rsp_cleaned = nk.rsp_clean(rsp_signal, sampling_rate=1000)
      rsp_rate_onsets = nk.rsp_rate(rsp_cleaned, sampling_rate=1000, method="trough")
      rsp_rate_xcorr = nk.rsp_rate(rsp_cleaned, sampling_rate=1000, method="xcorr")

    """
    selected = method.lower()

    if selected in ("period", "peak", "peaks", "trough", "troughs", "signal_rate"):
        # Identify inhalation onsets first, unless supplied by the caller.
        if troughs is None:
            _, troughs = rsp_peaks(rsp_cleaned, sampling_rate=sampling_rate, method=peak_method)
        return signal_rate(
            troughs["RSP_Troughs"],
            sampling_rate=sampling_rate,
            desired_length=len(rsp_cleaned),
            interpolation_method=interpolation_method,
        )

    if selected in ("cross-correlation", "xcorr"):
        return _rsp_rate_xcorr(
            rsp_cleaned,
            sampling_rate=sampling_rate,
            window=window,
            hop_size=hop_size,
            interpolation_method=interpolation_method,
        )

    raise ValueError(
        "NeuroKit error: rsp_rate(): 'method' should be"
        " one of 'trough', or 'cross-correlation'."
    )
# =============================================================================
# Cross-correlation method
# =============================================================================
def _rsp_rate_xcorr(
    rsp_cleaned, sampling_rate=1000, window=10, hop_size=1, interpolation_method="monotone_cubic"
):
    """Estimate breathing rate by cross-correlating the signal's derivative
    with a bank of sinusoids (5-30 breaths/min, 0.25 bpm resolution).

    Returns an array of instantaneous rate (breaths per minute) interpolated
    to the length of ``rsp_cleaned``.
    """
    N = len(rsp_cleaned)

    # Downsample data to 10 Hz.
    desired_sampling_rate = 10
    rsp = signal_resample(
        rsp_cleaned, sampling_rate=sampling_rate, desired_sampling_rate=desired_sampling_rate
    )

    window_length = int(desired_sampling_rate * window)

    # Candidate breathing frequencies in Hz (5 to 30 breaths/min in 0.25 bpm
    # steps). BUGFIX: the same grid must be used for scoring and for look-up;
    # previously the correlations were computed over a grid with step 0.25/50
    # while the argmax index was looked up in a grid with step 0.25/60, so the
    # returned frequency did not correspond to the best-matching sinusoid.
    frequencies = np.arange(5 / 60, 30.25 / 60, 0.25 / 60)

    rsp_rate = []
    # NOTE(review): the loop iterates up to the length of the *original*
    # signal; windows beyond the downsampled signal terminate via the length
    # check below, so the behavior is unchanged.
    for start in np.arange(0, N, hop_size):
        window_segment = rsp[start : start + window_length]
        if len(window_segment) < window_length:
            break  # the last frames that are smaller than window_length

        # First-order difference, normalized by its maximum.
        diff = np.ediff1d(window_segment)
        norm_diff = diff / np.max(diff)

        # Cross-correlate the normalized derivative with each sinusoid.
        t = np.linspace(0, window, len(diff))
        xcorr = [
            np.corrcoef(norm_diff, np.sin(2 * np.pi * frequency * t))[0, 1]
            for frequency in frequencies
        ]

        # The best-matching frequency is the instantaneous rate estimate.
        rsp_rate.append(frequencies[np.argmax(xcorr)])

    # Interpolate to the length of the input and smooth.
    x = np.arange(len(rsp_rate))
    rsp_rate = signal_interpolate(x, rsp_rate, x_new=len(rsp_cleaned), method=interpolation_method)
    rsp_rate = signal_filter(rsp_rate, highcut=0.1, order=4, sampling_rate=sampling_rate)

    # Convert from Hz to breaths per minute.
    rsp_rate = np.multiply(rsp_rate, 60)

    return np.array(rsp_rate)
| 5,651 | 36.430464 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_rvt.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from ..misc import NeuroKitWarning
from ..signal import signal_interpolate
from ..stats import rescale
from .rsp_clean import rsp_clean
from .rsp_peaks import rsp_findpeaks
def rsp_rvt(
    rsp_signal,
    sampling_rate=1000,
    method="power2020",
    boundaries=[2.0, 1 / 30],
    iterations=10,
    show=False,
    silent=False,
    **kwargs
):
    """**Respiratory Volume per Time (RVT)**

    Computes Respiratory Volume per Time (RVT), the product of respiratory
    volume and breathing rate. RVT can be used to identify the global fMRI
    confounds of breathing, which is often considered noise.

    Parameters
    ----------
    rsp_signal : array
        Array containing the respiratory rate, produced by :func:`.signal_rate`.
    sampling_rate : int, optional
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    method: str, optional
        The rvt method to apply. Can be one of ``"power2020"`` (default),
        ``"harrison2021"`` or ``"birn2006"``.
    boundaries : list, optional
        Only applies if method is ``"harrison"``. Lower and upper limit of
        (humanly possible) breath frequency in Hertz.
    iterations : int, optional
        Only applies if method is ``"harrison"``. Amount of phase refinement
        estimates to remove high frequencies. Synthetic samples often take
        less than 3.
    show : bool, optional
        If ``True``, will return a simple plot of the RVT (with the re-scaled
        original RSP signal).
    silent : bool, optional
        If ``True``, warnings will not be printed.
    **kwargs
        Arguments to be passed to the underlying peak detection algorithm.

    Returns
    -------
    array
        Array containing the current RVT at every timestep.

    See Also
    --------
    signal_rate, rsp_peaks, rsp_process, rsp_clean

    References
    ----------
    * Birn, R. M., Diamond, J. B., Smith, M. A., & Bandettini, P. A. (2006). Separating
      respiratory-variation-related fluctuations from neuronal-activity-related fluctuations in
      fMRI. Neuroimage, 31(4), 1536-1548.
    * Power, J. D., Lynch, C. J., Dubin, M. J., Silver, B. M., Martin, A., & Jones, R. M. (2020).
      Characteristics of respiratory measures in young adults scanned at rest, including systematic
      changes and "missed" deep breaths. Neuroimage, 204, 116234.
    * Harrison, S. J., Bianchi, S., Heinzle, J., Stephan, K. E., Iglesias, S., & Kasper, L. (2021).
      A Hilbert-based method for processing respiratory timeseries. Neuroimage, 230, 117787.

    """
    # Dispatch on the (case-insensitive) method name.
    selected = method.lower()

    if selected in ("harrison", "harrison2021"):
        rvt = _rsp_rvt_harrison(
            rsp_signal,
            sampling_rate=sampling_rate,
            silent=silent,
            boundaries=boundaries,
            iterations=iterations,
        )
    elif selected in ("birn", "birn2006"):
        rvt = _rsp_rvt_birn(rsp_signal, sampling_rate=sampling_rate, silent=silent, **kwargs)
    elif selected in ("power", "power2020"):
        rvt = _rsp_rvt_power(rsp_signal, sampling_rate=sampling_rate, silent=silent, **kwargs)
    else:
        raise ValueError("NeuroKit error: rsp_rvt(): 'method' should be one of 'birn', 'power' or 'harrison'.")

    if show:
        _rsp_rvt_plot(rvt, rsp_signal, sampling_rate)

    return rvt
def _rsp_rvt_birn(
    rsp_signal,
    sampling_rate=1000,
    silent=False,
    window_length=0.4,
    peak_distance=0.8,
    peak_prominence=0.5,
    interpolation_method="linear",
):
    """Compute RVT following Birn et al. (2006).

    RVT is the interpolated peak-trough envelope difference divided by the
    interpolated breath-to-breath period (in seconds).
    """
    zsmooth_signal = _smooth_rsp_data(
        rsp_signal,
        sampling_rate=sampling_rate,
        window_length=window_length,
        silent=silent,
    )
    extrema = rsp_findpeaks(
        zsmooth_signal,
        method="scipy",
        peak_distance=peak_distance,
        peak_prominence=peak_prominence,
    )
    peak_locs = extrema["RSP_Peaks"]
    trough_locs = extrema["RSP_Troughs"]

    # Breath-to-breath periods (in seconds), anchored at the midpoints between
    # successive peaks. The leading NaN keeps both lists aligned with the
    # peak locations (no period is defined before the first breath).
    periods = [np.nan]
    midpoints = [np.nan]
    for previous_peak, current_peak in zip(peak_locs[:-1], peak_locs[1:]):
        midpoints.append(round((previous_peak + current_peak) / 2))
        periods.append((current_peak - previous_peak) / sampling_rate)

    # Interpolate envelope and period to the full signal length.
    sample_indices = range(len(zsmooth_signal))
    rvt_time = signal_interpolate(midpoints, periods, sample_indices, method=interpolation_method)
    rvt_peaks = signal_interpolate(
        peak_locs,
        zsmooth_signal[peak_locs],
        sample_indices,
        method=interpolation_method,
    )
    rvt_troughs = signal_interpolate(
        trough_locs,
        zsmooth_signal[trough_locs],
        sample_indices,
        method=interpolation_method,
    )

    # Envelope amplitude divided by breath period; mask division blow-ups.
    rvt = (rvt_peaks - rvt_troughs) / rvt_time
    rvt[np.isinf(rvt)] = np.nan
    return rvt
def _rsp_rvt_power(
    rsp_signal,
    sampling_rate=1000,
    silent=False,
    window_length=0.4,
    peak_distance=0.8,
    peak_prominence=0.5,
    interpolation_method="linear",
):
    """Compute RVT following Power et al. (2020).

    For each breath, the height from the peak to its preceding trough is
    normalized by the distance between successive peaks, then interpolated to
    the length of the input signal.
    """
    # Preprocess signal (clean, smooth, z-score).
    zsmooth_signal = _smooth_rsp_data(
        rsp_signal,
        sampling_rate=sampling_rate,
        silent=silent,
        window_length=window_length,
    )

    # Detect peaks and troughs.
    extrema = rsp_findpeaks(
        zsmooth_signal,
        method="scipy",
        peak_distance=peak_distance,
        peak_prominence=peak_prominence,
    )
    peak_locs = extrema["RSP_Peaks"]
    trough_locs = extrema["RSP_Troughs"]

    # Per-peak normalized heights (NaN where no preceding trough exists).
    heights = [np.nan] * len(peak_locs)
    for idx in range(1, len(peak_locs)):
        previous_peak = peak_locs[idx - 1]
        current_peak = peak_locs[idx]
        # Troughs lying strictly between the two successive peaks.
        between = trough_locs[(trough_locs > previous_peak) & (trough_locs < current_peak)]
        if len(between) == 0:
            # No trough found for this breath; leave the height as NaN.
            continue
        last_trough = max(between)
        heights[idx] = (zsmooth_signal[current_peak] - zsmooth_signal[last_trough]) / (
            current_peak - previous_peak
        )

    return signal_interpolate(peak_locs, heights, range(len(rsp_signal)), method=interpolation_method)
def _smooth_rsp_data(signal, sampling_rate=1000, window_length=0.4, silent=False):
    """Clean, smooth (Savitzky-Golay) and z-score an RSP signal.

    Parameters
    ----------
    signal : array
        Raw respiration signal.
    sampling_rate : int
        Sampling frequency in Hz.
    window_length : float
        Smoothing window duration in seconds (converted to an odd number of
        samples for the Savitzky-Golay filter).
    silent : bool
        If True, suppress warnings from the filter-size adjustment.
    """
    # BUGFIX: this module only imports `scipy.signal`; `scipy.stats` was
    # reached through SciPy's lazy submodule loading, which is not guaranteed
    # on all SciPy versions. Import it explicitly.
    import scipy.stats

    signal = rsp_clean(
        signal,
        sampling_rate=sampling_rate,
        window_length=window_length,
        method="hampel",
    )
    smooth_signal = scipy.signal.savgol_filter(
        signal,
        # Savitzky-Golay requires an odd window length (in samples).
        window_length=_make_uneven_filter_size(window_length * sampling_rate, silent),
        polyorder=2,
    )
    zsmooth_signal = scipy.stats.zscore(smooth_signal)
    return zsmooth_signal
def _rsp_rvt_harrison(
    rsp_signal,
    sampling_rate=1000,
    boundaries=[2.0, 1 / 30],
    iterations=10,
    silent=False,
):
    """Compute RVT with the Hilbert-based method of Harrison et al. (2021).

    The signal is low-pass filtered, its analytic phase is iteratively
    repaired to be non-decreasing, and RVT is the product of the filtered
    respiratory-volume envelope and the instantaneous breathing frequency.

    Parameters
    ----------
    rsp_signal : array
        Raw respiration signal.
    sampling_rate : int
        Sampling frequency in Hz.
    boundaries : list
        [upper, lower] clip limits for the instantaneous breathing frequency
        in Hz (note: both are in Hz, so the upper limit comes first).
    iterations : int
        Number of phase-refinement passes.
    silent : bool
        If True, suppress warnings raised during phase repair.
    """
    # low-pass filter at not too far above breathing-rate to remove high-frequency noise
    # Symmetric padding of 10 s on each side avoids filtfilt edge artifacts.
    n_pad = int(np.ceil(10 * sampling_rate))

    d = scipy.signal.iirfilter(N=10, Wn=0.75, btype="lowpass", analog=False, output="sos", fs=sampling_rate)
    fr_lp = scipy.signal.sosfiltfilt(d, np.pad(rsp_signal, n_pad, "symmetric"))
    fr_lp = fr_lp[n_pad : (len(fr_lp) - n_pad)]

    # derive Hilbert-transform
    fr_filt = fr_lp
    # Envelope (magnitude of the analytic signal) of the low-passed signal.
    fr_mag = abs(scipy.signal.hilbert(fr_filt))

    for _ in range(iterations):
        # analytic signal to phase
        fr_phase = np.unwrap(np.angle(scipy.signal.hilbert(fr_filt)))
        # Remove any phase decreases that may occur
        # Find places where the gradient changes sign
        # maybe can be changed with signal.signal_zerocrossings
        fr_phase_diff = np.diff(np.sign(np.gradient(fr_phase)))
        decrease_inds = np.argwhere(fr_phase_diff < 0)
        increase_inds = np.append(np.argwhere(fr_phase_diff > 0), [len(fr_phase) - 1])

        for n_max in decrease_inds:
            # Find value of `fr_phase` at max and min:
            fr_max = fr_phase[n_max].squeeze()
            # Next increasing point after this local maximum; may be None at
            # the very end of the signal or for noisy/saddle-point breaths.
            n_min, fr_min = _rsp_rvt_find_min(increase_inds, fr_phase, n_max, silent)
            if n_min is None:
                # There is no finishing point to the interpolation at the very end
                continue
            # Find where `fr_phase` passes `fr_min` for the first time
            # NOTE(review): these argwhere searches scan the whole phase
            # array, relying on fr_phase being (near-)monotonic after
            # unwrapping -- confirm against the reference implementation.
            n_start = np.argwhere(fr_phase > fr_min)
            if len(n_start) == 0:
                n_start = n_max
            else:
                n_start = n_start[0].squeeze()
            # Find where `fr_phase` exceeds `fr_max` for the first time
            n_end = np.argwhere(fr_phase < fr_max)
            if len(n_end) == 0:
                n_end = n_min
            else:
                n_end = n_end[-1].squeeze()
            # Linearly interpolate from n_start to n_end
            fr_phase[n_start:n_end] = np.linspace(fr_min, fr_max, num=n_end - n_start).squeeze()
        # Filter out any high frequencies from phase-only signal
        fr_filt = scipy.signal.sosfiltfilt(d, np.pad(np.cos(fr_phase), n_pad, "symmetric"))
        fr_filt = fr_filt[n_pad : (len(fr_filt) - n_pad)]
    # Keep phase only signal as reference
    fr_filt = np.cos(fr_phase)

    # Make RVT
    # Low-pass filter to remove within_cycle changes
    # Note factor of two is for compatability with the common definition of RV
    # as the difference between max and min inhalation (i.e. twice the amplitude)
    d = scipy.signal.iirfilter(N=10, Wn=0.2, btype="lowpass", analog=False, output="sos", fs=sampling_rate)
    fr_rv = 2 * scipy.signal.sosfiltfilt(d, np.pad(fr_mag, n_pad, "symmetric"))
    fr_rv = fr_rv[n_pad : (len(fr_rv) - n_pad)]
    # Volume cannot be negative.
    fr_rv[fr_rv < 0] = 0

    # Breathing rate is instantaneous frequency
    fr_if = sampling_rate * np.gradient(fr_phase) / (2 * np.pi)
    fr_if = scipy.signal.sosfiltfilt(d, np.pad(fr_if, n_pad, "symmetric"))
    fr_if = fr_if[n_pad : (len(fr_if) - n_pad)]
    # remove in-human patterns, since both limits are in Hertz, the upper_limit is lower
    fr_if = np.clip(fr_if, boundaries[1], boundaries[0])

    # RVT = magnitude * breathing rate
    rvt = np.multiply(fr_rv, fr_if)

    # Downsampling is not needed as we assume always the same sampling rate and operate always in the same sampling rate
    return rvt
def _rsp_rvt_find_min(increase_inds, fr_phase, smaller_index, silent):
bigger_n_max = np.argwhere(increase_inds > smaller_index)
if len(bigger_n_max) == 0:
if not silent:
warn(
"rsp_rvt(): There is no next increasing point as end point for the interpolation. "
"Interpolation is skipped for this case.",
category=NeuroKitWarning,
)
return None, None
bigger_n_max = bigger_n_max[0].squeeze()
n_min = increase_inds[bigger_n_max]
fr_min = fr_phase[n_min].squeeze()
# Sometime fr_min is the same as n_max and it caused problems
if fr_phase[smaller_index].squeeze() < fr_min:
if not silent:
warn(
"rsp_rvt(): The next bigger increasing index has a bigger value than the chosen decreasing index, "
"this might be due to very small/noisy breaths or saddle points. "
"Interpolation is skipped for this case.",
category=NeuroKitWarning,
)
return None, None
return n_min, fr_min
def _rsp_rvt_plot(rvt, rsp_signal, sampling_rate):
    """Plot the RVT trace over the raw RSP signal (rescaled to the RVT range)."""
    plt.figure()
    plt.title("Respiratory Volume per Time (RVT)")
    plt.xlabel("Time [s]")

    # Rescale the raw signal into the RVT value range so both share one axis.
    rsp_rescaled = rescale(rsp_signal, to=[np.nanmin(rvt), np.nanmax(rvt)])
    plt.plot(rsp_rescaled, label="RSP", color="#CFD8DC")
    plt.plot(rvt, label="RVT", color="#00BCD4")
    plt.legend()

    # Relabel the interior x-ticks from samples to seconds.
    ticks = plt.gca().get_xticks()[1:-1]
    plt.xticks(ticks, [tick / sampling_rate for tick in ticks])
def _make_uneven_filter_size(number, silent=False):
if number < 0:
if not silent:
warn(
"Received a negative filter size, progressed with filter size 1.",
category=NeuroKitWarning,
)
return 1
if number % 2 == 1:
return int(number)
if number > 0:
return int(number - 1)
return 1
| 13,112 | 34.633152 | 120 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_amplitude.py | # -*- coding: utf-8 -*-
import numpy as np
from ..signal import signal_interpolate
from .rsp_fixpeaks import _rsp_fixpeaks_retrieve
def rsp_amplitude(
    rsp_cleaned, peaks, troughs=None, method="standard", interpolation_method="monotone_cubic"
):
    """**Compute respiratory amplitude**

    Compute respiratory amplitude given the raw respiration signal and its extrema. The
    **standard** method computes the amplitude as the difference between a peak and its preceding
    trough, while the **prepost** method computes the amplitude as the average of the differences
    of peak to its preceding and succeeding troughs (Cole, 2019).

    Parameters
    ----------
    rsp_cleaned : Union[list, np.array, pd.Series]
        The cleaned respiration channel as returned by :func:`.rsp_clean`.
    peaks : list or array or DataFrame or Series or dict
        The samples at which the respiration peaks (exhalation onsets) occur. If a dict or a
        DataFrame is passed, it is assumed that these containers were obtained
        with :func:`rsp_findpeaks`.
    troughs : list or array or DataFrame or Series or dict
        The samples at which the respiration troughs (inhalation onsets) occur. If a dict or a
        is passed, it is assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    method : str
        The method to use to compute the amplitude. Can be ``"standard"`` or ``"prepost"``.
    interpolation_method : str
        Method used to interpolate the amplitude between peaks. See :func:`.signal_interpolate`.
        ``"monotone_cubic"`` is chosen as the default interpolation method since it ensures monotone
        interpolation between data point (i.e., it prevents physiologically implausible "overshoots"
        or "undershoots" in the y-direction). In contrast, the widely used cubic spline
        'interpolation does not ensure monotonicity.

    Returns
    -------
    array
        A vector containing the respiratory amplitude.

    See Also
    --------
    rsp_clean, rsp_peaks, signal_rate, rsp_process, rsp_plot, rsp_symmetry

    References
    ----------
    * Cole, S., & Voytek, B. (2019). Cycle-by-cycle analysis of neural oscillations. Journal of
      neurophysiology, 122(2), 849-861.

    """
    # Format input.
    peaks, troughs = _rsp_fixpeaks_retrieve(peaks, troughs)

    # To consistently calculate amplitude, peaks and troughs must have the same
    # number of elements, and the first trough must precede the first peak.
    if (peaks.size != troughs.size) or (peaks[0] <= troughs[0]):
        # BUGFIX: previously raised with multiple comma-separated arguments,
        # which produced a tuple instead of a readable message.
        raise TypeError(
            "NeuroKit error: Please provide one of the containers "
            "returned by `rsp_findpeaks()` as `extrema` argument and do "
            "not modify its content."
        )

    # Calculate amplitude in units of the raw signal, based on vertical
    # difference of each peak to the preceding trough.
    amplitude = rsp_cleaned[peaks] - rsp_cleaned[troughs]

    # The above is the standard amplitude (each peak height to the preceding trough).
    if method in ["prepost"]:
        # Alternative amplitude calculation that corresponds to the average of
        # the peak height to the preceding and following troughs.
        # https://twitter.com/bradleyvoytek/status/1591495571269124096/photo/1
        # (Note that it cannot be done for the last peak)
        amplitude[0:-1] += rsp_cleaned[peaks[0:-1]] - rsp_cleaned[troughs[1::]]
        amplitude[0:-1] /= 2

    # Interpolate amplitude to length of rsp_cleaned. With a single peak there
    # is nothing to interpolate between, so broadcast the single value.
    if len(peaks) == 1:
        amplitude = np.full(rsp_cleaned.shape, amplitude[0])
    else:
        amplitude = signal_interpolate(
            peaks, amplitude, x_new=np.arange(len(rsp_cleaned)), method=interpolation_method
        )

    return amplitude
| 4,516 | 39.330357 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .rsp_eventrelated import rsp_eventrelated
from .rsp_intervalrelated import rsp_intervalrelated
def rsp_analyze(data, sampling_rate=1000, method="auto"):
    """**RSP Analysis**

    Performs RSP analysis on either epochs (event-related analysis) or on longer periods of data
    such as resting-state data.

    Parameters
    ----------
    data : dict or DataFrame
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        :func:`.epochs_create`, or a DataFrame containing all epochs, usually obtained via
        :func:`.epochs_to_df`. Can also take a DataFrame of processed signals from a longer period
        of data, typically generated by :func:`.rsp_process` or :func:`.bio_process`. Can also take
        a dict containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``"auto"`` where
        the right method will be chosen based on the mean duration of the data (``"event-related"``
        for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed RSP features. If event-related analysis is conducted,
        each epoch is indicated by the `Label` column. See :func:`.rsp_eventrelated` and
        :func:`.rsp_intervalrelated` docstrings for details.

    See Also
    --------
    bio_process, rsp_process, epochs_create, rsp_eventrelated, rsp_intervalrelated

    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity checks (the columns of the last epoch are inspected).
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values

        if len([i for i in colnames if "Label" in i]) == 0:
            raise ValueError(
                "NeuroKit error: rsp_analyze(): Wrong input or method, we couldn't extract extract epochs features."
            )
        else:
            features = rsp_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = rsp_intervalrelated(data, sampling_rate)

    # Auto
    elif method in ["auto"]:
        if isinstance(data, dict):
            # BUGFIX: features used to be recomputed once per epoch inside the
            # loop; only the result for the last epoch's duration survived.
            # Determine the duration (of the last epoch, as before) first, and
            # compute the features once.
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = rsp_intervalrelated(data, sampling_rate)
            else:
                features = rsp_eventrelated(data)

        if isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # Samples in the most frequent epoch label. BUGFIX:
                # `value_counts()[0]` relied on the deprecated positional
                # fallback of label-based indexing; use `.iloc[0]`.
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = rsp_intervalrelated(data, sampling_rate)
            else:
                features = rsp_eventrelated(data)

    return features
| 4,175 | 35.955752 | 124 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_simulate.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state, check_random_state_children
from ..signal import signal_distort, signal_simulate, signal_smooth
def rsp_simulate(
    duration=10,
    length=None,
    sampling_rate=1000,
    noise=0.01,
    respiratory_rate=15,
    method="breathmetrics",
    random_state=None,
    random_state_distort="spawn",
):
    """**Simulate a respiratory signal**

    Generate an artificial (synthetic) respiratory signal of a given duration and breathing rate.

    Parameters
    ----------
    duration : int
        Desired length of duration (s).
    length : int
        The desired length of the signal (in samples). If ``None``, computed as
        ``duration * sampling_rate``.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    noise : float
        Noise level (amplitude of the laplace noise). Set to 0 for a noise-free signal.
    respiratory_rate : float
        Desired number of breath cycles in one minute.
    method : str
        The model used to generate the signal. ``"sinusoidal"`` approximates each respiratory
        cycle with a sine wave; ``"breathmetrics"`` (default) uses the more realistic model
        described by `Noto, et al. (2018) <https://github.com/zelanolab/breathmetrics>`_.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See ``misc.check_random_state``.
    random_state_distort : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        Random state used to distort the signal. ``"legacy"`` reuses the signal's stream
        (discouraged, creates dependent streams); ``"spawn"`` derives an independent child
        stream; any other value seeds an independent generator directly.

    Returns
    -------
    array
        Vector containing the respiratory signal.

    See Also
    --------
    rsp_clean, rsp_findpeaks, signal_rate, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      rsp1 = nk.rsp_simulate(duration=30, method="sinusoidal")
      rsp2 = nk.rsp_simulate(duration=30, method="breathmetrics")

      @savefig p_rsp_simulate1.png scale=100%
      pd.DataFrame({"RSP_Simple": rsp1, "RSP_Complex": rsp2}).plot(subplots=True)
      @suppress
      plt.close()

    References
    ----------
    * Noto, T., Zhou, G., Schuele, S., Templer, J., & Zelano, C. (2018). Automated analysis of
      breathing waveforms using BreathMetrics: A respiratory signal processing toolbox. Chemical
      Senses, 43(8), 583-597.

    """
    # Resolve the number of samples when only a duration was provided.
    if length is None:
        length = duration * sampling_rate

    # Reproducible randomness: one stream for the signal, one independent
    # child stream for the distortion step.
    rng = check_random_state(random_state)
    random_state_distort = check_random_state_children(
        random_state, random_state_distort, n_children=1
    )

    if method.lower() in ["sinusoidal", "sinus", "simple"]:
        rsp = _rsp_simulate_sinusoidal(
            duration=duration, sampling_rate=sampling_rate, respiratory_rate=respiratory_rate
        )
    else:
        rsp = _rsp_simulate_breathmetrics(
            duration=duration,
            sampling_rate=sampling_rate,
            respiratory_rate=respiratory_rate,
            rng=rng,
        )
    # The simulators may produce more samples than requested; truncate.
    rsp = rsp[0:length]

    # Optionally corrupt the clean waveform with laplace-shaped noise.
    if noise > 0:
        rsp = signal_distort(
            rsp,
            sampling_rate=sampling_rate,
            noise_amplitude=noise,
            noise_frequency=[5, 10, 100],
            noise_shape="laplace",
            random_state=random_state_distort[0],
            silent=True,
        )
    return rsp
# =============================================================================
# Simple Sinusoidal Model
# =============================================================================
def _rsp_simulate_sinusoidal(duration=10, sampling_rate=1000, respiratory_rate=15):
    """Approximate breathing with a single sine wave.

    The wave's frequency is the respiratory rate converted from breaths per
    minute to cycles per second, with a fixed amplitude of 0.5.
    """
    return signal_simulate(
        duration=duration,
        sampling_rate=sampling_rate,
        frequency=respiratory_rate / 60,
        amplitude=0.5,
    )
# =============================================================================
# BreathMetrics Model
# =============================================================================
def _rsp_simulate_breathmetrics_original(
    nCycles=100,
    sampling_rate=1000,
    breathing_rate=0.25,
    average_amplitude=0.5,
    amplitude_variance=0.1,
    phase_variance=0.1,
    inhale_pause_percent=0.3,
    inhale_pause_avgLength=0.2,
    inhale_pauseLength_variance=0.5,
    exhale_pause_percent=0.3,
    exhale_pause_avgLength=0.2,
    exhale_pauseLength_variance=0.5,
    pause_amplitude=0.1,
    pause_amplitude_variance=0.2,
    signal_noise=0.1,
    rng=None,
):
    """Simulates a recording of human airflow data by appending individually constructed sin waves and pauses in
    sequence. This is translated from the matlab code available `here.
    <https://github.com/zelanolab/breathmetrics/blob/master/simulateRespiratoryData.m>`_ by Noto, et al. (2018).

    Parameters
    ----------
    nCycles : int or float
        number of breathing cycles to simulate.
    sampling_rate : int
        sampling rate.
    breathing_rate : float
        average breathing rate.
    average_amplitude : float
        average amplitude of inhales and exhales.
    amplitude_variance: float
        variance in respiratory amplitudes.
    phase_variance: float
        variance in duration of individual breaths.
    inhale_pause_percent : float
        percent of inhales followed by a pause.
    inhale_pause_avgLength : float
        average length of inhale pauses.
    inhale_pauseLength_variance : float
        variance in inhale pause length.
    exhale_pause_percent : float
        percent of exhales followed by a pause.
    exhale_pause_avgLength : float
        average length of exhale pauses.
    exhale_pauseLength_variance : float
        variance in exhale pause length.
    pause_amplitude : float
        noise amplitude of pauses.
    pause_amplitude_variance : float
        variance in pause noise.
    signal_noise : float
        percent of noise saturation in the simulated signal.
    rng : numpy.random.Generator
        random number generator used for all stochastic draws (amplitudes,
        phases, pause occurrence/lengths, and the final noise vector).

    Returns
    ----------
    signal
        vector containing breathmetrics simulated rsp signal.

    """
    # Define additional parameters
    # sample_phase: average breath duration expressed in samples.
    sample_phase = sampling_rate / breathing_rate
    # Average pause durations converted to samples.
    inhale_pause_phase = np.round(inhale_pause_avgLength * sample_phase).astype(int)
    exhale_pause_phase = np.round(exhale_pause_avgLength * sample_phase).astype(int)
    # Normalize variance by average breath amplitude
    amplitude_variance_normed = average_amplitude * amplitude_variance
    amplitudes_with_noise = rng.standard_normal(nCycles) * amplitude_variance_normed + average_amplitude
    # Negative amplitudes are physically meaningless; clamp to zero.
    amplitudes_with_noise[amplitudes_with_noise < 0] = 0
    # Normalize phase by average breath length
    phase_variance_normed = phase_variance * sample_phase
    phases_with_noise = np.round(rng.standard_normal(nCycles) * phase_variance_normed + sample_phase).astype(int)
    phases_with_noise[phases_with_noise < 0] = 0
    # Normalize pause lengths by phase and variation
    inhale_pauseLength_variance_normed = inhale_pause_phase * inhale_pauseLength_variance
    inhale_pauseLengths_with_noise = np.round(
        rng.standard_normal(nCycles) * inhale_pauseLength_variance_normed + inhale_pause_phase
    ).astype(int)
    inhale_pauseLengths_with_noise[inhale_pauseLengths_with_noise < 0] = 0
    exhale_pauseLength_variance_normed = exhale_pause_phase * exhale_pauseLength_variance
    exhale_pauseLengths_with_noise = np.round(
        rng.standard_normal(nCycles) * exhale_pauseLength_variance_normed + inhale_pause_phase
    ).astype(int)
    # NOTE(review): exhale pause lengths are centred on `inhale_pause_phase`,
    # not `exhale_pause_phase` — possibly carried over from the MATLAB
    # original; confirm against upstream before changing. (With the default
    # parameters both values coincide, so the defaults are unaffected.)
    exhale_pauseLengths_with_noise[exhale_pauseLengths_with_noise < 0] = 0
    # Normalize pause amplitudes
    pause_amplitude_variance_normed = pause_amplitude * pause_amplitude_variance
    # Initialize empty vector to fill with simulated data
    simulated_respiration = []
    # Initialize parameters to save
    inhale_onsets = np.zeros(nCycles)
    exhale_onsets = np.zeros(nCycles)
    inhale_pause_onsets = np.zeros(nCycles)
    exhale_pause_onsets = np.zeros(nCycles)
    inhale_lengths = np.zeros(nCycles)
    inhale_pauseLengths = np.zeros(nCycles)
    exhale_lengths = np.zeros(nCycles)
    exhale_pauseLengths = np.zeros(nCycles)
    inhale_peaks = np.zeros(nCycles)
    exhale_troughs = np.zeros(nCycles)
    # `i` is the running sample cursor marking where the next breath starts.
    i = 1
    for c in range(nCycles):
        # Determine length of inhale pause for this cycle
        # (a pause occurs with probability inhale_pause_percent).
        if rng.uniform() < inhale_pause_percent:
            this_inhale_pauseLength = inhale_pauseLengths_with_noise[c]
            this_inhale_pause = rng.standard_normal(this_inhale_pauseLength) * pause_amplitude_variance_normed
            this_inhale_pause[this_inhale_pause < 0] = 0
        else:
            this_inhale_pauseLength = 0
            this_inhale_pause = []
        # Determine length of exhale pause for this cycle
        if rng.uniform() < exhale_pause_percent:
            this_exhale_pauseLength = exhale_pauseLengths_with_noise[c]
            this_exhale_pause = rng.standard_normal(this_exhale_pauseLength) * pause_amplitude_variance_normed
            this_exhale_pause[this_exhale_pause < 0] = 0
        else:
            this_exhale_pauseLength = 0
            this_exhale_pause = []
        # Determine length of inhale and exhale for this cycle to main
        # breathing rate
        cycle_length = phases_with_noise[c] - (this_inhale_pauseLength + this_exhale_pauseLength)
        # If pauses are longer than the time alloted for this breath, set them
        # to 0 so a real breath can be simulated. This will deviate the
        # statistics from those initialized but is unavaoidable at the current
        # state
        if (cycle_length <= 0) or (cycle_length < min(phases_with_noise) / 4):
            this_inhale_pauseLength = 0
            this_inhale_pause = []
            this_exhale_pauseLength = 0
            this_exhale_pause = []
            cycle_length = phases_with_noise[c] - (
                this_inhale_pauseLength + this_exhale_pauseLength
            )
        # Compute inhale and exhale for this cycle
        # (one full sine period: first half inhale, second half exhale).
        this_cycle = np.sin(np.linspace(0, 2 * np.pi, cycle_length)) * amplitudes_with_noise[c]
        half_cycle = np.round(len(this_cycle) / 2).astype(int)
        this_inhale = this_cycle[0:half_cycle]
        this_inhale_length = len(this_inhale)
        this_exhale = this_cycle[half_cycle:]
        this_exhale_length = len(this_exhale)
        # Save parameters for checking
        inhale_lengths[c] = this_inhale_length
        inhale_pauseLengths[c] = this_inhale_pauseLength
        exhale_lengths[c] = this_exhale_length
        exhale_pauseLengths[c] = this_exhale_pauseLength
        inhale_onsets[c] = i
        exhale_onsets[c] = i + this_inhale_length + this_inhale_pauseLength
        # Pause onsets are NaN when the corresponding pause did not occur.
        if len(this_inhale_pause) > 0:
            inhale_pause_onsets[c] = i + this_inhale_length
        else:
            inhale_pause_onsets[c] = np.nan
        if len(this_exhale_pause) > 0:
            exhale_pause_onsets[c] = (
                i + this_inhale_length + this_inhale_pauseLength + this_exhale_length
            )
        else:
            exhale_pause_onsets[c] = np.nan
        # Compose breath from parameters
        this_breath = np.hstack([this_inhale, this_inhale_pause, this_exhale, this_exhale_pause])
        # Compute max flow for inhale and exhale for this breath
        max_ID = np.argmax(this_breath)
        min_ID = np.argmin(this_breath)
        inhale_peaks[c] = i + max_ID
        exhale_troughs[c] = i + min_ID
        # Append breath to simulated resperation vector
        simulated_respiration = np.hstack([simulated_respiration, this_breath])
        # Advance the cursor; breaths overlap by one sample by construction.
        i = i + len(this_breath) - 1
    # Smooth signal
    simulated_respiration = signal_smooth(
        simulated_respiration, kernel="boxzen", size=sampling_rate / 2
    )
    # A strictly-zero noise level would zero out the noise term entirely;
    # use a tiny floor instead.
    if signal_noise == 0:
        signal_noise = 0.0001
    noise_vector = rng.uniform(size=simulated_respiration.shape) * average_amplitude
    # Blend clean signal and noise according to the noise saturation percent.
    simulated_respiration = simulated_respiration * (1 - signal_noise) + noise_vector * signal_noise
    raw_features = {
        "Inhale Onsets": inhale_onsets,
        "Exhale Onsets": exhale_onsets,
        "Inhale Pause Onsets": inhale_pause_onsets,
        "Exhale Pause Onsets": exhale_pause_onsets,
        "Inhale Lengths": inhale_lengths / sampling_rate,
        "Inhale Pause Lengths": inhale_pauseLengths / sampling_rate,
        "Exhale Lengths": exhale_lengths / sampling_rate,
        "Exhale Pause Lengths": exhale_pauseLengths / sampling_rate,
        "Inhale Peaks": inhale_peaks,
        "Exhale Troughs": exhale_troughs,
    }
    # Average pause lengths over breaths that actually contained a pause.
    if len(inhale_pauseLengths[inhale_pauseLengths > 0]) > 0:
        avg_inhale_pauseLength = np.mean(inhale_pauseLengths[inhale_pauseLengths > 0])
    else:
        avg_inhale_pauseLength = 0
    if len(exhale_pauseLengths[exhale_pauseLengths > 0]) > 0:
        avg_exhale_pauseLength = np.mean(exhale_pauseLengths[exhale_pauseLengths > 0])
    else:
        avg_exhale_pauseLength = 0
    # Realized breathing rate from the mean inter-onset interval.
    estimated_breathing_rate = (1 / np.mean(np.diff(inhale_onsets))) * sampling_rate
    feature_stats = {
        "Breathing Rate": estimated_breathing_rate,
        "Average Inhale Length": np.mean(inhale_lengths / sampling_rate),
        "Average Inhale Pause Length": avg_inhale_pauseLength / sampling_rate,
        "Average Exhale Length": np.mean(exhale_lengths / sampling_rate),
        "Average Exhale Pause Length": avg_exhale_pauseLength / sampling_rate,
    }
    return simulated_respiration, raw_features, feature_stats
def _rsp_simulate_breathmetrics(duration=10, sampling_rate=1000, respiratory_rate=15, rng=None):
    """Wrap the BreathMetrics simulator for a duration-based interface.

    Oversamples the number of breath cycles by 50% so the generated signal is
    never shorter than requested (the caller truncates to the exact length).
    """
    n_cycles = int(respiratory_rate / 60 * duration)

    # Retry until the simulator yields a signal (a failed run is signalled
    # by `False`, which keeps the loop going).
    simulated = False
    while simulated is False:
        simulated, _, _ = _rsp_simulate_breathmetrics_original(
            nCycles=int(n_cycles * 1.5),
            sampling_rate=sampling_rate,
            breathing_rate=respiratory_rate / 60,
            signal_noise=0,
            rng=rng,
        )
    return simulated
| 14,964 | 37.87013 | 120 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_findpeaks.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.signal
def rsp_findpeaks(
    rsp_cleaned,
    sampling_rate=1000,
    method="khodadad2018",
    amplitude_min=0.3,
    peak_distance=0.8,
    peak_prominence=0.5,
):
    """**Extract extrema in a respiration (RSP) signal**

    Low-level function used by :func:`.rsp_peaks` to identify inhalation and exhalation onsets
    (troughs and peaks respectively) in a preprocessed respiration signal using different sets of
    parameters. See :func:`.rsp_peaks` for details.

    Parameters
    ----------
    rsp_cleaned : Union[list, np.array, pd.Series]
        The cleaned respiration channel as returned by :func:`.rsp_clean`.
    sampling_rate : int
        The sampling frequency of :func:`.rsp_cleaned` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"khodadad2018"`` (default), ``"scipy"`` or
        ``"biosppy"``.
    amplitude_min : float
        Only applies if method is ``"khodadad2018"``. Extrema that have a vertical distance smaller
        than(outlier_threshold * average vertical distance) to any direct neighbour are removed as
        false positive outliers. I.e., outlier_threshold should be a float with positive sign (the
        default is 0.3). Larger values of outlier_threshold correspond to more conservative
        thresholds (i.e., more extrema removed as outliers).
    peak_distance: float
        Only applies if method is ``"scipy"``. Minimal distance between peaks. Default is 0.8
        seconds.
    peak_prominence: float
        Only applies if method is ``"scipy"``. Minimal prominence between peaks. Default is 0.5.

    Returns
    -------
    info : dict
        A dictionary containing additional information, in this case the samples at which inhalation
        onsets and exhalation onsets occur, accessible with the keys ``"RSP_Troughs"`` and
        ``"RSP_Peaks"``, respectively.

    See Also
    --------
    rsp_clean, rsp_fixpeaks, rsp_peaks, signal_rate, rsp_amplitude, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)
      cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
      info = nk.rsp_findpeaks(cleaned)

      @savefig p_rsp_findpeaks1.png scale=100%
      nk.events_plot([info["RSP_Peaks"], info["RSP_Troughs"]], cleaned)
      @suppress
      plt.close()

    """
    # Try retrieving the correct column. Accessing a missing DataFrame column
    # raises KeyError (not NameError), so the fallback chain
    # RSP_Clean -> RSP_Raw -> RSP must catch KeyError to work as intended.
    if isinstance(rsp_cleaned, pd.DataFrame):
        try:
            rsp_cleaned = rsp_cleaned["RSP_Clean"]
        except KeyError:
            try:
                rsp_cleaned = rsp_cleaned["RSP_Raw"]
            except KeyError:
                rsp_cleaned = rsp_cleaned["RSP"]
    cleaned = np.array(rsp_cleaned)

    # Find peaks
    method = method.lower()  # remove capitalised letters
    if method in ["khodadad", "khodadad2018"]:
        info = _rsp_findpeaks_khodadad(cleaned, amplitude_min=amplitude_min)
    elif method == "biosppy":
        info = _rsp_findpeaks_biosppy(cleaned, sampling_rate=sampling_rate)
    elif method == "scipy":
        info = _rsp_findpeaks_scipy(
            cleaned,
            sampling_rate=sampling_rate,
            peak_distance=peak_distance,
            peak_prominence=peak_prominence,
        )
    else:
        raise ValueError(
            "NeuroKit error: rsp_findpeaks(): 'method' should be one of 'khodadad2018', 'scipy' or 'biosppy'."
        )
    return info
# =============================================================================
# Methods
# =============================================================================
def _rsp_findpeaks_biosppy(rsp_cleaned, sampling_rate):
    """Port of the BioSPPy respiration peak detector.

    https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/resp.py
    """
    extrema = _rsp_findpeaks_extrema(rsp_cleaned)
    extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)
    peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)

    # Minimum-period criterion: drop inter-breath intervals shorter than
    # 1.7 s, i.e. breathing rates above ~35 breaths per minute.
    too_fast = np.where((np.diff(peaks) / sampling_rate) < 1.7)[0]
    peaks = np.delete(peaks, too_fast)
    troughs = np.delete(troughs, too_fast)

    return {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
def _rsp_findpeaks_khodadad(rsp_cleaned, amplitude_min=0.3):
    """Khodadad et al. (2018) breath detection.

    https://iopscience.iop.org/article/10.1088/1361-6579/aad7e6/meta
    """
    candidates = _rsp_findpeaks_extrema(rsp_cleaned)
    candidates, amplitudes = _rsp_findpeaks_outliers(
        rsp_cleaned, candidates, amplitude_min=amplitude_min
    )
    peaks, troughs = _rsp_findpeaks_sanitize(candidates, amplitudes)
    return {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
def _rsp_findpeaks_scipy(rsp_cleaned, sampling_rate, peak_distance=0.8, peak_prominence=0.5):
    """Peak/trough detection via scipy.signal.find_peaks.

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
    """
    # Convert the minimal peak distance from seconds to samples.
    min_distance = sampling_rate * peak_distance
    peaks, _ = scipy.signal.find_peaks(
        rsp_cleaned, distance=min_distance, prominence=peak_prominence
    )
    # Troughs are peaks of the inverted signal.
    troughs, _ = scipy.signal.find_peaks(
        -rsp_cleaned, distance=min_distance, prominence=peak_prominence
    )

    # Merge and order all candidate extrema, then sanitize.
    extrema = np.sort(np.concatenate((peaks, troughs)))
    extrema, amplitudes = _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0)
    peaks, troughs = _rsp_findpeaks_sanitize(extrema, amplitudes)

    return {"RSP_Peaks": peaks, "RSP_Troughs": troughs}
# =============================================================================
# Internals
# =============================================================================
def _rsp_findpeaks_extrema(rsp_cleaned):
# Detect zero crossings (note that these are zero crossings in the raw
# signal, not in its gradient).
greater = rsp_cleaned > 0
smaller = rsp_cleaned < 0
risex = np.where(np.bitwise_and(smaller[:-1], greater[1:]))[0]
fallx = np.where(np.bitwise_and(greater[:-1], smaller[1:]))[0]
if risex[0] < fallx[0]:
startx = "rise"
elif fallx[0] < risex[0]:
startx = "fall"
allx = np.concatenate((risex, fallx))
allx.sort(kind="mergesort")
# Find extrema by searching minima between falling zero crossing and
# rising zero crossing, and searching maxima between rising zero
# crossing and falling zero crossing.
extrema = []
for i in range(len(allx) - 1):
# Determine whether to search for minimum or maximum.
if startx == "rise":
if (i + 1) % 2 != 0:
argextreme = np.argmax
else:
argextreme = np.argmin
elif startx == "fall":
if (i + 1) % 2 != 0:
argextreme = np.argmin
else:
argextreme = np.argmax
# Get the two zero crossings between which the extreme will be
# searched.
beg = allx[i]
end = allx[i + 1]
extreme = argextreme(rsp_cleaned[beg:end])
extrema.append(beg + extreme)
extrema = np.asarray(extrema)
return extrema
def _rsp_findpeaks_outliers(rsp_cleaned, extrema, amplitude_min=0.3):
# Only consider those extrema that have a minimum vertical distance to
# their direct neighbor, i.e., define outliers in absolute amplitude
# difference between neighboring extrema.
vertical_diff = np.abs(np.diff(rsp_cleaned[extrema]))
median_diff = np.median(vertical_diff)
min_diff = np.where(vertical_diff > (median_diff * amplitude_min))[0]
extrema = extrema[min_diff]
# Make sure that the alternation of peaks and troughs is unbroken. If
# alternation of sign in extdiffs is broken, remove the extrema that
# cause the breaks.
amplitudes = rsp_cleaned[extrema]
extdiffs = np.sign(np.diff(amplitudes))
extdiffs = np.add(extdiffs[0:-1], extdiffs[1:])
removeext = np.where(extdiffs != 0)[0] + 1
extrema = np.delete(extrema, removeext)
amplitudes = np.delete(amplitudes, removeext)
return extrema, amplitudes
def _rsp_findpeaks_sanitize(extrema, amplitudes):
# To be able to consistently calculate breathing amplitude, make sure that
# the extrema always start with a trough and end with a peak, since
# breathing amplitude will be defined as vertical distance between each
# peak and the preceding trough. Note that this also ensures that the
# number of peaks and troughs is equal.
if amplitudes[0] > amplitudes[1]:
extrema = np.delete(extrema, 0)
if amplitudes[-1] < amplitudes[-2]:
extrema = np.delete(extrema, -1)
peaks = extrema[1::2]
troughs = extrema[0:-1:2]
return peaks, troughs
| 8,895 | 35.760331 | 110 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_eventrelated.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning, find_closest
def rsp_eventrelated(epochs, silent=False):
    """**Performs event-related RSP analysis on epochs**

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial (usually obtained via
        :func:`.epochs_create`), or a DataFrame containing all epochs (usually
        obtained via :func:`.epochs_to_df`).
    silent : bool
        If ``True``, silence possible warnings.

    Returns
    -------
    DataFrame
        A dataframe with one row per epoch (indicated by the `Label` column,
        or by the `Index` column if no labels are present) containing:

        * ``"RSP_Rate_Max"``, ``"RSP_Rate_Min"``, ``"RSP_Rate_Mean"``,
          ``"RSP_Rate_SD"``: respiratory-rate statistics after stimulus onset.
        * ``"RSP_Rate_Max_Time"``, ``"RSP_Rate_Min_Time"``: times of the rate extrema.
        * ``"RSP_Amplitude_Baseline"``: the respiratory amplitude at stimulus onset.
        * ``"RSP_Amplitude_Max"``, ``"RSP_Amplitude_Min"``, ``"RSP_Amplitude_Mean"``:
          changes in amplitude relative to the onset baseline.
        * ``"RSP_Amplitude_SD"``: standard deviation of the amplitude after onset.
        * ``"RSP_Phase"``: whether the event onset concurs with inspiration (1)
          or expiration (0).
        * ``"RSP_PhaseCompletion"``: stage of the current respiration phase
          (0 to 1) at event onset.

    See Also
    --------
    events_find, epochs_create, bio_process

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Example with simulated data
      rsp, info = nk.rsp_process(nk.rsp_simulate(duration=120))
      epochs = nk.epochs_create(rsp, events=[5000, 10000, 15000], epochs_start=-0.1, epochs_end=1.9)

      # Analyze
      nk.rsp_eventrelated(epochs)

    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="rsp", silent=silent)

    # Extract features epoch by epoch.
    data = {}
    for label in epochs.keys():
        epoch = epochs[label]
        features = {}
        # Rate dynamics
        features = _eventrelated_rate(epoch, features, var="RSP_Rate")
        # Baseline-corrected amplitude
        features = _rsp_eventrelated_amplitude(epoch, features)
        # Respiratory phase at onset
        features = _rsp_eventrelated_inspiration(epoch, features)
        # Respiratory volume per time
        features = _rsp_eventrelated_rvt(epoch, features)
        # Epoch metadata (label, condition, ...)
        features = _eventrelated_addinfo(epoch, features)
        data[label] = features

    return _eventrelated_sanitizeoutput(data)
# =============================================================================
# Internals
# =============================================================================
def _rsp_eventrelated_amplitude(epoch, output=None):
    """Add baseline-corrected amplitude features of one epoch to ``output``.

    Requires an ``RSP_Amplitude`` column; otherwise a warning is issued and
    ``output`` is returned unchanged.
    """
    # `output=None` sentinel avoids the shared mutable-default-argument
    # pitfall (a literal `{}` default is reused across calls).
    if output is None:
        output = {}

    # Sanitize input
    if "RSP_Amplitude" not in epoch:
        warn(
            "Input does not have an `RSP_Amplitude` column."
            " Will skip all amplitude-related features.",
            category=NeuroKitWarning,
        )
        return output

    # Get baseline: the amplitude at the sample closest to stimulus onset (t=0)
    zero = find_closest(0, epoch.index.values, return_index=True)  # Find index closest to 0
    baseline = epoch["RSP_Amplitude"].iloc[zero]
    signal = epoch["RSP_Amplitude"].values[zero + 1 : :]

    # Max / Min / Mean, expressed relative to the onset baseline
    output["RSP_Amplitude_Baseline"] = baseline
    output["RSP_Amplitude_Max"] = np.max(signal) - baseline
    output["RSP_Amplitude_Min"] = np.min(signal) - baseline
    output["RSP_Amplitude_MeanRaw"] = np.mean(signal)
    output["RSP_Amplitude_Mean"] = output["RSP_Amplitude_MeanRaw"] - baseline
    output["RSP_Amplitude_SD"] = np.std(signal)

    return output
def _rsp_eventrelated_inspiration(epoch, output={}):
# Sanitize input
if "RSP_Phase" not in epoch:
warn(
"Input does not have an `RSP_Phase` column."
" Will not indicate whether event onset concurs with inspiration.",
category=NeuroKitWarning,
)
return output
# Indication of inspiration
output["RSP_Phase"] = epoch["RSP_Phase"][epoch.index > 0].iloc[0]
output["RSP_Phase_Completion"] = epoch["RSP_Phase_Completion"][epoch.index > 0].iloc[0]
return output
def _rsp_eventrelated_rvt(epoch, output=None):
    """Add baseline-corrected RVT (respiratory volume per time) features to ``output``.

    Requires an ``RSP_RVT`` column; otherwise a warning is issued and
    ``output`` is returned unchanged.
    """
    # `output=None` sentinel avoids the shared mutable-default-argument
    # pitfall (a literal `{}` default is reused across calls).
    if output is None:
        output = {}

    # Sanitize input
    if "RSP_RVT" not in epoch:
        warn(
            "Input does not have an `RSP_RVT` column. Will skip all RVT-related features.",
            category=NeuroKitWarning,
        )
        return output

    # Get baseline: the RVT at the sample closest to stimulus onset (t=0)
    zero = find_closest(0, epoch.index.values, return_index=True)  # Find index closest to 0
    baseline = epoch["RSP_RVT"].iloc[zero]
    signal = epoch["RSP_RVT"].values[zero + 1 : :]

    # Mean, expressed relative to the onset baseline
    output["RSP_RVT_Baseline"] = baseline
    output["RSP_RVT_Mean"] = np.mean(signal) - baseline
    return output
def _rsp_eventrelated_symmetry(epoch, output=None):
    """Add baseline-corrected breath-symmetry features to ``output``.

    Requires ``RSP_Symmetry_PeakTrough`` and ``RSP_Symmetry_RiseDecay``
    columns; otherwise a warning is issued and ``output`` is returned
    unchanged.
    """
    # `output=None` sentinel avoids the shared mutable-default-argument
    # pitfall (a literal `{}` default is reused across calls).
    if output is None:
        output = {}

    # Sanitize input
    if "RSP_Symmetry_PeakTrough" not in epoch:
        warn(
            "Input does not have an `RSP_Symmetry_PeakTrough` column."
            + " Will skip all symmetry-related features.",
            category=NeuroKitWarning,
        )
        return output

    # Get baselines: symmetry values at the sample closest to stimulus onset (t=0)
    zero = find_closest(0, epoch.index.values, return_index=True)  # Find index closest to 0
    baseline1 = epoch["RSP_Symmetry_PeakTrough"].iloc[zero]
    signal1 = epoch["RSP_Symmetry_PeakTrough"].values[zero + 1 : :]
    baseline2 = epoch["RSP_Symmetry_RiseDecay"].iloc[zero]
    signal2 = epoch["RSP_Symmetry_RiseDecay"].values[zero + 1 : :]

    # Mean, expressed relative to the onset baseline
    output["RSP_Symmetry_PeakTrough_Baseline"] = baseline1
    output["RSP_Symmetry_RiseDecay_Baseline"] = baseline2
    output["RSP_Symmetry_PeakTrough_Mean"] = np.mean(signal1) - baseline1
    output["RSP_Symmetry_RiseDecay_Mean"] = np.mean(signal2) - baseline2

    return output
| 7,274 | 32.995327 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_phase.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..signal import signal_phase
from .rsp_fixpeaks import _rsp_fixpeaks_retrieve
def rsp_phase(peaks, troughs=None, desired_length=None):
    """**Compute respiratory phase (inspiration and expiration)**

    Finds the respiratory phase, labelled as 1 for inspiration and 0 for expiration.

    Parameters
    ----------
    peaks : list or array or DataFrame or Series or dict
        The samples at which the inhalation peaks occur. If a dict or a DataFrame is passed, it is
        assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    troughs : list or array or DataFrame or Series or dict
        The samples at which the inhalation troughs occur. If a dict or a DataFrame is passed, it is
        assumed that these containers were obtained with :func:`.rsp_findpeaks`.
    desired_length : int
        The desired length of the returned vectors (in samples), typically the length of the
        cleaned signal. If ``None`` (default), the output spans up to (and including) the last
        peak or trough.

    Returns
    -------
    pd.DataFrame
        A DataFrame of same length as :func:`.rsp_signal` containing the following columns:

        * ``"RSP_Phase"``: breathing phase, marked by "1" for inspiration and "0" for expiration.

        * ``"RSP_Phase_Completion"``: breathing phase completion, expressed in percentage (from 0
          to 1), representing the stage of the current respiratory phase.

    See Also
    --------
    rsp_clean, rsp_peaks, rsp_amplitude, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)
      cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
      peak_signal, info = nk.rsp_peaks(cleaned)

      phase = nk.rsp_phase(peak_signal, desired_length=len(cleaned))
      @savefig p_rsp_phase1.png scale=100%
      fig = nk.signal_plot([rsp, phase], standardize=True)
      @suppress
      plt.close()

    """
    # Format input.
    peaks, troughs = _rsp_fixpeaks_retrieve(peaks, troughs)

    # Default to the smallest vector that holds every extremum. Previously,
    # passing None produced a 0-d array from np.full(None, ...) and crashed
    # with a cryptic IndexError on the assignments below.
    if desired_length is None:
        desired_length = int(np.max(np.concatenate((peaks, troughs)))) + 1

    # Phase: inspiration (1) runs from a trough up to the next peak,
    # expiration (0) from a peak down to the next trough.
    inspiration = np.full(desired_length, np.nan)
    inspiration[peaks] = 0.0
    inspiration[troughs] = 1.0

    last_element = np.where(~np.isnan(inspiration))[0][
        -1
    ]  # Avoid filling beyond the last peak/trough
    # Forward-fill phase labels between extrema (`ffill()` is the
    # non-deprecated equivalent of `fillna(method="pad")`).
    inspiration[0:last_element] = pd.Series(inspiration).ffill().values[0:last_element]

    # Phase Completion: percentage (0-1) of the current phase already elapsed.
    completion = signal_phase(inspiration, method="percent")
    return pd.DataFrame({"RSP_Phase": inspiration, "RSP_Phase_Completion": completion})
| 2,746 | 35.626667 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_methods.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc.report import get_kwargs
from .rsp_clean import rsp_clean
from .rsp_peaks import rsp_peaks
from .rsp_rvt import rsp_rvt
def rsp_methods(
    sampling_rate=1000,
    method="khodadad",
    method_cleaning="default",
    method_peaks="default",
    method_rvt="power",
    **kwargs,
):
    """**RSP Preprocessing Methods**

    This function analyzes and specifies the methods used in the preprocessing, and create a
    textual description of the methods used. It is used by :func:`rsp_process()` to dispatch the
    correct methods to each subroutine of the pipeline and :func:`rsp_report()` to create a
    preprocessing report.

    Parameters
    ----------
    sampling_rate : int
        The sampling frequency of the raw RSP signal (in Hz, i.e., samples/second).
    method : str
        The method used for cleaning and peak finding if ``"method_cleaning"``
        and ``"method_peaks"`` are set to ``"default"``. Can be one of ``"Khodadad"``, ``"BioSPPy"``.
        Defaults to ``"Khodadad"``.
    method_cleaning: str
        The method used to clean the raw RSP signal. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.rsp_clean`.
    method_peaks: str
        The method used to find peaks. If ``"default"``,
        will be set to the value of ``"method"``. Defaults to ``"default"``.
        For more information, see the ``"method"`` argument
        of :func:`.rsp_peaks`.
    method_rvt: str
        The method used to compute respiratory volume per time. Defaults to ``"harrison"``.
        For more information, see the ``"method"`` argument
        of :func:`.rsp_rvt`.
    **kwargs
        Other arguments to be passed to :func:`.rsp_clean`,
        :func:`.rsp_peaks`, and :func:`.rsp_rvt`.

    Returns
    -------
    report_info : dict
        A dictionary containing the keyword arguments passed to the cleaning
        and peak finding functions, text describing the methods, and the corresponding
        references.

    See Also
    --------
    rsp_process, rsp_clean, rsp_findpeaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      methods = nk.rsp_methods(sampling_rate=100, method="Khodadad", method_cleaning="hampel")
      print(methods["text_cleaning"])
      print(methods["references"][0])
    """
    # Sanitize inputs: "default" resolves to the value of `method`
    method_cleaning = (
        str(method).lower() if method_cleaning == "default" else str(method_cleaning).lower()
    )
    method_peaks = str(method).lower() if method_peaks == "default" else str(method_peaks).lower()
    method_rvt = str(method_rvt).lower()

    # Create dictionary with all inputs
    report_info = {
        "sampling_rate": sampling_rate,
        "method": method,
        "method_cleaning": method_cleaning,
        "method_peaks": method_peaks,
        "method_rvt": method_rvt,
        **kwargs,
    }

    # Get arguments to be passed to cleaning and peak finding functions
    kwargs_cleaning, report_info = get_kwargs(report_info, rsp_clean)
    kwargs_peaks, report_info = get_kwargs(report_info, rsp_peaks)
    kwargs_rvt, report_info = get_kwargs(report_info, rsp_rvt)

    # Save keyword arguments in dictionary
    report_info["kwargs_cleaning"] = kwargs_cleaning
    report_info["kwargs_peaks"] = kwargs_peaks
    report_info["kwargs_rvt"] = kwargs_rvt

    # Initialize refs list with NeuroKit2 reference
    refs = ["""Makowski, D., Pham, T., Lau, Z. J., Brammer, J. C., Lespinasse, F., Pham, H.,
    Schölzel, C., & Chen, S. A. (2021). NeuroKit2: A Python toolbox for neurophysiological signal processing.
    Behavior Research Methods, 53(4), 1689–1696. https://doi.org/10.3758/s13428-020-01516-y
    """]

    # 1. Cleaning
    # ------------
    # Each branch appends a sentence fragment; fragments start with a space so
    # the concatenated report text stays grammatical.
    report_info["text_cleaning"] = f"The raw signal, sampled at {sampling_rate} Hz,"
    if method_cleaning in ["khodadad", "khodadad2018"]:
        report_info["text_cleaning"] += (
            " was preprocessed using linear detrending followed by a fifth order 2Hz low-pass"
            + " IIR Butterworth filter; following Khodadad et al., 2018."
        )
        refs.append(
            """Khodadad, D., Nordebo, S., Müller, B., Waldmann, A., Yerworth, R., Becher, T.,... & Bayford, R. (2018).
        Optimized breath detection algorithm in electrical impedance tomography.
        Physiological measurement, 39(9), 094001."""
        )
    elif method_cleaning in ["hampel", "power", "power2020"]:
        report_info["text_cleaning"] += (
            " was preprocessed using a median-based Hampel filter by replacing values which"
            + f' are {report_info.get("threshold", 3)} median absolute deviation away from the rolling median;'
            + " following Power et al. 2020."
        )
        refs.append(
            """Power, J., Lynch, C., Dubin, M., Silver, B., Martin, A., Jones, R.,(2020)
        Characteristics of respiratory measures in young adults scanned at rest,
        including systematic changes and “missed” deep breaths.
        NeuroImage, Volume 204, 116234"""
        )
    elif method_cleaning in ["biosppy"]:
        report_info["text_cleaning"] += (
            " was preprocessed using a second order 0.1-0.35 Hz bandpass "
            + "Butterworth filter followed by a constant detrending."
        )
    elif method_cleaning in ["none"]:
        report_info[
            "text_cleaning"
        ] += " was directly used for peak detection without preprocessing."
    else:
        # just in case more methods are added
        report_info["text_cleaning"] += f" was cleaned following the {method} method."

    # 2. Peaks
    # ----------
    if method_peaks in ["khodadad", "khodadad2018"]:
        report_info[
            "text_peaks"
        ] = "The peak detection was carried out using the method described in Khodadad et al. (2018)."
        refs.append(
            """Khodadad, D., Nordebo, S., Müller, B., Waldmann, A., Yerworth, R., Becher, T., ... & Bayford, R. (2018).
        Optimized breath detection algorithm in electrical impedance tomography.
        Physiological measurement, 39(9), 094001."""
        )
    elif method_peaks in ["biosppy"]:
        report_info[
            "text_peaks"
        ] = "The peak detection was carried out using the method provided by the Python library BioSPpy (/signals/resp.py)."
    elif method_peaks in ["scipy"]:
        report_info[
            "text_peaks"
        ] = "The peak detection was carried out using the method provided by the Python library SciPy (signal.find_peaks)."
    elif method_peaks in ["none"]:
        report_info["text_peaks"] = "There was no peak detection carried out."
    else:
        report_info[
            "text_peaks"
        ] = f"The peak detection was carried out using the method {method_peaks}."

    # 3. RVT
    # ----------
    if method_rvt in ["harrison", "harrison2021"]:
        report_info[
            "text_rvt"
        ] = "The respiratory volume per time computation was carried out using the method described in Harrison et al. (2021)."
        refs.append(
            """Harrison, S. J., Bianchi, S., Heinzle, J., Stephan, K. E., Iglesias, S., & Kasper, L. (2021).
        A Hilbert-based method for processing respiratory timeseries.
        Neuroimage, 230, 117787."""
        )
    elif method_rvt in ["birn", "birn2006"]:
        report_info[
            "text_rvt"
        ] = "The respiratory volume per time computation was carried out using the method described in Birn et al. (2006)."
        refs.append(
            """Birn, R. M., Diamond, J. B., Smith, M. A., & Bandettini, P. A. (2006).
        Separating respiratory-variation-related fluctuations from neuronal-activity-related fluctuations in
        fMRI. Neuroimage, 31(4), 1536-1548."""
        )
    elif method_rvt in ["power", "power2020"]:
        report_info[
            "text_rvt"
        ] = "The respiratory volume per time computation was carried out using the method described in Power at al. (2020)."
        refs.append(
            """Power, J. D., Lynch, C. J., Dubin, M. J., Silver, B. M., Martin, A., & Jones, R. M. (2020).
        Characteristics of respiratory measures in young adults scanned at rest, including systematic
        changes and "missed" deep breaths. Neuroimage, 204, 116234."""
        )
    elif method_rvt in ["none"]:
        report_info["text_rvt"] = "Respiratory volume per time was not computed."
    else:
        report_info[
            "text_rvt"
        ] = f"The respiratory volume per time computation was carried out using the method described in {method_rvt}."

    # Deduplicate references (several branches can append the same citation)
    report_info["references"] = list(np.unique(refs))
    return report_info
| 8,824 | 41.427885 | 127 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/__init__.py | """Submodule for NeuroKit."""
# Aliases
# from ..signal import signal_rate as rsp_rate
from .rsp_amplitude import rsp_amplitude
from .rsp_analyze import rsp_analyze
from .rsp_clean import rsp_clean
from .rsp_eventrelated import rsp_eventrelated
from .rsp_findpeaks import rsp_findpeaks
from .rsp_fixpeaks import rsp_fixpeaks
from .rsp_intervalrelated import rsp_intervalrelated
from .rsp_methods import rsp_methods
from .rsp_peaks import rsp_peaks
from .rsp_phase import rsp_phase
from .rsp_plot import rsp_plot
from .rsp_process import rsp_process
from .rsp_rate import rsp_rate
from .rsp_rrv import rsp_rrv
from .rsp_rvt import rsp_rvt
from .rsp_simulate import rsp_simulate
from .rsp_symmetry import rsp_symmetry
# Explicit public API of the rsp submodule (controls `from neurokit2.rsp import *`).
__all__ = [
    "rsp_simulate",
    "rsp_clean",
    "rsp_findpeaks",
    "rsp_fixpeaks",
    "rsp_peaks",
    "rsp_phase",
    "rsp_amplitude",
    "rsp_process",
    "rsp_plot",
    "rsp_eventrelated",
    "rsp_rrv",
    "rsp_rvt",
    "rsp_intervalrelated",
    "rsp_analyze",
    "rsp_rate",
    "rsp_symmetry",
    "rsp_methods",
]
| 1,055 | 24.142857 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..misc import as_vector
from ..misc.report import create_report
from ..signal import signal_rate
from .rsp_amplitude import rsp_amplitude
from .rsp_clean import rsp_clean
from .rsp_methods import rsp_methods
from .rsp_peaks import rsp_peaks
from .rsp_phase import rsp_phase
from .rsp_plot import rsp_plot
from .rsp_rvt import rsp_rvt
from .rsp_symmetry import rsp_symmetry
def rsp_process(
    rsp_signal,
    sampling_rate=1000,
    method="khodadad2018",
    method_rvt="harrison2021",
    report=None,
    **kwargs
):
    """**Process a respiration (RSP) signal**

    Convenience function running the full respiration pipeline: cleaning, extrema
    detection, and computation of derived signals (rate, amplitude, phase, symmetry,
    and respiratory volume per time). Supported pipelines:

    * `Khodadad et al. (2018) <https://iopscience.iop.org/article/10.1088/1361-6579/aad7e6/meta>`_
    * `BioSPPy <https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/resp.py>`_

    Parameters
    ----------
    rsp_signal : Union[list, np.array, pd.Series]
        The raw respiration channel (as measured, for instance, by a respiration belt).
    sampling_rate : int
        The sampling frequency of :func:`.rsp_signal` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"khodadad2018"`` (default)
        or ``"biosppy"``.
    method_rvt : str
        The rvt method to apply. Can be one of ``"harrison2021"`` (default), ``"birn2006"``
        or ``"power2020"``.
    report : str
        The filename of a report containing description and figures of processing
        (e.g. ``"myreport.html"``). Needs to be supplied if a report file
        should be generated. Defaults to ``None``. Can also be ``"text"`` to
        just print the text in the console without saving anything.
    **kwargs
        Other arguments to be passed to specific methods. For more information,
        see :func:`.rsp_methods`.

    Returns
    -------
    signals : DataFrame
        A DataFrame of same length as :func:`.rsp_signal` with columns ``"RSP_Raw"``,
        ``"RSP_Clean"``, ``"RSP_Amplitude"``, ``"RSP_Rate"``, ``"RSP_RVT"``, the phase
        columns (``"RSP_Phase"``, ``"RSP_Phase_Completion"``), the symmetry columns,
        and the binary extrema markers (``"RSP_Peaks"``, ``"RSP_Troughs"``).
    info : dict
        A dictionary containing the samples at which inhalation peaks and exhalation
        troughs occur (keys ``"RSP_Peaks"`` and ``"RSP_Troughs"``), as well as the
        signal's sampling rate.

    See Also
    --------
    rsp_clean, rsp_findpeaks, .signal_rate, rsp_amplitude, rsp_plot, rsp_phase, rsp_rvt, rsp_symmetry

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=90, respiratory_rate=15)
      signals, info = nk.rsp_process(rsp, sampling_rate=1000, report="text")

      @savefig p_rsp_process_1.png scale=100%
      fig = nk.rsp_plot(signals, sampling_rate=1000)
      @suppress
      plt.close()
    """
    # Sanitize input and resolve concrete method names / keyword arguments.
    rsp_signal = as_vector(rsp_signal)
    settings = rsp_methods(
        sampling_rate=sampling_rate, method=method, method_rvt=method_rvt, **kwargs
    )

    # Preprocess (detrending / filtering, method-dependent).
    cleaned = rsp_clean(
        rsp_signal,
        sampling_rate=sampling_rate,
        method=settings["method_cleaning"],
        **settings["kwargs_cleaning"],
    )

    # Detect, fix and format extrema (peaks = exhalation onsets, troughs = inhalation onsets).
    extrema_signal, info = rsp_peaks(
        cleaned,
        sampling_rate=sampling_rate,
        method=settings["method_peaks"],
        amplitude_min=0.3,
        **settings["kwargs_peaks"],
    )
    info["sampling_rate"] = sampling_rate  # Expose sampling rate in the info dict

    # Derived signals, each interpolated/expanded to the input length.
    phase = rsp_phase(extrema_signal, desired_length=len(rsp_signal))
    amplitude = rsp_amplitude(cleaned, extrema_signal)
    rate = signal_rate(
        info["RSP_Troughs"], sampling_rate=sampling_rate, desired_length=len(rsp_signal)
    )
    symmetry = rsp_symmetry(cleaned, extrema_signal)
    rvt = rsp_rvt(
        cleaned,
        method=settings["method_rvt"],
        sampling_rate=sampling_rate,
        silent=True,
    )

    # Assemble the output DataFrame (column order kept stable for downstream users).
    signals = pd.concat(
        [
            pd.DataFrame(
                {
                    "RSP_Raw": rsp_signal,
                    "RSP_Clean": cleaned,
                    "RSP_Amplitude": amplitude,
                    "RSP_Rate": rate,
                    "RSP_RVT": rvt,
                }
            ),
            phase,
            symmetry,
            extrema_signal,
        ],
        axis=1,
    )

    if report is not None:
        # Only an HTML report embeds the summary figure; "text" just prints.
        fig = rsp_plot(signals, sampling_rate=sampling_rate) if ".html" in str(report) else None
        create_report(file=report, signals=signals, info=settings, fig=fig)

    return signals, info
| 5,438 | 34.090323 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_peaks.py | # -*- coding: utf-8 -*-
from ..signal import signal_formatpeaks
from .rsp_findpeaks import rsp_findpeaks
from .rsp_fixpeaks import rsp_fixpeaks
def rsp_peaks(rsp_cleaned, sampling_rate=1000, method="khodadad2018", **kwargs):
    """**Identify extrema in a respiration (RSP) signal**

    Runs :func:`.rsp_findpeaks` and :func:`.rsp_fixpeaks` to identify and process
    peaks (exhalation onsets) and troughs (inhalation onsets) in a preprocessed
    respiration signal, using one of several parameter sets:

    * **khodad2018**: Uses the parameters in Khodadad et al. (2018).
    * **biosppy**: Uses the parameters in `BioSPPy's <https://github.com/PIA-Group/BioSPPy>`_
      ``resp()`` function.
    * **scipy** Uses the `scipy <https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html>`_
      peak-detection function.

    Parameters
    ----------
    rsp_cleaned : Union[list, np.array, pd.Series]
        The cleaned respiration channel as returned by :func:`.rsp_clean`.
    sampling_rate : int
        The sampling frequency of :func:`.rsp_cleaned` (in Hz, i.e., samples/second).
    method : str
        The processing pipeline to apply. Can be one of ``"khodadad2018"`` (default),
        ``"biosppy"`` or ``"scipy"``.
    **kwargs
        Other arguments to be passed to the different peak finding methods. See
        :func:`.rsp_findpeaks`.

    Returns
    -------
    info : dict
        Sample indices of peaks (key ``"RSP_Peaks"``) and troughs (key
        ``"RSP_Troughs"``), plus the signal's sampling rate.
    peak_signal : DataFrame
        Same length as the input; occurrences of peaks and troughs marked as "1"
        in columns ``"RSP_Peaks"`` and ``"RSP_Troughs"`` (zeros elsewhere).

    See Also
    --------
    rsp_clean, signal_rate, rsp_findpeaks, rsp_fixpeaks, rsp_amplitude, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import pandas as pd

      rsp = nk.rsp_simulate(duration=30, respiratory_rate=15)
      cleaned = nk.rsp_clean(rsp, sampling_rate=1000)
      peak_signal, info = nk.rsp_peaks(cleaned, sampling_rate=1000)

      data = pd.concat([pd.DataFrame({"RSP": rsp}), peak_signal], axis=1)
      @savefig p_rsp_peaks1.png scale=100%
      fig = nk.signal_plot(data)
      @suppress
      plt.close()

    References
    ----------
    * Khodadad, D., Nordebo, S., Müller, B., Waldmann, A., Yerworth, R., Becher, T., ... & Bayford,
      R. (2018). Optimized breath detection algorithm in electrical impedance tomography.
      Physiological measurement, 39(9), 094001.
    """
    # Locate the extrema, then repair ordering/alternation artifacts.
    extrema_info = rsp_findpeaks(
        rsp_cleaned, sampling_rate=sampling_rate, method=method, **kwargs
    )
    extrema_info = rsp_fixpeaks(extrema_info)

    # Expand the index lists into binary marker columns of the input's length.
    peak_signal = signal_formatpeaks(
        extrema_info, desired_length=len(rsp_cleaned), peak_indices=extrema_info["RSP_Peaks"]
    )

    # Expose the sampling rate alongside the extrema indices.
    extrema_info["sampling_rate"] = sampling_rate

    return peak_signal, extrema_info
| 3,310 | 38.416667 | 117 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_detrend, signal_filter
from ..stats import mad
def rsp_clean(rsp_signal, sampling_rate=1000, method="khodadad2018", **kwargs):
    """**Preprocess a respiration (RSP) signal**

    Clean a respiration signal using different sets of parameters, such as:

    * **khodadad2018**: Linear detrending followed by a fifth order 2Hz low-pass IIR Butterworth
      filter)
    * **BioSPPy**: Second order 0.1-0.35 Hz bandpass Butterworth filter followed by a constant
      detrending).
    * **hampel**: Applies a median-based Hampel filter by replacing values which are 3 (can be
      changed via ``threshold``) :func:`.mad` away from the rolling median.

    Parameters
    ----------
    rsp_signal : Union[list, np.array, pd.Series]
        The raw respiration channel (as measured, for instance, by a respiration belt).
    sampling_rate : int, optional
        The sampling frequency of :func:`.rsp_signal` (in Hz, i.e., samples/second).
    method : str, optional
        The processing pipeline to apply. Can be one of ``"khodadad2018"`` (default),
        ``"biosppy"`` or ``"hampel"``. Can also be ``None`` (or ``"none"``) to return
        the signal unmodified.
    **kwargs
        Other arguments to pass to the cleaning method.

    Returns
    -------
    array
        Vector containing the cleaned respiratory signal.

    Raises
    ------
    ValueError
        If ``method`` is not one of the recognized pipelines.

    See Also
    --------
    rsp_findpeaks, signal_rate, rsp_amplitude, rsp_process, rsp_plot

    Examples
    --------
    .. ipython:: python

      import pandas as pd
      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=30, sampling_rate=50, noise=0.1)
      signals = pd.DataFrame({
          "RSP_Raw": rsp,
          "RSP_Khodadad2018": nk.rsp_clean(rsp, sampling_rate=50, method="khodadad2018"),
          "RSP_BioSPPy": nk.rsp_clean(rsp, sampling_rate=50, method="biosppy"),
          "RSP_Hampel": nk.rsp_clean(rsp, sampling_rate=50, method="hampel", threshold=3)
      })
      @savefig p_rsp_clean1.png scale=100%
      signals.plot()
      @suppress
      plt.close()

    References
    ----------
    * Khodadad, D., Nordebo, S., Müller, B., Waldmann, A., Yerworth, R., Becher, T., ... & Bayford,
      R. (2018). Optimized breath detection algorithm in electrical impedance tomography.
      Physiological measurement, 39(9), 094001.
    * Power, J., Lynch, C., Dubin, M., Silver, B., Martin, A., Jones, R.,(2020)
      Characteristics of respiratory measures in young adults scanned at rest,
      including systematic changes and “missed” deep breaths.
      NeuroImage, Volume 204, 116234

    """
    rsp_signal = as_vector(rsp_signal)

    # Missing data: forward-fill NaNs (with a warning) before any filtering.
    n_missing = np.sum(np.isnan(rsp_signal))
    if n_missing > 0:
        warn(
            f"There are {n_missing} missing data points in your signal."
            " Filling missing values by using the forward filling method.",
            category=NeuroKitWarning,
        )
        rsp_signal = _rsp_clean_missing(rsp_signal)

    # Sanitize `method`. Handle None *before* lowercasing: calling .lower() on
    # None raised AttributeError and made the "no cleaning" branch unreachable.
    if method is None:
        method = "none"
    method = method.lower()  # remove capitalised letters

    if method in ["khodadad", "khodadad2018"]:
        clean = _rsp_clean_khodadad2018(rsp_signal, sampling_rate)
    elif method == "biosppy":
        clean = _rsp_clean_biosppy(rsp_signal, sampling_rate)
    elif method in ["power", "power2020", "hampel"]:
        # Pass the true sampling rate so the Hampel window (specified in
        # seconds) is converted to the right number of samples; previously the
        # helper silently fell back to its 1000 Hz default.
        clean = _rsp_clean_hampel(rsp_signal, sampling_rate=sampling_rate, **kwargs)
    elif method == "none":
        clean = rsp_signal
    else:
        raise ValueError(
            "NeuroKit error: rsp_clean(): 'method' should be one of 'khodadad2018', 'biosppy' or 'hampel'."
        )

    return clean
# =============================================================================
# Handle missing data
# =============================================================================
def _rsp_clean_missing(rsp_signal):
rsp_signal = pd.DataFrame.pad(pd.Series(rsp_signal))
return rsp_signal
# =============================================================================
# Khodadad et al. (2018)
# =============================================================================
def _rsp_clean_khodadad2018(rsp_signal, sampling_rate=1000):
    """Clean the signal following (but not exactly reproducing) the
    "Zero-crossing algorithm with amplitude threshold" of `Khodadad et al. (2018)
    <https://iopscience.iop.org/article/10.1088/1361-6579/aad7e6/meta>`_.

    A 0.05-3 Hz second-order Butterworth band-pass removes slow baseline drift
    (the signal must be centered around zero for reliable zero-crossing
    detection) while preserving breathing rates between 3 and 180 breaths per
    minute, and attenuates high-frequency noise.
    """
    return signal_filter(
        rsp_signal,
        sampling_rate=sampling_rate,
        lowcut=0.05,
        highcut=3,
        order=2,
        method="butterworth",
    )
# =============================================================================
# BioSPPy
# =============================================================================
def _rsp_clean_biosppy(rsp_signal, sampling_rate=1000):
    """Clean the signal with the same defaults as `BioSPPy
    <https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/resp.py>`_:
    a second-order 0.1-0.35 Hz Butterworth band-pass (zero-phase, via filtfilt)
    followed by a constant (order-0) detrend.
    """
    # Cutoffs normalized to the Nyquist frequency (Fs/2), as scipy expects.
    normalized_cutoffs = 2 * np.array([0.1, 0.35]) / sampling_rate

    # Zero-phase band-pass filtering.
    num, den = scipy.signal.butter(N=2, Wn=normalized_cutoffs, btype="bandpass", analog=False)
    bandpassed = scipy.signal.filtfilt(num, den, rsp_signal)

    # Remove the constant offset.
    return signal_detrend(bandpassed, order=0)
# =============================================================================
# Hampel filter
# =============================================================================
def _rsp_clean_hampel(rsp_signal, sampling_rate=1000, window_length=0.1, threshold=3, **kwargs):
    """Median-based Hampel filter.

    Replaces samples that deviate from the rolling median by more than
    ``threshold`` rolling MADs with the rolling median itself. See MatLab's
    explanation https://www.mathworks.com/help/dsp/ref/hampelfilter.html and
    https://stackoverflow.com/a/51731332.

    Parameters
    ----------
    rsp_signal : Union[list, np.array, pd.Series]
        The raw respiration channel (as measured, for instance, by a respiration belt).
    sampling_rate : int, optional
        The sampling frequency of the signal (in Hz), used to convert
        ``window_length`` from seconds to samples.
    window_length : float, optional
        Window to be considered when cleaning, by default 0.1. In seconds.
    threshold : float, optional
        Threshold of deviations after which a point is considered an outlier, by default 3.

    Returns
    -------
    array
        The cleaned signal.
    """
    # Convert the window length from seconds to samples.
    window_length = int(window_length * sampling_rate)

    # Copy explicitly: pd.Series over a numpy array shares memory by default,
    # so the outlier substitution below would otherwise mutate the caller's
    # input array in place.
    rsp_signal = pd.Series(rsp_signal, copy=True)

    # Rolling statistics (centered windows; edges yield NaN comparisons, which
    # evaluate to False and thus leave edge samples untouched).
    rolling_median = rsp_signal.rolling(window=window_length, center=True).median()
    rolling_MAD = rsp_signal.rolling(window=window_length, center=True).apply(mad)
    threshold = threshold * rolling_MAD
    difference = np.abs(rsp_signal - rolling_median)

    # Find outliers and substitute them with the rolling median.
    outlier_idx = difference > threshold
    rsp_signal[outlier_idx] = rolling_median[outlier_idx]
    return as_vector(rsp_signal)
| 7,393 | 34.893204 | 113 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_intervalrelated.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .rsp_rrv import rsp_rrv
def rsp_intervalrelated(data, sampling_rate=1000):
    """**Performs RSP analysis on longer periods of data (typically > 10 seconds), such as resting-state data**

    Parameters
    ----------
    data : DataFrame or dict
        A DataFrame containing the different processed signal(s) as different columns, typically
        generated by :func:`.rsp_process` or :func:`.bio_process`. Can also take a dict containing
        sets of separately processed DataFrames.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed RSP features, including:

        * ``"RSP_Rate_Mean"``: the mean respiratory rate.
        * ``"RSP_Amplitude_Mean"``: the mean respiratory amplitude.
        * ``"RSP_RRV"``: the different respiratory rate variability metrices.
          See :func:`.rsp_rrv` docstrings for details.
        * ``"RSP_Phase_Duration_Inspiration"``: the average inspiratory duration.
        * ``"RSP_Phase_Duration_Expiration"``: the average expiratory duration.
        * ``"RSP_Phase_Duration_Ratio "``: the inspiratory-to-expiratory time ratio (I/E).

    See Also
    --------
    bio_process, rsp_eventrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_resting_5min_100hz")

      # Process the data
      df, info = nk.rsp_process(data["RSP"], sampling_rate=100)

      # Single dataframe is passed
      nk.rsp_intervalrelated(df, sampling_rate=100)

      epochs = nk.epochs_create(df, events=[0, 15000], sampling_rate=100, epochs_end=150)
      nk.rsp_intervalrelated(epochs)

    """
    if isinstance(data, pd.DataFrame):
        # Single recording: compute one row of features.
        features = _rsp_intervalrelated_features(data, sampling_rate)
        intervals = pd.DataFrame.from_dict(features, orient="index").T
    elif isinstance(data, dict):
        # Multiple epochs: one feature row per epoch, keyed by epoch index.
        per_epoch = {}
        for key, epoch in data.items():
            # Seed the feature dict with the epoch label, then fill it in place.
            per_epoch[key] = _rsp_intervalrelated_features(
                epoch, sampling_rate, {"Label": epoch["Label"].iloc[0]}
            )
        intervals = pd.DataFrame.from_dict(per_epoch, orient="index")

    return intervals
# =============================================================================
# Internals
# =============================================================================
def _rsp_intervalrelated_features(data, sampling_rate, output={}):
# Sanitize input
colnames = data.columns.values
if "RSP_Rate" in colnames:
output["RSP_Rate_Mean"] = np.nanmean(data["RSP_Rate"].values)
rrv = rsp_rrv(data, sampling_rate=sampling_rate)
for column in rrv.columns:
output[column] = rrv[column].values.astype("float")
if "RSP_Amplitude" in colnames:
output["RSP_Amplitude_Mean"] = np.nanmean(data["RSP_Amplitude"].values)
if "RSP_RVT" in colnames:
output["RSP_RVT"] = np.nanmean(data["RSP_RVT"].values)
if "RSP_Symmetry_PeakTrough" in colnames:
output["RSP_Symmetry_PeakTrough"] = np.nanmean(data["RSP_Symmetry_PeakTrough"].values)
output["RSP_Symmetry_RiseDecay"] = np.nanmean(data["RSP_Symmetry_RiseDecay"].values)
if "RSP_Phase" in colnames:
# Extract inspiration durations
insp_phases = data[data["RSP_Phase"] == 1]
insp_start = insp_phases.index[insp_phases["RSP_Phase_Completion"] == 0]
insp_end = insp_phases.index[insp_phases["RSP_Phase_Completion"] == 1]
# Check that start of phase is before end of phase
if insp_start[0] > insp_end[0]:
insp_end = insp_end[1:]
# Check for unequal lengths
diff = abs(len(insp_start) - len(insp_end))
if len(insp_start) > len(insp_end):
insp_start = insp_start[: len(insp_start) - diff] # remove extra start points
elif len(insp_end) > len(insp_start):
insp_end = insp_end[: len(insp_end) - diff] # remove extra end points
insp_times = np.array(insp_end - insp_start) / sampling_rate
# Extract expiration durations
exp_phases = data[data["RSP_Phase"] == 0]
exp_start = exp_phases.index[exp_phases["RSP_Phase_Completion"] == 0]
exp_end = exp_phases.index[exp_phases["RSP_Phase_Completion"] == 1]
# Check that start of phase is before end of phase
if exp_start[0] > exp_end[0]:
exp_end = exp_end[1:]
# Check for unequal lengths
diff = abs(len(exp_start) - len(exp_end))
if len(exp_start) > len(exp_end):
exp_start = exp_start[: len(exp_start) - diff] # remove extra start points
elif len(exp_end) > len(exp_start):
exp_end = exp_end[: len(exp_end) - diff] # remove extra end points
exp_times = np.array(exp_end - exp_start) / sampling_rate
output["RSP_Phase_Duration_Inspiration"] = np.mean(insp_times)
output["RSP_Phase_Duration_Expiration"] = np.mean(exp_times)
output["RSP_Phase_Duration_Ratio"] = (
output["RSP_Phase_Duration_Inspiration"] / output["RSP_Phase_Duration_Expiration"]
)
return output
def _rsp_intervalrelated_rrv(data, sampling_rate, output={}):
return output
| 5,657 | 34.810127 | 111 | py |
NeuroKit | NeuroKit-master/neurokit2/rsp/rsp_rrv.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..complexity import entropy_approximate, entropy_sample, fractal_dfa
from ..misc import NeuroKitWarning
from ..signal import signal_power, signal_rate
from ..signal.signal_formatpeaks import _signal_formatpeaks_sanitize
from ..stats import mad
def rsp_rrv(rsp_rate, troughs=None, sampling_rate=1000, show=False, silent=True):
    """**Respiratory Rate Variability (RRV)**

    Computes time domain and frequency domain features for Respiratory Rate Variability (RRV)
    analysis.

    Parameters
    ----------
    rsp_rate : array
        Array containing the respiratory rate, produced by :func:`.signal_rate`.
    troughs : dict
        The samples at which the inhalation onsets occur.
        Dict returned by :func:`rsp_peaks` (Accessible with the key, ``"RSP_Troughs"``).
        Defaults to ``None``.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    show : bool
        If ``True``, will return a Poincaré plot, a scattergram, which plots each breath-to-breath
        interval against the next successive one. The ellipse centers around the average
        breath-to-breath interval. Defaults to ``False``.
    silent : bool
        If ``False``, warnings will be printed. Default to ``True``.

    Returns
    -------
    DataFrame
        DataFrame consisting of the computed RRV metrics, which includes:

        * ``"RRV_SDBB"``: the standard deviation of the breath-to-breath intervals.
        * ``"RRV_RMSSD"``: the root mean square of successive differences of the breath-to-breath
          intervals.
        * ``"RRV_SDSD"``: the standard deviation of the successive differences between adjacent
          breath-to-breath intervals.
        * ``"RRV_BBx"``: the number of successive interval differences that are greater than x
          seconds.
        * ``"RRV_pBBx"``: the proportion of breath-to-breath intervals that are greater than x
          seconds,
          out of the total number of intervals.
        * ``"RRV_VLF"``: spectral power density pertaining to very low frequency band (i.e., 0 to .
          04 Hz) by default.
        * ``"RRV_LF"``: spectral power density pertaining to low frequency band (i.e., .04 to .15
          Hz) by default.
        * ``"RRV_HF"``: spectral power density pertaining to high frequency band (i.e., .15 to .4
          Hz) by default.
        * ``"RRV_LFHF"``: the ratio of low frequency power to high frequency power.
        * ``"RRV_LFn"``: the normalized low frequency, obtained by dividing the low frequency
          power by the total power.
        * ``"RRV_HFn"``: the normalized high frequency, obtained by dividing the low frequency
          power by total power.
        * ``"RRV_SD1"``: SD1 is a measure of the spread of breath-to-breath intervals on the
          Poincaré plot perpendicular to the line of identity. It is an index of short-term
          variability.
        * ``"RRV_SD2"``: SD2 is a measure of the spread of breath-to-breath intervals on the
          Poincaré plot along the line of identity. It is an index of long-term variability.
        * ``"RRV_SD2SD1"``: the ratio between short and long term fluctuations of the
          breath-to-breath intervals (SD2 divided by SD1).
        * ``"RRV_ApEn"``: the approximate entropy of RRV, calculated
          by :func:`.entropy_approximate`.
        * ``"RRV_SampEn"``: the sample entropy of RRV, calculated by :func:`.entropy_sample`.
        * ``"RRV_DFA_alpha1"``: the "short-term" fluctuation value generated from Detrended
          Fluctuation Analysis i.e. the root mean square deviation from the fitted trend of the
          breath-to-breath intervals. Will only be computed if more than 160 breath cycles in the
          signal.
        * ``"RRV_DFA_alpha2"``: the long-term fluctuation value. Will only be computed if more
          than 65 breath cycles in the signal.
        * **MFDFA indices**: Indices related to the :func:`multifractal spectrum <.fractal_dfa()>`.

    See Also
    --------
    signal_rate, rsp_peaks, signal_power, entropy_sample, entropy_approximate

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=90, respiratory_rate=15)
      rsp, info = nk.rsp_process(rsp)
      nk.rsp_rrv(rsp, show=True)

    References
    ----------
    * Soni, R., & Muniyandi, M. (2019). Breath rate variability: a novel measure to study the
      meditation effects. International Journal of Yoga, 12(1), 45.

    """
    # Sanitize input (helper extracts rate and trough indices from DataFrame/dict inputs)
    rsp_rate, troughs = _rsp_rrv_formatinput(rsp_rate, troughs, sampling_rate)

    # Breath-to-breath intervals (in ms) and the instantaneous breathing period
    bbi = np.diff(troughs) / sampling_rate * 1000
    rsp_period = 60 * sampling_rate / rsp_rate

    # Compute indices per domain and merge into a single dict
    rrv = {}  # Initialize empty dict
    rrv.update(_rsp_rrv_time(bbi))
    rrv.update(
        _rsp_rrv_frequency(rsp_period, sampling_rate=sampling_rate, show=show, silent=silent)
    )
    rrv.update(_rsp_rrv_nonlinear(bbi))

    # One-row DataFrame; prefix all columns with "RRV_"
    rrv = pd.DataFrame.from_dict(rrv, orient="index").T.add_prefix("RRV_")

    if show:
        _rsp_rrv_plot(bbi)

    return rrv
# =============================================================================
# Methods (Domains)
# =============================================================================
def _rsp_rrv_time(bbi):
    """Time-domain RRV indices computed from breath-to-breath intervals (in ms)."""
    successive_diff = np.diff(bbi)

    # Mean-based indices
    mean_bb = np.nanmean(bbi)
    rmssd = np.sqrt(np.mean(successive_diff**2))
    out = {
        "RMSSD": rmssd,
        "MeanBB": mean_bb,
        "SDBB": np.nanstd(bbi, ddof=1),
        "SDSD": np.nanstd(successive_diff, ddof=1),
    }
    out["CVBB"] = out["SDBB"] / mean_bb
    out["CVSD"] = rmssd / mean_bb

    # Robust (median-based) analogues
    median_bb = np.nanmedian(bbi)
    out["MedianBB"] = median_bb
    out["MadBB"] = mad(bbi)
    out["MCVBB"] = out["MadBB"] / median_bb

    return out
def _rsp_rrv_frequency(
    rsp_period,
    vlf=(0, 0.04),
    lf=(0.04, 0.15),
    hf=(0.15, 0.4),
    sampling_rate=1000,
    method="welch",
    show=False,
    silent=True,
):
    """Compute frequency-domain indices of breath-to-breath variability.

    Estimates the spectral power of the breath-period signal in the VLF, LF
    and HF bands (band limits in Hz), then derives the LF/HF ratio and the
    normalized band powers (LFn, HFn).
    """
    power = signal_power(
        rsp_period,
        frequency_band=[vlf, lf, hf],
        sampling_rate=sampling_rate,
        method=method,
        max_frequency=0.5,
        show=show,
    )
    power.columns = ["VLF", "LF", "HF"]
    # signal_power() returns a one-row DataFrame; flatten it into a plain dict
    out = power.to_dict(orient="index")[0]
    if silent is False:
        # A power of exactly zero indicates the recording was too short to
        # estimate that band reliably
        for frequency in out.keys():
            if out[frequency] == 0.0:
                warn(
                    "The duration of recording is too short to allow"
                    " reliable computation of signal power in frequency band " + frequency + "."
                    " Its power is returned as zero.",
                    category=NeuroKitWarning,
                )
    # Normalized
    total_power = np.sum(power.values)
    out["LFHF"] = out["LF"] / out["HF"]
    out["LFn"] = out["LF"] / total_power
    out["HFn"] = out["HF"] / total_power
    return out
def _rsp_rrv_nonlinear(bbi):
    """Compute non-linear indices of breath-to-breath interval variability.

    Includes Poincaré plot descriptors (SD1, SD2, SD2/SD1), entropy measures
    (ApEn, SampEn) and detrended fluctuation analysis (DFA / multifractal
    DFA), the latter only when enough intervals are available.
    """
    diff_bbi = np.diff(bbi)
    out = {}
    # Poincaré plot: SD1 = short-term variability (perpendicular to identity
    # line), SD2 = long-term variability (along the identity line)
    out["SD1"] = np.sqrt(np.std(diff_bbi, ddof=1) ** 2 * 0.5)
    out["SD2"] = np.sqrt(2 * np.std(bbi, ddof=1) ** 2 - 0.5 * np.std(diff_bbi, ddof=1) ** 2)
    out["SD2SD1"] = out["SD2"] / out["SD1"]
    # CSI / CVI
    # T = 4 * out["SD1"]
    # L = 4 * out["SD2"]
    # out["CSI"] = L / T
    # out["CVI"] = np.log10(L * T)
    # out["CSI_Modified"] = L ** 2 / T
    # Entropy (tolerance for SampEn: 0.2 * SD, a common convention)
    out["ApEn"] = entropy_approximate(bbi, dimension=2)[0]
    out["SampEn"] = entropy_sample(bbi, dimension=2, tolerance=0.2 * np.std(bbi, ddof=1))[0]
    # DFA: short-term scaling exponent (alpha1, scales 4-16) requires more
    # than 160 intervals
    if len(bbi) / 10 > 16:
        out["DFA_alpha1"] = fractal_dfa(bbi, scale=np.arange(4, 17), multifractal=False)[0]
        # For multifractal
        mdfa_alpha1, _ = fractal_dfa(
            bbi, multifractal=True, q=np.arange(-5, 6), scale=np.arange(4, 17)
        )
        for k in mdfa_alpha1.columns:
            out["MFDFA_alpha1_" + k] = mdfa_alpha1[k].values[0]
    # Long-term scaling exponent (alpha2, scales 16-64) requires > 65 intervals
    if len(bbi) > 65:
        out["DFA_alpha2"] = fractal_dfa(bbi, scale=np.arange(16, 65), multifractal=False)[0]
        # For multifractal
        mdfa_alpha2, _ = fractal_dfa(
            bbi, multifractal=True, q=np.arange(-5, 6), scale=np.arange(16, 65)
        )
        for k in mdfa_alpha2.columns:
            out["MFDFA_alpha2_" + k] = mdfa_alpha2[k].values[0]
    return out
# =============================================================================
# Internals
# =============================================================================
def _rsp_rrv_formatinput(rsp_rate, troughs, sampling_rate=1000):
    """Sanitize the (rsp_rate, troughs) inputs of rsp_rrv().

    Accepts a tuple (as returned by rsp_process), a DataFrame containing
    "RSP_Rate"/"RSP_Troughs" columns, or a plain rate vector plus trough
    indices. Returns a (rate vector, trough indices) pair.
    """
    # rsp_process() returns a (signals, info) tuple; keep the signals part
    if isinstance(rsp_rate, tuple):
        rsp_rate = rsp_rate[0]
        troughs = None
    if isinstance(rsp_rate, pd.DataFrame):
        df = rsp_rate.copy()
        cols = [col for col in df.columns if "RSP_Rate" in col]
        if len(cols) == 0:
            # No rate column: try to rebuild the rate from the troughs column
            cols = [col for col in df.columns if "RSP_Troughs" in col]
            if len(cols) == 0:
                raise ValueError(
                    "NeuroKit error: _rsp_rrv_formatinput(): Wrong input, "
                    "we couldn't extract rsp_rate and respiratory troughs indices."
                )
            else:
                rsp_rate = signal_rate(
                    df[cols], sampling_rate=sampling_rate, desired_length=len(df)
                )
        else:
            rsp_rate = df[cols[0]].values
    if troughs is None:
        try:
            # NOTE: `df` only exists if rsp_rate was a DataFrame; when it was a
            # plain vector and no troughs were given, the NameError below fires
            # and is converted into an informative ValueError.
            troughs = _signal_formatpeaks_sanitize(df, key="RSP_Troughs")
        except NameError as e:
            raise ValueError(
                "NeuroKit error: _rsp_rrv_formatinput(): "
                "Wrong input, we couldn't extract "
                "respiratory troughs indices."
            ) from e
    else:
        troughs = _signal_formatpeaks_sanitize(troughs, key="RSP_Troughs")
    return rsp_rate, troughs
def _rsp_rrv_plot(bbi):
    """Draw a Poincaré plot of breath-to-breath intervals.

    Scatters each interval against the next one and overlays the SD1/SD2
    ellipse and arrows. Returns the matplotlib Figure.
    """
    # Axes: BB_n on x, BB_n+1 on y
    ax1 = bbi[:-1]
    ax2 = bbi[1:]
    # Compute features
    poincare_features = _rsp_rrv_nonlinear(bbi)
    sd1 = poincare_features["SD1"]
    sd2 = poincare_features["SD2"]
    mean_bbi = np.mean(bbi)
    # Plot
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111)
    plt.title("Poincaré Plot", fontsize=20)
    plt.xlabel("BB_n (s)", fontsize=15)
    plt.ylabel("BB_n+1 (s)", fontsize=15)
    plt.xlim(min(bbi) - 10, max(bbi) + 10)
    plt.ylim(min(bbi) - 10, max(bbi) + 10)
    ax.scatter(ax1, ax2, c="b", s=4)
    # Ellipse outline centered on the mean, rotated 45° onto the identity
    # line; width/height padded by 1 so a degenerate ellipse stays visible
    ellipse = matplotlib.patches.Ellipse(
        xy=(mean_bbi, mean_bbi),
        width=2 * sd2 + 1,
        height=2 * sd1 + 1,
        angle=45,
        linewidth=2,
        fill=False,
    )
    ax.add_patch(ellipse)
    # Translucent filled ellipse (actual SD1/SD2 extent)
    ellipse = matplotlib.patches.Ellipse(
        xy=(mean_bbi, mean_bbi), width=2 * sd2, height=2 * sd1, angle=45
    )
    ellipse.set_alpha(0.02)
    ellipse.set_facecolor("blue")
    ax.add_patch(ellipse)
    # SD1/SD2 arrows along the ellipse axes (sqrt(2)/2 projects onto the
    # 45°-rotated axes)
    sd1_arrow = ax.arrow(
        mean_bbi,
        mean_bbi,
        -sd1 * np.sqrt(2) / 2,
        sd1 * np.sqrt(2) / 2,
        linewidth=3,
        ec="r",
        fc="r",
        label="SD1",
    )
    sd2_arrow = ax.arrow(
        mean_bbi,
        mean_bbi,
        sd2 * np.sqrt(2) / 2,
        sd2 * np.sqrt(2) / 2,
        linewidth=3,
        ec="y",
        fc="y",
        label="SD2",
    )
    plt.legend(handles=[sd1_arrow, sd2_arrow], fontsize=12, loc="best")
    return fig
| 12,078 | 33.121469 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/progress_bar.py | import sys
def progress_bar(it, prefix="", size=40, verbose=True):
    """**Progress Bar**

    Generator that yields ``(index, item)`` pairs while drawing a textual
    progress bar on stdout.

    Parameters
    ----------
    it : iterable
        The collection to iterate over (must support ``len()`` when
        ``verbose=True``).
    prefix : str
        A prefix to display before the progress bar.
    size : int
        The width of the progress bar, in characters.
    verbose : bool
        If ``False``, no bar is drawn and the items are simply yielded.

    Examples
    --------
    ..ipython:: python

      import neurokit2 as nk

      for i, j in nk.progress_bar(["a", "b", "c"], prefix="Progress: "):
          pass
      print(i, j)

    """
    if not verbose:
        yield from enumerate(it)
        return

    total = len(it)

    def draw(done):
        # Carriage return keeps the bar on a single, continuously-updated line
        filled = int(size * done / total)
        bar = "█" * filled + "." * (size - filled)
        print(
            f"{prefix}[{bar}] {done}/{total}",
            end="\r",
            file=sys.stdout,
            flush=True,
        )

    draw(0)
    for index, item in enumerate(it):
        yield index, item
        # Update only after the consumer has processed the item
        draw(index + 1)
    print("\n", flush=True, file=sys.stdout)
| 1,114 | 20.862745 | 74 | py |
def parallel_run(function, arguments_list, n_jobs=-2, **kwargs):
    """**Parallel processing utility function** (requires the ``joblib`` package)

    Parameters
    -----------
    function : function
        A callable function.
    arguments_list : list
        A list of dictionaries. The function will iterate through this list and pass each dictionary
        inside as ``**kwargs`` to the main function.
    n_jobs : int
        Number of cores to use. ``-2`` means all but 1. See :func:`.joblib.Parallel`.
    **kwargs
        Other arguments that can be passed to :func:`.joblib.Parallel`, such as ``verbose``.

    Returns
    -------
    list
        A list of outputs.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk
      import time

      # The function simply returns the input (but waits 3 seconds.)
      def my_function(x):
          time.sleep(3)
          return x

      arguments_list = [{"x": 1}, {"x": 2}, {"x": 3}]

      nk.parallel_run(my_function, arguments_list)

    """
    # joblib is an optional dependency; fail with an informative message if absent.
    # (Fix: ImportError was previously raised with two positional arguments, which
    # produced a tuple-looking message instead of a single readable string.)
    try:
        import joblib
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: parallel_run(): the 'joblib' module is required for this function"
            " to run. Please install it first (`pip install joblib`)."
        ) from e

    # Lazily build one delayed call per argument dictionary and dispatch them
    parallel = joblib.Parallel(n_jobs=n_jobs, **kwargs)
    funs = (joblib.delayed(function)(**arguments) for arguments in arguments_list)
    return parallel(funs)
| 1,501 | 29.04 | 105 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_groups.py | import itertools
def find_groups(x):
    """**Find and group repeating (identical) values in a list**

    Parameters
    ----------
    x : list
        The list to look in.

    Returns
    -------
    list
        A list of lists, each containing one run of identical consecutive values.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      x = [2, 2, 2, 2, 1, 3, 3, 2, 2, 2, 1]
      groups = nk.find_groups(x)
      groups

    """
    # groupby() with no key splits the sequence at every change of value
    groups = []
    for _, run in itertools.groupby(x):
        groups.append(list(run))
    return groups
| 537 | 16.933333 | 88 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/check_type.py | import numpy as np
import pandas as pd
def check_type(x, what="str"):
    """**Check type of input**

    Creates a list of boolean values to check if the input is of the target type.

    Parameters
    ----------
    x : int, list, pd.DataFrame, pd.Series, np.ndarray
        Target of checking
    what : str
        Compare the dtype of target with what.

    Returns
    -------
    array
        An array of boolean values.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      nk.check_type([1, 2, 3, "hello"], what="str")
      nk.check_type(pd.DataFrame({"A": [3, 1, 2, 4, 6, np.nan],
                                  "B": [3, 1, 2, "hello", 6, 5]}), what="str")

    """
    # Only string-checking is implemented for now; any other `what` falls
    # through and implicitly returns None (no check performed).
    if what == "str":
        return is_string(x)
def is_string(x):
    """Return boolean mask(s) indicating which elements of `x` are strings.

    Parameters
    ----------
    x : list, pd.DataFrame, pd.Series, np.ndarray or scalar
        Object to inspect.

    Returns
    -------
    np.ndarray
        For lists: one boolean per element. For DataFrames: one boolean per
        column. For Series/ndarrays: a single boolean describing the dtype.
        For scalars: a 0-d boolean array.
    """
    if isinstance(x, list):
        out = [isinstance(member, str) for member in x]
    elif isinstance(x, pd.DataFrame):
        # One boolean per column: object dtype is pandas' generic container,
        # used (among others) for strings
        out = [member == 'object' for member in list(x.dtypes)]
    elif isinstance(x, pd.Series):
        out = [x.dtype == "object"]
    elif isinstance(x, np.ndarray):
        # Fix: compare the dtype *kind* ("U" = unicode, "S" = bytestring)
        # instead of the literal dtype "U1", which only matched arrays of
        # single-character strings (e.g. np.array(["hello"]) has dtype "<U5").
        out = [x.dtype.kind in ("U", "S")]
    else:
        out = isinstance(x, str)
    return np.array(out)
| 1,198 | 22.057692 | 81 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/type_converters.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def as_vector(x):
    """**Convert to vector**

    Coerce the input into a one-dimensional :class:`numpy.ndarray`.

    Parameters
    ----------
    x : scalar, list, np.ndarray, pd.Series or pd.DataFrame
        Input to be coerced. Two-dimensional inputs are accepted only if they
        have a single column. Scalars (including 0-d numpy scalars such as
        ``np.float32``) are wrapped into a length-1 vector.

    Returns
    -------
    np.ndarray
        A one-dimensional array.

    Raises
    ------
    ValueError
        If the input has more than one dimension and cannot be squeezed into
        a vector.

    Examples
    --------
      import neurokit2 as nk

      x = nk.as_vector(x=range(3))
      y = nk.as_vector(x=[0, 1, 2])
      z = nk.as_vector(x=np.array([0, 1, 2]))
      z

      x = nk.as_vector(x=0)
      x

      x = nk.as_vector(x=pd.Series([0, 1, 2]))
      y = nk.as_vector(x=pd.DataFrame([0, 1, 2]))
      y

    """
    if isinstance(x, (pd.Series, pd.DataFrame)):
        out = x.values
    elif isinstance(x, (str, float, int, np.intc, np.int8, np.int16, np.int32, np.int64)):
        out = np.array([x])
    else:
        out = np.array(x)

    if isinstance(out, np.ndarray):
        shape = out.shape
        if len(shape) == 0:
            # Generalization: 0-d arrays come from numpy scalar types not
            # listed above (e.g. np.float32, np.uint8); previously these
            # raised ValueError. Promote them to a length-1 vector.
            out = out.reshape(1)
        elif len(shape) == 1:
            pass
        elif len(shape) == 2 and shape[1] == 1:
            # Single-column 2D array (e.g. from a one-column DataFrame)
            out = out[:, 0]
        else:
            raise ValueError(
                "NeuroKit error: we expect the user to provide a "
                "vector, i.e., a one-dimensional array (such as a "
                "list of values). Current input of shape: " + str(shape)
            )

    return out
| 1,159 | 23.680851 | 90 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_closest.py | import numpy as np
import pandas as pd
from .type_converters import as_vector
def find_closest(
    closest_to, list_to_search_in, direction="both", strictly=False, return_index=False
):
    """**Find the closest number in the array from a given number x**

    Parameters
    ----------
    closest_to : float
        The target number(s) to find the closest of.
    list_to_search_in : list
        The list of values to look in.
    direction : str
        ``"both"`` for smaller or greater, ``"greater"`` for only greater numbers and ``"smaller"`` for the closest smaller.
    strictly : bool
        ``False`` for strictly superior or inferior or ``True`` for including equal.
    return_index : bool
        If ``True``, will return the index of the closest value in the list.

    Returns
    ----------
    closest : int
        The closest number in the array.

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Single number
      x = nk.find_closest(1.8, [3, 5, 6, 1, 2])
      x

      y = nk.find_closest(1.8, [3, 5, 6, 1, 2], return_index=True)
      y

      # Vectorized version
      x = nk.find_closest([1.8, 3.6], [3, 5, 6, 1, 2])
      x

    """
    # Transform to arrays (the search list becomes a Series so that pandas
    # idxmin-based lookups can be used per target)
    closest_to = as_vector(closest_to)
    list_to_search_in = pd.Series(as_vector(list_to_search_in))
    # One lookup per target value
    out = [
        _find_closest(i, list_to_search_in, direction, strictly, return_index) for i in closest_to
    ]
    # Scalar in -> scalar out; vector in -> array out
    if len(out) == 1:
        return out[0]
    else:
        return np.array(out)
# =============================================================================
# Internal
# =============================================================================
def _find_closest(
    closest_to, list_to_search_in, direction="both", strictly=False, return_index=False
):
    """Find the closest match for a single target value.

    Returns NaN when no candidate satisfies the direction/strictness
    constraints (pandas raises ValueError on an empty selection).
    """
    try:
        index, closest = _find_closest_single_pandas(
            closest_to, list_to_search_in, direction, strictly
        )
    except ValueError:
        # Nothing satisfies the constraint (e.g. no value strictly greater)
        index = closest = np.nan
    return index if return_index is True else closest
# =============================================================================
# Methods
# =============================================================================
def _findclosest_base(x, vals, direction="both", strictly=False):
if direction == "both":
closest = min(vals, key=lambda y: np.abs(y - x))
if direction == "smaller":
if strictly is True:
closest = max(y for y in vals if y < x)
else:
closest = max(y for y in vals if y <= x)
if direction == "greater":
if strictly is True:
closest = min(filter(lambda y: y > x, vals))
else:
closest = min(filter(lambda y: y >= x, vals))
return closest
def _find_closest_single_pandas(x, vals, direction="both", strictly=False):
if direction in ["both", "all"]:
index = (np.abs(vals - x)).idxmin()
if direction in ["smaller", "below"]:
if strictly is True:
index = (np.abs(vals[vals < x] - x)).idxmin()
else:
index = (np.abs(vals[vals <= x] - x)).idxmin()
if direction in ["greater", "above"]:
if strictly is True:
index = (vals[vals > x] - x).idxmin()
else:
index = (vals[vals >= x] - x).idxmin()
closest = vals[index]
return index, closest
| 3,426 | 26.416 | 124 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/expspace.py | import numpy as np
def expspace(start, stop, num=50, out=int, base=1):
    """**Exponential range**

    Creates a list of integer values (by default) of a given length from start to stop, spread by
    an exponential function.

    Parameters
    ----------
    start : int
        Minimum range values.
    stop : int
        Maximum range values.
    num : int
        Number of samples to generate. Default is 50. Must be non-negative.
    out : type
        Type of the returned values. Default is int.
    base : float
        If 1, will use :func:`.np.exp`, if 2 will use :func:`.np.exp2`.

    Returns
    -------
    array
        An array of values spread by the exponential function, with exact
        endpoints ``start`` and ``stop``.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      nk.expspace(start=4, stop=100, num=10)
      nk.expspace(start=0, stop=1, num=10, out=float)

    """
    # Map [start, stop] into log-space, sample it linearly, then map back.
    # The "1 +" / "- 1" shift keeps start=0 valid (log(0) is undefined) and
    # makes the endpoints exact: f(log(1 + start)) - 1 == start.
    if base == 1:
        seq = np.exp(np.linspace(np.log(1 + start), np.log(1 + stop), num, endpoint=True)) - 1
    else:
        # Bug fix: the "- 1" must be applied *after* exponentiation (mirroring
        # the base-1 branch), not subtracted from the exponent. Previously
        # expspace(0, 1, base=2) started at 0.5 instead of 0.
        seq = np.exp2(np.linspace(np.log2(1 + start), np.log2(1 + stop), num, endpoint=True)) - 1

    # Round and convert to int
    if out == int:
        seq = np.round(seq).astype(int)
    return seq
| 1,270 | 24.938776 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_knee.py | import warnings
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
from ..stats import rescale
def find_knee(y, x=None, S=1, show=False, verbose=True):
    """**Find Knee / Elbow**

    Find the knee / elbow in a curve using a basic adaptation of the *kneedle* algorithm.

    Parameters
    ----------
    y : list
        A vector of values for which to detect the knee / elbow.
    x : list
        Optional x-coordinates of the values (defaults to ``0..n-1``).
    S : float
        The sensitivity parameter allows us to adjust how aggressive we want to be when
        detecting knees. Smaller values detect knees quicker, while larger values are more
        conservative.
    show : bool
        If ``True``, plot the original curve, its smoothed version, the
        difference curve and the detected knee.
    verbose : bool
        If ``True``, warn when no knee point is found.

    Returns
    -------
    The x-coordinate of the detected knee (or of the last point if none found).

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      y = np.log(np.arange(1, 100))
      y += np.random.normal(0, 0.2, len(y))
      nk.find_knee(y, show=True)

    References
    -----------
    * Satopaa, V., Albrecht, J., Irwin, D., & Raghavan, B. (2011, June). Finding a" kneedle" in a
      haystack: Detecting knee points in system behavior. In 2011 31st international conference on
      distributed computing systems workshops (pp. 166-171). IEEE.

    """
    n = len(y)
    if n <= 5:
        raise ValueError("Input vector must have at least six values.")
    if x is None:
        x = np.arange(n)
    # Map x onto the unit interval
    idx = rescale(x, to=[0, 1])
    # Smooth using spline
    spline = scipy.interpolate.UnivariateSpline(x=idx, y=y, k=5)
    smoothed = spline(idx)
    # Normalize to the unit square (0 - 1)
    smoothed = (smoothed - np.min(smoothed)) / (np.max(smoothed) - np.min(smoothed))
    # Difference curve: deviation of the normalized curve from the diagonal
    Y_d = smoothed - idx
    # Collect local maxima of the difference curve (knee candidates)
    X_lm = []
    Y_lm = []
    maxima_ids = []
    for i in range(1, n - 1):
        if Y_d[i] > Y_d[i - 1] and Y_d[i] > Y_d[i + 1]:
            X_lm.append(idx[i])
            Y_lm.append(Y_d[i])
            maxima_ids.append(i)
    # Sensitivity thresholds: each maximum must later drop below its threshold
    # to be confirmed as the knee
    T_lm = Y_lm - S * np.sum(np.diff(idx)) / (n - 1)
    knee_point_index = _locate(Y_d, T_lm, maxima_ids)
    # If no knee point was found, return the last point
    if knee_point_index is None:
        if verbose is True:
            warnings.warn("No knee point found, retuning last.")
        knee = n - 1
    else:
        knee_point = X_lm[knee_point_index]
        # Which index
        knee = np.where(idx == knee_point)[0][0]
    if show is True:
        plt.plot(x, y, label="Original")
        plt.plot(x, rescale(smoothed, to=[np.nanmin(y), np.nanmax(y)]), label="Smoothed")
        plt.plot(x, rescale(Y_d, to=[np.nanmin(y), np.nanmax(y)]), label="Difference")
        plt.axvline(x=x[knee], color="red", linestyle="--")
        plt.legend()
    return x[knee]
def _locate(Y_d, T_lm, maxima_ids):
n = len(Y_d)
for j in range(0, n):
for index, i in enumerate(maxima_ids):
if j <= i:
continue
if Y_d[j] <= T_lm[index]:
return index
| 2,830 | 28.185567 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_consecutive.py | import itertools
def find_consecutive(x):
    """**Find and group consecutive values in a list**

    Parameters
    ----------
    x : list
        The list to look in.

    Returns
    -------
    list
        A list of tuples corresponding to groups containing all the consecutive numbers.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      x = [2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 20]
      nk.find_consecutive(x)

    """
    # Within a run of consecutive numbers, (value - position) is constant,
    # so grouping by that offset splits the runs.
    runs = []
    for _, members in itertools.groupby(enumerate(x), key=lambda pair: pair[1] - pair[0]):
        runs.append(tuple(value for _, value in members))
    return runs
| 568 | 17.966667 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/listify.py | # -*- coding: utf-8 -*-
def listify(**kwargs):
    """**Transforms arguments into lists of the same length**

    Each keyword argument is coerced to a list and cycled/truncated so that
    all returned lists share the length of the longest sized input.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      nk.listify(a=3, b=[3, 5], c=[3])

    """
    # The longest sized, non-string argument determines the target length
    # (strings are treated as scalars, not as character sequences)
    target_length = 1
    for value in kwargs.values():
        if not isinstance(value, str):
            try:
                target_length = max(target_length, len(value))
            except TypeError:
                # Scalars have no len(); they count as length 1
                pass

    # Coerce every argument to a list cycled up to the target length
    return {
        key: _multiply_list(value if isinstance(value, list) else [value], target_length)
        for key, value in kwargs.items()
    }
def _multiply_list(lst, length):
q, r = divmod(length, len(lst))
return q * lst + lst[:r]
| 854 | 19.853659 | 61 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/_warnings.py | # -*- coding: utf-8 -*-
# Note that this files starts with a '_' so that it's always on top of the order
# of loadings (preventing circular imports)
class NeuroKitWarning(RuntimeWarning):
    """Category for runtime warnings that occur within the NeuroKit library.

    Passed as the ``category`` argument of ``warnings.warn`` throughout the
    package, so users can filter NeuroKit-specific warnings selectively.
    """
| 271 | 29.222222 | 80 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/random.py | import copy
import numbers
import numpy as np
def check_random_state(seed=None):
    """**Turn seed into a random number generator**

    Parameters
    ----------
    seed : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. If seed is None, a numpy.random.Generator is created
        with fresh, unpredictable entropy. If seed is an int, a new numpy.random.RandomState
        instance is created, seeded with seed. If seed is already a Generator or RandomState
        instance then that instance is used.
        The main difference between the legacy RandomState class and the new Generator class is
        that the former has better reproducibility and compatibility guarantees (it is effectively
        frozen from NumPy v1.16) while the latter has better statistical "randomness" properties
        and lower computational cost.
        See: https://numpy.org/doc/stable/reference/random/legacy.html for further information.
        Note: to initialise the new Generator class with an integer seed, use, e.g.:
        ``check_random_state(np.random.SeedSequence(123))``.

    Returns
    -------
    rng: numpy.random.Generator or numpy.random.RandomState
        Random number generator.
    """
    # Already a generator: pass it through untouched
    if isinstance(seed, (np.random.Generator, np.random.RandomState)):
        return seed
    # Integer seeds map to the legacy RandomState class (frozen since
    # NumPy v1.16, hence stronger reproducibility guarantees)
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    # Anything else (None, SeedSequence, ...) seeds the modern Generator class
    return np.random.default_rng(seed)
def spawn_random_state(rng, n_children=1):
    """**Create new independent children random number generators from parent generator/seed**

    Parameters
    ----------
    rng : None, int, numpy.random.RandomState or numpy.random.Generator
        Random number generator to be spawned (numpy.random.RandomState or numpy.random.Generator). If it is None
        or an int seed, then a parent random number generator is first created with ``misc.check_random_state``.
    n_children : int
        Number of children generators to be spawned.

    Returns
    -------
    children_generators : list of generators
        List of children random number generators.

    Examples
    ----------
    * **Example 1**: Simulate data for a cohort of participants

    .. ipython:: python

      import neurokit2 as nk

      master_seed = 42
      n_participants = 8
      participants_RNGs = nk.misc.spawn_random_state(master_seed, n_children=n_participants)
      PPGs = []
      for i in range(n_participants):
          PPGs.append(nk.ppg_simulate(random_state=participants_RNGs[i]))

    """
    rng = check_random_state(rng)
    try:
        # Try to spawn the rng by using the new API (numpy >= 1.25)
        return rng.spawn(n_children)
    except AttributeError:
        # It looks like this version of numpy does not implement rng.spawn(), so we do its job
        # manually; see: https://github.com/numpy/numpy/pull/23195
        if rng._bit_generator._seed_seq is not None:
            rng_class = type(rng)
            bit_generator_class = type(rng._bit_generator)
            return [rng_class(bit_generator_class(seed=s)) for s in rng._bit_generator._seed_seq.spawn(n_children)]
    except TypeError:
        # The rng does not support spawning through SeedSequence, see below
        pass
    # Implement a rudimentary but reproducible substitute for spawning rng's that also works for
    # RandomState with the legacy MT19937 bit generator
    # NOTE: Spawning the same generator multiple times is not supported (may lead to mutually
    # dependent spawned generators). Spawning the children (in a tree structure) is allowed.
    # Start by creating an rng to sample integers (to be used as seeds for the children) without
    # advancing the original rng
    temp_rng = rng._bit_generator.jumped()
    # Generate and return children initialised with the seeds obtained from temp_rng
    return [np.random.RandomState(seed=s) for s in temp_rng.random_raw(n_children)]
def check_random_state_children(random_state_parent, random_state_children, n_children=1):
    """**Create new independent children random number generators to be used in sub-functions**

    Parameters
    ----------
    random_state_parent : None, int, numpy.random.RandomState or numpy.random.Generator
        Parent's random state (see ``misc.check_random_state``).
    random_state_children : {'legacy', 'spawn'}, None, int, numpy.random.RandomState or numpy.random.Generator
        If ``"legacy"``, use the same random state as the parent (discouraged as it generates
        dependent random streams). If ``"spawn"``, spawn independent children random number
        generators from the parent random state. If any of the other types, generate independent
        children random number generators from the random_state_children provided.
    n_children : int
        Number of children generators to be spawned.

    Returns
    -------
    children_generators : list of generators
        List of children random number generators.
    """
    if random_state_children == "legacy":
        # Reuse (copies of) the parent state: children streams are NOT independent
        return [copy.copy(random_state_parent) for _ in range(n_children)]
    # Spawn either from the parent ("spawn") or from the explicitly provided state
    source = random_state_parent if random_state_children == "spawn" else random_state_children
    return spawn_random_state(source, n_children)
| 5,499 | 43.715447 | 115 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/report.py | # -*- coding: utf-8 -*-
import inspect
import matplotlib
import numpy as np
import pandas as pd
def create_report(file="myreport.html", signals=None, info={"sampling_rate": 1000}, fig=None):
    """**Reports**

    Create report containing description and figures of processing.
    This function is meant to be used via the :func:`.rsp_process` or :func:`.ppg_process`
    functions.

    Parameters
    ----------
    file : str
        Name of the file to save the report to. Can also be ``"text"`` to simply print the text in
        the console.
    signals : pd.DataFrame
        A DataFrame of signals. Usually obtained from :func:`.rsp_process`, :func:`.ppg_process`, or
        :func:`.emg_process`.
    info : dict
        A dictionary containing the information of peaks and the signals' sampling rate. Usually
        obtained from :func:`.rsp_process` or :func:`.ppg_process`. NOTE(review): the mutable
        default is only ever read here, never mutated, so it is safe; the dict is expected to
        contain a "references" key.
    fig : matplotlib.figure.Figure or plotly.graph_objects.Figure
        A figure containing the processed signals. Usually obtained from :func:`.rsp_plot`,
        :func:`.ppg_plot`, or :func:`.emg_plot`.

    Returns
    -------
    str
        The report as a string.

    See Also
    --------
    rsp_process, ppg_process, emg_process

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      rsp = nk.rsp_simulate(duration=30, sampling_rate=200, random_state=0)
      signals, info = nk.rsp_process(rsp, sampling_rate=200, report="text")

    """
    # Build the HTML fragments (method descriptions + references, summary table)
    description, ref = text_combine(info)
    table_html, table_md = summarize_table(signals)

    # Print text in the console
    for key in [k for k in info.keys() if "text_" in k]:
        print(info[key] + "\n")

    print(table_md)
    print("\nReferences")
    for s in info["references"]:
        print("- " + s)

    # Save report (only when an .html filename was requested; e.g. file="text"
    # skips this and the report stays console-only)
    if ".html" in file:
        # Make figures
        fig_html = '<h2 style="background-color: #FB661C">Visualization</h1>'
        fig_html += fig_to_html(fig)
        print(f"The report has been saved to {file}")
        contents = [description, table_html, fig_html, ref]
        html_save(contents=contents, file=file)
def summarize_table(signals):
    """Create a summary table (HTML and Markdown) of basic signal statistics.

    Summarizes the first column whose name contains "Rate" (mean and SD).
    Returns a pair of empty strings when no such column exists.
    """
    # TODO: add more features
    rate_columns = [column for column in signals.columns if "Rate" in column]
    if not rate_columns:
        return "", ""

    # Summarize only the first *Rate column found
    rate = rate_columns[0]
    stats = pd.DataFrame(
        {rate + "_Mean": np.mean(signals[rate]), rate + "_SD": np.std(signals[rate])},
        index=[0],
    )

    # Make HTML and Markdown versions
    html = '<h2 style="background-color: #D60574">Summary table</h1>' + stats.to_html(index=None)
    try:
        md = stats.to_markdown(index=None)
    except ImportError:
        # to_markdown needs the optional `tabulate` package
        md = stats  # in case printing markdown export fails
    return html, md
def text_combine(info):
    """Reformat the processing-description dictionary into HTML snippets.

    Returns a (preprocessing, references) pair of HTML fragments built from
    the "text_*" and "references" entries of `info`.
    """
    # Preprocessing section: concatenate whichever description texts exist
    preprocessing = '<h2 style="background-color: #FB1CF0">Preprocessing</h1>'
    preprocessing += "".join(
        info[key] + "<br>" for key in ["text_cleaning", "text_peaks"] if key in info
    )

    # References section: one <li> per citation
    ref = '<h2 style="background-color: #FBB41C">References</h1>'
    if "references" in info:
        items = "".join("<li>" + reference + "</li>" + "\n" for reference in info["references"])
        ref += "\n <ul> \n" + items + "\n </ul> \n"
    return preprocessing, ref
def fig_to_html(fig):
    """Convert a figure (str / matplotlib / plotly) to an HTML fragment.

    Strings pass through unchanged; matplotlib figures are embedded as
    base64-encoded PNG images; plotly figures are exported via ``to_html``.
    Anything else (including plotly figures when plotly is not installed)
    yields an empty string.
    """
    if isinstance(fig, str):
        return fig

    if isinstance(fig, matplotlib.pyplot.Figure):
        # Embed the rendered PNG directly into the page, see
        # https://stackoverflow.com/questions/48717794/matplotlib-embed-figures-in-auto-generated-html
        import base64
        from io import BytesIO

        buffer = BytesIO()
        fig.savefig(buffer, format="png")
        encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
        return f"<img src='data:image/png;base64,{encoded}'>"

    # plotly is an optional dependency
    try:
        import plotly
    except ImportError:
        return ""
    if isinstance(fig, plotly.graph_objs._figure.Figure):
        # Keep only the document body, see
        # https://stackoverflow.com/questions/59868987/plotly-saving-multiple-plots-into-a-single-html
        return fig.to_html().split("<body>")[1].split("</body>")[0]
    return ""
def html_save(contents=[], file="myreport.html"):
    """Combine figures and text in a single HTML document.

    Each element of `contents` is either an HTML string (written as-is) or a
    plotly figure (exported to HTML). NOTE(review): the mutable default
    `contents=[]` is never mutated here, so it is safe.
    """
    # https://stackoverflow.com/questions/59868987/plotly-saving-multiple-plots-into-a-single-html
    with open(file, "w") as page:
        # Document head: shared CSS styling and the report title
        page.write(
            r"""<html>
            <head>
            <style>
            h1 {
            text-align: center;
            font-family: Arial, Helvetica, sans-serif;
            }
            h2 {
            text-align: center;
            font-family: Arial, Helvetica, sans-serif;
            }
            p {
            text-align: left;
            font-family: Arial, Helvetica, sans-serif;
            }
            div {
            text-align: center;
            font-family: Arial, Helvetica, sans-serif;
            }
            ul {
            text-align: left;
            list-style-position: inside;
            font-family: Arial, Helvetica, sans-serif;
            }
            </style>
            </head>
            <body>
    <h1>NeuroKit Processing Report</h1>"""
        )
        # Body: append each content section separated by a line break
        for content in contents:
            if isinstance(content, str):
                inner_html = content
            else:
                # assume the content is an interactive plotly figure and export to HTML
                inner_html = content.to_html().split("<body>")[1].split("</body>")[0]
            page.write(inner_html)
            page.write("<br>")
        page.write("</body></html>" + "\n")
def get_default_args(func):
    """Return a mapping of parameter name -> default value for `func`.

    Parameters without a default are omitted.
    https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
    """
    defaults = {}
    for name, parameter in inspect.signature(func).parameters.items():
        if parameter.default is not inspect.Parameter.empty:
            defaults[name] = parameter.default
    return defaults
def get_kwargs(report_info, func):
    """Extract the non-default keyword arguments of `func` from `report_info`.

    `report_info` is completed in place with the defaults of any argument the
    user did not specify; arguments whose value differs from the default are
    collected into `kwargs`. Returns (kwargs, report_info).
    """
    kwargs = {}
    for key, default in get_default_args(func).items():
        # sampling_rate and method are handled separately by the callers
        if key in ["sampling_rate", "method"]:
            continue
        if key not in report_info:
            # Not specified by the user: record the default
            report_info[key] = default
        elif report_info[key] != default:
            # Specified and different from the default: pass it through
            kwargs[key] = report_info[key]
    return kwargs, report_info
| 7,440 | 34.433333 | 110 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_plateau.py | # -*- coding: utf-8 -*-
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from ..events.events_plot import events_plot
def find_plateau(values, show=True):
    """**Find the point of plateau in an array of values**

    Parameters
    ----------
    values : ndarray
        An array of values.
    show : bool
        If ``True``, will return the plot visualizing the trajectory and point of plateau.

    Returns
    ----------
    plateau : int
        The index of the plateau (or None if no suitable point is found).

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Generate values manually
      x = np.linspace(1, 5, 50)
      y = 2.5 * np.log(1.3 * x) + 0.5
      y = y + 0.05 * np.random.normal(size=len(x))

      # Find plateau
      @savefig p_find_plateau1.png scale=100%
      plateau = nk.find_plateau(y, show=True)
      @suppress
      plt.close()
      plateau

    """
    # Candidate points must lie on a rising segment...
    rising = np.where(np.diff(values) > 0)[0]

    # ...where the slope is decelerating (negative second derivative)
    second_derivative = np.diff(np.diff(values))
    decelerating = np.where(second_derivative < 0)[0]
    candidates = np.intersect1d(rising, decelerating)

    # Discard local minima (inverse peaks)
    inverse_peaks = scipy.signal.find_peaks(-1 * values)[0]
    if len(inverse_peaks) > 0:
        candidates = [i for i in candidates if i not in inverse_peaks]

    # Keep only candidates among the 10% most negative slope changes
    strongest = np.argsort(second_derivative)[: int(0.1 * len(second_derivative))]
    optimal = [i for i in strongest if i in candidates]

    if len(optimal) < 1:
        return None

    # The plateau is the (first) position of the highest value among the
    # retained candidates
    plateau = np.where(values == np.max(values[optimal]))[0][0]
    if show:
        events_plot([plateau], values)
        # _find_plateau_plot(values, increasing_segments, indices, optimal, plateau)
    return plateau
def _find_plateau_plot(values, increasing_segments, indices, optimal, plateau):
    """For visualization of the steps involved in `find_plateau()`.

    Draws a 2x2 grid: (1) rising segments, (2) decelerating positive
    gradients, (3) strongest slope changes, (4) the selected plateau point.
    Returns the matplotlib Figure.
    """
    # Prepare fig
    fig = plt.figure(constrained_layout=False)
    spec = gs.GridSpec(ncols=2, nrows=2, height_ratios=[1, 1], width_ratios=[1, 1])
    ax1 = fig.add_subplot(spec[0, 0])
    ax2 = fig.add_subplot(spec[0, 1])
    ax3 = fig.add_subplot(spec[1, 0])
    ax4 = fig.add_subplot(spec[1, 1])
    # Plot: each panel shows the raw curve plus vertical markers for the
    # indices retained at that step
    ax1.plot(values)
    ax1.set_title("Points of increasing segments")
    for i in increasing_segments:
        ax1.axvline(x=i, color="red", linestyle="--")
    ax2.plot(values)
    ax2.set_title("Points of decelerating positive gradients")
    for i in indices:
        ax2.axvline(x=i, color="blue", linestyle="--")
    ax3.plot(values)
    ax3.set_title("Points of greatest slope changes")
    for i in optimal:
        ax3.axvline(x=i, color="purple", linestyle="--")
    ax4.plot(values)
    ax4.set_title("Optimal Point")
    ax4.axvline(x=plateau, color="orange", linestyle="--")
    return fig
| 3,005 | 27.358491 | 90 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/__init__.py | """Submodule for NeuroKit.
isort:skip_file (since isort-ing the imports generates circular imports)
"""
from ._warnings import NeuroKitWarning
from .random import check_random_state, check_random_state_children, spawn_random_state
from .check_type import check_type
from .copyfunction import copyfunction
from .expspace import expspace
from .find_closest import find_closest
from .find_consecutive import find_consecutive
from .find_groups import find_groups
from .find_knee import find_knee
from .find_outliers import find_outliers
from .find_plateau import find_plateau
from .listify import listify
from .parallel_run import parallel_run
from .progress_bar import progress_bar
from .replace import replace
from .type_converters import as_vector
from .report import create_report
__all__ = [
"listify",
"find_closest",
"find_consecutive",
"find_groups",
"find_knee",
"as_vector",
"expspace",
"replace",
"NeuroKitWarning",
"check_type",
"find_outliers",
"parallel_run",
"progress_bar",
"find_plateau",
"copyfunction",
"check_random_state",
"check_random_state_children",
"spawn_random_state",
"create_report",
]
| 1,193 | 24.404255 | 87 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/find_outliers.py | import numpy as np
import scipy
from ..stats import standardize
def find_outliers(data, exclude=2, side="both", method="sd", **kwargs):
"""**Identify outliers (abnormal values)**
Extreme values identification using different methods, such as:
* **sd**: Data is :func:`standardized <.standardize>`, i.e., centered and
scaled, and absolute value beyond a certain SD threshold are considered as outliers.
* **norm**: Extreme values identified using theoretical percentiles to identify outliers
beyond a certain theoretical percentile (assuming the data comes from a normal distribution).
For example, with this method, ``exclude=0.025`` (one-sided) corresponds to the 2.5% lower
bound of the normal distribution, which corresponds to approx. -1.96 SD. This method is
related to the **SD** one, but instead of specifying the threshold in SDs, it is specified in
percentiles.
* **percentile**: Extreme values identified using percentiles.
Parameters
----------
data : list or ndarray
Data array
exclude : int, float
Amount of outliers to detect (depends on the chosen method).
side: str
Can be ``"both"``, ``"left"`` or ``"right"``. If ``exclude=0.05`` and ``side="both"`` and
``method="norm"``, 2.5% of extreme observation of each side will be marked as outliers.
method: str
Can be "standardize" or "percentile". The default is "standardize".
**kwargs : optional
Other arguments to be passed to :func:`standardize`.
Returns
----------
outliers : ndarray
A boolean vector of with ``True`` being the outliers.
See Also
----------
.standardize
Example
----------
.. ipython:: python
import neurokit2 as nk
data = [-12, 2, 1, 3, 66.6, 2, 1, 3, 2, -42, 2, 4, 1, 12]
# Outliers beyond 2 SD of the mean
outliers = nk.find_outliers(data, exclude=2, side="both", method="sd")
np.where(outliers)[0]
# Outliers beyond 1 MAD of the Median on one side
outliers = nk.find_outliers(data, exclude=1, side="left", method="sd", robust=True)
np.where(outliers)[0]
# 2.5% theoretical percentiles on each side
outliers = nk.find_outliers(data, exclude=0.05, method="norm")
np.where(outliers)[0]
# Outliers are beyond interquartile range
outliers = nk.find_outliers(data, exclude=(0.25, 0.75), method="percentile")
np.where(outliers)[0]
# Outliers are beyond interdecile range
outliers = nk.find_outliers(data, exclude=(0.1, 0.9), method="percentile")
np.where(outliers)[0]
"""
# Sanity checks
if side not in ["both", "left", "right"]:
raise ValueError("side must be 'both', 'left' or 'right'.")
method = method.lower()
if method not in ["standardize", "z", "sd", "percentile", "norm"]:
raise ValueError("method must be 'standardize' or 'percentile'.")
# Force array
data = np.array(data)
# Find thresholds
if method in ["percentile"]:
if isinstance(exclude, (list, tuple, np.ndarray)):
right = np.percentile(data, exclude[1] * 100)
left = np.percentile(data, exclude[0] * 100)
else:
right = np.percentile(data, (1 - (exclude / 2)) * 100)
left = np.percentile(data, (exclude / 2) * 100)
elif method in ["sd"]:
if isinstance(exclude, (list, tuple, np.ndarray)):
right = exclude[1]
left = exclude[0]
else:
right = exclude
left = -right
else:
if side == "both":
exclude = exclude / 2
right = scipy.stats.norm.ppf(1 - exclude)
left = -right
if method in ["standardize", "z", "sd", "norm"]:
data = np.array(standardize(data, **kwargs))
if side == "both":
outliers = (data < left) | (data > right)
elif side == "left":
outliers = data < left
elif side == "right":
outliers = data > right
return outliers
| 4,051 | 33.338983 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/replace.py | # -*- coding: utf-8 -*-
import numpy as np
def replace(data, replacement_dict):
    """**Replace values using a dictionary**

    Vectorized replacement: every element of ``data`` that appears among the
    dictionary keys is substituted by its corresponding value; all other
    elements are left untouched.

    Parameters
    ----------
    data : array
        The data to replace values.
    replacement_dict : dict
        A replacement dictionary of the form ``{old_value: new_value}``.

    Returns
    -------
    array
        Array containing the replaced values.

    Examples
    --------
    import neurokit2 as nk

    data = [0, 1, 2, 3]
    replacement = {0: 99, 3: 42}
    replaced = nk.replace(data, replacement)
    replaced

    """
    # Split the mapping into two aligned arrays of old and new values
    old_values = np.array(list(replacement_dict.keys()))
    new_values = np.array(list(replacement_dict.values()))

    # Sort both by the old values so that searchsorted() can be used
    order = old_values.argsort()
    sorted_old = old_values[order]
    sorted_new = new_values[order]

    # Locate each data point among the sorted keys
    positions = np.searchsorted(sorted_old, data)
    # Clamp out-of-range positions to 0; the mask below discards them anyway
    positions[positions == len(sorted_new)] = 0

    # Only substitute where the looked-up key actually equals the data point
    matches = sorted_old[positions] == data
    return np.where(matches, sorted_new[positions], data)
| 932 | 20.204545 | 72 | py |
NeuroKit | NeuroKit-master/neurokit2/misc/copyfunction.py | import functools
def copyfunction(func, *args, **kwargs):
    """Create a copy of a function, optionally pre-filling arguments.

    Builds a :func:`functools.partial` of ``func`` with the given arguments and
    copies over the original function's metadata (name, docstring, etc.), so
    that the copy introspects like the original.
    """
    # update_wrapper() returns the wrapper it just updated
    return functools.update_wrapper(functools.partial(func, *args, **kwargs), func)
| 227 | 19.727273 | 59 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv.py | # -*- coding: utf-8 -*-
import matplotlib.gridspec as gs
import matplotlib.pyplot as plt
import pandas as pd
from ..stats import summary_plot
from .hrv_frequency import _hrv_frequency_show, hrv_frequency
from .hrv_nonlinear import _hrv_nonlinear_show, hrv_nonlinear
from .hrv_rsa import hrv_rsa
from .hrv_time import hrv_time
from .hrv_utils import _hrv_format_input
from .intervals_process import intervals_process
def hrv(peaks, sampling_rate=1000, show=False, **kwargs):
    """**Heart Rate Variability (HRV)**

    Convenience function that computes all HRV indices available in NeuroKit by
    aggregating the results of the :func:`time domain <hrv_time>`, the
    :func:`frequency domain <hrv_frequency>` and the :func:`non-linear domain
    <hrv_nonlinear>`. If the input is a processed DataFrame containing
    respiration phase information, RSA indices are appended as well.

    .. tip::

        We strongly recommend checking our open-access paper `Pham et al. (2021)
        <https://doi.org/10.3390/s21123998>`_ on HRV indices as well as `Frasch (2022)
        <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC9307944/>`_ for more information.

    Parameters
    ----------
    peaks : dict
        Samples at which R-peaks occur. Can be a list of indices or the output(s) of other
        functions such as :func:`.ecg_peaks`, :func:`.ppg_peaks`, :func:`.ecg_process` or
        :func:`bio_process`. Can also be a dict containing the keys `RRI` and `RRI_Time`
        to directly pass the R-R intervals and their timestamps, respectively.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur. Should be at
        least twice as high as the highest frequency in vhf. By default 1000.
    show : bool, optional
        If ``True``, returns the plots that are generated for each of the domains.

    Returns
    -------
    DataFrame
        Contains HRV indices in a DataFrame. If RSP data was provided (e.g., output of
        :func:`bio_process`), RSA indices will also be included.

    See Also
    --------
    hrv_time, hrv_frequency, hrv_nonlinear, hrv_rsa, .ecg_peaks, .ppg_peaks

    References
    ----------
    * Pham, T., Lau, Z. J., Chen, S. H. A., & Makowski, D. (2021). Heart Rate Variability in
      Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998.
      https://doi.org/10.3390/s21123998
    * Frasch, M. G. (2022). Comprehensive HRV estimation pipeline in Python using Neurokit2:
      Application to sleep physiology. MethodsX, 9, 101782.
    * Stein, P. K. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
      electrophysiology review, 6(3), 239-244.
    * Shaffer, F., & Ginsberg, J. P. (2017). An overview of heart rate variability metrics and
      norms. Frontiers in public health, 5, 258.

    """
    # Gather the indices from each domain
    domains = [
        hrv_time(peaks, sampling_rate=sampling_rate),
        hrv_frequency(peaks, sampling_rate=sampling_rate),
        hrv_nonlinear(peaks, sampling_rate=sampling_rate),
    ]

    # Compute RSA if respiration phase data is available
    if isinstance(peaks, pd.DataFrame):
        if {"RSP_Phase", "RSP_Phase_Completion"}.issubset(peaks.columns):
            rsp_signals = peaks[["RSP_Phase", "RSP_Phase_Completion"]]
            rsa_indices = hrv_rsa(peaks, rsp_signals, sampling_rate=sampling_rate)
            domains.append(pd.DataFrame([rsa_indices]))

    results = pd.concat(domains, axis=1)

    # Plot
    if show:
        plot_peaks = peaks["ECG_R_Peaks"] if isinstance(peaks, dict) else peaks
        # Pass a shallow copy: the plotting routine renames columns in place
        _hrv_plot(plot_peaks, results.copy(deep=False), sampling_rate, **kwargs)

    return results
# =============================================================================
# Plot
# =============================================================================
def _hrv_plot(peaks, out, sampling_rate=1000, interpolation_rate=100, **kwargs):
    """Summary figure for ``hrv()``: R-R distribution, PSD and Poincaré plot.

    Note that ``out`` (the DataFrame of computed indices) is modified in place
    (the "HRV_" column prefix is stripped), so callers should pass a copy.
    """
    fig = plt.figure(constrained_layout=False)
    # 2x2 grid: left column holds the distribution (top) and the PSD (bottom);
    # the right column holds the Poincaré plot with its marginal histograms
    spec = gs.GridSpec(ncols=2, nrows=2, height_ratios=[1, 1], width_ratios=[1, 1])
    # Arrange grids
    ax_distrib = fig.add_subplot(spec[0, :-1])
    ax_distrib.set_xlabel("R-R intervals (ms)")
    ax_distrib.set_title("Distribution of R-R intervals")
    ax_psd = fig.add_subplot(spec[1, :-1])
    # Nested 4x4 grid so that the marginal axes hug the main Poincaré axis
    spec_within = gs.GridSpecFromSubplotSpec(4, 4, subplot_spec=spec[:, -1], wspace=0.025, hspace=0.05)
    ax_poincare = fig.add_subplot(spec_within[1:4, 0:3])
    ax_marg_x = fig.add_subplot(spec_within[0, 0:3])
    ax_marg_x.set_title("Poincaré Plot")
    ax_marg_y = fig.add_subplot(spec_within[1:4, 3])
    plt.tight_layout(h_pad=0.5, w_pad=0.5)
    # Distribution of RR intervals
    rri, rri_time, rri_missing = _hrv_format_input(peaks, sampling_rate=sampling_rate)
    ax_distrib = summary_plot(rri, ax=ax_distrib, **kwargs)
    # Poincare plot (column names without the "HRV_" prefix, as expected by the helper)
    out.columns = [col.replace("HRV_", "") for col in out.columns]
    _hrv_nonlinear_show(rri, rri_time=rri_time, rri_missing=rri_missing, out=out, ax=ax_poincare, ax_marg_x=ax_marg_x, ax_marg_y=ax_marg_y)
    # PSD plot: intervals are first interpolated onto a regular grid
    rri, rri_time, sampling_rate = intervals_process(
        rri, intervals_time=rri_time, interpolate=True, interpolation_rate=interpolation_rate, **kwargs
    )
    frequency_bands = out[["ULF", "VLF", "LF", "HF", "VHF"]]
    _hrv_frequency_show(rri, frequency_bands, sampling_rate=sampling_rate, ax=ax_psd)
| 6,318 | 36.613095 | 139 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_frequency.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
from ..signal.signal_power import _signal_power_instant_plot, signal_power
from ..signal.signal_psd import signal_psd
from .hrv_utils import _hrv_format_input
from .intervals_process import intervals_process
def hrv_frequency(
    peaks,
    sampling_rate=1000,
    ulf=(0, 0.0033),
    vlf=(0.0033, 0.04),
    lf=(0.04, 0.15),
    hf=(0.15, 0.4),
    vhf=(0.4, 0.5),
    psd_method="welch",
    show=False,
    silent=True,
    normalize=True,
    order_criteria=None,
    interpolation_rate=100,
    **kwargs
):
    """**Computes frequency-domain indices of Heart Rate Variability (HRV)**

    Computes frequency domain HRV metrics, such as the power in different frequency bands.

    * **ULF**: The spectral power of ultra low frequencies (by default, .0 to
      .0033 Hz). Very long signals are required for this to index to be
      extracted, otherwise, will return NaN.
    * **VLF**: The spectral power of very low frequencies (by default, .0033 to .04 Hz).
    * **LF**: The spectral power of low frequencies (by default, .04 to .15 Hz).
    * **HF**: The spectral power of high frequencies (by default, .15 to .4 Hz).
    * **VHF**: The spectral power of very high frequencies (by default, .4 to .5 Hz).
    * **TP**: The total spectral power.
    * **LFHF**: The ratio obtained by dividing the low frequency power by the high frequency power.
    * **LFn**: The normalized low frequency, obtained by dividing the low frequency power by
      the total power.
    * **HFn**: The normalized high frequency, obtained by dividing the low frequency power by
      the total power.
    * **LnHF**: The log transformed HF.

    Note that a minimum duration of the signal containing the peaks is recommended for some HRV
    indices to be meaningful. For instance, 1, 2 and 5 minutes of high quality signal are the
    recommended minima for HF, LF and LF/HF, respectively.

    .. tip::

        We strongly recommend checking our open-access paper `Pham et al. (2021)
        <https://doi.org/10.3390/s21123998>`_ on HRV indices for more information.

    Parameters
    ----------
    peaks : dict
        Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
        Can be a list of indices or the output(s) of other functions such as :func:`.ecg_peaks`,
        :func:`.ppg_peaks`, :func:`.ecg_process` or :func:`.bio_process`.
        Can also be a dict containing the keys `RRI` and `RRI_Time`
        to directly pass the R-R intervals and their timestamps, respectively.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur.
    ulf : tuple, optional
        Upper and lower limit of the ultra-low frequency band. By default (0, 0.0033).
    vlf : tuple, optional
        Upper and lower limit of the very-low frequency band. By default (0.0033, 0.04).
    lf : tuple, optional
        Upper and lower limit of the low frequency band. By default (0.04, 0.15).
    hf : tuple, optional
        Upper and lower limit of the high frequency band. By default (0.15, 0.4).
    vhf : tuple, optional
        Upper and lower limit of the very-high frequency band. By default (0.4, 0.5).
    psd_method : str
        Method used for spectral density estimation. For details see :func:`.signal_power`.
        By default ``"welch"``.
    silent : bool
        If ``False``, warnings will be printed. Default to ``True``.
    show : bool
        If ``True``, will plot the power in the different frequency bands.
    normalize : bool
        Normalization of power by maximum PSD value. Default to ``True``.
        Normalization allows comparison between different PSD methods.
    order_criteria : str
        The criteria to automatically select order in parametric PSD (only used for autoregressive
        (AR) methods such as ``"burg"``). Defaults to ``None``.
    interpolation_rate : int, optional
        Sampling rate (Hz) of the interpolated interbeat intervals. Should be at least twice as
        high as the highest frequency in vhf. By default 100. To replicate Kubios defaults, set to 4.
        To not interpolate, set interpolation_rate to None (in case the interbeat intervals are already
        interpolated or when using the ``"lombscargle"`` psd_method for which interpolation is not required).
    **kwargs
        Additional other arguments.

    Returns
    -------
    DataFrame
        Contains frequency domain HRV metrics.

    See Also
    --------
    ecg_peaks, ppg_peaks, hrv_summary, hrv_time, hrv_nonlinear

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_resting_5min_100hz")

      # Find peaks
      peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)

      # Compute HRV indices using method="welch"
      @savefig p_hrv_freq1.png scale=100%
      hrv_welch = nk.hrv_frequency(peaks, sampling_rate=100, show=True, psd_method="welch")
      @suppress
      plt.close()

    .. ipython:: python

      # Using method ="burg"
      @savefig p_hrv_freq2.png scale=100%
      hrv_burg = nk.hrv_frequency(peaks, sampling_rate=100, show=True, psd_method="burg")
      @suppress
      plt.close()

    .. ipython:: python

      # Using method = "lomb" (requires installation of astropy)
      @savefig p_hrv_freq3.png scale=100%
      hrv_lomb = nk.hrv_frequency(peaks, sampling_rate=100, show=True, psd_method="lomb")
      @suppress
      plt.close()

    .. ipython:: python

      # Using method="multitapers"
      @savefig p_hrv_freq4.png scale=100%
      hrv_multitapers = nk.hrv_frequency(peaks, sampling_rate=100, show=True,psd_method="multitapers")
      @suppress
      plt.close()

    References
    ----------
    * Pham, T., Lau, Z. J., Chen, S. H. A., & Makowski, D. (2021). Heart Rate Variability in
      Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998.
    * Stein, P. K. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
      electrophysiology review, 6(3), 239-244.
    * Shaffer, F., & Ginsberg, J. P. (2017). An overview of heart rate variability metrics and
      norms. Frontiers in public health, 5, 258.
    * Boardman, A., Schlindwein, F. S., & Rocha, A. P. (2002). A study on the optimum order of
      autoregressive models for heart rate variability. Physiological measurement, 23(2), 325.
    * Bachler, M. (2017). Spectral Analysis of Unevenly Spaced Data: Models and Application in Heart
      Rate Variability. Simul. Notes Eur., 27(4), 183-190.

    """
    # Sanitize input
    # If given peaks, compute R-R intervals (also referred to as NN) in milliseconds
    rri, rri_time, _ = _hrv_format_input(peaks, sampling_rate=sampling_rate)
    # Process R-R intervals (interpolated at 100 Hz by default)
    rri, rri_time, sampling_rate = intervals_process(
        rri, intervals_time=rri_time, interpolate=True, interpolation_rate=interpolation_rate, **kwargs
    )
    # If not interpolated onto a regular grid, keep the interval timestamps so
    # that methods able to handle uneven sampling (e.g. Lomb-Scargle) can use them
    if interpolation_rate is None:
        t = rri_time
    else:
        t = None
    frequency_band = [ulf, vlf, lf, hf, vhf]
    # Find maximum frequency
    max_frequency = np.max([np.max(i) for i in frequency_band])
    power = signal_power(
        rri,
        frequency_band=frequency_band,
        sampling_rate=sampling_rate,
        method=psd_method,
        max_frequency=max_frequency,
        show=False,
        normalize=normalize,
        order_criteria=order_criteria,
        t=t,
        **kwargs
    )
    # One column per frequency band, in the order given above
    power.columns = ["ULF", "VLF", "LF", "HF", "VHF"]
    out = power.to_dict(orient="index")[0]
    out_bands = out.copy()  # Components to be entered into plot
    if silent is False:
        for frequency in out.keys():
            if out[frequency] == 0.0:
                warn(
                    "The duration of recording is too short to allow"
                    " reliable computation of signal power in frequency band " + frequency + "."
                    " Its power is returned as zero.",
                    category=NeuroKitWarning,
                )
    # Normalized
    total_power = np.nansum(power.values)
    out["TP"] = total_power
    out["LFHF"] = out["LF"] / out["HF"]
    out["LFn"] = out["LF"] / total_power
    out["HFn"] = out["HF"] / total_power
    # Log
    out["LnHF"] = np.log(out["HF"])  # pylint: disable=E1111
    out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
    # Plot
    if show:
        _hrv_frequency_show(
            rri,
            out_bands,
            ulf=ulf,
            vlf=vlf,
            lf=lf,
            hf=hf,
            vhf=vhf,
            sampling_rate=sampling_rate,
            psd_method=psd_method,
            order_criteria=order_criteria,
            normalize=normalize,
            max_frequency=max_frequency,
            t=t,
        )
    return out
def _hrv_frequency_show(
    rri,
    out_bands,
    ulf=(0, 0.0033),
    vlf=(0.0033, 0.04),
    lf=(0.04, 0.15),
    hf=(0.15, 0.4),
    vhf=(0.4, 0.5),
    sampling_rate=1000,
    psd_method="welch",
    order_criteria=None,
    normalize=True,
    max_frequency=0.5,
    t=None,
    **kwargs
):
    """Plot the power spectral density with the HRV frequency bands highlighted."""
    # Reuse a provided axis when given, otherwise open a new figure
    if "ax" in kwargs:
        ax = kwargs.pop("ax")
    else:
        __, ax = plt.subplots()

    frequency_band = [ulf, vlf, lf, hf, vhf]

    # Sampling rate used to size the analysis window; when intervals are not
    # interpolated, approximate it from the median spacing of the timestamps
    # (for visualization purposes only, see #800)
    if sampling_rate is None:
        med_sampling_rate = np.median(np.diff(t))
    else:
        med_sampling_rate = sampling_rate

    # Pick the lowest band whose required window fits within half the recording
    for band in frequency_band:
        min_frequency = band[0] if band[0] != 0 else 0.001  # sanitize lowest frequency
        window_length = int((2 / min_frequency) * med_sampling_rate)
        if window_length <= len(rri) / 2:
            break

    psd = signal_psd(
        rri,
        sampling_rate=sampling_rate,
        show=False,
        min_frequency=min_frequency,
        method=psd_method,
        max_frequency=max_frequency,
        order_criteria=order_criteria,
        normalize=normalize,
        t=t,
    )

    _signal_power_instant_plot(psd, out_bands, frequency_band, ax=ax)
| 10,410 | 33.819398 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_nonlinear.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from ..complexity import (
complexity_lempelziv,
entropy_approximate,
entropy_fuzzy,
entropy_multiscale,
entropy_sample,
entropy_shannon,
fractal_correlation,
fractal_dfa,
fractal_higuchi,
fractal_katz,
)
from ..misc import NeuroKitWarning, find_consecutive
from ..signal import signal_zerocrossings
from .hrv_utils import _hrv_format_input
from .intervals_utils import _intervals_successive
def hrv_nonlinear(peaks, sampling_rate=1000, show=False, **kwargs):
    """**Nonlinear indices of Heart Rate Variability (HRV)**

    This function computes non-linear indices, which include features derived from the *Poincaré
    plot*, as well as other :func:`.complexity` indices corresponding to entropy or fractal
    dimension.

    .. hint::

        There exist many more complexity indices available in NeuroKit2, that could be applied to
        HRV. The :func:`.hrv_nonlinear` function only includes the most commonly used indices.
        Please see the documentation page for all the func:`.complexity` features.

    The **Poincaré plot** is a graphical representation of each NN interval plotted against its
    preceding NN interval. The ellipse that emerges is a visual quantification of the correlation
    between successive NN intervals.

    Basic indices derived from the Poincaré plot analysis include:

    * **SD1**: Standard deviation perpendicular to the line of identity. It is an index of
      short-term RR interval fluctuations, i.e., beat-to-beat variability. It is equivalent
      (although on another scale) to RMSSD, and therefore it is redundant to report correlation
      with both.
    * **SD2**: Standard deviation along the identity line. Index of long-term HRV changes.
    * **SD1/SD2**: ratio of *SD1* to *SD2*. Describes the ratio of short term to long term
      variations in HRV.
    * **S**: Area of ellipse described by *SD1* and *SD2* (``pi * SD1 * SD2``). It is
      proportional to *SD1SD2*.
    * **CSI**: The Cardiac Sympathetic Index (Toichi, 1997) is a measure of cardiac sympathetic
      function independent of vagal activity, calculated by dividing the longitudinal variability of
      the Poincaré plot (``4*SD2``) by its transverse variability (``4*SD1``).
    * **CVI**: The Cardiac Vagal Index (Toichi, 1997) is an index of cardiac parasympathetic
      function (vagal activity unaffected by sympathetic activity), and is equal equal to the
      logarithm of the product of longitudinal (``4*SD2``) and transverse variability (``4*SD1``).
    * **CSI_Modified**: The modified CSI (Jeppesen, 2014) obtained by dividing the square of the
      longitudinal variability by its transverse variability.

    Indices of **Heart Rate Asymmetry** (HRA), i.e., asymmetry of the Poincaré plot (Yan, 2017),
    include:

    * **GI**: Guzik's Index, defined as the distance of points above line of identity (LI) to LI
      divided by the distance of all points in Poincaré plot to LI except those that are located on
      LI.
    * **SI**: Slope Index, defined as the phase angle of points above LI divided by the phase angle
      of all points in Poincaré plot except those that are located on LI.
    * **AI**: Area Index, defined as the cumulative area of the sectors corresponding to the points
      that are located above LI divided by the cumulative area of sectors corresponding to all
      points in the Poincaré plot except those that are located on LI.
    * **PI**: Porta's Index, defined as the number of points below LI divided by the total number
      of points in Poincaré plot except those that are located on LI.
    * **SD1d** and **SD1a**: short-term variance of contributions of decelerations (prolongations
      of RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski,
      2011)
    * **C1d** and **C1a**: the contributions of heart rate decelerations and accelerations to
      short-term HRV, respectively (Piskorski, 2011).
    * **SD2d** and **SD2a**: long-term variance of contributions of decelerations (prolongations of
      RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski, 2011).
    * **C2d** and **C2a**: the contributions of heart rate decelerations and accelerations to
      long-term HRV, respectively (Piskorski, 2011).
    * **SDNNd** and **SDNNa**: total variance of contributions of decelerations (prolongations of
      RR intervals) and accelerations (shortenings of RR intervals), respectively (Piskorski, 2011).
    * **Cd** and **Ca**: the total contributions of heart rate decelerations and accelerations to
      HRV.

    Indices of **Heart Rate Fragmentation** (Costa, 2017) include:

    * **PIP**: Percentage of inflection points of the RR intervals series.
    * **IALS**: Inverse of the average length of the acceleration/deceleration segments.
    * **PSS**: Percentage of short segments.
    * **PAS**: Percentage of NN intervals in alternation segments.

    Indices of **Complexity** and **Fractal Physiology** include:

    * **ApEn**: See :func:`.entropy_approximate`.
    * **SampEn**: See :func:`.entropy_sample`.
    * **ShanEn**: See :func:`.entropy_shannon`.
    * **FuzzyEn**: See :func:`.entropy_fuzzy`.
    * **MSE**: See :func:`.entropy_multiscale`.
    * **CMSE**: See :func:`.entropy_multiscale`.
    * **RCMSE**: See :func:`.entropy_multiscale`.
    * **CD**: See :func:`.fractal_correlation`.
    * **HFD**: See :func:`.fractal_higuchi` (with ``kmax`` set to ``"default"``).
    * **KFD**: See :func:`.fractal_katz`.
    * **LZC**: See :func:`.complexity_lempelziv`.
    * **DFA_alpha1**: The monofractal detrended fluctuation analysis of the HR signal,
      corresponding to short-term correlations. See :func:`.fractal_dfa`.
    * **DFA_alpha2**: The monofractal detrended fluctuation analysis of the HR signal,
      corresponding to long-term correlations. See :func:`.fractal_dfa`.
    * **MFDFA indices**: Indices related to the :func:`multifractal spectrum <.fractal_dfa()>`.

    Other non-linear indices include those based on Recurrence Quantification Analysis (RQA), but
    are not implemented yet (but soon).

    .. tip::

        We strongly recommend checking our open-access paper `Pham et al. (2021)
        <https://doi.org/10.3390/s21123998>`_ on HRV indices, as well as `Lau et al. (2021)
        <https://psyarxiv.com/f8k3x/>`_ on complexity, for more information.

    Parameters
    ----------
    peaks : dict
        Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
        Can be a list of indices or the output(s) of other functions such as :func:`.ecg_peaks`,
        :func:`.ppg_peaks`, :func:`.ecg_process` or :func:`.bio_process`.
        Can also be a dict containing the keys `RRI` and `RRI_Time`
        to directly pass the R-R intervals and their timestamps, respectively.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur. Should be at
        least twice as high as the highest frequency in vhf. By default 1000.
    show : bool, optional
        If ``True``, will return a Poincaré plot, a scattergram, which plots each RR interval
        against the next successive one. The ellipse centers around the average RR interval. By
        default ``False``.
    **kwargs
        Other arguments to be passed into :func:`.fractal_dfa` and :func:`.fractal_correlation`.

    Returns
    -------
    DataFrame
        Contains non-linear HRV metrics.

    See Also
    --------
    ecg_peaks, ppg_peaks, hrv_frequency, hrv_time, hrv_summary

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_resting_5min_100hz")

      # Find peaks
      peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)

      # Compute HRV indices
      @savefig p_hrv_nonlinear1.png scale=100%
      hrv = nk.hrv_nonlinear(peaks, sampling_rate=100, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      hrv

    References
    ----------
    * Pham, T., Lau, Z. J., Chen, S. H., & Makowski, D. (2021). Heart Rate Variability in
      Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998.
      https://doi.org/10.3390/s21123998
    * Yan, C., Li, P., Ji, L., Yao, L., Karmakar, C., & Liu, C. (2017). Area asymmetry of heart
      rate variability signal. Biomedical engineering online, 16(1), 112.
    * Ciccone, A. B., Siedlik, J. A., Wecht, J. M., Deckert, J. A., Nguyen, N. D., & Weir, J. P.\
      (2017). Reminder: RMSSD and SD1 are identical heart rate variability metrics. Muscle & nerve,
      56 (4), 674-678.
    * Shaffer, F., & Ginsberg, J. P. (2017). An overview of heart rate variability metrics and
      norms. Frontiers in public health, 5, 258.
    * Costa, M. D., Davis, R. B., & Goldberger, A. L. (2017). Heart rate fragmentation: a new
      approach to the analysis of cardiac interbeat interval dynamics. Front. Physiol. 8, 255.
    * Jeppesen, J., Beniczky, S., Johansen, P., Sidenius, P., & Fuglsang-Frederiksen, A. (2014).
      Using Lorenz plot and Cardiac Sympathetic Index of heart rate variability for detecting
      seizures for patients with epilepsy. In 2014 36th Annual International Conference of the IEE
      Engineering in Medicine and Biology Society (pp. 4563-4566). IEEE.
    * Piskorski, J., & Guzik, P. (2011). Asymmetric properties of long-term and total heart rate
      variability. Medical & biological engineering & computing, 49(11), 1289-1297.
    * Stein, P. K. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
      electrophysiology review, 6(3), 239-244.
    * Brennan, M. et al. (2001). Do Existing Measures of Poincaré Plot Geometry Reflect Nonlinear
      Features of Heart Rate Variability?. IEEE Transactions on Biomedical Engineering, 48(11),
      1342-1347.
    * Toichi, M., Sugiura, T., Murai, T., & Sengoku, A. (1997). A new method of assessing cardiac
      autonomic function and its comparison with spectral analysis and coefficient of variation of
      R-R interval. Journal of the autonomic nervous system, 62(1-2), 79-84.
    * Acharya, R. U., Lim, C. M., & Joseph, P. (2002). Heart rate variability analysis using
      correlation dimension and detrended fluctuation analysis. Itbm-Rbm, 23(6), 333-339.

    """
    # Sanitize input
    # If given peaks, compute R-R intervals (also referred to as NN) in milliseconds
    rri, rri_time, rri_missing = _hrv_format_input(peaks, sampling_rate=sampling_rate)
    if rri_missing:
        warn(
            "Missing interbeat intervals have been detected. "
            "Note that missing intervals can distort some HRV features, in particular "
            "nonlinear indices.",
            category=NeuroKitWarning,
        )
    # Initialize empty container for results
    out = {}
    # Poincaré features (SD1, SD2, etc.)
    out = _hrv_nonlinear_poincare(rri, rri_time=rri_time, rri_missing=rri_missing, out=out)
    # Heart Rate Fragmentation
    out = _hrv_nonlinear_fragmentation(rri, rri_time=rri_time, rri_missing=rri_missing, out=out)
    # Heart Rate Asymmetry
    out = _hrv_nonlinear_poincare_hra(rri, rri_time=rri_time, rri_missing=rri_missing, out=out)
    # DFA
    out = _hrv_dfa(rri, out, **kwargs)
    # Complexity
    # All entropy indices share the same tolerance r = 0.2 * SD(RRI), a conventional default
    tolerance = 0.2 * np.std(rri, ddof=1)
    out["ApEn"], _ = entropy_approximate(rri, delay=1, dimension=2, tolerance=tolerance)
    out["SampEn"], _ = entropy_sample(rri, delay=1, dimension=2, tolerance=tolerance)
    out["ShanEn"], _ = entropy_shannon(rri)
    out["FuzzyEn"], _ = entropy_fuzzy(rri, delay=1, dimension=2, tolerance=tolerance)
    out["MSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="MSEn")
    out["CMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="CMSEn")
    out["RCMSEn"], _ = entropy_multiscale(rri, dimension=2, tolerance=tolerance, method="RCMSEn")
    # Fractal dimension and Lempel-Ziv complexity
    out["CD"], _ = fractal_correlation(rri, delay=1, dimension=2, **kwargs)
    out["HFD"], _ = fractal_higuchi(rri, k_max=10, **kwargs)
    out["KFD"], _ = fractal_katz(rri)
    out["LZC"], _ = complexity_lempelziv(rri, **kwargs)
    if show:
        _hrv_nonlinear_show(rri, rri_time=rri_time, rri_missing=rri_missing, out=out)
    out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
    return out
# =============================================================================
# Get SD1 and SD2
# =============================================================================
def _hrv_nonlinear_poincare(rri, rri_time=None, rri_missing=False, out=None):
    """Compute Poincaré-plot indices (SD1, SD2 and derived features).

    - Brennan (2001). Do existing measures of Poincare plot geometry reflect nonlinear features of
    heart rate variability?

    Parameters
    ----------
    rri : np.ndarray
        R-R intervals.
    rri_time : np.ndarray, optional
        Timestamps of the R-R intervals; used to detect non-successive pairs
        when ``rri_missing`` is True.
    rri_missing : bool
        Whether the series contains missing interbeat intervals.
    out : dict, optional
        Container in which the indices are stored. A fresh dict is created when
        not provided.

    Returns
    -------
    dict
        ``out`` with keys SD1, SD2, SD1SD2, S, CSI, CVI and CSI_Modified added.
    """
    # Previously a mutable default argument ({}), which is shared across calls
    # and would leak state between invocations; use a None sentinel instead.
    if out is None:
        out = {}

    # Poincaré coordinates: pairs (RR_n, RR_n+1)
    rri_n = rri[:-1]
    rri_plus = rri[1:]

    if rri_missing:
        # Only keep pairs of truly successive intervals
        successive = _intervals_successive(rri, intervals_time=rri_time)
        rri_plus = rri_plus[successive]
        rri_n = rri_n[successive]

    # Rotate the plot by 45 degrees: x1 is the (signed) distance to the identity
    # line, x2 the position along it (Eq. 7 in Brennan, 2001)
    x1 = (rri_n - rri_plus) / np.sqrt(2)
    x2 = (rri_n + rri_plus) / np.sqrt(2)
    sd1 = np.std(x1, ddof=1)
    sd2 = np.std(x2, ddof=1)

    out["SD1"] = sd1
    out["SD2"] = sd2

    # SD1 / SD2 (ratio of short-term to long-term variability)
    out["SD1SD2"] = sd1 / sd2

    # Area of ellipse described by SD1 and SD2
    out["S"] = np.pi * out["SD1"] * out["SD2"]

    # CSI / CVI (Toichi, 1997): transverse (T) and longitudinal (L) axis lengths
    T = 4 * out["SD1"]
    L = 4 * out["SD2"]
    out["CSI"] = L / T
    out["CVI"] = np.log10(L * T)
    out["CSI_Modified"] = L**2 / T

    return out
def _hrv_nonlinear_poincare_hra(rri, rri_time=None, rri_missing=False, out={}):
"""Heart Rate Asymmetry Indices.
- Asymmetry of Poincaré plot (or termed as heart rate asymmetry, HRA) - Yan (2017)
- Asymmetric properties of long-term and total heart rate variability - Piskorski (2011)
"""
N = len(rri) - 1
x = rri[:-1] # rri_n, x-axis
y = rri[1:] # rri_plus, y-axis
if rri_missing:
# Only include successive differences
x = x[_intervals_successive(rri, intervals_time=rri_time)]
y = y[_intervals_successive(rri, intervals_time=rri_time)]
N = len(x)
diff = y - x
decelerate_indices = np.where(diff > 0)[0] # set of points above IL where y > x
accelerate_indices = np.where(diff < 0)[0] # set of points below IL where y < x
nochange_indices = np.where(diff == 0)[0]
# Distances to centroid line l2
centroid_x = np.mean(x)
centroid_y = np.mean(y)
dist_l2_all = abs((x - centroid_x) + (y - centroid_y)) / np.sqrt(2)
# Distances to LI
dist_all = abs(y - x) / np.sqrt(2)
# Calculate the angles
theta_all = abs(np.arctan(1) - np.arctan(y / x)) # phase angle LI - phase angle of i-th point
# Calculate the radius
r = np.sqrt(x**2 + y**2)
# Sector areas
S_all = 1 / 2 * theta_all * r**2
# Guzik's Index (GI)
den_GI = np.sum(dist_all)
num_GI = np.sum(dist_all[decelerate_indices])
out["GI"] = (num_GI / den_GI) * 100
# Slope Index (SI)
den_SI = np.sum(theta_all)
num_SI = np.sum(theta_all[decelerate_indices])
out["SI"] = (num_SI / den_SI) * 100
# Area Index (AI)
den_AI = np.sum(S_all)
num_AI = np.sum(S_all[decelerate_indices])
out["AI"] = (num_AI / den_AI) * 100
# Porta's Index (PI)
m = N - len(nochange_indices) # all points except those on LI
b = len(accelerate_indices) # number of points below LI
out["PI"] = (b / m) * 100
# Short-term asymmetry (SD1)
sd1d = np.sqrt(np.sum(dist_all[decelerate_indices] ** 2) / (N - 1))
sd1a = np.sqrt(np.sum(dist_all[accelerate_indices] ** 2) / (N - 1))
sd1I = np.sqrt(sd1d**2 + sd1a**2)
out["C1d"] = (sd1d / sd1I) ** 2
out["C1a"] = (sd1a / sd1I) ** 2
out["SD1d"] = sd1d # SD1 deceleration
out["SD1a"] = sd1a # SD1 acceleration
# out["SD1I"] = sd1I # SD1 based on LI, whereas SD1 is based on centroid line l1
# Long-term asymmetry (SD2)
longterm_dec = np.sum(dist_l2_all[decelerate_indices] ** 2) / (N - 1)
longterm_acc = np.sum(dist_l2_all[accelerate_indices] ** 2) / (N - 1)
longterm_nodiff = np.sum(dist_l2_all[nochange_indices] ** 2) / (N - 1)
sd2d = np.sqrt(longterm_dec + 0.5 * longterm_nodiff)
sd2a = np.sqrt(longterm_acc + 0.5 * longterm_nodiff)
sd2I = np.sqrt(sd2d**2 + sd2a**2)
out["C2d"] = (sd2d / sd2I) ** 2
out["C2a"] = (sd2a / sd2I) ** 2
out["SD2d"] = sd2d # SD2 deceleration
out["SD2a"] = sd2a # SD2 acceleration
# out["SD2I"] = sd2I # identical with SD2
# Total asymmerty (SDNN)
sdnnd = np.sqrt(0.5 * (sd1d**2 + sd2d**2)) # SDNN deceleration
sdnna = np.sqrt(0.5 * (sd1a**2 + sd2a**2)) # SDNN acceleration
sdnn = np.sqrt(sdnnd**2 + sdnna**2) # should be similar to sdnn in hrv_time
out["Cd"] = (sdnnd / sdnn) ** 2
out["Ca"] = (sdnna / sdnn) ** 2
out["SDNNd"] = sdnnd
out["SDNNa"] = sdnna
return out
def _hrv_nonlinear_fragmentation(rri, rri_time=None, rri_missing=False, out=None):
    """Heart Rate Fragmentation Indices - Costa (2017)

    The more fragmented a time series is, the higher the PIP, IALS, PSS, and PAS indices will be.

    ``out`` is updated in-place with the four indices; a fresh dict is created when
    None. (Fix: the previous mutable default ``out={}`` was shared across calls, so a
    dict returned from one call could be silently mutated by a later call.)
    """
    if out is None:
        out = {}

    diff_rri = np.diff(rri)
    if rri_missing:
        # Only include successive differences
        diff_rri = diff_rri[_intervals_successive(rri, intervals_time=rri_time)]
    zerocrossings = signal_zerocrossings(diff_rri)

    # Percentage of inflection points (PIP)
    N = len(diff_rri) + 1
    out["PIP"] = len(zerocrossings) / N

    # Inverse of the average length of the acceleration/deceleration segments (IALS)
    accelerations = np.where(diff_rri > 0)[0]
    decelerations = np.where(diff_rri < 0)[0]
    consecutive = find_consecutive(accelerations) + find_consecutive(decelerations)
    lengths = [len(i) for i in consecutive]
    out["IALS"] = 1 / np.average(lengths)

    # Percentage of short segments (PSS) - The complement of the percentage of NN intervals in
    # acceleration and deceleration segments with three or more NN intervals
    out["PSS"] = np.sum(np.asarray(lengths) < 3) / len(lengths)

    # Percentage of NN intervals in alternation segments (PAS). An alternation segment is a sequence
    # of at least four NN intervals, for which heart rate acceleration changes sign every beat. We note
    # that PAS quantifies the amount of a particular sub-type of fragmentation (alternation). A time
    # series may be highly fragmented and have a small amount of alternation. However, all time series
    # with large amount of alternation are highly fragmented.
    alternations = find_consecutive(zerocrossings)
    lengths = [len(i) for i in alternations]
    out["PAS"] = np.sum(np.asarray(lengths) >= 4) / len(lengths)

    return out
# =============================================================================
# DFA
# =============================================================================
def _hrv_dfa(rri, out, n_windows="default", **kwargs):
    """Compute Detrended Fluctuation Analysis (DFA) indices of the RR series.

    Fills ``out`` with the short-term exponent (``DFA_alpha1``, windows of 4-11
    beats), the long-term exponent (``DFA_alpha2``, windows of 12+ beats), and
    their multifractal counterparts (``MFDFA_alphaX_*``), all delegated to
    ``fractal_dfa()``.
    """
    # if "dfa_windows" in kwargs:
    #     dfa_windows = kwargs["dfa_windows"]
    # else:
    #     dfa_windows = [(4, 11), (12, None)]
    # consider using dict.get() method directly
    # Window ranges in beats: (min, max) for alpha1 and alpha2; None means "derive from data"
    dfa_windows = kwargs.get("dfa_windows", [(4, 11), (12, None)])
    # Determine max beats
    if dfa_windows[1][1] is None:
        max_beats = (len(rri) + 1) / 10  # Number of peaks divided by 10
    else:
        max_beats = dfa_windows[1][1]
    # No. of windows to compute for short and long term
    # NOTE(review): if ``n_windows`` is neither "default" nor a list,
    # n_windows_short/n_windows_long are never bound and the np.linspace call
    # below raises NameError -- consider validating the argument.
    if n_windows == "default":
        n_windows_short = int(dfa_windows[0][1] - dfa_windows[0][0] + 1)
        n_windows_long = int(max_beats - dfa_windows[1][0] + 1)
    elif isinstance(n_windows, list):
        n_windows_short = n_windows[0]
        n_windows_long = n_windows[1]
    # Compute DFA alpha1
    short_window = np.linspace(dfa_windows[0][0], dfa_windows[0][1], n_windows_short).astype(int)
    # For monofractal
    out["DFA_alpha1"], _ = fractal_dfa(rri, multifractal=False, scale=short_window, **kwargs)
    # For multifractal (one output column per summary index, e.g. Width, Peak, ...)
    mdfa_alpha1, _ = fractal_dfa(rri, multifractal=True, q=np.arange(-5, 6), scale=short_window, **kwargs)
    for k in mdfa_alpha1.columns:
        out["MFDFA_alpha1_" + k] = mdfa_alpha1[k].values[0]
    # Compute DFA alpha2
    # sanitize max_beats: the long-term range must contain at least two window sizes
    if max_beats < dfa_windows[1][0] + 1:
        warn(
            "DFA_alpha2 related indices will not be calculated. "
            "The maximum duration of the windows provided for the long-term correlation is smaller "
            "than the minimum duration of windows. Refer to the `scale` argument in `nk.fractal_dfa()` "
            "for more information.",
            category=NeuroKitWarning,
        )
        # Recording too short for alpha2: return with only the alpha1 indices
        return out
    else:
        long_window = np.linspace(dfa_windows[1][0], int(max_beats), n_windows_long).astype(int)
        # For monofractal
        out["DFA_alpha2"], _ = fractal_dfa(rri, multifractal=False, scale=long_window, **kwargs)
        # For multifractal
        mdfa_alpha2, _ = fractal_dfa(rri, multifractal=True, q=np.arange(-5, 6), scale=long_window, **kwargs)
        for k in mdfa_alpha2.columns:
            out["MFDFA_alpha2_" + k] = mdfa_alpha2[k].values[0]
    return out
# =============================================================================
# Plot
# =============================================================================
def _hrv_nonlinear_show(rri, rri_time=None, rri_missing=False, out={}, ax=None, ax_marg_x=None, ax_marg_y=None):
    """Draw the Poincaré plot (RR_n vs. RR_n+1) with marginal densities and the
    SD1/SD2 ellipse and arrows.

    ``out`` must already contain "SD1" and "SD2" (e.g. computed by
    ``_hrv_nonlinear_poincare()``); with the empty default this raises KeyError.
    Returns the created figure, or None when axes were passed in by the caller.
    """
    mean_heart_period = np.nanmean(rri)
    sd1 = out["SD1"]
    sd2 = out["SD2"]
    # SD1/SD2 may arrive as single-row Series (e.g. sliced from the HRV DataFrame)
    if isinstance(sd1, pd.Series):
        sd1 = float(sd1.iloc[0])
    if isinstance(sd2, pd.Series):
        sd2 = float(sd2.iloc[0])
    # Poincare values
    ax1 = rri[:-1]
    ax2 = rri[1:]
    if rri_missing:
        # Only include successive differences
        ax1 = ax1[_intervals_successive(rri, intervals_time=rri_time)]
        ax2 = ax2[_intervals_successive(rri, intervals_time=rri_time)]
    # Set grid boundaries (pad each axis by 10% of its range)
    ax1_lim = (max(ax1) - min(ax1)) / 10
    ax2_lim = (max(ax2) - min(ax2)) / 10
    ax1_min = min(ax1) - ax1_lim
    ax1_max = max(ax1) + ax1_lim
    ax2_min = min(ax2) - ax2_lim
    ax2_max = max(ax2) + ax2_lim
    # Prepare figure (only when no axes were supplied by the caller)
    if ax is None and ax_marg_x is None and ax_marg_y is None:
        gs = matplotlib.gridspec.GridSpec(4, 4)
        fig = plt.figure(figsize=(8, 8))
        ax_marg_x = plt.subplot(gs[0, 0:3])
        ax_marg_y = plt.subplot(gs[1:4, 3])
        ax = plt.subplot(gs[1:4, 0:3])
        gs.update(wspace=0.025, hspace=0.05)  # Reduce spaces
        plt.suptitle("Poincaré Plot")
    else:
        fig = None
    # Create meshgrid (100x100 grid over the padded bounds)
    xx, yy = np.mgrid[ax1_min:ax1_max:100j, ax2_min:ax2_max:100j]
    # Fit Gaussian Kernel (2-D KDE of the point cloud, shown as filled density)
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([ax1, ax2])
    kernel = scipy.stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    # NOTE(review): matplotlib.cm.get_cmap is deprecated in Matplotlib >= 3.7;
    # consider matplotlib.pyplot.get_cmap / matplotlib.colormaps when upgrading.
    cmap = matplotlib.cm.get_cmap("Blues", 10)
    ax.contourf(xx, yy, f, cmap=cmap)
    ax.imshow(np.rot90(f), extent=[ax1_min, ax1_max, ax2_min, ax2_max], aspect="auto")
    # Marginal densities (histograms + 1-D KDE outlines)
    ax_marg_x.hist(ax1, bins=int(len(ax1) / 10), density=True, alpha=1, color="#ccdff0", edgecolor="none")
    ax_marg_y.hist(
        ax2,
        bins=int(len(ax2) / 10),
        density=True,
        alpha=1,
        color="#ccdff0",
        edgecolor="none",
        orientation="horizontal",
        zorder=1,
    )
    kde1 = scipy.stats.gaussian_kde(ax1)
    x1_plot = np.linspace(ax1_min, ax1_max, len(ax1))
    x1_dens = kde1.evaluate(x1_plot)
    ax_marg_x.fill(x1_plot, x1_dens, facecolor="none", edgecolor="#1b6aaf", alpha=0.8, linewidth=2)
    kde2 = scipy.stats.gaussian_kde(ax2)
    x2_plot = np.linspace(ax2_min, ax2_max, len(ax2))
    x2_dens = kde2.evaluate(x2_plot)
    ax_marg_y.fill_betweenx(x2_plot, x2_dens, facecolor="none", edgecolor="#1b6aaf", linewidth=2, alpha=0.8, zorder=2)
    # Turn off marginal axes labels
    ax_marg_x.axis("off")
    ax_marg_y.axis("off")
    # Plot ellipse (axes 2*SD2 / 2*SD1, rotated 45° onto the identity line)
    angle = 45
    width = 2 * sd2 + 1
    height = 2 * sd1 + 1
    xy = (mean_heart_period, mean_heart_period)
    ellipse = matplotlib.patches.Ellipse(xy=xy, width=width, height=height, angle=angle, linewidth=2, fill=False)
    ellipse.set_alpha(0.5)
    ellipse.set_facecolor("#2196F3")
    ax.add_patch(ellipse)
    # Plot points only outside ellipse (rotate points into ellipse frame and keep
    # those with normalized radius > 1)
    cos_angle = np.cos(np.radians(180.0 - angle))
    sin_angle = np.sin(np.radians(180.0 - angle))
    xc = ax1 - xy[0]
    yc = ax2 - xy[1]
    xct = xc * cos_angle - yc * sin_angle
    yct = xc * sin_angle + yc * cos_angle
    rad_cc = (xct**2 / (width / 2.0) ** 2) + (yct**2 / (height / 2.0) ** 2)
    points = np.where(rad_cc > 1)[0]
    ax.plot(ax1[points], ax2[points], "o", color="k", alpha=0.5, markersize=4)
    # SD1 and SD2 arrow (perpendicular and parallel to the identity line)
    sd1_arrow = ax.arrow(
        mean_heart_period,
        mean_heart_period,
        float(-sd1 * np.sqrt(2) / 2),
        float(sd1 * np.sqrt(2) / 2),
        linewidth=3,
        ec="#E91E63",
        fc="#E91E63",
        label="SD1",
    )
    sd2_arrow = ax.arrow(
        mean_heart_period,
        mean_heart_period,
        float(sd2 * np.sqrt(2) / 2),
        float(sd2 * np.sqrt(2) / 2),
        linewidth=3,
        ec="#FF9800",
        fc="#FF9800",
        label="SD2",
    )
    ax.set_xlabel(r"$RR_{n} (ms)$")
    ax.set_ylabel(r"$RR_{n+1} (ms)$")
    ax.legend(handles=[sd1_arrow, sd2_arrow], fontsize=12, loc="best")
    return fig
| 25,768 | 40.296474 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/intervals_to_peaks.py | import numpy as np
from .intervals_utils import _intervals_sanitize, _intervals_successive
def intervals_to_peaks(intervals, intervals_time=None, sampling_rate=1000):
    """**Convert intervals to peaks**

    Convenience function to convert intervals to peaks, such as from R-R intervals to R-peaks of an
    ECG signal. This can be useful if you do not have raw peak indices and have only interval data
    such as breath-to-breath (BBI) or rpeak-to-rpeak (RRI) intervals.

    Parameters
    ----------
    intervals : list or array
        List of intervals (by default in milliseconds).
    intervals_time : list or array, optional
        Optional list of timestamps corresponding to intervals, in seconds. If None (default), the
        cumulative sum of the intervals is used.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous signal in which the peaks occur.

    Returns
    -------
    np.ndarray
        An array of integer values indicating the peak indices,
        with the first peak occurring at sample point 0.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      # Suppose we have a vector of RRi from data sampled at 1000 Hz
      ibi = [500, 400, 700, 500, 300, 800, 500]
      peaks = nk.intervals_to_peaks(ibi, sampling_rate=1000)

      # We can then use NeuroKit's functionalities to compute HRV indices
      @savefig p_intervals_to_peaks.png scale=100%
      hrv_indices = nk.hrv_time(peaks, sampling_rate=1000, show=True)
      @suppress
      plt.close()

      hrv_indices

    .. ipython:: python

      # We can also use the timestamps of the intervals
      rri = [400, 500, 700, 800, 900]
      rri_idx = [0.7, 1.2, 2.5, 3.3, 4.2]
      nk.intervals_to_peaks(rri, rri_idx, sampling_rate=1000)

    """
    if intervals is None:
        return None

    intervals, intervals_time, intervals_missing = _intervals_sanitize(
        intervals, intervals_time=intervals_time, remove_missing=True
    )

    if intervals_missing:
        # Check for non successive intervals in case of missing data
        non_successive_indices = np.arange(1, len(intervals_time))[
            np.invert(_intervals_successive(intervals, intervals_time))
        ]
    else:
        non_successive_indices = np.array([]).astype(int)

    # The number of peaks should be the number of intervals
    # plus one extra at the beginning of each group of successive intervals
    # (with no missing data there should be N_intervals + 1 peaks)
    to_insert_indices = np.concatenate((np.array([0]), non_successive_indices))
    # The inserted peak precedes each group by its first interval (ms -> seconds)
    times_to_insert = intervals_time[to_insert_indices] - intervals[to_insert_indices] / 1000
    peaks_time = np.sort(np.concatenate((intervals_time, times_to_insert)))

    # Convert seconds to sample indices. Vectorized instead of the previous
    # per-element list comprehension; np.round uses round-half-to-even, matching
    # the element-wise np.round of the original implementation.
    return np.round(peaks_time * sampling_rate).astype(int)
| 2,910 | 34.938272 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_rsa.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.linalg
from ..ecg.ecg_rsp import ecg_rsp
from ..misc import NeuroKitWarning
from ..rsp import rsp_process
from ..signal import (
signal_filter,
signal_interpolate,
signal_rate,
signal_resample,
signal_timefrequency,
)
from ..signal.signal_formatpeaks import _signal_formatpeaks_sanitize
from .hrv_utils import _hrv_format_input, _hrv_get_rri
from .intervals_process import intervals_process
def hrv_rsa(
    ecg_signals,
    rsp_signals=None,
    rpeaks=None,
    sampling_rate=1000,
    continuous=False,
    window=None,
    window_number=None,
):
    """**Respiratory Sinus Arrhythmia (RSA)**

    Respiratory sinus arrhythmia (RSA), also referred to as 'cardiac coherence' or 'physiological
    coherence' (though these terms are often encompassing a wider meaning), is the naturally
    occurring variation in heart rate during the breathing cycle. Metrics to quantify it are often
    used as a measure of parasympathetic nervous system activity. Neurophysiology informs us that
    the functional output of the myelinated vagus originating from the nucleus ambiguous has a
    respiratory rhythm. Thus, there would a temporal relation between the respiratory rhythm being
    expressed in the firing of these efferent pathways and the functional effect on the heart rate
    rhythm manifested as RSA. Several methods exist to quantify RSA:

    * The **Peak-to-trough (P2T)** algorithm measures the statistical range in milliseconds of the
      heart period oscillation associated with synchronous respiration. Operationally, subtracting
      the shortest heart period during inspiration from the longest heart period during a breath
      cycle produces an estimate of RSA during each breath. The peak-to-trough method makes no
      statistical assumption or correction (e.g., adaptive filtering) regarding other sources of
      variance in the heart period time series that may confound, distort, or interact with the
      metric such as slower periodicities and baseline trend. Although it has been proposed that
      the P2T method "acts as a time-domain filter dynamically centered at the exact ongoing
      respiratory frequency" (Grossman, 1992), the method does not transform the time series in any
      way, as a filtering process would. Instead the method uses knowledge of the ongoing
      respiratory cycle to associate segments of the heart period time series with either
      inhalation or exhalation (Lewis, 2012).

    * The **Porges-Bohrer (PB)** algorithm assumes that heart period time series reflect the sum of
      several component time series. Each of these component time series may be mediated by
      different neural mechanisms and may have different statistical features. The Porges-Bohrer
      method applies an algorithm that selectively extracts RSA, even when the periodic process
      representing RSA is superimposed on a complex baseline that may include aperiodic and slow
      periodic processes. Since the method is designed to remove sources of variance in the heart
      period time series other than the variance within the frequency band of spontaneous
      breathing, the method is capable of accurately quantifying RSA when the signal to noise ratio
      is low.

    Parameters
    ----------
    ecg_signals : DataFrame
        DataFrame obtained from :func:`.ecg_process`. Should contain columns ``ECG_Rate`` and
        ``ECG_R_Peaks``. Can also take a DataFrame comprising of both ECG and RSP signals,
        generated by :func:`.bio_process`.
    rsp_signals : DataFrame
        DataFrame obtained from :func:`.rsp_process`. Should contain columns ``RSP_Phase`` and
        ``RSP_PhaseCompletion``. No impact when a DataFrame comprising of both the ECG and RSP
        signals are passed as ``ecg_signals``. Defaults to ``None``.
    rpeaks : dict
        The samples at which the R-peaks of the ECG signal occur. Dict returned by
        :func:`.ecg_peaks`, :func:`.ecg_process`, or :func:`.bio_process`. Defaults to ``None``.
    sampling_rate : int
        The sampling frequency of signals (in Hz, i.e., samples/second).
    continuous : bool
        If ``False``, will return RSA properties computed from the data (one value per index).
        If ``True``, will return continuous estimations of RSA of the same length as the signal.
        See below for more details.
    window : int
        For calculating RSA second by second. Length of each segment in seconds. If ``None``
        (default), window will be set at 32 seconds.
    window_number : int
        Between 2 and 8. For calculating RSA second by second. Number of windows to be
        calculated in Peak Matched Multiple Window. If ``None`` (default), window_number will be
        set at 8.

    Returns
    ----------
    rsa : dict
        A dictionary containing the RSA features, which includes:

        * ``"RSA_P2T_Values"``: the estimate of RSA during each breath cycle, produced by
          subtracting the shortest heart period (or RR interval) from the longest heart period in
          ms.
        * ``"RSA_P2T_Mean"``: the mean peak-to-trough across all cycles in ms
        * ``"RSA_P2T_Mean_log"``: the logarithm of the mean of RSA estimates.
        * ``"RSA_P2T_SD"``: the standard deviation of all RSA estimates.
        * ``"RSA_P2T_NoRSA"``: the number of breath cycles
          from which RSA could not be calculated.
        * ``"RSA_PorgesBohrer"``: the Porges-Bohrer estimate of RSA, optimal
          when the signal to noise ratio is low, in ``ln(ms^2)``.

    Example
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      data = nk.data("bio_eventrelated_100hz")

      # Process the data
      ecg_signals, info = nk.ecg_process(data["ECG"], sampling_rate=100)
      rsp_signals, _ = nk.rsp_process(data["RSP"], sampling_rate=100)

      # Get RSA features
      nk.hrv_rsa(ecg_signals, rsp_signals, info, sampling_rate=100, continuous=False)

      # Get RSA as a continuous signal
      rsa = nk.hrv_rsa(ecg_signals, rsp_signals, info, sampling_rate=100, continuous=True)
      rsa

      @savefig hrv_rsa1.png scale=100%
      nk.signal_plot([ecg_signals["ECG_Rate"], rsp_signals["RSP_Rate"], rsa], standardize=True)
      @suppress
      plt.close()

    References
    ------------
    * Servant, D., Logier, R., Mouster, Y., & Goudemand, M. (2009). La variabilité de la fréquence
      cardiaque. Intérêts en psychiatrie. L'Encéphale, 35(5), 423-428.
    * Lewis, G. F., Furman, S. A., McCool, M. F., & Porges, S. W. (2012). Statistical strategies to
      quantify respiratory sinus arrhythmia: Are commonly used metrics equivalent?. Biological
      psychology, 89(2), 349-364.
    * Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability:
      exploring pathways from personality to cardiac coherence and health. Open Journal of Social
      Sciences, 1(06), 32.

    """
    # Sanitize inputs: merge ECG/RSP channels, extract heart period and R-peaks
    signals, ecg_period, rpeaks, sampling_rate = _hrv_rsa_formatinput(
        ecg_signals, rsp_signals, rpeaks, sampling_rate
    )
    # Extract cycles
    rsp_cycles = _hrv_rsa_cycles(signals)
    rsp_onsets = rsp_cycles["RSP_Inspiration_Onsets"]
    rsp_peaks = np.argwhere(signals["RSP_Peaks"].values == 1)[:, 0]
    # Keep only expiration peaks occurring after the first inspiration onset
    rsp_peaks = np.array(rsp_peaks)[rsp_peaks > rsp_onsets[0]]
    if len(rsp_peaks) - len(rsp_onsets) == 0:
        rsp_peaks = rsp_peaks[:-1]
    # After trimming there must be exactly one peak per cycle (N onsets - 1)
    if len(rsp_peaks) - len(rsp_onsets) != -1:
        warn(
            "Couldn't find rsp cycles onsets and centers. Check your RSP signal."
            + " Returning empty dict.",
            category=NeuroKitWarning,
        )
        return {}
    # Methods ------------------------
    # Peak-to-Trough
    rsa_p2t = _hrv_rsa_p2t(
        rsp_onsets,
        rpeaks,
        sampling_rate,
        continuous=continuous,
        ecg_period=ecg_period,
        rsp_peaks=rsp_peaks,
    )
    # Porges-Bohrer
    rsa_pb = _hrv_rsa_pb(ecg_period, sampling_rate, continuous=continuous)
    # RSAsecondbysecond
    if window is None:
        window = 32  # 32 seconds
    input_duration = rpeaks[-1] / sampling_rate
    if input_duration >= window:
        rsa_gates = _hrv_rsa_gates(
            ecg_signals,
            rpeaks,
            sampling_rate=sampling_rate,
            window=window,
            window_number=window_number,
            continuous=continuous,
        )
    else:
        warn(
            f"The duration of recording is shorter than the duration of the window ({window}"
            "seconds). Returning RSA by Gates method as Nan. Consider using a longer recording.",
            category=NeuroKitWarning,
        )
        if continuous is False:
            # NOTE(review): rsa_gates is set to a bare NaN here, but the
            # non-continuous branch below calls rsa.update(rsa_gates), which
            # requires a mapping -- this path would raise TypeError. Confirm
            # whether a dict of NaN-valued Gates keys was intended.
            rsa_gates = np.nan
        else:
            rsa_gates = np.full(len(rsa_p2t), np.nan)
    if continuous is False:
        rsa = {}  # Initialize empty dict
        rsa.update(rsa_p2t)
        rsa.update(rsa_pb)
        rsa.update(rsa_gates)
    else:
        # Continuous mode: one sample-aligned column per method
        rsa = pd.DataFrame({"RSA_P2T": rsa_p2t, "RSA_Gates": rsa_gates})
    return rsa
# =============================================================================
# Methods (Domains)
# =============================================================================
def _hrv_rsa_p2t(
rsp_onsets, rpeaks, sampling_rate, continuous=False, ecg_period=None, rsp_peaks=None
):
"""Peak-to-trough algorithm (P2T)"""
# Find all RSP cycles and the Rpeaks within
cycles_rri_inh = []
cycles_rri_exh = []
# Add 750 ms offset to exhalation peak and end of the cycle in order to include next RRI
# (see Grossman, 1990)
rsp_offset = 0.75 * sampling_rate
for idx in range(len(rsp_onsets) - 1):
cycle_init = rsp_onsets[idx]
rsp_peak_offset = rsp_peaks[idx] + rsp_offset
rsp_peak = rsp_peaks[idx]
cycle_end = rsp_onsets[idx + 1] + rsp_offset
# Separately select RRI for inhalation and exhalation
cycles_rri_inh.append(
rpeaks[np.logical_and(rpeaks >= cycle_init, rpeaks < rsp_peak_offset)]
)
cycles_rri_exh.append(rpeaks[np.logical_and(rpeaks >= rsp_peak, rpeaks < cycle_end)])
# Iterate over all cycles
rsa_values = np.full(len(cycles_rri_exh), np.nan)
for i in range(len(cycles_rri_exh)):
# Estimate of RSA during each breathing phase
RRis_inh = np.diff(cycles_rri_inh[i]) / sampling_rate * 1000
RRis_exh = np.diff(cycles_rri_exh[i]) / sampling_rate * 1000
if np.logical_and(len(RRis_inh) > 0, len(RRis_exh) > 0): # you need at least one RRI
rsa_value = np.max(RRis_exh) - np.min(RRis_inh)
if rsa_value > 0:
# Take into consideration only rsp cycles in which the max exh > than min inh
rsa_values[i] = rsa_value
else:
# Negative effect should be factor into the mean using 0 (see Grossman 1990)
rsa_values[i] = 0
if continuous is False:
rsa = {"RSA_P2T_Mean": np.nanmean(rsa_values)}
rsa["RSA_P2T_Mean_log"] = np.log(rsa["RSA_P2T_Mean"]) # pylint: disable=E1111
rsa["RSA_P2T_SD"] = np.nanstd(rsa_values, ddof=1)
rsa["RSA_P2T_NoRSA"] = len(pd.Series(rsa_values).index[pd.Series(rsa_values).isnull()])
else:
rsa = signal_interpolate(
x_values=rsp_peaks[~np.isnan(rsa_values)],
y_values=rsa_values[~np.isnan(rsa_values)],
x_new=np.arange(len(ecg_period)),
)
return rsa
def _hrv_rsa_pb(ecg_period, sampling_rate, continuous=False):
    """Porges-Bohrer method.

    Returns the log of the variance (in ln(ms^2)) of the heart-period signal after
    detrending and band-pass filtering to the spontaneous-respiration band
    (0.12-0.40 Hz), averaged over 30-second epochs. Only a summary value is defined,
    so None is returned when ``continuous`` is True.
    """
    if continuous is True:
        return None
    # Re-sample at 2 Hz
    resampled = signal_resample(ecg_period, sampling_rate=sampling_rate, desired_sampling_rate=2)
    # Fit 21-point cubic polynomial filter (zero mean, 3rd order)
    # with a low-pass cutoff frequency of 0.095Hz
    trend = signal_filter(
        resampled,
        sampling_rate=2,
        lowcut=0.095,
        highcut=None,
        method="savgol",
        order=3,
        window_size=21,
    )
    # Subtracting the slow trend leaves the zero-mean residual
    zero_mean = resampled - trend
    # Remove variance outside bandwidth of spontaneous respiration
    zero_mean_filtered = signal_filter(zero_mean, sampling_rate=2, lowcut=0.12, highcut=0.40)
    # Divide into 30-second epochs (at 2 Hz, 60 samples per epoch)
    time = np.arange(0, len(zero_mean_filtered)) / 2
    time = pd.DataFrame({"Epoch Index": time // 30, "Signal": zero_mean_filtered})
    time = time.set_index("Epoch Index")
    epochs = [time.loc[i] for i in range(int(np.max(time.index.values)) + 1)]
    variance = []
    for epoch in epochs:
        variance.append(np.log(epoch.var(axis=0) / 1000))  # convert ms
    # Drop epochs whose variance is NaN (e.g. single-sample trailing epochs)
    variance = [row for row in variance if not np.isnan(row).any()]
    return {"RSA_PorgesBohrer": pd.concat(variance).mean()}
# def _hrv_rsa_synchrony(ecg_period, rsp_signal, sampling_rate=1000, method="correlation", continuous=False):
# """Experimental method
# """
# if rsp_signal is None:
# return None
#
# filtered_period = signal_filter(ecg_period, sampling_rate=sampling_rate,
# lowcut=0.12, highcut=0.4, order=6)
# coupling = signal_synchrony(filtered_period, rsp_signal, method=method, window_size=sampling_rate*3)
# coupling = signal_filter(coupling, sampling_rate=sampling_rate, highcut=0.4, order=6)
#
# if continuous is False:
# rsa = {}
# rsa["RSA_Synchrony_Mean"] = np.nanmean(coupling)
# rsa["RSA_Synchrony_SD"] = np.nanstd(coupling, ddof=1)
# return rsa
# else:
# return coupling
# def _hrv_rsa_servant(ecg_period, sampling_rate=1000, continuous=False):
# """Servant, D., Logier, R., Mouster, Y., & Goudemand, M. (2009). La variabilité de la fréquence
# cardiaque. Intérêts en psychiatrie. L’Encéphale, 35(5), 423–428. doi:10.1016/j.encep.2008.06.016
# """
#
# rpeaks, _ = nk.ecg_peaks(nk.ecg_simulate(duration=90))
# ecg_period = nk.ecg_rate(rpeaks) / 60 * 1000
# sampling_rate=1000
#
# if len(ecg_period) / sampling_rate <= 60:
# return None
#
#
# signal = nk.signal_filter(ecg_period, sampling_rate=sampling_rate,
# lowcut=0.1, highcut=1, order=6)
# signal = nk.standardize(signal)
#
# nk.signal_plot([ecg_period, signal], standardize=True)
#
# troughs = nk.signal_findpeaks(-1 * signal)["Peaks"]
# trough_signal = nk.signal_interpolate(x_values=troughs,
# y_values=signal[troughs],
# desired_length=len(signal))
# first_trough = troughs[0]
#
# # Initial parameters
# n_windows = int(len(trough_signal[first_trough:]) / sampling_rate / 16) # How many windows of 16 s
# onsets = (np.arange(n_windows) * 16 * sampling_rate) + first_trough
#
# areas_under_curve = np.zeros(len(onsets))
# for i, onset in enumerate(onsets):
# areas_under_curve[i] = sklearn.metrics.auc(np.linspace(0, 16, 16*sampling_rate),
# trough_signal[onset:onset+(16*sampling_rate)])
# max_auc = np.max(areas_under_curve)
#
# # Moving computation
# onsets = np.arange(first_trough, len(signal)-16*sampling_rate, step=4*sampling_rate)
# areas_under_curve = np.zeros(len(onsets))
# for i, onset in enumerate(onsets):
# areas_under_curve[i] = sklearn.metrics.auc(np.linspace(0, 16, 16*sampling_rate),
# trough_signal[onset:onset+(16*sampling_rate)])
# rsa = (max_auc - areas_under_curve) / max_auc + 1
#
# # Not sure what to do next, sent an email to Servant.
# pass
# =============================================================================
# Second-by-second RSA
# =============================================================================
def _hrv_rsa_gates(
    ecg_signals, rpeaks, sampling_rate=1000, window=None, window_number=None, continuous=False
):
    """Second-by-second RSA via STFT with Peak Matched Multiple Windows (Gates method).

    The RR series is interpolated to 4 Hz, a short-time spectrogram of the
    respiratory band (0.12-0.40 Hz) is computed with each of the multiple windows,
    and the weighted, log-transformed band power gives an RSA value per window
    position. Returns summary statistics (``continuous=False``) or a per-sample
    interpolated signal (``continuous=True``).
    """
    # Boundaries of rsa freq
    min_frequency = 0.12
    max_frequency = 0.40
    # Retrieve IBI and interpolate it
    rri, rri_time, _ = _hrv_get_rri(rpeaks, sampling_rate=sampling_rate)
    # Re-sample at 4 Hz
    desired_sampling_rate = 4
    rri, rri_time, sampling_rate = intervals_process(
        rri, intervals_time=rri_time, interpolate=True, interpolation_rate=desired_sampling_rate
    )
    # Sanitize parameters: segments slide by 1 s (overlap = window - 1 seconds)
    overlap = int((window - 1) * desired_sampling_rate)
    nperseg = window * desired_sampling_rate
    if window_number is None:
        window_number = 8
    # Get multipeak window
    multipeak, weight = _get_multipeak_window(nperseg, window_number)
    # NOTE(review): only the first 4 multiple windows are combined even when
    # window_number > 4, while the weights are normalized over all window_number
    # windows -- confirm whether range(window_number) was intended.
    for i in range(4):
        _, time, psd = signal_timefrequency(
            rri,
            sampling_rate=desired_sampling_rate,
            min_frequency=min_frequency,
            max_frequency=max_frequency,
            method="stft",
            window=window,
            window_type=multipeak[:, i],
            overlap=overlap,
            show=False,
        )
        if i == 0:
            rsa = np.zeros_like(psd)
        rsa = psd * weight[i] + rsa  # add weights
    # Log of weighted band power per segment (one value per second of recording)
    meanRSA = np.log(2 * sum(rsa) / nperseg)
    # Sanitize output
    if continuous is False:
        rsa = {"RSA_Gates_Mean": np.nanmean(meanRSA)}
        rsa["RSA_Gates_Mean_log"] = np.log(rsa["RSA_Gates_Mean"])  # pylint: disable=E1111
        rsa["RSA_Gates_SD"] = np.nanstd(meanRSA, ddof=1)
    else:
        # For window=32, meanRSA is RSA from 16th second to xth second where x=recording
        # duration-16secs
        # Padding the missing first and last window/2 segments
        pad_length = window / 2
        time_start = np.arange(0, pad_length)
        time_end = np.arange(time[-1], time[-1] + pad_length)[1:]
        time = np.concatenate((time_start, time, time_end))
        # Edge padding repeats the first/last RSA value
        rsa_start = np.full(len(time_start), meanRSA[0])
        rsa_end = np.full(len(time_end), meanRSA[-1])
        meanRSA = np.concatenate((rsa_start, meanRSA, rsa_end))
        # Convert to samples
        time = np.multiply(time, sampling_rate)
        rsa = signal_interpolate(
            time.astype(int), meanRSA, x_new=len(ecg_signals), method="monotone_cubic"
        )
    return rsa
def _get_multipeak_window(nperseg, window_number=8):
    """Peak Matched Multiple Window (PMMW) filter bank.

    Builds ``window_number`` data windows of length ``nperseg`` (returned as the
    columns of ``multipeak``) together with their normalized spectral weights, for
    the estimation of peaked spectra.

    References
    ----------
    Hansson, M., & Salomonsson, G. (1997). A multiple window method for estimation of
    peaked spectra. IEEE Transactions on Signal Processing, 45(3), 778-781.
    """
    peak_db = 20  # Peak in dB
    penalty_db = 30  # Penalty value in dB
    bandwidth = (window_number + 2) / nperseg  # Resolution in spectrum
    decay = 2 * peak_db / 10 / bandwidth / np.log10(np.exp(1))

    lags = np.arange(1, nperseg).conj().transpose()

    # Covariance function of the peaked spectrum
    lag0 = 2 / decay * (1 - np.exp(-decay * bandwidth / 2))
    numerator = 2 * decay - np.exp(-decay * bandwidth / 2) * (
        2 * decay * np.cos(np.pi * bandwidth * lags)
        - 4 * np.pi * lags * np.sin(np.pi * bandwidth * lags)
    )
    denominator = decay**2 + (2 * np.pi * lags) ** 2
    cov_peak = np.append(lag0, np.divide(numerator, denominator))

    # Covariance function of the penalty function (flat outside the band)
    cov_box = np.append(bandwidth, 2 * np.sin(np.pi * bandwidth * lags) / (2 * np.pi * lags))
    penalty_gain = 10 ** (penalty_db / 10)
    cov_penalty = penalty_gain * np.append(1, np.zeros((nperseg - 1, 1))) - (penalty_gain - 1) * cov_box

    # Solve the generalized eigenproblem Ry v = lambda Rx v via Cholesky whitening + Schur
    Ry = scipy.linalg.toeplitz(cov_peak)
    Rx = scipy.linalg.toeplitz(cov_penalty)
    chol = scipy.linalg.cholesky(Rx)
    whitened = scipy.linalg.inv(chol.conj().transpose()).dot(Ry).dot(scipy.linalg.inv(chol))
    _, Q = scipy.linalg.schur(whitened)
    F = scipy.linalg.inv(chol).dot(Q)

    # Rayleigh quotients of the candidate windows, sorted ascending
    eigvals = np.diag(F.conj().transpose().dot(Ry).dot(F))
    order = np.argsort(eigvals)
    sorted_vals = np.sort(eigvals)

    # Normalize each window to unit energy
    FN = np.zeros((nperseg, nperseg))
    for i in range(len(eigvals)):
        col = F[:, order[i]]
        FN[:, i] = col / np.sqrt(col.conj().transpose().dot(col))

    # Reverse so the largest eigenvalues come first.
    # NOTE(review): as in the original implementation, the slice [n-1:0:-1]
    # excludes index 0 (the smallest-eigenvalue window) -- confirm intended.
    sorted_vals = sorted_vals[len(eigvals) - 1 : 0 : -1]
    FN = FN[:, len(eigvals) - 1 : 0 : -1]

    weight = sorted_vals[:window_number] / np.sum(sorted_vals[:window_number])
    multipeak = FN[:, 0:window_number]
    return multipeak, weight
# =============================================================================
# Internals
# =============================================================================
def _hrv_rsa_cycles(signals):
"""Extract respiratory cycles."""
inspiration_onsets = np.intersect1d(
np.where(signals["RSP_Phase"] == 1)[0],
np.where(signals["RSP_Phase_Completion"] == 0)[0],
assume_unique=True,
)
expiration_onsets = np.intersect1d(
np.where(signals["RSP_Phase"] == 0)[0],
np.where(signals["RSP_Phase_Completion"] == 0)[0],
assume_unique=True,
)
cycles_length = np.diff(inspiration_onsets)
return {
"RSP_Inspiration_Onsets": inspiration_onsets,
"RSP_Expiration_Onsets": expiration_onsets,
"RSP_Cycles_Length": cycles_length,
}
def _hrv_rsa_formatinput(ecg_signals, rsp_signals, rpeaks=None, sampling_rate=1000):
    """Standardize ECG/RSP inputs for RSA computation.

    Returns a tuple ``(signals, ecg_period, rpeaks, sampling_rate)`` where
    ``signals`` is the concatenation of the ECG and RSP signal DataFrames,
    ``ecg_period`` is the continuous heart-rate signal and ``rpeaks`` are the
    sanitized R-peak indices.
    """
    # Let the generic HRV input sanitizer resolve `rpeaks` and the sampling rate.
    # NOTE(review): when `rpeaks` is None this still calls _hrv_format_input(None, ...);
    # presumably that returns a None peaks vector that is re-derived below — confirm.
    rpeaks, sampling_rate = _hrv_format_input(
        rpeaks, sampling_rate=sampling_rate, output_format="peaks"
    )
    # Sanity Checks
    if isinstance(ecg_signals, tuple):
        # Output of a processing function, e.g. (signals_df, info): keep the DataFrame
        # and discard the peaks resolved above (they will be re-extracted from it).
        ecg_signals = ecg_signals[0]
        rpeaks = None
    if isinstance(ecg_signals, pd.DataFrame):
        if "ECG_Rate" in ecg_signals.columns:
            ecg_period = ecg_signals["ECG_Rate"].values
        else:
            if "ECG_R_Peaks" in ecg_signals.columns:
                # Reconstruct the continuous heart-rate signal from the binary peak column.
                ecg_period = signal_rate(
                    np.where(ecg_signals["ECG_R_Peaks"].values == 1)[0],
                    sampling_rate=sampling_rate,
                    desired_length=len(ecg_signals),
                )
            else:
                raise ValueError(
                    "NeuroKit error: _hrv_rsa_formatinput():"
                    "Wrong input, we couldn't extract"
                    "heart rate signal."
                )
    if rsp_signals is None:
        # No separate RSP input: look for RSP columns inside the ECG DataFrame.
        rsp_signals = ecg_signals.copy()
    elif isinstance(rsp_signals, tuple):
        rsp_signals = rsp_signals[0]
    if isinstance(rsp_signals, pd.DataFrame):
        # Both "RSP_Phase" and "RSP_Phase_Completion" are required downstream.
        rsp_cols = [col for col in rsp_signals.columns if "RSP_Phase" in col]
        if len(rsp_cols) != 2:
            # Fall back to ECG-derived respiration (EDR) — explicitly flagged as unreliable.
            edr = ecg_rsp(ecg_period, sampling_rate=sampling_rate)
            rsp_signals, _ = rsp_process(edr, sampling_rate)
            warn(
                "RSP signal not found. For this time, we will derive RSP"
                " signal from ECG using ecg_rsp(). But the results are"
                " definitely not reliable, so please provide a real RSP signal.",
                category=NeuroKitWarning,
            )
    if rpeaks is None:
        try:
            rpeaks = _signal_formatpeaks_sanitize(ecg_signals)
        except NameError as e:
            raise ValueError(
                "NeuroKit error: _hrv_rsa_formatinput(): "
                "Wrong input, we couldn't extract rpeaks indices."
            ) from e
    else:
        rpeaks = _signal_formatpeaks_sanitize(rpeaks)
    # Merge ECG and RSP signals, dropping ECG columns already present in RSP.
    nonduplicates = ecg_signals.columns[[i not in rsp_signals.columns for i in ecg_signals.columns]]
    signals = pd.concat([ecg_signals[nonduplicates], rsp_signals], axis=1)
    return signals, ecg_period, rpeaks, sampling_rate
| 23,322 | 38.13255 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_time.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from ..stats import mad, summary_plot
from .hrv_utils import _hrv_format_input
from .intervals_utils import _intervals_successive
def hrv_time(peaks, sampling_rate=1000, show=False, **kwargs):
    """**Computes time-domain indices of Heart Rate Variability (HRV)**
    Time-domain measures reflect the total variability of HR and are relatively indiscriminate when
    it comes to precisely quantifying the respective contributions of different underlying
    regulatory mechanisms. However, this "general" sensitivity can be seen as a positive feature
    (e.g., in exploratory studies or when specific underlying neurophysiological mechanisms are not
    the focus). Moreover, as they are easy to compute and interpret, time-domain measures are still
    among the most commonly reported HRV indices.
    The time-domain indices can be categorized into deviation-based and difference-based indices
    where the former are calculated directly from the normal beat-to-beat intervals (normal RR
    intervals or NN intervals), and the latter are derived from the difference between successive NN
    intervals.
    .. tip::
        We strongly recommend checking our open-access paper `Pham et al. (2021)
        <https://doi.org/10.3390/s21123998>`_ on HRV indices for more information.
    Parameters
    ----------
    peaks : dict
        Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
        Can be a list of indices or the output(s) of other functions such as :func:`.ecg_peaks`,
        :func:`.ppg_peaks`, :func:`.ecg_process` or :func:`.bio_process`.
        Can also be a dict containing the keys `RRI` and `RRI_Time`
        to directly pass the R-R intervals and their timestamps, respectively.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous cardiac signal in which the peaks occur. Should be at
        least twice as high as the highest frequency in vhf. By default 1000.
    show : bool
        If ``True``, will plot the distribution of R-R intervals.
    Returns
    -------
    DataFrame
        Contains time domain HRV metrics:
        * **MeanNN**: The mean of the RR intervals.
        * **SDNN**: The standard deviation of the RR intervals.
        * **SDANN1**, **SDANN2**, **SDANN5**: The standard deviation of average RR intervals
          extracted from n-minute segments of time series data (1, 2 and 5 by default). Note that
          these indices require a minimal duration of signal to be computed (3, 6 and 15 minutes
          respectively) and will be silently skipped if the data provided is too short.
        * **SDNNI1**, **SDNNI2**, **SDNNI5**: The mean of the standard deviations of RR intervals
          extracted from n-minute segments of time series data (1, 2 and 5 by default). Note that
          these indices require a minimal duration of signal to be computed (3, 6 and 15 minutes
          respectively) and will be silently skipped if the data provided is too short.
        * **RMSSD**: The square root of the mean of the squared successive differences between
          adjacent RR intervals. It is equivalent (although on another scale) to SD1, and
          therefore it is redundant to report correlations with both (Ciccone, 2017).
        * **SDSD**: The standard deviation of the successive differences between RR intervals.
        * **CVNN**: The standard deviation of the RR intervals (**SDNN**) divided by the mean of
          the RR intervals (**MeanNN**).
        * **CVSD**: The root mean square of successive differences (**RMSSD**) divided by
          the mean of the RR intervals (**MeanNN**).
        * **MedianNN**: The median of the RR intervals.
        * **MadNN**: The median absolute deviation of the RR intervals.
        * **MCVNN**: The median absolute deviation of the RR intervals (**MadNN**) divided by the
          median of the RR intervals (**MedianNN**).
        * **IQRNN**: The interquartile range (**IQR**) of the RR intervals.
        * **SDRMSSD**: SDNN / RMSSD, a time-domain equivalent for the low Frequency-to-High
          Frequency (LF/HF) Ratio (Sollers et al., 2007).
        * **Prc20NN**: The 20th percentile of the RR intervals (Han, 2017; Hovsepian, 2015).
        * **Prc80NN**: The 80th percentile of the RR intervals (Han, 2017; Hovsepian, 2015).
        * **pNN50**: The proportion of RR intervals greater than 50ms, out of the total number of
          RR intervals.
        * **pNN20**: The proportion of RR intervals greater than 20ms, out of the total number of
          RR intervals.
        * **MinNN**: The minimum of the RR intervals (Parent, 2019; Subramaniam, 2022).
        * **MaxNN**: The maximum of the RR intervals (Parent, 2019; Subramaniam, 2022).
        * **TINN**: A geometrical parameter of the HRV, or more specifically, the baseline width of
          the RR intervals distribution obtained by triangular interpolation, where the error of
          least squares determines the triangle. It is an approximation of the RR interval
          distribution.
        * **HTI**: The HRV triangular index, measuring the total number of RR intervals divided by
          the height of the RR intervals histogram.
    See Also
    --------
    ecg_peaks, ppg_peaks, hrv_frequency, hrv_summary, hrv_nonlinear
    Examples
    --------
    .. ipython:: python
      import neurokit2 as nk
      # Download data
      data = nk.data("bio_resting_5min_100hz")
      # Find peaks
      peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
      # Compute HRV indices
      @savefig p_hrv_time.png scale=100%
      hrv = nk.hrv_time(peaks, sampling_rate=100, show=True)
      @suppress
      plt.close()
    References
    ----------
    * Pham, T., Lau, Z. J., Chen, S. H. A., & Makowski, D. (2021). Heart Rate Variability in
      Psychology: A Review of HRV Indices and an Analysis Tutorial. Sensors, 21(12), 3998.
      https://doi.org/10.3390/s21123998
    * Ciccone, A. B., Siedlik, J. A., Wecht, J. M., Deckert, J. A., Nguyen, N. D., & Weir, J. P.
      (2017). Reminder: RMSSD and SD1 are identical heart rate variability metrics. Muscle & nerve,
      56(4), 674-678.
    * Han, L., Zhang, Q., Chen, X., Zhan, Q., Yang, T., & Zhao, Z. (2017). Detecting work-related
      stress with a wearable device. Computers in Industry, 90, 42-49.
    * Hovsepian, K., Al'Absi, M., Ertin, E., Kamarck, T., Nakajima, M., & Kumar, S. (2015). cStress:
      towards a gold standard for continuous stress assessment in the mobile environment. In
      Proceedings of the 2015 ACM international joint conference on pervasive and ubiquitous
      computing (pp. 493-504).
    * Parent, M., Tiwari, A., Albuquerque, I., Gagnon, J. F., Lafond, D., Tremblay, S., & Falk, T.
      H. (2019). A multimodal approach to improve the robustness of physiological stress prediction
      during physical activity. In 2019 IEEE International Conference on Systems, Man and
      Cybernetics (SMC) (pp. 4131-4136). IEEE.
    * Stein, P. K. (2002). Assessing heart rate variability from real-world Holter reports. Cardiac
      electrophysiology review, 6(3), 239-244.
    * Shaffer, F., & Ginsberg, J. P. (2017). An overview of heart rate variability metrics and
      norms. Frontiers in public health, 5, 258.
    * Subramaniam, S. D., & Dass, B. (2022). An Efficient Convolutional Neural Network for Acute
      Pain Recognition Using HRV Features. In Proceedings of the International e-Conference on
      Intelligent Systems and Signal Processing (pp. 119-132). Springer, Singapore.
    * Sollers, J. J., Buchanan, T. W., Mowrer, S. M., Hill, L. K., & Thayer, J. F. (2007).
      Comparison of the ratio of the standard deviation of the RR interval and the root mean
      squared successive differences (SD/rMSSD) to the low frequency-to-high frequency (LF/HF)
      ratio in a patient population and normal healthy controls. Biomed Sci Instrum, 43, 158-163.
    """
    # Sanitize input
    # If given peaks, compute R-R intervals (also referred to as NN) in milliseconds
    rri, rri_time, rri_missing = _hrv_format_input(peaks, sampling_rate=sampling_rate)
    diff_rri = np.diff(rri)
    if rri_missing:
        # Only include successive differences
        diff_rri = diff_rri[_intervals_successive(rri, intervals_time=rri_time)]
    out = {}  # Initialize empty container for results
    # Deviation-based
    out["MeanNN"] = np.nanmean(rri)
    out["SDNN"] = np.nanstd(rri, ddof=1)
    for i in [1, 2, 5]:
        # FIX: pass the interval timestamps so that the n-minute segments are
        # delimited by real elapsed time. Previously `rri_time` was dropped and
        # `_sdann`/`_sdnni` silently fell back to assuming contiguous intervals,
        # which misplaces segment boundaries when beats are missing.
        out["SDANN" + str(i)] = _sdann(rri, rri_time, window=i)
        out["SDNNI" + str(i)] = _sdnni(rri, rri_time, window=i)
    # Difference-based
    out["RMSSD"] = np.sqrt(np.nanmean(diff_rri**2))
    out["SDSD"] = np.nanstd(diff_rri, ddof=1)
    # Normalized
    out["CVNN"] = out["SDNN"] / out["MeanNN"]
    out["CVSD"] = out["RMSSD"] / out["MeanNN"]
    # Robust
    out["MedianNN"] = np.nanmedian(rri)
    out["MadNN"] = mad(rri)
    out["MCVNN"] = out["MadNN"] / out["MedianNN"]  # Normalized
    out["IQRNN"] = scipy.stats.iqr(rri)
    out["SDRMSSD"] = out["SDNN"] / out["RMSSD"]  # Sollers (2007)
    out["Prc20NN"] = np.nanpercentile(rri, q=20)
    out["Prc80NN"] = np.nanpercentile(rri, q=80)
    # Extreme-based
    nn50 = np.sum(np.abs(diff_rri) > 50)
    nn20 = np.sum(np.abs(diff_rri) > 20)
    # len(diff_rri) + 1 is the total number of intervals.
    out["pNN50"] = nn50 / (len(diff_rri) + 1) * 100
    out["pNN20"] = nn20 / (len(diff_rri) + 1) * 100
    out["MinNN"] = np.nanmin(rri)
    out["MaxNN"] = np.nanmax(rri)
    # Geometrical domain
    # Default bin width of 1/128 s (in ms) follows the Task Force recommendation.
    binsize = kwargs.get("binsize", ((1 / 128) * 1000))
    bins = np.arange(0, np.max(rri) + binsize, binsize)
    bar_y, bar_x = np.histogram(rri, bins=bins)
    # HRV Triangular Index
    out["HTI"] = len(rri) / np.max(bar_y)
    # Triangular Interpolation of the NN Interval Histogram
    out["TINN"] = _hrv_TINN(rri, bar_x, bar_y, binsize)
    if show:
        _hrv_time_show(rri, **kwargs)
    out = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("HRV_")
    return out
# =============================================================================
# Utilities
# =============================================================================
def _hrv_time_show(rri, **kwargs):
    """Plot the distribution of R-R intervals and return the figure."""
    fig = summary_plot(rri, **kwargs)
    fig.suptitle("Distribution of R-R intervals")
    # Label the axis of the currently active subplot.
    plt.xlabel("R-R intervals (ms)")
    return fig
def _sdann(rri, rri_time=None, window=1):
window_size = window * 60 * 1000 # Convert window in min to ms
if rri_time is None:
# Compute the timestamps of the R-R intervals in seconds
rri_time = np.nancumsum(rri / 1000)
# Convert timestamps to milliseconds and ensure first timestamp is equal to first interval
rri_cumsum = (rri_time - rri_time[0]) * 1000 + rri[0]
n_windows = int(np.round(rri_cumsum[-1] / window_size))
if n_windows < 3:
return np.nan
avg_rri = []
for i in range(n_windows):
start = i * window_size
start_idx = np.where(rri_cumsum >= start)[0][0]
end_idx = np.where(rri_cumsum < start + window_size)[0][-1]
avg_rri.append(np.nanmean(rri[start_idx:end_idx]))
sdann = np.nanstd(avg_rri, ddof=1)
return sdann
def _sdnni(rri, rri_time=None, window=1):
window_size = window * 60 * 1000 # Convert window in min to ms
if rri_time is None:
# Compute the timestamps of the R-R intervals in seconds
rri_time = np.nancumsum(rri / 1000)
# Convert timestamps to milliseconds and ensure first timestamp is equal to first interval
rri_cumsum = (rri_time - rri_time[0]) * 1000 + rri[0]
n_windows = int(np.round(rri_cumsum[-1] / window_size))
if n_windows < 3:
return np.nan
sdnn_ = []
for i in range(n_windows):
start = i * window_size
start_idx = np.where(rri_cumsum >= start)[0][0]
end_idx = np.where(rri_cumsum < start + window_size)[0][-1]
sdnn_.append(np.nanstd(rri[start_idx:end_idx], ddof=1))
sdnni = np.nanmean(sdnn_)
return sdnni
def _hrv_TINN(rri, bar_x, bar_y, binsize):
    """Triangular Interpolation of the NN Interval Histogram (TINN).

    Fits a triangle (baseline N..M with apex at the histogram mode X) to the
    R-R histogram by least squares and returns the baseline width M - N.
    `bar_x` are the histogram bin edges, `bar_y` the bin counts, `binsize`
    the bin width in ms.
    """
    # set pre-defined conditions
    min_error = 2**14
    X = bar_x[np.argmax(bar_y)]  # bin where Y is max
    Y = np.max(bar_y)  # max value of Y
    # First bin edge strictly above the smallest interval: starting point for N.
    idx_where = np.where(bar_x - np.min(rri) > 0)[0]
    if len(idx_where) == 0:
        return np.nan
    n = bar_x[idx_where[0]]  # starting search of N
    m = X + binsize  # starting search value of M
    N = 0
    M = 0
    # start to find best values of M and N where least square is minimized
    # NOTE(review): `m` is never reset inside the outer loop, so after the first
    # value of `n` the inner loop body is skipped (m already >= max(rri)) and
    # only n == starting value is actually explored — looks unintended; confirm
    # against the reference TINN algorithm before changing (it alters results).
    while n < X:
        while m < np.max(rri):
            # Ascending triangle edge: line from (n, 0) to (X, Y), evaluated on the
            # bin edges. Relies on `n`/`m` matching `bar_x` entries exactly (both
            # advance in steps of `binsize` from bin edges).
            n_start = np.where(bar_x == n)[0][0]
            n_end = np.where(bar_x == X)[0][0]
            qn = np.polyval(
                np.polyfit([n, X], [0, Y], deg=1), bar_x[n_start : n_end + 1]
            )
            # Descending triangle edge: line from (X, Y) to (m, 0).
            m_start = np.where(bar_x == X)[0][0]
            m_end = np.where(bar_x == m)[0][0]
            qm = np.polyval(
                np.polyfit([X, m], [Y, 0], deg=1), bar_x[m_start : m_end + 1]
            )
            # Candidate triangle sampled on all bin edges (zero outside [n, m]).
            q = np.zeros(len(bar_x))
            q[n_start : n_end + 1] = qn
            q[m_start : m_end + 1] = qm
            # least squares error
            error = np.sum((bar_y[n_start : m_end + 1] - q[n_start : m_end + 1]) ** 2)
            if error < min_error:
                N = n
                M = m
                min_error = error
            m += binsize
        n += binsize
    return M - N
| 13,478 | 44.846939 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_rqa.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.spatial
from ..complexity import complexity_rqa
from ..signal import signal_detrend
from .hrv_utils import _hrv_format_input
def hrv_rqa(
    peaks,
    sampling_rate=1000,
    dimension=7,
    delay=1,
    tolerance="zimatore2021",
    show=False,
    **kwargs,
):
    """**Recurrence Quantification Analysis (RQA) of Heart Rate Variability (HRV)**

    RQA is a type of complexity analysis used in non-linear dynamics (related to
    entropy and fractal dimensions). See :func:`.complexity_rqa` for more information.

    Parameters
    ----------
    peaks : dict
        Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
        Can be a list of indices or the output(s) of other functions such as
        :func:`.ecg_peaks`, :func:`.ppg_peaks`, :func:`.ecg_process` or
        :func:`.bio_process`.
    sampling_rate : int, optional
        Sampling rate (Hz) of the continuous cardiac signal in which the peaks
        occur. Should be at least twice as high as the highest frequency in vhf.
        By default 1000.
    delay : int
        See :func:`.complexity_rqa` for more information.
    dimension : int
        See :func:`.complexity_rqa` for more information.
    tolerance : float
        See :func:`.complexity_rqa` for more information. If ``"zimatore2021"``,
        will be set to half of the mean pairwise distance between points.
    show : bool
        See :func:`.complexity_rqa` for more information.
    **kwargs
        Other arguments to be passed to :func:`.complexity_rqa`.

    Returns
    ----------
    rqa : float
        The RQA.

    See Also
    --------
    complexity_rqa, hrv_nonlinear

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      data = nk.data("bio_resting_5min_100hz")
      peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)

      @savefig p_hrv_rqa1.png scale=100%
      hrv_rqa = nk.hrv_rqa(peaks, sampling_rate=100, show=True)
      @suppress
      plt.close()

    References
    ----------
    * Zimatore, G., Falcioni, L., Gallotta, M. C., Bonavolontà, V., Campanella, M., De Spirito, M.,
      ... & Baldari, C. (2021). Recurrence quantification analysis of heart rate variability to
      detect both ventilatory thresholds. PloS one, 16(10), e0249504.
    * Ding, H., Crozier, S., & Wilson, S. (2008). Optimization of Euclidean distance threshold in
      the application of recurrence quantification analysis to heart rate variability studies.
      Chaos, Solitons & Fractals, 38(5), 1457-1467.
    """
    # Sanitize input: convert peaks to R-R intervals (ms).
    rri, _, _ = _hrv_format_input(peaks, sampling_rate=sampling_rate)
    # Remove the linear trend, as recommended by Zimatore (2021).
    rri = signal_detrend(rri, method="polynomial", order=1)
    # Default radius: half of the mean pairwise distance between (duplicated) points.
    if tolerance == "zimatore2021":
        points = np.column_stack((rri, rri))
        pairwise = scipy.spatial.distance.pdist(points, "euclidean")
        tolerance = 0.5 * np.mean(pairwise)
    # Run the RQA
    results, _ = complexity_rqa(
        rri,
        dimension=dimension,
        delay=delay,
        tolerance=tolerance,
        show=show,
        **kwargs,
    )
    return results
| 3,397 | 30.174312 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/__init__.py | # -*- coding: utf-8 -*-
from .hrv import hrv
from .hrv_frequency import hrv_frequency
from .hrv_nonlinear import hrv_nonlinear
from .hrv_rqa import hrv_rqa
from .hrv_rsa import hrv_rsa
from .hrv_time import hrv_time
from .intervals_process import intervals_process
from .intervals_to_peaks import intervals_to_peaks
# Explicit public API: the names exported by `from neurokit2.hrv import *`.
__all__ = [
    "hrv_time",
    "hrv_frequency",
    "hrv_nonlinear",
    "hrv_rsa",
    "hrv_rqa",
    "hrv",
    "intervals_process",
    "intervals_to_peaks",
]
| 481 | 21.952381 | 50 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/hrv_utils.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .intervals_to_peaks import intervals_to_peaks
from .intervals_utils import _intervals_sanitize
def _hrv_get_rri(peaks=None, sampling_rate=1000):
    """Convert peak indices (in samples) into sanitized R-R intervals (in ms).

    Returns a tuple ``(rri, rri_time, rri_missing)``; all three are ``None``
    when no peaks are provided.
    """
    if peaks is None:
        return None, None, None
    # Successive differences in samples, converted to milliseconds.
    intervals_ms = np.diff(peaks) / sampling_rate * 1000
    # _intervals_sanitize already returns (rri, rri_time, rri_missing).
    return _intervals_sanitize(intervals_ms)
def _hrv_format_input(peaks=None, sampling_rate=1000, output_format="intervals"):
    """Convert the various accepted `peaks` inputs into a standard representation.

    Parameters
    ----------
    peaks : tuple, dict, pd.DataFrame, pd.Series, list or np.ndarray
        Peak samples, the output of peak-detection functions, or a
        dict/DataFrame containing "RRI" (and optionally "RRI_Time") entries.
    sampling_rate : int
        Sampling rate (Hz) of the signal the peaks were detected in (may be
        overridden by an info dict found in `peaks`).
    output_format : str
        ``"intervals"`` returns ``(rri, rri_time, rri_missing)``;
        ``"peaks"`` returns ``(peaks, sampling_rate)``.

    Raises
    ------
    ValueError
        If ``output_format`` is not ``"intervals"`` or ``"peaks"``. (Previously
        an unknown value silently returned ``None``, hiding caller typos.)
    """
    if isinstance(peaks, tuple):
        rri, rri_time, rri_missing, sampling_rate = _hrv_sanitize_tuple(peaks, sampling_rate=sampling_rate)
    elif isinstance(peaks, (dict, pd.DataFrame)):
        rri, rri_time, rri_missing, sampling_rate = _hrv_sanitize_dict_or_df(peaks, sampling_rate=sampling_rate)
    else:
        peaks = _hrv_sanitize_peaks(peaks)
        rri, rri_time, rri_missing = _hrv_get_rri(peaks, sampling_rate=sampling_rate)
    if output_format == "intervals":
        return rri, rri_time, rri_missing
    if output_format == "peaks":
        return (
            intervals_to_peaks(rri, intervals_time=rri_time, sampling_rate=sampling_rate),
            sampling_rate,
        )
    raise ValueError(
        "NeuroKit error: _hrv_format_input(): `output_format` should be one of "
        "'intervals' or 'peaks'."
    )
# =============================================================================
# Internals
# =============================================================================
def _hrv_sanitize_tuple(peaks, sampling_rate=1000):
    """Sanitize a tuple input (e.g. the ``(signals, info)`` output of
    :func:`.ecg_peaks`) into ``(rri, rri_time, rri_missing, sampling_rate)``.
    """
    # Get sampling rate from an info dict, if the tuple contains one.
    info = [i for i in peaks if isinstance(i, dict)]
    if len(info) > 0:
        sampling_rate = info[0]["sampling_rate"]
    else:
        # No info dict: assume a plain (peaks, sampling_rate) tuple.
        # FIX: previously `info[0]["sampling_rate"]` was accessed BEFORE the
        # length check, so this fallback branch was unreachable and such tuples
        # always raised IndexError.
        peaks, sampling_rate = peaks[0], peaks[1]
    # Get peaks
    if isinstance(peaks[0], (dict, pd.DataFrame)):
        try:
            # NOTE(review): _hrv_sanitize_dict_or_df returns an (rri, rri_time,
            # rri_missing, sampling_rate) tuple, which is then fed to
            # _hrv_get_rri as if it were peaks — verify this path with a
            # DataFrame first element actually occurs / behaves as intended.
            peaks = _hrv_sanitize_dict_or_df(peaks[0])
        except NameError:
            if isinstance(peaks[1], (dict, pd.DataFrame)):
                try:
                    peaks = _hrv_sanitize_dict_or_df(peaks[1])
                except NameError:
                    peaks = _hrv_sanitize_peaks(peaks[1])
            else:
                peaks = _hrv_sanitize_peaks(peaks[0])
    rri, rri_time, rri_missing = _hrv_get_rri(peaks=peaks, sampling_rate=sampling_rate)
    return rri, rri_time, rri_missing, sampling_rate
def _hrv_sanitize_dict_or_df(peaks, sampling_rate=None):
    """Sanitize a dict or DataFrame input into
    ``(rri, rri_time, rri_missing, sampling_rate)``.

    Accepts either pre-computed intervals (keys/columns "RRI" and optionally
    "RRI_Time") or a peaks column (name containing "Peak").

    Raises NameError when no peak column can be found; callers deliberately
    catch this to try alternative inputs.
    """
    # Get columns
    if isinstance(peaks, dict):
        cols = np.array(list(peaks.keys()))
        if "sampling_rate" in cols:
            sampling_rate = peaks["sampling_rate"]
    elif isinstance(peaks, pd.DataFrame):
        cols = peaks.columns.values
    # NOTE(review): for any other input type `cols` is unbound and the next line
    # raises NameError — possibly relied upon by callers' except NameError; confirm.
    # check whether R-R intervals were passed rather than peak indices
    if "RRI" in cols:
        rri = peaks["RRI"]
        if "RRI_Time" in cols:
            rri_time = peaks["RRI_Time"]
        else:
            rri_time = None
        rri, rri_time, rri_missing = _intervals_sanitize(rri, intervals_time=rri_time)
        return rri, rri_time, rri_missing, sampling_rate
    # Keep only peak columns; prefer ECG/PPG peaks when several are present.
    cols = cols[["Peak" in s for s in cols]]
    if len(cols) > 1:
        cols = cols[[("ECG" in s) or ("PPG" in s) for s in cols]]
    if len(cols) == 0:
        # NOTE(review): NameError is raised with three separate args (a tuple
        # message) — callers only catch the type, not the message.
        raise NameError(
            "NeuroKit error: hrv(): Wrong input, ",
            "we couldn't extract R-peak indices. ",
            "You need to provide a list of R-peak indices.",
        )
    peaks = _hrv_sanitize_peaks(peaks[cols[0]])
    if sampling_rate is not None:
        rri, rri_time, rri_missing = _hrv_get_rri(peaks=peaks, sampling_rate=sampling_rate)
    else:
        # Fall back to _hrv_get_rri's default sampling rate.
        rri, rri_time, rri_missing = _hrv_get_rri(peaks=peaks)
    return rri, rri_time, rri_missing, sampling_rate
def _hrv_sanitize_peaks(peaks):
if isinstance(peaks, pd.Series):
peaks = peaks.values
if len(np.unique(peaks)) == 2:
if np.all(np.unique(peaks) == np.array([0, 1])):
peaks = np.where(peaks == 1)[0]
if isinstance(peaks, list):
peaks = np.array(peaks)
if peaks is not None:
if isinstance(peaks, tuple):
if any(np.diff(peaks[0]) < 0): # not continuously increasing
raise ValueError(
"NeuroKit error: _hrv_sanitize_input(): "
+ "The peak indices passed were detected as non-consecutive. You might have passed RR "
+ "intervals instead of peaks. If so, convert RRIs into peaks using "
+ "nk.intervals_to_peaks()."
)
else:
if any(np.diff(peaks) < 0):
raise ValueError(
"NeuroKit error: _hrv_sanitize_input(): "
+ "The peak indices passed were detected as non-consecutive. You might have passed RR "
+ "intervals instead of peaks. If so, convert RRIs into peaks using "
+ "nk.intervals_to_peaks()."
)
return peaks
| 4,933 | 34.242857 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/hrv/intervals_process.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..misc import NeuroKitWarning
from ..signal import signal_detrend, signal_interpolate
from .intervals_utils import (
_intervals_sanitize,
_intervals_time_to_sampling_rate,
_intervals_time_uniform,
)
def intervals_process(
    intervals,
    intervals_time=None,
    interpolate=False,
    interpolation_rate=100,
    detrend=None,
    **kwargs
):
    """**Interval preprocessing**
    R-peak intervals preprocessing.
    Parameters
    ----------
    intervals : list or array
        List or numpy array of intervals, in milliseconds.
    intervals_time : list or array, optional
        List or numpy array of timestamps corresponding to intervals, in seconds.
    interpolate : bool, optional
        Whether to interpolate the interval signal. The default is False.
    interpolation_rate : int, optional
        Sampling rate (Hz) of the interpolated interbeat intervals. Should be at least twice as
        high as the highest frequency in vhf. By default 100. To replicate Kubios defaults, set
        to 4. Ignored if ``interpolate`` is False.
    detrend : str
        Can be one of ``"polynomial"`` (traditional detrending of a given order) or
        ``"tarvainen2002"`` to use the smoothness priors approach described by Tarvainen (2002)
        (mostly used in HRV analyses as a lowpass filter to remove complex trends), ``"loess"`` for
        LOESS smoothing trend removal or ``"locreg"`` for local linear regression (the *'runline'*
        algorithm from chronux). By default None such that there is no detrending.
    **kwargs
        Keyword arguments to be passed to :func:`.signal_interpolate`.
    Returns
    -------
    np.ndarray
        Preprocessed intervals, in milliseconds.
    np.ndarray
        Preprocessed timestamps corresponding to intervals, in seconds.
    int or None
        Sampling rate (Hz) of the interpolated interbeat intervals (None if the
        intervals were neither interpolated nor detected as uniformly sampled).
    Examples
    --------
    **Example 1**: With interpolation and detrending
    .. ipython:: python
      import neurokit2 as nk
      import numpy as np
      import matplotlib.pyplot as plt
      plt.rc('font', size=8)
      # Download data
      data = nk.data("bio_resting_5min_100hz")
      # Clean signal and find peaks
      sampling_rate = 100
      ecg_cleaned = nk.ecg_clean(data["ECG"], sampling_rate=sampling_rate)
      peaks, info = nk.ecg_peaks(ecg_cleaned, sampling_rate=sampling_rate, correct_artifacts=True)
      # Convert peaks to intervals (ms) and timestamps (s)
      rpeaks = info["ECG_R_Peaks"]
      rri = np.diff(rpeaks) / sampling_rate * 1000
      rri_time = np.array(rpeaks[1:]) / sampling_rate
      # Process the intervals and compare with the original ones
      @savefig p_intervals_process1.png scale=100%
      plt.figure()
      plt.plot(rri_time, rri, label="Original intervals")
      intervals, intervals_time, _ = nk.intervals_process(rri,
                                                          intervals_time=rri_time,
                                                          interpolate=True,
                                                          interpolation_rate=100,
                                                          detrend="tarvainen2002")
      plt.plot(intervals_time, intervals, label="Processed intervals")
      plt.legend()
      plt.xlabel("Time (seconds)")
      plt.ylabel("Interbeat intervals (milliseconds)")
      @suppress
      plt.close()
    """
    # Sanitize input
    intervals, intervals_time, _ = _intervals_sanitize(intervals, intervals_time=intervals_time)
    if interpolate is False:
        interpolation_rate = None
    if interpolation_rate is not None:
        # Rate should be at least 1 Hz (due to Nyquist & frequencies we are interested in)
        # We considered an interpolation rate 4 Hz by default to match Kubios
        # but in case of some applications with high heart rates we decided to make it 100 Hz
        # See https://github.com/neuropsychology/NeuroKit/pull/680 for more information
        # and if you have any thoughts to contribute, please let us know!
        if interpolation_rate < 1:
            warn(
                "The interpolation rate of the R-R intervals is too low for "
                " computing the frequency-domain features."
                " Consider increasing the interpolation rate to at least 1 Hz.",
                category=NeuroKitWarning,
            )
        # Compute x-values of interpolated interval signal at requested sampling rate.
        x_new = np.arange(
            start=intervals_time[0],
            stop=intervals_time[-1] + 1 / interpolation_rate,
            step=1 / interpolation_rate,
        )
        intervals = signal_interpolate(intervals_time, intervals, x_new=x_new, **kwargs)
        intervals_time = x_new
    else:
        # check if intervals appear to be already interpolated
        if _intervals_time_uniform(intervals_time):
            # get sampling rate used for interpolation
            interpolation_rate = _intervals_time_to_sampling_rate(intervals_time)
    if detrend is not None:
        # Note: interpolation_rate may still be None here; signal_detrend must accept that.
        intervals = signal_detrend(intervals, method=detrend, sampling_rate=interpolation_rate)
    return intervals, intervals_time, interpolation_rate
| 5,060 | 37.340909 | 99 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.