repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
NeuroKit | NeuroKit-master/neurokit2/signal/signal_decompose.py | import numpy as np
from ..misc import as_vector
def signal_decompose(signal, method="emd", n_components=None, **kwargs):
    """**Decompose a signal**

    Split a signal into its underlying sources using Empirical Mode Decomposition (EMD)
    or a Singular Spectrum Analysis (SSA)-based separation method. The extracted
    components can afterwards be recombined into meaningful sources using
    :func:`.signal_recompose`.

    Parameters
    -----------
    signal : Union[list, np.array, pd.Series]
        Vector of values.
    method : str
        The decomposition method. Can be one of ``"emd"`` or ``"ssa"``.
    n_components : int
        Number of components to extract. Only used for ``"ssa"`` method. If ``None``, will default
        to 50.
    **kwargs
        Other arguments passed to other functions.

    Returns
    -------
    Array
        Components of the decomposed signal.

    See Also
    --------
    signal_recompose

    Examples
    --------
    .. ipython:: python

        import neurokit2 as nk

        # Create complex signal
        signal = nk.signal_simulate(duration=10, frequency=1, noise=0.01)  # High freq
        signal += 3 * nk.signal_simulate(duration=10, frequency=3, noise=0.01)  # Higher freq
        signal += 3 * np.linspace(0, 2, len(signal))  # Add baseline and trend
        signal += 2 * nk.signal_simulate(duration=10, frequency=0.1, noise=0)

        # Example 1: Using the EMD method
        components = nk.signal_decompose(signal, method="emd")

        # Example 2: Using the SSA method
        components = nk.signal_decompose(signal, method="ssa", n_components=5)

    """
    # Dispatch on the (case-insensitive) method name
    selected = method.lower()
    if selected == "emd":
        return _signal_decompose_emd(signal, **kwargs)
    if selected == "ssa":
        return _signal_decompose_ssa(signal, n_components=n_components)
    raise ValueError(
        "NeuroKit error: signal_decompose(): 'method' should be one of 'emd' or 'ssa'."
    )
# =============================================================================
# Singular spectrum analysis (SSA)
# =============================================================================
def _signal_decompose_ssa(signal, n_components=None):
    """Singular spectrum analysis (SSA)-based signal separation method.

    SSA decomposes a time series into a set of summable components that are grouped
    together and interpreted as trend, periodicity and noise.

    References
    ----------
    - https://www.kaggle.com/jdarcy/introducing-ssa-for-time-series-decomposition

    """
    # Sanitize input
    signal = as_vector(signal)
    n_samples = len(signal)

    # Window length of the trajectory matrix (defaults to 50 for long-enough signals)
    if n_components is None:
        window = 50 if n_samples >= 100 else int(n_samples / 2)
    else:
        window = n_components
    if not 2 <= window <= n_samples / 2:
        raise ValueError("`n_components` must be in the interval [2, len(signal)/2].")

    # Number of lagged subseries (columns of the trajectory matrix)
    n_columns = n_samples - window + 1

    # Embed the series in a trajectory (Hankel) matrix: each column is a lagged subseries
    trajectory = np.column_stack(
        [signal[start : start + window] for start in range(n_columns)]
    )

    # Number of elementary components to reconstruct
    rank = np.linalg.matrix_rank(trajectory)

    # Singular value decomposition of the trajectory matrix
    left, singular_values, right_t = np.linalg.svd(trajectory, full_matrices=False)

    components = np.zeros((n_samples, rank))
    for idx in range(rank):
        # Elementary matrix for this singular triple (not stored beyond the loop body)
        elementary = singular_values[idx] * np.outer(left[:, idx], right_t[idx, :])
        # Diagonal averaging (Hankelization) maps the matrix back to a 1-D series
        flipped = elementary[::-1]
        components[:, idx] = [
            flipped.diagonal(offset).mean()
            for offset in range(-flipped.shape[0] + 1, flipped.shape[1])
        ]

    # One component per row
    return components.T
# =============================================================================
# ICA
# =============================================================================
# import sklearn.decomposition
# def _signal_decompose_scica(signal, n_components=3, **kwargs):
# # sanitize input
# signal = as_vector(signal)
#
# # Single-channel ICA (SCICA)
# if len(signal.shape) == 1:
# signal = signal.reshape(-1, 1)
#
# c = sklearn.decomposition.FastICA(n_components=n_components, **kwargs).fit_transform(signal)
# =============================================================================
# Empirical Mode Decomposition (EMD)
# =============================================================================
def _signal_decompose_emd(signal, ensemble=False, **kwargs):
    """(Ensemble) Empirical Mode Decomposition into intrinsic mode functions (IMFs).

    References
    ------------
    - http://perso.ens-lyon.fr/patrick.flandrin/CSDATrendfiltering.pdf
    - https://github.com/laszukdawid/PyEMD
    - https://towardsdatascience.com/decomposing-signal-using-empirical-mode-decomposition-algorithm-explanation-for-dummy-93a93304c541 # noqa: E501

    """
    # PyEMD is an optional dependency, imported lazily
    try:
        import PyEMD
    except ImportError as e:
        raise ImportError(
            "NeuroKit error: _signal_decompose_emd(): the 'PyEMD' module is required for this"
            " function to run. Please install it first (`pip install EMD-signal`).",
        ) from e

    # Plain EMD vs. Ensemble EMD (EEMD)
    if ensemble is False:
        decomposer = PyEMD.EMD(extrema_detection="parabol", **kwargs)
        return decomposer.emd(signal, **kwargs)
    decomposer = PyEMD.EEMD(extrema_detection="parabol", **kwargs)
    return decomposer.eemd(signal, **kwargs)
| 6,075 | 31.148148 | 148 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_interpolate.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.interpolate
from ..misc import NeuroKitWarning
def signal_interpolate(
    x_values, y_values=None, x_new=None, method="quadratic", fill_value=None
):
    """**Interpolate a signal**
    Interpolate a signal using different methods.
    Parameters
    ----------
    x_values : Union[list, np.array, pd.Series]
        The samples corresponding to the values to be interpolated.
    y_values : Union[list, np.array, pd.Series]
        The values to be interpolated. If not provided, any NaNs in the x_values
        will be interpolated with :func:`_signal_interpolate_nan`,
        considering the x_values as equally spaced.
    x_new : Union[list, np.array, pd.Series] or int
        The samples at which to interpolate the y_values. Samples before the first value in x_values
        or after the last value in x_values will be extrapolated. If an integer is passed, nex_x
        will be considered as the desired length of the interpolated signal between the first and
        the last values of x_values. No extrapolation will be done for values before or after the
        first and the last values of x_values.
    method : str
        Method of interpolation. Can be ``"linear"``, ``"nearest"``, ``"zero"``, ``"slinear"``,
        ``"quadratic"``, ``"cubic"``, ``"previous"``, ``"next"``, ``"monotone_cubic"``, or ``"akima"``.
        The methods ``"zero"``, ``"slinear"``,``"quadratic"`` and ``"cubic"`` refer to a spline
        interpolation of zeroth, first, second or third order; whereas ``"previous"`` and
        ``"next"`` simply return the previous or next value of the point. An integer specifying the
        order of the spline interpolator to use.
        See `here <https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.
        PchipInterpolator.html>`_ for details on the ``"monotone_cubic"`` method.
        See `here <https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.
        Akima1DInterpolator.html>`_ for details on the ``"akima"`` method.
    fill_value : float or tuple or str
        If a ndarray (or float), this value will be used to fill in for
        requested points outside of the data range.
        If a two-element tuple, then the first element is used as a fill value
        for x_new < x[0] and the second element is used for x_new > x[-1].
        If "extrapolate", then points outside the data range will be extrapolated.
        If not provided, then the default is ([y_values[0]], [y_values[-1]]).
    Returns
    -------
    array
        Vector of interpolated samples.
    See Also
    --------
    signal_resample
    Examples
    --------
    .. ipython:: python
        import numpy as np
        import neurokit2 as nk
        import matplotlib.pyplot as plt
        # Generate Simulated Signal
        signal = nk.signal_simulate(duration=2, sampling_rate=10)
        # We want to interpolate to 2000 samples
        x_values = np.linspace(0, 2000, num=len(signal), endpoint=False)
        x_new = np.linspace(0, 2000, num=2000, endpoint=False)
        # Visualize all interpolation methods
        @savefig p_signal_interpolate1.png scale=100%
        nk.signal_plot([
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="zero"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="linear"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="quadratic"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="cubic"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="previous"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="next"),
            nk.signal_interpolate(x_values, signal, x_new=x_new, method="monotone_cubic")
        ], labels = ["Zero", "Linear", "Quadratic", "Cubic", "Previous", "Next", "Monotone Cubic"])
        # Add original data points
        plt.scatter(x_values, signal, label="original datapoints", zorder=3)
        @suppress
        plt.close()
    """
    # Sanity checks
    if x_values is None:
        raise ValueError(
            "NeuroKit error: signal_interpolate(): x_values must be provided."
        )
    if y_values is None:
        # for interpolating NaNs: x_values is then treated as the signal itself,
        # assumed to be evenly spaced
        return _signal_interpolate_nan(x_values, method=method, fill_value=fill_value)
    if isinstance(x_values, pd.Series):
        x_values = np.squeeze(x_values.values)
    if isinstance(x_new, pd.Series):
        x_new = np.squeeze(x_new.values)
    if len(x_values) != len(y_values):
        raise ValueError("x_values and y_values must be of the same length.")
    if isinstance(x_new, int):
        # An integer x_new is a desired output length: resample between the first and
        # last x_values (no extrapolation happens on this path)
        if len(x_values) == x_new:
            return y_values
        x_new = np.linspace(x_values[0], x_values[-1], x_new)
    else:
        # if x_values is identical to x_new, no need for interpolation
        if np.array_equal(x_values, x_new):
            return y_values
        elif np.any(x_values[1:] == x_values[:-1]):
            # Duplicate x positions would break the interpolators below, so collapse
            # them to unique positions by averaging their y values
            warn(
                "Duplicate x values detected. Averaging their corresponding y values.",
                category=NeuroKitWarning,
            )
            x_values, y_values = _signal_interpolate_average_duplicates(
                x_values, y_values
            )
    # If only one value, return a constant signal
    if len(x_values) == 1:
        return np.ones(len(x_new)) * y_values[0]
    if method == "monotone_cubic":
        # PCHIP: shape-preserving piecewise cubic; its cubic extrapolation is patched
        # to constant fill further below
        interpolation_function = scipy.interpolate.PchipInterpolator(
            x_values, y_values, extrapolate=True
        )
    elif method == "akima":
        # NOTE(review): `fill_value` is silently ignored for this method — confirm intended
        interpolation_function = scipy.interpolate.Akima1DInterpolator(
            x_values, y_values
        )
    else:
        if fill_value is None:
            # Default: constant extrapolation using the edge y values
            fill_value = ([y_values[0]], [y_values[-1]])
        interpolation_function = scipy.interpolate.interp1d(
            x_values,
            y_values,
            kind=method,
            bounds_error=False,
            fill_value=fill_value,
        )
    interpolated = interpolation_function(x_new)
    if method == "monotone_cubic" and fill_value != "extrapolate":
        # Find the index of the new x value that is closest to the first original x value
        first_index = np.argmin(np.abs(x_new - x_values[0]))
        # Find the index of the new x value that is closest to the last original x value
        last_index = np.argmin(np.abs(x_new - x_values[-1]))
        if fill_value is None:
            # Swap out the cubic extrapolation of out-of-bounds segments generated by
            # scipy.interpolate.PchipInterpolator for constant extrapolation akin to the behavior of
            # scipy.interpolate.interp1d with fill_value=([y_values[0]], [y_values[-1]].
            fill_value = ([interpolated[first_index]], [interpolated[last_index]])
        elif isinstance(fill_value, float) or isinstance(fill_value, int):
            # if only a single integer or float is provided as a fill value, format as a tuple
            fill_value = ([fill_value], [fill_value])
        interpolated[:first_index] = fill_value[0]
        interpolated[last_index + 1 :] = fill_value[1]
    return interpolated
def _signal_interpolate_nan(values, method="quadratic", fill_value=None):
if np.any(np.isnan(values)):
# assume that values are evenly spaced
# x_new corresponds to the indices of all values, including missing
x_new = np.arange(len(values))
not_missing = np.where(np.invert(np.isnan(values)))[0]
# remove the missing values
y_values = values[not_missing]
# x_values corresponds to the indices of only non-missing values
x_values = x_new[not_missing]
# interpolate to get the values at the indices where they are missing
return signal_interpolate(
x_values=x_values,
y_values=y_values,
x_new=x_new,
method=method,
fill_value=fill_value,
)
else:
# if there are no missing values, return original values
return values
def _signal_interpolate_average_duplicates(x_values, y_values):
unique_x, indices = np.unique(x_values, return_inverse=True)
mean_y = np.bincount(indices, weights=y_values) / np.bincount(indices)
return unique_x, mean_y
| 8,348 | 40.745 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_changepoints.py | import numpy as np
from ..events import events_plot
from ..misc import as_vector
from .signal_plot import signal_plot
def signal_changepoints(signal, change="meanvar", penalty=None, show=False):
    """**Change Point Detection**

    Only the PELT method is implemented for now.

    Parameters
    -----------
    signal : Union[list, np.array, pd.Series]
        Vector of values.
    change : str
        Can be one of ``"meanvar"`` (default), ``"mean"`` or ``"var"``.
    penalty : float
        The algorithm penalty. Defaults to ``np.log(len(signal))``.
    show : bool
        Defaults to ``False``.

    Returns
    -------
    Array
        Values indicating the samples at which the changepoints occur.
    Fig
        Figure of plot of signal with markers of changepoints.

    Examples
    --------
    .. ipython:: python

        import neurokit2 as nk

        signal = nk.emg_simulate(burst_number=3)
        nk.signal_changepoints(signal, change="var", show=True)

    References
    ----------
    * Killick, R., Fearnhead, P., & Eckley, I. A. (2012). Optimal detection of changepoints with a
      linear computational cost. Journal of the American Statistical Association, 107(500), 1590-1598.

    """
    vector = as_vector(signal)
    changepoints = _signal_changepoints_pelt(vector, change=change, penalty=penalty)

    if show is True:
        # Mark the changepoints on the signal; fall back to a plain plot when none were found
        if len(changepoints) == 0:
            signal_plot(vector)
        else:
            events_plot(changepoints, vector)

    return changepoints
def _signal_changepoints_pelt(signal, change="meanvar", penalty=None):
    """PELT algorithm to find change points in a signal.
    Adapted from: https://github.com/ruipgil/changepy https://github.com/deepcharles/ruptures
    https://github.com/STOR-i/Changepoints.jl https://github.com/rkillick/changepoint/
    """
    # Initialize
    length = len(signal)
    # Default penalty per Killick et al. (2012): log(n)
    if penalty is None:
        penalty = np.log(length)  # pylint: disable=E1111
    # Pick the segment-cost closure matching the kind of change to detect
    if change.lower() == "var":
        cost = _signal_changepoints_cost_var(signal)
    elif change.lower() == "mean":
        cost = _signal_changepoints_cost_mean(signal)
    else:
        cost = _signal_changepoints_cost_meanvar(signal)
    # Run algorithm
    # F[t]: optimal cost of segmenting signal[:t]; R: pruned candidate set;
    # candidates[t]: last changepoint before t on the optimal path
    F = np.zeros(length + 1)
    R = np.array([0], dtype=int)
    candidates = np.zeros(length + 1, dtype=int)
    F[0] = -penalty  # pylint: disable=E1130
    for tstar in range(2, length + 1):
        cpt_cands = R
        # Cost of the segment (candidate, tstar] for every surviving candidate
        seg_costs = np.array([cost(cpt_cands[i], tstar) for i in range(len(cpt_cands))])
        F_cost = F[cpt_cands] + seg_costs
        F[tstar] = np.nanmin(F_cost) + penalty
        tau = np.nanargmin(F_cost)
        candidates[tstar] = cpt_cands[tau]
        # PELT pruning: drop candidates that can never be optimal again
        ineq_prune = [val < F[tstar] for val in F_cost]
        R = [cpt_cands[j] for j, val in enumerate(ineq_prune) if val]
        R.append(tstar - 1)
        R = np.array(R, dtype=int)
    # NOTE(review): `candidates[candidates]` indexes the array by its own values
    # (one backtracking step applied everywhere) rather than taking the unique
    # candidate positions directly — looks suspicious; confirm against the
    # reference PELT backtracking before changing.
    changepoints = np.sort(np.unique(candidates[candidates]))
    # Position 0 is the sentinel "no changepoint", not a real changepoint
    changepoints = changepoints[changepoints > 0]
    return changepoints
# =============================================================================
# Cost functions
# =============================================================================
def _signal_changepoints_cost_mean(signal):
"""Cost function for a normally distributed signal with a changing mean."""
i_variance_2 = 1 / (np.var(signal) ** 2)
cmm = [0.0]
cmm.extend(np.cumsum(signal))
cmm2 = [0.0]
cmm2.extend(np.cumsum(np.abs(signal)))
def cost(start, end):
cmm2_diff = cmm2[end] - cmm2[start]
cmm_diff = pow(cmm[end] - cmm[start], 2)
i_diff = end - start
diff = cmm2_diff - cmm_diff
return (diff / i_diff) * i_variance_2
return cost
def _signal_changepoints_cost_var(signal):
"""Cost function for a normally distributed signal with a changing variance."""
cumm = [0.0]
cumm.extend(np.cumsum(np.power(np.abs(signal - np.mean(signal)), 2)))
def cost(s, t):
dist = float(t - s)
diff = cumm[t] - cumm[s]
return dist * np.log(diff / dist)
return cost
def _signal_changepoints_cost_meanvar(signal):
"""Cost function for a normally distributed signal with a changing mean and variance."""
signal = np.hstack(([0.0], np.array(signal)))
cumm = np.cumsum(signal)
cumm_sq = np.cumsum([val ** 2 for val in signal])
def cost(s, t):
ts_i = 1.0 / (t - s)
mu = (cumm[t] - cumm[s]) * ts_i
sig = (cumm_sq[t] - cumm_sq[s]) * ts_i - mu ** 2
sig_i = 1.0 / sig
if sig <= 0:
return np.nan
else:
return (
(t - s) * np.log(sig)
+ (cumm_sq[t] - cumm_sq[s]) * sig_i
- 2 * (cumm[t] - cumm[s]) * mu * sig_i
+ ((t - s) * mu ** 2) * sig_i
)
return cost
| 4,929 | 28.878788 | 102 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_rate.py | # -*- coding: utf-8 -*-
from .signal_period import signal_period
def signal_rate(
    peaks, sampling_rate=1000, desired_length=None, interpolation_method="monotone_cubic"
):
    """**Compute Signal Rate**

    Calculate the signal rate (per minute) from a series of peaks. This is a generic
    function that works for any series of peaks (i.e., it is not specific to a
    particular type of signal). The rate is computed as ``60 / period``, where the
    period is the time between peaks (see :func:`.signal_period`).

    .. note:: This function is implemented under :func:`.signal_rate`, but it is also
       re-exported under different names, such as :func:`.ecg_rate` or :func:`.rsp_rate`.
       The aliases are provided for consistency.

    Parameters
    ----------
    peaks : Union[list, np.array, pd.DataFrame, pd.Series, dict]
        The samples at which the peaks occur. If an array is passed in, it is assumed that it was
        obtained with :func:`.signal_findpeaks`. If a DataFrame is passed in, it is assumed it is
        of the same length as the input signal in which occurrences of R-peaks are marked as "1",
        with such containers obtained with e.g., :func:.`ecg_findpeaks` or :func:`.rsp_findpeaks`.
    sampling_rate : int
        The sampling frequency of the signal that contains peaks (in Hz, i.e., samples/second).
        Defaults to 1000.
    desired_length : int
        If left at the default None, the returned rated will have the same number of elements as
        ``peaks``. If set to a value larger than the sample at which the last peak occurs in the
        signal (i.e., ``peaks[-1]``), the returned rate will be interpolated between peaks over
        ``desired_length`` samples. To interpolate the rate over the entire duration of the signal,
        set ``desired_length`` to the number of samples in the signal. Cannot be smaller than or
        equal to the sample at which the last peak occurs in the signal. Defaults to ``None``.
    interpolation_method : str
        Method used to interpolate the rate between peaks. See :func:`.signal_interpolate`.
        ``"monotone_cubic"`` is chosen as the default interpolation method since it ensures monotone
        interpolation between data points (i.e., it prevents physiologically implausible
        "overshoots" or "undershoots" in the y-direction). In contrast, the widely used cubic
        spline interpolation does not ensure monotonicity.

    Returns
    -------
    array
        A vector containing the rate (peaks per minute).

    See Also
    --------
    signal_period, signal_findpeaks, signal_fixpeaks, signal_plot

    Examples
    --------
    .. ipython:: python

        import numpy as np
        import neurokit2 as nk

        # Create signal of varying frequency
        freq = nk.signal_simulate(1, frequency=1)
        signal = np.sin((freq).cumsum() * 0.5)

        # Find peaks
        info = nk.signal_findpeaks(signal)

        # Compute rate interpolated over the whole signal
        rate = nk.signal_rate(peaks=info["Peaks"], desired_length=len(signal))

    """
    # Rate (per minute) is simply the inverse of the inter-peak period (in seconds)
    period = signal_period(peaks, sampling_rate, desired_length, interpolation_method)
    return 60 / period
| 3,728 | 42.360465 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_phase.py | # -*- coding: utf-8 -*-
import itertools
import numpy as np
import scipy.signal
def signal_phase(signal, method="radians"):
    """**Compute the phase of the signal**

    The real phase has the property to rotate uniformly, leading to a uniform distribution
    density. The prophase typically doesn't fulfill this property. The following functions
    applies a nonlinear transformation to the phase signal that makes its distribution
    exactly uniform. If a binary vector is provided (containing 2 unique values), the
    function will compute the phase of completion of each phase as denoted by each value.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        The values in which the phase is expressed. Can be ``"radians"`` (default),
        ``"degrees"`` (for values between 0 and 360) or ``"percents"`` (for values between 0 and 1).

    See Also
    --------
    signal_filter, signal_zerocrossings, signal_findpeaks

    Returns
    -------
    array
        A vector containing the phase of the signal, between 0 and 2*pi.

    Examples
    --------
    .. ipython:: python

        import neurokit2 as nk

        signal = nk.signal_simulate(duration=10)
        phase = nk.signal_phase(signal)

        # Percentage of completion of two phases
        binary = nk.signal_binarize(nk.signal_simulate(duration=10))
        phase = nk.signal_phase(binary, method="percents")

    """
    values = np.array(signal)
    # A signal with exactly two distinct non-NaN values is treated as binary
    if len(set(values[~np.isnan(values)])) == 2:
        phase = _signal_phase_binary(signal)
    else:
        phase = _signal_phase_prophase(signal)

    # Convert from radians to the requested unit
    unit = method.lower()
    if unit in ["degree", "degrees"]:
        phase = np.rad2deg(phase)
    if unit in ["perc", "percent", "percents", "percentage"]:
        phase = np.rad2deg(phase) / 360

    return phase
# =============================================================================
# Method
# =============================================================================
def _signal_phase_binary(signal):
phase = itertools.chain.from_iterable(np.linspace(0, 1, sum([1 for i in v])) for _, v in itertools.groupby(signal))
phase = np.array(list(phase))
# Convert to radiant
phase = np.deg2rad(phase * 360)
return phase
def _signal_phase_prophase(signal):
pi2 = 2.0 * np.pi
# Get pro-phase
prophase = np.mod(np.angle(scipy.signal.hilbert(signal)), pi2)
# Transform a pro-phase to a real phase
sort_idx = np.argsort(prophase) # Get a sorting index
reverse_idx = np.argsort(sort_idx) # Get index reversing sorting
tht = pi2 * np.arange(prophase.size) / (prophase.size) # Set up sorted real phase
phase = tht[reverse_idx] # Reverse the sorting of it
return phase
| 3,303 | 30.769231 | 119 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_fillmissing.py | import pandas as pd
def signal_fillmissing(signal, method="both"):
    """**Handle missing values**

    Fill missing values in a signal using specific methods.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        The method to use to fill missing values. Can be one of ``"forward"``, ``"backward"``,
        or ``"both"``. The default is ``"both"``.

    Returns
    -------
    signal

    Examples
    --------
    .. ipython:: python

        import neurokit2 as nk

        signal = [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, np.nan]
        nk.signal_fillmissing(signal, method="forward")
        nk.signal_fillmissing(signal, method="backward")
        nk.signal_fillmissing(signal, method="both")

    """
    # Use Series.ffill()/bfill() instead of the deprecated fillna(method=...)
    # (deprecated since pandas 2.1, removed in pandas 3.0); behavior is identical.
    if method in ["forward", "forwards", "ffill", "both"]:
        signal = pd.Series(signal).ffill().values
    if method in ["backward", "backwards", "back", "bfill", "both"]:
        signal = pd.Series(signal).bfill().values
    return signal
| 1,105 | 27.358974 | 94 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_formatpeaks.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def signal_formatpeaks(info, desired_length, peak_indices=None, other_indices=None):
    """**Format Peaks**

    Transforms a peak-info dict to a signal of given length.
    """
    if peak_indices is None:
        peak_indices = [name for name in info.keys() if "Peaks" in name]

    occurrence_markers = ["Peak", "Onset", "Offset", "Trough", "Recovery"]
    columns = {}
    for feature, values in info.items():
        is_occurrence = feature != "SCR_RecoveryTime" and any(
            marker in str(feature) for marker in occurrence_markers
        )
        if is_occurrence:
            # Occurrence features become binary 0/1 marker columns
            column = _signal_from_indices(values, desired_length, 1)
            columns[feature] = column.astype("int64")
        elif "RecoveryTime" in feature:
            # Recovery-time values are placed at the recovery indices
            other_indices, values = _signal_sanitize_indices(other_indices, values)
            columns[feature] = _signal_from_indices(other_indices, desired_length, values)
        else:
            # All remaining value features are placed at the peak indices
            peak_indices, values = _signal_sanitize_indices(peak_indices, values)
            columns[feature] = _signal_from_indices(peak_indices, desired_length, values)

    return pd.DataFrame(columns)
# =============================================================================
# Internals
# =============================================================================
def _signal_sanitize_indices(indices, values):
# Check if nan in indices
if np.sum(np.isnan(indices)) > 0:
to_drop = np.argwhere(np.isnan(indices))[0]
for i in to_drop:
indices = np.delete(indices, i)
values = np.delete(values, i)
return indices, values
def _signal_from_indices(indices, desired_length=None, value=1):
"""**Generates array of 0 and given values at given indices**
Used in *_findpeaks to transform vectors of peak indices to signal.
"""
signal = pd.Series(np.zeros(desired_length, dtype=float))
if isinstance(indices, list) and (not indices): # skip empty lists
return signal
if isinstance(indices, np.ndarray) and (indices.size == 0): # skip empty arrays
return signal
# Force indices as int
if isinstance(indices[0], float):
indices = indices[~np.isnan(indices)].astype(int)
# Appending single value
if isinstance(value, (int, float)):
signal[indices] = value
# Appending multiple values
elif isinstance(value, (np.ndarray, list)):
for index, val in zip(indices, value):
signal.iloc[index] = val
else:
if len(value) != len(indices):
raise ValueError(
"NeuroKit error: _signal_from_indices(): The number of values "
"is different from the number of indices."
)
signal[indices] = value
return signal
def _signal_formatpeaks_sanitize(peaks, key="Peaks"): # FIXME: private function not used in this module
# Attempt to retrieve column.
if isinstance(peaks, tuple):
if isinstance(peaks[0], (dict, pd.DataFrame)):
peaks = peaks[0]
elif isinstance(peaks[1], dict):
peaks = peaks[1]
else:
peaks = peaks[0]
if isinstance(peaks, pd.DataFrame):
col = [col for col in peaks.columns if key in col]
if len(col) == 0:
raise TypeError(
"NeuroKit error: _signal_formatpeaks(): wrong type of input ",
"provided. Please provide indices of peaks.",
)
peaks_signal = peaks[col[0]].values
peaks = np.where(peaks_signal == 1)[0]
if isinstance(peaks, dict):
col = [col for col in list(peaks.keys()) if key in col]
if len(col) == 0:
raise TypeError(
"NeuroKit error: _signal_formatpeaks(): wrong type of input ",
"provided. Please provide indices of peaks.",
)
peaks = peaks[col[0]]
# Retrieve length.
try: # Detect if single peak
len(peaks)
except TypeError:
peaks = np.array([peaks])
return peaks
| 4,331 | 33.110236 | 104 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_zerocrossings.py | # -*- coding: utf-8 -*-
import numpy as np
def signal_zerocrossings(signal, direction="both"):
    """**Locate the indices where the signal crosses zero**

    Note that when the signal crosses zero between two points, the first index is returned.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    direction : str
        Direction in which the signal crosses zero, can be ``"positive"``, ``"negative"`` or
        ``"both"`` (default).

    Returns
    -------
    array
        Vector containing the indices of zero crossings.

    Examples
    --------
    .. ipython:: python

        import neurokit2 as nk

        signal = nk.signal_simulate(duration=5)
        zeros = nk.signal_zerocrossings(signal)

        # Only upward or downward zerocrossings
        up = nk.signal_zerocrossings(signal, direction="up")
        down = nk.signal_zerocrossings(signal, direction="down")

    """
    # A nonzero difference of consecutive signs marks a crossing at the first index
    sign_changes = np.diff(np.sign(signal))
    if direction in ["positive", "up"]:
        return np.where(sign_changes > 0)[0]
    if direction in ["negative", "down"]:
        return np.where(sign_changes < 0)[0]
    return np.nonzero(np.abs(sign_changes) > 0)[0]
| 1,564 | 26.946429 | 92 | py |
NeuroKit | NeuroKit-master/neurokit2/signal/signal_smooth.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage
import scipy.signal
from ..stats import fit_loess
def signal_smooth(signal, method="convolution", kernel="boxzen", size=10, alpha=0.1):
    """**Signal smoothing**

    Signal smoothing can be achieved using either the convolution of a filter kernel with the
    input signal to compute the smoothed signal (Smith, 1997) or a LOESS regression.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        Can be one of ``"convolution"`` (default) or ``"loess"``.
    kernel : str
        Only used if ``method`` is ``"convolution"``. Name of the kernel window to use
        (a non-string kernel raises a ``TypeError``). Can be one of ``"median"``,
        ``"boxzen"``, ``"boxcar"``, ``"triang"``, ``"blackman"``, ``"hamming"``, ``"hann"``,
        ``"bartlett"``, ``"flattop"``, ``"parzen"``, ``"bohman"``, ``"blackmanharris"``,
        ``"nuttall"``, ``"barthann"``, ``"kaiser"`` (needs beta), ``"gaussian"`` (needs std),
        ``"general_gaussian"`` (needs power width), ``"slepian"`` (needs width) or
        ``"chebwin"`` (needs attenuation).
    size : int
        Only used if ``method`` is ``"convolution"``. Size of the kernel.
    alpha : float
        Only used if ``method`` is ``"loess"``. The parameter which controls the degree of
        smoothing.

    Returns
    -------
    array
        Smoothed signal.

    See Also
    ---------
    fit_loess

    References
    ----------
    * Smith, S. W. (1997). The scientist and engineer's guide to digital signal processing.

    """
    if isinstance(signal, pd.Series):
        signal = signal.values

    length = len(signal)

    if not isinstance(kernel, str):
        raise TypeError("NeuroKit error: signal_smooth(): 'kernel' should be a string.")

    # The kernel must fit inside the signal
    size = int(size)
    if size > length or size < 1:
        raise TypeError(
            "NeuroKit error: signal_smooth(): 'size' should be between 1 and length of the signal."
        )

    method = method.lower()

    # LOESS regression
    if method in ["loess", "lowess"]:
        smoothed, _ = fit_loess(signal, alpha=alpha)
        return smoothed

    # Convolution-based smoothing
    if kernel == "boxcar":
        # Faster than np.convolve (as done in _signal_smoothing) thanks to
        # optimizations enabled by the uniform boxcar kernel shape.
        return scipy.ndimage.uniform_filter1d(signal, size, mode="nearest")
    if kernel == "boxzen":
        # Hybrid: a boxcar pass followed by a parzen pass
        first_pass = scipy.ndimage.uniform_filter1d(signal, size, mode="nearest")
        return _signal_smoothing(first_pass, kernel="parzen", size=size)
    if kernel == "median":
        return _signal_smoothing_median(signal, size)
    return _signal_smoothing(signal, kernel=kernel, size=size)
# =============================================================================
# Internals
# =============================================================================
def _signal_smoothing_median(signal, size=5):
# Enforce odd kernel size.
if size % 2 == 0:
size += 1
smoothed = scipy.signal.medfilt(signal, kernel_size=size)
return smoothed
def _signal_smoothing(signal, kernel, size=5):
# Get window.
window = scipy.signal.get_window(kernel, size)
w = window / window.sum()
# Extend signal edges to avoid boundary effects.
x = np.concatenate((signal[0] * np.ones(size), signal, signal[-1] * np.ones(size)))
# Compute moving average.
smoothed = np.convolve(w, x, mode="same")
smoothed = smoothed[size:-size]
return smoothed
| 5,358 | 33.352564 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_dynamic.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..markov import transition_matrix
from ..misc import as_vector
def microstates_dynamic(microstates, show=False):
    """**Dynamic Properties of Microstates**

    This computes statistics related to the transition pattern (based on the
    :func:`.transition_matrix`).

    .. note::

        This function does not compute all the features available under the markov submodule.
        Don't hesitate to open an issue to help us test and decide what features to include.

    Parameters
    ----------
    microstates : np.ndarray
        The topographic maps of the found unique microstates which has a shape of n_channels x
        n_states, generated from :func:`.nk.microstates_segment`.
    show : bool
        Show the transition matrix.

    Returns
    -------
    DataFrame
        Dynamic properties of microstates.

    See Also
    --------
    .transition_matrix, .microstates_static

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      microstates = [0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, 2, 2]

      @savefig p_microstates_dynamic1.png scale=100%
      nk.microstates_dynamic(microstates, show=True)
      @suppress
      plt.close()

    """
    # See https://github.com/Frederic-vW/eeg_microstates
    # and https://github.com/maximtrp/mchmm
    # for other implementations
    out = {}

    # Try retrieving info: dict outputs of microstates_segment() carry the
    # state sequence under the "Sequence" key.
    if isinstance(microstates, dict):
        microstates = microstates["Sequence"]

    # Sanitize: coerce to a flat 1D numpy vector.
    microstates = as_vector(microstates)

    # Transition matrix (observed state-to-state transition probabilities).
    tm, info = transition_matrix(microstates, show=show)

    # Flatten the matrix into scalar features named "<from>_to_<to>".
    for row in tm.index:
        for col in tm.columns:
            out[str(tm.loc[row].name) + "_to_" + str(tm[col].name)] = tm[col][row]

    df = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("Microstate_")

    # TODO:
    # * Chi-square test statistics of the observed microstates against the expected microstates
    # * Symmetry test statistics of the observed microstates against the expected microstates

    return df
| 2,081 | 26.394737 | 95 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_peaks.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.signal
from ..eeg import eeg_gfp
def microstates_peaks(eeg, gfp=None, sampling_rate=None, distance_between=0.01, **kwargs):
    """**Find peaks of stability using the GFP**

    Peaks in the global field power (GFP) are often used to find microstates.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    gfp : list
        The Global Field Power (GFP). If ``None``, will be obtained via :func:`.eeg_gfp`.
        Can also be ``"all"``/``False`` (return every sample index), ``"gfp"``/``True``
        (use GFP peaks), or a number (count or fraction of evenly spaced samples).
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    distance_between : float
        The minimum distance (this value is to be multiplied by the sampling rate) between peaks.
        The default is 0.01, which corresponds to 10 ms (as suggested in the Microstate EEGlab
        toolbox).
    **kwargs
        Additional arguments to be passed to :func:`.eeg_gfp`.

    Returns
    -------
    peaks : array
        The index of the sample where GFP peaks occur.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("filt-0-40_raw")
      gfp = nk.eeg_gfp(eeg)

      peaks1 = nk.microstates_peaks(eeg, distance_between=0.01)
      peaks2 = nk.microstates_peaks(eeg, distance_between=0.05)
      peaks3 = nk.microstates_peaks(eeg, distance_between=0.10)

      @savefig p_microstates_peaks1.png scale=100%
      nk.events_plot([peaks1[peaks1 < 500], peaks2[peaks2 < 500], peaks3[peaks3 < 500]], gfp[0:500])
      @suppress
      plt.close()

    See Also
    --------
    .eeg_gfp

    """
    # MNE objects: extract the (channels, times) array and the sampling rate.
    if isinstance(eeg, (pd.DataFrame, np.ndarray)) is False:
        sampling_rate = eeg.info["sfreq"]
        eeg = eeg.get_data()

    if sampling_rate is None:
        raise ValueError(
            "NeuroKit error: microstates_peaks(): The sampling_rate is requested ",
            "for this function to run. Please provide it as an argument.",
        )

    # If we don't want to rely on peaks but take uniformly spaced samples
    # (used in microstates_clustering).
    # BUG FIX: exclude booleans explicitly — `bool` is a subclass of `int`, so
    # `gfp=False` ("all samples") used to be treated as the fraction 0 and
    # returned an empty array, while `gfp=True` bypassed GFP-peak detection.
    if isinstance(gfp, (int, float)) and not isinstance(gfp, bool):
        if gfp <= 1:  # If fraction
            gfp = int(gfp * len(eeg[0, :]))
        return np.linspace(0, len(eeg[0, :]), gfp, endpoint=False, dtype=int)

    # Deal with string inputs
    if isinstance(gfp, str):
        if gfp == "all":
            gfp = False
        elif gfp == "gfp":
            gfp = True
        else:
            raise ValueError(
                "The `gfp` argument was not understood.",
            )

    # If we want ALL the indices.
    # BUG FIX: use the number of time samples (len(eeg[0, :]), consistent with
    # the branches above); `len(eeg)` is the number of *channels* for a
    # (channels, times) array.
    if gfp is False:
        return np.arange(len(eeg[0, :]))

    # if gfp is True or gfp is None: compute the GFP and find its peaks.
    gfp = eeg_gfp(eeg, **kwargs)
    peaks = _microstates_peaks_gfp(
        gfp=gfp, sampling_rate=sampling_rate, distance_between=distance_between
    )
    return peaks
# =============================================================================
# Methods
# =============================================================================
def _microstates_peaks_gfp(gfp=None, sampling_rate=None, distance_between=0.01):
minimum_separation = int(distance_between * sampling_rate) # 10 ms (Microstate EEGlab toolbox)
if minimum_separation == 0:
minimum_separation = 1
peaks_gfp, _ = scipy.signal.find_peaks(gfp, distance=minimum_separation)
# Alternative methods: (doesn't work best IMO)
# peaks_gfp = scipy.signal.find_peaks_cwt(gfp, np.arange(minimum_separation, int(0.2 * sampling_rate)))
# peaks_gfp = scipy.signal.argrelmax(gfp)[0]
# Use DISS
# diss = nk.eeg_diss(eeg, gfp)
# peaks_diss, _ = scipy.signal.find_peaks(diss, distance=minimum_separation)
return peaks_gfp
| 3,783 | 30.798319 | 110 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_complexity.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..complexity import entropy_shannon
from ..misc import as_vector
def microstates_complexity(microstates, show=False):
    """**Complexity of Microstates Pattern**

    This computes the complexity related to the sequence of the microstates pattern.

    .. note::

        This function does not compute all the features available under the complexity
        submodule. Don't hesitate to open an issue to help us test and decide what features to
        include.

    Parameters
    ----------
    microstates : np.ndarray
        The topographic maps of the found unique microstates which has a shape of n_channels x
        n_states, generated from :func:`.nk.microstates_segment`.
    show : bool
        Show the transition matrix.

    Returns
    -------
    DataFrame
        Complexity indices of the microstates sequence (currently the empirical
        Shannon entropy), with a ``"Microstates_"`` column prefix.

    See Also
    --------
    .microstates_dynamic, .microstates_static

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      microstates = [0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, 2, 2]

      @savefig p_microstates_complexity1.png scale=100%
      nk.microstates_complexity(microstates, show=True)
      @suppress
      plt.close()

    """
    # Try retrieving info: dict outputs of microstates_segment() carry the
    # state sequence under the "Sequence" key.
    if isinstance(microstates, dict):
        microstates = microstates["Sequence"]

    # Sanitize: coerce to a flat 1D numpy vector.
    microstates = as_vector(microstates)

    # Initialize output container
    out = {}

    # Empirical Shannon entropy of the state sequence.
    out["Entropy_Shannon"], _ = entropy_shannon(microstates, show=show)

    # Maximum entropy given the number of different states
    # h_max = np.log2(len(np.unique(microstates)))

    df = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("Microstates_")
    return df
| 1,692 | 26.754098 | 94 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_clean.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..eeg import eeg_gfp
from ..stats import standardize
from .microstates_peaks import microstates_peaks
def microstates_clean(
    eeg,
    sampling_rate=None,
    train="gfp",
    standardize_eeg=True,
    normalize=True,
    gfp_method="l1",
    **kwargs
):
    """**Prepare eeg data for microstates extraction**

    This is mostly a utility function to get the data ready for :func:`.microstates_segment`.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to ``None``.
    train : Union[str, int, float]
        Method for selecting the timepoints how which to train the clustering algorithm. Can be
        ``"gfp"`` to use the peaks found in the global field power (GFP). Can be
        ``"all"``, in which case it will select all the datapoints. It can also be a number or a
        ratio, in which case it will select the corresponding number of evenly spread data points.
        For instance, ``train=10`` will select 10 equally spaced datapoints, whereas ``train=0.5``
        will select half the data. See :func:`.microstates_peaks`.
    standardize_eeg : bool
        Standardize (z-score) the data across time using :func:`.nk.standardize`, prior to GFP
        extraction and running k-means algorithm. Defaults to ``True``.
    normalize : bool
        Normalize (divide each data point by the maximum value of the data) across time prior to
        GFP extraction and running k-means algorithm. Defaults to ``True``.
    gfp_method : str
        The GFP extraction method to be passed into :func:`.nk.eeg_gfp`. Can be either ``"l1"``
        (default) or ``"l2"`` to use the L1 or L2 norm.
    **kwargs : optional
        Other arguments.

    Returns
    -------
    eeg : array
        The eeg data which has a shape of channels x samples.
    peaks : array
        The index of the sample where GFP peaks occur.
    gfp : array
        The global field power of each sample point in the data.
    info : dict
        Other information pertaining to the eeg raw object.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("filt-0-40_raw")
      eeg, peaks, gfp, info = nk.microstates_clean(eeg, train="gfp")

    See Also
    --------
    .eeg_gfp, microstates_peaks, .microstates_segment

    """
    # If MNE object: extract data array, sampling rate, and channel info.
    if isinstance(eeg, (pd.DataFrame, np.ndarray)) is False:
        sampling_rate = eeg.info["sfreq"]
        info = eeg.info
        eeg = eeg.get_data()
    else:
        info = None

    # Normalization (z-score each channel across time).
    if standardize_eeg is True:
        eeg = standardize(eeg, **kwargs)

    # Get GFP (one value per time sample).
    gfp = eeg_gfp(
        eeg, sampling_rate=sampling_rate, normalize=normalize, method=gfp_method, **kwargs
    )

    # If train is a custom vector (assume it's the pre-computed peaks)
    if isinstance(train, (list, np.ndarray)):
        peaks = train
    # Find peaks in the global field power (GFP) or take a given amount of indices
    else:
        peaks = microstates_peaks(eeg, gfp=train, sampling_rate=sampling_rate, **kwargs)

    return eeg, peaks, gfp, info
| 3,329 | 32.636364 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_findnumber.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import find_knee, progress_bar
from ..stats.cluster_quality import _cluster_quality_dispersion
from .microstates_segment import microstates_segment
def microstates_findnumber(
    eeg, n_max=12, method="GEV", clustering_method="kmod", show=False, verbose=True, **kwargs
):
    """**Estimate optimal number of microstates**

    Computes statistical indices useful for estimating the optimal number of microstates using a

    * **Global Explained Variance (GEV)**: measures how similar each EEG sample is to its assigned
      microstate class. The **higher** (closer to 1), the better the segmentation.
    * **Krzanowski-Lai Criterion (KL)**: measures quality of microstate segmentation based on the
      dispersion measure (average distance between samples in the same microstate class); the
      **larger the KL value**, the better the segmentation. Note that KL is not a polarity
      invariant measure and thus might not be a suitable measure of fit for polarity-invariant
      methods such as modified K-means and (T)AAHC.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    n_max : int
        Maximum number of microstates to try. A higher number leads to a longer process.
    method : str
        The method to use to estimate the optimal number of microstates. Can be "GEV" (the elbow,
        detected using :func:`find_knee`), or "KL" (the location of the maximum value).
    show : bool
        Plot indices normalized on the same scale.
    verbose : bool
        Print progress bar.
    **kwargs
        Arguments to be passed to :func:`.microstates_segment`

    Returns
    -------
    int
        Optimal number of microstates.
    DataFrame
        The different quality scores for each number of microstates.

    See Also
    ---------
    microstates_segment

    Examples
    ------------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("filt-0-40_raw").crop(0, 5)

      # Estimate optimal number
      @savefig p_microstates_findnumber1.png scale=100%
      n_clusters, results = nk.microstates_findnumber(eeg, n_max=8, show=True)
      @suppress
      plt.close()

    """
    # Retrieve data as a (channels, times) array.
    if isinstance(eeg, (pd.DataFrame, np.ndarray)) is False:
        data = eeg.get_data()
    elif isinstance(eeg, pd.DataFrame):
        data = eeg.values
    else:
        data = eeg.copy()

    # Loop accross number and get indices of fit
    n_channel, _ = data.shape

    # KL bookkeeping: dispersion of the previous k and the previous difference.
    dispersion_previous = np.nan
    dispersion_diff_previous = np.nan
    results = []
    # progress_bar yields (index, item) pairs; idx runs from 0 for k=2.
    for idx, n_microstates in progress_bar(range(2, n_max + 1), verbose=verbose):
        out = microstates_segment(
            eeg, n_microstates=n_microstates, method=clustering_method, **kwargs
        )

        segmentation = out["Sequence"]

        rez = {}
        rez["Score_GEV"] = out["GEV"]

        # Dispersion (average within-cluster distance).
        dispersion = _cluster_quality_dispersion(
            data.T, clustering=segmentation, n_clusters=n_microstates
        )
        # Dispersion(k), penalized by the number of clusters.
        dispersion_current = dispersion * n_microstates ** (2 / n_channel)
        # dispersion_diff(k) = dispersion(k-1) - dispersion(k); NaN on first pass.
        dispersion_diff = dispersion_previous - dispersion_current

        # Calculate KL criterion
        # KL(k) = abs(dispersion_diff(k) / dispersion_diff(k+1))
        # It needs the *next* difference, so it is written into the previous
        # iteration's entry rather than the current one.
        rez["KL_Criterion"] = np.nan
        if idx not in [0]:
            results[idx - 1]["KL_Criterion"] = np.abs(dispersion_diff_previous / dispersion_diff)

        # Update for next round
        dispersion_previous = dispersion_current.copy()
        dispersion_diff_previous = dispersion_diff.copy()

        results.append(rez)
    results = pd.DataFrame(results)

    # Estimate optimal number (row 0 corresponds to k=2, hence the +2 offset).
    if method == "KL":
        n_clusters = int(np.argmax(results["KL_Criterion"]) + 2)
    else:
        n_clusters = find_knee(results["Score_GEV"], np.rint(np.arange(2, n_max + 1)))

    if show is True:
        # Min-max normalize all scores onto [0, 1] so they share one axis.
        normalized = (results - results.min()) / (results.max() - results.min())
        normalized["n_Clusters"] = np.rint(np.arange(2, n_max + 1))
        normalized.columns = normalized.columns.str.replace("Score", "Normalized")
        normalized.plot(x="n_Clusters")
        plt.axvline(n_clusters, color="red", linestyle="--", label=f"Method: {method}")
        plt.legend()
        plt.xticks(np.rint(np.arange(2, n_max + 1)))
        plt.xlabel("Number of microstates")
        plt.ylabel("Normalized score")
        plt.title("Optimal number of microstates")
    return n_clusters, results
| 4,713 | 34.179104 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_segment.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state
from ..stats import cluster
from ..stats.cluster_quality import _cluster_quality_gev
from .microstates_classify import microstates_classify
from .microstates_clean import microstates_clean
def microstates_segment(
    eeg,
    n_microstates=4,
    train="gfp",
    method="kmod",
    gfp_method="l1",
    sampling_rate=None,
    standardize_eeg=False,
    n_runs=10,
    max_iterations=1000,
    criterion="gev",
    random_state=None,
    optimize=False,
    **kwargs
):
    """**Segment M/EEG signal into Microstates**

    This functions identifies and extracts the microstates from an M/EEG signal using different
    clustering algorithms. Several runs of the clustering algorithm are performed, using different
    random initializations. The run that resulted in the best segmentation, as measured by global
    explained variance (GEV), is used.

    * **kmod**: Modified k-means algorithm. It differs from a traditional k-means in that it is
      *polarity-invariant*, which means that samples with EEG potential distribution maps that are
      similar but have opposite polarity will be assigned the *same* microstate class.
    * **kmeans**: Normal k-means.
    * **kmedoids**: k-medoids clustering, a more stable version of k-means.
    * **pca**: Principal Component Analysis.
    * **ica**: Independent Component Analysis.
    * **aahc**: Atomize and Agglomerate Hierarchical Clustering. Computationally heavy.

    The microstates clustering is typically fitted on the EEG data at the global field power (GFP)
    peaks to maximize the signal to noise ratio and focus on moments of high global neuronal
    synchronization. It is assumed that the topography around a GFP peak remains stable and is at
    its highest signal-to-noise ratio at the GFP peak.

    Parameters
    ----------
    eeg : np.ndarray
        An array (channels, times) of M/EEG data or a Raw or Epochs object from MNE.
    n_microstates : int
        The number of unique microstates to find. Defaults to 4.
    train : Union[str, int, float]
        Method for selecting the timepoints how which to train the clustering algorithm. Can be
        ``"gfp"`` to use the peaks found in the Peaks in the global field power. Can be ``"all"``,
        in which case it will select all the datapoints. It can also be a number or a ratio, in
        which case it will select the corresponding number of evenly spread data points. For
        instance, ``train=10`` will select 10 equally spaced datapoints, whereas ``train=0.5`` will
        select half the data. See :func:`.microstates_peaks`.
    method : str
        The algorithm for clustering. Can be one of ``"kmod"`` (default), ``"kmeans"``,
        ``"kmedoids"``, ``"pca"``, ``"ica"``, or ``"aahc"``.
    gfp_method : str
        The GFP extraction method, can be either ``"l1"`` (default) or ``"l2"`` to use the L1 or L2
        norm. See :func:`nk.eeg_gfp` for more details.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    standardize_eeg : bool
        Standardized (z-score) the data across time prior to GFP extraction
        using :func:`.nk.standardize`.
    n_runs : int
        The number of random initializations to use for the k-means algorithm. The best fitting
        segmentation across all initializations is used. Defaults to 10.
    max_iterations : int
        The maximum number of iterations to perform in the k-means algorithm.
        Defaults to 1000.
    criterion : str
        Which criterion to use to choose the best run for modified k-means algorithm,
        can be ``"gev"`` (default) which selects the best run based on the highest global explained
        variance, or ``"cv"`` which selects the best run based on the lowest cross-validation
        criterion. See :func:`.nk.microstates_gev` and :func:`.nk.microstates_crossvalidation` for
        more details respectively.
    random_state : Union[int, numpy.random.RandomState]
        The seed or ``RandomState`` for the random number generator. Defaults to ``None``, in which
        case a different seed is chosen each time this function is called.
    optimize : bool
        Optimized method in Poulsen et al. (2018) for the *k*-means modified method.

    Returns
    -------
    dict
        Contains information about the segmented microstates:

        * **Microstates**: The topographic maps of the found unique microstates which has a shape of
          n_channels x n_states
        * **Sequence**: For each sample, the index of the microstate to which the sample has been
          assigned.
        * **GEV**: The global explained variance of the microstates.
        * **GFP**: The global field power of the data.
        * **Cross-Validation Criterion**: The cross-validation value of the iteration.
        * **Explained Variance**: The explained variance of each cluster map generated by PCA.
        * **Total Explained Variance**: The total explained variance of the cluster maps generated
          by PCA.

    Examples
    ---------
    * **Example 1**: k-means Algorithm

    .. ipython:: python

      import neurokit2 as nk

      # Download data
      eeg = nk.mne_data("filt-0-40_raw")
      # Average rereference and band-pass filtering
      eeg = nk.eeg_rereference(eeg, 'average').filter(1, 30, verbose=False)

      # Cluster microstates
      microstates = nk.microstates_segment(eeg, method="kmeans")

      @savefig p_microstate_segment1.png scale=100%
      nk.microstates_plot(microstates , epoch=(500, 750))
      @suppress
      plt.close()

    See Also
    --------
    eeg_gfp, microstates_peaks, microstates_gev, microstates_crossvalidation, microstates_classify

    References
    ----------
    * Poulsen, A. T., Pedroni, A., Langer, N., & Hansen, L. K. (2018). Microstate EEGlab toolbox:
      an introductory guide. BioRxiv, (289850).
    * Pascual-Marqui, R. D., Michel, C. M., & Lehmann, D. (1995). Segmentation of brain
      electrical activity into microstates: model estimation and validation. IEEE Transactions
      on Biomedical Engineering.

    """
    # Sanitize input (extract data array, training indices, GFP and MNE info).
    data, indices, gfp, info_mne = microstates_clean(
        eeg,
        train=train,
        sampling_rate=sampling_rate,
        standardize_eeg=standardize_eeg,
        gfp_method=gfp_method,
        **kwargs
    )

    # Run clustering algorithm
    if method in ["kmods", "kmod", "kmeans modified", "modified kmeans"]:
        # Seed the random generator for reproducible results
        rng = check_random_state(random_state)

        # Generate one random integer for each run
        random_state = rng.choice(n_runs * 1000, n_runs, replace=False)

        # Initialize values
        gev = 0
        cv = np.inf
        microstates = None
        segmentation = None
        polarity = None
        # ROBUSTNESS: gev_all must exist even if no run improves on the
        # initial criterion (it was previously only assigned inside the loop).
        gev_all = None
        info = None

        # Do several runs of the k-means algorithm, keep track of the best segmentation.
        for run in range(n_runs):
            # Run clustering on subset of data
            _, _, current_info = cluster(
                data[:, indices].T,
                method="kmod",
                n_clusters=n_microstates,
                random_state=random_state[run],
                max_iterations=max_iterations,
                threshold=1e-6,
                optimize=optimize,
            )
            current_microstates = current_info["clusters_normalized"]
            current_residual = current_info["residual"]

            # Run segmentation on the whole dataset
            s, p, g, g_all = _microstates_segment_runsegmentation(
                data, current_microstates, gfp, n_microstates=n_microstates
            )

            if criterion == "gev":
                # If better (i.e., higher GEV), keep this segmentation
                if g > gev:
                    microstates, segmentation, polarity, gev = (
                        current_microstates,
                        s,
                        p,
                        g,
                    )
                    gev_all = g_all
                    info = current_info
            elif criterion == "cv":
                # If better (i.e., lower CV), keep this segmentation
                # R2 and residual are proportional, use residual instead of R2
                if current_residual < cv:
                    microstates, segmentation, polarity = current_microstates, s, p
                    cv, gev, gev_all = current_residual, g, g_all
                    # BUG FIX: was `info -= current_info` (a typo), which would
                    # raise a TypeError on the dict; keep this run's info.
                    info = current_info

    else:
        # Run clustering algorithm on subset
        _, microstates, info = cluster(
            data[:, indices].T, method=method, n_clusters=n_microstates, random_state=random_state, **kwargs
        )

        # Run segmentation on the whole dataset
        segmentation, polarity, gev, gev_all = _microstates_segment_runsegmentation(
            data, microstates, gfp, n_microstates=n_microstates
        )

    # Reorder the states (and relabel the sequence accordingly).
    segmentation, microstates = microstates_classify(segmentation, microstates)

    # Clustering quality
    # quality = cluster_quality(data, segmentation, clusters=microstates, info=info, n_random=10, sd=gfp)

    # Output
    info = {
        "Microstates": microstates,
        "Sequence": segmentation,
        "GEV": gev,
        "GEV_per_microstate": gev_all,
        "GFP": gfp,
        "Polarity": polarity,
        "Info_algorithm": info,
        "Info": info_mne,
    }

    return info
# =============================================================================
# Utils
# =============================================================================
def _microstates_segment_runsegmentation(data, microstates, gfp, n_microstates):
    """Assign every sample of `data` (channels x times) to a microstate map
    and compute the goodness of fit of the segmentation."""
    # Find microstate corresponding to each datapoint: the absolute activation
    # (spatial projection) makes the assignment polarity-invariant.
    activation = microstates.dot(data)
    segmentation = np.argmax(np.abs(activation), axis=0)
    # Sign of the winning activation records the polarity of each sample.
    polarity = np.sign(np.choose(segmentation, activation))

    # Get Global Explained Variance (GEV), overall and per microstate.
    gev, gev_all = _cluster_quality_gev(
        data.T, microstates, segmentation, sd=gfp, n_clusters=n_microstates
    )
    return segmentation, polarity, gev, gev_all
| 11,449 | 38.482759 | 108 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/__init__.py | """Submodule for NeuroKit."""
from .microstates_clean import microstates_clean
from .microstates_peaks import microstates_peaks
from .microstates_static import microstates_static
from .microstates_dynamic import microstates_dynamic
from .microstates_complexity import microstates_complexity
from .microstates_segment import microstates_segment
from .microstates_classify import microstates_classify
from .microstates_plot import microstates_plot
from .microstates_findnumber import microstates_findnumber
__all__ = ["microstates_clean",
"microstates_peaks",
"microstates_static",
"microstates_dynamic",
"microstates_complexity",
"microstates_segment",
"microstates_classify",
"microstates_plot",
"microstates_findnumber"]
| 813 | 34.391304 | 58 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_classify.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import replace
def microstates_classify(segmentation, microstates):
    """**Reorder (sort) the microstates (experimental)**

    Reorder (sort) the microstates (experimental) based on the pattern of values in the vector of
    channels (thus, depends on how channels are ordered).

    Parameters
    ----------
    segmentation : Union[np.array, dict]
        Vector containing the segmentation.
    microstates : Union[np.array, dict]
        Array of microstates maps. Defaults to ``None``.

    Returns
    -------
    segmentation, microstates
        Tuple containing re-ordered input.

    Examples
    ------------
    .. ipython:: python

      import neurokit2 as nk

      eeg = nk.mne_data("filt-0-40_raw").filter(1, 35, verbose=False)
      eeg = nk.eeg_rereference(eeg, 'average')

      # Original order
      out = nk.microstates_segment(eeg)

      @savefig p_microstates_classify.png scale=100%
      nk.microstates_plot(out, gfp=out["GFP"][0:100])
      @suppress
      plt.close()

      # Reorder
      out = nk.microstates_classify(out["Sequence"], out["Microstates"])

    """
    # Reorder the maps, then relabel the sequence to match the new positions.
    new_order = _microstates_sort(microstates)
    microstates = microstates[new_order]
    # NOTE(review): this maps {new_position: old_label}; relabeling the
    # sequence to the new positions would normally need the inverse mapping
    # {old_label: new_position} — verify against `replace()`'s semantics.
    replacement = dict(enumerate(new_order))
    segmentation = replace(segmentation, replacement)

    return segmentation, microstates
# =============================================================================
# Methods
# =============================================================================
def _microstates_sort(microstates):
n_states = len(microstates)
order_original = np.arange(n_states)
# For each state, get linear and quadratic coefficient
coefs_quadratic = np.zeros(n_states)
coefs_linear = np.zeros(n_states)
for i in order_original:
state = microstates[i, :]
_, coefs_linear[i], coefs_quadratic[i] = np.polyfit(
state, np.arange(len(state)), 2
)
# For each state, which is the biggest trend, linear or quadratic
order_quad = order_original[np.abs(coefs_linear) <= np.abs(coefs_quadratic)]
order_lin = order_original[np.abs(coefs_linear) > np.abs(coefs_quadratic)]
# Reorder each
order_quad = order_quad[np.argsort(coefs_quadratic[order_quad])]
order_lin = order_lin[np.argsort(coefs_linear[order_lin])]
new_order = np.concatenate([order_quad, order_lin])
return new_order
| 2,460 | 28.650602 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_static.py | # -*- coding: utf-8 -*-
import matplotlib.gridspec
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from ..misc import as_vector, find_groups
def microstates_static(microstates, sampling_rate=1000, show=False):
    """**Static Properties of Microstates**

    The duration of each microstate is also referred to as the Ratio of Time Covered (RTT) in
    some microstates publications.

    Parameters
    ----------
    microstates : np.ndarray
        The sequence.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second). Defaults to 1000.
    show : bool
        Returns a plot of microstate duration, proportion, and lifetime distribution if ``True``.

    Returns
    -------
    DataFrame
        Values of microstates proportion, lifetime distribution and duration (median, mean, and
        their averages).

    See Also
    --------
    .microstates_dynamic

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      microstates = [0, 0, 0, 1, 1, 2, 2, 2, 2, 1, 0, 0, 2, 2]

      @savefig p_microstates_static1.png scale=100%
      nk.microstates_static(microstates, sampling_rate=100, show=True)
      @suppress
      plt.close()

    """
    # Try retrieving info: dict outputs of microstates_segment() carry the
    # state sequence under the "Sequence" key.
    if isinstance(microstates, dict):
        microstates = microstates["Sequence"]

    # Sanitize: coerce to a flat 1D numpy vector.
    microstates = as_vector(microstates)

    # Initialize output container
    out = {}

    # Proportions + lifetime distributions, then per-visit durations.
    out, lifetimes = _microstates_prevalence(microstates, out=out)
    out, durations, types = _microstates_duration(microstates, sampling_rate=sampling_rate, out=out)

    if show is True:
        # Layout: durations on the bottom row, proportion and lifetime on top.
        fig = plt.figure(constrained_layout=False)
        spec = matplotlib.gridspec.GridSpec(
            ncols=2, nrows=2, height_ratios=[1, 1], width_ratios=[1, 1]
        )
        ax0 = fig.add_subplot(spec[1, :])
        ax1 = fig.add_subplot(spec[0, :-1])
        ax2 = fig.add_subplot(spec[0, 1])
        _microstates_duration_plot(durations, types, ax=ax0)
        _microstates_prevalence_plot(microstates, lifetimes, out, ax_prop=ax1, ax_distrib=ax2)
        plt.tight_layout()

    df = pd.DataFrame.from_dict(out, orient="index").T.add_prefix("Microstate_")
    return df
# =============================================================================
# Duration
# =============================================================================
def _microstates_duration(microstates, sampling_rate=1000, out=None):
    """Compute per-state and overall visit-duration statistics (in seconds).

    Returns the updated `out` dict plus the per-visit `durations` and `types`
    arrays (one entry per consecutive run of identical states).
    """
    states = np.unique(microstates)

    if out is None:
        out = {}

    # Consecutive runs of identical states; each run is one microstate visit.
    runs = find_groups(microstates)

    # Per-visit state label and duration in seconds.
    types = np.array([run[0] for run in runs], dtype=float)
    durations = np.array([len(run) / sampling_rate for run in runs], dtype=float)

    # Summarize visit durations per state, then across all visits.
    for state in states:
        mask = types == state
        out[str(state) + "_DurationMean"] = np.mean(durations[mask])
        out[str(state) + "_DurationMedian"] = np.median(durations[mask])
    out["Average_DurationMean"] = np.mean(durations)
    out["Average_DurationMedian"] = np.median(durations)

    return out, durations, types
def _microstates_duration_plot(durations, types, ax=None):
    """Horizontal violin plot of run durations, one violin per state."""
    # Group durations per state for the violin plot
    unique_states = np.unique(types)
    grouped = [durations[types == state] for state in unique_states]

    # Create an axis if none was provided
    if ax is None:
        fig, ax = plt.subplots(ncols=1)
    else:
        fig = None

    parts = ax.violinplot(
        grouped, positions=range(len(unique_states)), vert=False, showmedians=True, showextrema=False
    )

    # Style the violin components (bodies are lists, stat lines are single artists)
    for name in parts:
        component = parts[name]
        if isinstance(component, list):
            for body in component:
                # body.set_facecolor("#FF5722")
                body.set_edgecolor("white")
        else:
            component.set_edgecolor("black")

    ax.set_xlabel("Duration (s)")
    ax.set_title("Duration")
    ax.set_yticks(range(len(unique_states)))

    return fig
# =============================================================================
# Prevalence
# =============================================================================
def _microstates_prevalence(microstates, out=None):
    """Compute the proportion of time spent in each state and its lifetime distribution."""
    if out is None:
        out = {}

    n_samples = len(microstates)

    # Proportion of samples spent in each state
    for state in np.unique(microstates):
        out[str(state) + "_Proportion"] = np.sum(microstates == state) / n_samples

    # Lifetime distribution
    out, lifetimes = _microstates_lifetime(microstates, out=out)

    return out, lifetimes
def _microstates_prevalence_plot(microstates, lifetimes, out, ax_prop=None, ax_distrib=None):
    """Plot per-state proportions (bar chart) and lifetime distributions (lines)."""
    unique_states = np.unique(microstates)

    # Create axes only when neither one was provided
    if ax_prop is None and ax_distrib is None:
        fig, (ax_prop, ax_distrib) = plt.subplots(ncols=2)
    else:
        fig = None

    for state in unique_states:
        ax_prop.bar(state, out[str(state) + "_Proportion"])
        ax_distrib.plot(lifetimes[state], label=str(state))
    plt.legend()

    ax_prop.set_xticks(range(len(unique_states)))
    ax_prop.set_title("Proportion")
    ax_distrib.set_title("Lifetime Distribution")

    return fig
# Lifetime distribution
# ------------------------
def _microstates_lifetime(microstates, out=None):
"""Based on https://github.com/Frederic-vW/eeg_microstates
Compute the lifetime distributions for each symbol in a symbolic sequence X with ns symbols.
"""
n = len(microstates)
states = np.unique(microstates)
tau_dict = {s: [] for s in states}
s = microstates[0] # current symbol
tau = 1.0 # current lifetime
for i in range(n):
if microstates[i] == s:
tau += 1.0
else:
tau_dict[s].append(tau)
s = microstates[i]
tau = 1.0
tau_dict[s].append(tau) # last state
# Initialize empty distributions with max lifetime for each symbol
lifetimes = {}
for s in states:
lifetimes[s] = np.zeros(int(np.max(tau_dict[s])))
# Lifetime distributions
for s in states:
for j in range(len(tau_dict[s])):
tau = tau_dict[s][j]
lifetimes[s][int(tau) - 1] += 1.0
# Get Area under curve (AUCs)
if out is None:
out = {}
for s in states:
out[str(s) + "_LifetimeDistribution"] = np.trapz(lifetimes[s])
return out, lifetimes
| 6,410 | 27.878378 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/microstates/microstates_plot.py | # -*- coding: utf-8 -*-
import matplotlib
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
def microstates_plot(microstates, segmentation=None, gfp=None, info=None, epoch=None):
    """**Visualize Microstates**

    Plots the clustered microstates.

    Parameters
    ----------
    microstates : np.ndarray
        The topographic maps of the found unique microstates which has a shape of n_channels x
        n_states, generated from :func:`.microstates_segment`.
    segmentation : array
        For each sample, the index of the microstate to which the sample has been assigned.
        Defaults to ``None``.
    gfp : array
        The range of global field power (GFP) values to visualize. Defaults to ``None``, which will
        plot the whole range of GFP values.
    info : dict
        The dictionary output of :func:`.nk.microstates_segment`. Defaults to ``None``.
    epoch : tuple
        A sub-epoch of GFP to plot in the shape ``(beginning sample, end sample)``.

    Returns
    -------
    fig
        Plot of prototypical microstates maps and GFP across time.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      eeg = nk.mne_data("filt-0-40_raw")
      # Average rereference and band-pass filtering
      eeg = nk.eeg_rereference(eeg, 'average').filter(1, 30, verbose=False)

      # Cluster microstates
      microstates = nk.microstates_segment(eeg, method='kmeans', n_microstates=4)

      @savefig p_microstates_plot1.png scale=100%
      nk.microstates_plot(microstates, epoch=(500, 750))
      @suppress
      plt.close()

    """
    try:
        import mne
    except ImportError as e:
        raise ImportError(
            "The 'mne' module is required for this function to run. ",
            "Please install it first (`pip install mne`).",
        ) from e

    # Try retrieving info from a microstates_segment() output dict
    if isinstance(microstates, dict):
        if info is None and "Info" in microstates.keys():
            info = microstates["Info"]
        if gfp is None and "GFP" in microstates.keys():
            gfp = microstates["GFP"]
        segmentation = microstates["Sequence"]
        microstates = microstates["Microstates"]

    # Sanity checks
    if gfp is None:
        raise ValueError("GFP data must be passed to 'gfp' in order to plot the segmentation.")

    # Prepare figure layout: top row = one topomap per state, bottom row = GFP
    n = len(microstates)
    fig, ax = plt.subplot_mosaic([np.arange(n), ["GFP"] * n])

    # Plot topomaps -----------------------------------------------------------
    # (loop variable renamed from `map`, which shadowed the builtin)
    for i, microstate_map in enumerate(microstates):
        _, _ = mne.viz.plot_topomap(microstate_map, info, axes=ax[i], ch_type="eeg", show=False)
        ax[i].set_title(f"{i}")

    # Plot GFP ---------------------------------------------------------------
    # Get x-axis in seconds if the sampling frequency is known, else in samples
    if info is not None and "sfreq" in info.keys():
        times = np.arange(len(gfp)) / info["sfreq"]
    else:
        times = np.arange(len(gfp))

    # Correct lengths so that segmentation and GFP can be overlaid
    if len(segmentation) > len(gfp):
        segmentation = segmentation[0 : len(gfp)]
    if len(segmentation) < len(gfp):
        gfp = gfp[0 : len(segmentation)]

    if epoch is None:
        epoch = (0, len(gfp))

    cmap = plt.cm.get_cmap("plasma", n)
    # Plot the GFP line above the area
    ax["GFP"].plot(
        times[epoch[0] : epoch[1]], gfp[epoch[0] : epoch[1]], color="black", linewidth=0.5
    )
    # Fill the area under the GFP, one color per assigned microstate
    for state, color in zip(range(n), cmap.colors):
        ax["GFP"].fill_between(
            times[epoch[0] : epoch[1]],
            gfp[epoch[0] : epoch[1]],
            color=color,
            where=(segmentation == state)[epoch[0] : epoch[1]],
        )

    # Create a colorbar legend mapping colors to state indices
    norm = matplotlib.colors.Normalize(vmin=-0.5, vmax=n - 0.5)
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    fig.colorbar(sm, ax=ax["GFP"])
    ax["GFP"].set_yticks([])
    if info is not None and "sfreq" in info.keys():
        ax["GFP"].set_xlabel("Time (s)")
    else:
        ax["GFP"].set_xlabel("Sample")
    ax["GFP"].set_ylabel("Global Field Power (GFP)")
    ax["GFP"].set_title("Microstates Sequence")

    # Return the figure, as documented above (was previously not returned)
    return fig
| 4,154 | 31.460938 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_test_random.py | # -*- coding: utf-8 -*-
import pandas as pd
import scipy.stats
from .transition_matrix import _sanitize_tm_input
def markov_test_random(fm):
    """**Is the Markov process random?**

    Computes the expected (theoretical) transition matrix under the hypothesis
    that state appearances depend only on overall prevalence (i.e., the previous
    state has no influence on the next one). The observed frequency matrix is
    compared against this "random" matrix with a Chi2 test. A significant result
    (e.g., ``*p*-value < .05``) rejects the hypothesis that the process is
    random, suggesting that past states influence next states.

    Parameters
    ----------
    fm : pd.DataFrame
        A frequency matrix obtained from :func:`transition_matrix`.

    Returns
    -------
    dict
        Contains indices of the Chi2 test.

    See Also
    --------
    transition_matrix

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3]
      _, info = nk.transition_matrix(sequence)

      result = nk.markov_test_random(info["Occurrences"])
      result["Random_p"]

    """
    # Sanitize input (must be a frequency matrix, not probabilities)
    fm = _sanitize_tm_input(fm, probs=False)

    # Drop states that never occur as a starting state
    fm = fm.loc[(fm.sum(axis=1) != 0).values]

    out = {}

    # Theoretical transition counts if rows and columns were independent
    expected = scipy.stats.contingency.expected_freq(fm.values)
    out["Random_Matrix"] = pd.DataFrame(expected, index=fm.index, columns=fm.columns)

    # Chi-square goodness-of-fit between observed and expected counts
    chi2_stat, p_value = scipy.stats.chisquare(f_obs=fm, f_exp=out["Random_Matrix"], axis=None)

    out["Random_Chi2"] = chi2_stat
    out["Random_df"] = len(fm) * (len(fm) - 1) / 2
    out["Random_p"] = p_value
    return out
| 1,865 | 26.850746 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_simulate.py | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import check_random_state
from .transition_matrix import _sanitize_tm_input
def markov_simulate(tm, n=10, random_state=None):
    """**Markov Chain Simulation**

    Given a :func:`transition_matrix`, simulates the corresponding sequence of
    states (a discrete Markov chain).

    Parameters
    ----------
    tm : pd.DataFrame
        A probability matrix obtained from :func:`transition_matrix`.
    n : int
        Length of the simulated sequence.
    random_state : None, int, numpy.random.RandomState or numpy.random.Generator
        Seed for the random number generator. See for ``misc.check_random_state`` for further information.

    Returns
    -------
    np.ndarray
        Sequence of states.

    See Also
    --------
    transition_matrix

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3]
      tm, _ = nk.transition_matrix(sequence)

      x = nk.markov_simulate(tm, n=15)
      x

    """
    # Sanitize input
    tm = _sanitize_tm_input(tm)
    states = tm.columns.values

    # Begin from the state carrying the largest share of the transition mass
    start = np.argmax(tm.sum(axis=1) / tm.sum())

    # Seed the random generator for reproducible results
    rng = check_random_state(random_state)

    # Draw each state from the row of probabilities of the previous state
    indices = np.zeros(n, dtype=int)
    indices[0] = start
    for i in range(1, n):
        probabilities = tm.values[indices[i - 1]]
        indices[i] = rng.choice(len(probabilities), p=probabilities)

    return states[indices]
| 1,606 | 23.348485 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/transition_matrix.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import as_vector
def transition_matrix(sequence, order=1, adjust=True, show=False):
    """**Transition Matrix**

    A **Transition Matrix** (also known as a stochastic matrix or a **Markov matrix**) is a
    convenient way of representing and describing a sequence of (discrete) states, also known as
    **discrete Markov chains**. Each of its entries is a probability of transitioning from one
    state to the other.

    .. note::

        This function is fairly new and hasn't be tested extensively. Please help us by
        double-checking the code and letting us know if everything is correct.

    Parameters
    ----------
    sequence : Union[list, np.array, pd.Series]
        A list of discrete states.
    order : int
        The order of the Markov chain.
    adjust : bool
        If ``True``, the transition matrix will be adjusted to ensure that the sum of each row is
        equal to 1. This is useful when the transition matrix is used to represent a probability
        distribution.
    show : bool
        Displays the transition matrix heatmap.

    See Also
    --------
    markov_simulate, markov_test_random, markov_test_symmetry

    Returns
    -------
    pd.DataFrame
        The empirical (observed) transition matrix.
    dict
        A dictionary containing additional information, such as the Frequency Matrix (**fm**;
        accessible via the key ``"Occurrences"``), useful for some tests.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = ["A", "A", "C", "B", "B", "B", "C", "A", "A", "D"]

      @savefig p_transition_matrix1.png scale=100%
      tm, _ = nk.transition_matrix(sequence, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      tm

    In this example, the transition from D is unknown (it is the last element), resulting in an
    absence of transitioning probability. As this can cause issues, unknown probabilities are
    replaced by a uniform distribution, but this can be turned off using the ``adjust`` argument.

    .. ipython:: python

      tm, _ = nk.transition_matrix(sequence, adjust=False)
      tm

    Transition matrix of higher order

    .. ipython:: python

      sequence = ["A", "A", "A", "B", "A", "A", "B", "A", "A", "B"]
      tm, _ = nk.transition_matrix(sequence, order=2)
      tm

    """
    sequence = as_vector(sequence)

    # Observed states
    states = np.unique(sequence)
    n_states = len(states)

    # Count transitions: freqs[i, j, ...] counts each pattern of `order + 1`
    # consecutive states, with states mapped to their position in `states`
    freqs = np.zeros((n_states,) * (order + 1))
    for idx in zip(*[sequence[i:] for i in range(order + 1)]):
        idx = tuple([np.argwhere(states == k)[0][0] for k in idx])
        freqs[idx] += 1

    # Find rows containing only zeros (unknown transition, e.g., the last state)
    idx = freqs.sum(axis=-1) == 0
    # Fill them with a uniform count to avoid division by zero below
    freqs[idx, :] = 1

    # Convert counts to probabilities (each row sums to 1)
    tm = (freqs.T / freqs.sum(axis=-1)).T

    # Restore the zero counts in the frequency matrix
    freqs[idx, :] = 0
    # If no adjustment requested, revert the uniform probabilities to 0
    if adjust is False:
        tm[idx, :] = 0

    # Convert to DataFrame (only meaningful for first-order matrices)
    if order == 1:
        tm = pd.DataFrame(tm, index=states, columns=states)
        freqs = pd.DataFrame(freqs, index=states, columns=states)

    if show is True:
        if order > 1:
            raise ValueError(
                "Visualization of order > 1 not supported yet. "
                "Consider helping us to implement it!"
            )
        fig, ax = plt.subplots()
        ax.imshow(tm, cmap="Reds", interpolation="nearest")
        ax.set_xticks(np.arange(len(tm)))
        ax.set_yticks(np.arange(len(tm)))
        ax.set_xticklabels(tm.columns)
        ax.set_yticklabels(tm.index)

        # Annotate each cell with its probability, in a contrasting color
        for i, row in enumerate(tm.index):
            for j, col in enumerate(tm.columns):
                if tm.loc[row, col] > 0.5:
                    color = "white"
                else:
                    color = "black"
                ax.text(j, i, f"{tm.loc[row, col]:.2f}", ha="center", va="center", color=color)

        ax.set_title("Transition Matrix")
        fig.tight_layout()
    return tm, {"Occurrences": freqs, "States": states}
# =============================================================================
# Utils
# =============================================================================
def _sanitize_tm_input(tm, probs=True):
# If symmetric dataframe, then surely a transition matrix
if isinstance(tm, pd.DataFrame) and tm.shape[1] == tm.shape[0]:
if tm.values.max() > 1:
if probs is True:
raise ValueError(
"Transition matrix must be a probability matrix (all probabilities must be"
" < 1)."
)
else:
return tm
else:
if probs is True:
return tm
else:
raise ValueError(
"Transition matrix must be a frequency matrix containing counts and not"
" probabilities. Please pass the `info['Occurrences']` object instead of"
" the transition matrix."
)
# Otherwise, conver to TM
else:
return transition_matrix(tm)
# def transition_matrix_plot(tm):
# """Graph of Transition Matrix
# Abandoned for now because networkx gives ugly results. Please do help!
# """
# try:
# import networkx as nx
# except ImportError:
# raise ImportError(
# "NeuroKit error: transition_matrix_plot(): the 'networkx' module is required for this ",
# "function to run. Please install it first (`pip install networkx`).",
# )
# # create graph object
# G = nx.MultiDiGraph(tm)
# edge_labels = {}
# for col in tm.columns:
# for row in tm.index:
# G.add_edge(row, col, weight=tm.loc[row, col])
# edge_labels[(row, col)] = label = "{:.02f}".format(tm.loc[row, col])
# pos = nx.circular_layout(G)
# nx.draw_networkx_edges(G, pos, width=2.0, alpha=0.5)
# nx.draw_networkx_edge_labels(G, pos, edge_labels)
# nx.draw_networkx(G, pos)
| 6,349 | 31.070707 | 102 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_mixingtime.py | # -*- coding: utf-8 -*-
import numpy as np
from .transition_matrix import _sanitize_tm_input
def markov_mixingtime(tm):
    """**Markov Chain Mixing Time**

    The mixing time (also known as relaxation time) is the inverse of the spectral gap, the
    difference between the two largest eigenvalues of the transition matrix. It indicates how
    long a run takes to get near the stationary distribution (i.e., for convergence to happen).

    Parameters
    ----------
    tm : pd.DataFrame
        A transition matrix obtained from :func:`transition_matrix`.

    Returns
    -------
    float
        Mixing time of the Markov chain.

    See Also
    --------
    transition_matrix

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3]
      tm, _ = nk.transition_matrix(sequence)

      nk.markov_mixingtime(tm)

    References
    -----------
    * Levin, D. A., & Peres, Y. (2017). Markov chains and mixing times (Vol. 107). American
      Mathematical Society.

    """
    # Sanitize input
    tm = _sanitize_tm_input(tm)

    # Real parts of the eigenvalues, sorted in ascending order
    eigenvalues = np.sort(np.real(np.linalg.eigvals(tm)))

    # Spectral gap: distance between the largest and second-largest eigenvalues
    spectral_gap = eigenvalues[-1] - eigenvalues[-2]

    # Mixing (relaxation) time
    return 1.0 / spectral_gap
| 1,380 | 22.016667 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/__init__.py | """Submodule for NeuroKit."""
from .markov_mixingtime import markov_mixingtime
from .markov_simulate import markov_simulate
from .markov_test_homogeneity import markov_test_homogeneity
from .markov_test_markovity import markov_test_markovity
from .markov_test_random import markov_test_random
from .markov_test_symmetry import markov_test_symmetry
from .transition_matrix import transition_matrix
__all__ = [
"transition_matrix",
"markov_test_symmetry",
"markov_test_random",
"markov_test_homogeneity",
"markov_test_markovity",
"markov_simulate",
"markov_mixingtime",
]
| 600 | 29.05 | 60 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_test_homogeneity.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats
def markov_test_homogeneity(sequence, size=10):
    """**Is the Markov process homogeneous?**

    Performs a homogeneity test that tests the null hypothesis that the samples are
    homogeneous, i.e., from the same - but unspecified - population, against the alternative
    hypothesis that at least one pair of samples is from different populations.

    Parameters
    ----------
    sequence : Union[list, np.array, pd.Series]
        A list of discrete states. States can be arbitrary labels (e.g., strings or
        non-contiguous integers); they are mapped internally to indices.
    size : int
        The size of the non-overlapping windows to split the sequence.

    Returns
    -------
    dict
        Contains indices of the test.

    Raises
    ------
    ValueError
        If fewer than 5 blocks can be formed (``size`` too large for the sequence).

    See Also
    --------
    transition_matrix

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3]

      result = nk.markov_test_homogeneity(sequence, size=2)
      result["Homogeneity_p"]

    References
    ----------
    * Kullback, S., Kupperman, M., & Ku, H. H. (1962). Tests for contingency tables and Markov
      chains. Technometrics, 4(4), 573-608.

    """
    states = np.unique(sequence)
    n_states = len(states)
    n = len(sequence)

    r = int(np.floor(n / size))  # number of non-overlapping blocks
    if r < 5:
        raise ValueError("The size of the blocks is too high. Decrease the 'size' argument.")

    # Contingency counts: f_ijk[i, j, k] = transitions j -> k within block i
    f_ijk = np.zeros((r, n_states, n_states))
    f_ij = np.zeros((r, n_states))
    f_jk = np.zeros((n_states, n_states))
    f_j = np.zeros(n_states)

    # Count within-block transitions. States are mapped to their rank in the
    # sorted `states` array (np.searchsorted), so arbitrary labels are
    # supported, not only sequences whose states are exactly 0..n_states-1.
    for i in range(r):  # block index
        for ii in range(size - 1):  # position inside the current block
            j = np.searchsorted(states, sequence[i * size + ii])
            k = np.searchsorted(states, sequence[i * size + ii + 1])
            f_ijk[i, j, k] += 1.0
            f_ij[i, j] += 1.0
            f_jk[j, k] += 1.0
            f_j[j] += 1.0

    # Likelihood-ratio statistic for conditional homogeneity (Kullback et al., 1962)
    T = 0.0
    for i, j, k in np.ndindex(f_ijk.shape):
        f = f_ijk[i, j, k] * f_j[j] * f_ij[i, j] * f_jk[j, k]
        if f > 0:
            T += f_ijk[i, j, k] * np.log((f_ijk[i, j, k] * f_j[j]) / (f_ij[i, j] * f_jk[j, k]))

    out = {"Homogeneity_t": T * 2.0, "Homogeneity_df": (r - 1) * (n_states - 1) * n_states}
    out["Homogeneity_p"] = scipy.stats.chi2.sf(
        out["Homogeneity_t"], out["Homogeneity_df"], loc=0, scale=1
    )
    return out
| 2,506 | 29.204819 | 95 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_test_markovity.py | import numpy as np
import pandas as pd
import scipy.stats
from .transition_matrix import transition_matrix
def markov_test_markovity(sequence):
    """**Test of Markovity**

    The Markovity (also known as Markovianity) of a symbolic sequence.

    .. note::

        We would like to extend this to different orders (order 1, 2), but we lack the skills. If you
        are interested, please get in touch!

    Parameters
    ----------
    sequence : Union[list, np.array, pd.Series]
        A list of discrete states.

    See Also
    --------
    markov_test_random, markov_test_symmetry

    Returns
    -------
    dict
        Contains indices of the test.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3, 1]

      nk.markov_test_markovity(sequence)

    References
    ----------
    * Kullback, S., Kupperman, M., & Ku, H. H. (1962). Tests for contingency tables and Markov
      chains. Technometrics, 4(4), 573-608.

    """
    _, info = transition_matrix(sequence)

    # Extract the frequency matrix as a plain array
    occurrences = info["Occurrences"]
    if isinstance(occurrences, pd.DataFrame):
        fm = occurrences.values
    else:
        fm = occurrences

    k = len(fm)
    valid = fm != 0  # only non-zero cells contribute to the statistic

    # Likelihood-ratio terms: observed counts scaled by n over the product of marginals
    numerator = fm * len(sequence)
    row_totals = np.tile(fm.sum(axis=1), (k, 1)).T
    col_totals = np.tile(fm.sum(axis=0), (k, 1))
    denominator = row_totals * col_totals

    out = {
        "Markovity_t": 2 * np.sum(fm[valid] * np.log(numerator[valid] / denominator[valid])),
        "Markovity_df": (k - 1.0) * (k - 1.0),
    }

    # Chi2 test
    out["Markovity_p"] = scipy.stats.chi2.sf(
        out["Markovity_t"],
        out["Markovity_df"],
        loc=0,
        scale=1,
    )
    return out
# def testMarkov1(sequence, verbose=True):
# """Test first-order Markovianity of symbolic sequence X with ns symbols.
# Null hypothesis:
# first-order MC <=>
# p(X[t+1] | X[t]) = p(X[t+1] | X[t], X[t-1])
# cf. Kullback, Technometrics (1962), Tables 8.1, 8.2, 8.6.
# Args:
# x: symbolic sequence, symbols = [0, 1, 2, ...]
# ns: number of symbols
# alpha: significance level
# Returns:
# p: p-value of the Chi2 test for independence
# """
# sequence = [1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1]
# _, info = nk.transition_matrix(sequence, order=2)
# fm = info["Occurrences"]
# X = sequence
# ns = len(np.unique(X))
# n = len(X)
# f_ijk = np.zeros((ns, ns, ns))
# f_ij = np.zeros((ns, ns))
# f_jk = np.zeros((ns, ns))
# f_j = np.zeros(ns)
# for t in range(n - 2):
# i = X[t]
# j = X[t + 1]
# k = X[t + 2]
# f_ijk[i, j, k] += 1.0
# f_ij[i, j] += 1.0
# f_jk[j, k] += 1.0
# f_j[j] += 1.0
# T = 0.0
# for i, j, k in np.ndindex(f_ijk.shape):
# f = f_ijk[i][j][k] * f_j[j] * f_ij[i][j] * f_jk[j][k]
# if f > 0:
# num_ = f_ijk[i, j, k] * f_j[j]
# print(num_)
# den_ = f_ij[i, j] * f_jk[j, k]
# T += f_ijk[i, j, k] * np.log(num_ / den_)
# T *= 2.0
# df = ns * (ns - 1) * (ns - 1)
# # p = chi2test(T, df, alpha)
# p = scipy.stats.chi2.sf(T, df, loc=0, scale=1)
# if verbose:
# print(f"p: {p:.2e} | t: {T:.3f} | df: {df:.1f}")
# return p
| 3,341 | 25.52381 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/markov/markov_test_symmetry.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats
from .transition_matrix import _sanitize_tm_input
def markov_test_symmetry(fm):
    """**Is the Markov process symmetric?**

    Performs a symmetry test, testing for instance whether the transitions A -> B and B -> A
    occur with the same probability. If significant (e.g., ``*p*-value < .05``), one can reject
    the hypothesis that the observed Markov process is symmetric and conclude that the
    transition matrix is asymmetric.

    Parameters
    ----------
    fm : pd.DataFrame
        A frequency matrix obtained from :func:`transition_matrix`.

    Returns
    -------
    dict
        Contains indices of the test.

    See Also
    --------
    transition_matrix

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      sequence = [0, 0, 1, 2, 2, 2, 1, 0, 0, 3]
      _, info = nk.transition_matrix(sequence)

      result = nk.markov_test_symmetry(info["Occurrences"])
      result["Symmetry_p"]

    References
    ----------
    * Kullback, S., Kupperman, M., & Ku, H. H. (1962). Tests for contingency tables and Markov
      chains. Technometrics, 4(4), 573-608.

    """
    # Sanitize input (must be a frequency matrix, not probabilities)
    fm = _sanitize_tm_input(fm, probs=False)
    fm = fm.values

    # Likelihood-ratio statistic over off-diagonal cells whose symmetric
    # counterpart is also non-zero (Kullback et al., 1962)
    total = 0.0
    for i, j in np.ndindex(fm.shape):
        if i == j:
            continue
        forward = fm[i, j]
        backward = fm[j, i]
        if forward * backward > 0:
            total += forward * np.log((2.0 * forward) / (forward + backward))

    # Chi2 test
    out = {"Symmetry_t": total * 2.0, "Symmetry_df": len(fm) * (len(fm) - 1) / 2}
    out["Symmetry_p"] = scipy.stats.chi2.sf(out["Symmetry_t"], out["Symmetry_df"], loc=0, scale=1)
    return out
| 1,737 | 25.333333 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/epochs_to_df.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def epochs_to_df(epochs):
    """**Convert epochs to a DataFrame**

    Concatenate a dict of epochs into one long DataFrame.

    Parameters
    ----------
    epochs : dict
        A dict containing one DataFrame per event/trial. Usually obtained via `epochs_create()`.

    Returns
    ----------
    DataFrame
        A DataFrame containing all epochs identifiable by the 'Label' column, which time axis
        is stored in the 'Time' column.

    See Also
    ----------
    events_find, events_plot, epochs_create, epochs_plot

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      data = nk.data("bio_eventrelated_100hz")

      # Find events
      events = nk.events_find(data["Photosensor"],
                              threshold_keep='below',
                              event_conditions=["Negative", "Neutral", "Neutral", "Negative"])

      # Create epochs
      epochs = nk.epochs_create(data, events, sampling_rate=200, epochs_end=3)

      # Convert to DataFrame
      data = nk.epochs_to_df(epochs)
      data.head()

    """
    # Stack all epochs; the per-epoch time index becomes the second index level
    frame = pd.concat(epochs)
    frame["Time"] = frame.index.get_level_values(1).values
    return frame.reset_index(drop=True)
def _df_to_epochs(data):
# Convert dataframe of epochs created by `epochs_to_df` back into a dictionary.
labels = data.Label.unique()
epochs_dict = {i: pd.DataFrame for i in labels}
for key in epochs_dict:
epochs_dict[key] = data[:][data.Label == key]
epochs_dict[key].index = np.array(epochs_dict[key]["Time"])
epochs_dict[key] = epochs_dict[key].drop(["Time"], axis=1)
return epochs_dict
| 1,721 | 25.090909 | 96 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/epochs_to_array.py | # -*- coding: utf-8 -*-
import numpy as np
def epochs_to_array(epochs):
    """**Epochs to Array**

    Stack the numeric signal(s) of every epoch into a single numpy array.

    .. note::

        TODO: We would like to make it work with uneven epochs (not the same length).

    Parameters
    ----------
    epochs : dict
        A dict containing one DataFrame per event/trial. Usually obtained via :func:`epochs_create`.

    Returns
    ----------
    array
        An array of shape (samples, epochs) for a single signal, or
        (samples, signals, epochs) for several signals.

    See Also
    ----------
    events_find, events_plot, epochs_create, epochs_plot

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      signal = nk.signal_simulate(sampling_rate=100)

      # Create epochs
      epochs = nk.epochs_create(signal, events=[400, 430, 460], sampling_rate=100, epochs_end=1)

      # Convert to array
      X = nk.epochs_to_array(epochs)

    """
    first_epoch = epochs[list(epochs.keys())[0]].select_dtypes(include=["number"])
    n_samples, n_numeric = first_epoch.shape

    if n_numeric == 2:
        # A single signal column next to "Index": 2D output (samples x epochs)
        stacked = np.full((n_samples, len(epochs)), np.nan)
        for i, key in enumerate(epochs):
            values = epochs[key].select_dtypes(include=["number"]).drop("Index", axis=1).values
            stacked[:, i] = values[:, 0]
    else:
        # Several signal columns: 3D output (samples x signals x epochs)
        stacked = np.full((n_samples, n_numeric - 1, len(epochs)), np.nan)
        for i, key in enumerate(epochs):
            stacked[:, :, i] = epochs[key].select_dtypes(include=["number"]).drop("Index", axis=1).values
    return stacked
| 1,626 | 25.672131 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/epochs_plot.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from .epochs_to_df import epochs_to_df
def epochs_plot(epochs, legend=True, **kwargs):
    """**Epochs visualization**

    Plot epochs.

    Parameters
    ----------
    epochs : dict
        A dict containing one DataFrame per event/trial. Usually obtained via `epochs_create()`.
    legend : bool
        Display the legend (the key of each epoch).
    **kwargs
        Other arguments to pass (e.g., ``what`` to select channels of an ``mne.Epochs`` object).

    See Also
    ----------
    events_find, events_plot, epochs_create, epochs_to_df

    Examples
    ----------
    * **Example with data**

    .. ipython:: python

      import neurokit2 as nk

      data = nk.data("bio_eventrelated_100hz")
      events = nk.events_find(data["Photosensor"],
                              threshold_keep='below',
                              event_conditions=["Negative", "Neutral", "Neutral", "Negative"])
      epochs = nk.epochs_create(data, events, sampling_rate=100, epochs_end=1)

      @savefig p_epochs_plot1.png scale=100%
      nk.epochs_plot(epochs)
      @suppress
      plt.close()

    * **Example with ECG Peaks**

    .. ipython:: python

      signal = nk.ecg_simulate(duration=10)
      events = nk.ecg_findpeaks(signal)
      epochs = nk.epochs_create(signal, events=events["ECG_R_Peaks"], epochs_start=-0.5,
                                epochs_end=0.5)

      @savefig p_epochs_plot2.png scale=100%
      nk.epochs_plot(epochs)
      @suppress
      plt.close()

    """
    # Sanitize input: either a NeuroKit epochs dict or an mne.Epochs object.
    # (The previous `elif isinstance(epochs, object)` was always True, so this
    # branch is simply the catch-all for non-dict inputs.)
    if isinstance(epochs, dict):
        data = epochs_to_df(epochs)
    else:
        # Try loading mne
        try:
            import mne
        except ImportError as e:
            raise ImportError(
                "NeuroKit error: epochs_plot(): the 'mne' module is required for this function to run. ",
                "Please install it first (`pip install mne`).",
            ) from e

        if not isinstance(epochs, mne.Epochs):
            raise ValueError(
                "NeuroKit error: epochs_plot(): Please make sure your epochs object passed is `mne.Epochs` object. "
            )
        data = _epochs_mne_sanitize(epochs, **kwargs)

    # Plot each remaining signal column in its own subplot
    cols = data.columns.values
    cols = [x for x in cols if x not in ["Time", "Condition", "Label", "Index"]]

    if len(cols) == 1:
        fig, ax = plt.subplots()
        _epochs_plot(data, ax, cols[0], legend=legend)
    else:
        fig, ax = plt.subplots(nrows=len(cols))
        for i, col in enumerate(cols):
            _epochs_plot(data, ax=ax[i], col=col, legend=legend)
# -------------------------------------------------------------------------------------------------
# Utils
# -------------------------------------------------------------------------------------------------
def _epochs_mne_sanitize(epochs, what):
    """Convert an mne.Epochs object into a long DataFrame restricted to given channel(s).

    `what` is a channel name (str) or a list of channel names; other types leave
    the DataFrame unfiltered.
    """
    data = epochs.to_data_frame()

    # Harmonize column names with the NeuroKit epochs format
    data = data.rename(columns={"time": "Time", "condition": "Condition", "epoch": "Label"})
    data["Time"] = data["Time"] / 1000  # mne times are in ms; convert to seconds

    # Select one specified channel
    if isinstance(what, str):
        data = data[[col for col in data.columns.values if col in ["Time", "Condition", "Label", what]]]
    # Select a few specified channels
    elif isinstance(what, list):
        data = data[[col for col in data.columns.values if col in ["Time", "Condition", "Label"] + what]]

    return data
def _epochs_plot(data, ax, col, legend):
    """Plot one signal column of an epochs DataFrame on `ax`, one line per epoch."""
    if "Condition" in data.columns:
        # Assign one color per condition (supports up to 8 distinct conditions)
        palette = ["red", "blue", "green", "yellow", "purple", "orange", "cyan", "magenta"]
        condition_colors = {}
        for i, condition in enumerate(set(data["Condition"])):
            condition_colors[condition] = palette[i]

        # One pivot/plot call per condition so each keeps its color
        for condition, subset in data.groupby("Condition"):
            pivoted = subset.pivot_table(index="Time", columns=["Condition", "Label"], values=col)
            pivoted.plot(ax=ax, label=col, title=col, style=condition_colors[condition], legend=legend)
        # TODO: Custom legend
    else:
        pivoted = data.pivot(index="Time", columns="Label", values=col)
        pivoted.plot(ax=ax, label=col, title=col, legend=legend)
| 4,230 | 30.574627 | 116 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/epochs_create.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..events.events_find import _events_find_label
from ..misc import listify
def epochs_create(
data,
events=None,
sampling_rate=1000,
epochs_start=0,
epochs_end="from_events",
event_labels=None,
event_conditions=None,
baseline_correction=False,
):
"""**Create Epochs**
Create epochs of a signal or a dataframe.
Parameters
----------
data : DataFrame
A DataFrame containing the different signal(s) as different columns.
If a vector of values is passed, it will be transformed in a DataFrame
with a single 'Signal' column.
events : list or ndarray or dict
Events onset location. If a dict is passed (e.g., from ``events_find()``),
will select only the 'onset' list. If an integer is passed,
will use this number to create an evenly spaced list of events. If None,
will chunk the signal into successive blocks of the set duration.
sampling_rate : int
The sampling frequency of the signal (in Hz, i.e., samples/second).
epochs_start : int, list
Epochs start relative to events_onsets (in seconds). The start can be negative to start
epochs before a given event (to have a baseline for instance). An integer can be specified
to have the same start for all epochs. A list of equal length to the events can be
specified to have a different start for each epoch.
epochs_end : int, list
Epochs end relative to events_onsets (in seconds). An integer can be specified to have the
same end for all epochs. A list of equal length to the events can be specified to have a
different end for each epoch. If ``"from_events"``, events must be a dict (from
:func:`.events_find`). Duration from events will be used as ``epochs_end``.
event_labels : list
A list containing unique event identifiers. If ``None``, will use the event index number.
event_conditions : list
An optional list containing, for each event, for example the trial category, group or
experimental conditions.
baseline_correction : bool
Defaults to False.
Returns
----------
dict
A dict containing DataFrames for all epochs.
See Also
----------
events_find, events_plot, epochs_to_df, epochs_plot
Examples
----------
* **Example 1**: Find events
.. ipython:: python
import neurokit2 as nk
# Get data
data = nk.data("bio_eventrelated_100hz")
# Find events
events = nk.events_find(data["Photosensor"],
threshold_keep='below',
event_conditions=["Negative", "Neutral", "Neutral", "Negative"])
@savefig p_epochs_create1.png scale=100%
nk.events_plot(events, data)
@suppress
plt.close()
* **Example 2**: Create epochs
.. ipython:: python
epochs = nk.epochs_create(data, events, sampling_rate=100, epochs_end=3)
@savefig p_epochs_create2.png scale=100%
nk.epochs_plot(epochs)
@suppress
plt.close()
* **Example 3**: Baseline correction
.. ipython:: python
epochs = nk.epochs_create(data, events, sampling_rate=100,
epochs_end=3, baseline_correction=True)
@savefig p_epochs_create3.png scale=100%
nk.epochs_plot(epochs)
@suppress
plt.close()
* **Example 4**: Arbitrary epoching
.. ipython:: python
# Chunk into n blocks of 1 second
epochs = nk.epochs_create(data, sampling_rate=100, epochs_end=1)
"""
# Santize data input
if isinstance(data, tuple): # If a tuple of data and info is passed
data = data[0]
if isinstance(data, (list, np.ndarray, pd.Series)):
data = pd.DataFrame({"Signal": list(data)})
# Sanitize events input
if events is None:
max_duration = (np.max(epochs_end) - np.min(epochs_start)) * sampling_rate
events = np.arange(0, len(data) - max_duration, max_duration)
if isinstance(events, int):
events = np.linspace(0, len(data), events + 2)[1:-1]
if isinstance(events, dict) is False:
events = _events_find_label(
{"onset": events}, event_labels=event_labels, event_conditions=event_conditions
)
event_onsets = list(events["onset"])
event_labels = list(events["label"])
if "condition" in events.keys():
event_conditions = list(events["condition"])
# Create epochs
if epochs_end == "from_events":
if "duration" not in events.keys():
events["duration"] = list(np.diff(events["onset"])) + [len(data) - 1]
epochs_end = [i / sampling_rate for i in events["duration"]]
parameters = listify(
onset=event_onsets,
label=event_labels,
condition=event_conditions,
start=epochs_start,
end=epochs_end,
)
# Find the maximum numbers of samples in an epoch
parameters["duration"] = list(np.array(parameters["end"]) - np.array(parameters["start"]))
epoch_max_duration = int(max((i * sampling_rate for i in parameters["duration"])))
# Extend data by the max samples in epochs * NaN (to prevent non-complete data)
length_buffer = epoch_max_duration
# First createa buffer of the same dtype as data and fill with it 0s
buffer = pd.DataFrame(0, index=range(length_buffer), columns=data.columns).astype(
dtype=data.dtypes
)
# Only then, we convert the non-integers to nans (because regular numpy's ints cannot be nan)
buffer.select_dtypes(exclude="int64").replace({0.0: np.nan}, inplace=True)
# Now we can combine the buffer with the data
data = pd.concat([buffer, data, buffer], ignore_index=True, sort=False)
# Adjust the Onset of the events for the buffer
parameters["onset"] = [i + length_buffer for i in parameters["onset"]]
epochs = {}
for i, label in enumerate(parameters["label"]):
# Find indices
start = parameters["onset"][i] + (parameters["start"][i] * sampling_rate)
end = parameters["onset"][i] + (parameters["end"][i] * sampling_rate)
# Slice dataframe
epoch = data.iloc[int(start) : int(end)].copy()
# Correct index
epoch["Index"] = epoch.index.values - length_buffer
epoch.index = np.linspace(
start=parameters["start"][i], stop=parameters["end"][i], num=len(epoch), endpoint=True
)
if baseline_correction is True:
baseline_end = 0 if epochs_start <= 0 else epochs_start
epoch = epoch - epoch.loc[:baseline_end].mean()
# Add additional
epoch["Label"] = parameters["label"][i]
if parameters["condition"][i] is not None:
epoch["Condition"] = parameters["condition"][i]
# Store
epochs[label] = epoch
# Sanitize dtype of individual columns
for i in epochs:
for colname, column in epochs[i].select_dtypes(include=["object"]).items():
# Check whether columns are indices or label/condition
values = column.unique().tolist()
zero_or_one = not (False in [x in [0, 1] for x in values])
if zero_or_one:
# Force to int64
epochs[i][colname] = epochs[i][colname].astype("int64")
else:
epochs[i][colname] = epochs[i][colname].astype("string")
return epochs
| 7,510 | 33.454128 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/__init__.py | """Submodule for NeuroKit."""
from .epochs_average import epochs_average
from .epochs_create import epochs_create
from .epochs_plot import epochs_plot
from .epochs_to_array import epochs_to_array
from .epochs_to_df import epochs_to_df
__all__ = ["epochs_create", "epochs_to_df", "epochs_to_array", "epochs_average", "epochs_plot"]
| 333 | 32.4 | 95 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/eventrelated_utils.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning, find_closest
from ..stats import fit_polynomial
from .epochs_to_df import _df_to_epochs
def _eventrelated_sanitizeinput(epochs, what="ecg", silent=False):
# Sanity checks
if isinstance(epochs, pd.DataFrame):
if "Time" in epochs.columns.values:
# Assumpe it is a dataframe of epochs created by `epochs_to_df`
epochs = _df_to_epochs(epochs) # Convert df back to dict
else:
raise ValueError(
"It seems that you are trying to pass a single epoch to an eventrelated function."
+ 'If this is what you want, please wrap it in a dict: `{"0": epoch}` '
)
if not isinstance(epochs, dict):
raise ValueError(
"NeuroKit error: " + str(what) + "_eventrelated(): Please specify an input "
"that is of the correct form i.e., either a dictionary "
"or dataframe."
)
# Warning for long epochs
if silent is False:
length_mean = np.mean(
[np.max(epochs[i].index) - np.min(epochs[i].index) for i in epochs.keys()]
)
if length_mean > 10:
warn(
str(what) + "_eventrelated():"
" The duration of your epochs seems quite long. You might want"
" to use " + str(what) + "_intervalrelated().",
category=NeuroKitWarning,
)
return epochs
def _eventrelated_addinfo(epoch, output={}):
# Add label
if "Index" in epoch.columns:
output["Event_Onset"] = epoch.loc[np.min(np.abs(epoch.index))]["Index"]
# Add label
if "Label" in epoch.columns and len(set(epoch["Label"])) == 1:
output["Label"] = epoch["Label"].values[0]
# Add condition
if "Condition" in epoch.columns and len(set(epoch["Condition"])) == 1:
output["Condition"] = epoch["Condition"].values[0]
# Add participant_id
if "Participant" in epoch.columns and len(set(epoch["Participant"])) == 1:
output["Participant"] = epoch["Participant"].values[0]
return output
def _eventrelated_sanitizeoutput(data):
df = pd.DataFrame.from_dict(data, orient="index") # Convert to a dataframe
colnames = df.columns.values
if "Event_Onset" in colnames:
df = df.sort_values("Event_Onset")
df = df[["Event_Onset"] + [col for col in df.columns if col != "Event_Onset"]]
# Move columns to front
if "Condition" in colnames:
df = df[["Condition"] + [col for col in df.columns if col != "Condition"]]
if "Label" in colnames:
df = df[["Label"] + [col for col in df.columns if col != "Label"]]
return df
def _eventrelated_rate(epoch, output=None, var="ECG_Rate"):
    """Compute baseline-corrected rate features for one epoch and add them to `output`.

    Extracts, relative to the value at time 0 (baseline): max, min, mean, SD, the timing
    of the extrema, and the coefficients of a quadratic trend fitted to the post-onset
    signal. Returns `output` unchanged (with a warning) if the `var` column is missing.

    Parameters
    ----------
    epoch : DataFrame
        One epoch, indexed by time (seconds relative to event onset).
    output : dict, optional
        Dict of features to extend. Defaults to a fresh dict per call.
    var : str
        Name of the rate column to analyze (e.g. "ECG_Rate").

    Returns
    -------
    dict
        `output` extended with the `var`-prefixed features.
    """
    # Bug fix: the previous `output={}` default was a shared mutable object, so features
    # from one call leaked into subsequent calls. Create a fresh dict per call instead.
    if output is None:
        output = {}
    # Sanitize input
    colnames = epoch.columns.values
    if len([i for i in colnames if var in i]) == 0:
        warn(
            "Input does not have an `" + var + "` column."
            " Will skip all rate-related features.",
            category=NeuroKitWarning,
        )
        return output

    # Get baseline
    zero = find_closest(0, epoch.index.values, return_index=True)  # Find closest to 0
    baseline = epoch[var].iloc[zero]

    # Get post-onset signal (everything after the baseline sample)
    signal = epoch[var].values[zero + 1 : :]
    index = epoch.index.values[zero + 1 : :]

    # Max / Min / Mean (relative to baseline)
    output[var + "_Baseline"] = baseline
    output[var + "_Max"] = np.max(signal) - baseline
    output[var + "_Min"] = np.min(signal) - baseline
    output[var + "_Mean"] = np.mean(signal) - baseline
    output[var + "_SD"] = np.std(signal)

    # Time of Max / Min
    output[var + "_Max_Time"] = index[np.argmax(signal)]
    output[var + "_Min_Time"] = index[np.argmin(signal)]

    # Modelling
    # These are experimental indices corresponding to parameters of a quadratic model
    # Instead of raw values (such as min, max etc.)
    _, info = fit_polynomial(signal - baseline, index, order=2)
    output[var + "_Trend_Linear"] = info["coefs"][1]
    output[var + "_Trend_Quadratic"] = info["coefs"][2]
    output[var + "_Trend_R2"] = info["R2"]

    return output
| 4,183 | 33.295082 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/epochs/epochs_average.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from .epochs_to_df import epochs_to_df
def epochs_average(epochs, which=None, show=False, **kwargs):
    """**Compute Grand Average**

    Average epochs and returns the grand average, as well as the SD and the confidence interval.

    Parameters
    ----------
    epochs : dict
        A dict containing one DataFrame per event/trial. Usually obtained via `epochs_create()`.
    which : str or list
        The name of the column(s) to compute the average from. If ``None``, defaults to the
        first column of the long-format epochs data.
    show : bool
        If ``True``, plot the grand average of each selected column with its interval band.
    **kwargs
        Other arguments to pass (not used for now).

    Returns
    -------
    DataFrame
        One row per time point with, for each selected column, ``*_Mean``, ``*_SD``,
        ``*_CI_low`` and ``*_CI_high`` columns.

    See Also
    ----------
    events_find, events_plot, epochs_create, epochs_to_df

    Examples
    ----------
    * **Example with ECG Peaks**

    .. ipython:: python

      signal = nk.ecg_simulate(duration=10)
      events = nk.ecg_findpeaks(signal)
      epochs = nk.epochs_create(signal, events=events["ECG_R_Peaks"], epochs_start=-0.5,
                                epochs_end=0.5)

      @savefig p_epochs_average1.png scale=100%
      grand_av = nk.epochs_average(epochs, which="Signal", show=True)
      @suppress
      plt.close()

    """
    # Convert the dict of epochs to one long-format dataframe with a shared "Time" column.
    data = epochs_to_df(epochs)
    assert (
        "Time" in data.columns
    ), "Something is wrong with the epochs data, could not find a 'Time' column in them."

    # Select only the first column
    if which is None:
        which = data.columns[0]
    if isinstance(which, str):
        which = [which]

    # Define quantile functions
    # 2.5th and 97.5th percentiles across epochs, i.e., the bounds of a 95% interval.
    def q1(x):
        return x.quantile(0.025)

    def q2(x):
        return x.quantile(0.975)

    # Format which: one {column: [aggregations]} mapping per selected column
    what = {i: ["mean", "std", q1, q2] for i in which}

    # Group by and average (one row per time point, aggregated across epochs)
    av = data.groupby(["Time"], as_index=False).agg(what).reset_index()
    # Flatten the (column, aggregation) MultiIndex into "column_aggregation" names.
    av.columns = ["%s%s" % (a, "_%s" % b if b else "") for a, b in av.columns]

    # Format: rename aggregation suffixes to the public column naming convention
    av.columns = av.columns.str.replace("_mean", "_Mean")
    av.columns = av.columns.str.replace("_std", "_SD")
    av.columns = av.columns.str.replace("_q1", "_CI_low")
    av.columns = av.columns.str.replace("_q2", "_CI_high")

    # Plot: mean line plus shaded interval band for each selected column
    if show is True:
        for i in which:
            plt.plot(av["Time"], av[f"{i}_Mean"], label=i)
            plt.fill_between(
                av["Time"],
                av[f"{i}_CI_low"],
                av[f"{i}_CI_high"],
                alpha=0.3,
            )
        plt.legend()
    return av
| 2,383 | 26.402299 | 96 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_clean.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
import pandas as pd
import scipy.ndimage
from ..misc import NeuroKitWarning, as_vector
from ..signal import signal_filter
def eog_clean(eog_signal, sampling_rate=1000, method="neurokit"):
    """**Clean an EOG signal**

    Prepare a raw EOG signal for eye blinks detection.

    Parameters
    ----------
    eog_signal : Union[list, np.array, pd.Series]
        The raw EOG channel (either vertical or horizontal).
    sampling_rate : int
        The sampling frequency of :func:`.eog_signal` (in Hz, i.e., samples/second).
        Defaults to 1000.
    method : str
        The processing pipeline to apply. Can be one of ``"neurokit"`` (default),
        ``"agarwal2019"``, ``"mne"`` (requires the MNE package to be installed),
        ``"brainstorm"``, ``"kong1998"`` or ``"blinker"``.

    Returns
    -------
    array
        Vector containing the cleaned EOG signal.

    See Also
    --------
    signal_filter, eog_peaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      eog_signal = nk.data('eog_100hz')

      # Clean
      df = {"Raw": eog_signal}
      df["neurokit"] = nk.eog_clean(eog_signal, sampling_rate=100, method='neurokit')
      df["kong1998"] = nk.eog_clean(eog_signal, sampling_rate=100, method='kong1998')
      df["agarwal2019"] = nk.eog_clean(eog_signal, sampling_rate=100, method='agarwal2019')
      df["mne"] = nk.eog_clean(eog_signal, sampling_rate=100, method='mne')
      df["brainstorm"] = nk.eog_clean(eog_signal, sampling_rate=100, method='brainstorm')
      df["blinker"] = nk.eog_clean(eog_signal, sampling_rate=100, method='blinker')

      # Visualize
      @savefig p_eog_clean.png scale=100%
      pd.DataFrame(df).plot(subplots=True)
      @suppress
      plt.close()

    References
    ----------
    * Agarwal, M., & Sivakumar, R. (2019). Blink: A Fully Automated Unsupervised Algorithm for
      Eye-Blink Detection in EEG Signals. In 2019 57th Annual Allerton Conference on Communication,
      Control, and Computing (Allerton) (pp. 1113-1121). IEEE.
    * Kleifges, K., Bigdely-Shamlo, N., Kerick, S. E., & Robbins, K. A. (2017). BLINKER: automated
      extraction of ocular indices from EEG enabling large-scale analysis. Frontiers in
      neuroscience, 11, 12.
    * Kong, X., & Wilson, G. F. (1998). A new EOG-based eyeblink detection algorithm.
      Behavior Research Methods, Instruments, & Computers, 30(4), 713-719.

    """
    # Sanitize input
    eog_signal = as_vector(eog_signal)

    # Missing data: forward-fill NaNs before filtering (filters cannot handle NaNs)
    n_missing = np.sum(np.isnan(eog_signal))
    if n_missing > 0:
        warn(
            "There are " + str(n_missing) + " missing data points in your signal."
            " Filling missing values by using the forward filling method.",
            category=NeuroKitWarning,
        )
        eog_signal = _eog_clean_missing(eog_signal)

    # Apply method
    method = method.lower()
    if method in ["neurokit", "nk"]:
        clean = _eog_clean_neurokit(eog_signal, sampling_rate=sampling_rate)
    elif method in ["agarwal", "agarwal2019"]:
        clean = _eog_clean_agarwal2019(eog_signal, sampling_rate=sampling_rate)
    elif method in ["brainstorm"]:
        clean = _eog_clean_brainstorm(eog_signal, sampling_rate=sampling_rate)
    elif method in ["mne"]:
        clean = _eog_clean_mne(eog_signal, sampling_rate=sampling_rate)
    elif method in ["blinker", "kleifges2017", "kleifges"]:
        clean = _eog_clean_blinker(eog_signal, sampling_rate=sampling_rate)
    elif method in ["kong1998", "kong"]:
        clean = _eog_clean_kong1998(eog_signal, sampling_rate=sampling_rate)
    else:
        # Bug fix: previously raised with two separate string arguments (displayed as a
        # tuple) and omitted the supported 'neurokit' and 'blinker' methods.
        raise ValueError(
            "NeuroKit error: eog_clean(): 'method' should be one of 'neurokit', "
            "'agarwal2019', 'brainstorm', 'mne', 'kong1998' or 'blinker'."
        )

    return clean
# =============================================================================
# Handle missing data
# =============================================================================
def _eog_clean_missing(eog_signal):
eog_signal = pd.DataFrame.pad(pd.Series(eog_signal))
return eog_signal
# =============================================================================
# Methods
# =============================================================================
def _eog_clean_neurokit(eog_signal, sampling_rate=1000):
    """NeuroKit in-house cleaning: 6th-order Butterworth band-pass between 0.25 and 7.5 Hz."""
    filter_params = dict(method="butterworth", order=6, lowcut=0.25, highcut=7.5)
    return signal_filter(eog_signal, sampling_rate=sampling_rate, **filter_params)
def _eog_clean_agarwal2019(eog_signal, sampling_rate=1000):
    """Cleaning following Agarwal & Sivakumar (2019): 4th-order Butterworth low-pass at 10 Hz.

    Agarwal, M., & Sivakumar, R. (2019). Blink: A Fully Automated Unsupervised Algorithm for
    Eye-Blink Detection in EEG Signals. In 2019 57th Annual Allerton Conference on Communication,
    Control, and Computing (Allerton) (pp. 1113-1121). IEEE.
    """
    filter_params = dict(method="butterworth", order=4, lowcut=None, highcut=10)
    return signal_filter(eog_signal, sampling_rate=sampling_rate, **filter_params)
def _eog_clean_brainstorm(eog_signal, sampling_rate=1000):
    """Default EOG cleaning of the Brainstorm toolbox: 4th-order Butterworth band-pass 1.5-15 Hz.

    https://neuroimage.usc.edu/brainstorm/Tutorials/TutRawSsp
    """
    filter_params = dict(method="butterworth", order=4, lowcut=1.5, highcut=15)
    return signal_filter(eog_signal, sampling_rate=sampling_rate, **filter_params)
def _eog_clean_blinker(eog_signal, sampling_rate=1000):
    """Cleaning used by the BLINKER algorithm (Kleifges et al., 2017).

    "Each candidate signal is band-passed filtered in the interval [1, 20] Hz prior
    to blink detection."
    """
    filter_params = dict(method="butterworth", order=4, lowcut=1, highcut=20)
    return signal_filter(eog_signal, sampling_rate=sampling_rate, **filter_params)
def _eog_clean_mne(eog_signal, sampling_rate=1000):
"""EOG cleaning implemented by default in MNE.
https://github.com/mne-tools/mne-python/blob/master/mne/preprocessing/eog.py
"""
# Make sure MNE is installed
try:
import mne
except ImportError:
raise ImportError(
"NeuroKit error: signal_filter(): the 'mne' module is required for this method to run.",
" Please install it first (`pip install mne`).",
)
# Filter
clean = mne.filter.filter_data(
eog_signal,
sampling_rate,
l_freq=1,
h_freq=10,
filter_length="10s",
l_trans_bandwidth=0.5,
h_trans_bandwidth=0.5,
phase="zero-double",
fir_window="hann",
fir_design="firwin2",
verbose=False,
)
return clean
def _eog_clean_kong1998(eog_signal, sampling_rate=1000):
"""Kong, X., & Wilson, G.
F. (1998). A new EOG-based eyeblink detection algorithm. Behavior Research Methods, Instruments, & Computers,
30(4), 713-719.
"""
# The order E should be less than half of the expected eyeblink duration. For example, if
# the expected blink duration is 200 msec (10 samples with a sampling rate of 50 Hz), the
# order E should be less than five samples.
eroded = scipy.ndimage.grey_erosion(eog_signal, size=int((0.2 / 2) * sampling_rate))
# a "low-noise" Lanczos differentiation filter introduced in Hamming (1989) is employed.
# Frequently, a first order differentiation filter is sufficient and has the familiar
# form of symmetric difference:
# w[k] = 0.5 * (y[k + 1] - y[k - 1])
diff = eroded - np.concatenate([[0], 0.5 * np.diff(eroded)])
# To reduce the effects of noise, characterized by small fluctuations around zero, a
# median filter is also used with the order of the median filter denoted as M.
# The median filter acts like a mean filter except that it preserves the sharp edges ofthe
# input. The order M should be less than a quarter ofthe expected eyeblink duration.
clean = scipy.ndimage.median_filter(diff, size=int((0.2 / 4) * sampling_rate))
return clean
| 8,170 | 33.045833 | 113 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_intervalrelated.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def eog_intervalrelated(data):
    """**EOG analysis on longer periods of data**

    Performs EOG analysis on longer periods of data (typically > 10 seconds), such as resting-state
    data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A DataFrame containing the different processed signal(s) as different columns, typically
        generated by :func:`.eog_process` or :func:`.bio_process`. Can also take a dict containing
        sets of separately processed DataFrames.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EOG features. The analyzed features consist of the
        following:

        * ``"EOG_Rate_Mean"``: the mean blink rate.

        * ``"EOG_Peaks_N"``: the number of blink peak occurrences.

    See Also
    --------
    bio_process, eog_eventrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Download data
      eog = nk.data('eog_200hz')['vEOG']

      # Process the data
      df, info = nk.eog_process(eog, sampling_rate=200)

      # Single dataframe is passed
      nk.eog_intervalrelated(df)

      # Dictionary is passed
      epochs = nk.epochs_create(df, events=[0, 30000], sampling_rate=200,
                                epochs_end=120)
      nk.eog_intervalrelated(epochs)

    """
    intervals = {}

    # Format input
    if isinstance(data, pd.DataFrame):
        rate_cols = [col for col in data.columns if "EOG_Rate" in col]
        if len(rate_cols) == 1:
            intervals.update(_eog_intervalrelated_formatinput(data))
        # NOTE(review): when no single "EOG_Rate" column is found, `intervals` stays empty
        # and an empty DataFrame is returned silently — confirm this is intended.
        eog_intervals = pd.DataFrame.from_dict(intervals, orient="index").T

    elif isinstance(data, dict):
        # One row of features per epoch/period
        for index in data:
            intervals[index] = {}  # Initialize empty container

            # Add label info
            intervals[index]["Label"] = data[index]["Label"].iloc[0]

            # Rate and Blinks quantity
            intervals[index] = _eog_intervalrelated_formatinput(data[index], intervals[index])

        eog_intervals = pd.DataFrame.from_dict(intervals, orient="index")

    return eog_intervals
# =============================================================================
# Internals
# =============================================================================
def _eog_intervalrelated_formatinput(data, output={}):
# Sanitize input
colnames = data.columns.values
if len([i for i in colnames if "EOG_Rate" in i]) == 0:
raise ValueError(
"NeuroKit error: eog_intervalrelated(): Wrong input,"
"we couldn't extract EOG rate. Please make sure"
"your DataFrame contains an `EOG_Rate` column."
)
if len([i for i in colnames if "EOG_Blinks" in i]) == 0:
raise ValueError(
"NeuroKit error: eog_intervalrelated(): Wrong input,"
"we couldn't extract EOG blinks. Please make sure"
"your DataFrame contains an `EOG_Blinks` column."
)
signal = data["EOG_Rate"].values
n_blinks = len(np.where(data["EOG_Blinks"] == 1)[0])
output["EOG_Peaks_N"] = n_blinks
output["EOG_Rate_Mean"] = np.mean(signal)
return output
| 3,245 | 28.779817 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_findpeaks.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
from ..epochs import epochs_create, epochs_to_array
from ..misc import as_vector
from ..signal import signal_findpeaks, signal_fixpeaks
from ..stats import fit_rmse, rescale
from .eog_features import _eog_features_delineate
from .eog_simulate import _eog_simulate_blink
def eog_findpeaks(veog_cleaned, sampling_rate=None, method="mne", **kwargs):
    """**Locate EOG eye blinks**

    Low-level function used by :func:`.eog_peaks` to identify blinks in an EOG signal using a
    different set of algorithms. See :func:`.eog_peaks` for details.

    Parameters
    ----------
    veog_cleaned : Union[list, np.array, pd.Series]
        The cleaned vertical EOG channel. Note that it must be positively oriented, i.e., blinks
        must appear as upward peaks.
    sampling_rate : int
        The signal sampling rate (in Hz, i.e., samples/second). Needed for method ``"neurokit"``
        or ``"blinker"``, otherwise defaults to ``None``.
    method : str
        The peak detection algorithm. Can be one of ``"neurokit"``, ``"mne"`` (requires the MNE
        package to be installed), ``"brainstorm"`` or ``"blinker"``.
    **kwargs
        Other arguments passed to the method-specific function (e.g., ``threshold`` and ``show``
        for the ``"neurokit"`` method).

    Returns
    -------
    array
        Vector containing the samples at which EOG-peaks occur.

    See Also
    --------
    eog_peaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      eog_signal = nk.data('eog_100hz')
      eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=100)

    * **Example 1:** NeuroKit method

    .. ipython:: python

      neurokit = nk.eog_findpeaks(eog_cleaned,
                                  sampling_rate=100,
                                  method="neurokit",
                                  threshold=0.33,
                                  show=True)
      @savefig p_eog_findpeaks1.png scale=100%
      nk.events_plot(neurokit, eog_cleaned)
      @suppress
      plt.close()

    * **Example 2:** MNE-method

    .. ipython:: python

      mne = nk.eog_findpeaks(eog_cleaned, method="mne")
      @savefig p_eog_findpeaks2.png scale=100%
      nk.events_plot(mne, eog_cleaned)
      @suppress
      plt.close()

    * **Example 3:** brainstorm method

    .. ipython:: python

      brainstorm = nk.eog_findpeaks(eog_cleaned, method="brainstorm")
      @savefig p_eog_findpeaks3.png scale=100%
      nk.events_plot(brainstorm, eog_cleaned)
      @suppress
      plt.close()

    * **Example 4:** blinker method

    .. ipython:: python

      blinker = nk.eog_findpeaks(eog_cleaned, sampling_rate=100, method="blinker")
      @savefig p_eog_findpeaks4.png scale=100%
      nk.events_plot(blinker, eog_cleaned)
      @suppress
      plt.close()

    """
    # Sanitize input
    eog_cleaned = as_vector(veog_cleaned)

    # Apply method
    method = method.lower()
    if method in ["mne"]:
        peaks = _eog_findpeaks_mne(eog_cleaned)
    elif method in ["brainstorm"]:
        peaks = _eog_findpeaks_brainstorm(eog_cleaned)
    elif method in ["blinker"]:
        peaks = _eog_findpeaks_blinker(eog_cleaned, sampling_rate=sampling_rate)
    elif method in ["neurokit", "nk"]:
        peaks = _eog_findpeaks_neurokit(eog_cleaned, sampling_rate=sampling_rate, **kwargs)
    # elif method in ["jammes2008", "jammes"]:
    #     peaks = _eog_findpeaks_jammes2008(eog_cleaned, sampling_rate=sampling_rate)
    else:
        # Bug fix: the message previously named `eog_peaks()` and omitted the supported
        # 'neurokit' method.
        raise ValueError(
            "NeuroKit error: eog_findpeaks(): 'method' should be "
            "one of 'neurokit', 'mne', 'brainstorm' or 'blinker'."
        )

    return peaks
# =============================================================================
# Method - NeuroKit
# =============================================================================
def _eog_findpeaks_neurokit(eog_cleaned, sampling_rate=1000, threshold=0.33, show=True):
    """In-house EOG blink detection: candidate peaks plus template-based rejection."""
    # Candidate peaks that stand sufficiently above their surroundings
    candidates = signal_findpeaks(eog_cleaned, relative_height_min=1.25)["Peaks"]
    # Enforce a minimum 200 ms interval between successive candidates
    candidates = signal_fixpeaks(
        peaks=candidates, sampling_rate=sampling_rate, interval_min=0.2, method="neurokit"
    )
    # Discard candidates whose shape deviates too much from the canonical blink template
    return _eog_findpeaks_neurokit_filterblinks(
        eog_cleaned, candidates, sampling_rate=sampling_rate, threshold=threshold, show=show
    )
def _eog_findpeaks_neurokit_filterblinks(
    eog_cleaned, peaks, sampling_rate=1000, threshold=0.5, show=False
):
    """Compare each detected event to a blink template and reject it if too different.

    For every candidate peak, an epoch from -0.4 s to +0.6 s around the peak is rescaled to
    [0, 1] and compared (RMSE) to a synthetic gamma-shaped blink template. Candidates whose
    RMSE is at or above `threshold` are rejected. If `show`, plots the RMSE distribution and
    the accepted/rejected blink shapes.
    """
    # Get epoch around each blink
    events = epochs_create(
        eog_cleaned, peaks, sampling_rate=sampling_rate, epochs_start=-0.4, epochs_end=0.6
    )
    events = epochs_to_array(events)  # Convert to 2D array (samples x blinks)

    # Generate Blink-template (canonical blink shape at the same sampling rate)
    template = _eog_simulate_blink(sampling_rate=sampling_rate, method="gamma")

    # Get the "distance" (RMSE) between each blink and the template
    rmse = np.full(events.shape[1], np.nan)
    for i in range(events.shape[1]):
        # Rescale in place so the RMSE is comparable to the [0, 1]-scaled template
        events[:, i] = rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
        rmse[i] = fit_rmse(events[:, i], template)

    # Plot RMSE distribution (left) and accepted vs rejected blink shapes (right)
    if show is True:
        plt.subplot(1, 2, 1)
        plt.hist(rmse, color="#FF9800")
        plt.axvline(x=threshold, linewidth=4, color="r")
        plt.title("RMSE Distribution (threshold = " + str(threshold) + ")")
        plt.xlabel("RMSE")

        plt.subplot(1, 2, 2)
        plt.plot(events[:, rmse < threshold], linewidth=0.25, color="black")
        plt.plot(events[:, rmse >= threshold], linewidth=0.5, color="red")
        plt.plot(template, linewidth=2, color="#2196F3", label="Blink template")
        plt.title("Accepted and rejected (red) blinks")
        plt.legend(loc="upper right")

    # Keep only the peaks whose shape is close enough to the template
    return peaks[rmse < threshold]
# =============================================================================
# Method - Jammes (2008)
# =============================================================================
# def _eog_findpeaks_jammes2008(eog_cleaned, sampling_rate=1000):
# """Derivative-based method by Jammes (2008)
#
# https://link.springer.com/article/10.1007/s11818-008-0351-y
#
# """
# # Derivative
# derivative = np.gradient(eog_cleaned)
#
# # These parameters were set by the authors "empirically". These are values based on
# # their figure 1.
# vcl = 0.5 * np.max(derivative)
# vol = 0.75 * np.min(derivative)
#
# crosses_vcl = signal_zerocrossings(derivative - vcl, direction="up")
# crosses_vol = signal_zerocrossings(derivative - vol, direction="down")
# crosses_vol = nk.find_closest(crosses_vcl, crosses_vol, direction="above")
#
# nk.events_plot([crosses_vcl, crosses_vol], eog_cleaned)
# nk.signal_plot([eog_cleaned, derivative, derivative - vol])
# durations = (crosses_vol - crosses_vcl) / sampling_rate
# indices = durations < 0.5
#
# peaks = np.full(np.sum(indices), np.nan)
# for i in range(np.sum(indices)):
# segment = eog_cleaned[crosses_vcl[indices][i]:crosses_vol[indices][i]]
# peaks[i] = crosses_vcl[indices][i] + np.argmax(segment)
#
# return peaks
# =============================================================================
# Method - MNE
# =============================================================================
def _eog_findpeaks_mne(eog_cleaned):
"""EOG blink detection based on MNE.
https://github.com/mne-tools/mne-python/blob/master/mne/preprocessing/eog.py
"""
# Make sure MNE is installed
try:
import mne
except ImportError:
raise ImportError(
"NeuroKit error: signal_filter(): the 'mne' module is required for this method to run. ",
"Please install it first (`pip install mne`).",
)
# Find peaks
eog_events, _ = mne.preprocessing.peak_finder(eog_cleaned, extrema=1, verbose=False)
return eog_events
# =============================================================================
# Method - Brainstorm
# =============================================================================
def _eog_findpeaks_brainstorm(eog_cleaned):
    """EOG blink detection as implemented in Brainstorm.

    https://neuroimage.usc.edu/brainstorm/Tutorials/ArtifactsDetect#Detection:_Blinks
    """
    # Brainstorm: "An event of interest is detected if the absolute value of the filtered
    # signal value goes over a given number of times the standard deviation. For EOG: 2xStd."
    detection = signal_findpeaks(eog_cleaned, relative_height_min=2)
    return detection["Peaks"]
# =============================================================================
# Method - blinker
# =============================================================================
def _eog_findpeaks_blinker(eog_cleaned, sampling_rate=1000):
    """EOG blink detection based on BLINKER algorithm.

    Detects only potential blink landmarks and does not separate blinks from other artifacts yet.
    https://www.frontiersin.org/articles/10.3389/fnins.2017.00012/full

    """
    # Establish criterion: amplitude threshold of mean + 1.5 SD
    threshold = 1.5 * np.std(eog_cleaned) + eog_cleaned.mean()
    min_blink = 0.05 * sampling_rate  # min blink frames (50 ms worth of samples)
    # Collect all samples exceeding the threshold
    potential_blinks = []
    for i, signal in enumerate(eog_cleaned):
        if signal > threshold:
            potential_blinks.append(i)

    # Make sure each blink is 50ms long and separated by 50ms
    # `indexes` marks where consecutive supra-threshold samples are more than `min_blink`
    # samples apart, i.e., the boundaries between distinct supra-threshold runs.
    indexes = np.where(np.diff(potential_blinks) > min_blink)[0]
    individual_blinks = np.split(np.diff(potential_blinks), indexes)
    # Keep only runs longer than `min_blink` samples
    blinks = []
    for idx, i in enumerate(individual_blinks):
        if len(i) > min_blink:
            blinks.append(idx)

    # Map each kept run back to the sample index where it starts
    # (np.append(0, indexes) gives the start position of each run within `potential_blinks`)
    candidates = np.array(potential_blinks)[np.append(0, indexes)[blinks]]

    # Delineate each candidate blink; the second returned element is the list of peak locations
    _, peaks, _, _, _, _ = _eog_features_delineate(
        eog_cleaned, candidates, sampling_rate=sampling_rate
    )

    # Blink peak markers
    peaks = np.array(peaks)

    return peaks
| 10,326 | 34.245734 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_analyze.py | # -*- coding: utf-8 -*-
import pandas as pd
from .eog_eventrelated import eog_eventrelated
from .eog_intervalrelated import eog_intervalrelated
def eog_analyze(data, sampling_rate=1000, method="auto"):
    """**EOG Analysis**

    Performs EOG analysis on either epochs (event-related analysis) or on longer periods of data
    such as resting-state data.

    Parameters
    ----------
    data : Union[dict, pd.DataFrame]
        A dictionary of epochs, containing one DataFrame per epoch, usually obtained via
        :func:`.epochs_create`, or a DataFrame containing all epochs, usually obtained via
        :func:`.epochs_to_df`. Can also take a DataFrame of processed signals from a longer period
        of data, typically generated by :func:`.eog_process` or :func:`.bio_process`. Can also take
        a dict containing sets of separate periods of data.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
        Defaults to 1000Hz.
    method : str
        Can be one of ``"event-related"`` for event-related analysis on epochs, or
        ``"interval-related"`` for analysis on longer periods of data. Defaults to ``"auto"`` where
        the right method will be chosen based on the mean duration of the data (``"event-related"``
        for duration under 10s).

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EOG features. If
        event-related analysis is conducted, each epoch is indicated
        by the `Label` column. See :func:`.eog_eventrelated` and
        :func:`.eog_intervalrelated` docstrings for details.

    See Also
    --------
    bio_process, eog_process, epochs_create, eog_eventrelated, eog_intervalrelated

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Example 1: Event-related analysis
      data = nk.data("eog_100hz")

      # Process the data for event-related analysis
      df, info = nk.bio_process(eog=data, sampling_rate=100)
      epochs = nk.epochs_create(df, events=[500, 4000, 6000, 9000], sampling_rate=100,
                                epochs_start=-0.1,epochs_end=1.9)

      # Analyze
      nk.eog_analyze(epochs, sampling_rate=100)

      # Example 2: Interval-related analysis with same dataset
      nk.eog_analyze(df, sampling_rate=100)

    """
    method = method.lower()

    # Event-related analysis
    if method in ["event-related", "event", "epoch"]:
        # Sanity checks: epochs must carry a "Label" column
        if isinstance(data, dict):
            for i in data:
                colnames = data[i].columns.values
        elif isinstance(data, pd.DataFrame):
            colnames = data.columns.values

        if len([i for i in colnames if "Label" in i]) == 0:
            raise ValueError(
                "NeuroKit error: eog_analyze(): Wrong input or method, "
                "we couldn't extract epochs features."
            )
        else:
            features = eog_eventrelated(data)

    # Interval-related analysis
    elif method in ["interval-related", "interval", "resting-state"]:
        features = eog_intervalrelated(data)

    # Auto: choose based on the (mean) duration of the data
    elif method in ["auto"]:
        if isinstance(data, dict):
            for i in data:
                duration = len(data[i]) / sampling_rate
            if duration >= 10:
                features = eog_intervalrelated(data)
            else:
                features = eog_eventrelated(data)

        if isinstance(data, pd.DataFrame):
            if "Label" in data.columns:
                # Duration of one epoch = size of the largest label group.
                # Bug fix: `value_counts()[0]` relied on pandas' deprecated positional
                # fallback for label-based indexing; use `.iloc[0]` explicitly.
                epoch_len = data["Label"].value_counts().iloc[0]
                duration = epoch_len / sampling_rate
            else:
                duration = len(data) / sampling_rate
            if duration >= 10:
                features = eog_intervalrelated(data)
            else:
                features = eog_eventrelated(data)

    return features
| 3,868 | 33.855856 | 124 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_features.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from ..epochs import epochs_create
from ..signal import signal_zerocrossings
def eog_features(eog_cleaned, peaks, sampling_rate=1000):
    """**Extracts Features of EOG Eye Blinks**

    Computes features of each detected eye blink: velocity-based measures (pAVR, nAVR), the
    blink-amplitude-ratio (BAR), blink duration, and the zero-crossing markers of blink onset
    and offset.

    The positive amplitude velocity ratio (pAVR) is the ratio of the maximum amplitude of the
    blink over the maximum velocity (rate of change) during the blink upstroke; the negative
    amplitude velocity ratio (nAVR) is the analogous measure for the downstroke. Both are
    expressed in centiseconds and are indicators of fatigue.

    The blink-amplitude ratio (BAR) is the average amplitude of the signal between the blink
    leftZero and rightZero crossings divided by the average amplitude of the positive fraction
    of the signal outside the blink. BAR values typically lie in [5, 20]; it is a measure of
    the signal-to-noise ratio (SNR) of the blink relative to the background.

    Parameters
    ----------
    eog_cleaned : Union[list, np.array, pd.Series]
        The cleaned EOG channel, extracted from :func:`.eog_clean`.
    peaks : np.array
        Vector containing the samples at which EOG-peaks occur.
    sampling_rate : int
        The sampling frequency of :func:`.eog_signal` (in Hz, i.e., samples/second).
        Defaults to 1000.

    Returns
    -------
    info : dict
        A dictionary containing the blink features, accessible with keys
        ``"Blink_LeftZeros"`` (point when eye closes), ``"Blink_RightZeros"`` (point when eye
        opens), ``"Blink_pAVR"``, ``"Blink_nAVR"``, ``"Blink_BAR"``, and ``"Blink_Duration"``
        (duration of each blink in seconds).

    See Also
    --------
    eog_clean, eog_findpeaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      eog_signal = nk.data('eog_100hz')
      eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=100)
      peaks = nk.eog_findpeaks(eog_cleaned, sampling_rate=100)

      info = nk.eog_features(eog_cleaned, peaks, sampling_rate=100)

    References
    ----------
    * Kleifges, K., Bigdely-Shamlo, N., Kerick, S. E., & Robbins, K. A. (2017). BLINKER: automated
      extraction of ocular indices from EEG enabling large-scale analysis. Frontiers in
      neuroscience, 11, 12.

    """
    # Delineate each candidate blink: zero-crossings, up/down strokes and BAR
    BARs, _, leftzeros, rightzeros, downstrokes, upstrokes = _eog_features_delineate(
        eog_cleaned, peaks, sampling_rate=sampling_rate
    )

    pAVR_list = []
    nAVR_list = []
    duration_list = []
    for upstroke, downstroke in zip(upstrokes, downstrokes):
        # Closing phase (pAVR): peak upward velocity scaled by upstroke duration
        rise_velocity = np.diff(upstroke.Signal)
        rise_duration = len(rise_velocity) / sampling_rate
        pAVR_list.append(abs(rise_velocity.max() / rise_duration) * 100)

        # Opening phase (nAVR): peak velocity scaled by downstroke duration
        fall_velocity = np.diff(downstroke.Signal)
        fall_duration = len(fall_velocity) / sampling_rate
        nAVR_list.append(abs(fall_velocity.max() / fall_duration) * 100)

        # Total blink duration in seconds (upstroke + downstroke samples)
        duration_list.append((len(upstroke.Signal) + len(downstroke.Signal)) / sampling_rate)

    # Assemble the output dictionary
    info = {
        "Blink_LeftZeros": leftzeros,
        "Blink_RightZeros": rightzeros,
        "Blink_pAVR": pAVR_list,
        "Blink_nAVR": nAVR_list,
        "Blink_BAR": BARs,
        "Blink_Duration": duration_list,
    }
    return info
# =============================================================================
# Internals
# =============================================================================
def _eog_features_delineate(eog_cleaned, candidates, sampling_rate=1000):
    """Delineate the landmarks of each candidate blink.

    For every candidate peak, a 1-second epoch (-0.5 to +0.5 s) is extracted and the
    following landmarks are located: the blink maximum, the zero-crossings to its left
    (eye closing) and right (eye opening), the upstroke and downstroke segments, and the
    blink-amplitude-ratio (BAR) used to reject low-SNR candidates.

    Returns
    -------
    tuple
        ``(BARs, peaks, leftzeros, rightzeros, downstrokes, upstrokes)``, where ``peaks``
        contains only the candidates whose BAR falls within (3, 50); the other lists
        contain one entry per candidate.
    """
    # Calculate blink landmarks
    epochs = epochs_create(
        eog_cleaned,
        events=candidates,
        sampling_rate=sampling_rate,
        epochs_start=-0.5,
        epochs_end=0.5,
    )
    # max value marker
    peaks = []
    leftzeros = []
    rightzeros = []
    downstrokes = []
    upstrokes = []
    BARs = []
    for i in epochs:
        max_value = epochs[i].Signal.max()
        # Check if peak is at the end or start of epoch; if so, trim the epoch so the
        # maximum falls inside it and recompute the maximum.
        # NOTE(review): the chained comparison `0.3 < t < 0.51` only evaluates cleanly when
        # `t` holds a single timestamp; several tied maxima would raise — confirm upstream.
        t = epochs[i].loc[epochs[i]["Signal"] == max_value].index
        if np.all(0.3 < t < 0.51):
            # Trim end of epoch
            epochs[i] = epochs[i][-0.5:0.3]
            max_value = epochs[i].Signal.max()
        if np.all(-0.51 < t < -0.3):
            # Trim start of epoch
            epochs[i] = epochs[i][-0.3:0.5]
            max_value = epochs[i].Signal.max()
        # Find position of peak (sample index of the maximum in the original signal)
        max_frame = epochs[i]["Index"].loc[epochs[i]["Signal"] == max_value]
        max_frame = np.array(max_frame)
        if len(max_frame) > 1:
            max_frame = max_frame[0]  # If two points achieve max value, first one is blink
        else:
            max_frame = int(max_frame)
        # left and right zero markers: sort the zero-crossing indices together with the
        # peak index so the crossings adjacent to the peak can be picked positionally
        crossings = signal_zerocrossings(epochs[i].Signal)
        crossings_idx = epochs[i]["Index"].iloc[crossings]
        crossings_idx = np.sort(np.append([np.array(crossings_idx)], [max_frame]))
        max_position = int(np.where(crossings_idx == max_frame)[0])
        if (max_position - 1) >= 0:  # crosses zero point
            leftzero = crossings_idx[max_position - 1]
        else:
            # No zero-crossing before the peak: fall back to the minimum preceding the peak
            max_value_t = epochs[i].Signal.idxmax()
            sliced_before = epochs[i].loc[slice(max_value_t), :]
            leftzero = sliced_before["Index"].loc[
                sliced_before["Signal"] == sliced_before["Signal"].min()
            ]
            leftzero = int(np.array(leftzero))
        if (max_position + 1) < len(crossings_idx):  # crosses zero point
            rightzero = crossings_idx[max_position + 1]
        else:
            # No zero-crossing after the peak: fall back to the minimum following the peak
            max_value_t = epochs[i].Signal.idxmax()
            sliced_before = epochs[i].loc[slice(max_value_t), :]
            sliced_after = epochs[i].tail(epochs[i].shape[0] - sliced_before.shape[0])
            rightzero = sliced_after["Index"].loc[
                sliced_after["Signal"] == sliced_after["Signal"].min()
            ]
            rightzero = int(np.array(rightzero))
        # upstroke and downstroke markers (leftzero -> peak, peak -> rightzero)
        upstroke_idx = list(np.arange(leftzero, max_frame))
        upstroke = epochs[i].loc[epochs[i]["Index"].isin(upstroke_idx)]
        downstroke_idx = list(np.arange(max_frame, rightzero))
        downstroke = epochs[i].loc[epochs[i]["Index"].isin(downstroke_idx)]
        # left base and right base markers: the signal outside the blink, used below for BAR
        leftbase_idx = list(np.arange(epochs[i]["Index"].iloc[0], leftzero))
        leftbase_signal = epochs[i].loc[epochs[i]["Index"].isin(leftbase_idx)]
        # leftbase_min = leftbase_signal['Signal'].min()
        # leftbase = np.array(leftbase_signal['Index'].loc[leftbase_signal['Signal'] == leftbase_min])[0]
        rightbase_idx = list(np.arange(rightzero, epochs[i]["Index"].iloc[epochs[i].shape[0] - 1]))
        rightbase_signal = epochs[i].loc[epochs[i]["Index"].isin(rightbase_idx)]
        # rightbase_min = rightbase_signal['Signal'].min()
        # rightbase = np.array(rightbase_signal['Index'].loc[rightbase_signal['Signal'] == rightbase_min])[0]
        # Rejecting candidate signals with low SNR (BAR = blink-amplitude-ratio):
        # mean amplitude inside the blink vs. mean positive amplitude outside it
        inside_blink_idx = list(np.arange(leftzero, rightzero))
        inside_blink = epochs[i].loc[epochs[i]["Index"].isin(inside_blink_idx)]
        outside_blink = pd.concat([leftbase_signal, rightbase_signal], axis=0)
        BAR = inside_blink.Signal.mean() / outside_blink.Signal[outside_blink["Signal"] > 0].mean()
        # Features of all candidates
        BARs.append(BAR)
        leftzeros.append(leftzero)
        rightzeros.append(rightzero)
        downstrokes.append(downstroke)
        upstrokes.append(upstroke)
        # BAR values in the range [5, 20] usually capture blinks reasonably well
        # (a wider (3, 50) band is used here to be permissive)
        if 3 < BAR < 50:
            peaks.append(max_frame)
    return BARs, peaks, leftzeros, rightzeros, downstrokes, upstrokes
| 8,542 | 38.009132 | 116 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_simulate.py | import numpy as np
import scipy.stats
# ==============================================================================
# THIS IS WIP and we would like to implement an EOG simulator. Please help!
# ==============================================================================
def _eog_simulate_blink(sampling_rate=1000, length=None, method="scr", parameters="default"):
"""**Simulate a canonical blink from vertical EOG**
Recommended parameters:
* For method ``"scr"``: ``[3.644, 0.422, 0.356, 0.943]``
* For method ``"gamma"``: ``[2.659, 5.172, 0.317]``
Examples
--------
.. ipython:: python
import neurokit2 as nk
blink_scr = _eog_simulate_blink(sampling_rate=100,
method='scr',
parameters=[3.644, 0.422, 0.356, 0.943])
blink_gamma = _eog_simulate_blink(sampling_rate=100,
method='gamma',
parameters=[2.659, 5.172, 0.317])
@savefig p_eog_simulate1.png scale=100%
nk.signal_plot([blink_scr, blink_gamma], sampling_rate=100)
@suppress
plt.close()
"""
if length is None:
length = int(sampling_rate)
x = np.linspace(0, 10, num=length)
if method.lower() == "scr":
if isinstance(parameters, str):
parameters = [3.644, 0.422, 0.356, 0.943]
gt = np.exp(-((x - parameters[0]) ** 2) / (2 * parameters[1] ** 2))
ht = np.exp(-x / parameters[2]) + np.exp(-x / parameters[3])
ft = np.convolve(gt, ht)
ft = ft[0 : len(x)]
y = ft / np.max(ft)
else:
if isinstance(parameters, str):
parameters = [2.659, 5.172, 0.317]
gamma = scipy.stats.gamma.pdf(x, a=parameters[1], loc=parameters[0], scale=parameters[2])
y = gamma / np.max(gamma)
return y
| 1,892 | 34.055556 | 97 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_process.py | # -*- coding: utf-8 -*-
import pandas as pd
from ..misc import as_vector
from ..signal import signal_rate
from ..signal.signal_formatpeaks import _signal_from_indices
from .eog_clean import eog_clean
from .eog_findpeaks import eog_findpeaks
def eog_process(veog_signal, sampling_rate=1000, **kwargs):
    """**Process an EOG signal**

    Convenience function that automatically processes an EOG signal.

    Parameters
    ----------
    veog_signal : Union[list, np.array, pd.Series]
        The raw vertical EOG channel. Note that it must be positively oriented, i.e., blinks must
        appear as upward peaks.
    sampling_rate : int
        The sampling frequency of :func:`.eog_signal` (in Hz, i.e., samples/second).
        Defaults to 1000.
    **kwargs
        Other arguments passed to other functions.

    Returns
    -------
    signals : DataFrame
        A DataFrame of the same length as the :func:`.eog_signal` containing the following columns:

        * ``"EOG_Raw"``: the raw signal.
        * ``"EOG_Clean"``: the cleaned signal.
        * ``"EOG_Blinks"``: the blinks marked as "1" in a list of zeros.
        * ``"EOG_Rate"``: eye blinks rate interpolated between blinks.

    info : dict
        A dictionary containing the samples at which the eye blinks occur, accessible with the key
        ``"EOG_Blinks"`` as well as the signals' sampling rate.

    See Also
    --------
    eog_clean, eog_findpeaks

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      eog_signal = nk.data('eog_100hz')

      signals, info = nk.eog_process(eog_signal, sampling_rate=100)

    References
    ----------
    * Agarwal, M., & Sivakumar, R. (2019, September). Blink: A Fully Automated Unsupervised
      Algorithm for Eye-Blink Detection in EEG Signals. In 2019 57th Annual Allerton Conference on
      Communication, Control, and Computing (Allerton) (pp. 1113-1121). IEEE.

    """
    # Sanitize input
    eog_signal = as_vector(veog_signal)

    # Preprocessing: filter the raw trace, then locate blink peaks
    eog_cleaned = eog_clean(eog_signal, sampling_rate=sampling_rate, **kwargs)
    blink_peaks = eog_findpeaks(eog_cleaned, sampling_rate=sampling_rate, **kwargs)
    info = {"EOG_Blinks": blink_peaks, "sampling_rate": sampling_rate}

    # Mark (potential) blink events and compute the interpolated blink rate
    n_samples = len(eog_cleaned)
    signal_blinks = _signal_from_indices(blink_peaks, desired_length=n_samples)
    rate = signal_rate(blink_peaks, sampling_rate=sampling_rate, desired_length=n_samples)

    # Prepare output
    signals = pd.DataFrame(
        {
            "EOG_Raw": eog_signal,
            "EOG_Clean": eog_cleaned,
            "EOG_Blinks": signal_blinks,
            "EOG_Rate": rate,
        }
    )
    return signals, info
| 2,741 | 30.159091 | 104 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_eventrelated.py | # -*- coding: utf-8 -*-
from warnings import warn
import numpy as np
from ..epochs.eventrelated_utils import (
_eventrelated_addinfo,
_eventrelated_rate,
_eventrelated_sanitizeinput,
_eventrelated_sanitizeoutput,
)
from ..misc import NeuroKitWarning
def eog_eventrelated(epochs, silent=False):
    """**Performs event-related EOG analysis on epochs**

    Parameters
    ----------
    epochs : Union[dict, pd.DataFrame]
        A dict containing one DataFrame per event/trial,
        usually obtained via :func:`.epochs_create`, or a DataFrame
        containing all epochs, usually obtained via :func:`.epochs_to_df`.
    silent : bool
        If True, silence possible warnings.

    Returns
    -------
    DataFrame
        A dataframe containing the analyzed EOG features for each epoch, with each epoch indicated
        by the `Label` column (if not present, by the `Index` column). The analyzed features
        consist of the following:

        * ``"EOG_Rate_Baseline"``: the baseline EOG rate before stimulus onset.
        * ``"EOG_Rate_Max"``: the maximum EOG rate after stimulus onset.
        * ``"EOG_Rate_Min"``: the minimum EOG rate after stimulus onset.
        * ``"EOG_Rate_Mean"``: the mean EOG rate after stimulus onset.
        * ``"EOG_Rate_SD"``: the standard deviation of the EOG rate after stimulus onset.
        * ``"EOG_Rate_Max_Time"``: the time at which maximum EOG rate occurs.
        * ``"EOG_Rate_Min_Time"``: the time at which minimum EOG rate occurs.
        * ``"EOG_Blinks_Presence"``: marked with '1' if a blink occurs in the epoch, and '0' if
          not.

    See Also
    --------
    events_find, epochs_create, bio_process

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Example with real data
      eog = nk.data('eog_100hz')

      # Process the data
      eog_signals, info = nk.bio_process(eog=eog, sampling_rate=100)
      epochs = nk.epochs_create(eog_signals, events=[500, 4000, 6000, 9000], sampling_rate=100,
                                epochs_start=-0.1,epochs_end=1.9)

      # Analyze
      nk.eog_eventrelated(epochs)

    """
    # Sanity checks
    epochs = _eventrelated_sanitizeinput(epochs, what="eog", silent=silent)

    # Extract features for each epoch and build the output dataframe
    data = {}
    for label, epoch in epochs.items():
        # Rate-based features
        features = _eventrelated_rate(epoch, {}, var="EOG_Rate")

        # Blink presence within the epoch
        features = _eog_eventrelated_features(epoch, features)

        # The trend indices are not meaningful for EOG rate; drop them if present
        for key in ("EOG_Rate_Trend_Quadratic", "EOG_Rate_Trend_Linear", "EOG_Rate_Trend_R2"):
            features.pop(key, None)

        # Fill with more info (label, condition, etc.)
        data[label] = _eventrelated_addinfo(epoch, features)

    return _eventrelated_sanitizeoutput(data)
# =============================================================================
# Internals
# =============================================================================
def _eog_eventrelated_features(epoch, output={}):
# Sanitize input
if "EOG_Blinks" not in epoch:
warn(
"Input does not have an `EOG_Blinks` column." " Unable to process blink features.",
category=NeuroKitWarning,
)
return output
if "EOG_Rate" not in epoch:
warn(
"Input does not have an `EOG_Rate` column." " Will skip computation of EOG rate.",
category=NeuroKitWarning,
)
return output
# Detect whether blink exists after onset of stimulus
blinks_presence = len(np.where(epoch["EOG_Blinks"][epoch.index > 0] == 1)[0])
if blinks_presence > 0:
output["EOG_Blinks_Presence"] = 1
else:
output["EOG_Blinks_Presence"] = 0
return output
| 3,862 | 29.65873 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/__init__.py | """Submodule for NeuroKit."""
from ..signal import signal_rate as eog_rate
from .eog_analyze import eog_analyze
from .eog_clean import eog_clean
from .eog_eventrelated import eog_eventrelated
from .eog_features import eog_features
from .eog_findpeaks import eog_findpeaks
from .eog_intervalrelated import eog_intervalrelated
from .eog_peaks import eog_peaks
from .eog_plot import eog_plot
from .eog_process import eog_process
__all__ = [
"eog_rate",
"eog_clean",
"eog_features",
"eog_findpeaks",
"eog_peaks",
"eog_process",
"eog_plot",
"eog_eventrelated",
"eog_intervalrelated",
"eog_analyze",
]
| 638 | 23.576923 | 52 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_peaks.py | from ..signal import signal_formatpeaks
from .eog_findpeaks import eog_findpeaks
def eog_peaks(veog_cleaned, sampling_rate=None, method="mne", **kwargs):
    """**Locate EOG eye blinks**

    Parameters
    ----------
    veog_cleaned : Union[list, np.array, pd.Series]
        The cleaned vertical EOG channel. Note that it must be positively oriented, i.e., blinks
        must appear as upward peaks.
    sampling_rate : int
        The signal sampling rate (in Hz, i.e., samples/second). Needed for method ``"blinker"`` or
        ``"jammes2008"``; otherwise defaults to ``None``.
    method : str
        The peak detection algorithm. Can be one of ``"neurokit"``, ``"mne"`` (requires the MNE
        package to be installed), or ``"brainstorm"`` or ``"blinker"``.
    **kwargs
        Other arguments passed to functions.

    Returns
    -------
    signals : DataFrame
        A DataFrame of the same length as ``veog_cleaned`` with the blink occurrences marked.
    info : dict
        A dictionary containing the samples at which the blinks occur (key ``"EOG_Blinks"``)
        and the sampling rate.

    See Also
    --------
    eog_clean

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Get data
      eog_signal = nk.data('eog_100hz')
      eog_cleaned = nk.eog_clean(eog_signal, sampling_rate=100)

    * **Example 1:** NeuroKit method

    .. ipython:: python

      signals, info_nk = nk.eog_peaks(eog_cleaned,
                                      sampling_rate=100,
                                      method="neurokit",
                                      threshold=0.33,
                                      show=True)
      @savefig p_eog_peaks1.png scale=100%
      nk.events_plot(info_nk["EOG_Blinks"], eog_cleaned)
      @suppress
      plt.close()

    * **Example 2:** MNE-method

    .. ipython:: python

      signals, info_mne = nk.eog_peaks(eog_cleaned, method="mne")
      @savefig p_eog_peaks2.png scale=100%
      nk.events_plot(info_mne["EOG_Blinks"], eog_cleaned)
      @suppress
      plt.close()

    * **Example 3:** brainstorm method

    .. ipython:: python

      signals, info_brainstorm = nk.eog_peaks(eog_cleaned, method="brainstorm")
      @savefig p_eog_peaks3.png scale=100%
      nk.events_plot(info_brainstorm["EOG_Blinks"], eog_cleaned)
      @suppress
      plt.close()

    * **Example 4:** blinker method

    .. ipython:: python

      signals, info_blinker = nk.eog_peaks(eog_cleaned, sampling_rate=100, method="blinker")
      @savefig p_eog_peaks4.png scale=100%
      nk.events_plot(info_blinker["EOG_Blinks"], eog_cleaned)
      @suppress
      plt.close()

    References
    ----------
    * Agarwal, M., & Sivakumar, R. (2019). Blink: A Fully Automated Unsupervised Algorithm for
      Eye-Blink Detection in EEG Signals. In 2019 57th Annual Allerton Conference on Communication,
      Control, and Computing (Allerton) (pp. 1113-1121). IEEE.
    * Kleifges, K., Bigdely-Shamlo, N., Kerick, S. E., & Robbins, K. A. (2017). BLINKER: automated
      extraction of ocular indices from EEG enabling large-scale analysis. Frontiers in
      neuroscience, 11, 12.

    """
    # Locate blink peaks with the requested algorithm
    blinks = eog_findpeaks(veog_cleaned, sampling_rate=sampling_rate, method=method, **kwargs)
    info = {"EOG_Blinks": blinks}

    # Convert the peak indices into an occurrence signal of the same length as the input
    signals = signal_formatpeaks(info, desired_length=len(veog_cleaned), peak_indices=blinks)

    # Record the sampling rate only after formatting, so it is not treated as a peak series
    info["sampling_rate"] = sampling_rate

    return signals, info
| 3,523 | 32.245283 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/eog/eog_plot.py | # -*- coding: utf-8 -*-
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..epochs import epochs_create, epochs_to_array, epochs_to_df
from ..stats import standardize
def eog_plot(eog_signals, peaks=None, sampling_rate=None):
    """**Visualize EOG data**

    Plots the raw and cleaned EOG signal with detected blinks, the blink rate, and — when
    ``sampling_rate`` is provided — an overlay of the individual standardized blinks.

    Parameters
    ----------
    eog_signals : DataFrame
        DataFrame obtained from :func:`.eog_process`.
    peaks : dict
        The samples at which the blink peaks occur. Dict returned by
        :func:`.eog_process`. Defaults to ``None``. Must be specified to plot individual blinks.
    sampling_rate : int
        The sampling frequency of the EOG (in Hz, i.e., samples/second). Needs to be supplied if
        the data should be plotted over time in seconds. Otherwise the data is plotted over
        samples. Defaults to ``None``. Must be specified to plot individual blinks.

    See Also
    --------
    eog_process

    Returns
    -------
    Though the function returns nothing, the figure can be retrieved and saved as follows:

    .. code-block:: console

        # To be run after eog_plot()
        fig = plt.gcf()
        fig.savefig("myfig.png")

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate data
      eog_signal = nk.data('eog_100hz')

      # Process signal
      eog_signals, peaks = nk.eog_process(eog_signal, sampling_rate=100)

      # Plot
      @savefig p.eog_plot.png scale=100%
      nk.eog_plot(eog_signals, peaks, sampling_rate=100)
      @suppress
      plt.close()

    """
    # Sanity-check input.
    if not isinstance(eog_signals, pd.DataFrame):
        raise ValueError(
            "NeuroKit error: eog_plot(): The `eog_signals` argument must"
            " be the DataFrame returned by `eog_process()`."
        )
    # Prepare figure. With a sampling rate, the x-axis is time (seconds) and a third panel
    # (individual blinks) is added on the right; otherwise the x-axis is in samples.
    if sampling_rate is not None:
        x_axis = np.linspace(0, eog_signals.shape[0] / sampling_rate, eog_signals.shape[0])
        gs = matplotlib.gridspec.GridSpec(2, 2, width_ratios=[1 - 1 / np.pi, 1 / np.pi])
        fig = plt.figure(constrained_layout=False)
        ax0 = fig.add_subplot(gs[0, :-1])
        ax1 = fig.add_subplot(gs[1, :-1])
        ax2 = fig.add_subplot(gs[:, -1])
        ax0.set_xlabel("Time (seconds)")
        ax1.set_xlabel("Time (seconds)")
        ax2.set_xlabel("Time (seconds)")
    else:
        x_axis = np.arange(0, eog_signals.shape[0])
        fig, (ax0, ax1) = plt.subplots(nrows=2, ncols=1, sharex=True)
        ax0.set_xlabel("Samples")
        ax1.set_xlabel("Samples")
    fig.suptitle("Electrooculography (EOG)", fontweight="bold")
    plt.tight_layout(h_pad=0.3, w_pad=0.2)
    # Plot cleaned and raw EOG
    ax0.set_title("Raw and Cleaned Signal")
    ax0.plot(x_axis, eog_signals["EOG_Raw"], color="#B0BEC5", label="Raw", zorder=1)
    ax0.plot(
        x_axis, eog_signals["EOG_Clean"], color="#49A4FD", label="Cleaned", zorder=1, linewidth=1.5
    )
    ax0.set_ylabel("Amplitude (mV)")
    # Plot blinks as markers on top of the cleaned signal
    blinks = np.where(eog_signals["EOG_Blinks"] == 1)[0]
    ax0.scatter(
        x_axis[blinks], eog_signals["EOG_Clean"][blinks], color="#0146D7", label="Blinks", zorder=2
    )
    ax0.legend(loc="upper right")
    # Rate: interpolated blink rate with its mean as a dashed reference line
    ax1.set_title("Blink Rate")
    ax1.set_ylabel("Blinks per minute")
    blink_rate_mean = eog_signals["EOG_Rate"].mean()
    ax1.plot(x_axis, eog_signals["EOG_Rate"], color="#9C5AFF", label="Rate", linewidth=1.5)
    ax1.axhline(y=blink_rate_mean, label="Mean", linestyle="--", color="#CEAFFF")
    ax1.legend(loc="upper right")
    # Plot individual blinks
    # NOTE(review): this branch indexes `peaks["EOG_Blinks"]`, so passing a sampling_rate
    # without `peaks` raises — the docstring requires both for this panel.
    if sampling_rate is not None:
        ax2.set_title("Individual Blinks")
        # Create epochs of -0.3 to +0.7 s around each blink peak
        events = epochs_create(
            eog_signals["EOG_Clean"],
            peaks["EOG_Blinks"],
            sampling_rate=sampling_rate,
            epochs_start=-0.3,
            epochs_end=0.7,
        )
        events_array = epochs_to_array(events)  # Convert to 2D array
        events_array = standardize(
            events_array
        )  # Rescale so that all the blinks are on the same scale
        blinks_df = epochs_to_df(events)
        blinks_wide = blinks_df.pivot(index="Time", columns="Label", values="Signal")
        blinks_wide = standardize(blinks_wide)
        cmap = iter(plt.cm.RdBu(np.linspace(0, 1, num=len(events))))
        for x, color in zip(blinks_wide, cmap):
            ax2.plot(blinks_wide[x], color=color, linewidth=0.4, zorder=1)
        # Plot with their median (used here as a robust average)
        ax2.plot(
            np.array(blinks_wide.index),
            np.median(events_array, axis=1),
            linewidth=2,
            linestyle="--",
            color="black",
            label="Median",
        )
        ax2.legend(loc="upper right")
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_ordinalpatterns.py | import numpy as np
from .utils_complexity_embedding import complexity_embedding
def complexity_ordinalpatterns(signal, delay=1, dimension=3, algorithm="quicksort", **kwargs):
    """**Find Ordinal Patterns for Permutation Procedures**

    The seminal work by Bandt and Pompe (2002) introduced a symbolization approach to obtain a
    sequence of ordinal patterns (permutations) from continuous data. It is used in
    :func:`permutation entropy <entropy_permutation>` and its different variants.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    algorithm : str
        Can be ``"quicksort"`` (default) or ``"bubblesort"`` (used in Bubble Entropy).

    Returns
    -------
    array
        Ordinal patterns.
    vector
        Frequencies of each ordinal pattern.
    dict
        A dictionary containing additional elements.

    Examples
    ----------
    Example given by Bandt and Pompe (2002):

    .. ipython:: python

      import neurokit2 as nk

      signal = [4, 7, 9, 10, 6, 11, 3]

      patterns, info = nk.complexity_ordinalpatterns(signal, delay=1, dimension=3)
      patterns
      info["Frequencies"]

    .. ipython:: python

      signal = [4, 7, 9, 10, 6, 5, 3, 6, 8, 9, 5, 1, 0]

      patterns, info = nk.complexity_ordinalpatterns(signal, algorithm="bubblesort")
      info["Frequencies"]

    References
    ----------
    * Bandt, C., & Pompe, B. (2002). Permutation entropy: a natural complexity measure for time
      series. Physical review letters, 88(17), 174102.
    * Manis, G., Aktaruzzaman, M. D., & Sassi, R. (2017). Bubble entropy: An entropy almost free of
      parameters. IEEE Transactions on Biomedical Engineering, 64(11), 2711-2718.

    """
    # Time-delay embedding
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    info = {"Embedded": embedded}

    # Symbolize: transform each embedded vector into an ordinal pattern
    if algorithm == "bubblesort":
        info["Permutations"] = _bubblesort(embedded)
    else:
        info["Permutations"] = embedded.argsort(kind="quicksort")

    # Unique patterns, their inverse mapping, and their counts
    patterns, info["Uniques"], counts = np.unique(
        info["Permutations"],
        axis=0,
        return_inverse=True,
        return_counts=True,
    )

    # Find all possible patterns (not needed for now)
    # all_symbols = np.array(list(map(np.array, list(itertools.permutations(np.arange(delay * dimension))))))

    # Normalize the counts into relative frequencies
    info["Frequencies"] = counts / counts.sum()

    return patterns, info
def _bubblesort(embedded):
"""
Manis, G., Aktaruzzaman, M. D., & Sassi, R. (2017). Bubble entropy: An entropy almost free of
parameters. IEEE Transactions on Biomedical Engineering, 64(11), 2711-2718.
"""
n, n_dim = np.shape(embedded)
swaps = np.zeros(n)
for y in range(n):
for t in range(n_dim - 1):
for kk in range(n_dim - t - 1):
if embedded[y, kk] > embedded[y, kk + 1]:
embedded[y, kk], embedded[y, kk + 1] = embedded[y, kk + 1], embedded[y, kk]
swaps[y] += 1
return swaps
| 3,626 | 33.216981 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity.py | import numpy as np
import pandas as pd
from .complexity_hjorth import complexity_hjorth
from .entropy_attention import entropy_attention
from .entropy_bubble import entropy_bubble
from .entropy_multiscale import entropy_multiscale
from .entropy_permutation import entropy_permutation
from .entropy_svd import entropy_svd
from .fractal_dfa import fractal_dfa
from .fractal_linelength import fractal_linelength
def complexity(signal, which="makowski2022", delay=1, dimension=2, tolerance="sd", **kwargs):
"""**Complexity and Chaos Analysis**
Measuring the complexity of a signal refers to the quantification of various aspects related to
concepts such as **chaos**, **entropy**, **unpredictability**, and **fractal dimension**.
.. tip::
We recommend checking our open-access `review <https://onlinelibrary.wiley.com/doi/10.1111/ejn.15800>`_ for an
introduction to **fractal physiology** and its application in neuroscience.
There are many indices that have been developed and used to assess the complexity of signals,
and all of them come with different specificities and limitations. While they should be used in
an informed manner, it is also convenient to have a single function that can compute multiple
indices at once.
The ``nk.complexity()`` function can be used to compute a useful subset of complexity metrics
and features. While this is great for exploratory analyses, we recommend running each function
separately, to gain more control over the parameters and information that you get.
.. warning::
The indices included in this function will be subjected to change in future versions,
depending on what the literature suggests. We recommend using this function only for quick
exploratory analyses, but then replacing it by the calls to the individual functions.
Check-out our `open-access study <https://onlinelibrary.wiley.com/doi/10.1111/ejn.15800>`_
explaining the selection of indices.
The categorization by "computation time" is based on `our study
<https://www.mdpi.com/1099-4300/24/8/1036>`_ results:
.. figure:: https://raw.githubusercontent.com/DominiqueMakowski/ComplexityStructure/main/figures/time1-1.png
:alt: Complexity Benchmark (Makowski).
:target: https://www.mdpi.com/1099-4300/24/8/1036
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
which : list
What metrics to compute. Can be "makowski2022".
delay : int
Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
See :func:`complexity_delay` to estimate the optimal value for this parameter.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
tolerance : float
Tolerance (often denoted as *r*), distance to consider two data points as similar. If
``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
:func:`complexity_tolerance` to estimate the optimal value for this parameter.
Returns
--------
df : pd.DataFrame
A dataframe with one row containing the results for each metric as columns.
info : dict
A dictionary containing additional information.
See Also
--------
complexity_delay, complexity_dimension, complexity_tolerance
Examples
----------
* **Example 1**: Compute fast and medium-fast complexity metrics
.. ipython:: python
import neurokit2 as nk
# Simulate a signal of 3 seconds
signal = nk.signal_simulate(duration=3, frequency=[5, 10])
# Compute selection of complexity metrics (Makowski et al., 2022)
df, info = nk.complexity(signal, which = "makowski2022")
df
* **Example 2**: Compute complexity over time
.. ipython:: python
import numpy as np
import pandas as pd
import neurokit2 as nk
# Create dynamically varying noise
amount_noise = nk.signal_simulate(duration=2, frequency=0.9)
amount_noise = nk.rescale(amount_noise, [0, 0.5])
noise = np.random.uniform(0, 2, len(amount_noise)) * amount_noise
# Add to simple signal
signal = noise + nk.signal_simulate(duration=2, frequency=5)
@savefig p_complexity1.png scale=100%
nk.signal_plot(signal, sampling_rate = 1000)
@suppress
plt.close()
.. ipython:: python
# Create function-wrappers that only return the index value
pfd = lambda x: nk.fractal_petrosian(x)[0]
kfd = lambda x: nk.fractal_katz(x)[0]
sfd = lambda x: nk.fractal_sevcik(x)[0]
svden = lambda x: nk.entropy_svd(x)[0]
fisher = lambda x: -1 * nk.fisher_information(x)[0] # FI is anticorrelated with complexity
# Use them in a rolling window
rolling_kfd = pd.Series(signal).rolling(500, min_periods = 300, center=True).apply(kfd)
rolling_pfd = pd.Series(signal).rolling(500, min_periods = 300, center=True).apply(pfd)
rolling_sfd = pd.Series(signal).rolling(500, min_periods = 300, center=True).apply(sfd)
rolling_svden = pd.Series(signal).rolling(500, min_periods = 300, center=True).apply(svden)
rolling_fisher = pd.Series(signal).rolling(500, min_periods = 300, center=True).apply(fisher)
@savefig p_complexity2.png scale=100%
nk.signal_plot([signal,
rolling_kfd.values,
rolling_pfd.values,
rolling_sfd.values,
rolling_svden.values,
rolling_fisher],
labels = ["Signal",
"Petrosian Fractal Dimension",
"Katz Fractal Dimension",
"Sevcik Fractal Dimension",
"SVD Entropy",
"Fisher Information"],
sampling_rate = 1000,
standardize = True)
@suppress
plt.close()
References
----------
* Lau, Z. J., Pham, T., Chen, S. H. A., & Makowski, D. (2022). Brain entropy, fractal
dimensions and predictability: A review of complexity measures for EEG in healthy and
neuropsychiatric populations. European Journal of Neuroscience, 1-23.
* Makowski, D., Te, A. S., Pham, T., Lau, Z. J., & Chen, S. H. (2022). The Structure of Chaos:
An Empirical Comparison of Fractal Physiology Complexity Indices Using NeuroKit2. Entropy, 24
(8), 1036.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Initialize
df = {}
info = {}
# Fast ======================================================================================
if which in ["makowski2022", "makowski"]:
df["LL"], info["LL"] = fractal_linelength(signal)
df["Hjorth"], info["Hjorth"] = complexity_hjorth(signal)
df["AttEn"], info["AttEn"] = entropy_attention(signal)
df["SVDEn"], info["SVDEn"] = entropy_svd(signal, delay=delay, dimension=dimension)
df["BubbEn"], info["BubbEn"] = entropy_bubble(
signal, delay=delay, dimension=dimension, **kwargs
)
df["CWPEn"], info["CWPEn"] = entropy_permutation(
signal,
delay=delay,
dimension=dimension,
corrected=True,
weighted=True,
conditional=True,
**kwargs
)
df["MSPEn"], info["MSPEn"] = entropy_multiscale(
signal, dimension=dimension, method="MSPEn", **kwargs
)
mfdfa, _ = fractal_dfa(signal, multifractal=True, **kwargs)
for k in mfdfa.columns:
df["MFDFA_" + k] = mfdfa[k].values[0]
# Prepare output
df = pd.DataFrame.from_dict(df, orient="index").T # Convert to dataframe
df = df.reindex(sorted(df.columns), axis=1) # Reorder alphabetically
return df, info
| 8,255 | 40.074627 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_dfa.py | # -*- coding: utf-8 -*-
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import find_knee
def fractal_dfa(
    signal,
    scale="default",
    overlap=True,
    integrate=True,
    order=1,
    multifractal=False,
    q="default",
    maxdfa=False,
    show=False,
    **kwargs,
):
    """**(Multifractal) Detrended Fluctuation Analysis (DFA or MFDFA)**

    Detrended fluctuation analysis (DFA) is used to find long-term statistical dependencies in time
    series.

    For monofractal DFA, the output *alpha* :math:`\\alpha` corresponds to the slope of the linear
    trend between the scale factors and the fluctuations. For multifractal DFA, the slope values
    under different *q* values are actually generalised Hurst exponents *h*. Monofractal DFA
    corresponds to MFDFA with *q = 2*, and its output is actually an estimation of the
    **Hurst exponent** (:math:`h_{(2)}`).

    The Hurst exponent is the measure of long range autocorrelation of a signal, and
    :math:`h_{(2)} > 0.5` suggests the presence of long range correlation, while
    :math:`h_{(2)} < 0.5`suggests short range correlations. If :math:`h_{(2)} = 0.5`, it indicates
    uncorrelated indiscriminative fluctuations, i.e. a Brownian motion.

    .. figure:: ../img/douglas2022a.png
       :alt: Illustration of DFA (Douglas et al., 2022).

    Multifractal DFA returns the generalised Hurst exponents *h* for different values of *q*. It is
    converted to the multifractal **scaling exponent** *Tau* :math:`\\tau_{(q)}`, which non-linear
    relationship with *q* can indicate multifractility. From there, we derive the singularity
    exponent *H* (or :math:`\\alpha`) (also known as Hölder's exponents) and the singularity
    dimension *D* (or :math:`f(\\alpha)`). The variation of *D* with *H* is known as multifractal
    singularity spectrum (MSP), and usually has shape of an inverted parabola. It measures the long
    range correlation property of a signal. From these elements, different features are extracted:

    * **Width**: The width of the singularity spectrum, which quantifies the degree of the
      multifractality. In the case of monofractal signals, the MSP width is zero, since *h*\\(q) is
      independent of *q*.
    * **Peak**: The value of the singularity exponent *H* corresponding to the peak of
      singularity dimension *D*. It is a measure of the self-affinity of the signal, and a high
      value is an indicator of high degree of correlation between the data points. In the other
      words, the process is recurrent and repetitive.
    * **Mean**: The mean of the maximum and minimum values of singularity exponent *H*, which
      quantifies the average fluctuations of the signal.
    * **Max**: The value of singularity spectrum *D* corresponding to the maximum value of
      singularity exponent *H*, which indicates the maximum fluctuation of the signal.
    * **Delta**: the vertical distance between the singularity spectrum *D* where the singularity
      exponents are at their minimum and maximum. Corresponds to the range of fluctuations of the
      signal.
    * **Asymmetry**: The Asymmetric Ratio (AR) corresponds to the centrality of the peak of the
      spectrum. AR = 0.5 indicates that the multifractal spectrum is symmetric (Orozco-Duque et
      al., 2015).
    * **Fluctuation**: The *h*-fluctuation index (hFI) is defined as the power of the second
      derivative of *h*\\(q). See Orozco-Duque et al. (2015).
    * **Increment**: The cumulative function of the squared increments (:math:`\\alpha CF`) of the
      generalized Hurst's exponents between consecutive moment orders is a more robust index of the
      distribution of the generalized Hurst's exponents (Faini et al., 2021).

    This function can be called either via ``fractal_dfa()`` or ``complexity_dfa()``, and its
    multifractal variant can be directly accessed via ``fractal_mfdfa()`` or ``complexity_mfdfa()``.

    .. note ::

      Help is needed to implement the modified formula to compute the slope when
      *q* = 0. See for instance Faini et al. (2021). See https://github.com/LRydin/MFDFA/issues/33

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : list
        A list containing the lengths of the windows (number of data points in each subseries) that
        the signal is divided into. Also referred to as Tau :math:`\\tau`. If ``"default"``, will
        set it to a logarithmic scale (so that each window scale has the same weight) with a
        minimum of 4 and maximum of a tenth of the length (to have more than 10 windows to
        calculate the average fluctuation).
    overlap : bool
        Defaults to ``True``, where the windows will have a 50% overlap with each other, otherwise
        non-overlapping windows will be used.
    integrate : bool
        It is common practice to convert the signal to a random walk (i.e., detrend and integrate,
        which corresponds to the signal 'profile') in order to avoid having too small exponent
        values. Note that it leads to the flattening of the signal, which can lead to the loss of
        some details (see Ihlen, 2012 for an explanation). Note that for strongly anticorrelated
        signals, this transformation should be applied two times (i.e., provide
        ``np.cumsum(signal - np.mean(signal))`` instead of ``signal``).
    order : int
        The order of the polynomial trend for detrending. 1 corresponds to a linear detrending.
    multifractal : bool
        If ``True``, compute Multifractal Detrended Fluctuation Analysis (MFDFA), in which case the
        argument ``q`` is taken into account.
    q : Union[int, list, np.array]
        The sequence of fractal exponents when ``multifractal=True``. Must be a sequence between
        -10 and 10 (note that zero will be removed, since the code does not converge there).
        Setting ``q = 2`` (default for DFA) gives a result of a standard DFA. For instance, Ihlen
        (2012) uses ``q = [-5, -3, -1, 0, 1, 3, 5]`` (default when for multifractal). In general,
        positive q moments amplify the contribution of fractal components with larger amplitude and
        negative q moments amplify the contribution of fractal with smaller amplitude (Kantelhardt
        et al., 2002).
    maxdfa : bool
        If ``True``, it will locate the knee of the fluctuations (using :func:`.find_knee`) and use
        that as a maximum scale value. It computes max. DFA (a similar method exists in
        :func:`entropy_rate`).
    show : bool
        Visualise the trend between the window size and the fluctuations.
    **kwargs : optional
        Currently not used.

    Returns
    ----------
    dfa : float or pd.DataFrame
        If ``multifractal`` is ``False``, one DFA value is returned for a single time series.
    parameters : dict
        A dictionary containing additional information regarding the parameters used
        to compute DFA. If ``multifractal`` is ``False``, the dictionary contains q value, a
        series of windows, fluctuations of each window and the
        slopes value of the log2(windows) versus log2(fluctuations) plot. If
        ``multifractal`` is ``True``, the dictionary additionally contains the
        parameters of the singularity spectrum.

    See Also
    --------
    fractal_hurst, fractal_tmf, entropy_rate

    Examples
    ----------
    **Example 1:** Monofractal DFA

    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, frequency=[5, 7, 10, 14], noise=0.05)

      @savefig p_fractal_dfa1.png scale=100%
      dfa, info = nk.fractal_dfa(signal, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      dfa

    As we can see from the plot, the final value, corresponding to the slope of the red line,
    doesn't capture properly the relationship. We can adjust the *scale factors* to capture the
    fractality of short-term fluctuations.

    .. ipython:: python

      scale = nk.expspace(10, 100, 20, base=2)

      @savefig p_fractal_dfa2.png scale=100%
      dfa, info = nk.fractal_dfa(signal, scale=scale, show=True)
      @suppress
      plt.close()

    **Example 2:** Multifractal DFA (MFDFA)

    .. ipython:: python

      @savefig p_fractal_dfa3.png scale=100%
      mfdfa, info = nk.fractal_mfdfa(signal, q=[-5, -3, -1, 0, 1, 3, 5], show=True)
      @suppress
      plt.close()

    .. ipython:: python

      mfdfa

    References
    -----------
    * Faini, A., Parati, G., & Castiglioni, P. (2021). Multiscale assessment of the degree of
      multifractality for physiological time series. Philosophical Transactions of the Royal
      Society A, 379(2212), 20200254.
    * Orozco-Duque, A., Novak, D., Kremen, V., & Bustamante, J. (2015). Multifractal analysis for
      grading complex fractionated electrograms in atrial fibrillation. Physiological Measurement,
      36(11), 2269-2284.
    * Ihlen, E. A. F. E. (2012). Introduction to multifractal detrended
      fluctuation analysis in Matlab. Frontiers in physiology, 3, 141.
    * Kantelhardt, J. W., Zschiegner, S. A., Koscielny-Bunde, E., Havlin, S.,
      Bunde, A., & Stanley, H. E. (2002). Multifractal detrended fluctuation
      analysis of nonstationary time series. Physica A: Statistical
      Mechanics and its Applications, 316(1-4), 87-114.
    * Hardstone, R., Poil, S. S., Schiavone, G., Jansen, R., Nikulin, V. V.,
      Mansvelder, H. D., & Linkenkaer-Hansen, K. (2012). Detrended
      fluctuation analysis: a scale-free view on neuronal oscillations.
      Frontiers in physiology, 3, 450.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    n = len(signal)
    # Resolve `scale` into a validated array of window lengths
    scale = _fractal_dfa_findscales(n, scale)
    # Sanitize fractal power (cannot be close to 0)
    q = _sanitize_q(q, multifractal=multifractal)
    # Store parameters
    info = {"scale": scale, "q": q}
    # Preprocessing
    if integrate is True:
        # Get signal profile (detrended cumulative sum)
        signal = np.cumsum(signal - np.mean(signal))
    # Function to store fluctuations. For DFA this is an array. For MFDFA, this
    # is a matrix of size (len(scale),len(q))
    fluctuations = np.zeros((len(scale), len(q)))
    # Start looping over scale
    for i, window in enumerate(scale):
        # Get window (matrix of segments, 50% overlapping if overlap=True)
        segments = _fractal_dfa_getwindow(signal, n, window, overlap=overlap)
        # Get polynomial trends (one order-`order` fit per segment)
        trends = _fractal_dfa_trends(segments, window, order=order)
        # Get local fluctuation (one value per q power)
        fluctuations[i] = _fractal_dfa_fluctuation(segments, trends, q)
    # Nothing could be computed: bail out with NaN but keep the parameters
    if len(fluctuations) == 0:
        return np.nan, info
    # Max. DFA ---------------------
    if maxdfa is True:
        # Find knees of fluctuations (one knee per q power)
        knee = np.repeat(len(scale), fluctuations.shape[1])
        for i in range(fluctuations.shape[1]):
            knee[i] = find_knee(
                y=np.log2(fluctuations[:, i]), x=np.log2(scale), show=False, verbose=False
            )
        # NOTE(review): exp2 assumes find_knee returns a position in log2(scale)
        # units — TODO confirm against find_knee's contract
        knee = np.exp2(np.nanmax(knee))
        # Cut fluctuations: keep only the scales up to the knee
        fluctuations = fluctuations[scale <= knee, :]
        scale = scale[scale <= knee]
    # ------------------------------
    # Get slopes (generalised Hurst exponents h(q); alpha for q=2)
    slopes = _slopes(scale, fluctuations, q)
    if len(slopes) == 1:
        slopes = slopes[0]
    # Prepare output
    info["Fluctuations"] = fluctuations
    info["Alpha"] = slopes
    # Extract features
    if multifractal is True:
        info.update(_singularity_spectrum(q=q, slopes=slopes))
        # Keep only the scalar summary features in the returned DataFrame
        out = pd.DataFrame(
            {
                k: v
                for k, v in info.items()
                if k
                in [
                    "Peak",
                    "Width",
                    "Mean",
                    "Max",
                    "Delta",
                    "Asymmetry",
                    "Fluctuation",
                    "Increment",
                ]
            },
            index=[0],
        )
    else:
        out = slopes
    # Plot if show is True.
    if show is True:
        if multifractal is True:
            _fractal_mdfa_plot(
                info,
                scale=scale,
                fluctuations=fluctuations,
                q=q,
            )
        else:
            _fractal_dfa_plot(info=info, scale=scale, fluctuations=fluctuations)
    return out, info
# =============================================================================
# Utils
# =============================================================================
def _fractal_dfa_findscales(n, scale="default"):
# Convert to array
if isinstance(scale, list):
scale = np.asarray(scale)
# Default scale number
if scale is None or isinstance(scale, str):
scale = int(n / 10)
# See https://github.com/neuropsychology/NeuroKit/issues/206
if isinstance(scale, int):
scale = np.exp(np.linspace(np.log(10), np.log(int(n / 10)), scale)).astype(int)
scale = np.unique(scale) # keep only unique
# Sanity checks (return warning for too short scale)
if len(scale) < 2:
raise ValueError("NeuroKit error: more than one window is needed. Increase 'scale'.")
if np.min(scale) < 2:
raise ValueError(
"NeuroKit error: there must be at least 2 data points in each window. Decrease 'scale'."
)
if np.max(scale) >= n:
raise ValueError(
"NeuroKit error: the window cannot contain more data points than the time series. Decrease 'scale'."
)
return scale
def _sanitize_q(q=2, multifractal=False):
# Turn to list
if isinstance(q, str):
if multifractal is False:
q = [2]
else:
q = [-5, -3, -1, 0, 1, 3, 5]
elif isinstance(q, (int, float)):
q = [q]
# Fractal powers as floats
q = np.asarray_chkfinite(q, dtype=float)
return q
def _slopes(scale, fluctuations, q):
# Extract the slopes of each `q` power obtained with MFDFA to later produce
# either the singularity spectrum or the multifractal exponents.
# Note: added by Leonardo Rydin (see https://github.com/LRydin/MFDFA/)
# Ensure mfdfa has the same q-power entries as q
if fluctuations.shape[1] != q.shape[0]:
raise ValueError("Fluctuation function and q powers don't match in dimension.")
# Allocated array for slopes
slopes = np.zeros(len(q))
# Find slopes of each q-power
for i in range(len(q)):
# if fluctiations is zero, log2 wil encounter zero division
old_setting = np.seterr(divide="ignore", invalid="ignore")
slopes[i] = np.polyfit(np.log2(scale), np.log2(fluctuations[:, i]), 1)[0]
np.seterr(**old_setting)
return slopes
def _fractal_dfa_getwindow(signal, n, window, overlap=True):
# This function reshapes the segments from a one-dimensional array to a
# matrix for faster polynomail fittings. 'Overlap' reshapes into several
# overlapping partitions of the data
# TODO: see whether this step could be integrated within complexity_coarsegraining
if overlap:
segments = np.array([signal[i : i + window] for i in np.arange(0, n - window, window // 2)])
else:
segments = signal[: n - (n % window)]
segments = segments.reshape((signal.shape[0] // window, window))
return segments
def _fractal_dfa_trends(segments, window, order=1):
# TODO: can we rely on signal_detrend?
x = np.arange(window)
coefs = np.polyfit(x[:window], segments.T, order).T
# TODO: Could this be optimized? Something like np.polyval(x[:window], coefs)
trends = np.array([np.polyval(coefs[j], x) for j in np.arange(len(segments))])
return trends
def _fractal_dfa_fluctuation(segments, trends, q=2):
# Detrend
detrended = segments - trends
# Compute variance
var = np.var(detrended, axis=1)
# Remove where var is zero
var = var[var > 1e-08]
if len(var) == 0:
warn("All detrended segments have no variance. Retuning NaN.")
return np.nan
# Find where q is close to zero. Limit set at |q| < 0.1
# See https://github.com/LRydin/MFDFA/issues/33
is0 = np.abs(q) < 0.1
# Reshape q to perform np.float_power
q_non0 = q[~is0].reshape(-1, 1)
# Get the fluctuation function, which is a function of the windows and of q
# When q = 2 (i.e., multifractal = False)
# The formula is equivalent to np.sqrt(np.mean(var))
# And corresponds to the Root Mean Square (RMS)
fluctuation = np.float_power(np.mean(np.float_power(var, q_non0 / 2), axis=1), 1 / q_non0.T)
if np.sum(is0) > 0:
fluc0 = np.exp(0.5 * np.mean(np.log(var)))
fluctuation = np.insert(fluctuation, np.where(is0)[0], [fluc0])
return fluctuation
# =============================================================================
# Utils MFDFA
# =============================================================================
def _singularity_spectrum(q, slopes):
"""Extract the slopes of the fluctuation function to futher obtain the
singularity strength `α` (or Hölder exponents) and singularity spectrum
`f(α)` (or fractal dimension). This is iconically shaped as an inverse
parabola, but most often it is difficult to obtain the negative `q` terms,
and one can focus on the left side of the parabola (`q>0`).
Note that these measures rarely match the theoretical expectation,
thus a variation of ± 0.25 is absolutely reasonable.
The parameters are mostly identical to `fractal_mfdfa()`, as one needs to
perform MFDFA to obtain the singularity spectrum. Calculating only the
DFA is insufficient, as it only has `q=2`, and a set of `q` values are
needed. Here defaulted to `q = list(range(-5,5))`, where the `0` element
is removed by `_cleanse_q()`.
This was first designed and implemented by Leonardo Rydin in
`MFDFA <https://github.com/LRydin/MFDFA/>`_ and ported here by the same.
"""
# Components of the singularity spectrum
# ---------------------------------------
# The generalised Hurst exponents `h(q)` from MFDFA, which are simply the slopes of each DFA
# for various `q` values
out = {"h": slopes}
# The generalised Hurst exponent h(q) is related to the Scaling Exponent Tau t(q):
out["Tau"] = q * slopes - 1
# Calculate Singularity Exponent H or α, which needs tau
out["H"] = np.gradient(out["Tau"]) / np.gradient(q)
# Calculate Singularity Dimension Dq or f(α), which needs tau and q
# The relation between α and f(α) (H and D) is called the Multifractal (MF) spectrum or
# singularity spectrum, which resembles the shape of an inverted parabola.
out["D"] = q * out["H"] - out["Tau"]
# Features (Orozco-Duque et al., 2015)
# ---------------------------------------
# The width of the MSP quantifies the degree of the multifractality
# the spectrum width delta quantifies the degree of the multifractality.
out["Width"] = np.nanmax(out["H"]) - np.nanmin(out["H"])
# the singularity exponent, for which the spectrum D takes its maximum value (α0)
out["Peak"] = out["H"][np.nanargmax(out["D"])]
# The mean of the maximum and minimum values of singularity exponent H
out["Mean"] = (np.nanmax(out["H"]) + np.nanmin(out["H"])) / 2
# The value of singularity spectrum D, corresponding to the maximum value of
# singularity exponent H, indicates the maximum fluctuation of the signal.
out["Max"] = out["D"][np.nanargmax(out["H"])]
# The vertical distance between the singularity spectrum *D* where the singularity
# exponents are at their minimum and maximum.
out["Delta"] = out["D"][np.nanargmax(out["H"])] - out["D"][np.nanargmin(out["H"])]
# the asymmetric ratio (AR) defined as the ratio between h calculated with negative q and the
# total width of the spectrum. If the multifractal spectrum is symmetric, AR should be equal to
# 0.5
out["Asymmetry"] = (np.nanmin(out["H"]) - out["Peak"]) / out["Width"]
# h-fluctuation index (hFI), which is defined as the power of the second derivative of h(q)
# hFI tends to zero in high fractionation signals.
if len(slopes) > 3:
# Help needed to double check that!
out["Fluctuation"] = np.sum(np.gradient(np.gradient(out["h"])) ** 2) / (
2 * np.max(np.abs(q)) + 2
)
else:
out["Fluctuation"] = np.nan
# hFI tends to zero in high fractionation signals. hFI has no reference point when a set of
# signals is evaluated, so hFI must be normalisedd, so that hFIn = 1 is the most organised and
# the most regular signal in the set
# BUT the formula in Orozco-Duque (2015) is weird, as HFI is a single value so cannot be
# normalized by its range...
# Faini (2021): new index that describes the distribution of the generalized Hurst's exponents
# without requiring the Legendre transform. This index, αCF, is the cumulative function of the
# squared increments of the generalized Hurst's exponents between consecutive moment orders.
out["Increment"] = np.sum(np.gradient(slopes) ** 2 / np.gradient(q))
return out
# =============================================================================
# Plots
# =============================================================================
def _fractal_dfa_plot(info, scale, fluctuations):
    """Plot the monofractal DFA log-log relationship between scale and
    fluctuations, with the fitted line whose slope is alpha."""
    # Refit the line in log2 space for display (same fit as in _slopes)
    polyfit = np.polyfit(np.log2(scale), np.log2(fluctuations), 1)
    fluctfit = 2 ** np.polyval(polyfit, np.log2(scale))
    # Scatter of the observed fluctuations per scale
    plt.loglog(scale, fluctuations, "o", c="#90A4AE")
    plt.xlabel(r"$\log_{2}$(Scale)")
    plt.ylabel(r"$\log_{2}$(Fluctuations)")
    # Fitted trend, labelled with the alpha estimate
    plt.loglog(scale, fluctfit, c="#E91E63", label=r"$\alpha$ = {:.3f}".format(info["Alpha"]))
    plt.legend(loc="lower right")
    plt.title("Detrended Fluctuation Analysis (DFA)")
    return None
def _fractal_mdfa_plot(info, scale, fluctuations, q):
    """Four-panel MFDFA summary figure: per-q log-log fluctuations with their
    fitted lines, the singularity spectrum D(H), tau(q), and H(q)."""
    # Prepare figure
    fig = plt.figure(constrained_layout=False)
    spec = matplotlib.gridspec.GridSpec(ncols=2, nrows=2)
    ax_fluctuation = fig.add_subplot(spec[0, 0])
    ax_spectrum = fig.add_subplot(spec[0, 1])
    ax_tau = fig.add_subplot(spec[1, 0])
    ax_hq = fig.add_subplot(spec[1, 1])
    n = len(q)
    # One viridis color per q value
    colors = plt.cm.viridis(np.linspace(0, 1, n))
    for i in range(n):
        # Plot the points
        ax_fluctuation.loglog(
            scale,
            fluctuations[:, i],
            "o",
            fillstyle="full",
            markeredgewidth=0.0,
            c=colors[i],
            alpha=0.2,
            markersize=6,
            base=2,
            zorder=1,
        )
        # Plot the polyfit line
        polyfit = np.polyfit(np.log2(scale), np.log2(fluctuations[:, i]), 1)
        fluctfit = 2 ** np.polyval(polyfit, np.log2(scale))
        ax_fluctuation.loglog(scale, fluctfit, c=colors[i], base=2, label="_no_legend_", zorder=2)
        # Add labels for max and min (legend shows only the extreme q values)
        if i == 0:
            ax_fluctuation.plot(
                [],
                label=f"$h$ = {polyfit[0]:.3f}, $q$ = {q[0]:.1f}",
                c=colors[0],
            )
        elif i == (n - 1):
            ax_fluctuation.plot(
                [],
                label=f"$h$ = {polyfit[0]:.3f}, $q$ = {q[-1]:.1f}",
                c=colors[-1],
            )
    ax_fluctuation.set_xlabel(r"$\log_{2}$(Scale)")
    ax_fluctuation.set_ylabel(r"$\log_{2}$(Fluctuations)")
    ax_fluctuation.legend(loc="lower right")
    # Plot the singularity spectrum
    # ax.set_title("Singularity Spectrum")
    ax_spectrum.set_ylabel(r"Singularity Dimension ($D$)")
    ax_spectrum.set_xlabel(r"Singularity Exponent ($H$)")
    ax_spectrum.axvline(
        x=info["Peak"],
        color="black",
        linestyle="dashed",
        label=r"Peak = {:.3f}".format(info["Peak"]),
    )
    ax_spectrum.plot(
        [np.nanmin(info["H"]), np.nanmax(info["H"])],
        [np.nanmin(info["D"])] * 2,
        color="red",
        linestyle="solid",
        label=r"Width = {:.3f}".format(info["Width"]),
    )
    ax_spectrum.plot(
        [np.nanmin(info["H"]), np.nanmax(info["H"])],
        [info["D"][-1], info["D"][0]],
        color="#B71C1C",
        linestyle="dotted",
        label=r"Delta = {:.3f}".format(info["Delta"]),
    )
    ax_spectrum.plot(info["H"], info["D"], "o-", c="#FFC107")
    ax_spectrum.legend(loc="lower right")
    # Plot tau against q
    # ax.set_title("Scaling Exponents")
    ax_tau.set_ylabel(r"Scaling Exponent ($τ$)")
    ax_tau.set_xlabel(r"Fractal Exponent ($q$)")
    ax_tau.plot(q, info["Tau"], "o-", c="#E91E63")
    # Plot H against q
    # ax.set_title("Generalised Hurst Exponents")
    ax_hq.set_ylabel(r"Singularity Exponent ($H$)")
    ax_hq.set_xlabel(r"Fractal Exponent ($q$)")
    ax_hq.plot(q, info["H"], "o-", c="#2196F3")
    fig.suptitle("Multifractal Detrended Fluctuation Analysis (MFDFA)")
    return None
| 25,243 | 38.567398 | 112 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_kolmogorov.py | import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
from .optim_complexity_tolerance import complexity_tolerance
def entropy_kolmogorov(signal=None, delay=1, dimension=3, tolerance="sd"):
    """**Kolmogorov Entropy (K2 or K2En)**

    Kolmogorov (metric) entropy, estimated following Grassberger & Procaccia
    (1983) as the log-ratio of the correlation integrals computed at embedding
    dimensions *m* and *m + 1*, divided by the time delay. It is related to
    the Lyapunov exponents.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.

    Returns
    --------
    k2 : float
        The Kolmogorov Entropy of the signal (NaN when the correlation
        integral at dimension *m + 1* is zero).
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)
      k2, info = nk.entropy_kolmogorov(signal)
      k2

    References
    -----------
    * Grassberger, P., & Procaccia, I. (1983). Estimation of the Kolmogorov entropy from a chaotic
      signal. Physical review A, 28(4), 2591.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Resolve the tolerance once and reuse it for both correlation integrals
    r = complexity_tolerance(signal, method=tolerance, dimension=dimension, show=False)[0]
    info = {"Dimension": dimension, "Delay": delay, "Tolerance": r}
    # Correlation integrals at embedding dimensions m and m + 1
    c_m = _correlation_integral(signal, delay=delay, dimension=dimension, tolerance=r)
    c_m1 = _correlation_integral(signal, delay=delay, dimension=dimension + 1, tolerance=r)
    # K2 is undefined when no pairs are close at the larger dimension
    if c_m1 == 0:
        return np.nan, info
    return np.log(c_m / c_m1) / delay, info
def _correlation_integral(signal, delay=3, dimension=2, tolerance=0.2):
    """Estimate the correlation integral C(r): the fraction of pairs of
    embedded state vectors whose Euclidean distance is below `tolerance`."""
    # Time-delay embedding
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    n = len(embedded)
    # Pairwise distances, upper triangle only (the distance matrix is symmetric)
    distances = np.zeros((n - 1, n - 1))
    for row in range(n - 1):
        diffs = embedded[row, :] - embedded[row + 1 :, :]
        distances[row, row:] = np.linalg.norm(diffs, axis=1)
    # Mask the unfilled (and identical-point) entries so they never count
    distances[distances == 0] = np.inf
    # Normalized count of close pairs (factor 2 accounts for symmetry)
    return 2 * np.sum(distances < tolerance) / (n * (n - 1))
| 3,190 | 29.390476 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_bubble.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .entropy_permutation import _entropy_permutation
from .entropy_renyi import entropy_renyi
def entropy_bubble(signal, delay=1, dimension=3, alpha=2, **kwargs):
    """**Bubble Entropy (BubblEn)**

    Introduced by Manis et al. (2017) with the goal of being independent of
    parameters such as *Tolerance* and *Dimension*. It builds on
    :func:`permutation entropy <entropy_permutation>` computed via
    :func:`Rényi entropy <entropy_renyi>`, but orders the embedded vectors
    with the bubble sort algorithm. The result is the difference between the
    entropies at embedding dimensions *m + 1* and *m*, normalized by
    :math:`\\log((m + 1) / (m - 1))`.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
        Must be greater than 1 (the normalization divides by :math:`\\log((m+1)/(m-1))`).
    alpha : float
        The *alpha* :math:`\\alpha` parameter for :func:`Rényi entropy <entropy_renyi>`.
        NOTE(review): currently not forwarded to the entropy computation —
        pass it via ``**kwargs`` if supported; TODO confirm intended wiring.
    **kwargs : optional
        Other arguments passed to the permutation entropy routine.

    See Also
    --------
    complexity_ordinalpatterns, entropy_permutation, entropy_renyi

    Returns
    ----------
    BubbEn : float
        The Bubble Entropy.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)
      BubbEn, info = nk.entropy_bubble(signal)
      BubbEn

    References
    ----------
    * Manis, G., Aktaruzzaman, M. D., & Sassi, R. (2017). Bubble entropy: An entropy almost free of
      parameters. IEEE Transactions on Biomedical Engineering, 64(11), 2711-2718.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    info = {"Dimension": dimension, "Delay": delay}
    # Bubble-sort-based Rényi permutation entropy at dimensions m and m + 1
    values = []
    for emb_dim in (dimension, dimension + 1):
        values.append(
            _entropy_permutation(
                signal,
                dimension=emb_dim,
                delay=delay,
                algorithm=entropy_renyi,
                sorting="bubblesort",
                **kwargs,
            )
        )
    # Normalized difference between the two embedding dimensions
    bubben = (values[1] - values[0]) / np.log((dimension + 1) / (dimension - 1))
    return bubben, info
| 2,990 | 32.606742 | 110 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_distribution.py | import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_embedding import complexity_embedding
from .entropy_shannon import entropy_shannon
def entropy_distribution(signal=None, delay=1, dimension=3, bins="Sturges", base=2):
    """**Distribution Entropy (DistrEn)**

    Distribution Entropy (**DistrEn**, more commonly known as **DistEn**): the
    normalized Shannon entropy of the empirical distribution of distances
    between time-delay embedded state vectors.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    bins : int or str
        Method to find the number of bins. Can be a number, or one of ``"Sturges"``, ``"Rice"``,
        ``"Doane"``, or ``"sqrt"`` (case-insensitive).
    base : int
        The logarithmic base to use for :func:`entropy_shannon`.

    Returns
    --------
    distren : float
        The Distribution Entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)
      distren, info = nk.entropy_distribution(signal)
      distren

    References
    -----------
    * Li, P., Liu, C., Li, K., Zheng, D., Liu, C., & Hou, Y. (2015). Assessing the complexity of
      short-term heartbeat interval series by distribution entropy. Medical & biological
      engineering & computing, 53(1), 77-87.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Store parameters
    info = {
        "Dimension": dimension,
        "Delay": delay,
        "Bins": bins,
    }
    # Time-delay embedding
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    # Condensed vector of pairwise max-absolute-difference distances
    n = len(embedded)
    d = np.zeros(round(n * (n - 1) / 2))
    for k in range(1, n):
        # Flat indices of row k's distances within the condensed vector
        Ix = (int((k - 1) * (n - k / 2)), int(k * (n - ((k + 1) / 2))))
        d[Ix[0] : Ix[1]] = np.max(
            abs(np.tile(embedded[k - 1, :], (n - k, 1)) - embedded[k:, :]), axis=1
        )
    # TODO: "D is symmetrical. Only the upper or lower triangular matrix will actually be adequate
    # for the estimation of the ePDF, which can be used to facilitate its fast calculation."
    n_d = len(d)
    # Number of bins
    if isinstance(bins, str):
        bins = bins.lower()
        if bins == "sturges":
            n_bins = np.ceil(np.log2(n_d) + 1)
        elif bins == "rice":
            n_bins = np.ceil(2 * (n_d ** (1 / 3)))
        elif bins == "sqrt":
            n_bins = np.ceil(np.sqrt(n_d))
        elif bins in ("doane", "doanes"):
            # Fix: the docstring advertises "Doane"; the misspelled "doanes"
            # is still accepted for backward compatibility
            sigma = np.sqrt(6 * (n_d - 2) / ((n_d + 1) * (n_d + 3)))
            n_bins = np.ceil(1 + np.log2(n_d) + np.log2(1 + abs(scipy.stats.skew(d) / sigma)))
        else:
            # ValueError (a subclass of Exception) keeps existing handlers working
            raise ValueError("Please enter a valid binning method")
    else:
        n_bins = bins
    # Empirical probability distribution of the distances (ePDF)
    freq, _ = np.histogram(d, int(n_bins))
    freq = freq / freq.sum()
    # Compute Shannon Entropy
    distren, _ = entropy_shannon(freq=freq, base=base)
    # Normalize by number of bins (so that the range should be within [0, 1])
    distren = distren / (np.log(n_bins) / np.log(base))
    return distren, info
| 3,817 | 31.913793 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_hierarchical.py | import matplotlib.cm
import matplotlib.gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .entropy_sample import entropy_sample
from .optim_complexity_tolerance import complexity_tolerance
def entropy_hierarchical(
    signal, scale="default", dimension=2, tolerance="sd", show=False, **kwargs
):
    """**Hierarchical Entropy (HEn)**

    Hierarchical Entropy (HEn) can be viewed as a generalization of the multiscale
    decomposition used in :func:`multiscale entropy <entropy_multiscale>`, and the Haar wavelet
    decomposition since it generate subtrees of the hierarchical tree. It preserves the strength of
    the multiscale decomposition with additional components of higher frequency in different
    scales. The hierarchical decomposition, unlike the wavelet decomposition, contains redundant
    components, which makes it sensitive to the dynamical richness of the time series.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : int or str
        The maximum scale factor. Can only be a number or ``"default"`` (in which case the
        largest scale such that each subtree still contains more than 8 samples is used).
        Though it behaves a bit differently here, see :func:`complexity_multiscale` for details.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float or str
        Tolerance (often denoted as *r*), distance to consider two data points as similar.
        Passed to :func:`complexity_tolerance` (``"sd"`` by default).
    show : bool
        If ``True``, plot the per-scale entropy values and the hierarchical tree annotated
        with the entropy of each node.
    **kwargs : optional
        Other keyword arguments (currently not used).

    Returns
    -------
    hen : float
        Hierarchical Entropy (HEn) of the signal: the normalized area under the curve of the
        per-scale mean entropies.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, entropy_multiscale

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=5, frequency=[97, 98, 100], noise=0.05)

      # Compute Hierarchical Entropy (HEn)
      @savefig p_entropy_hierarchical1.png scale=100%
      hen, info = nk.entropy_hierarchical(signal, show=True, scale=5, dimension=3)
      @suppress
      plt.close()

    References
    ----------
    * Jiang, Y., Peng, C. K., & Xu, Y. (2011). Hierarchical entropy analysis for biological
      signals. Journal of Computational and Applied Mathematics, 236(5), 728-742.
    * Li, W., Shen, X., & Li, Y. (2019). A comparative study of multiscale sample entropy and
      hierarchical entropy and its application in feature extraction for ship-radiated noise.
      Entropy, 21(8), 793.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Get max scale
    if isinstance(scale, str):
        # Largest scale such that the shortest subtree (length N / 2**(scale-1)) keeps > 8 samples
        N = int(2 ** np.floor(np.log2(len(signal))))
        scale = 1
        while N / (2 ** (scale - 1)) > 8 and scale < len(signal) / 2:
            scale += 1
    # Store parameters
    info = {
        "Scale": np.arange(1, scale + 1),
        "Dimension": dimension,
        "Tolerance": complexity_tolerance(
            signal,
            method=tolerance,
            dimension=dimension,
            show=False,
        )[0],
    }
    # TODO: Simplify this code, make it clearer and step by step, following the paper more closely
    # Q is a heap-like matrix: row 0 is the signal, rows 2k+1 / 2k+2 are the low- and
    # high-frequency components of row k (see _hierarchical_decomposition)
    Q, N = _hierarchical_decomposition(signal, scale=scale)
    HEns = np.zeros(len(Q))
    for T in range(len(Q)):
        # Only the first N / 2**level columns of each row carry data; the rest are zero padding
        Temp = Q[T, : int(N / (2 ** (int(np.log2(T + 1)))))]
        # This could be exposed to have different type of entropy estimators
        HEns[T], _ = entropy_sample(Temp, delay=1, dimension=dimension, tolerance=info["Tolerance"])
    # Mean entropy per tree level, ignoring non-finite node entropies
    Sn = np.zeros(scale)
    for t in range(scale):
        vals = HEns[(2 ** t) - 1 : (2 ** (t + 1)) - 1]
        Sn[t] = np.mean(vals[np.isfinite(vals)])
    # The HEn index is quantified as the area under the curve (AUC),
    # which is like the sum normalized by the number of values. It's similar to the mean.
    hen = np.trapz(Sn[np.isfinite(Sn)]) / len(Sn[np.isfinite(Sn)])

    if show is True:
        # Color normalization values by extending beyond the range of the mean values
        colormin = np.min(Sn) - np.ptp(Sn) * 0.1
        colormax = np.max(Sn) + np.ptp(Sn) * 0.1

        # Top panel: mean entropy per scale
        plt.figure()
        G = matplotlib.gridspec.GridSpec(10, 1)
        ax1 = plt.subplot(G[:2, :])
        ax1.plot(np.arange(1, scale + 1), Sn, color="black", zorder=0)
        ax1.scatter(
            np.arange(1, scale + 1),
            Sn,
            c=Sn,
            zorder=1,
            # Color map and color normalization values
            cmap="spring",
            vmin=colormin,
            vmax=colormax,
        )
        ax1.set_xticks(np.arange(1, scale + 1))
        ax1.set_xlabel("Scale Factor")
        ax1.set_ylabel("Entropy")
        ax1.set_title("Hierarchical Entropy")

        # Bottom panel: binary tree layout; x/y are node coordinates derived from
        # the heap index (row k of Q maps to tree node k+1)
        N = 2 ** (scale - 1)
        x = np.zeros(2 * N - 1, dtype=int)
        x[0] = N
        y = -1 * (scale - np.log2(np.arange(1, 2 * N)) // 1) + scale + 1
        for k in range(1, 2 * N):
            Q = int(np.log2(k) // 1)
            P = int((k) // 2) - 1
            if k > 1:
                # Odd children go right of the parent, even children left
                if k % 2:
                    x[k - 1] = x[P] + N / (2 ** Q)
                else:
                    x[k - 1] = x[P] - N / (2 ** Q)
        # Parent-child pairs to draw as tree edges
        Edges = np.vstack((np.repeat(np.arange(1, N), 2), np.arange(2, 2 * N))).transpose() - 1
        # Each node is labelled with its (rounded) sample entropy
        labx = ["".join(k) for k in np.round(HEns, 3).astype(str)]
        ax2 = plt.subplot(G[3:, :])
        for k in range(len(x) - 1):
            ax2.plot(x[Edges[k, :]], y[Edges[k, :]], color="black", zorder=0)
            ax2.annotate(labx[k], (x[k], y[k]), fontsize=8)
        ax2.scatter(
            x,
            y,
            c=HEns,
            zorder=1,
            # Color map and color normalization values
            cmap="spring",
            vmin=colormin,
            vmax=colormax,
        )
        ax2.annotate(labx[-1], (x[-1], y[-1]), fontsize=8)
        ax2.invert_yaxis()
        ax2.set_ylabel("Scale Factor")

    # return MSx, Sn, CI
    return hen, info
def _hierarchical_decomposition(signal, scale=3):
N = int(2 ** np.floor(np.log2(len(signal))))
if N / (2 ** (scale - 1)) < 8:
raise Exception(
"Signal length is too short to estimate entropy at the lowest"
" subtree. Consider reducing the value of scale."
)
Q = np.zeros(((2 ** scale) - 1, N))
Q[0, :] = signal[:N]
p = 1
for k in range(scale - 1):
for n in range(2 ** k):
Temp = Q[(2 ** k) + n - 1, :]
# 1. We define an averaging operator Q0. It is the the low frequency component.
Q[p, : N // 2] = (Temp[::2] + Temp[1::2]) / 2
# 2. We define a difference frequency component. It is the the high frequency component.
Q[p + 1, : N // 2] = (Temp[::2] - Temp[1::2]) / 2
p += 2
return Q, N
| 7,284 | 35.243781 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_linelength.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def fractal_linelength(signal):
    """**Line Length (LL)**

    Line Length (LL, also known as curve length) is a modification of the
    :func:`Katz fractal dimension <fractal_katz>` algorithm designed to be more efficient and
    accurate (especially for seizure onset detection). It corresponds to the average of the
    absolute consecutive differences of the signal, and was made to be used within subwindows.
    Note that this does not technically measure the fractal dimension; the ``fractal_`` prefix
    reflects its conceptual similarity with Katz's fractal dimension.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    float
        Line Length.
    dict
        A dictionary containing additional information (currently empty, but returned nonetheless
        for consistency with other functions).

    See Also
    --------
    fractal_katz

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6, 10])

      ll, _ = nk.fractal_linelength(signal)
      ll

    References
    ----------
    * Esteller, R., Echauz, J., Tcheng, T., Litt, B., & Pless, B. (2001, October). Line length: an
      efficient feature for seizure onset detection. In 2001 Conference Proceedings of the 23rd
      Annual International Conference of the IEEE Engineering in Medicine and Biology Society (Vol.
      2, pp. 1707-1710). IEEE.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Coerce to array and discard missing values
    values = np.array(signal)
    values = values[~np.isnan(values)]

    # Line length: mean absolute first difference
    return np.mean(np.abs(np.diff(values))), {}
| 2,149 | 28.861111 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_sample.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .optim_complexity_tolerance import complexity_tolerance
from .utils import _phi, _phi_divide
def entropy_sample(signal, delay=1, dimension=2, tolerance="sd", **kwargs):
    """**Sample Entropy (SampEn)**

    Compute the sample entropy (SampEn) of a signal. SampEn is a modification of ApEn used for
    assessing complexity of physiological time series signals. It corresponds to the conditional
    probability that two vectors that are close to each other for *m* dimensions will remain
    close at the next *m + 1* component.

    This function can be called either via ``entropy_sample()`` or ``complexity_sampen()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    **kwargs : optional
        Other arguments.

    See Also
    --------
    entropy_shannon, entropy_approximate, entropy_fuzzy

    Returns
    ----------
    sampen : float
        The sample entropy of the single time series. If undefined conditional probabilities are
        detected (logarithm of sum of conditional probabilities is ``ln(0)``), ``np.inf`` is
        returned, meaning the function fails to retrieve 'accurate' regularity information. This
        tends to happen for short data segments; increasing tolerance levels might help avoid it.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute sample entropy.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)

      sampen, parameters = nk.entropy_sample(signal, delay=1, dimension=2)
      sampen

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Resolve the tolerance first so the same value goes into both info and _phi
    r = complexity_tolerance(
        signal,
        method=tolerance,
        dimension=dimension,
        show=False,
    )[0]

    # Store parameters
    info = {"Dimension": dimension, "Delay": delay, "Tolerance": r}

    # Count template matches at dimensions m and m+1 (non-approximate variant excludes
    # self-matches), then take the ratio
    phi = _phi(
        signal,
        delay=delay,
        dimension=dimension,
        tolerance=r,
        approximate=False,
        **kwargs
    )[0]
    return _phi_divide(phi), info
| 3,187 | 32.914894 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/optim_complexity_tolerance.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import scipy.spatial
import sklearn.neighbors
from ..stats import density
from .utils import _phi
from .utils_complexity_embedding import complexity_embedding
def complexity_tolerance(
    signal, method="maxApEn", r_range=None, delay=None, dimension=None, show=False
):
    """**Automated selection of tolerance (r)**

    Estimate and select the optimal tolerance (*r*) parameter used by other entropy and other
    complexity algorithms.

    Many complexity algorithms are built on the notion of self-similarity and recurrence, and how
    often a system revisits its past states. Considering two states as identical is straightforward
    for discrete systems (e.g., a sequence of ``"A"``, ``"B"`` and ``"C"`` states), but for
    continuous signals, we cannot simply look for when the two numbers are exactly the same.
    Instead, we have to pick a threshold by which to consider two points as similar.

    The tolerance *r* is essentially this threshold value (the numerical difference between two
    similar points that we "tolerate"). This parameter has a critical impact and is a major
    source of inconsistencies in the literature.

    Different methods have been described to estimate the most appropriate tolerance value:

    * **maxApEn**: Different values of tolerance will be tested and the one where the approximate
      entropy (ApEn) is maximized will be selected and returned (Chen, 2008).
    * **recurrence**: The tolerance that yields a recurrence rate (see ``RQA``) close to 1% will
      be returned. Note that this method is currently not suited for very long signals, as it is
      based on a recurrence matrix, which size is close to n^2. Help is needed to address this
      limitation.
    * **neighbours**: The tolerance that yields a number of nearest neighbours (NN) close to 2% will
      be returned.

    As these methods are computationally expensive, other fast heuristics are available:

    * **sd**: r = 0.2 * standard deviation (SD) of the signal will be returned. This is the most
      commonly used value in the literature, though its appropriateness is questionable.
    * **makowski**: Adjusted value based on the SD, the embedding dimension and the signal's
      length. See our `study <https://github.com/DominiqueMakowski/ComplexityTolerance>`_.
    * **nolds**: Adjusted value based on the SD and the dimension. The rationale is that
      the chebyshev distance (used in various metrics) rises logarithmically with increasing
      dimension. ``0.5627 * np.log(dimension) + 1.3334`` is the logarithmic trend line for the
      chebyshev distance of vectors sampled from a univariate normal distribution. A constant of
      ``0.1164`` is used so that ``tolerance = 0.2 * SDs`` for ``dimension = 2`` (originally in
      https://github.com/CSchoel/nolds).
    * **singh2016**: Makes a histogram of the Chebyshev distance matrix and returns the upper bound
      of the modal bin.
    * **chon2009**: Acknowledging that computing multiple ApEns is computationally expensive, Chon
      (2009) suggested an approximation based a heuristic algorithm that takes into account the
      length of the signal, its short-term and long-term variability, and the embedding dimension
      *m*. Initially defined only for *m* in [2-7], we expanded this to work with value of *m*
      (though the accuracy is not guaranteed beyond *m* = 4).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        Can be ``"maxApEn"`` (default), ``"sd"``, ``"recurrence"``, ``"neighbours"``, ``"nolds"``,
        ``"chon2009"``, or ``"neurokit"``.
    r_range : Union[list, int]
        The range of tolerance values (or the number of values) to test. Only used if ``method`` is
        ``"maxApEn"`` or ``"recurrence"``. If ``None`` (default), the default range will be used;
        ``np.linspace(0.02, 0.8, r_range) * np.std(signal, ddof=1)`` for ``"maxApEn"``, and ``np.
        linspace(0, np.max(d), 30 + 1)[1:]`` for ``"recurrence"``. You can set a lower number for
        faster results.
    delay : int
        Only used if ``method="maxApEn"``. See :func:`entropy_approximate()`.
    dimension : int
        Only used if ``method="maxApEn"``. See :func:`entropy_approximate()`.
    show : bool
        If ``True`` and method is ``"maxApEn"``, will plot the ApEn values for each value of r.

    See Also
    --------
    complexity, complexity_delay, complexity_dimension, complexity_embedding

    Returns
    ----------
    float
        The optimal tolerance value.
    dict
        A dictionary containing additional information.

    Examples
    ----------
    * **Example 1**: The method based on the SD of the signal is fast. The plot shows the d
      distribution of the values making the signal, and the width of the arrow represents the
      chosen ``r`` parameter.

    .. ipython:: python

      import neurokit2 as nk

      # Simulate signal
      signal = nk.signal_simulate(duration=2, frequency=[5, 7, 9, 12, 15])

      # Fast method (based on the standard deviation)
      @savefig p_complexity_tolerance1.png scale=100%
      r, info = nk.complexity_tolerance(signal, method = "sd", show=True)
      @suppress
      plt.close()

    .. ipython:: python

      r

    The dimension can be taken into account:

    .. ipython:: python

      # nolds method
      @savefig p_complexity_tolerance2.png scale=100%
      r, info = nk.complexity_tolerance(signal, method = "nolds", dimension=3, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      r

    * **Example 2**: The method based on the recurrence rate will display the rates according to
      different values of tolerance. The horizontal line indicates 5%.

    .. ipython:: python

      @savefig p_complexity_tolerance3.png scale=100%
      r, info = nk.complexity_tolerance(signal, delay=1, dimension=10,
                                        method = 'recurrence', show=True)
      @suppress
      plt.close()

    .. ipython:: python

      r

    An alternative, better suited for long signals is to use nearest neighbours.

    .. ipython:: python

      @savefig p_complexity_tolerance4.png scale=100%
      r, info = nk.complexity_tolerance(signal, delay=1, dimension=10,
                                        method = 'neighbours', show=True)
      @suppress
      plt.close()

    Another option is to use the density of distances.

    .. ipython:: python

      @savefig p_complexity_tolerance5.png scale=100%
      r, info = nk.complexity_tolerance(signal, delay=1, dimension=3,
                                        method = 'bin', show=True)
      @suppress
      plt.close()

    * **Example 3**: The default method selects the tolerance at which *ApEn* is maximized.

    .. ipython:: python

      # Slow method
      @savefig p_complexity_tolerance6.png scale=100%
      r, info = nk.complexity_tolerance(signal, delay=8, dimension=6,
                                        method = 'maxApEn', show=True)
      @suppress
      plt.close()

    .. ipython:: python

      r

    * **Example 4**: The tolerance values that are tested can be modified to get a more precise
      estimate.

    .. ipython:: python

      # Narrower range
      @savefig p_complexity_tolerance7.png scale=100%
      r, info = nk.complexity_tolerance(signal, delay=8, dimension=6, method = 'maxApEn',
                                        r_range=np.linspace(0.002, 0.8, 30), show=True)
      @suppress
      plt.close()

    .. ipython:: python

      r

    References
    -----------
    * Chon, K. H., Scully, C. G., & Lu, S. (2009). Approximate entropy for all signals. IEEE
      engineering in medicine and biology magazine, 28(6), 18-23.
    * Lu, S., Chen, X., Kanters, J. K., Solomon, I. C., & Chon, K. H. (2008). Automatic selection of
      the threshold value r for approximate entropy. IEEE Transactions on Biomedical Engineering,
      55(8), 1966-1972.
    * Chen, X., Solomon, I. C., & Chon, K. H. (2008). Parameter selection criteria in approximate
      entropy and sample entropy with application to neural respiratory signals. Am. J. Physiol.
      Regul. Integr. Comp. Physiol.
    * Singh, A., Saini, B. S., & Singh, D. (2016). An alternative approach to approximate entropy
      threshold value (r) selection: application to heart rate variability and systolic blood
      pressure variability under postural challenge. Medical & biological engineering & computing,
      54(5), 723-732.

    """
    # A non-string method is interpreted as an already-chosen tolerance value
    if not isinstance(method, str):
        return method, {"Method": "None"}

    # Method
    method = method.lower()
    if method in ["traditional", "sd", "std", "default"]:
        r = 0.2 * np.std(signal, ddof=1)
        info = {"Method": "20% SD"}

    elif method in ["adjusted_sd", "nolds"] and (
        isinstance(dimension, (int, float)) or dimension is None
    ):
        if dimension is None:
            raise ValueError("'dimension' cannot be empty for the 'nolds' method.")
        r = 0.11604738531196232 * np.std(signal, ddof=1) * (0.5627 * np.log(dimension) + 1.3334)
        info = {"Method": "Adjusted 20% SD"}

    elif method in ["chon", "chon2009"] and (
        isinstance(dimension, (int, float)) or dimension is None
    ):
        if dimension is None:
            raise ValueError("'dimension' cannot be empty for the 'chon2009' method.")
        sd1 = np.std(np.diff(signal), ddof=1)  # short-term variability
        sd2 = np.std(signal, ddof=1)  # long-term variability of the signal

        # Here are the formulas from Chon (2009):
        # For m=2: r =(−0.036 + 0.26 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)
        # For m=3: r =(−0.08 + 0.46 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)
        # For m=4: r =(−0.12 + 0.62 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)
        # For m=5: r =(−0.16 + 0.78 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)
        # For m=6: r =(−0.19 + 0.91 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)
        # For m=7: r =(−0.2 + 1 * sqrt(sd1/sd2)) / (len(signal) / 1000)**(1/4)

        # FIX: the condition was `dimension <= 2 and dimension <= 7`, which wrongly sent
        # the tabulated dimensions 3-7 to the extrapolation branch below
        if 2 <= dimension <= 7:
            x = [-0.036, -0.08, -0.12, -0.16, -0.19, -0.2][dimension - 2]
            y = [0.26, 0.46, 0.62, 0.78, 0.91, 1][dimension - 2]
        else:
            # We need to extrapolate the 2 first numbers, x and y
            # np.polyfit(np.log([2,3,4, 5, 6, 7]), [-0.036, -0.08, -0.12, -0.16, -0.19, -0.2], 1)
            # np.polyfit([2,3,4, 5, 6, 7], [0.26, 0.46, 0.62, 0.78, 0.91, 1], 1)
            x = -0.034 * dimension + 0.022
            y = 0.14885714 * dimension - 0.00180952

        # FIX: take the fourth root of (N / 1000) as per the formulas above; the previous
        # `** 1 / 4` parsed as `(... ** 1) / 4` and simply divided by 4
        r = (x + y * np.sqrt(sd1 / sd2)) / (len(signal) / 1000) ** (1 / 4)
        info = {"Method": "Chon (2009)"}

    elif method in ["neurokit", "makowski"] and (
        isinstance(dimension, (int, float)) or dimension is None
    ):
        # Method described in
        # https://github.com/DominiqueMakowski/ComplexityTolerance
        if dimension is None:
            raise ValueError("'dimension' cannot be empty for the 'makowski' method.")
        n = len(signal)
        r = np.std(signal, ddof=1) * (
            0.2811 * (dimension - 1) + 0.0049 * np.log(n) - 0.02 * ((dimension - 1) * np.log(n))
        )
        info = {"Method": "Makowski"}

    elif method in ["maxapen", "optimize"]:
        r, info = _optimize_tolerance_maxapen(
            signal, r_range=r_range, delay=delay, dimension=dimension
        )
        info.update({"Method": "Max ApEn"})

    elif method in ["recurrence", "rqa"]:
        r, info = _optimize_tolerance_recurrence(
            signal, r_range=r_range, delay=delay, dimension=dimension
        )
        info.update({"Method": "1% Recurrence Rate"})

    elif method in ["neighbours", "neighbors", "nn"]:
        r, info = _optimize_tolerance_neighbours(
            signal, r_range=r_range, delay=delay, dimension=dimension
        )
        info.update({"Method": "2% Neighbours"})

    elif method in ["bin", "bins", "singh", "singh2016"]:
        r, info = _optimize_tolerance_bin(signal, delay=delay, dimension=dimension)
        info.update({"Method": "bin"})

    else:
        raise ValueError("NeuroKit error: complexity_tolerance(): 'method' not recognized.")

    if show is True:
        _optimize_tolerance_plot(r, info, method=method, signal=signal)
    return r, info
# =============================================================================
# Internals
# =============================================================================
def _optimize_tolerance_recurrence(signal, r_range=None, delay=None, dimension=None):
    """Return the candidate tolerance whose recurrence rate is closest to 1%."""
    # Optimize missing parameters
    if delay is None or dimension is None:
        raise ValueError("If method='recurrence', both delay and dimension must be specified.")

    # Pairwise euclidean distances between embedded state vectors
    emb = complexity_embedding(signal, delay=delay, dimension=dimension)
    dist = scipy.spatial.distance.cdist(emb, emb, metric="euclidean")

    if r_range is None:
        r_range = 50
    if isinstance(r_range, int):
        r_range = np.linspace(0, np.max(dist), r_range + 1)[1:]

    # Only the lower triangle (without the diagonal) is informative, since the matrix is symmetric
    lower = dist[np.tril_indices(len(dist), k=-1)]
    rates = np.array([(lower <= candidate).sum() / len(lower) for candidate in r_range])

    # Candidate whose recurrence rate is nearest to 0.01 (1%)
    optimal = r_range[np.abs(rates - 0.01).argmin()]
    return optimal, {"Values": r_range, "Scores": rates}
def _optimize_tolerance_maxapen(signal, r_range=None, delay=None, dimension=None):
    """Return the candidate tolerance that maximizes approximate entropy (ApEn)."""
    # Optimize missing parameters
    if delay is None or dimension is None:
        raise ValueError("If method='maxApEn', both delay and dimension must be specified.")

    if r_range is None:
        r_range = 40
    if isinstance(r_range, int):
        r_range = np.linspace(0.02, 0.8, r_range) * np.std(signal, ddof=1)

    # The kd-trees built by _phi are threaded through successive calls so they
    # are constructed only once for the whole sweep
    scores = []
    trees = {"kdtree1": None, "kdtree2": None}
    for candidate in r_range:
        apen, trees = _entropy_apen(
            signal,
            delay=delay,
            dimension=dimension,
            tolerance=candidate,
            kdtree1=trees["kdtree1"],
            kdtree2=trees["kdtree2"],
        )
        scores.append(apen)

    scores = np.array(scores)
    return r_range[np.argmax(scores)], {"Values": r_range, "Scores": scores}
def _entropy_apen(signal, delay, dimension, tolerance, **kwargs):
    """Approximate entropy as |phi(m) - phi(m + 1)|, forwarding kd-trees via kwargs."""
    phi, info = _phi(
        signal,
        delay=delay,
        dimension=dimension,
        tolerance=tolerance,
        approximate=True,
        **kwargs,
    )
    apen = np.abs(phi[0] - phi[1])
    return apen, info
def _optimize_tolerance_neighbours(signal, r_range=None, delay=None, dimension=None):
    """Return the candidate tolerance yielding a nearest-neighbour proportion closest to 2%."""
    delay = 1 if delay is None else delay
    dimension = 1 if dimension is None else dimension

    if r_range is None:
        r_range = 50
    if isinstance(r_range, int):
        r_range = np.linspace(0.02, 0.8, r_range) * np.std(signal, ddof=1)

    emb = complexity_embedding(signal, delay=delay, dimension=dimension)
    tree = sklearn.neighbors.KDTree(emb, metric="chebyshev")

    # For each candidate radius, average proportion of points falling within it
    proportions = np.array(
        [
            np.mean(
                tree.query_radius(emb, candidate, count_only=True).astype(np.float64)
                / emb.shape[0]
            )
            for candidate in r_range
        ]
    )

    # Candidate whose neighbour proportion is nearest to 0.02 (2%)
    optimal = r_range[np.abs(proportions - 0.02).argmin()]
    return optimal, {"Values": r_range, "Scores": proportions}
def _optimize_tolerance_bin(signal, delay=None, dimension=None):
    """Upper bound of the modal bin of the Chebyshev distance histogram (Singh, 2016)."""
    # Optimize missing parameters
    if delay is None or dimension is None:
        raise ValueError("If method='bin', both delay and dimension must be specified.")

    # Pairwise Chebyshev distances between embedded state vectors
    emb = complexity_embedding(signal, delay=delay, dimension=dimension)
    dist = scipy.spatial.distance.cdist(emb, emb, metric="chebyshev")

    # Empirical density of the lower-triangular distances (diagonal excluded)
    density_, edges = np.histogram(dist[np.tril_indices(len(dist), k=-1)], bins=200, density=True)

    # Upper edge of the most frequent bin, divided by two because r corresponds to the radius
    # of the circle (NOTE: this is NOT in the paper and thus, opinion is required!)
    optimal = edges[np.argmax(density_) + 1] / 2
    return optimal, {"Values": edges[1:] / 2, "Scores": density_}
# =============================================================================
# Plotting
# =============================================================================
def _optimize_tolerance_plot(r, info, ax=None, method="maxApEn", signal=None):
    """Plot the tolerance-selection diagnostics for `complexity_tolerance`.

    Depending on `method`, shows either the signal's value distribution with the
    chosen tolerance drawn as a double-headed arrow (SD-based heuristics), the
    distance-density curve ("bin" methods), or the score curve (ApEn / recurrence
    rate / neighbours) against the candidate tolerances stored in `info`.
    Returns the created figure, or None when drawing on a caller-supplied `ax`.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        # Caller owns the figure; nothing to return
        fig = None
    # SD-based heuristics: plot the signal's density and mark r as an arrow width
    if method in [
        "traditional",
        "sd",
        "std",
        "default",
        "none",
        "adjusted_sd",
        "nolds",
        "chon",
        "chon2009",
    ]:
        x, y = density(signal)
        # Vertical position of the arrow: midway between the density extremes
        arrow_y = np.mean([np.max(y), np.min(y)])
        x_range = np.max(x) - np.min(x)
        ax.plot(x, y, color="#80059c", label="Optimal r: " + str(np.round(r, 3)))
        # NOTE(review): the arrow dx is `np.mean(x) +/- r / 2`, so the drawn length
        # depends on the signal mean rather than being exactly r/2 — presumably
        # intended to span r centered on the mean; verify for non-centered signals
        ax.arrow(
            np.mean(x),
            arrow_y,
            np.mean(x) + r / 2,
            0,
            head_width=0.01 * x_range,
            head_length=0.01 * x_range,
            linewidth=4,
            color="g",
            length_includes_head=True,
        )
        ax.arrow(
            np.mean(x),
            arrow_y,
            np.mean(x) - r / 2,
            0,
            head_width=0.01 * x_range,
            head_length=0.01 * x_range,
            linewidth=4,
            color="g",
            length_includes_head=True,
        )
        ax.set_title("Optimization of Tolerance Threshold (r)")
        ax.set_xlabel("Signal values")
        ax.set_ylabel("Distribution")
        ax.legend(loc="upper right")
        return fig
    # Histogram-based method (Singh, 2016): density of Chebyshev distances
    if method in ["bin", "bins", "singh", "singh2016"]:
        ax.set_title("Optimization of Tolerance Threshold (r)")
        ax.set_xlabel("Chebyshev Distance")
        ax.set_ylabel("Density")
        ax.plot(info["Values"], info["Scores"], color="#4CAF50")
        ax.axvline(x=r, color="#E91E63", label="Optimal r: " + str(np.round(r, 3)))
        ax.legend(loc="upper right")
        return fig
    # Remaining methods: plot the per-candidate scores from the optimization sweep
    r_range = info["Values"]
    y_values = info["Scores"]
    # Custom legend depending on method
    if method in ["maxapen", "optimize"]:
        ylabel = "Approximate Entropy $ApEn$"
        legend = "$ApEn$"
    else:
        # NOTE(review): this mutates info["Scores"] in place (no copy is made)
        y_values *= 100  # Convert to percentage
        ax.axhline(y=0.5, color="grey")
        ax.yaxis.set_major_formatter(matplotlib.ticker.PercentFormatter())
        if method in ["neighbours", "neighbors", "nn"]:
            ylabel = "Nearest Neighbours"
            legend = "$NN$"
        else:
            ylabel = "Recurrence Rate $RR$"
            legend = "$RR$"
    ax.set_title("Optimization of Tolerance Threshold (r)")
    ax.set_xlabel("Tolerance threshold $r$")
    ax.set_ylabel(ylabel)
    ax.plot(r_range, y_values, "o-", label=legend, color="#80059c")
    ax.axvline(x=r, color="#E91E63", label="Optimal r: " + str(np.round(r, 3)))
    ax.legend(loc="upper right")
    return fig
| 19,419 | 36.929688 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_increment.py | import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
from .entropy_shannon import entropy_shannon
def entropy_increment(signal, dimension=2, q=4, **kwargs):
"""**Increment Entropy (IncrEn) and its Multiscale variant (MSIncrEn)**
Increment Entropy (IncrEn) quantifies the magnitudes of the variations between adjacent
elements into ranks based on a precision factor *q* and the standard deviation of the time
series. IncrEn is conceptually similar to :func:`permutation entropy <entropy_permutation>` in
that it also uses the concepts of symbolic dynamics.
In the IncrEn calculation, two letters are used to describe the relationship between adjacent
elements in a time series. One letter represents the volatility direction, and the other
represents the magnitude of the variation between the adjacent elements.
The time series is reconstructed into vectors of *m* elements. Each element of each vector
represents the increment between two neighbouring elements in the original time series.
Each increment element is mapped to a word consisting of two letters (one letter represents
the volatility direction, and the other represents the magnitude of the variation between
the adjacent elements), and then, each vector is described as a symbolic (discrete) pattern.
The :func:`Shannon entropy <entropy_shannon>` of the probabilities of independent patterns is
then computed.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
dimension : int
Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
:func:`complexity_dimension` to estimate the optimal value for this parameter.
q : float
The quantifying resolution *q* represents the precision of *IncrEn*, with larger values
indicating a higher precision, causing IncrEn to be more sensitive to subtle fluctuations.
The IncrEn value increases with increasing *q*, until reaching a plateau. This property can
be useful to selecting an optimal *q* value.
**kwargs : optional
Other keyword arguments, such as the logarithmic ``base`` to use for
:func:`entropy_shannon`.
Returns
--------
incren : float
The Increment Entropy of the signal.
info : dict
A dictionary containing additional information regarding the parameters used, such as the
average entropy ``AvEn``.
See Also
--------
entropy_shannon, entropy_multiscale
Examples
----------
.. ipython:: python
import neurokit2 as nk
# Simulate a Signal
signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)
# IncrEn
incren, _ = nk.entropy_increment(signal, dimension=3, q=2)
incren
# Multiscale IncrEn (MSIncrEn)
@savefig p_entropy_increment1.png scale=100%
msincren, _ = nk.entropy_multiscale(signal, method="MSIncrEn", show=True)
@suppress
plt.close()
References
-----------
* Liu, X., Jiang, A., Xu, N., & Xue, J. (2016). Increment entropy as a measure of complexity
for time series. Entropy, 18(1), 22.
* Liu, X., Jiang, A., Xu, N., & Xue, J. (2016). Correction on Liu, X.; Jiang, A.; Xu, N.; Xue,
J. Increment Entropy as a Measure of Complexity for Time Series. Entropy 2016, 18, 22.
Entropy, 18(4), 133.
* Liu, X., Wang, X., Zhou, X., & Jiang, A. (2018). Appropriate use of the increment entropy for
electrophysiological time series. Computers in Biology and Medicine, 95, 13-23.
"""
# Sanity checks
if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
raise ValueError(
"Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
)
# Store parameters
info = {"Dimension": dimension, "q": q}
# Time-embedding of the consecutive differences ("increment series")
embedded = complexity_embedding(np.diff(signal), dimension=dimension, **kwargs)
# The sign indicates the direction of the volatility between the corresponding neighbouring
# elements in the original time series; it takes values of 1, 0, or 1, indicating a rise, no
# change, or a decline
sign = np.sign(embedded)
# The size describes the magnitude of the variation between these adjacent elements
Temp = np.tile(np.std(embedded, axis=1, ddof=1, keepdims=True), (1, dimension))
size = np.minimum(q, np.floor(abs(embedded) * q / Temp))
size[np.any(Temp == 0, axis=1), :] = 0
# Each element in each vector is mapped to a word consisting of the sign and the size
words = sign * size
# Get probabilities of occurence
freq = np.unique(words, axis=0)
freq = [np.sum(~np.any(words - freq[k, :], axis=1)) for k in range(len(freq))]
freq = np.array(freq) / np.sum(freq)
# Compute entropy
incren, _ = entropy_shannon(freq=freq, **kwargs)
# Normalize
incren = incren / (dimension - 1)
return incren, info
| 5,168 | 41.02439 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_higuchi.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .optim_complexity_k import _complexity_k_slope, complexity_k
def fractal_higuchi(signal, k_max="default", show=False, **kwargs):
    """**Higuchi's Fractal Dimension (HFD)**

    Computes Higuchi's Fractal Dimension, an approximation of the box-counting dimension
    for time series. The signal is reconstructed into k-max new data sets; for each of
    them the curve length is computed and plotted against its corresponding *k*-value on
    a log-log scale. HFD is the slope of the least-squares linear trend. Values should
    fall between 1 and 2. For the selection of the *k* parameter, see the
    :func:`complexity_k` optimization function.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    k_max : str or int
        Maximum number of interval times (should be greater than or equal to 2). If
        ``"default"``, the optimal k-max is estimated using :func:`complexity_k`,
        which is slow.
    show : bool
        Visualise the slope of the curve for the selected k_max value.
    **kwargs : optional
        Currently not used.

    Returns
    ----------
    HFD : float
        Higuchi's fractal dimension of the time series.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute Higuchi's fractal dimension.

    See Also
    --------
    complexity_k

    References
    ----------
    * Higuchi, T. (1988). Approach to an irregular time series on the basis of the
      fractal theory. Physica D: Nonlinear Phenomena, 31(2), 277-283.
    * Vega, C. F., & Noel, J. (2015, June). Parameters analyzed of Higuchi's fractal
      dimension for EEG brain signals. In 2015 Signal Processing Symposium (SPSympo)
      (pp. 1-5). IEEE. https://ieeexplore.ieee.org/document/7168285

    """
    # Reject matrices / multichannel data early
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    if isinstance(k_max, (str, list, np.ndarray, pd.Series)):
        # k_max was not given as a fixed integer: run the (slow) optimization first,
        # then look up the slope/intercept computed for the selected k
        k_max, optim_info = complexity_k(signal, k_max=k_max, show=False)
        k_index = np.where(optim_info["Values"] == k_max)[0][0]
        slope = optim_info["Scores"][k_index]
        intercept = optim_info["Intercepts"][k_index]
        average_values = optim_info["Average_Values"][k_index]
        k_values = np.arange(1, k_max + 1)
    else:
        # Fixed k_max: compute the Higuchi slope directly
        slope, intercept, slope_info = _complexity_k_slope(k_max, signal)
        k_values = slope_info["k_values"]
        average_values = slope_info["average_values"]

    if show:
        _fractal_higuchi_plot(k_values, average_values, k_max, slope, intercept)

    # HFD is the slope of the least-squares fit
    return slope, {
        "k_max": k_max,
        "Values": k_values,
        "Scores": average_values,
        "Intercept": intercept,
    }
# =============================================================================
# Utilities
# =============================================================================
def _fractal_higuchi_plot(k_values, average_values, kmax, slope, intercept, ax=None):
    """Plot the log-log least-squares fit underlying the HFD estimate.

    Returns the created figure, or ``None`` when drawing onto a provided axis.
    """
    if ax is None:
        fig, ax = plt.subplots()
        fig.suptitle("Higuchi Fractal Dimension (HFD)")
    else:
        fig = None

    ax.set_title(
        "Least-squares linear best-fit curve for $k_{max}$ = "
        + str(kmax)
        + ", slope = "
        + str(np.round(slope, 2))
    )
    ax.set_ylabel(r"$ln$(L(k))")
    ax.set_xlabel(r"$ln$(1/k)")

    log_k = -np.log(k_values)
    log_length = np.log(average_values)
    colors = plt.cm.plasma(np.linspace(0, 1, len(k_values)))

    # Label every point individually only when there are few k values;
    # otherwise only flag the minimum and maximum k in the legend
    label_each = len(k_values) < 10
    for i in range(0, len(k_values)):
        ax.scatter(
            log_k[i],
            log_length[i],
            color=colors[i],
            marker="o",
            zorder=2,
            label="k = {}".format(i + 1) if label_each else "_no_legend_",
        )
    if not label_each:
        ax.plot([], label="k = {}".format(np.min(k_values)), c=colors[0])
        ax.plot([], label="k = {}".format(np.max(k_values)), c=colors[-1])

    fit_values = [slope * x + -intercept for x in log_k]
    ax.plot(log_k, fit_values, color="#FF9800", zorder=1)
    ax.legend(loc="lower right")
    return fig
| 5,361 | 32.304348 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_lempelziv.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from .utils_complexity_ordinalpatterns import complexity_ordinalpatterns
from .utils_complexity_symbolize import complexity_symbolize
def complexity_lempelziv(
    signal,
    delay=1,
    dimension=2,
    permutation=False,
    symbolize="mean",
    **kwargs,
):
    """**Lempel-Ziv Complexity (LZC, PLZC and MSLZC)**

    Computes Lempel-Ziv Complexity (LZC) to quantify the regularity of the signal, by scanning
    symbolic sequences for new patterns, increasing the complexity count every time a new sequence
    is detected. Regular signals have a lower number of distinct patterns and thus have low LZC
    whereas irregular signals are characterized by a high LZC. While often being interpreted as a
    complexity measure, LZC was originally proposed to reflect randomness (Lempel and Ziv, 1976).

    Permutation Lempel-Ziv Complexity (**PLZC**) combines LZC with
    :func:`permutation <entropy_permutation>`. A sequence of symbols is generated from the
    permutations observed in the :func:`time-delay embedding <complexity_embedding>`, and LZC is
    computed over it.

    Multiscale (Permutation) Lempel-Ziv Complexity (**MSLZC** or **MSPLZC**) combines permutation
    LZC with the :func:`multiscale approach <entropy_multiscale>`. It first performs a
    :func:`coarse-graining <complexity_coarsegraining>` procedure to the original time series.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter. Only used
        when ``permutation=True``.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter. Only used
        when ``permutation=True``
    permutation : bool
        If ``True``, will return PLZC.
    symbolize : str
        Only used when ``permutation=False``. Method to convert a continuous signal input into a
        symbolic (discrete) signal. By default, assigns 0 and 1 to values below and above the mean.
        Can be ``None`` to skip the process (in case the input is already discrete). See
        :func:`complexity_symbolize` for details.
    **kwargs
        Other arguments to be passed to :func:`complexity_ordinalpatterns` (if
        ``permutation=True``) or :func:`complexity_symbolize`.

    Returns
    ----------
    lzc : float
        Lempel Ziv Complexity (LZC) of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute LZC.

    See Also
    --------
    .complexity_symbolize, .complexity_ordinalpatterns, .entropy_permutation,

    References
    ----------
    * Lempel, A., & Ziv, J. (1976). On the complexity of finite sequences. IEEE Transactions on
      information theory, 22(1), 75-81.
    * Nagarajan, R. (2002). Quantifying physiological data with Lempel-Ziv complexity-certain
      issues. IEEE Transactions on Biomedical Engineering, 49(11), 1371-1373.
    * Kaspar, F., & Schuster, H. G. (1987). Easily calculable measure for the complexity of
      spatiotemporal patterns. Physical Review A, 36(2), 842.
    * Zhang, Y., Hao, J., Zhou, C., & Chang, K. (2009). Normalized Lempel-Ziv complexity and
      its application in bio-sequence analysis. Journal of mathematical chemistry, 46(4),
      1203-1212.
    * Bai, Y., Liang, Z., & Li, X. (2015). A permutation Lempel-Ziv complexity measure for EEG
      analysis. Biomedical Signal Processing and Control, 19, 102-114.
    * Borowska, M. (2021). Multiscale Permutation Lempel-Ziv Complexity Measure for Biomedical
      Signal Analysis: Interpretation and Application to Focal EEG Signals. Entropy, 23(7), 832.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Store parameters
    info = {"Permutation": permutation}

    # Permutation or not
    if permutation:
        info["Dimension"] = dimension
        info["Delay"] = delay
        # Permutation on the signal (i.e., converting to ordinal pattern).
        _, patterns_info = complexity_ordinalpatterns(
            signal, delay=delay, dimension=dimension, **kwargs
        )
        symbolic = patterns_info["Uniques"]
        # Merge (instead of overwriting, which would drop the parameters stored above)
        info.update(patterns_info)
    else:
        # Binarize the signal
        symbolic = complexity_symbolize(signal, method=symbolize, **kwargs)

    # Count using the lempelziv algorithm
    info["Complexity_Kolmogorov"], n = _complexity_lempelziv_count(symbolic)

    # Normalize
    if permutation is False:
        lzc = (info["Complexity_Kolmogorov"] * np.log2(n)) / n
    else:
        # math.factorial: the `np.math` alias is deprecated and removed in NumPy 2.0
        lzc = (
            info["Complexity_Kolmogorov"] * np.log2(n) / np.log2(math.factorial(dimension))
        ) / n

    return lzc, info
# =============================================================================
# Utilities
# =============================================================================
def _complexity_lempelziv_count(symbolic):
"""Computes LZC counts from symbolic sequences"""
# TODO: I really can't imagine that there is no faster way of doing that that with a while loop
# Convert to string (faster)
string = "".join(list(symbolic.astype(int).astype(str)))
# Initialize variables
n = len(string)
s = "0" + string
c = 1
j = 1
i = 0
k = 1
k_max = 1
stop = False
# Start counting
while stop is False:
if s[i + k] != s[j + k]:
if k > k_max:
# k_max stores the length of the longest pattern in the LA that has been matched
# somewhere in the SB
k_max = k
# we increase i while the bit doesn't match, looking for a previous occurrence of a
# pattern. s[i+k] is scanning the "search buffer" (SB)
i = i + 1
# we stop looking when i catches up with the first bit of the "look-ahead" (LA) part.
if i == j:
# If we were actually compressing, we would add the new token here. here we just
# count reconstruction STEPs
c = c + 1
# we move the beginning of the LA to the end of the newly matched pattern.
j = j + k_max
# if the LA surpasses length of string, then we stop.
if j + 1 > n:
stop = True
# after STEP,
else:
# we reset the searching index to beginning of SB (beginning of string)
i = 0
# we reset pattern matching index. Note that we are actually matching against
# the first bit of the string, because we added an extra 0 above, so i+k is the
# first bit of the string.
k = 1
# and we reset max length of matched pattern to k.
k_max = 1
else:
# we've finished matching a pattern in the SB, and we reset the matched pattern
# length counter.
k = 1
# I increase k as long as the pattern matches, i.e. as long as s[j+k] bit string can be
# reconstructed by s[i+k] bit string. Note that the matched pattern can "run over" j
# because the pattern starts copying itself (see LZ 76 paper). This is just what happens
# when you apply the cloning tool on photoshop to a region where you've already cloned...
else:
k = k + 1
# if we reach the end of the string while matching, we need to add that to the tokens,
# and stop.
if j + k > n:
c = c + 1
stop = True
return c, n
| 8,876 | 39.167421 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_shannon_joint.py | import numpy as np
import scipy.stats
from .entropy_shannon import _entropy_freq
def entropy_shannon_joint(x, y, base=2):
    """**Shannon's Joint Entropy**

    The joint entropy measures how much entropy is contained in a joint system of two random
    variables.

    Parameters
    ----------
    x : Union[list, np.array, pd.Series]
        A :func:`symbolic <complexity_symbolize>` sequence in the form of a vector of values.
    y : Union[list, np.array, pd.Series]
        Another symbolic sequence with the same values.
    base: float
        The logarithmic base to use, defaults to ``2``. Note that ``scipy.stats.entropy()``
        uses ``np.e`` as default (the natural logarithm).

    Returns
    --------
    float
        The Shannon joint entropy.
    dict
        A dictionary containing additional information regarding the parameters used
        to compute Shannon entropy.

    See Also
    --------
    entropy_shannon

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      x = ["A", "A", "A", "B", "A", "B"]
      y = ["A", "B", "A", "A", "A", "A"]

      jen, _ = nk.entropy_shannon_joint(x, y)
      jen

    """
    # Get frequencies of each unique symbol in each sequence
    labels_x, freq_x = _entropy_freq(x)
    labels_y, freq_y = _entropy_freq(y)

    # Both sequences must contain exactly the same set of symbols.
    # (Fixed: the previous check compared `labels_y` against itself and could never fail.)
    assert np.array_equal(labels_x, labels_y), "The labels of x and y are not the same."

    # NOTE(review): with a second distribution given, `scipy.stats.entropy(pk, qk)` computes
    # the relative entropy (Kullback-Leibler divergence) of the two marginal distributions,
    # not the joint entropy of the paired samples -- confirm this is the intended measure.
    return scipy.stats.entropy(freq_x, freq_y, base=base), {"Base": base}
| 1,440 | 24.732143 | 93 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/optim_complexity_delay.py | # -*- coding: utf-8 -*-
import itertools
from warnings import warn
import matplotlib
import matplotlib.collections
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal
import scipy.spatial
import scipy.stats
from ..misc import NeuroKitWarning, find_closest
from ..signal import (signal_autocor, signal_findpeaks, signal_psd,
signal_surrogate, signal_zerocrossings)
from .entropy_kl import entropy_kl
from .information_mutual import mutual_information
from .utils_complexity_embedding import complexity_embedding
def complexity_delay(
    signal, delay_max=50, method="fraser1986", algorithm=None, show=False, **kwargs
):
    """**Automated selection of the optimal Delay (Tau)**

    The time delay (Tau :math:`\\tau`, also referred to as *Lag*) is one of the two critical
    parameters (the other being the :func:`Dimension <complexity_dimension>` *m*) involved in
    the construction of the time-delay embedding of a signal. It corresponds to the delay in
    samples between the original signal and its delayed version(s).

    Several methods exist to guide the choice of the delay:

    * ``"fraser1986"``: first local minimum of the mutual information between the delayed and
      non-delayed time series.
    * ``"theiler1990"``: the point where the autocorrelation of the signal with its lagged
      version first crosses :math:`1/e`.
    * ``"casdagli1991"``: first zero-crossing of the autocorrelation.
    * ``"rosenstein1993"``: the point where the autocorrelation drops to :math:`(1 - 1/e)` of
      its maximum value.
    * ``"rosenstein1994"``: the point close to 40% of the slope of the average displacement
      from the diagonal (ADFD).
    * ``"kim1999"``: the C-C method based on the correlation integral (slow).
    * ``"lyle2021"``: Symmetric Projection Attractor Reconstruction (SPAR), based on the
      dominant frequency (fast, suited to approximately periodic data).

    Joint delay-and-dimension estimation is available via ``"gautama2003"`` (entropy ratio).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay_max : int
        The maximum time delay (Tau or lag) to test.
    method : str
        The method that defines what to compute for each tested value of Tau. Can be one of
        ``"fraser1986"``, ``"theiler1990"``, ``"casdagli1991"``, ``"rosenstein1993"``,
        ``"rosenstein1994"``, ``"kim1999"``, or ``"lyle2021"``.
    algorithm : str
        The method used to find the optimal value of Tau given the values computed by the
        method. If ``None`` (default), will select the algorithm according to the method.
        Modify only if you know what you are doing.
    show : bool
        If ``True``, will plot the metric values for each value of tau.
    **kwargs : optional
        Additional arguments to be passed for C-C method.

    Returns
    -------
    delay : int
        Optimal time delay.
    parameters : dict
        A dictionary containing additional information regarding the parameters used
        to compute optimal time-delay embedding.

    See Also
    ---------
    complexity, complexity_dimension, complexity_embedding, complexity_tolerance

    References
    ------------
    * Fraser, A. M., & Swinney, H. L. (1986). Independent coordinates for strange attractors
      from mutual information. Physical Review A, 33(2), 1134.
    * Rosenstein, M. T., Collins, J. J., & De Luca, C. J. (1994). Reconstruction expansion as
      a geometry-based framework for choosing proper delay times. Physica-Section D, 73(1),
      82-98.
    * Kim, H., Eykholt, R., & Salas, J. D. (1999). Nonlinear dynamics, delay times, and
      embedding windows. Physica D: Nonlinear Phenomena, 127(1-2), 48-60.
    * Lyle, J. V., Nandi, M., & Aston, P. J. (2021). Symmetric Projection Attractor
      Reconstruction: Sex Differences in the ECG. Frontiers in cardiovascular medicine, 1034.
    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy
      based method for determining the optimal embedding parameters of a signal. In 2003 IEEE
      International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
      (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.

    """
    # Candidate delays to evaluate
    if isinstance(delay_max, int):
        tau_sequence = np.arange(1, delay_max + 1)
    else:
        tau_sequence = np.array(delay_max).astype(int)

    method = method.lower()

    # Methods that return directly (SPAR, and joint delay-dimension estimation)
    if method in ("aston2020", "lyle2021", "spar"):
        return _complexity_delay_spar(signal, algorithm=algorithm, show=show, **kwargs)
    if method in ("gautama2003", "gautama", "entropyratio"):
        return _complexity_delay_entropyratio(signal, delay_max=delay_max, show=show, **kwargs)

    # Map each method alias to its metric and default selection algorithm
    known_methods = {
        "fraser": ("Mutual Information", "first local minimum"),
        "fraser1986": ("Mutual Information", "first local minimum"),
        "tdmi": ("Mutual Information", "first local minimum"),
        "mi2": ("Mutual Information 2", "first local minimum"),
        "theiler": ("Autocorrelation", "first 1/e crossing"),
        "theiler1990": ("Autocorrelation", "first 1/e crossing"),
        "casdagli": ("Autocorrelation", "first zero crossing"),
        "casdagli1991": ("Autocorrelation", "first zero crossing"),
        "rosenstein": ("Displacement", "closest to 40% of the slope"),
        "rosenstein1994": ("Displacement", "closest to 40% of the slope"),
        "adfd": ("Displacement", "closest to 40% of the slope"),
        "rosenstein1993": ("Autocorrelation", "first drop below 1-(1/e) of maximum"),
        "kim1999": ("Correlation Integral", "first local minimum"),
        "cc": ("Correlation Integral", "first local minimum"),
    }
    if method not in known_methods:
        raise ValueError("complexity_delay(): 'method' not recognized.")
    metric, default_algorithm = known_methods[method]
    if algorithm is None:
        algorithm = default_algorithm

    # Compute the metric for every candidate delay
    metric_values = _embedding_delay_metric(signal, tau_sequence, metric=metric)

    # Select the optimal tau from the metric curve
    optimal = _embedding_delay_select(metric_values, algorithm=algorithm)
    if np.isnan(optimal):
        warn(
            "No optimal time delay is found. Nan is returned."
            " Consider using a higher `delay_max`.",
            category=NeuroKitWarning,
        )
    else:
        optimal = tau_sequence[optimal]

    if show is True:
        _embedding_delay_plot(
            signal,
            metric_values=metric_values,
            tau_sequence=tau_sequence,
            tau=optimal,
            metric=metric,
        )

    # Return optimal tau and info dict
    return optimal, {
        "Values": tau_sequence,
        "Scores": metric_values,
        "Algorithm": algorithm,
        "Metric": metric,
        "Method": method,
    }
# =============================================================================
# Methods
# =============================================================================
def _embedding_delay_select(metric_values, algorithm="first local minimum"):
if algorithm in ["first local minimum (corrected)"]:
# if immediately increasing, assume it is the first is the closest
if np.diff(metric_values)[0] > 0:
optimal = 0
# take last value if continuously decreasing with no inflections
elif all(np.diff(metric_values) < 0):
optimal = len(metric_values) - 1
else:
# Find reversed peaks
optimal = signal_findpeaks(
-1 * metric_values, relative_height_min=0.1, relative_max=True
)["Peaks"]
elif algorithm == "first local minimum":
# Find reversed peaks
try:
optimal = signal_findpeaks(
-1 * metric_values, relative_height_min=0.1, relative_max=True
)["Peaks"]
except ValueError:
warn(
"First local minimum detection failed. Try setting "
+ "`algorithm = 'first local minimum (corrected)'` or using another method.",
category=NeuroKitWarning,
)
optimal = np.nan
elif algorithm == "first 1/e crossing":
metric_values = metric_values - 1 / np.exp(1)
optimal = signal_zerocrossings(metric_values)
elif algorithm == "first zero crossing":
optimal = signal_zerocrossings(metric_values)
elif algorithm == "closest to 40% of the slope":
slope = np.diff(metric_values) * len(metric_values)
slope_in_deg = np.rad2deg(np.arctan(slope))
optimal = np.where(slope_in_deg == find_closest(40, slope_in_deg))[0]
elif algorithm == "first drop below 1-(1/e) of maximum":
try:
optimal = np.where(metric_values < np.nanmax(metric_values) * (1 - 1.0 / np.e))[0][0]
except IndexError: # If no value are below that threshold
optimal = np.nan
if not isinstance(optimal, (int, float, np.integer)):
if len(optimal) != 0:
optimal = optimal[0]
else:
optimal = np.nan
return optimal
# =============================================================================
def _embedding_delay_metric(
    signal,
    tau_sequence,
    metric="Mutual Information",
    dimensions=(2, 3, 4, 5),
    r_vals=(0.5, 1.0, 1.5, 2.0),
):
    """Compute, for every candidate delay, the quantity on which delay selection is based.

    Parameters
    ----------
    signal : array-like
        The time series.
    tau_sequence : array-like of int
        Candidate delays to evaluate.
    metric : str
        One of ``"Autocorrelation"``, ``"Correlation Integral"``, ``"Mutual Information"``,
        ``"Mutual Information 2"`` or ``"Displacement"``.
    dimensions : iterable of int
        Embedding dimensions averaged over by the C-C (Correlation Integral) method.
        (Immutable default: a tuple instead of a mutable list.)
    r_vals : iterable of float
        Tolerance factors (in SDs of the signal) used by the C-C method.

    Returns
    -------
    np.ndarray
        One metric value per candidate delay.

    Notes
    -----
    ``dimensions`` and ``r_vals`` are relevant only when the metric is the Correlation
    Integral (C-C method, Kim et al., 1999). For this method, either the first zero
    crossing of the statistic averages or the first local minimum of the deviations can
    be used to obtain the optimal tau; this implementation uses the latter since in
    practice they are both in close proximity.
    """
    if metric == "Autocorrelation":
        values, _ = signal_autocor(signal)
        values = values[: len(tau_sequence)]  # upper limit

    elif metric == "Correlation Integral":
        # Express the tolerances in units of the signal's standard deviation
        r_vals = [i * np.std(signal) for i in r_vals]
        values = np.zeros(len(tau_sequence))
        for i, t in enumerate(tau_sequence):
            change = 0
            for m in dimensions:
                # Average of statistic deviations across r_vals
                change += _embedding_delay_cc_deviation_max(
                    signal, delay=t, dimension=m, r_vals=r_vals
                )
            # Average across the tested dimensions (was hard-coded to 4)
            values[i] = change / len(dimensions)

    else:
        values = np.zeros(len(tau_sequence))
        # Loop through taus and compute all scores values
        for i, current_tau in enumerate(tau_sequence):
            embedded = complexity_embedding(signal, delay=current_tau, dimension=2)
            if metric == "Mutual Information":
                values[i] = mutual_information(
                    embedded[:, 0], embedded[:, 1], method="varoquaux"
                )
            elif metric == "Mutual Information 2":
                values[i] = mutual_information(
                    embedded[:, 0], embedded[:, 1], method="knn"
                )
            elif metric == "Displacement":
                # Mean Euclidean displacement from the zero-delay reconstruction
                dimension = 2
                # Reconstruct with zero time delay.
                tau0 = embedded[:, 0].repeat(dimension).reshape(len(embedded), dimension)
                dist = np.asarray(
                    [scipy.spatial.distance.euclidean(i, j) for i, j in zip(embedded, tau0)]
                )
                values[i] = np.mean(dist)
            else:
                raise ValueError("'metric' not recognized.")

    return values
# =============================================================================
# Internals for C-C method, Kim et al. (1999)
# =============================================================================
def _embedding_delay_cc_integral_sum(signal, dimension=3, delay=10, r=0.02):
    """Correlation integral is a cumulative distribution function, which denotes
    the probability of distance between any pairs of points in phase space
    not greater than the specified `r`.

    Returns the scalar correlation integral C(m, r, t) of the delay-embedded
    signal, i.e. the fraction of unique pairs of embedded vectors whose
    sup-norm distance is within `r`.
    """
    # Embed signal
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension, show=False)
    M = embedded.shape[0]
    # Prepare indices for comparing all unique pairwise vectors
    combinations = list(itertools.combinations(range(0, M), r=2))
    first_index, second_index = np.transpose(combinations)[0], np.transpose(combinations)[1]
    # Vectorize over the index pairs only; `embedded` and `r` stay fixed.
    # Note: all arguments must be passed by keyword for `excluded` to apply.
    vectorized_integral = np.vectorize(_embedding_delay_cc_integral, excluded=["embedded", "r"])
    integral = np.sum(
        vectorized_integral(
            first_index=first_index, second_index=second_index, embedded=embedded, r=r
        )
    )
    return integral
def _embedding_delay_cc_integral(first_index, second_index, embedded, r=0.02):
M = embedded.shape[0] # Number of embedded points
diff = np.linalg.norm(embedded[first_index] - embedded[second_index], ord=np.inf) # sup-norm
h = np.heaviside(r - diff, 1)
integral = (2 / (M * (M - 1))) * h # find average
return integral
def _embedding_delay_cc_statistic(signal, dimension=3, delay=10, r=0.02):
    """The dependence statistic as the serial correlation of a nonlinear time series.

    Implements S(m, r, t) from the C-C method (Kim et al., 1999): the signal
    is split into `delay` disjoint sub-series; for each, the difference
    between its m-dimensional correlation integral and the m-th power of the
    1-dimensional correlation integral of the full signal is averaged.
    """
    # create disjoint time series
    series = [signal[i - 1 :: delay] for i in range(1, delay + 1)]
    # The 1-dimensional correlation integral of the full signal does not
    # depend on the sub-series: compute it once (it was previously
    # recomputed at every loop iteration).
    baseline = (
        _embedding_delay_cc_integral_sum(signal, dimension=1, delay=delay, r=r)
    ) ** dimension
    statistic = 0
    for sub_series in series:
        statistic += (
            _embedding_delay_cc_integral_sum(sub_series, dimension=dimension, delay=delay, r=r)
            - baseline
        )
    return statistic / delay
def _embedding_delay_cc_deviation_max(signal, r_vals=[0.5, 1.0, 1.5, 2.0], delay=10, dimension=3):
    """A measure of the variation of the dependence statistic with r using
    several representative values of r.

    Returns the spread (max - min) of the dependence statistic evaluated at
    each tolerance in `r_vals`. Note: the mutable default `r_vals` is never
    mutated here.
    """
    # Vectorize over `r_vals` only; signal, delay and dimension stay fixed
    # (arguments must be passed by keyword for `excluded` to apply)
    vectorized_deviation = np.vectorize(
        _embedding_delay_cc_deviation, excluded=["signal", "delay", "dimension"]
    )
    deviations = vectorized_deviation(
        signal=signal, r_vals=r_vals, delay=delay, dimension=dimension
    )
    return np.max(deviations) - np.min(deviations)
def _embedding_delay_cc_deviation(signal, r_vals=[0.5, 1.0, 1.5, 2.0], delay=10, dimension=3):
    # Thin adapter for np.vectorize in `_embedding_delay_cc_deviation_max`:
    # despite its name, `r_vals` receives a single scalar r per call (the
    # vectorization iterates over the r values), which is forwarded as `r`.
    # The mutable default argument is never mutated here.
    return _embedding_delay_cc_statistic(signal, delay=delay, dimension=dimension, r=r_vals)
# =============================================================================
# Plotting Generics
# =============================================================================
def _embedding_delay_plot(
    signal,
    metric_values,
    tau_sequence,
    tau=1,
    metric="Mutual Information",
    ax0=None,
    ax1=None,
    plot="2D",
):
    """Plot the delay-optimization metric (top) and the resulting attractor (bottom).

    `metric_values` holds the metric for each candidate delay in
    `tau_sequence`; `tau` is the selected optimal delay; `metric` is used as
    the y-axis label; `plot` selects a "2D" or "3D" attractor. Returns the
    created figure, or ``None`` if axes were supplied by the caller.
    """
    # Prepare figure
    if ax0 is None and ax1 is None:
        fig = plt.figure(constrained_layout=False)
        spec = matplotlib.gridspec.GridSpec(
            ncols=1, nrows=2, height_ratios=[1, 3], width_ratios=[2]
        )
        ax0 = fig.add_subplot(spec[0])
        if plot == "2D":
            ax1 = fig.add_subplot(spec[1])
        elif plot == "3D":
            ax1 = fig.add_subplot(spec[1], projection="3d")
    else:
        # Axes were provided by the caller; no new figure is created here
        fig = None
    # Top panel: metric as a function of the candidate delays
    ax0.set_title("Optimization of Delay")
    ax0.set_xlabel("Time Delay")
    ax0.set_ylabel(metric)
    ax0.plot(tau_sequence, metric_values, color="#FFC107")
    ax0.axvline(x=tau, color="#E91E63", label="Optimal delay: " + str(tau))
    ax0.legend(loc="upper right")
    # Bottom panel: attractor reconstructed with the optimal delay
    ax1.set_title("Attractor")
    ax1.set_xlabel("Signal [i]")
    ax1.set_ylabel("Signal [i-" + str(tau) + "]")
    # Get data points, set axis limits
    embedded = complexity_embedding(signal, delay=tau, dimension=3)
    x = embedded[:, 0]
    y = embedded[:, 1]
    z = embedded[:, 2]
    ax1.set_xlim(x.min(), x.max())
    # NOTE(review): the y-limits use x's range, and below the colors use
    # norm(x) with a norm built from z's range — possibly intentional since
    # x, y, z are delayed copies of the same signal, but worth confirming.
    ax1.set_ylim(x.min(), x.max())
    # Colors
    norm = plt.Normalize(z.min(), z.max())
    cmap = plt.get_cmap("plasma")
    colors = cmap(norm(x))
    # Attractor for 2D vs 3D
    if plot == "2D":
        # Draw the trajectory as a LineCollection colored by the z dimension
        points = np.array([x, y]).T.reshape(-1, 1, 2)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        lc = matplotlib.collections.LineCollection(segments, cmap="plasma", norm=norm)
        lc.set_array(z)
        ax1.add_collection(lc)
    elif plot == "3D":
        # Draw each segment individually so it can carry its own color
        points = np.array([x, y, z]).T.reshape(-1, 1, 3)
        segments = np.concatenate([points[:-1], points[1:]], axis=1)
        for i in range(len(x) - 1):
            seg = segments[i]
            (l,) = ax1.plot(seg[:, 0], seg[:, 1], seg[:, 2], color=colors[i])
            l.set_solid_capstyle("round")
        ax1.set_zlabel("Signal [i-" + str(2 * tau) + "]")
    return fig
# =============================================================================
# Optimal Delay via SPAR Method
# =============================================================================
def _complexity_delay_spar(signal, algorithm=None, show=False, **kwargs):
    """Estimate the optimal delay as 1/3 of the dominant period (SPAR method).

    The power spectrum is computed (with a hard-coded 1000 Hz sampling rate so
    that frequencies can be converted into periods expressed in samples), and
    the delay is taken as one third of the period of the frequency carrying
    maximal power.
    """
    if algorithm is None:
        algorithm = "fft"
    # Compute power in frequency domain
    psd = signal_psd(signal, sampling_rate=1000, method=algorithm, show=False, **kwargs)
    power = psd["Power"].values
    freqs = 1000 / psd["Frequency"].values  # Convert to samples
    # Get the 1/3 max frequency (in samples) (https://youtu.be/GGrOJtcTcHA?t=730)
    idx = np.argmax(power)
    optimal = int(freqs[idx] / 3)
    if show is True:
        # Restrict the plot to periods up to twice the dominant one (6 * optimal)
        idxs = freqs <= optimal * 6
        _embedding_delay_plot(
            signal,
            metric_values=power[idxs],
            tau_sequence=freqs[idxs],
            tau=optimal,
            metric="Power",
        )
    return optimal, {"Algorithm": algorithm, "Method": "SPAR"}
# =============================================================================
# Joint-Optimization via Entropy Ratio (Gautama, 2003)
# =============================================================================
def _complexity_delay_entropyratio(
    signal,
    delay_max=20,
    dimension_max=5,
    surrogate_n=5,
    surrogate_method="random",
    show=False,
):
    """Joint-optimization using Entropy Ratio method.

    Jointly selects the optimal delay and embedding dimension as the pair
    minimizing the "entropy ratio": the KL entropy (see `entropy_kl`) of the
    embedded signal relative to the average entropy of embedded surrogates,
    penalized for higher dimensions via minimum description length (MDL).

    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based
    method for determining the optimal embedding parameters of a signal. In 2003 IEEE
    International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
    (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay_max : int
        The maximum time delay (Tau) to test.
    dimension_max : int
        The maximum embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order') to test.
    surrogate_n : int
        The number of surrogate signals generated for each (delay, dimension) pair.
    surrogate_method : str
        Method passed to `signal_surrogate()` to generate the surrogates.
    show : bool
        If True, plot the entropy-ratio surface over delays and dimensions.
    """
    # Initalize vectors
    if isinstance(delay_max, int):
        delay_max = np.arange(1, delay_max + 1)
    if isinstance(dimension_max, int):
        dimension_max = np.arange(2, dimension_max + 1)
    # Prepare output: one row per (dimension, delay) combination
    out = pd.DataFrame(
        {
            "Dimension": np.repeat(dimension_max, len(delay_max)),
            "Delay": np.tile(delay_max, len(dimension_max)),
        }
    )
    # N is the number of delay vectors
    # out["N"] = len(signal) - (out["Dimension"] - 1) * out["Delay"]
    out["N"] = len(signal)
    klens = np.full(len(out), np.nan)
    klens_surrogates = np.full(len(out), np.nan)
    for i in range(len(out)):
        # KL entropy of the embedded signal for this (delay, dimension) pair
        klens[i], _ = entropy_kl(
            signal,
            delay=out["Delay"][i],
            dimension=out["Dimension"][i],
        )
        # Average KL entropy of `surrogate_n` surrogates with the same embedding
        klens_surrogates[i] = np.nanmean(
            [
                entropy_kl(
                    signal_surrogate(signal, method=surrogate_method),
                    delay=out["Delay"][i],
                    dimension=out["Dimension"][i],
                )[0]
                for j in range(surrogate_n)
            ]
        )
    out["KLEn_Signal"] = klens
    out["KLEn_Surrogate"] = klens_surrogates
    # Entropy Ratio (ER)
    out["KLEn_Ratio"] = klens / klens_surrogates
    # To penalise for higher embedding dimensions, the minimum
    # description length (MDL) method is superimposed, yielding
    # the "entropy ratio" (ER):
    out["Entropy_Ratio"] = out["KLEn_Ratio"] * (
        1 + (out["Dimension"] * np.log(out["N"])) / out["N"]
    )
    # optimal dimension and tau is where entropy_ratio is minimum
    idx = out["Entropy_Ratio"].argmin()
    optimal_dimension = out["Dimension"][idx]
    optimal_delay = out["Delay"][idx]
    if show is True:
        plt.figure()
        ax = plt.axes(projection="3d")
        ax.set_title("Joint-Estimation of Optimal Delay and Dimension")
        ax.plot_trisurf(
            out["Delay"],
            out["Dimension"],
            out["Entropy_Ratio"],
            cmap=plt.get_cmap("GnBu"),
            antialiased=True,
            linewidth=0,
            label="Minimum Entropy Ratio",
            zorder=1,
        )
        # Vertical line and endpoint markers through the optimal pair
        ax.plot(
            [optimal_delay] * 2,
            [optimal_dimension] * 2,
            [out["Entropy_Ratio"].min(), out["Entropy_Ratio"].max()],
            zorder=2,
        )
        ax.scatter(
            [optimal_delay] * 2,
            [optimal_dimension] * 2,
            [out["Entropy_Ratio"].min(), out["Entropy_Ratio"].max()],
            color="red",
            zorder=2,
        )
        ax.set_ylabel("Dimension")
        ax.set_xlabel("Delay")
        ax.set_zlabel("Entropy Ratio")
        # Use integer tick marks when the grids are small enough to label
        if len(dimension_max) < 10:
            ax.set_yticks(dimension_max)
        if len(delay_max) < 10:
            ax.set_xticks(delay_max)
    return optimal_delay, {"Dimension": optimal_dimension, "Data": out}
# =============================================================================
# Joint-Optimization via SymbolicDynamic (Matilla-García, 2021)
# =============================================================================
# def _complexity_delay_symbolicdynamics(
# signal,
# delay_max=20,
# dimension_max=5,
# show=False,
# ):
# """https://www.mdpi.com/1099-4300/23/2/221/htm"""
# delay = 1
# dimension = 3
# e = 3
# signal = [2, -7, -12, 5, -1, 9, 14]
# embedded = nk.complexity_embedding(signal, delay=delay, dimension=dimension)
# print(embedded)
# # How to create the symbolic sequence?
# symbols = np.zeros((len(embedded), dimension - 1))
# for d in range(1, dimension):
# # Difference with e
# symbols[:, d - 1][np.all(np.abs(embedded[:, d - 1 : d + 1]) >= e, axis=1)] = 1
# symbols[:, 0][np.abs(embedded[:, 0]) >= e] = 1
# symbols[:, 1][np.all(embedded[:, 1:] < e, axis=1)] = 1
| 30,487 | 37.987212 | 109 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_renyi.py | import numpy as np
from .entropy_shannon import _entropy_freq
def entropy_renyi(signal=None, alpha=1, symbolize=None, show=False, freq=None, **kwargs):
    """**Rényi entropy (REn or H)**

    The Rényi entropy generalizes several entropy measures through its
    *alpha* parameter:

    * :math:`\\alpha = 0`: the Hartley entropy.
    * :math:`\\alpha = 1`: the :func:`Shannon entropy <entropy_shannon>`
      (computed here with the natural logarithm).
    * :math:`\\alpha = 2`: the collision entropy.

    It is mathematically defined as:

    .. math::

      REn = \\frac{1}{1-\\alpha} \\log_2 \\left( \\sum_{x \\in \\mathcal{X}} p(x)^\\alpha \\right)

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    alpha : float
        The *alpha* :math:`\\alpha` parameter (default to 1) for Rényi entropy.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. ``None`` by
        default, which skips the process (and assumes the input is already discrete). See
        :func:`complexity_symbolize` for details.
    show : bool
        If ``True``, will show the discrete the signal.
    freq : np.array
        Instead of a signal, a vector of frequencies (or probabilities) can be provided.
    **kwargs
        Optional arguments. Not used for now.

    Returns
    --------
    ren : float
        The Rényi entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, entropy_tsallis

    References
    -----------
    * Rényi, A. (1961, January). On measures of entropy and information. In Proceedings of the
      Fourth Berkeley Symposium on Mathematical Statistics and Probability, Volume 1:
      Contributions to the Theory of Statistics (Vol. 4, pp. 547-562). University of California
      Press.
    """
    # Derive the probability distribution, either from the provided
    # frequencies or from the (symbolized) signal
    if freq is None:
        _, freq = _entropy_freq(signal, symbolize=symbolize, show=show)
    probs = freq / np.sum(freq)

    # The limit alpha -> 1 converges to the Shannon entropy (natural log)
    if np.isclose(alpha, 1):
        ren = -np.sum(probs * np.log(probs))
    else:
        ren = np.log(np.sum(probs**alpha)) / (1 - alpha)

    return ren, {"Symbolization": symbolize}
| 2,898 | 30.857143 | 103 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_sevcik.py | import numpy as np
import pandas as pd
from ..stats import rescale
def fractal_sevcik(signal):
    """**Sevcik Fractal Dimension (SFD)**

    Approximates the fractal dimension of a waveform (Sevcik, 1998) by
    normalizing the curve into the unit square and relating its total length
    to the number of points. Useful as a quick measure of the complexity and
    randomness of a signal.

    .. note:: Some papers (e.g., Wang et al. 2017) suggest adding ``np.log(2)`` to the numerator,
       but it's unclear why, so we sticked to the original formula for now. But if you have an
       idea, please let us know!

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    ---------
    sfd : float
        The sevcik fractal dimension.
    info : dict
        An empty dictionary returned for consistency with the other complexity functions.

    References
    ----------
    * Sevcik, C. (2010). A procedure to estimate the fractal dimension of waveforms. arXiv
      preprint arXiv:1003.5266.
    * Wang, H., Li, J., Guo, L., Dou, Z., Lin, Y., & Zhou, R. (2017). Fractal complexity-based
      feature extraction algorithm of communication signals. Fractals, 25(04), 1740008.
    """
    # Multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Steps 1-2: map the curve into the unit square — y* is the min-max
    # normalized signal, x* is uniformly spaced on [0, 1]
    y_star = rescale(signal, to=[0, 1])
    n_points = len(y_star)
    x_star = np.linspace(0, 1, n_points)

    # Step 3: total length of the normalized curve (sum of segment lengths;
    # np.diff yields n_points - 1 segments)
    curve_length = np.sum(np.sqrt(np.diff(y_star) ** 2 + np.diff(x_star) ** 2))

    # Step 4: approximate fractal dimension (original Sevcik formula)
    sfd = 1 + np.log(curve_length) / np.log(2 * (n_points - 1))
    return sfd, {}
| 2,856 | 33.841463 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_simulate.py | # -*- coding: utf-8 -*-
import numpy as np
from .utils_complexity_attractor import _attractor_lorenz
def complexity_simulate(
    duration=10, sampling_rate=1000, method="ornstein", hurst_exponent=0.5, **kwargs
):
    """**Simulate chaotic time series**

    Generate a chaotic signal from one of several complex systems:

    * **Ornstein-Uhlenbeck**: a (fractional) Ornstein-Uhlenbeck process,
      controlled by ``hurst_exponent``.
    * **Lorenz**: the first (x) dimension of a Lorenz system.
    * **Mackey-Glass**: the discrete approximation of the Mackey-Glass delay
      differential equation described by Grassberger & Procaccia (1983).
    * **Random walk**: used for any other ``method`` value (e.g., ``"random"``).

    Parameters
    ----------
    duration : int
        Desired length of duration (s).
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second).
    method : str
        Can be ``"hurst"``/``"ornstein"`` for a (fractional) Ornstein-Uhlenbeck
        process, ``"lorenz"`` for the first dimension of a Lorenz system,
        ``"mackeyglass"`` for the Mackey-Glass equation, or ``"random"`` to
        generate a random-walk.
    hurst_exponent : float
        Hurst exponent for the Ornstein-Uhlenbeck method. Defaults to ``0.5``.
    **kwargs
        Other arguments passed to the underlying simulators.

    Returns
    -------
    array
        Simulated complexity time series.
    """
    key = method.lower()
    if key in ("fractal", "fractional", "hurst", "ornsteinuhlenbeck", "ornstein"):
        return _complexity_simulate_ornstein(
            duration=duration, sampling_rate=sampling_rate, hurst_exponent=hurst_exponent, **kwargs
        )
    if key == "lorenz":
        # Keep only the x-dimension of the Lorenz attractor
        return _attractor_lorenz(sampling_rate=sampling_rate, duration=duration, **kwargs)[:, 0]
    if key == "mackeyglass":
        return _complexity_simulate_mackeyglass(
            duration=duration, sampling_rate=sampling_rate, **kwargs
        )
    # Fallback: random walk of duration * sampling_rate samples
    return _complexity_simulate_randomwalk(int(duration * sampling_rate))
# =============================================================================
# Methods
# =============================================================================
def _complexity_simulate_mackeyglass(
    duration=10, sampling_rate=1000, x0="fixed", a=0.2, b=0.1, c=10.0, n=1000, discard=250
):
    """Generate time series using the Mackey-Glass equation. Generates time series using the discrete approximation of
    the Mackey-Glass delay differential equation described by Grassberger & Procaccia (1983).
    Taken from nolitsa (https://github.com/manu-mannattil/nolitsa/blob/master/nolitsa/data.py#L223).
    Parameters
    ----------
    duration : int
        Duration of the time series to be generated.
    sampling_rate : float
        Sampling step of the time series. It is useful to pick something between tau/100 and tau/10,
        with tau/sampling_rate being a factor of n. This will make sure that there are only whole
        number indices. Defaults to 1000.
    x0 : array
        Initial condition for the discrete map. Should be of length n. Can be "fixed", "random", or
        a vector of size n.
    a : float
        Constant a in the Mackey-Glass equation. Defaults to 0.2.
    b : float
        Constant b in the Mackey-Glass equation. Defaults to 0.1.
    c : float
        Constant c in the Mackey-Glass equation. Defaults to 10.0
    n : int
        The number of discrete steps into which the interval between t and t + tau should be divided.
        This results in a time step of tau/n and an n + 1 dimensional map. Defaults to 1000.
    discard : int
        Number of n-steps to discard in order to eliminate transients. A total of n*discard steps will
        be discarded. Defaults to 250.
    Returns
    -------
    array
        Simulated complexity time series.
    """
    # Number of output samples requested
    length = duration * sampling_rate
    # Delay tau on the fine integration grid (tau = 50 * sampling_rate here,
    # presumably chosen so that tau/sampling_rate is a whole factor — see docstring)
    tau = sampling_rate / 2 * 100
    # `sampling_rate` is reused as the decimation step on the fine grid (n steps per tau)
    sampling_rate = int(n * sampling_rate / tau)
    # Total number of fine-grid points: transient (n * discard) + output span
    grids = int(n * discard + sampling_rate * length)
    x = np.zeros(grids)
    # Initial condition: "random" -> small perturbations around 0.5,
    # any other string (e.g. "fixed") -> ones, otherwise a user-provided vector
    if isinstance(x0, str):
        if x0 == "random":
            x[:n] = 0.5 + 0.05 * (-1 + 2 * np.random.random(n))
        else:
            x[:n] = np.ones(n)
    else:
        x[:n] = x0
    # Coefficients of the discretized (trapezoidal-style) Mackey-Glass update
    A = (2 * n - b * tau) / (2 * n + b * tau)
    B = a * tau / (2 * n + b * tau)
    # Iterate the (n + 1)-dimensional map: each step mixes the decayed current
    # value with the delayed nonlinear feedback terms at lags n and n - 1
    for i in range(n - 1, grids - 1):
        x[i + 1] = A * x[i] + B * (
            x[i - n] / (1 + x[i - n] ** c) + x[i - n + 1] / (1 + x[i - n + 1] ** c)
        )
    # Drop the transient and decimate back to the requested sampling
    return x[n * discard :: sampling_rate]
def _complexity_simulate_ornstein(
    duration=10, sampling_rate=1000, theta=0.3, sigma=0.1, hurst_exponent=0.7
):
    """This is based on https://github.com/LRydin/MFDFA.

    Simulates a (fractional) Ornstein-Uhlenbeck process via Euler integration,
    driven by fractional Gaussian noise increments.

    Parameters
    ----------
    duration : int
        The desired length in samples.
    sampling_rate : int
        The desired sampling rate (in Hz, i.e., samples/second). Defaults to 1000Hz.
    theta : float
        Drift. Defaults to 0.3.
    sigma : float
        Diffusion. Defaults to 0.1.
    hurst_exponent : float
        Defaults to 0.7.
    Returns
    -------
    array
        Simulated complexity time series.
    """
    # Time array
    length = duration * sampling_rate
    # The fractional Gaussian noise, scaled by duration**H to match the
    # self-similarity scaling of the process over the simulated time span
    dB = (duration ** hurst_exponent) * _complexity_simulate_fractionalnoise(
        size=length, hurst_exponent=hurst_exponent
    )
    # Initialise the array y
    y = np.zeros([length])
    # Integrate the process (Euler scheme): mean-reverting drift with time
    # step 1/sampling_rate, plus diffusion driven by the fGn increments
    for i in range(1, length):
        y[i] = y[i - 1] - theta * y[i - 1] * (1 / sampling_rate) + sigma * dB[i]
    return y
def _complexity_simulate_fractionalnoise(size=1000, hurst_exponent=0.5):
"""Generates fractional Gaussian noise.
Generates fractional Gaussian noise with a Hurst index H in (0,1). If H = 1/2 this is simply
Gaussian noise. The current method employed is the Davies-Harte method, which fails for H ≈ 0.
Looking for help to implement a Cholesky decomposition method and the Hosking's method.
This is based on https://github.com/LRydin/MFDFA/blob/master/MFDFA/fgn.py and the work of
Christopher Flynn fbm in https://github.com/crflynn/fbm
See also Davies, Robert B., and D. S. Harte. 'Tests for Hurst effect.' Biometrika 74, no.1
(1987): 95-101.
Parameters
----------
size : int
Length of fractional Gaussian noise to generate.
hurst_exponent : float
Hurst exponent H in (0,1).
Returns
-------
array
Simulated complexity time series.
"""
# Sanity checks
assert isinstance(size, int), "Size must be an integer number"
assert isinstance(hurst_exponent, float), "Hurst index must be a float in (0,1)"
# Generate linspace
k = np.linspace(0, size - 1, size)
# Correlation function
cor = 0.5 * (
np.abs(k - 1) ** (2 * hurst_exponent)
- 2 * np.abs(k) ** (2 * hurst_exponent)
+ np.abs(k + 1) ** (2 * hurst_exponent)
)
# Eigenvalues of the correlation function
eigenvals = np.sqrt(np.fft.fft(np.concatenate([cor[:], 0, cor[1:][::-1]], axis=None).real))
# Two normal distributed noises to be convoluted
gn = np.random.normal(0.0, 1.0, size)
gn2 = np.random.normal(0.0, 1.0, size)
# This is the Davies–Harte method
w = np.concatenate(
[
(eigenvals[0] / np.sqrt(2 * size)) * gn[0],
(eigenvals[1:size] / np.sqrt(4 * size)) * (gn[1:] + 1j * gn2[1:]),
(eigenvals[size] / np.sqrt(2 * size)) * gn2[0],
(eigenvals[size + 1 :] / np.sqrt(4 * size)) * (gn[1:][::-1] - 1j * gn2[1:][::-1]),
],
axis=None,
)
# Perform fft. Only first N entry are useful
f = np.fft.fft(w).real[:size] * ((1.0 / size) ** hurst_exponent)
return f
def _complexity_simulate_randomwalk(size=1000):
"""Random walk."""
steps = np.random.choice(a=[-1, 0, 1], size=size - 1)
return np.concatenate([np.zeros(1), steps]).cumsum(0)
| 9,912 | 31.395425 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_katz.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
def fractal_katz(signal):
    """**Katz's Fractal Dimension (KFD)**

    Compute Katz's fractal dimension from the total curve length, the average
    distance between successive points, and the maximum distance between the
    starting point and any other point of the sample.

    Fractal dimensions range from 1.0 for straight lines, through approximately
    1.15 for random-walks, to approaching 1.5 for the most convoluted
    waveforms.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    kfd : float
        Katz's fractal dimension of the single time series.
    info : dict
        A dictionary containing additional information (currently empty, but
        returned nonetheless for consistency with other functions).

    See Also
    --------
    fractal_linelength

    References
    ----------
    * Katz, M. J. (1988). Fractals and the analysis of waveforms.
      Computers in Biology and Medicine, 18(3), 145-156. doi:10.1016/0010-4825(88)90041-8.
    """
    # Multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Work on a NaN-free array copy of the values
    values = np.array(signal)
    values = values[~np.isnan(values)]

    # L: total length of the curve; a: average step between successive points
    steps = np.abs(np.diff(values))
    total_length = np.sum(steps)
    mean_step = np.mean(steps)

    # d: farthest distance between the first point and any other point
    extent = np.max(np.abs(values - values[0]))

    # Katz (1988): D = log10(L/a) / log10(d/a)
    kfd = np.log10(total_length / mean_step) / (np.log10(extent / mean_step))
    return kfd, {}
| 2,915 | 26.771429 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_cosinesimilarity.py | import numpy as np
import pandas as pd
from ..stats import rescale
from .utils_complexity_embedding import complexity_embedding
from .optim_complexity_tolerance import complexity_tolerance
def entropy_cosinesimilarity(signal=None, delay=1, dimension=3, tolerance="sd"):
    """**Cosine Similarity Entropy (CoSiEn) and its multiscale variant (MSCoSiEn)**
    Cosine Similarity Entropy (CoSiEn) is based on fundamental modifications of the SampEn and the
    MSEn approaches, which makes the CoSiEn amplitude-independent and robust to spikes and short
    length of data segments, two key problems with the standard SampEn.
    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    **kwargs
        Optional arguments. Not used for now.
    Returns
    --------
    cosien : float
        The Cosine Similarity Entropy entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.
    See Also
    --------
    entropy_multiscale
    References
    -----------
    * Chanwimalueang, T., & Mandic, D. P. (2017). Cosine similarity entropy: Self-correlation-based
      complexity analysis of dynamical systems. Entropy, 19(12), 652.
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Store parameters
    info = {
        "Dimension": dimension,
        "Delay": delay,
        "Tolerance": complexity_tolerance(
            signal,
            method=tolerance,
            dimension=dimension,
            show=False,
        )[0],
    }
    # Steps from Chanwimalueang et al. (2017)
    # TODO: this should be integrated in utils.py - _get_count()
    # But so far I am not sure how to adapt this code to a logic similar to the one in _get_count()
    # 1. (Optional pre-processing) Remove the offset and generate a zero median series
    signal = signal - np.median(signal)
    # 2. Construct the embedding vectors
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    # 3. Compute angular distance for all pairwise embedding vectors
    # (cosine similarity of each unique pair, kept in the upper triangle only)
    norm = np.linalg.norm(embedded, axis=1)
    norm = np.triu(np.inner(embedded, embedded) / np.outer(norm, norm), 1)
    # Rescale to [-1, 1] to prevent numerical errors
    # NOTE(review): rescale() linearly maps the matrix's min/max (including the
    # zeros of the lower triangle) onto [-1, 1]; a clip to [-1, 1] may have
    # been intended instead — confirm against the reference implementation.
    norm = rescale(norm, [-1, 1])
    # angular distance
    d = np.arccos(norm) / np.pi
    # 4. Obtain the number of similar patterns P(m)i(rCSE) when a criterion AngDis(m)i,j≤rCSE is
    # fulfilled.
    Pm = np.real(d) <= info["Tolerance"]
    # 5. Compute the local probability of occurrences of similar patterns
    # 6. Compute the global probability of occurrences of similar patterns
    N = len(signal) - (dimension - 1) * delay
    Bm = np.sum(np.triu(Pm, 1) / (N * (N - 1) / 2))
    # 7. Cosine similarity entropy (binary Shannon entropy of Bm; degenerate
    # probabilities of 0 or 1 yield zero entropy)
    if Bm == 1 or Bm == 0:
        cosien = 0
    else:
        cosien = -(Bm * np.log(Bm)) - ((1 - Bm) * np.log(1 - Bm))
    return cosien, info
| 4,292 | 34.775 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_symbolize.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.cluster.vq
import scipy.special
from ..stats import standardize
from .optim_complexity_tolerance import complexity_tolerance
def complexity_symbolize(signal, method="mean", c=3, show=False, **kwargs):
"""**Signal Symbolization and Discretization**
Many complexity indices are made to assess the recurrence and predictability of discrete -
symbolic - states. As such, continuous signals must be transformed into such discrete sequence.
For instance, one of the easiest way is to split the signal values into two categories, above
and below the mean, resulting in a sequence of *A* and *B*. More complex methods have been
developped to that end.
* **Method 'A'** binarizes the signal by higher vs. lower values as compated to the signal's
mean. Equivalent tp ``method="mean"`` (``method="median"`` is also valid).
* **Method 'B'** uses values that are within the mean +/- 1 SD band vs. values that are outside
this band.
* **Method 'C'** computes the difference between consecutive samples and binarizes depending on
their sign.
* **Method 'D'** forms separates consecutive samples that exceed 1 signal's SD from the others
smaller changes.
* **Method 'r'** is based on the concept of :func:`*tolerance* <complexity_tolerance>`, and
will separate consecutive samples that exceed a given tolerance threshold, by default
:math:`0.2 * SD`. See :func:`complexity_tolerance` for more details.
* **Binning**: If an integer *n* is passed, will bin the signal into *n* equal-width bins.
Requires to specify *c*.
* **MEP**: Maximum Entropy Partitioning. Requires to specify *c*.
* **NCDF**: Please help us to improve the documentation here. Requires to specify *c*.
* **Linear**: Please help us to improve the documentation here. Requires to specify *c*.
* **Uniform**: Please help us to improve the documentation here. Requires to specify *c*.
* **kmeans**: k-means clustering. Requires to specify *c*.
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
method : str or int
Method of symbolization. Can be one of ``"A"`` (default), ``"B"``, ``"C"``, ``"D"``,
``"r"``, ``"Binning"``, ``"MEP"``, ``"NCDF"``, ``"linear"``, ``"uniform"``, ``"kmeans"``,
``"equal"``, or ``None`` to skip the process (for instance, in cases when the binarization
has already been done before).
See :func:`complexity_symbolize` for details.
c : int
Number of symbols *c*, used in some algorithms.
show : bool
Plot the reconstructed attractor. See :func:`complexity_attractor` for details.
**kwargs
Other arguments to be passed to :func:`complexity_attractor`.
Returns
-------
array
A symbolic sequence made of discrete states (e.g., 0 and 1).
See Also
------------
entropy_shannon, entropy_cumulative_residual, fractal_petrosian
Examples
---------
.. ipython:: python
import neurokit2 as nk
signal = nk.signal_simulate(duration=2, frequency=[5, 12])
# Method "A" is equivalent to "mean"
@savefig p_complexity_symbolize1.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "A", show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize2.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "B", show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize3.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "C", show=True)
@suppress
plt.close()
.. ipython:: python
signal = nk.signal_simulate(duration=2, frequency=[5], noise = 0.1)
@savefig p_complexity_symbolize4.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "D", show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize5.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "r", show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize6.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "binning", c=3, show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize7.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "MEP", c=3, show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize8.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "NCDF", c=3, show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize9.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "linear", c=5, show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize10.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "equal", c=5, show=True)
@suppress
plt.close()
.. ipython:: python
@savefig p_complexity_symbolize11.png scale=100%
symbolic = nk.complexity_symbolize(signal, method = "kmeans", c=5, show=True)
@suppress
plt.close()
"""
# Do nothing
if method is None:
symbolic = signal
if show is True:
df = pd.DataFrame({"Signal": signal, "Bin": signal, "Index": np.arange(len(signal))})
df = df.pivot_table(index="Index", columns="Bin", values="Signal")
for i in df.columns:
plt.plot(df[i])
# Binnning
elif isinstance(method, int):
c = method
method = "binning"
if isinstance(method, str):
method = method.lower()
if method in ["a", "mean"]:
symbolic = (signal > np.nanmean(signal)).astype(int)
if show is True:
df = pd.DataFrame({"A": signal, "B": signal})
df["A"][df["A"] > np.nanmean(signal)] = np.nan
df["B"][df["B"] <= np.nanmean(signal)] = np.nan
df.plot()
plt.axhline(y=np.nanmean(signal), color="r", linestyle="dotted")
plt.title("Method A")
elif method == "median":
symbolic = (signal > np.nanmedian(signal)).astype(int)
if show is True:
df = pd.DataFrame({"A": signal, "B": signal})
df["A"][df["A"] > np.nanmedian(signal)] = np.nan
df["B"][df["B"] <= np.nanmedian(signal)] = np.nan
df.plot()
plt.axhline(y=np.nanmean(signal), color="r", linestyle="dotted")
plt.title("Binarization by median")
elif method == "b":
m = np.nanmean(signal)
sd = np.nanstd(signal, ddof=1)
symbolic = np.logical_or(signal < m - sd, signal > m + sd).astype(int)
if show is True:
df = pd.DataFrame({"A": signal, "B": signal})
df["A"][np.logical_or(signal < m - sd, signal > m + sd)] = np.nan
df["B"][~np.isnan(df["A"])] = np.nan
df.plot()
plt.axhline(y=m - sd, color="r", linestyle="dotted")
plt.axhline(y=m + sd, color="r", linestyle="dotted")
plt.title("Method B")
elif method in ["c", "sign"]:
symbolic = np.signbit(np.diff(signal)).astype(int)
if show is True:
df = pd.DataFrame({"A": signal, "B": signal})
df["A"][np.insert(symbolic, 0, False)] = np.nan
df["B"][~np.isnan(df["A"])] = np.nan
df.plot()
plt.title("Method C")
elif method == "d":
symbolic = (np.abs(np.diff(signal)) > np.nanstd(signal, ddof=1)).astype(int)
if show is True:
where = np.where(symbolic)[0]
plt.plot(signal, zorder=1 == 1)
plt.scatter(where, signal[where], color="orange", label="Inversion", zorder=2)
plt.title("Method D")
elif method == "r":
symbolic = np.abs(np.diff(signal)) > complexity_tolerance(signal, method="sd")[0]
symbolic = symbolic.astype(int)
if show is True:
where = np.where(symbolic == 1)[0]
plt.plot(signal, zorder=1)
plt.scatter(where, signal[where], color="orange", label="Inversion", zorder=2)
plt.title("Method based on tolerance r")
elif method in ["binning", "mep", "ncdf", "linear", "uniform", "kmeans", "equal"]:
n = len(signal)
if method == "binning":
symbolic = pd.cut(signal, bins=c, labels=False)
elif method == "mep":
Temp = np.hstack((0, np.ceil(np.arange(1, c) * len(signal) / c) - 1)).astype(int)
symbolic = np.digitize(signal, np.sort(signal)[Temp])
elif method == "ncdf":
symbolic = np.digitize(
scipy.special.ndtr(standardize(signal)), np.arange(0, 1, 1 / c)
)
elif method == "linear":
symbolic = np.digitize(
signal, np.arange(np.min(signal), np.max(signal), np.ptp(signal) / c)
)
elif method == "uniform":
symbolic = np.zeros(len(signal))
symbolic[np.argsort(signal)] = np.digitize(np.arange(n), np.arange(0, 2 * n, n / c))
elif method == "kmeans":
centroids, labels = scipy.cluster.vq.kmeans2(signal, c)
labels += 1
xx = np.argsort(centroids) + 1
symbolic = np.zeros(n)
for k in range(1, c + 1):
symbolic[labels == xx[k - 1]] = k
elif method == "equal":
ix = np.argsort(signal)
xx = np.round(np.arange(0, 2 * n, n / c)).astype(int)
symbolic = np.zeros(n)
for k in range(c):
symbolic[ix[xx[k] : xx[k + 1]]] = k + 1
if show is True:
df = pd.DataFrame(
{"Signal": signal, "Bin": symbolic, "Index": np.arange(len(signal))}
)
df = df.pivot_table(index="Index", columns="Bin", values="Signal")
for i in df.columns:
plt.plot(df[i])
plt.title(f"Method: {method} (c={c})")
else:
raise ValueError(
"`method` must be one of 'A', 'B', 'C' or 'D', 'Binning', 'MEP', 'NCDF', 'linear',"
" 'uniform', 'kmeans'. See the documentation for more information."
)
return symbolic
| 10,984 | 37.54386 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_hjorth.py | import numpy as np
import pandas as pd
def complexity_hjorth(signal):
    """**Hjorth's Complexity and Parameters**

    Compute the Hjorth (1970) parameters of a univariate time series. Three
    quantities are derived from the signal and its successive differences:

    * **Activity**: the variance of the signal, corresponding to its mean
      power when the signal has zero mean.
    * **Mobility**: the square root of the ratio between the variance of the
      first derivative and the variance of the signal, a proxy for the mean
      frequency of the power spectrum.
    * **Complexity**: the mobility of the first derivative divided by the
      mobility of the signal itself, an estimate of the signal's bandwidth
      (it converges to 1 for a pure sine wave).

    Complexity is returned directly; Mobility and Activity are available in
    the accompanying info dictionary.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        A dictionary containing the additional Hjorth parameters, under the
        keys ``"Mobility"`` and ``"Activity"``.

    Raises
    ------
    ValueError
        If the input has more than one dimension.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=5)
      complexity, info = nk.complexity_hjorth(signal)
      complexity
      info

    References
    ----------
    * Hjorth, B (1970) EEG Analysis Based on Time Domain Properties. Electroencephalography and
      Clinical Neurophysiology, 29, 306-310.

    """
    # Only one-dimensional inputs are handled
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Successive differences approximate the first and second derivatives
    first_deriv = np.diff(signal)
    second_deriv = np.diff(first_deriv)

    # Population variances (ddof=0) of the signal and its derivatives
    activity = np.var(signal)
    var_deriv1 = np.var(first_deriv)
    var_deriv2 = np.var(second_deriv)

    # Mobility of the signal; complexity is the ratio of the derivative's
    # mobility to the signal's mobility
    mobility = np.sqrt(var_deriv1 / activity)
    complexity = np.sqrt(var_deriv2 / var_deriv1) / mobility
    return complexity, {"Mobility": mobility, "Activity": activity}
| 3,457 | 31.933333 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_recurrence_matrix.py | import matplotlib.pyplot as plt
import matplotlib.ticker
import numpy as np
import scipy.spatial
from .optim_complexity_tolerance import complexity_tolerance
from .utils_complexity_embedding import complexity_embedding
def recurrence_matrix(signal, delay=1, dimension=3, tolerance="default", show=False):
    """**Recurrence Matrix**

    Fast Python implementation of the recurrence matrix (tested against pyRQA).
    The signal is time-delay embedded, pairwise Euclidean distances between the
    embedded vectors are computed, and every distance smaller than or equal to
    the tolerance is marked as a recurrence (1), the others as 0. Returns a
    tuple with the binary recurrence matrix and the (non-binarized) distance
    matrix.

    It is used in :func:`Recurrence Quantification Analysis (RQA) <complexity_rqa>`.

    Parameters
    ----------
    signal : Union[list, np.ndarray, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`) in samples. See
        :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*). See :func:`complexity_dimension` to estimate
        the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance under which two data points
        are considered similar. The default is resolved by
        :func:`complexity_tolerance` (:math:`0.2 * SD_{signal}`). A rule of
        thumb is to set *r* so that 2-5% of points are classified as
        recurrences.
    show : bool
        Visualise the recurrence matrix.

    See Also
    --------
    complexity_embedding, complexity_delay, complexity_dimension, complexity_tolerance,
    complexity_rqa

    Returns
    -------
    np.ndarray
        The recurrence matrix.
    np.ndarray
        The distance matrix.

    References
    ----------
    * Rawald, T., Sips, M., Marwan, N., & Dransch, D. (2014). Fast computation of recurrences
      in long time series. In Translational Recurrences (pp. 17-29). Springer, Cham.
    * Dabiré, H., Mestivier, D., Jarnet, J., Safar, M. E., & Chau, N. P. (1998). Quantification of
      sympathetic and parasympathetic tones by nonlinear indexes in normotensive rats. American
      Journal of Physiology-Heart and Circulatory Physiology, 275(4), H1290-H1297.

    """
    # Resolve the tolerance value (e.g., "default" -> 0.2 * SD of the signal)
    tolerance, _ = complexity_tolerance(
        signal, method=tolerance, delay=delay, dimension=dimension, show=False
    )

    # Time-delay embedding of the signal
    trajectory = complexity_embedding(signal, delay=delay, dimension=dimension)

    # Pairwise Euclidean distances between embedded vectors
    dist_matrix = scipy.spatial.distance.cdist(trajectory, trajectory, metric="euclidean")

    # Binarize: 1 where the distance is within tolerance, 0 elsewhere
    rec_matrix = (dist_matrix <= tolerance).astype(float)

    # Plotting
    if show is True:
        try:
            fig, (ax_rc, ax_dist) = plt.subplots(ncols=2)
            ax_rc.imshow(rec_matrix, cmap="Greys")
            ax_rc.set_title("Recurrence Matrix")

            image = ax_dist.imshow(dist_matrix)
            ax_dist.set_title("Distance")
            cbar = fig.colorbar(image, ax=ax_dist, fraction=0.046, pad=0.04)
            cbar.ax.plot([0, 1], [tolerance] * 2, color="r")

            # Flip the matrices to match the traditional RQA representation
            for ax in (ax_rc, ax_dist):
                ax.invert_yaxis()
                ax.xaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
        except MemoryError as e:
            raise MemoryError(
                "NeuroKit error: complexity_rqa(): the recurrence plot is too large to display. ",
                "You can recover the matrix from the parameters and try to display parts of it.",
            ) from e

    return rec_matrix, dist_matrix
| 4,884 | 35.729323 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_correlation.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.metrics.pairwise
from ..misc import expspace
from .utils_complexity_embedding import complexity_embedding
def fractal_correlation(signal, delay=1, dimension=2, radius=64, show=False, **kwargs):
    """**Correlation Dimension (CD)**

    Estimate the Correlation Dimension (CD, also denoted *D2*), a lower bound
    estimate of the fractal dimension of a signal.

    The time series is first :func:`time-delay embedded <complexity_embedding>`
    and distances between all points in the trajectory are calculated. The
    "correlation sum" (the proportion of pairs of points whose distance is
    smaller than a given radius) is then computed for a sequence of radiuses,
    and the CD is approximated as the slope of the log-log relationship between
    the correlation sum and the radius.

    This function can be called either via ``fractal_correlation()`` or ``complexity_cd()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`) in samples. See
        :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*). See :func:`complexity_dimension` to estimate
        the optimal value for this parameter.
    radius : Union[str, int, list]
        The sequence of radiuses to test. If an integer is passed, an
        exponential sequence of that length ranging from 2.5% to 50% of the
        distance range is used. Methods implemented in other packages can be
        used via ``"nolds"``, ``"Corr_Dim"`` or ``"boon2008"``.
    show : bool
        Plot of correlation dimension if ``True``. Defaults to ``False``.
    **kwargs
        Other arguments to be passed (not used for now).

    Returns
    ----------
    cd : float
        The Correlation Dimension (CD) of the time series.
    info : dict
        A dictionary containing additional information regarding the parameters
        used to compute the correlation dimension.

    References
    -----------
    * Bolea, J., Laguna, P., Remartínez, J. M., Rovira, E., Navarro, A., & Bailón, R. (2014).
      Methodological framework for estimating the correlation dimension in HRV signals.
      Computational and mathematical methods in medicine, 2014.
    * Boon, M. Y., Henry, B. I., Suttle, C. M., & Dain, S. J. (2008). The correlation dimension:
      A useful objective measure of the transient visual evoked potential?. Journal of vision,
      8(1), 6-6.

    """
    # Only one-dimensional inputs are handled
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Time-delay embedding and pairwise distances between embedded vectors
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    dist = sklearn.metrics.pairwise.euclidean_distances(embedded)

    # Sequence of radiuses at which the correlation sum is evaluated
    r_vals = _fractal_correlation_get_r(radius, signal, dist)

    # Store parameters
    info = {"Dimension": dimension, "Delay": delay, "Radius": r_vals}

    # Only the upper triangle is needed (the distance matrix is symmetric)
    upper = dist[np.triu_indices_from(dist, k=1)]

    # Correlation sums: proportion of pairs closer than each radius
    corr = np.array([np.sum(upper < r) for r in r_vals]) / len(upper)

    # Drop radiuses yielding empty counts (log of zero is undefined)
    keep = np.nonzero(corr)[0]
    r_vals = r_vals[keep]
    corr = corr[keep]

    # Nothing left to fit
    if len(corr) == 0:
        return np.nan, info

    # CD is the slope of the log-log relationship
    cd, intercept = np.polyfit(np.log2(r_vals), np.log2(corr), 1)

    if show is True:
        plt.figure()
        plt.title("Correlation Dimension")
        plt.xlabel(r"$\log_{2}$(radius)")
        plt.ylabel(r"$\log_{2}$(correlation sum)")
        fit = 2 ** np.polyval((cd, intercept), np.log2(r_vals))
        plt.loglog(r_vals, corr, "bo")
        plt.loglog(r_vals, fit, "r", label=f"$CD$ = {np.round(cd, 2)}")
        plt.legend(loc="lower right")

    return cd, info
# =============================================================================
# Utilities
# =============================================================================
def _fractal_correlation_get_r(radius, signal, dist):
if isinstance(radius, str):
if radius == "nolds":
sd = np.std(signal, ddof=1)
min_r, max_r, factor = 0.1 * sd, 0.5 * sd, 1.03
r_n = int(np.floor(np.log(1.0 * max_r / min_r) / np.log(factor)))
r_vals = np.array([min_r * (factor ** i) for i in range(r_n + 1)])
elif radius == "Corr_Dim":
r_min, r_max = np.min(dist[np.where(dist > 0)]), np.exp(np.floor(np.log(np.max(dist))))
n_r = int(np.floor(np.log(r_max / r_min))) + 1
ones = -1 * np.ones([n_r])
r_vals = r_max * np.exp(ones * np.arange(n_r) - ones)
elif radius == "boon2008":
r_min, r_max = np.min(dist[np.where(dist > 0)]), np.max(dist)
r_vals = r_min + np.arange(1, 65) * ((r_max - r_min) / 64)
if isinstance(radius, int):
dist_range = np.max(dist) - np.min(dist)
r_min, r_max = (np.min(dist) + 0.025 * dist_range), (np.min(dist) + 0.5 * dist_range)
r_vals = expspace(r_min, r_max, radius, base=2, out="float")
return r_vals
| 6,393 | 37.751515 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_kl.py | import numpy as np
import pandas as pd
import scipy.spatial
import scipy.special
from .utils_complexity_embedding import complexity_embedding
def entropy_kl(signal, delay=1, dimension=2, norm="euclidean", **kwargs):
    """**Kozachenko-Leonenko (K-L) Differential entropy (KLEn)**

    The Kozachenko-Leonenko (K-L) estimate of the differential entropy is also referred to as the
    *nearest neighbor estimate* of entropy.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    norm : str
        The probability norm used when computing k-nearest neighbour distances. Can be
        ``"euclidean"`` (default) or ``"max"``.
    **kwargs : optional
        Other arguments (not used for now).

    Returns
    --------
    klen : float
        The KL-entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        (``"Dimension"`` and ``"Delay"``).

    Raises
    ------
    ValueError
        If the input is multidimensional, or if ``norm`` is not recognized.

    See Also
    --------
    entropy_differential

    References
    -----------
    * Kozachenko, L., & Leonenko, N. (1987). Sample estimate of the entropy of a random vector.
      Problemy Peredachi Informatsii, 23(2), 9-16.
    * Beirlant, J., Dudewicz, E. J., Györfi, L., & Van der Meulen, E. C. (1997). Nonparametric
      entropy estimation: An overview. International Journal of Mathematical and Statistical
      Sciences, 6(1), 17-39.
    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based
      method for determining the optimal embedding parameters of a signal. In 2003 IEEE
      International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
      (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    info = {"Dimension": dimension, "Delay": delay}

    # Time delay embedding
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    n, d = embedded.shape

    # Norm-dependent constants: the Minkowski order p for the KD-tree query and
    # the log-volume of the d-dimensional unit ball under that norm
    if norm == "max":  # max norm
        p = np.inf
        log_c_d = 0  # volume of the d-dimensional unit ball
    elif norm == "euclidean":  # euclidean norm
        p = 2
        log_c_d = (d / 2.0) * np.log(np.pi) - np.log(scipy.special.gamma(d / 2.0 + 1))
    else:
        raise ValueError("'norm' not recognized.")

    # Distance of each delay vector to its nearest neighbor, via a KD-tree.
    # This is much faster than computing the full pairwise distance matrix
    # (e.g., with sklearn's DistanceMetric) and taking the row-wise minimum.
    kdtree = scipy.spatial.cKDTree(embedded)

    # Query k+1 neighbors because each point is its own nearest neighbor
    k = 1  # We want the first nearest neighbour (k = 0 would be itself)
    nearest, _ = kdtree.query(embedded, k + 1, eps=0, p=p)
    nearest = nearest[:, -1]

    # Enforce non-zero distances (log of zero is undefined)
    nearest = nearest[nearest > 0]

    # Compute entropy H
    # -------------------------------------------------------
    # Note: Gautama (2003)'s equation reads as
    # np.sum(np.log(n * 2 * nearest) + np.log(2) + np.euler_gamma) / n
    # (the *2 is because 2*radius=diameter), but the estimator below appears
    # empirically more accurate and is the one used in
    # https://github.com/paulbrodersen/entropy_estimators (continuous.py).
    sum_dist = np.sum(np.log(2 * nearest))
    klen = sum_dist * (d / n) - scipy.special.digamma(k) + scipy.special.digamma(n) + log_c_d

    return klen, info
| 4,914 | 37.100775 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils.py | # -*- coding: utf-8 -*-
import numpy as np
import sklearn.metrics
import sklearn.neighbors
from packaging import version
from .utils_complexity_embedding import complexity_embedding
# =============================================================================
# Phi
# =============================================================================
def _phi(
    signal,
    delay=1,
    dimension=2,
    tolerance=0.1,
    distance="chebyshev",
    approximate=True,
    fuzzy=False,
    kdtree1=None,
    kdtree2=None,
    **kwargs,
):
    """Compute the phi statistics at dimensions m and m + 1.

    Common internal for `entropy_approximate`, `entropy_sample` and `entropy_range`.
    Pre-computed KD-trees may be passed in (and are returned) to avoid rebuilding
    them across repeated calls.
    """
    # Neighbour counts at embedding dimension m ...
    embedded1, count1, kdtree1 = _get_count(
        signal,
        delay,
        dimension,
        tolerance,
        distance=distance,
        approximate=approximate,
        fuzzy=fuzzy,
        kdtree=kdtree1,
    )
    # ... and at dimension m + 1 (always self-inclusive at the larger dimension)
    embedded2, count2, kdtree2 = _get_count(
        signal,
        delay,
        dimension + 1,
        tolerance,
        distance=distance,
        approximate=True,
        fuzzy=fuzzy,
        kdtree=kdtree2,
    )

    # ApEn averages log-proportions; SampEn averages self-excluded proportions
    if approximate is True:
        phi = np.array(
            [
                np.mean(np.log(count1 / embedded1.shape[0])),
                np.mean(np.log(count2 / embedded2.shape[0])),
            ]
        )
    else:
        phi = np.array(
            [
                np.mean((count1 - 1) / (embedded1.shape[0] - 1)),
                np.mean((count2 - 1) / (embedded2.shape[0] - 1)),
            ]
        )

    details = {
        "embedded1": embedded1,
        "count1": count1,
        "kdtree1": kdtree1,
        "embedded2": embedded2,
        "count2": count2,
        "kdtree2": kdtree2,
    }
    return phi, details
def _phi_divide(phi):
if np.isclose(phi[0], 0):
return -np.inf
division = np.divide(phi[1], phi[0])
if np.isclose(division, 0):
return np.inf
if division < 0:
return np.nan
return -np.log(division)
# =============================================================================
# Get Embedded
# =============================================================================
def _get_count(
    signal,
    delay=1,
    dimension=2,
    tolerance=0.1,
    distance="chebyshev",
    approximate=True,
    fuzzy=False,
    kdtree=None,
    n=1,
    **kwargs,
):
    """Embed `signal` and count, for each embedded vector, its neighbors within `tolerance`.

    Returns ``(embedded, count, kdtree)``, where ``kdtree`` is the KD-tree that
    was built (or the one passed in), or ``None`` for the fuzzy/range branches.

    This is usually the bottleneck for several complexity methods, in particular in the counting.
    That's why we allow the possibility of giving kdtrees as pre-computed (used in the optimization
    of tolerance via MaxApEn which computes iteratively the value with multiple tolerances).
    However, more improvements are welcome!
    """
    # Get embedded
    # -------------------
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)
    if approximate is False:
        embedded = embedded[:-1] # Removes the last line
    # Get neighbors count
    # -------------------
    # Sanity checks
    # NOTE: KDTree.valid_metrics became a method (callable) in sklearn 1.3.0;
    # it was a plain attribute before. "range" is our own addition.
    sklearn_version = version.parse(sklearn.__version__)
    if sklearn_version >= version.parse("1.3.0"):
        valid_metrics = sklearn.neighbors.KDTree.valid_metrics() + ["range"]
    else:
        valid_metrics = sklearn.neighbors.KDTree.valid_metrics + ["range"]
    if distance not in valid_metrics:
        raise ValueError(
            "The given metric (%s) is not valid."
            "The valid metric names are: %s"
            % (distance, valid_metrics)
        )
    if fuzzy is True:
        if distance == "range":
            raise ValueError("The fuzzy option is not available for range distance.")
        # FuzzyEn: Remove the local baselines of vectors
        embedded -= np.mean(embedded, axis=1, keepdims=True)
        # TODO: it would be good to implement 'distrange' here to have fuzzy RangeEn
        # TODO: also, different membership functions?
        # https://github.com/HamedAzami/FuzzyEntropy_Matlab/blob/master/FuzEn_MFs.m
        dist = sklearn.metrics.DistanceMetric.get_metric(distance)
        dist = dist.pairwise(embedded)
        # sklearn.metrics.pairwise_distances_chunked()
        # Exponential membership function: `n` is its exponent, `tolerance`
        # its width. The resulting "count" is a sum of graded similarities
        # (a float), not a hard count.
        if n > 1:
            sim = np.exp(-(dist**n) / tolerance)
        else:
            sim = np.exp(-dist / tolerance)
        # Return the count
        count = np.sum(sim, axis=0)
    elif distance == "range":
        # internal function for distrange: normalized range of the absolute
        # differences between a vector and every row of the embedding
        # (rows with a zero denominator are dropped to avoid division by 0)
        def distrange(x, y):
            numerator = np.max(np.abs(x - y), axis=1) - np.min(np.abs(x - y), axis=1)
            denominator = np.max(np.abs(x - y), axis=1) + np.min(np.abs(x - y), axis=1)
            valid = np.where(denominator != 0) # To prevent division by 0
            return np.divide(numerator[valid], denominator[valid])
        # Count for each row
        count = np.array(
            [np.sum(distrange(embedded, embedded[i]) < tolerance) for i in range(len(embedded))]
        )
    else: # chebyshev and other sklearn methods
        # Perhaps scipy.spatial.KDTree would be faster? Especially since its query() method
        # has a `workers` argument to use multiple cores? Benchmark or opinion required!
        if kdtree is None:
            kdtree = sklearn.neighbors.KDTree(embedded, metric=distance)
        count = kdtree.query_radius(embedded, tolerance, count_only=True).astype(np.float64)
    return embedded, count, kdtree
| 5,241 | 30.769697 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_sda.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..signal import signal_detrend
def fractal_sda(signal, scales=None, show=False):
    """**Standardised Dispersion Analysis (SDA)**

    SDA is part of a family of dispersion techniques used to compute fractal dimension.
    The standardized time series is divided in bins of different sizes and their standard deviation
    (SD) is calculated. The relationship between the SD and the bin size can be an indication
    of the presence of power-laws. For instance, if the SD systematically increases or
    decreases with larger bin sizes, this means the fluctuations depend on the size of the bins.
    The dispersion measurements are in units of the standard error of the mean. An FD of 1.5
    indicates random data series, while values approaching 1.20 indicate 1/f scaling.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scales : list
        The scales at which the signal is binned for evaluating the dispersions. If not ``None``,
        it should be a list or array of integer powers of 2 (e.g., scales = [1, 2, 4, 8, 16...])
        including 1 (meaning that the data points are treated individually). Scales larger than
        half the signal length are discarded.
    show : bool
        If ``True``, returns the log-log plot of standardized dispersion versus bin size.

    Returns
    ----------
    sda : float
        Estimate of the fractal dimension using the conversion formula of SDA (Hasselman, 2013).
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute SDA (``"Slope"``, ``"SD"`` and ``"Scale"``).

    Raises
    ------
    ValueError
        If the input has more than one dimension.

    References
    ----------
    * https://complexity-methods.github.io/book/standardised-dispersion-analysis-sda.html
    * Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model
      inference based on empirical waveforms. Frontiers in Physiology, 4, 75.
      https://doi.org/10.3389/fphys.2013.00075
    * Holden, J. G. (2005). Gauging the fractal dimension of response times from cognitive tasks.
      Contemporary nonlinear methods for behavioral scientists: A webbook tutorial, 267-318.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Detrend signal
    signal = signal_detrend(signal)

    # Standardize using the population formula (N) instead of the usual bias-corrected N-1
    signal = (signal - np.nanmean(signal)) / np.nanstd(signal)
    n = len(signal)

    # Default scales: integer powers of 2, including binsize = 1
    if scales is None:
        scale_min = 1
        scale_max = int(np.floor(np.log2(n / 2)))
        scales = np.append(1, 2 ** np.arange(scale_min, scale_max + 1))

    # Sanitize scales: accept plain lists/tuples (boolean-mask indexing below
    # requires an ndarray) and drop scales larger than half the signal length
    scales = np.asarray(scales)
    scales = scales[scales <= n / 2]

    # Assess variability using the SD of means of progressively larger adjacent samples
    sds = np.zeros(len(scales))
    for i, scale in enumerate(scales):
        # Truncate the signal so it divides evenly into the splits
        max_n = int(len(signal) / scale) * scale
        splits = np.split(signal[0:max_n], scale)
        # compute sd of the sampling distribution of means (mean of each bin)
        if scale == 1:
            # sd of original standardized time series is 1
            sds[i] = np.std(splits)
        else:
            sds[i] = np.std([np.mean(split) for split in splits])

    # Get slope of the log-log relationship between dispersion and bin size
    slope, intercept = np.polyfit(np.log10(scales), np.log10(sds), 1)

    if show:
        _fractal_sda_plot(sds, scales, slope, intercept, ax=None)

    # Fractal dimension via Hasselman's (2013) conversion formula: FD = 1 - slope
    return 1 - slope, {"Slope": slope, "SD": sds, "Scale": scales}
def _fractal_sda_plot(sds, scales, slope, intercept, ax=None):
    """Plot log10(standardized dispersion) against log10(bin size).

    Draws the observed dispersion values as dots and overlays the fitted
    regression line, whose slope yields the fractal dimension (FD = 1 - slope).
    If no axes is provided, a new figure is created.
    """
    title = "Standardized Dispersion as a function of Sample-Bin size" + ", slope = " + str(
        np.round(slope, 2)
    )
    if ax is None:
        # No target axes supplied: create a new figure and title the figure
        fig, ax = plt.subplots()
        fig.suptitle(title)
    else:
        # Draw on the caller's axes and title the axes instead
        fig = None
        ax.set_title(title)

    log_scales = np.log10(scales)
    ax.set_ylabel(r"$\log_{10}$(Standardized Dispersion)")
    ax.set_xlabel(r"$\log_{10}$(Bin Size)")

    # Observed points
    ax.scatter(log_scales, np.log10(sds), marker="o", zorder=2)

    # Regression line implied by the fitted slope and intercept
    ax.plot(
        log_scales,
        slope * log_scales + intercept,
        color="#FF9800",
        zorder=1,
        label="Fractal Dimension = " + str(np.round(1 - slope, 2)),
    )
    ax.legend(loc="lower right")
| 5,048 | 34.307692 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/information_gain.py | import numpy as np
from .utils_complexity_embedding import complexity_embedding
from .utils_complexity_symbolize import complexity_symbolize
def information_gain(signal, delay=1, dimension=4, symbolize="mean"):
    """**Mean Information Gain (MIG)** and **Fluctuation Complexity (FC)**

    Mean Information Gain (MIG) is a measure of diversity, as it exhibits maximum values for random
    signals. (Bates & Shepard, 1993; Wackerbauer et al., 1994).

    Unlike MIG, fluctuation complexity (FC) does not consider a random signal to be complex. The
    fluctuation complexity is the mean square deviation of the net information gain (i.e. the
    differences between information gain and loss). The more this balance of information gain and
    loss is fluctuating, the more complex the signal is considered.

    It is to note that the original formulations discuss the length of the "words" (the number of
    samples) the signal is partitioned into, this length parameter corresponds to the embedding
    dimension (i.e., the amount of past states to consider, by default 4). We additionally modified
    the original algorithm to the possibility of modulating the delay between each past state.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. By default,
        assigns 0 and 1 to values below and above the mean. Can be ``None`` to skip the process (in
        case the input is already discrete). See :func:`complexity_symbolize` for details.

    Returns
    -------
    mig : float
        The Mean Information Gain value (MIG).
    info : dict
        A dictionary containing additional information.

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=[5, 12, 85])

      mig, info = nk.information_gain(signal)

      # Mean Information Gain (MIG)
      mig

      # Fluctuation Complexity
      info['FC']

    References
    ----------
    * Bates, J. E., & Shepard, H. K. (1993). Measuring complexity using information fluctuation.
      Physics Letters A, 172(6), 416-425.
    * Wackerbauer, R., Witt, A., Atmanspacher, H., Kurths, J., & Scheingraber, H. (1994). A
      comparative classification of complexity measures. Chaos, Solitons & Fractals, 4(1), 133-173.

    """
    # Discretize the signal into zeros and ones
    binary = complexity_symbolize(signal, method=symbolize)

    # Get overlapping windows of a given width (time-delay embedding)
    embedded = complexity_embedding(binary, dimension=dimension, delay=delay).astype(int)

    # Represent each window ("word") as a string, e.g. "0101"
    states = ["".join(list(state)) for state in embedded.astype(str)]
    # Successive pairs of words, i.e., the observed transitions
    transitions = [tuple(states[i : i + 2]) for i in range(len(states) - 1)]

    # Empirical probability of each unique state...
    states_unique, states_prob = np.unique(states, axis=0, return_counts=True)
    states_prob = states_prob / np.sum(states_prob)
    s_prob = dict(zip(states_unique, states_prob))

    # ... and of each unique transition between states
    transitions_unique, transitions_prob = np.unique(transitions, axis=0, return_counts=True)
    transitions_prob = transitions_prob / np.sum(transitions_prob)
    t_prob = {tuple(k): p for k, p in zip(transitions_unique, transitions_prob)}

    mig = 0
    fc = 0
    for i in states_unique:
        p_i = s_prob[i]
        for j in states_unique:
            # Unobserved transitions contribute nothing (lim p->0 of p*log(p) = 0)
            i_j_prob = t_prob.get((i, j), 0)
            if i_j_prob == 0:
                continue
            # Information gain: p(i->j) * log2(p(i) / p(i->j))
            mig += i_j_prob * np.log2(p_i / i_j_prob)
            # Net information gain (fluctuation): p(i->j) * log2(p(i) / p(j))^2
            fc += i_j_prob * (np.log2(p_i / s_prob[j]) ** 2)

    return mig, {"Dimension": dimension, "Delay": delay, "FC": fc}
| 4,307 | 39.641509 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_power.py | import numpy as np
import pandas as pd
import scipy.integrate
from ..stats import density
def entropy_power(signal, **kwargs):
    """**Entropy Power (PowEn)**

    The Shannon Entropy Power (PowEn or SEP) is a measure of the effective variance of a random
    vector. It is based on the estimation of the density of the variable, thus relying on :func:`density`.

    .. warning::

        We are not sure at all about the correct implementation of this function. Please consider
        helping us by double-checking the code against the formulas in the references.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    **kwargs
        Other arguments to be passed to :func:`density_bandwidth`.

    Returns
    -------
    powen : float
        The computed entropy power measure.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    information_fisershannon

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk
      import matplotlib.pyplot as plt

      signal = nk.signal_simulate(duration=10, frequency=[10, 12], noise=0.1)

      powen, info = nk.entropy_power(signal)
      powen

      # Visualize the distribution that the entropy power is based on
      @savefig entropy_power2.png scale=100%
      plt.plot(info["Values"], info["Density"])
      @suppress
      plt.close()

    Change density bandwidth.

    .. ipython:: python

      powen, info = nk.entropy_power(signal, bandwidth=0.01)
      powen

    References
    ----------
    * Guignard, F., Laib, M., Amato, F., & Kanevski, M. (2020). Advanced analysis of temporal data
      using Fisher-Shannon information: theoretical development and application in geosciences.
      Frontiers in Earth Science, 8, 255.
    * Vignat, C., & Bercher, J. F. (2003). Analysis of signals in the Fisher-Shannon information
      plane. Physics Letters A, 312(1-2), 27-33.
    * Dembo, A., Cover, T. M., & Thomas, J. A. (1991). Information theoretic inequalities. IEEE
      Transactions on Information theory, 37(6), 1501-1518.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Estimate the probability density function fx of the random variable x
    x_range, fx = density(signal, **kwargs)

    # Restrict to the support where the density is strictly positive (log(0) is undefined)
    positive = fx > 0
    x_valid = x_range[positive]
    f_valid = fx[positive]

    # NOTE: in https://github.com/fishinfo/FiShPy/blob/master/FiSh.py the formula is somewhat
    # different, and on top of that it looks like it also differs between Dembo (1991) and
    # Vignat (2003) (the former divides by n).

    # Shannon entropy (https://en.wikipedia.org/wiki/Entropy_power_inequality)
    integrand = f_valid * np.log(f_valid)
    H = -scipy.integrate.simpson(integrand, x=x_valid)

    # Entropy power
    powen = np.exp(2 * H / len(signal)) / (2 * np.pi * np.e)
    return powen, {"Density": f_valid, "Values": x_valid}
| 3,166 | 31.649485 | 106 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/information_mutual.py | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import scipy.ndimage
import scipy.special
import scipy.stats
import sklearn.metrics
import sklearn.neighbors
def mutual_information(x, y, method="varoquaux", bins="default", **kwargs):
    """**Mutual Information (MI)**

    Computes the mutual information (MI) between two vectors from a joint histogram.
    The mutual information of two variables is a measure of the mutual dependence between them.
    More specifically, it quantifies the "amount of information" obtained about one variable by
    observing the other variable.

    Different methods are available:

    * **nolitsa**: Standard mutual information (a bit faster than the ``"sklearn"`` method).
    * **varoquaux**: Applies a Gaussian filter on the joint-histogram. The smoothing amount can be
      modulated via the ``sigma`` argument (by default, ``sigma=1``).
    * **knn**: Non-parametric (i.e., not based on binning) estimation via nearest neighbors.
      Additional parameters includes ``k`` (by default, ``k=3``), the number of nearest neighbors
      to use.
    * **max**: Maximum Mutual Information coefficient, i.e., the MI is maximal given a certain
      combination of number of bins.

    Parameters
    ----------
    x : Union[list, np.array, pd.Series]
        A vector of values.
    y : Union[list, np.array, pd.Series]
        A vector of values.
    method : str
        The method to use.
    bins : int
        Number of bins to use while creating the histogram. Only used for ``"nolitsa"`` and
        ``"varoquaux"``. If ``"default"``, the number of bins is estimated following
        Hacine-Gharbi (2018).
    **kwargs
        Additional keyword arguments to pass to the chosen method.

    Returns
    -------
    float
        The computed similarity measure.

    See Also
    --------
    information_fisher

    Examples
    ---------
    **Example 1**: Simple case

    .. ipython:: python

      import neurokit2 as nk

      x = [3, 3, 5, 1, 6, 3, 2, 8, 1, 2, 3, 5, 4, 0, 2]
      y = [5, 3, 1, 3, 4, 5, 6, 4, 1, 3, 4, 6, 2, 1, 3]

      nk.mutual_information(x, y, method="varoquaux")
      nk.mutual_information(x, y, method="nolitsa")
      nk.mutual_information(x, y, method="knn")
      nk.mutual_information(x, y, method="max")

    **Example 2**: Method comparison

    .. ipython:: python

      import numpy as np
      import pandas as pd

      x = np.random.normal(size=400)
      y = x**2

      data = pd.DataFrame()
      for level in np.linspace(0.01, 3, 200):
          noise = np.random.normal(scale=level, size=400)
          rez = pd.DataFrame({"Noise": [level]})
          rez["MI1"] = nk.mutual_information(x, y + noise, method="varoquaux", sigma=1)
          rez["MI2"] = nk.mutual_information(x, y + noise, method="varoquaux", sigma=0)
          rez["MI3"] = nk.mutual_information(x, y + noise, method="nolitsa")
          rez["MI4"] = nk.mutual_information(x, y + noise, method="knn")
          rez["MI5"] = nk.mutual_information(x, y + noise, method="max")
          data = pd.concat([data, rez], axis=0)

      data["MI1"] = nk.rescale(data["MI1"])
      data["MI2"] = nk.rescale(data["MI2"])
      data["MI3"] = nk.rescale(data["MI3"])
      data["MI4"] = nk.rescale(data["MI4"])
      data["MI5"] = nk.rescale(data["MI5"])

      @savefig p_information_mutual1.png scale=100%
      data.plot(x="Noise", y=["MI1", "MI2", "MI3", "MI4", "MI5"], kind="line")
      @suppress
      plt.close()

    .. ipython:: python

      # Computation time
      # x = np.random.normal(size=10000)
      # %timeit nk.mutual_information(x, x**2, method="varoquaux")
      # %timeit nk.mutual_information(x, x**2, method="nolitsa")
      # %timeit nk.mutual_information(x, x**2, method="sklearn")
      # %timeit nk.mutual_information(x, x**2, method="knn", k=2)
      # %timeit nk.mutual_information(x, x**2, method="knn", k=5)
      # %timeit nk.mutual_information(x, x**2, method="max")

    References
    ----------
    * Studholme, C., Hawkes, D. J., & Hill, D. L. (1998, June). Normalized entropy measure for
      multimodality image alignment. In Medical imaging 1998: image processing (Vol. 3338, pp.
      132-143). SPIE.
    * Hacine-Gharbi, A., & Ravier, P. (2018). A binning formula of bi-histogram for joint entropy
      estimation using mean square error minimization. Pattern Recognition Letters, 101, 21-28.

    """
    method = method.lower()
    # The "max" method (or an explicit list/array of candidate bin counts) searches over
    # bin-count combinations and keeps the largest normalized MI found.
    if method in ["max"] or isinstance(bins, (list, np.ndarray)):
        # https://www.freecodecamp.org/news/
        # how-machines-make-predictions-finding-correlations-in-complex-data-dfd9f0d87889/
        if isinstance(bins, str):
            # Candidate bin counts: 2 .. ceil(n ** 0.6)
            bins = np.arange(2, np.ceil(len(x) ** 0.6) + 1).astype(int)
        mi = 0
        for i in bins:
            for j in bins:
                # Skip grids with more cells than the largest candidate bin count
                if i * j > np.max(bins):
                    continue
                # Discretize each variable into i (resp. j) equal-width bins
                p_x = pd.cut(x, i, labels=False)
                p_y = pd.cut(y, j, labels=False)
                # Normalize by log2(min(i, j)) so grids of different sizes are comparable
                new_mi = _mutual_information_sklearn(p_x, p_y) / np.log2(np.min([i, j]))
                if new_mi > mi:
                    mi = new_mi
    else:
        if isinstance(bins, str):
            # Hacine-Gharbi (2018)
            # https://stats.stackexchange.com/questions/179674/
            # number-of-bins-when-computing-mutual-information
            bins = 1 + np.sqrt(1 + (24 * len(x) / (1 - np.corrcoef(x, y)[0, 1] ** 2)))
            bins = np.round((1 / np.sqrt(2)) * np.sqrt(bins)).astype(int)
        # Dispatch to the chosen estimator (the "knn" method does not use bins)
        if method in ["varoquaux"]:
            mi = _mutual_information_varoquaux(x, y, bins=bins, **kwargs)
        elif method in ["shannon", "nolitsa"]:
            mi = _mutual_information_nolitsa(x, y, bins=bins)
        elif method in ["sklearn"]:
            mi = _mutual_information_sklearn(x, y, bins=bins)
        elif method in ["knn"]:
            mi = _mutual_information_knn(x, y, **kwargs)
        else:
            raise ValueError("NeuroKit error: mutual_information(): 'method' not recognized.")

    return mi
# =============================================================================
# Methods
# =============================================================================
def _mutual_information_sklearn(x, y, bins=None):
    """Mutual information via sklearn's ``mutual_info_score`` on a contingency table."""
    if bins is None:
        # Exact contingency table over the observed discrete values
        _, p_xy = scipy.stats.contingency.crosstab(x, y)
    else:
        # Joint histogram with the requested number of bins
        p_xy, _, _ = np.histogram2d(x, y, bins)
    return sklearn.metrics.mutual_info_score(None, None, contingency=p_xy)
def _mutual_information_varoquaux(x, y, bins=256, sigma=1, normalized=True):
"""Based on Gael Varoquaux's implementation:
https://gist.github.com/GaelVaroquaux/ead9898bd3c973c40429."""
jh = np.histogram2d(x, y, bins=bins)[0]
# smooth the jh with a gaussian filter of given sigma
scipy.ndimage.gaussian_filter(jh, sigma=sigma, mode="constant", output=jh)
# compute marginal histograms
jh = jh + np.finfo(float).eps
sh = np.sum(jh)
jh = jh / sh
s1 = np.sum(jh, axis=0).reshape((-1, jh.shape[0]))
s2 = np.sum(jh, axis=1).reshape((jh.shape[1], -1))
if normalized:
mi = ((np.sum(s1 * np.log(s1)) + np.sum(s2 * np.log(s2))) / np.sum(jh * np.log(jh))) - 1
else:
mi = np.sum(jh * np.log(jh)) - np.sum(s1 * np.log(s1)) - np.sum(s2 * np.log(s2))
return mi
def _mutual_information_nolitsa(x, y, bins=256):
"""
Based on the nolitsa package:
https://github.com/manu-mannattil/nolitsa/blob/master/nolitsa/delay.py#L72
"""
p_x = np.histogram(x, bins)[0]
p_y = np.histogram(y, bins)[0]
p_xy = np.histogram2d(x, y, bins)[0].flatten()
# Convert frequencies into probabilities.
# Also, in the limit p -> 0, p*log(p) is 0. We need to take out those.
p_x = p_x[p_x > 0] / np.sum(p_x)
p_y = p_y[p_y > 0] / np.sum(p_y)
p_xy = p_xy[p_xy > 0] / np.sum(p_xy)
# Calculate the corresponding Shannon entropies.
h_x = np.sum(p_x * np.log2(p_x))
h_y = np.sum(p_y * np.log2(p_y))
h_xy = np.sum(p_xy * np.log2(p_xy))
return h_xy - h_x - h_y
def _mutual_information_knn(x, y, k=3):
    """Kraskov-style k-nearest-neighbour estimator of mutual information.

    Based on the NPEET package:
    https://github.com/gregversteeg/NPEET
    """
    joint = np.array([x, y]).T

    # Distance to the k-th nearest neighbour in the joint space
    # (p=inf, i.e. the Chebyshev/max-norm)
    dvec = sklearn.neighbors.KDTree(joint, metric="chebyshev").query(joint, k=k + 1)[0][:, k]

    def _mean_digamma(points):
        # Mean digamma of the neighbour counts within dvec in a marginal space
        tree = sklearn.neighbors.KDTree(points, metric="chebyshev")
        counts = tree.query_radius(points, dvec - 1e-15, count_only=True)
        return np.mean(scipy.special.digamma(counts))

    a = _mean_digamma(np.array([x]).T)
    b = _mean_digamma(np.array([y]).T)
    c = scipy.special.digamma(k)
    d = scipy.special.digamma(len(x))
    # Convert from nats to bits
    return (-a - b + c + d) / np.log(2)
| 8,843 | 34.376 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/TODO_entropy_wiener.py | import numpy as np
import pandas as pd
from ..signal.signal_timefrequency import signal_timefrequency
def entropy_wiener(signal, sampling_rate=1000, **kwargs):
    """Wiener Entropy (WE, also known as Spectral Flatness)

    The Wiener entropy (also known as Spectral Flatness, or tonality coefficient in sound
    processing) is a measure to quantify how noise-like a signal is and is typically applied to
    characterize an audio spectrum.

    A high spectral flatness (closer to 1.0) indicates that the spectrum has a similar amount of
    power in all spectral bands, and is similar to white noise. A low spectral flatness (approaching
    0 for pure tone) indicates that the spectral power is concentrated in a relatively small number of spectral
    bands.

    It is measured on a logarithmic scale from 0 (white noise: log(1): 0) to minus infinity (complete
    order such as pure tone, log(0): minus infinity).

    TODO: double check implementation (especially part on `signal_timefrequency`)

    See Also
    --------
    entropy_spectral

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    sampling_rate : int
        The sampling frequency of the signal (in Hz, i.e., samples/second).
    **kwargs : optional
        Other arguments to be passed to ``signal_timefrequency()`` (such as 'window').

    Returns
    -------
    ce : float
        The wiener entropy.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute wiener entropy.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(100, sampling_rate=100, frequency=[3, 10])

      we, info = nk.entropy_wiener(signal, sampling_rate=100)
      we

    References
    ----------
    * Wiener, N. (1954). The Human Use of Human Beings: Cybernetics and Society (Boston). Houghton Mifflin, 1, 50.
    * Dubnov, S. (2004). Generalization of spectral flatness measure for non-gaussian linear processes.
      IEEE Signal Processing Letters, 11(8), 698-701.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Get magnitude spectrogram (short-time Fourier transform)
    _, _, stft = signal_timefrequency(
        signal, sampling_rate=sampling_rate, method="stft", show=False, **kwargs
    )
    # Flatness computation adapted from librosa:
    # https://github.com/librosa/librosa/blob/eb603e7a91598d1e72d3cdeada0ade21a33f9c0c/librosa/core/spectrum.py#L42
    power = 2  # magnitude -> power spectrum
    amin = 1e-10  # floor value so empty bins do not produce log(0)
    S_thresh = np.maximum(amin, stft ** power)
    # Geometric and arithmetic means over the frequency axis (axis=-2).
    # NOTE(review): the trailing [0][0] keeps only the first element of the keepdims
    # result, i.e. the flatness of a single time frame rather than an aggregate over
    # the whole spectrogram — confirm this is intended (see TODO above).
    gmean = np.exp(np.mean(np.log(S_thresh), axis=-2, keepdims=True))[0][0]
    amean = np.mean(S_thresh, axis=-2, keepdims=True)[0][0]
    # Divide geometric mean of power spectrum by arithmetic mean of power spectrum
    return gmean / amean, {}
| 3,002 | 35.180723 | 115 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_shannon.py | import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_symbolize import complexity_symbolize
def entropy_shannon(signal=None, base=2, symbolize=None, show=False, freq=None, **kwargs):
    """**Shannon entropy (SE or ShanEn)**

    Compute Shannon entropy (SE). Entropy is a measure of unpredictability of the
    state, or equivalently, of its average information content. Shannon entropy (SE) is one of the
    first and most basic measure of entropy and a foundational concept of information theory,
    introduced by Shannon (1948) to quantify the amount of information in a variable.

    .. math::

      ShanEn = -\\sum_{x \\in \\mathcal{X}} p(x) \\log_2 p(x)

    Shannon attempted to extend Shannon entropy in what has become known as Differential Entropy
    (see :func:`entropy_differential`).

    Because Shannon entropy was meant for symbolic sequences (discrete events such as ["A", "B",
    "B", "A"]), it does not do well with continuous signals. One option is to binarize (i.e., cut)
    the signal into a number of bins using for instance ``pd.cut(signal, bins=100, labels=False)``.
    This can be done automatically using the ``method`` argument, which will be transferred to
    :func:`complexity_symbolize`.

    This function can be called either via ``entropy_shannon()`` or ``complexity_se()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    base: float
        The logarithmic base to use, defaults to ``2``, giving a unit in *bits*. Note that ``scipy.
        stats.entropy()`` uses Euler's number (``np.e``) as default (the natural logarithm), giving
        a measure of information expressed in *nats*.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. ``None`` by
        default, which skips the process (and assumes the input is already discrete). See
        :func:`complexity_symbolize` for details.
    show : bool
        If ``True``, will show the discrete the signal.
    freq : np.array
        Instead of a signal, a vector of probabilities can be provided (used for instance in
        :func:`entropy_permutation`).
    **kwargs
        Optional arguments. Not used for now.

    Returns
    --------
    shanen : float
        The Shannon entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute Shannon entropy.

    See Also
    --------
    entropy_differential, entropy_cumulativeresidual, entropy_tsallis, entropy_renyi,
    entropy_maximum

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = [1, 1, 5, 5, 2, 8, 1]
      _, freq = np.unique(signal, return_counts=True)
      nk.entropy_shannon(freq=freq)

    .. ipython:: python

      # Simulate a Signal with Laplace Noise
      signal = nk.signal_simulate(duration=2, frequency=5, noise=0.01)

      # Compute Shannon's Entropy
      @savefig p_entropy_shannon1.png scale=100%
      shanen, info = nk.entropy_shannon(signal, symbolize=3, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      shanen

    Compare with ``scipy`` (using the same base).

    .. ipython:: python

      import scipy.stats

      # Make the binning ourselves
      binned = pd.cut(signal, bins=3, labels=False)

      scipy.stats.entropy(pd.Series(binned).value_counts())
      shanen, info = nk.entropy_shannon(binned, base=np.e)
      shanen

    References
    -----------
    * Shannon, C. E. (1948). A mathematical theory of communication. The Bell system technical
      journal, 27(3), 379-423.

    """
    # If no pre-computed frequencies were provided, derive them from the signal
    if freq is None:
        _, freq = _entropy_freq(signal, symbolize=symbolize, show=show)

    # Shannon entropy of the frequency distribution, in the requested logarithmic base
    shanen = scipy.stats.entropy(freq, base=base)
    return shanen, {"Symbolization": symbolize, "Base": base}
# =============================================================================
# Compute frequencies (common to Shannon and Tsallis)
# =============================================================================
def _entropy_freq(signal, symbolize=None, show=False):
    """Return the unique symbols of a (discretized) signal and their counts."""
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # A string such as 'ABBA' is treated as the sequence ['A', 'B', 'B', 'A']
    if isinstance(signal, str):
        signal = list(signal)

    # Work on an array
    if not isinstance(signal, np.ndarray):
        signal = np.array(signal)

    # Discretize continuous signals (skipped for scalar inputs)
    if not np.isscalar(signal):
        signal = complexity_symbolize(signal, method=symbolize, show=show)

    return np.unique(signal, return_counts=True)
| 4,866 | 34.268116 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_grid.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
def entropy_grid(signal, delay=1, k=3, show=False, **kwargs):
    """**Grid Entropy (GridEn)**

    Grid Entropy (GridEn or GDEn) is defined as a gridded descriptor of a :func:`Poincaré plot <.hrv_nonlinear>`,
    which is a two-dimensional phase space diagram of a time series that plots the present sample
    of a time series with respect to their delayed values. The plot is divided into :math:`n*n`
    grids, and the :func:`Shannon entropy <entropy_shannon>` is computed from the probability
    distribution of the number of points in each grid.

    Yan et al. (2019) define two novel measures, namely **GridEn** and **Gridded Distribution Rate
    (GDR)**, the latter being the percentage of grids containing points.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    k : int
        The number of sections that the Poincaré plot is divided into. It is a coarse
        graining parameter that defines how fine the grid is.
    show : bool
        Plot the Poincaré plot.
    **kwargs : optional
        Other keyword arguments, such as the logarithmic ``base`` to use for
        :func:`entropy_shannon`.

    Returns
    -------
    griden : float
        Grid Entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon, .hrv_nonlinear, entropy_phase

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # Compute Grid Entropy
      @savefig p_entropy_grid1.png scale=100%
      phasen, info = nk.entropy_grid(signal, k=3, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      phasen

      @savefig p_entropy_grid2.png scale=100%
      phasen, info = nk.entropy_grid(signal, k=10, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      info["GDR"]

    References
    ----------
    * Yan, C., Li, P., Liu, C., Wang, X., Yin, C., & Yao, L. (2019). Novel gridded descriptors of
      poincaré plot for analyzing heartbeat interval time-series. Computers in biology and
      medicine, 109, 280-289.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    info = {"k": k, "Delay": delay}
    # Normalization to [0, 1] (min-max scaling)
    Sig_n = (signal - min(signal)) / np.ptp(signal)
    # Poincaré Plot: present samples vs. delayed samples
    Temp = np.array([Sig_n[:-delay], Sig_n[delay:]])
    # Get count of points in each grid (k x k joint histogram)
    hist, _, _ = np.histogram2d(Temp[0, :], Temp[1, :], k)
    # Get frequency (probability of a point falling into each grid)
    freq = np.flipud(hist.T) / hist.sum()
    # Empty grids are dropped (lim p->0 of p*log(p) is 0)
    freq = freq[freq > 0]
    # Compute Shannon Entropy of the grid occupation probabilities
    griden, _ = entropy_shannon(freq=freq, **kwargs)
    # Compute Gridded Distribution Rate (proportion of non-empty grids)
    info["GDR"] = np.sum(hist != 0) / hist.size
    if show is True:
        # Left panel: Poincaré plot with the k x k grid overlaid
        gridlines = np.linspace(0, 1, k + 1)
        plt.subplots(1, 2)
        x1 = plt.subplot(121)
        ax1 = plt.axes(x1)
        ax1.plot(Sig_n[:-delay], Sig_n[delay:], ".", color="#009688")
        # Vertical grid lines
        ax1.plot(
            np.tile(gridlines, (2, 1)),
            np.array((np.zeros(k + 1), np.ones(k + 1))),
            color="red",
        )
        # Horizontal grid lines
        ax1.plot(
            np.array((np.zeros(k + 1), np.ones(k + 1))),
            np.tile(gridlines, (2, 1)),
            color="red",
        )
        # Identity line
        ax1.plot([0, 1], [0, 1], "k")
        ax1.set_aspect("equal", "box")
        ax1.set_xlabel(r"$X_{i}$")
        ax1.set_ylabel(r"$X_{i} + \tau$")
        ax1.set_xticks([0, 1])
        ax1.set_yticks([0, 1])
        ax1.set_xlim([0, 1])
        ax1.set_ylim([0, 1])
        # Right panel: density (grid counts) as an image
        x2 = plt.subplot(122)
        ax2 = plt.axes(x2)
        ax2.imshow(np.fliplr(hist), cmap="rainbow", aspect="equal")
        # NOTE(review): the two calls below repeat the labels already set on ax1;
        # presumably they were intended for ax2 — confirm before changing.
        ax1.set_xlabel(r"$X_{i}$")
        ax1.set_ylabel(r"$X_{i} + \tau$")
        ax2.set_xticks([])
        ax2.set_yticks([])
        plt.suptitle("Gridded Poincaré Plot and its Density")
    return griden, info
| 4,579 | 30.156463 | 113 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_range.py | from .entropy_approximate import entropy_approximate
from .entropy_sample import entropy_sample
def entropy_range(signal, dimension=3, delay=1, tolerance="sd", approximate=False, **kwargs):
    """**Range Entropy (RangEn)**

    Introduced by Omidvarnia et al. (2018), Range Entropy (RangEn or RangeEn) refers to a modified
    form of SampEn (or ApEn).

    Both ApEn and SampEn compute the logarithmic likelihood that runs of patterns that are close
    remain close on the next incremental comparisons, of which this closeness is estimated by the
    Chebyshev distance. Range Entropy uses instead a normalized "range distance", resulting in
    modified forms of ApEn and SampEn, **RangEn (A)** (*mApEn*) and **RangEn (B)** (*mSampEn*).

    However, the RangEn (A), based on ApEn, often yields undefined entropies (i.e., *NaN* or
    *Inf*). As such, using RangEn (B) is recommended instead.

    RangEn is described as more robust to nonstationary signal changes, and has a more linear
    relationship with the Hurst exponent (compared to ApEn and SampEn), and has no need for signal
    amplitude correction.

    Note that the :func:`corrected <entropy_approximate>` version of ApEn (cApEn) can be computed
    by setting ``corrected=True``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    approximate : bool
        The entropy algorithm to use. If ``False`` (default), will use sample entropy and return
        *mSampEn* (**RangEn B**). If ``True``, will use approximate entropy and return *mApEn*
        (**RangEn A**).
    **kwargs
        Other arguments.

    See Also
    --------
    entropy_approximate, entropy_sample

    Returns
    -------
    RangEn : float
        Range Entropy. If undefined conditional probabilities are detected (logarithm
        of sum of conditional probabilities is ``ln(0)``), ``np.inf`` will
        be returned, meaning it fails to retrieve 'accurate' regularity information.
        This tends to happen for short data segments, increasing tolerance
        levels might help avoid this.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6])

      # Range Entropy B (mSampEn)
      RangEnB, info = nk.entropy_range(signal, approximate=False)
      RangEnB

      # Range Entropy A (mApEn)
      RangEnA, info = nk.entropy_range(signal, approximate=True)
      RangEnA

      # Range Entropy A (corrected)
      RangEnAc, info = nk.entropy_range(signal, approximate=True, corrected=True)
      RangEnAc

    References
    ----------
    * Omidvarnia, A., Mesbah, M., Pedersen, M., & Jackson, G. (2018). Range entropy: A bridge
      between signal complexity and self-similarity. Entropy, 20(12), 962.

    """
    # Both variants simply replace the Chebyshev distance with the normalized "range"
    # distance; only the underlying entropy algorithm differs.
    if approximate:
        algorithm = entropy_approximate  # mApEn - RangeEn (A)
    else:
        algorithm = entropy_sample  # mSampEn - RangeEn (B)
    return algorithm(
        signal,
        delay=delay,
        dimension=dimension,
        tolerance=tolerance,
        distance="range",
        **kwargs,
    )
| 4,172 | 37.638889 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_angular.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from .utils_complexity_embedding import complexity_embedding
def entropy_angular(signal, delay=1, dimension=2, show=False, **kwargs):
    """**Angular entropy (AngEn)**

    The Angular Entropy (AngEn) is the name that we use in NeuroKit to refer to the complexity
    method described in Nardelli et al. (2022), referred as comEDA due to its application to EDA
    signal. The method comprises the following steps: 1) Phase space reconstruction, 2) Calculation
    of the angular distances between all the pairs of points in the phase space; 3) Computation of
    the probability density function (PDF) of the distances; 4) Quadratic Rényi entropy of the PDF.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    **kwargs : optional
        Other arguments.

    Returns
    --------
    angen : float
        The Angular Entropy (AngEn) of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute the index.

    See Also
    --------
    entropy_renyi

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal with Laplace Noise
      signal = nk.signal_simulate(duration=2, frequency=[5, 3], noise=0.1)

      # Compute Angular Entropy
      @savefig p_entropy_angular1.png scale=100%
      angen, info = nk.entropy_angular(signal, delay=1, dimension=3, show=True)
      @suppress
      plt.close()

    References
    -----------
    * Nardelli, M., Greco, A., Sebastiani, L., & Scilingo, E. P. (2022). ComEDA: A new tool for
      stress assessment based on electrodermal activity. Computers in Biology and Medicine, 150,
      106144.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # 1. Reconstruct the phase space via time-delay embedding
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension)

    # 2. Angular distances between every pair of points in the phase space
    angles = _angular_distance(embedded)

    # 3. Probability density function (PDF) of those distances
    bins, pdf = _kde_sturges(angles)

    # 4. Quadratic Rényi entropy of the PDF, normalized by the log of the number of bins.
    # Note that in the paper (eq. 4 page 4) there is a minus sign on the normalization,
    # but adding it would give negative values, and the reference implementation
    # (https://github.com/NardelliM/ComEDA/blob/main/comEDA.m#L103) does not include it.
    angen = -np.log2(np.sum(pdf**2)) / np.log2(len(bins))

    if show is True:
        # Bar chart of the PDF of the angular distances
        bar_width = bins[1] - bins[0]
        plt.bar(bins[:-1], pdf, width=bar_width, align="edge", alpha=0.5)
        # Limit the x-axis to the range of the observed distances
        plt.xlim([np.min(angles), np.max(angles)])
        plt.suptitle(f"Angular Entropy (AngEn) = {angen:.3f}")
        plt.title("Distribution of Angular Distances:")

    return angen, {"bins": bins, "pdf": pdf}
def _angular_distance(m):
"""
Compute angular distances between all the pairs of points.
"""
# Get index of upper triangular to avoid double counting
idx = np.triu_indices(m.shape[0], k=1)
# compute the magnitude of each vector
magnitudes = np.linalg.norm(m, axis=1)
# compute the dot product between all pairs of vectors using np.matmul function, which is
# more efficient than np.dot for large matrices; and divide the dot product matrix by the
# product of the magnitudes to get the cosine of the angle
cos_angles = np.matmul(m, m.T)[idx] / np.outer(magnitudes, magnitudes)[idx]
# clip the cosine values to the range [-1, 1] to avoid any numerical errors and compute angles
return np.arccos(np.clip(cos_angles, -1, 1))
def _kde_sturges(x):
"""
Computes the PDF of a vector x using a kernel density estimator based on linear diffusion
processes with a Gaussian kernel. The number of bins of the PDF is chosen applying the Sturges
method.
"""
# Estimate the bandwidth
iqr = np.percentile(x, 75) - np.percentile(x, 25)
bandwidth = 0.9 * iqr / (len(x) ** 0.2)
# Compute the number of bins using the Sturges method
nbins = int(np.ceil(np.log2(len(x)) + 1))
# Compute the bin edges
bins = np.linspace(np.min(x), np.max(x), nbins + 1)
# Compute the kernel density estimate
xi = (bins[:-1] + bins[1:]) / 2
pdf = np.sum(
scipy.stats.norm.pdf((xi.reshape(-1, 1) - x.reshape(1, -1)) / bandwidth), axis=1
) / (len(x) * bandwidth)
# Normalize the PDF
pdf = pdf / np.sum(pdf)
return bins, pdf
| 5,376 | 35.331081 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/optim_complexity_dimension.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial
from .fractal_correlation import fractal_correlation
from .utils_complexity_embedding import complexity_embedding
def complexity_dimension(signal, delay=1, dimension_max=20, method="afnn", show=False, **kwargs):
    """**Automated selection of the optimal Embedding Dimension (m)**

    The Embedding Dimension (*m*, sometimes referred to as *d* or *order*) is the second
    critical parameter (the first being the :func:`delay <complexity_delay>` :math:`\\tau`)
    involved in the construction of the time-delay embedding of a signal. It corresponds to the
    number of delayed states (versions of the signals lagged by :math:`\\tau`) that we include in
    the embedding.

    Though one can commonly find values of 2 or 3 used in practice, several authors suggested
    different numerical methods to guide the choice of *m*:

    * **Correlation Dimension** (CD): One of the earliest method to estimate the optimal *m*
      was to calculate the :func:`correlation dimension <fractal_correlation>` for embeddings of
      various sizes and look for a saturation (i.e., a plateau) in its value as the embedding
      dimension increases. One of the limitation is that a saturation will also occur when there is
      not enough data to adequately fill the high-dimensional space (note that, in general, having
      such large embeddings that it significantly shortens the length of the signal is not
      recommended).
    * **FNN** (False Nearest Neighbour): The method, introduced by Kennel et al. (1992), is based
      on the assumption that two points that are near to each other in the sufficient embedding
      dimension should remain close as the dimension increases. The algorithm checks the neighbours
      in increasing embedding dimensions until it finds only a negligible number of false
      neighbours when going from dimension :math:`m` to :math:`m+1`. This corresponds to the lowest
      embedding dimension, which is presumed to give an unfolded space-state reconstruction. This
      method can fail in noisy signals due to the futile attempt of unfolding the noise (and in
      purely random signals, the amount of false neighbors does not substantially drops as *m*
      increases). The **figure** below show how projections to higher-dimensional spaces can be
      used to detect false nearest neighbours. For instance, the red and the yellow points are
      neighbours in the 1D space, but not in the 2D space.

    .. figure:: ../img/douglas2022b.png
       :alt: Illustration of FNN (Douglas et al., 2022).

    * **AFN** (Average False Neighbors): This modification by Cao (1997) of the FNN method
      addresses one of its main drawback, the need for a heuristic choice for the tolerance
      thresholds *r*. It uses the maximal Euclidian distance to represent nearest neighbors, and
      averages all ratios of the distance in :math:`m+1` to :math:`m` dimension and defines *E1* and
      *E2* as parameters. The optimal dimension corresponds to when *E1* stops changing (reaches a
      plateau). E1 reaches a plateau at a dimension *d0* if the signal comes from an attractor.
      Then *d0*+1 is the optimal minimum embedding dimension. *E2* is a useful quantity to
      distinguish deterministic signals from stochastic signals. A constant *E2* close to 1 for any
      embedding dimension *d* suggests random data, since the future values are independent of the
      past values.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted Tau :math:`\\tau`, sometimes referred to as Lag) in samples.
        See :func:`complexity_delay()` to choose the optimal value for this parameter.
    dimension_max : int
        The maximum embedding dimension to test.
    method : str
        Can be ``"afn"`` (Average False Neighbor), ``"fnn"`` (False Nearest Neighbour), or ``"cd"``
        (Correlation Dimension).
    show : bool
        Visualize the result.
    **kwargs
        Other arguments, such as ``R=10.0`` or ``A=2.0`` (relative and absolute tolerance, only for
        ``'fnn'`` method).

    Returns
    -------
    dimension : int
        Optimal embedding dimension.
    parameters : dict
        A dictionary containing additional information regarding the parameters used
        to compute the optimal dimension.

    See Also
    ------------
    complexity, complexity_dimension, complexity_delay, complexity_tolerance

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, frequency=[5, 7, 8], noise=0.01)

      # Correlation Dimension
      @savefig p_complexity_dimension1.png scale=100%
      optimal_dimension, info = nk.complexity_dimension(signal,
                                                        delay=20,
                                                        dimension_max=10,
                                                        method='cd',
                                                        show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # FNN
      @savefig p_complexity_dimension2.png scale=100%
      optimal_dimension, info = nk.complexity_dimension(signal,
                                                        delay=20,
                                                        dimension_max=20,
                                                        method='fnn',
                                                        show=True)
      @suppress
      plt.close()

    .. ipython:: python

      # AFNN
      @savefig p_complexity_dimension3.png scale=100%
      optimal_dimension, info = nk.complexity_dimension(signal,
                                                        delay=20,
                                                        dimension_max=20,
                                                        method='afnn',
                                                        show=True)
      @suppress
      plt.close()

    References
    -----------
    * Kennel, M. B., Brown, R., & Abarbanel, H. D. (1992). Determining embedding dimension for
      phase-space reconstruction using a geometrical construction. Physical review A, 45(6), 3403.
    * Cao, L. (1997). Practical method for determining the minimum embedding dimension of a scalar
      time series. Physica D: Nonlinear Phenomena, 110(1-2), 43-50.
    * Rhodes, C., & Morari, M. (1997). The false nearest neighbors algorithm: An overview.
      Computers & Chemical Engineering, 21, S1149-S1154.
    * Krakovská, A., Mezeiová, K., & Budáčová, H. (2015). Use of false nearest neighbours for
      selecting variables and embedding parameters for state space reconstruction. Journal of
      Complex Systems, 2015.
    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based
      method for determining the optimal embedding parameters of a signal. In 2003 IEEE
      International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
      (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.

    """
    # Initialize vectors: candidate dimensions are 1..dimension_max (or a user-given sequence)
    if isinstance(dimension_max, int):
        dimension_seq = np.arange(1, dimension_max + 1)
    else:
        dimension_seq = np.array(dimension_max)
    # Method
    method = method.lower()
    if method in ["afnn", "afn"]:
        # Append value (as it gets cropped afterwards anyway): the E1/E2 ratios consume
        # one extra dimension since E1(d) = E(d + 1) / E(d)
        dimension_seq = np.append(dimension_seq, [dimension_seq[-1] + 1])
        E, Es = _embedding_dimension_afn(signal, dimension_seq=dimension_seq, delay=delay, **kwargs)
        E1 = E[1:] / E[:-1]
        E2 = Es[1:] / Es[:-1]
        # To find where E1 saturates, set a threshold of difference
        # threshold = 0.1 * (np.max(E1) - np.min(E1))
        # Heuristic: first dimension at which E1 reaches 85% of its maximum (+1 per Cao's d0+1 rule)
        min_dimension = [i for i, x in enumerate(E1 >= 0.85 * np.nanmax(E1)) if x][0] + 1
        # To standardize the length of dimension_seq with E1 and E2
        dimension_seq = dimension_seq[:-1]
        # Store information
        info = {"Method": method, "Values": dimension_seq, "E1": E1, "E2": E2}
        if show is True:
            _embedding_dimension_plot(
                method=method,
                dimension_seq=dimension_seq,
                min_dimension=min_dimension,
                E1=E1,
                E2=E2,
            )
    elif method in ["fnn"]:
        f1, f2, f3 = _embedding_dimension_ffn(signal, dimension_seq=dimension_seq, delay=delay, **kwargs)
        # Heuristic: first dimension whose combined FNN fraction drops within 85% above the
        # smallest non-zero fraction
        min_dimension = [i for i, x in enumerate(f3 <= 1.85 * np.min(f3[np.nonzero(f3)])) if x][0]
        # Store information
        info = {"Method": method, "Values": dimension_seq, "f1": f1, "f2": f2, "f3": f3}
        if show is True:
            _embedding_dimension_plot(
                method=method,
                dimension_seq=dimension_seq,
                min_dimension=min_dimension,
                f1=f1,
                f2=f2,
                f3=f3,
            )
    elif method in ["correlation", "cd"]:
        CDs = _embedding_dimension_correlation(signal, dimension_seq, delay=delay, **kwargs)
        # Find elbow (TODO: replace by better method of elbow localization)
        min_dimension = dimension_seq[np.where(CDs >= 0.66 * np.max(CDs))[0][0]]
        # Store information
        info = {"Method": method, "Values": dimension_seq, "CD": CDs}
        if show is True:
            _embedding_dimension_plot(
                method=method,
                dimension_seq=dimension_seq,
                min_dimension=min_dimension,
                CD=CDs,
            )
    else:
        raise ValueError("NeuroKit error: complexity_dimension(): 'method' not recognized.")
    return min_dimension, info
# =============================================================================
# Methods
# =============================================================================
def _embedding_dimension_correlation(signal, dimension_seq, delay=1, **kwargs):
    """Correlation Dimension (CD) computed for every candidate dimension in ``dimension_seq``."""
    return np.array(
        [fractal_correlation(signal, dimension=d, delay=delay, **kwargs)[0] for d in dimension_seq]
    )
def _embedding_dimension_afn(signal, dimension_seq, delay=1, **kwargs):
    """Average False Neighbors: the E(d) and E*(d) quantities for each candidate dimension."""
    pairs = [_embedding_dimension_afn_d(signal, d, delay, **kwargs) for d in dimension_seq]
    E = np.asarray([p[0] for p in pairs])
    Es = np.asarray([p[1] for p in pairs])
    return E, Es
def _embedding_dimension_afn_d(signal, dimension, delay=1, metric="chebyshev", window=10, maxnum=None, **kwargs):
    """E(d) and E*(d) of Cao's AFN method for a single dimension ``d``.

    The caller forms the ratios E1(d) = E(d + 1) / E(d) and E2(d) = E*(d + 1) / E*(d).
    """
    d, dist, index, y2 = _embedding_dimension_d(signal, dimension, delay, metric, window, maxnum)
    # A zero distance in either dimension would make the ratio degenerate, so bail out with NaN.
    if np.any(d == 0) or np.any(dist == 0):
        return np.nan, np.nan
    # E(d): average ratio of the (d + 1)-dimensional to the d-dimensional neighbor distance.
    E = np.mean(d / dist)
    # E*(d): average absolute difference of the last embedded coordinate between neighbors.
    Es = np.mean(np.abs(y2[:, -1] - y2[index, -1]))
    return E, Es
def _embedding_dimension_ffn(signal, dimension_seq, delay=1, R=10.0, A=2.0, **kwargs):
    """Fractions of false nearest neighbors (Kennel et al., 1992) per candidate dimension.

    Returns three vectors:

    - f1 : Fraction of neighbors classified as false by Test I.
    - f2 : Fraction of neighbors classified as false by Test II.
    - f3 : Fraction of neighbors classified as false by either Test I or Test II.
    """
    f1, f2, f3 = np.transpose(
        [_embedding_dimension_ffn_d(signal, d, delay, R=R, A=A, **kwargs) for d in dimension_seq]
    )
    return f1, f2, f3
def _embedding_dimension_ffn_d(signal, dimension, delay=1, R=10.0, A=2.0, metric="euclidean", window=10, maxnum=None):
    """Fraction of false nearest neighbors for a single embedding dimension."""
    d, dist, index, y2 = _embedding_dimension_d(signal, dimension, delay, metric, window, maxnum)
    # Zero distances would divide by zero below; NaN makes the ">" comparison evaluate to False.
    dist[dist == 0] = np.nan
    # Test I: the extra coordinate moves neighbors apart by more than R times their distance.
    test1 = np.abs(y2[:, -1] - y2[index, -1]) / dist > R
    # Test II: the (d + 1)-dimensional distance is large relative to the signal's spread.
    test2 = d / np.std(signal) > A
    return np.mean(test1), np.mean(test2), np.mean(test1 | test2)
# =============================================================================
# Internals
# =============================================================================
def _embedding_dimension_d(signal, dimension, delay=1, metric="chebyshev", window=10, maxnum=None):
    """Nearest-neighbor distances in dimension ``d`` and ``d + 1`` (shared by AFN / FNN).

    Returns
    -------
    d : np.ndarray
        Distance between each point and its nearest neighbor in dimension ``d + 1``.
    dist : np.ndarray
        Distance between each point and its nearest neighbor in dimension ``d``.
    index : np.ndarray
        Indices of the nearest neighbors found in dimension ``d``.
    y2 : np.ndarray
        The ``(d + 1)``-dimensional embedding.
    """
    # We need to reduce the number of points in dimension d by tau
    # so that after reconstruction, there'll be equal number of points
    # at both dimension d as well as dimension d + 1.
    y1 = complexity_embedding(signal[:-delay], delay=delay, dimension=dimension)
    y2 = complexity_embedding(signal, delay=delay, dimension=dimension + 1)
    # Find near neighbors in dimension d (honoring the Theiler window).
    index, dist = _embedding_dimension_neighbors(y1, metric=metric, window=window, maxnum=maxnum)
    # Compute the near-neighbor distances in d + 1 dimension, vectorized and using the *same*
    # metric as the neighbor search. Previously this was hard-coded to Chebyshev, which was
    # inconsistent with e.g. the FNN method's Euclidean default.
    diff = np.abs(y2 - y2[index])
    if metric == "chebyshev":
        d = np.max(diff, axis=1)
    elif metric == "cityblock":
        d = np.sum(diff, axis=1)
    else:  # "euclidean" (any other value is rejected by _embedding_dimension_neighbors)
        d = np.sqrt(np.sum(diff**2, axis=1))
    return d, dist, index, y2
def _embedding_dimension_neighbors(y, metric="chebyshev", window=0, maxnum=None, show=False):
    """Find the nearest valid neighbor of every point in an embedded signal.

    Uses SciPy's cKDTree; candidate neighbors that are closer than ``window`` samples in time
    (Theiler window) or at zero distance (self-matches / exact duplicates) are rejected.

    Parameters
    ----------
    y : ndarray
        Embedded signal: N-dimensional array containing time-delayed vectors.
    metric : str
        Metric to use for distance computation. Must be one of "cityblock" (aka the Manhattan
        metric), "chebyshev" (aka the maximum norm metric), or "euclidean". Defaults to
        'chebyshev'.
    window : int
        Minimum temporal separation (Theiler window) that should exist between near neighbors.
        This is crucial while computing Lyapunov exponents and the correlation dimension.
        Defaults to 0.
    maxnum : int
        Maximum number of near neighbors that should be found for each point.
        In rare cases, when there are no neighbors that are at a nonzero distance, this will have
        to be increased (i.e., beyond 2 * window + 3). Defaults to None (optimum).
    show : bool
        If True, plot neighbor distances. Defaults to False.

    Returns
    -------
    index : array
        Array containing indices of near neighbors.
    dist : array
        Array containing near neighbor distances.

    """
    # Map the metric name onto the Minkowski order `p` used by cKDTree.query
    if metric == "chebyshev":
        p = np.inf
    elif metric == "cityblock":
        p = 1
    elif metric == "euclidean":
        p = 2
    else:
        raise ValueError('Unknown metric. Should be one of "cityblock", ' '"euclidean", or "chebyshev".')
    tree = scipy.spatial.cKDTree(y)  # pylint: disable=E1102
    n = len(y)
    # Default: query enough candidates so at least one falls outside the Theiler window on
    # each side: (window + 1) before + self + (window + 1) after.
    if not maxnum:
        maxnum = (window + 1) + 1 + (window + 1)
    else:
        maxnum = max(1, maxnum)
    if maxnum >= n:
        raise ValueError("maxnum is bigger than array length.")
    # Query for k numbers of nearest neighbors
    distances, indices = tree.query(y, k=range(1, maxnum + 2), p=p)
    # Temporal offset of each candidate from its query point
    # Substract the first point
    valid = indices - np.tile(np.arange(n), (indices.shape[1], 1)).T
    # Remove points that are closer than min temporal separation
    valid = np.abs(valid) > window
    # Remove also self reference (d > 0)
    valid = valid & (distances > 0)
    # Get indices to keep: per row, the first (nearest) candidate that passed both filters
    valid = (np.arange(len(distances)), np.argmax(valid, axis=1))
    distances = distances[valid]
    indices = indices[(valid)]
    if show is True:
        plt.plot(indices, distances)
    return indices, distances
# =============================================================================
# Plotting
# =============================================================================
def _embedding_dimension_plot(
    method,
    dimension_seq,
    min_dimension,
    E1=None,
    E2=None,
    f1=None,
    f2=None,
    f3=None,
    CD=None,
    ax=None,
):
    """Plot the optimization curve(s) used by ``complexity_dimension()``.

    The relevant keyword arguments depend on the method: ``E1``/``E2`` for AFN,
    ``f1``/``f2``/``f3`` for FNN, ``CD`` for the correlation-dimension method.
    Returns the created figure, or ``None`` when drawing on a provided ``ax``.
    """
    if ax is None:
        fig, ax = plt.subplots()
    else:
        fig = None
    ax.set_title("Optimization of Dimension (d)")
    ax.set_xlabel("Embedding dimension $d$")
    if method in ["correlation", "cd"]:
        ax.set_ylabel("Correlation Dimension (CD)")
        ax.plot(dimension_seq, CD, "o-", label="$CD$", color="#852b01")
    else:
        ax.set_ylabel("$E_1(d)$ and $E_2(d)$")
    # Accept both spellings used by `complexity_dimension()` ("afnn" and "afn"); previously
    # only "afnn" was recognized, so `method="afn", show=True` produced an empty plot.
    if method in ["afnn", "afn"]:
        ax.plot(dimension_seq, E1, "o-", label="$E_1(d)$", color="#FF5722")
        ax.plot(dimension_seq, E2, "o-", label="$E_2(d)$", color="#FFC107")
    if method in ["fnn"]:
        ax.plot(dimension_seq, 100 * f1, "o--", label="Test I", color="#FF5722")
        ax.plot(dimension_seq, 100 * f2, "^--", label="Test II", color="#f44336")
        ax.plot(dimension_seq, 100 * f3, "s-", label="Test I + II", color="#852b01")
    ax.axvline(x=min_dimension, color="#E91E63", label="Optimal dimension: " + str(min_dimension))
    ax.legend(loc="upper right")
    return fig
| 18,101 | 39.954751 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/optim_complexity_k.py | from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning, find_plateau
def complexity_k(signal, k_max="max", show=False):
    """**Automated selection of k for Higuchi Fractal Dimension (HFD)**

    The optimal *k-max* is computed based on the point at which HFD values plateau for a range of
    *k-max* values (see Vega, 2015).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    k_max : Union[int, str, list], optional
        Maximum number of interval times (should be greater than or equal to 3) to be tested. If
        a string (e.g., ``"max"``), it selects the maximum possible value corresponding to half
        the length of the signal.
    show : bool
        Visualise the slope of the curve for the selected kmax value.

    Returns
    --------
    k : float
        The optimal kmax of the time series.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute optimal kmax.

    Raises
    ------
    TypeError
        If ``k_max`` is neither a string, an int, nor a list/array of candidate values.

    See Also
    --------
    fractal_higuchi

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, sampling_rate=100, frequency=[5, 6], noise=0.5)

      @savefig p_complexity_k1.png scale=100%
      k_max, info = nk.complexity_k(signal, k_max='default', show=True)
      @suppress
      plt.close()

    .. ipython:: python

      k_max

    References
    ----------
    * Higuchi, T. (1988). Approach to an irregular time series on the basis of the fractal theory.
      Physica D: Nonlinear Phenomena, 31(2), 277-283.
    * Vega, C. F., & Noel, J. (2015, June). Parameters analyzed of Higuchi's fractal dimension for
      EEG brain signals. In 2015 Signal Processing Symposium (SPSympo) (pp. 1-5). IEEE. https://
      ieeexplore.ieee.org/document/7168285

    """
    # Get the range of k-max values to be tested
    # ------------------------------------------
    if isinstance(k_max, str):  # e.g., "max" or "default"
        # upper limit for k value (max possible value)
        k_max = int(np.floor(len(signal) / 2))  # so that normalizing factor is positive
    if isinstance(k_max, int):
        kmax_range = np.arange(2, k_max + 1)
    elif isinstance(k_max, (list, np.ndarray, pd.Series)):
        kmax_range = np.array(k_max)
    else:
        # Previously this branch only warned and then crashed with a NameError on `kmax_range`;
        # fail fast with an informative error instead.
        raise TypeError("k_max should be an int or a list of values of kmax to be tested.")
    # Compute the slope for each kmax value
    # --------------------------------------
    vectorized_k_slope = np.vectorize(_complexity_k_slope, excluded=[1])
    slopes, intercepts, info = vectorized_k_slope(kmax_range, signal)
    average_values = [d["average_values"] for d in info]
    # Find plateau (the saturation point of slope)
    # --------------------------------------------
    optimal_point = find_plateau(slopes, show=False)
    if optimal_point is not None:
        kmax_optimal = kmax_range[optimal_point]
    else:
        # No plateau found: fall back to the largest candidate and warn the user.
        kmax_optimal = np.max(kmax_range)
        warn(
            "The optimal kmax value detected is 2 or less. There may be no plateau in this case. "
            + f"You can inspect the plot by set `show=True`. We will return optimal k_max = {kmax_optimal} (the max).",
            category=NeuroKitWarning,
        )
    # Plot
    if show:
        _complexity_k_plot(kmax_range, slopes, kmax_optimal, ax=None)
    # Return optimal tau and info dict
    return kmax_optimal, {
        "Values": kmax_range,
        "Scores": slopes,
        "Intercepts": intercepts,
        "Average_Values": average_values,
    }
# =============================================================================
# Utilities
# =============================================================================
def _complexity_k_Lk(k, signal):
n = len(signal)
# Step 1: construct k number of new time series for range of k_values from 1 to kmax
k_subrange = np.arange(1, k + 1) # where m = 1, 2... k
idx = np.tile(np.arange(0, len(signal), k), (k, 1)).astype(float)
idx += np.tile(np.arange(0, k), (idx.shape[1], 1)).T
mask = idx >= len(signal)
idx[mask] = 0
sig_values = signal[idx.astype(int)].astype(float)
sig_values[mask] = np.nan
# Step 2: Calculate length Lm(k) of each curve
normalization = (n - 1) / (np.floor((n - k_subrange) / k).astype(int) * k)
sets = (np.nansum(np.abs(np.diff(sig_values)), axis=1) * normalization) / k
# Step 3: Compute average value over k sets of Lm(k)
return np.sum(sets) / k
def _complexity_k_slope(kmax, signal, k_number="max"):
    # Build the set of interval times k = 1..kmax (or a subsample of k_number of them).
    if k_number == "max":
        k_values = np.arange(1, kmax + 1)
    else:
        k_values = np.unique(np.linspace(1, kmax + 1, k_number).astype(int))
    # Step 3 of Vega & Noel (2015)
    vectorized_Lk = np.vectorize(_complexity_k_Lk, excluded=[1])
    # Compute length of the curve, Lm(k)
    average_values = vectorized_Lk(k_values, signal)
    # Slope of best-fit line through points (slope equal to FD)
    # NOTE(review): the unary minus applies to *both* polyfit coefficients, so the returned
    # intercept is the negated regression intercept — TODO confirm that downstream consumers
    # of "Intercepts" expect this sign.
    slope, intercept = -np.polyfit(np.log(k_values), np.log(average_values), 1)
    return slope, intercept, {"k_values": k_values, "average_values": average_values}
# =============================================================================
# Plotting
# =============================================================================
def _complexity_k_plot(k_range, slope_values, k_optimal, ax=None):
    """Plot HFD values against the candidate k-max values, marking the optimum."""
    if ax is not None:
        fig = None
    else:
        fig, ax = plt.subplots()
    ax.set_title("Optimization of $k_{max}$ parameter")
    ax.set_xlabel("$k_{max}$ values")
    ax.set_ylabel("Higuchi Fractal Dimension (HFD) values")
    # Connecting line underneath, one gradient-colored marker per candidate on top.
    shades = plt.cm.PuBu(np.linspace(0, 1, len(k_range)))
    ax.plot(k_range, slope_values, color="#2196F3", zorder=1)
    for k, hfd, shade in zip(k_range, slope_values, shades):
        ax.scatter(k, hfd, color=shade, marker="o", zorder=2)
    ax.axvline(x=k_optimal, color="#E91E63", label="Optimal $k_{max}$: " + str(k_optimal))
    ax.legend(loc="upper right")
    return fig
| 6,280 | 33.13587 | 119 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_ofentropy.py | import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
def entropy_ofentropy(signal, scale=10, bins=10, **kwargs):
    """**Entropy of entropy (EnofEn)**

    Entropy of entropy (EnofEn or EoE) combines the features of :func:`MSE <entropy_multiscale>`
    with an alternate measure of information, called *superinformation*, used in DNA sequencing.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : int
        The size of the windows that the signal is divided into. Also referred to as Tau
        :math:`\\tau`, it represents the scale factor and corresponds to
        the amount of coarsegraining.
    bins : int
        The number of equal-size bins to divide the signal's range in.
    **kwargs : optional
        Other keyword arguments, such as the logarithmic ``base`` to use for
        :func:`entropy_shannon`.

    Returns
    --------
    enofen : float
        The Entropy of entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used, such as the
        average entropy ``AvEn``.

    See Also
    --------
    entropy_shannon, entropy_multiscale

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # EnofEn
      enofen, _ = nk.entropy_ofentropy(signal, scale=10, bins=10)
      enofen

    References
    -----------
    * Hsu, C. F., Wei, S. Y., Huang, H. P., Hsu, L., Chi, S., & Peng, C. K. (2017). Entropy of
      entropy: Measurement of dynamical complexity for biological systems. Entropy, 19(10), 550.

    """
    # Sanity checks: only univariate signals are supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    info = {"Scale": scale, "Bins": bins}
    # divide a one-dimensional discrete time series of length n into consecutive
    # non-overlapping windows w where each window is of length 'scale'
    # (trailing samples that do not fill a whole window are discarded)
    n_windows = int(np.floor(len(signal) / scale))
    windows = np.reshape(signal[: n_windows * scale], (n_windows, scale))
    # Divide the range into s1 slices into n equal width bins corresponding to a discrete state k
    sigrange = (np.min(signal), np.max(signal))
    edges = np.linspace(sigrange[0], sigrange[1], bins + 1)
    # Compute the probability for a sample in each window to occur in state k
    freq = [np.histogram(windows[w, :], edges)[0] for w in range(n_windows)]
    # Next, we calculate the Shannon entropy value of each window.
    shanens = [entropy_shannon(freq=w / w.sum(), **kwargs)[0] for w in freq]
    info["AvEn"] = np.nanmean(shanens)
    # Number of unique ShanEn values (depending on the scale)
    # (rounding to 12 decimals merges values that differ only by floating-point noise)
    _, freq2 = np.unique(np.round(shanens, 12), return_counts=True)
    freq2 = freq2 / freq2.sum()
    # Shannon entropy again to measure the degree of the "changing"
    enofen, _ = entropy_shannon(freq=freq2, **kwargs)
    return enofen, info
| 3,216 | 35.146067 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_cumulativeresidual.py | import itertools
import numpy as np
import pandas as pd
from .entropy_shannon import _entropy_freq
def entropy_cumulativeresidual(signal, symbolize=None, show=False, freq=None):
    """**Cumulative residual entropy (CREn)**

    The cumulative residual entropy is an alternative to the Shannon
    differential entropy with several advantageous properties, such as non-negativity. The key idea
    is to use the cumulative distribution (CDF) instead of the density function in Shannon's
    entropy.

    .. math::

      CREn = -\\int_{0}^{\\infty} p(|X| > x) \\log_{2} p(|X| > x) dx

    Similarly to :func:`Shannon entropy <entropy_shannon>` and :func:`Petrosian fractal dimension
    <fractal_petrosian>`, different methods to transform continuous signals into discrete ones are
    available. See :func:`complexity_symbolize` for details.

    This function can be called either via ``entropy_cumulativeresidual()`` or ``complexity_cren()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. ``None`` by
        default, which skips the process (and assumes the input is already discrete). See
        :func:`complexity_symbolize` for details.
    show : bool
        If ``True``, will show the discrete the signal.
    freq : np.array
        Instead of a signal, a vector of probabilities can be provided.

    Returns
    -------
    CREn : float
        The cumulative residual entropy.
    info : dict
        A dictionary containing ``Values`` for each pair of events.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = [1, 1, 1, 3, 3, 2, 2, 1, 1, 3, 3, 3]

      @savefig p_entropy_cumulativeresidual1.png scale=100%
      cren, info = nk.entropy_cumulativeresidual(signal, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      cren

    References
    -----------
    * Rao, M., Chen, Y., Vemuri, B. C., & Wang, F. (2004). Cumulative residual entropy: a new
      measure of information. IEEE transactions on Information Theory, 50(6), 1220-1228.

    """
    # Sanity checks: only univariate signals are supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # NOTE(review): if `freq` is passed directly, `events` is never assigned and the
    # `zip(events, freq)` below raises a NameError — the documented `freq` shortcut appears
    # broken; TODO confirm intended semantics (event values are needed for the (b - a) spacing).
    if freq is None:
        events, freq = _entropy_freq(signal, symbolize=symbolize, show=show)
    freq = freq / np.sum(freq)
    events, freq = zip(*sorted(zip(events, freq)))
    # Get the CDF: cumulative probability up to (and including) each sorted event
    cdf = {a: _ for a, _ in zip(events, np.cumsum(freq))}
    # One term per consecutive pair of events; the last slot stays 0 since there are
    # only len(events) - 1 pairs
    terms = np.zeros(len(events))
    for i, (a, b) in enumerate(_entropy_cumulativeresidual_pairwise(events)):
        pgx = cdf[a]
        # (b - a) weights each term by the spacing between consecutive event values
        term = (b - a) * pgx * np.log2(pgx)
        terms[i] = term
    return -np.nansum(terms), {"Values": terms, "Symbolization": symbolize}
# =============================================================================
# Utilities
# =============================================================================
def _entropy_cumulativeresidual_pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
| 3,365 | 32 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_relativeroughness.py | import numpy as np
import pandas as pd
from ..signal import signal_autocor
def complexity_relativeroughness(signal, **kwargs):
    """**Relative Roughness (RR)**

    RR is the ratio of the local variance (autocovariance at lag 1) to the global variance
    (autocovariance at lag 0), computed as ``2 * (1 - autocov[1] / autocov[0])``. It can be
    used to classify different 'noises'
    (see `Hasselman, 2019 <https://complexity-methods.github.io/book/relative-roughness.html>`_)
    and as an index to test for the applicability of fractal analysis (see
    `Marmelat et al., 2012 <https://doi.org/10.3389/fphys.2012.00208>`_).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    **kwargs : optional
        Other arguments to be passed to ``nk.signal_autocor()``.

    Returns
    --------
    rr : float
        The RR value.
    info : dict
        A dictionary containing the lag-0 and lag-1 autocovariances (``ACov``).

    Examples
    --------
    .. ipython:: python

      import neurokit2 as nk

      signal = [1, 2, 3, 4, 5]
      rr, _ = nk.complexity_relativeroughness(signal)
      rr

    References
    ----------
    * Marmelat, V., Torre, K., & Delignieres, D. (2012). Relative roughness:
      an index for testing the suitability of the monofractal model.
      Frontiers in Physiology, 3, 208.

    """
    # Only univariate signals are supported.
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )
    # Autocovariance at lags 0 and 1 (from the info dict returned by signal_autocor)
    autocov = signal_autocor(signal, **kwargs)[1]["ACov"][0:2]
    # RR formula: ratio of local to global variance
    return 2 * (1 - autocov[1] / autocov[0]), {"ACov": autocov}
| 1,865 | 30.627119 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_attention.py | from warnings import warn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.signal
from ..misc import NeuroKitWarning
def entropy_attention(signal, show=False, **kwargs):
    """**Attention Entropy (AttEn)**

    Yang et al. (2020) propose a conceptually new approach called **Attention Entropy (AttEn)**,
    which pays attention only to the key observations (local maxima and minima; i.e., peaks).
    Instead of counting the frequency of all observations, it analyzes the frequency distribution
    of the intervals between the key observations in a time-series. The advantages of the attention
    entropy are that it does not need any parameter to tune, is robust to the time-series length,
    and requires only linear time to compute.

    Because this index relies on peak-detection, it is not suited for noisy signals. Signal
    cleaning (in particular filtering), and eventually more tuning for the peak detection
    algorithm, can help.

    **AttEn** is computed as the average of various subindices, such as:

    * **MaxMax**: The entropy of local-maxima intervals.
    * **MinMin**: The entropy of local-minima intervals.
    * **MaxMin**: The entropy of intervals between local maxima and subsequent minima.
    * **MinMax**: The entropy of intervals between local minima and subsequent maxima.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    show : bool
        If True, the local maxima and minima will be displayed.

    Returns
    --------
    atten : float
        The attention entropy of the signal.
    info : dict
        A dictionary containing values of sub-entropies, such as ``MaxMax``, ``MinMin``,
        ``MaxMin``, and ``MinMax``.
    **kwargs
        Other arguments to be passed to ``scipy.signal.find_peaks()``.

    See Also
    --------
    entropy_shannon, entropy_cumulative_residual

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=1, frequency=5, noise=0.1)

      # Compute Attention Entropy
      @savefig p_entropy_attention1.png scale=100%
      atten, info = nk.entropy_attention(signal, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      atten

    References
    -----------
    * Yang, J., Choudhary, G. I., Rahardja, S., & Franti, P. (2020). Classification of interbeat
      interval time-series using attention entropy. IEEE Transactions on Affective Computing.

    """
    # Note: Code is based on the EntropyHub's package

    # Sanity checks: multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Identify key patterns: indices of local maxima (Xmax) and local minima (Xmin).
    # NOTE(review): the `-signal` negation assumes `signal` supports unary minus
    # (e.g., an np.array); a plain list would fail here — confirm upstream coercion.
    Xmax, _ = scipy.signal.find_peaks(signal, **kwargs)
    Xmin, _ = scipy.signal.find_peaks(-signal, **kwargs)
    if len(Xmax) == 0 or len(Xmin) == 0:
        # Without both kinds of extrema, no intervals can be formed
        warn(
            "No local maxima or minima was detected, which makes it impossible to compute AttEn"
            ". Returning np.nan",
            category=NeuroKitWarning,
        )
        return np.nan, {}

    # Intervals between successive maxima (Txx) and between successive minima (Tnn)
    Txx = np.diff(Xmax)
    Tnn = np.diff(Xmin)
    # Intervals between consecutive extrema of either kind, in temporal order
    Temp = np.diff(np.sort(np.hstack((Xmax, Xmin))))
    # Maxima and minima alternate, so every other interval in Temp is max->min (Txn)
    # and the rest are min->max (Tnx); which comes first depends on whether the
    # earliest extremum is a maximum or a minimum
    if Xmax[0] < Xmin[0]:
        Txn = Temp[::2]
        Tnx = Temp[1::2]
    else:
        Txn = Temp[1::2]
        Tnx = Temp[::2]

    # Histogram of interval lengths: one bin per possible integer interval (0..len(signal))
    Edges = np.arange(-0.5, len(signal) + 1)
    Pnx, _ = np.histogram(Tnx, Edges)
    Pnn, _ = np.histogram(Tnn, Edges)
    Pxx, _ = np.histogram(Txx, Edges)
    Pxn, _ = np.histogram(Txn, Edges)

    # Drop empty bins and normalize counts into relative frequencies
    Pnx = Pnx[Pnx != 0] / len(Tnx)
    Pxn = Pxn[Pxn != 0] / len(Txn)
    Pnn = Pnn[Pnn != 0] / len(Tnn)
    Pxx = Pxx[Pxx != 0] / len(Txx)

    # Shannon entropy (natural log) of each interval-type distribution
    maxmax = -sum(Pxx * np.log(Pxx))
    maxmin = -sum(Pxn * np.log(Pxn))
    minmax = -sum(Pnx * np.log(Pnx))
    minmin = -sum(Pnn * np.log(Pnn))
    # AttEn is the mean of the four sub-entropies
    Av4 = np.mean([minmin, maxmax, maxmin, minmax])

    if show is True:
        # Overlay detected maxima (green) and minima (red) on the signal
        plt.plot(signal, zorder=0, c="black")
        plt.scatter(Xmax, signal[Xmax], c="green", zorder=1)
        plt.scatter(Xmin, signal[Xmin], c="red", zorder=2)

    return Av4, {
        "AttEn_MaxMax": maxmax,
        "AttEn_MinMin": minmin,
        "AttEn_MaxMin": maxmin,
        "AttEn_MinMax": minmax,
    }
# def _find_keypatterns(signal):
# """This original function seems to be equivalent to scipy.signal.find_peaks()"""
# n = len(signal)
# vals = np.zeros(n)
# for i in range(1, n - 1):
# if signal[i - 1] < signal[i] > signal[i + 1]:
# vals[i] = i
# elif signal[i - 1] < signal[i] == signal[i + 1]:
# k = 1
# while (i + k) < n - 1 and signal[i] == signal[i + k]:
# k += 1
# if signal[i] > signal[i + k]:
# vals[i] = i + ((k - 1) // 2)
# return vals[vals != 0].astype(int)
| 4,975 | 30.897436 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/complexity_decorrelation.py | import numpy as np
import pandas as pd
from ..signal import signal_autocor
def complexity_decorrelation(signal):
    """**Decorrelation Time (DT)**

    The decorrelation time (DT) is the time (in samples) of the first zero crossing of the
    autocorrelation sequence. A shorter decorrelation time corresponds to a less correlated
    signal. For instance, a drop in the decorrelation time of EEG has been observed prior to
    seizures, related to a decrease in the low frequency power (Mormann et al., 2005).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    float
        Decorrelation Time (DT)
    dict
        A dictionary containing additional information (currently empty, but returned nonetheless
        for consistency with other functions).

    See Also
    --------
    .signal_autocor

    References
    ----------
    * Mormann, F., Kreuz, T., Rieke, C., Andrzejak, R. G., Kraskov, A., David, P., ... & Lehnertz,
      K. (2005). On the predictability of epileptic seizures. Clinical neurophysiology, 116(3),
      569-587.
    * Teixeira, C. A., Direito, B., Feldwisch-Drentrup, H., Valderrama, M., Costa, R. P.,
      Alvarado-Rojas, C., ... & Dourado, A. (2011). EPILAB: A software package for studies on the
      prediction of epileptic seizures. Journal of Neuroscience Methods, 200(2), 257-271.

    """
    # Multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Unbiased autocorrelation (see https://github.com/mne-tools/mne-features/)
    autocor, _ = signal_autocor(signal, method="unbiased")

    # Locate sign changes of the autocorrelation sequence
    sign_changes = np.diff(np.sign(autocor)) != 0
    if not np.any(sign_changes):
        # No zero crossing found: return the sentinel value -1
        return -1, {}

    # DT is the index (1-based) of the first sign change
    return np.argmax(sign_changes) + 1, {}
| 2,275 | 30.611111 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_tmf.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats
from packaging import version
from ..signal import signal_surrogate
from .fractal_dfa import fractal_dfa
def fractal_tmf(signal, n=40, show=False, **kwargs):
    """**Multifractal Nonlinearity (tMF)**

    The Multifractal Nonlinearity index (*t*\\MF) is the *t*\\-value resulting from the comparison
    of the multifractality of the signal (measured by the spectrum width, see
    :func:`.fractal_dfa`) with the multifractality of linearized
    :func:`surrogates <.signal_surrogate>` obtained by the IAAFT method (i.e., reshuffled series
    with comparable linear structure).

    This statistics grows larger the more the original series departs from the multifractality
    attributable to the linear structure of IAAFT surrogates. When p-value reaches significance, we
    can conclude that the signal's multifractality encodes processes that a linear contingency
    cannot.

    This index provides an extension of the assessment of multifractality, of which the
    multifractal spectrum is by itself a measure of heterogeneity, rather than interactivity.
    As such, it cannot alone be used to assess the specific presence of cascade-like interactivity
    in the time series, but must be compared to the spectrum of a sample of its surrogates.

    .. figure:: ../img/bell2019.jpg
       :alt: Figure from Bell et al. (2019).
       :target: https://doi.org/10.3389/fphys.2019.00998

    Both significantly negative and positive values can indicate interactivity, as any difference
    from the linear structure represented by the surrogates is an indication of nonlinear
    contingence. Indeed, if the degree of heterogeneity for the original series is significantly
    less than for the sample of linear surrogates, that is no less evidence of a failure of
    linearity than if the degree of heterogeneity is significantly greater.

    .. note::

      Help us review the implementation of this index by checking-it out and letting us know
      wether it is correct or not.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    n : int
        Number of surrogates. The literature uses values between 30 and 40.
    **kwargs : optional
        Other arguments to be passed to :func:`.fractal_dfa`.

    Returns
    -------
    float
        tMF index.
    info : dict
        A dictionary containing additional information, such as the p-value.

    See Also
    --------
    fractal_dfa, .signal_surrogate

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=1, sampling_rate=200, frequency=[5, 6, 12], noise=0.2)

      # Compute tMF
      @savefig p_fractal_tmf.png scale=100%
      tMF, info = nk.fractal_tmf(signal, n=100, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      tMF  # t-value
      info["p"]  # p-value

    References
    ----------
    * Ihlen, E. A., & Vereijken, B. (2013). Multifractal formalisms of human behavior. Human
      movement science, 32(4), 633-651.
    * Kelty-Stephen, D. G., Palatinus, K., Saltzman, E., & Dixon, J. A. (2013). A tutorial on
      multifractality, cascades, and interactivity for empirical time series in ecological science.
      Ecological Psychology, 25(1), 1-62.
    * Bell, C. A., Carver, N. S., Zbaracki, J. A., & Kelty-Stephen, D. G. (2019). Non-linear
      amplification of variability through interaction across scales supports greater accuracy in
      manual aiming: evidence from a multifractal analysis with comparisons to linear surrogates in
      the fitts task. Frontiers in physiology, 10, 998.

    """
    # Sanity checks: multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    info = {}

    # Multifractal spectrum width of the original signal.
    # NOTE(review): w0 is obtained by column-indexing the first element returned by
    # fractal_dfa, so it is presumably a one-element pandas Series (its `.values` and
    # `.iloc[0]` are used below) — confirm against fractal_dfa's return type.
    w0 = fractal_dfa(signal, multifractal=True, show=False)[0]["Width"]

    # Spectrum widths of n IAAFT surrogates (linearized versions of the signal)
    w = np.zeros(n)
    for i in range(n):
        surro = signal_surrogate(signal, method="IAAFT")
        w[i] = float(fractal_dfa(surro, multifractal=True, show=False)[0]["Width"].iloc[0])

    # Run t-test: compare the surrogate widths against the original width
    # TODO: adjust in the future
    if version.parse(scipy.__version__) < version.parse("1.10.0"):
        # NOTE(review): with an array-like popmean, older SciPy returns array-valued
        # statistics, hence the [0] indexing and .item() unwrapping — confirm the exact
        # version cutoff against scipy.stats.ttest_1samp's changelog.
        t, p = scipy.stats.ttest_1samp(w, w0)
        t = t[0]
        t = t.item()
        info["p"] = p[0]
    else:
        t, info["p"] = scipy.stats.ttest_1samp(w, w0)

    if show is True:
        # Density of surrogate widths with the original width marked as a vertical line
        pd.Series(w).plot(kind="density", label="Width of surrogates")
        plt.axvline(x=w0.values, c="red", label="Width of original signal")
        plt.title(f"tMF = {t:.2f}, p = {info['p']:.2f}")
        plt.legend()

    return t, info
| 4,893 | 36.358779 | 99 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_fuzzy.py | # -*- coding: utf-8 -*-
from .entropy_approximate import entropy_approximate
from .entropy_sample import entropy_sample
def entropy_fuzzy(signal, delay=1, dimension=2, tolerance="sd", approximate=False, **kwargs):
    """**Fuzzy Entropy (FuzzyEn)**

    Fuzzy entropy (FuzzyEn) of a signal stems from the combination between information theory and
    fuzzy set theory (Zadeh, 1965). A fuzzy set is a set containing elements with varying degrees
    of membership.

    This function can be called either via ``entropy_fuzzy()`` or ``complexity_fuzzyen()``, or
    ``complexity_fuzzyapen()`` for its approximate version. Note that the fuzzy corrected
    approximate entropy (cApEn) can also be computed via setting ``corrected=True`` (see examples).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    tolerance : float
        Tolerance (often denoted as *r*), distance to consider two data points as similar. If
        ``"sd"`` (default), will be set to :math:`0.2 * SD_{signal}`. See
        :func:`complexity_tolerance` to estimate the optimal value for this parameter.
    approximate : bool
        If ``True``, will compute the fuzzy approximate entropy (FuzzyApEn).
    **kwargs
        Other arguments.

    Returns
    ----------
    fuzzyen : float
        The fuzzy entropy of the single time series.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute fuzzy entropy.

    See Also
    --------
    entropy_sample

    References
    ----------
    * Ishikawa, A., & Mieno, H. (1979). The fuzzy entropy concept and its application. Fuzzy Sets
      and systems, 2(2), 113-123.
    * Zadeh, L. A. (1996). Fuzzy sets. In Fuzzy sets, fuzzy logic, and fuzzy systems: selected
      papers by Lotfi A Zadeh (pp. 394-432).

    """
    # FuzzyEn is SampEn with fuzzy membership; FuzzyApEn is its ApEn-based counterpart.
    # (The strict `is False` test mirrors the historical behavior of this dispatcher.)
    entropy_function = entropy_sample if approximate is False else entropy_approximate
    return entropy_function(
        signal,
        delay=delay,
        dimension=dimension,
        tolerance=tolerance,
        fuzzy=True,
        **kwargs,
    )
| 3,166 | 33.053763 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/utils_complexity_coarsegraining.py | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage.filters
from ..signal import signal_interpolate
def complexity_coarsegraining(signal, scale=2, method="nonoverlapping", show=False, **kwargs):
    """**Coarse-graining of a signal**

    The goal of coarse-graining is to represent the signal at a different "scale". The
    coarse-grained time series for a scale factor Tau (:math:`\\tau`) are obtained by averaging
    non-overlapping windows of size Tau. In most of the complexity metrics, multiple coarse-grained
    segments are constructed for a given signal, to represent the signal at different scales (hence
    the "multiscale" adjective).

    .. figure:: ../img/wu2013a.png
       :alt: Figure from Wu et al. (2013).
       :target: https://doi.org/10.1016/j.physleta.2014.03.034

    This coarse-graining procedure is similar to moving averaging and the decimation of the original
    time series. The length of each coarse-grained time series is N/Tau. For ``scale = 1``, the
    coarse-grained time series is simply the original time series itself.

    The coarse graining procedure (used for instance in MSE) is considered a shortcoming that
    decreases the entropy rate artificially (Nikulin, 2004). One of the core issue is that the
    length of coarse-grained signals becomes smaller as the scale increases.

    To address this issue of length, several methods have been proposed, such as **adaptive
    resampling** (Liu et al. 2012), **moving average** (Wu et al. 2013), or **timeshift**
    (Wu et al. 2013).

    * **Non-overlapping** (default): The coarse-grained time series are constructed by averaging
      non-overlapping windows of given size.
    * **Interpolate**: Interpolates (i.e., resamples) the coarse-grained time series to match the
      original signal length (currently using a monotonic cubic method, but let us know if you have
      any opinion on that).
    * **Moving average**: The coarse-grained time series via a moving average.
    * **Time-shift**: For each scale, a *k* number of coarse-grained vectors are constructed.
      Somewhat similar to moving-average, with the difference that the time lag creates new
      vectors.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    scale : int
        The size of the windows that the signal is divided into. Also referred to as Tau
        :math:`\\tau`, it represents the scale factor and corresponds to
        the amount of coarsegraining.
    method : str
        Can be ``"nonoverlapping"``, ``"rolling"``, ``"interpolate"``, or ``"timeshift"``.
    show : bool
        If ``True``, will show the coarse-grained signal.
    **kwargs
        Other arguments (not used currently).

    Returns
    -------
    array
        The coarse-grained signal. For ``"timeshift"``, a 2D array with one row per time lag.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported options.

    See Also
    ------------
    complexity_delay, complexity_dimension

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      signal = [0, 2, 4, 6, 8, 10]
      nk.complexity_coarsegraining(signal, scale=2)

      signal = [0, 1, 2, 0, 1]
      nk.complexity_coarsegraining(signal, scale=3)

      nk.complexity_coarsegraining(signal=range(10), method="interpolate")
      nk.complexity_coarsegraining(signal=range(10), method="rolling")

    References
    -----------
    * Su, C., Liang, Z., Li, X., Li, D., Li, Y., & Ursino, M. (2016). A comparison of multiscale
      permutation entropy measures in on-line depth of anesthesia monitoring. PLoS One, 11(10),
      e0164104.
    * Nikulin, V. V., & Brismar, T. (2004). Comment on "Multiscale entropy analysis of complex
      physiologic time series”" Physical review letters, 92(8), 089803.
    * Liu, Q., Wei, Q., Fan, S. Z., Lu, C. W., Lin, T. Y., Abbod, M. F., & Shieh, J. S. (2012).
      Adaptive computation of multiscale entropy and its application in EEG signals for monitoring
      depth of anesthesia during surgery. Entropy, 14(6), 978-992.
    * Wu, S. D., Wu, C. W., Lee, K. Y., & Lin, S. G. (2013). Modified multiscale entropy for
      short-term time series analysis. Physica A: Statistical Mechanics and its Applications, 392
      (23), 5865-5873.
    * Wu, S. D., Wu, C. W., Lin, S. G., Wang, C. C., & Lee, K. Y. (2013). Time series analysis
      using composite multiscale entropy. Entropy, 15(3), 1069-1084.

    """
    # Sanity checks: scale 0/1 means no coarse-graining; a scale larger than the
    # signal cannot produce any window
    if scale in [0, 1]:
        return signal
    n = len(signal)
    if scale > n:
        return np.array([])

    if method in ["nonoverlapping", "resampling", "interpolate"]:
        # The following is a fast alternative to:
        # pd.Series(signal).rolling(window=scale).mean().values[scale-1::scale]

        # Get max j (number of complete non-overlapping windows)
        j = n // scale
        # Coarse-grain: average each window of `scale` samples
        coarse = np.nanmean(np.reshape(signal[0 : j * scale], (j, scale)), axis=1)

        if method in ["resampling", "interpolate"]:
            # Resample back to the original length, placing each coarse value at the
            # center of its window
            x_values = (np.arange(len(coarse)) * scale + scale / 2).astype(int)
            coarse = signal_interpolate(
                x_values, coarse, x_new=np.arange(n), method="monotone_cubic"
            )

    elif method == "rolling":
        # Relying on scipy is a fast alternative to:
        # pd.Series(signal).rolling(window=scale).mean().values[scale-1::]
        # https://stackoverflow.com/questions/13728392/moving-average-or-running-mean
        # Fix: use scipy.ndimage directly — the `scipy.ndimage.filters` namespace is
        # deprecated and removed in recent SciPy versions.
        coarse = scipy.ndimage.uniform_filter1d(signal, size=scale, mode="nearest")
        # Drop the left edge so values align with a trailing rolling mean
        coarse = coarse[scale - 1 : :]

    elif method == "timeshift":
        # One coarse-grained vector per possible time lag (rows after transpose)
        coarse = np.transpose(np.reshape(signal[: scale * (n // scale)], (n // scale, scale)))

    else:
        raise ValueError("Unknown `method`: {}".format(method))

    if show is True:
        _complexity_show(signal[0:n], coarse, method=method)
    return coarse
# =============================================================================
# Utils
# =============================================================================
def _complexity_show(signal, coarse, method="nonoverlapping"):
    """Plot the original signal with its coarse-grained version overlaid in red."""
    plt.plot(signal, linewidth=1.5)
    if method == "nonoverlapping":
        # Spread coarse values evenly over the original time axis
        positions = np.linspace(0, len(signal), len(coarse))
        plt.plot(positions, coarse, color="red", linewidth=0.75)
        plt.scatter(positions, coarse, color="red", linewidth=0.5)
    elif method == "timeshift":
        # One trace per lagged coarse-grained vector
        for lag, vector in enumerate(coarse):
            plt.plot(
                np.arange(lag, len(signal) - len(coarse) + lag + 1, len(coarse)),
                vector,
                color="red",
                linewidth=0.75,
            )
    else:
        plt.plot(np.linspace(0, len(signal), len(coarse)), coarse, color="red", linewidth=1)
    plt.title(f'Coarse-graining using method "{method}"')
# =============================================================================
# Get Scale Factor
# =============================================================================
def _get_scales(signal, scale="default", dimension=2):
"""Select scale factors"""
if scale is None or scale == "max":
scale = np.arange(1, len(signal) // 2) # Set to max
elif scale == "default":
# See https://github.com/neuropsychology/NeuroKit/issues/75#issuecomment-583884426
scale = np.arange(1, int(len(signal) / (dimension + 10)))
elif isinstance(scale, int):
scale = np.arange(1, scale + 1)
return scale
| 9,547 | 39.457627 | 101 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_svd.py | import numpy as np
import pandas as pd
from .utils_complexity_embedding import complexity_embedding
def entropy_svd(signal, delay=1, dimension=2, show=False):
    """**Singular Value Decomposition (SVD) Entropy**

    SVD entropy (SVDEn) can be seen as an indicator of how many eigenvectors are needed for an
    adequate explanation of the dataset: it measures feature-richness. The higher the SVD
    entropy, the more orthogonal vectors are required to adequately explain the space-state.
    Like :func:`Fisher Information (FI) <information_fisher>`, it is based on the Singular Value
    Decomposition of the :func:`time-delay embedded <complexity_embedding>` signal.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    show : bool
        If True, will plot the attractor.

    Returns
    ----------
    svd : float
        The singular value decomposition (SVD).
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute SVDEn.

    See Also
    --------
    information_fisher, complexity_embedding, complexity_delay, complexity_dimension

    References
    ----------
    * Roberts, S. J., Penny, W., & Rezek, I. (1999). Temporal and spatial complexity measures for
      electroencephalogram based brain-computer interfacing. Medical & biological engineering &
      computing, 37(1), 93-98.

    """
    # Multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Time-delay embedding, then singular values of the trajectory matrix
    embedded = complexity_embedding(signal, delay=delay, dimension=dimension, show=show)
    singular = np.linalg.svd(embedded, compute_uv=False)

    # Normalize singular values into a probability-like distribution
    singular = singular / np.sum(singular)

    # Shannon entropy (base 2) of the normalized singular values
    svden = -np.sum(singular * np.log2(singular))
    return svden, {"Dimension": dimension, "Delay": delay}
| 2,732 | 35.932432 | 102 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_slope.py | import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
def entropy_slope(signal, dimension=3, thresholds=[0.1, 45], **kwargs):
    """**Slope Entropy (SlopEn)**

    Slope Entropy (SlopEn) uses an alphabet of three symbols, 0, 1, and 2, with positive (+) and
    negative versions (-) of the last two. Each symbol covers a range of slopes for the segment
    joining two consecutive samples of the input data, and the
    :func:`Shannon entropy <entropy_shannon>` of the relative frequency of each pattern is
    computed.

    .. figure:: ../img/cuestafrau2019.png
       :alt: Figure from Cuesta-Frau, D. (2019).
       :target: https://doi.org/10.3390/e21121167

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    thresholds : list
        Angular thresholds (called *levels*). A list of monotonically increasing values in the
        range [0, 90] degrees.
    **kwargs : optional
        Other keyword arguments, such as the logarithmic ``base`` to use for
        :func:`entropy_shannon`.

    Returns
    -------
    slopen : float
        Slope Entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon

    References
    ----------
    * Cuesta-Frau, D. (2019). Slope entropy: A new time series complexity estimator based on both
      symbolic patterns and amplitude information. Entropy, 21(12), 1167.

    """
    # Sanity checks: multidimensional input is not supported
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Store parameters
    info = {"Dimension": dimension}

    # The paper defines SlopEn on consecutive differences, so `delay` is not formally
    # exposed in the signature; still honor it if passed, and remove it from kwargs so it
    # is not forwarded to entropy_shannon.
    delay = kwargs.pop("delay", 1)

    # Each subsequence of length m drawn from x can be transformed into another subsequence of
    # length m-1 with the differences of each pair of consecutive samples, expressed here as
    # slope angles in degrees.
    # NOTE(review): the vectorized slicing assumes `signal` supports array arithmetic
    # (e.g., np.array) — confirm upstream coercion for list inputs.
    Tx = np.degrees(np.arctan(signal[delay:] - signal[:-delay]))
    N = len(Tx)

    # A threshold or thresholds must be applied to these differences in order to find the
    # corresponding symbolic representation: symbol q for slopes in (thresholds[q-1],
    # thresholds[q]], mirrored with negative symbols for negative slopes, and an extra
    # symbol beyond the last threshold.
    symbols = np.zeros(N)
    for q in range(1, len(thresholds)):
        symbols[np.logical_and(Tx <= thresholds[q], Tx > thresholds[q - 1])] = q
        symbols[np.logical_and(Tx >= -thresholds[q], Tx < -thresholds[q - 1])] = -q
        if q == len(thresholds) - 1:
            symbols[Tx > thresholds[q]] = q + 1
            symbols[Tx < -thresholds[q]] = -(q + 1)

    # Sliding patterns of m-1 consecutive symbols; count the occurrences of each pattern
    unique = np.array([symbols[k : N - dimension + k + 1] for k in range(dimension - 1)]).T
    _, freq = np.unique(unique, axis=0, return_counts=True)

    # Shannon Entropy of the relative pattern frequencies
    slopen, _ = entropy_shannon(freq=freq / freq.sum(), **kwargs)
    return slopen, info
| 3,938 | 34.486486 | 104 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/optim_complexity_optimize.py | import matplotlib
import matplotlib.collections
import matplotlib.pyplot as plt
import numpy as np
from .entropy_approximate import entropy_approximate
from .optim_complexity_delay import (
_embedding_delay_metric,
_embedding_delay_plot,
_embedding_delay_select,
)
from .optim_complexity_dimension import (
_embedding_dimension_afn,
_embedding_dimension_ffn,
_embedding_dimension_plot,
)
from .optim_complexity_tolerance import _optimize_tolerance_plot
def complexity_optimize(
    signal,
    delay_max=50,
    delay_method="fraser1986",
    dimension_max=10,
    dimension_method="afnn",
    tolerance_method="maxApEn",
    show=False,
    **kwargs
):
    """**Joint-estimation of optimal complexity parameters**

    The selection of the parameters *Dimension* and *Delay* is a challenge. One approach is to
    select them (semi) independently (as dimension selection often requires the delay) from each
    other, using :func:`complexity_delay` and :func:`complexity_dimension`.

    Estimate optimal complexity parameters Dimension (m), Time Delay (tau) and tolerance (r).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay_max : int
        See :func:`complexity_delay`.
    delay_method : str
        See :func:`complexity_delay`.
    dimension_max : int
        See :func:`complexity_dimension`.
    dimension_method : str
        See :func:`complexity_dimension`.
    tolerance_method : str
        See :func:`complexity_tolerance`.
    show : bool
        Defaults to ``False``.

    Returns
    -------
    optimal_dimension : int
        Optimal dimension.
    optimal_delay : int
        Optimal time delay.
    optimal_tolerance : int
        Optimal tolerance

    See Also
    ------------
    complexity_delay, complexity_dimension, complexity_tolerance

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=10, frequency=[5, 7], noise=0.01)
      parameters = nk.complexity_optimize(signal, show=True)
      parameters

    References
    -----------
    * Gautama, T., Mandic, D. P., & Van Hulle, M. M. (2003, April). A differential entropy based
      method for determining the optimal embedding parameters of a signal. In 2003 IEEE
      International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings.
      (ICASSP'03). (Vol. 6, pp. VI-29). IEEE.
    * Camplani, M., & Cannas, B. (2009). The role of the embedding dimension and time delay in time
      series forecasting. IFAC Proceedings Volumes, 42(7), 316-320.
    * Rosenstein, M. T., Collins, J. J., & De Luca, C. J. (1994). Reconstruction expansion as a
      geometry-based framework for choosing proper delay times. Physica-Section D, 73(1), 82-98.
    * Cao, L. (1997). Practical method for determining the minimum embedding dimension of a scalar
      time series. Physica D: Nonlinear Phenomena, 110(1-2), 43-50.
    * Lu, S., Chen, X., Kanters, J. K., Solomon, I. C., & Chon, K. H. (2008). Automatic selection of
      the threshold value r for approximate entropy. IEEE Transactions on Biomedical Engineering,
      55(8), 1966-1972.

    """
    out = {}

    # Step 1: optimize the time delay (tau)
    tau_sequence, metric, metric_values, out["Delay"] = _complexity_delay(
        signal, delay_max=delay_max, method=delay_method
    )

    # Step 2: optimize the embedding dimension (m), using the delay found above
    dimension_seq, optimize_indices, out["Dimension"] = _complexity_dimension(
        signal, delay=out["Delay"], dimension_max=dimension_max, method=dimension_method, **kwargs
    )

    # Step 3: optimize the tolerance (r)
    # NOTE(review): if tolerance_method is neither "traditional" nor "maxapen"/"optimize",
    # out["Tolerance"] is silently never set — confirm whether an error would be preferable.
    tolerance_method = tolerance_method.lower()
    if tolerance_method in ["traditional"]:
        # Conventional heuristic: 20% of the (sample) standard deviation
        out["Tolerance"] = 0.2 * np.std(signal, ddof=1)
    if tolerance_method in ["maxapen", "optimize"]:
        # r that maximizes approximate entropy
        r_range, ApEn, out["Tolerance"] = _complexity_tolerance(
            signal, delay=out["Delay"], dimension=out["Dimension"]
        )

    if show is True:
        # Plotting requires the ApEn curve, which only the "maxapen" path computes
        if tolerance_method in ["traditional"]:
            raise ValueError(
                "NeuroKit error: complexity_optimize():"
                "show is not available for current tolerance_method"
            )
        if tolerance_method in ["maxapen", "optimize"]:
            _complexity_plot(
                signal,
                out,
                tau_sequence,
                metric,
                metric_values,
                dimension_seq[:-1],
                optimize_indices,
                r_range,
                ApEn,
                dimension_method=dimension_method,
            )

    return out
# =============================================================================
# Plot
# =============================================================================
def _complexity_plot(
    signal,
    out,
    tau_sequence,
    metric,
    metric_values,
    dimension_seq,
    optimize_indices,
    r_range,
    ApEn,
    dimension_method="afnn",
):
    """Summary plot for the optimization of complexity parameters.

    Draws three stacked panels (delay, dimension and tolerance optimization) on
    the left, plus the reconstructed attractor (2D or 3D depending on the optimal
    embedding dimension) in a right-hand column. The arguments are the
    intermediate outputs of ``_complexity_delay()``, ``_complexity_dimension()``
    and ``_complexity_tolerance()``; ``out`` is the dict of optimized values.

    Returns the matplotlib figure.
    """
    # Prepare figure: 3 rows of parameter panels + a narrower right column for the attractor
    fig = plt.figure(constrained_layout=False)
    spec = matplotlib.gridspec.GridSpec(
        ncols=2, nrows=3, height_ratios=[1, 1, 1], width_ratios=[1 - 1.2 / np.pi, 1.2 / np.pi]
    )
    ax_tau = fig.add_subplot(spec[0, :-1])
    ax_dim = fig.add_subplot(spec[1, :-1])
    ax_r = fig.add_subplot(spec[2, :-1])
    # Render the attractor in 3D only when the optimal dimension allows it
    if out["Dimension"] > 2:
        plot_type = "3D"
        ax_attractor = fig.add_subplot(spec[:, -1], projection="3d")
    else:
        plot_type = "2D"
        ax_attractor = fig.add_subplot(spec[:, -1])
    # Fix: title previously read "Otimization"
    fig.suptitle("Optimization of Complexity Parameters", fontweight="bold", fontsize=16)
    plt.tight_layout(h_pad=0.4, w_pad=0.05)
    # Plot tau optimization and the attractor
    _embedding_delay_plot(
        signal,
        metric_values=metric_values,
        tau_sequence=tau_sequence,
        tau=out["Delay"],
        metric=metric,
        ax0=ax_tau,
        ax1=ax_attractor,
        plot=plot_type,
    )
    # Plot dimension optimization (diagnostic curves depend on the method used)
    if dimension_method.lower() in ["afnn"]:
        _embedding_dimension_plot(
            method=dimension_method,
            dimension_seq=dimension_seq,
            min_dimension=out["Dimension"],
            E1=optimize_indices[0],
            E2=optimize_indices[1],
            ax=ax_dim,
        )
    if dimension_method.lower() in ["fnn"]:
        _embedding_dimension_plot(
            method=dimension_method,
            dimension_seq=dimension_seq,
            min_dimension=out["Dimension"],
            f1=optimize_indices[0],
            f2=optimize_indices[1],
            f3=optimize_indices[2],
            ax=ax_dim,
        )
    # Plot tolerance (r) optimization
    _optimize_tolerance_plot(out["Tolerance"], {"Values": r_range, "Scores": ApEn}, ax=ax_r)
    return fig
# =============================================================================
# Internals
# ==============================================================================
def _complexity_delay(signal, delay_max=100, method="fraser1986"):
    """Estimate the optimal time delay (tau) for time-delay embedding.

    Returns the candidate delays, the name of the metric used, the metric
    values for each candidate delay, and the selected optimal delay.

    Raises
    ------
    ValueError
        If ``method`` is not recognized or if no optimal delay can be found.
    """
    # Initialize the candidate delays
    if isinstance(delay_max, int):
        tau_sequence = np.arange(1, delay_max)
    else:
        tau_sequence = np.array(delay_max)

    # Map the method name onto the metric to compute and the selection rule
    method = method.lower()
    if method in ["fraser", "fraser1986", "tdmi"]:
        metric = "Mutual Information"
        algorithm = "first local minimum"
    elif method in ["theiler", "theiler1990"]:
        metric = "Autocorrelation"
        algorithm = "first 1/e crossing"
    elif method in ["casdagli", "casdagli1991"]:
        metric = "Autocorrelation"
        algorithm = "first zero crossing"
    elif method in ["rosenstein", "rosenstein1993", "adfd"]:
        metric = "Displacement"
        algorithm = "closest to 40% of the slope"
    else:
        raise ValueError("NeuroKit error: complexity_delay(): 'method' not recognized.")

    metric_values = _embedding_delay_metric(signal, tau_sequence, metric=metric)

    # Get optimal tau. Fix: use boolean `not` instead of bitwise `~`; the
    # original `~np.isnan(...)` only worked by relying on NumPy's bool
    # inversion and is error-prone with plain Python booleans.
    optimal = _embedding_delay_select(metric_values, algorithm=algorithm)
    if not np.isnan(optimal):
        tau = tau_sequence[optimal]
    else:
        raise ValueError(
            "NeuroKit error: No optimal time delay is found."
            " Consider using a higher `delay_max`."
        )

    return tau_sequence, metric, metric_values, tau
def _complexity_dimension(
    signal, delay=1, dimension_max=20, method="afnn", R=10.0, A=2.0, **kwargs
):
    """Estimate the optimal embedding dimension (internal helper).

    Returns the candidate dimensions, the method-specific diagnostic curves,
    and the selected minimum dimension.
    """
    # Candidate dimensions to evaluate
    if isinstance(dimension_max, int):
        dimension_seq = np.arange(1, dimension_max + 1)
    else:
        dimension_seq = np.array(dimension_max)

    method = method.lower()
    if method in ["afnn"]:
        # Averaged False Nearest Neighbors (Cao, 1997)
        attractor_size, variation = _embedding_dimension_afn(
            signal, dimension_seq=dimension_seq, delay=delay, show=False, **kwargs
        )
        e1 = attractor_size[1:] / attractor_size[:-1]
        e2 = variation[1:] / variation[:-1]
        # First dimension at which E1 saturates (reaches 85% of its maximum)
        min_dimension = int(np.flatnonzero(e1 >= 0.85 * np.max(e1))[0]) + 1
        return dimension_seq, [e1, e2], min_dimension
    if method in ["fnn"]:
        # False Nearest Neighbors: first dimension whose f3 is close enough
        # to its smallest non-zero value
        f1, f2, f3 = _embedding_dimension_ffn(
            signal, dimension_seq=dimension_seq, delay=delay, R=R, A=A, **kwargs
        )
        min_dimension = int(np.flatnonzero(f3 <= 1.85 * np.min(f3[np.nonzero(f3)]))[0])
        return dimension_seq, [f1, f2, f3], min_dimension
    raise ValueError("NeuroKit error: complexity_dimension(): 'method' not recognized.")
def _complexity_tolerance(signal, delay=None, dimension=None):
    """Find the tolerance (r) maximizing approximate entropy (internal helper).

    Candidate tolerances range from 2% to 78% of the signal's standard
    deviation; the optimum is the one yielding maximal ApEn (Lu et al., 2008).
    """
    r_range = np.arange(0.02, 0.8, 0.02) * np.std(signal, ddof=1)
    apens = np.zeros_like(r_range)
    for idx, tolerance in enumerate(r_range):
        apens[idx] = entropy_approximate(
            signal, delay=delay, dimension=dimension, tolerance=tolerance
        )[0]
    optimal_r = r_range[np.argmax(apens)]
    return r_range, apens, optimal_r
| 9,977 | 31.501629 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_nld.py | from warnings import warn
import numpy as np
import pandas as pd
from ..misc import NeuroKitWarning
from ..stats import standardize
def fractal_nld(signal, corrected=False):
    """**Fractal dimension via Normalized Length Density (NLDFD)**

    NLDFD is a very simple index corresponding to the average absolute consecutive
    differences of the (standardized) signal (``np.mean(np.abs(np.diff(std_signal)))``).

    This method was developed for measuring signal complexity of very short durations (< 30
    samples), and can be used for instance when continuous signal FD changes (or "running" FD) are
    of interest (by computing it on sliding windows, see example).

    For methods such as Higuchi's FD, the standard deviation of the window FD increases sharply
    when the epoch becomes shorter. The NLD method results in lower standard deviation especially
    for shorter epochs, though at the expense of lower accuracy in average window FD.

    See Also
    --------
    fractal_higuchi

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    corrected : bool
        If ``True``, will rescale the output value according to the power model estimated by
        Kalauzi et al. (2009) to make it more comparable with "true" FD range, as follows:
        ``FD = 1.9079*((NLD-0.097178)^0.18383)``. Note that this can result in ``np.nan`` if the
        result of the difference is negative.

    Returns
    --------
    fd : float
        The fractal dimension (NLD) of the signal. Can be ``np.nan`` when ``corrected=True``
        and the NLD falls below the correction threshold.
    info : dict
        A dictionary containing additional information (currently empty, but returned
        nonetheless for consistency with other functions).

    Examples
    ----------
    **Example 1**: Usage on a short signal

    .. ipython:: python

      import neurokit2 as nk

      # Simulate a short signal with duration of 0.5s
      signal = nk.signal_simulate(duration=0.5, frequency=[3, 5])

      # Compute Fractal Dimension
      fd, _ = nk.fractal_nld(signal, corrected=False)
      fd

    **Example 2**: Compute FD-NLD on non-overlapping windows

    .. ipython:: python

      import numpy as np

      # Simulate a long signal with duration of 5s
      signal = nk.signal_simulate(duration=5, frequency=[3, 5, 10], noise=0.1)

      # We want windows of size=100 (0.1s)
      n_windows = len(signal) // 100  # How many windows

      # Split signal into windows
      windows = np.array_split(signal, n_windows)

      # Compute FD-NLD on all windows
      nld = [nk.fractal_nld(i, corrected=False)[0] for i in windows]
      np.mean(nld)  # Get average

    **Example 3**: Calculate FD-NLD on sliding windows

    .. ipython:: python

      # Simulate a long signal with duration of 5s
      signal = nk.signal_simulate(duration=5, frequency=[3, 5, 10], noise=0.1)
      # Add period of noise
      signal[1000:3000] = signal[1000:3000] + np.random.normal(0, 1, size=2000)

      # Create function-wrapper that only return the NLD value
      nld = lambda x: nk.fractal_nld(x, corrected=False)[0]

      # Use them in a rolling window of 100 samples (0.1s)
      rolling_nld = pd.Series(signal).rolling(100, min_periods = 100, center=True).apply(nld)

      @savefig p_nld1.png scale=100%
      nk.signal_plot([signal, rolling_nld], subplots=True, labels=["Signal", "FD-NLD"])
      @suppress
      plt.close()

    References
    ----------
    * Kalauzi, A., Bojić, T., & Rakić, L. (2009). Extracting complexity waveforms from
      one-dimensional signals. Nonlinear biomedical physics, 3(1), 1-11.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Amplitude normalization
    signal = standardize(signal)

    # Calculate normalized length density
    nld = np.nanmean(np.abs(np.diff(signal)))

    if corrected:
        # Power model optimal parameters based on analysis of EEG signals (from Kalauzi et al. 2009)
        a = 1.9079
        k = 0.18383
        nld_diff = nld - 0.097178  # NLD - NLD0
        if nld_diff < 0:
            # Fix: warning message previously read "retuning"
            warn(
                "Normalized Length Density of the signal may be too small, returning `np.nan`.",
                category=NeuroKitWarning,
            )
            nld = np.nan
        else:
            nld = a * (nld_diff ** k)

    return nld, {}
| 4,552 | 32.233577 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/__init__.py | """Submodule for NeuroKit."""
import functools
from .complexity import complexity
from .complexity_decorrelation import complexity_decorrelation
from .complexity_hjorth import complexity_hjorth
from .complexity_lempelziv import complexity_lempelziv
from .complexity_lyapunov import complexity_lyapunov
from .complexity_relativeroughness import complexity_relativeroughness
from .complexity_rqa import complexity_rqa
from .entropy_angular import entropy_angular
from .entropy_approximate import entropy_approximate
from .entropy_attention import entropy_attention
from .entropy_bubble import entropy_bubble
from .entropy_coalition import entropy_coalition
from .entropy_cosinesimilarity import entropy_cosinesimilarity
from .entropy_cumulativeresidual import entropy_cumulativeresidual
from .entropy_differential import entropy_differential
from .entropy_dispersion import entropy_dispersion
from .entropy_distribution import entropy_distribution
from .entropy_fuzzy import entropy_fuzzy
from .entropy_grid import entropy_grid
from .entropy_hierarchical import entropy_hierarchical
from .entropy_increment import entropy_increment
from .entropy_kl import entropy_kl
from .entropy_kolmogorov import entropy_kolmogorov
from .entropy_maximum import entropy_maximum
from .entropy_multiscale import entropy_multiscale
from .entropy_ofentropy import entropy_ofentropy
from .entropy_permutation import entropy_permutation
from .entropy_phase import entropy_phase
from .entropy_power import entropy_power
from .entropy_range import entropy_range
from .entropy_rate import entropy_rate
from .entropy_renyi import entropy_renyi
from .entropy_sample import entropy_sample
from .entropy_shannon import entropy_shannon
from .entropy_shannon_joint import entropy_shannon_joint
from .entropy_slope import entropy_slope
from .entropy_spectral import entropy_spectral
from .entropy_svd import entropy_svd
from .entropy_symbolicdynamic import entropy_symbolicdynamic
from .entropy_tsallis import entropy_tsallis
from .fractal_correlation import fractal_correlation
from .fractal_density import fractal_density
from .fractal_dfa import fractal_dfa
from .fractal_higuchi import fractal_higuchi
from .fractal_hurst import fractal_hurst
from .fractal_katz import fractal_katz
from .fractal_linelength import fractal_linelength
from .fractal_nld import fractal_nld
from .fractal_petrosian import fractal_petrosian
from .fractal_psdslope import fractal_psdslope
from .fractal_sda import fractal_sda
from .fractal_sevcik import fractal_sevcik
from .fractal_tmf import fractal_tmf
from .information_fisher import fisher_information
from .information_fishershannon import fishershannon_information
from .information_gain import information_gain
from .information_mutual import mutual_information
from .optim_complexity_delay import complexity_delay
from .optim_complexity_dimension import complexity_dimension
from .optim_complexity_k import complexity_k
from .optim_complexity_optimize import complexity_optimize
from .optim_complexity_tolerance import complexity_tolerance
from .TODO_entropy_wiener import entropy_wiener
from .utils_complexity_attractor import complexity_attractor
from .utils_complexity_coarsegraining import complexity_coarsegraining
from .utils_complexity_embedding import complexity_embedding
from .utils_complexity_ordinalpatterns import complexity_ordinalpatterns
from .utils_complexity_simulate import complexity_simulate
from .utils_complexity_symbolize import complexity_symbolize
from .utils_fractal_mandelbrot import fractal_mandelbrot
from .utils_recurrence_matrix import recurrence_matrix
# Aliases
# -------
# Convenience names that map `complexity_*` aliases onto their canonical
# implementations, some with preset keyword arguments via functools.partial.

# Entropy aliases
complexity_se = entropy_shannon
complexity_diffen = entropy_differential
complexity_cren = entropy_cumulativeresidual
complexity_apen = entropy_approximate
complexity_capen = functools.partial(entropy_approximate, corrected=True)
complexity_atten = entropy_attention
complexity_sampen = entropy_sample
complexity_fuzzyen = entropy_fuzzy
complexity_fuzzyapen = functools.partial(entropy_fuzzy, approximate=True)
complexity_pe = entropy_permutation
complexity_wpe = functools.partial(entropy_permutation, weighted=True)

# Multiscale entropy variants (composite / refined-composite / fuzzy)
complexity_mse = entropy_multiscale
complexity_mspe = functools.partial(entropy_multiscale, scale="MSPEn")
complexity_cmse = functools.partial(entropy_multiscale, method="CMSEn")
complexity_rcmse = functools.partial(entropy_multiscale, method="RCMSEn")
complexity_fuzzymse = functools.partial(entropy_multiscale, fuzzy=True)
complexity_fuzzycmse = functools.partial(entropy_multiscale, method="CMSEn", fuzzy=True)
complexity_fuzzyrcmse = functools.partial(entropy_multiscale, method="RCMSEn", fuzzy=True)

# Fractal / other aliases
complexity_dfa = fractal_dfa
fractal_mfdfa = functools.partial(fractal_dfa, multifractal=True)
complexity_mfdfa = fractal_mfdfa
complexity_lzc = complexity_lempelziv
complexity_plzc = functools.partial(complexity_lzc, permutation=True)
complexity_mplzc = functools.partial(complexity_lzc, multiscale=True)
complexity_cd = fractal_correlation
complexity_plot = functools.partial(complexity_optimize, show=True)
# Explicit public API of the complexity submodule (used by `from ... import *`)
__all__ = [
    "complexity",
    "complexity_attractor",
    "complexity_embedding",
    "complexity_coarsegraining",
    "complexity_ordinalpatterns",
    "complexity_symbolize",
    "complexity_decorrelation",
    "recurrence_matrix",
    "complexity_delay",
    "complexity_dimension",
    "complexity_optimize",
    "complexity_simulate",
    "complexity_hjorth",
    "fractal_hurst",
    "complexity_tolerance",
    "complexity_lempelziv",
    "complexity_lzc",
    "complexity_plzc",
    "complexity_mplzc",
    "complexity_lyapunov",
    "complexity_mfdfa",
    "complexity_cd",
    "complexity_plot",
    "complexity_se",
    "complexity_apen",
    "complexity_atten",
    "complexity_capen",
    "complexity_cren",
    "complexity_diffen",
    "complexity_k",
    "complexity_sampen",
    "complexity_fuzzyen",
    "complexity_mse",
    "complexity_fuzzymse",
    "complexity_cmse",
    "complexity_fuzzycmse",
    "complexity_rcmse",
    "complexity_fuzzyrcmse",
    "complexity_pe",
    "complexity_wpe",
    "complexity_mspe",
    "complexity_dfa",
    "complexity_relativeroughness",
    "complexity_rqa",
    "entropy_angular",
    "entropy_maximum",
    "entropy_shannon",
    "entropy_shannon_joint",
    "entropy_power",
    "entropy_rate",
    "entropy_tsallis",
    "entropy_renyi",
    "entropy_kolmogorov",
    "entropy_attention",
    "entropy_ofentropy",
    "entropy_slope",
    "entropy_increment",
    "entropy_differential",
    "entropy_kl",
    "entropy_distribution",
    "entropy_symbolicdynamic",
    "entropy_cumulativeresidual",
    "entropy_approximate",
    "entropy_bubble",
    "entropy_coalition",
    "entropy_sample",
    "entropy_phase",
    "entropy_dispersion",
    "entropy_grid",
    "entropy_spectral",
    "entropy_svd",
    "entropy_fuzzy",
    "complexity_fuzzyapen",
    "entropy_multiscale",
    "entropy_hierarchical",
    "entropy_wiener",
    "entropy_permutation",
    "entropy_range",
    "entropy_cosinesimilarity",
    "fisher_information",
    "fishershannon_information",
    "fractal_dfa",
    "fractal_correlation",
    "fractal_density",
    "fractal_higuchi",
    "fractal_katz",
    "fractal_linelength",
    "fractal_petrosian",
    "fractal_sevcik",
    "fractal_mandelbrot",
    "fractal_mfdfa",
    "fractal_tmf",
    "fractal_nld",
    "fractal_psdslope",
    "fractal_sda",
    "mutual_information",
    "information_gain",
]
| 7,448 | 33.971831 | 90 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_phase.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
def entropy_phase(signal, delay=1, k=4, show=False, **kwargs):
    """**Phase Entropy (PhasEn)**

    Phase entropy (PhasEn or PhEn) has been developed by quantifying the distribution of the
    signal across *k* parts of a two-dimensional phase space referred to as a second order
    difference plot (SODP). It builds on the concept of :func:`Grid Entropy <entropy_grid>`,
    that uses :func:`Poincaré plot <.hrv_nonlinear>` as its basis.

    .. figure:: ../img/rohila2019.png
       :alt: Figure from Rohila et al. (2019).
       :target: https://doi.org/10.1088/1361-6579/ab499e

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    k : int
        The number of sections that the SODP is divided into. It is a coarse graining parameter
        that defines how fine the grid is. It is recommended to use even-numbered (preferably
        multiples of 4) partitions for sake of symmetry.
    show : bool
        Plot the Second Order Difference Plot (SODP).
    **kwargs : optional
        Other keyword arguments, such as the logarithmic ``base`` to use for
        :func:`entropy_shannon`.

    Returns
    -------
    phasen : float
        Phase Entropy
    info : dict
        A dictionary containing additional information regarding the parameters used.

    See Also
    --------
    entropy_shannon

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # Compute Phase Entropy
      @savefig p_entropy_phase1.png scale=100%
      phasen, info = nk.entropy_phase(signal, k=4, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      phasen

    .. ipython:: python

      @savefig p_entropy_phase2.png scale=100%
      phasen, info = nk.entropy_phase(signal, k=8, show=True)
      @suppress
      plt.close()

    References
    ----------
    * Rohila, A., & Sharma, A. (2019). Phase entropy: A new complexity measure for heart rate
      variability. Physiological Measurement, 40(10), 105006.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    info = {"k": k, "Delay": delay}

    # 1. Compute SODP axes
    y = signal[2 * delay :] - signal[delay:-delay]
    x = signal[delay:-delay] - signal[: -2 * delay]

    # 2. Compute the slope (angle theta) of each scatter point from the origin
    with np.errstate(divide="ignore", invalid="ignore"):
        theta = np.arctan(y / x)
    # Quadrant corrections so that theta lies in [0, 2*pi)
    theta[np.logical_and((y < 0), (x < 0))] += np.pi
    theta[np.logical_and((y < 0), (x > 0))] += 2 * np.pi
    theta[np.logical_and((y > 0), (x < 0))] += np.pi

    # 3. The entire plot is divided into k sections having an angle span of 2pi*k radians each
    angles = np.linspace(0, 2 * np.pi, k + 1)

    # 4. The cumulative slope of each sector is obtained by adding the slope of each scatter
    #    point within that sector
    # 5. The probability distribution of the slopes in each sector is computed
    freq = [
        np.sum(theta[np.logical_and((theta > angles[i]), (theta < angles[i + 1]))])
        for i in range(k)
    ]
    freq = np.array(freq) / np.sum(freq)

    # 6. the Shannon entropy computed from the distribution p(i)
    phasen, _ = entropy_shannon(freq=freq, **kwargs)

    # Normalize
    phasen = phasen / np.log(k)

    if show is True:
        # Color each sector of the SODP and draw the sector boundaries
        Tx = np.zeros((k, len(theta)))
        for i in range(k):
            Temp = np.logical_and((theta > angles[i]), (theta < angles[i + 1]))
            Tx[i, Temp] = 1
        limx = np.ceil(np.max(np.abs([y, x])))
        Tx = Tx.astype(bool)
        Ys = np.sin(angles) * limx * np.sqrt(2)
        Xs = np.cos(angles) * limx * np.sqrt(2)
        colors = plt.get_cmap("jet")(np.linspace(0, 1, k))

        plt.figure()
        for i in range(k):
            plt.plot(x[Tx[i, :]], y[Tx[i, :]], ".", color=tuple(colors[i, :]))
        plt.plot(
            np.vstack((np.zeros(k + 1), Xs)), np.vstack((np.zeros(k + 1), Ys)), color="red"
        )
        plt.axis([-limx, limx, -limx, limx])
        plt.gca().set_aspect("equal", "box")
        # Fix: removed a stray trailing comma after plt.xlabel(...) that
        # created an accidental (no-op) one-element tuple expression
        plt.xlabel(r"$X(n + \tau) - X(n)$")
        plt.ylabel(r"$X(n + 2 \tau) - X(n + \tau)$")
        plt.xticks([-limx, 0, limx])
        plt.yticks([-limx, 0, limx])
        plt.title("Second Order Difference Plot (SODP)")

    return phasen, info
| 4,979 | 33.825175 | 119 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/TODO_fractal_capacity.py | # flake8: noqa
# WIP ================================================================
# -0*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
import scipy.misc
from .utils_complexity_embedding import complexity_embedding
def fractal_capacity(signal, delay=1, rounding=3, show=False):
    """(WIP) Capacity (box-counting) fractal dimension of a signal.

    NOTE(review): this function lives in a TODO/WIP file. The scipy-based 2D
    interpolation below is computed and displayed, but its result `Z` is then
    immediately overwritten by `_signal_to_image()` — presumably leftover
    exploratory code; confirm before relying on this implementation.

    Examples
    ---------
    .. ipython:: python

      import neurokit2 as nk

      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

    """
    # From https://gist.github.com/viveksck/1110dfca01e4ec2c608515f0d5a5b1d1
    embedded = complexity_embedding(np.round(signal, 1), delay=delay, dimension=2)
    # embedded = np.round(embedded, 1)
    x, y = embedded[:, 0], embedded[:, 1]
    z = np.full(len(x), 1)
    # Create 2D representation
    xsteps = 100  # resolution in x
    ysteps = 100  # resolution in y
    grid_x, grid_y = np.mgrid[min(x) : xsteps : max(x), min(y) : ysteps : max(y)]
    Z = scipy.interpolate.griddata(
        (x, y), z, (grid_x, grid_y), fill_value=0, method="nearest", rescale=True
    )
    # NOTE(review): imshow is called unconditionally here (even when show=False) — likely WIP leftover
    plt.imshow(Z, cmap="Greys", interpolation="nearest")
    # nk.signal_plot(embedded[:, 1])
    # NOTE(review): the interpolated Z above is discarded and replaced here
    Z = _signal_to_image(signal, rounding=rounding)
    # Minimal dimension of image
    p = np.min(Z.shape)
    # Greatest power of 2 less than or equal to p
    n = 2 ** np.floor(np.log(p) / np.log(2))
    # Extract the exponent
    n = int(np.log(n) / np.log(2))
    # Build successive box sizes (from 2**n down to 2**1)
    sizes = 2 ** np.arange(n, 1, -1)
    # Actual box counting with decreasing size
    counts = []
    for size in sizes:
        counts.append(_fractal_capacity_boxcount(Z, size))
    # Fit the successive log(sizes) with log (counts)
    coeffs = np.polyfit(np.log2(sizes), np.log2(counts), 1)
    if show is True:
        _fractal_capacity_plot(sizes, counts, coeffs)
    # The capacity dimension is the negated slope of the log-log fit
    return -coeffs[0]
# =============================================================================
# Utils
# =============================================================================
def _fractal_capacity_boxcount(Z, k):
# From https://github.com/rougier/numpy-100 (#87)
S = np.add.reduceat(
np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0), np.arange(0, Z.shape[1], k), axis=1
)
# We count non-empty (0) and non-full boxes (k*k)
return len(np.where((S > 0) & (S < k * k))[0])
def _signal_to_image(signal, rounding=3, show=False):
"""
Examples
---------
import neurokit2 as nk
>>>
signal = nk.signal_simulate()
nk.signal_plot(signal)
>>>
# signal_to_image(signal, rounding=2, show=True)
# signal_to_image(signal, rounding=1, show=True)
"""
x = np.round(signal, rounding)
y_vals = np.unique(x)
y = np.arange(len(y_vals))
m = np.zeros((len(y), len(x)))
for i in range(len(x)):
m[np.where(y_vals == x[i])[0][0], i] = 1
if show is True:
plt.imshow(m, cmap="Greys", interpolation="nearest")
return m
def _fractal_capacity_plot(sizes, counts, coeffs):
    """Log-log plot of box counts vs. box sizes with the fitted capacity-dimension line."""
    # Evaluate the fitted line in log2 space, then back-transform for loglog plotting
    fit = 2 ** np.polyval(coeffs, np.log2(sizes))
    plt.loglog(sizes, counts, "bo")
    # The negated slope of the fit is the capacity dimension estimate
    plt.loglog(sizes, fit, "r", label=r"$D$ = %0.3f" % -coeffs[0])
    plt.title("Capacity Dimension")
    plt.xlabel(r"$\log_{2}$(Sizes)")
    plt.ylabel(r"$\log_{2}$(Counts)")
    plt.legend()
    plt.show()
| 3,403 | 26.901639 | 100 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/fractal_psdslope.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ..signal import signal_detrend, signal_psd
def fractal_psdslope(signal, method="voss1988", show=False, **kwargs):
    """**Fractal dimension via Power Spectral Density (PSD) slope**

    Fractal exponent can be computed from Power Spectral Density slope (PSDslope) analysis in
    signals characterized by a frequency power-law dependence.

    It first transforms the time series into the frequency domain, and breaks down the signal into
    sine and cosine waves of a particular amplitude that together "add-up" to represent the
    original signal.

    If there is a systematic relationship between the frequencies in the signal and the power of
    those frequencies, this will reveal itself in log-log coordinates as a linear relationship. The
    slope of the best fitting line is taken as an estimate of the fractal scaling exponent and can
    be converted to an estimate of the fractal dimension.

    A slope of 0 is consistent with white noise, and a slope of less than 0 but greater than -1,
    is consistent with pink noise i.e., 1/f noise. Spectral slopes as steep as -2 indicate
    fractional Brownian motion, the epitome of random walk processes.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    method : str
        Method to estimate the fractal dimension from the slope,
        can be ``"voss1988"`` (default) or ``"hasselman2013"``.
    show : bool
        If True, returns the log-log plot of PSD versus frequency.
    **kwargs
        Other arguments to be passed to ``signal_psd()`` (such as ``method``).

    Returns
    ----------
    fd : float
        Estimate of the fractal dimension obtained from PSD slope analysis.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to perform PSD slope analysis.

    Raises
    ------
    ValueError
        If the input is multidimensional, or if ``method`` is not one of the supported options.

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal with Laplace Noise
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # Compute the Fractal Dimension from PSD slope
      @savefig p_fractal_psdslope1.png scale=100%
      psdslope, info = nk.fractal_psdslope(signal, show=True)
      @suppress
      plt.close()

    .. ipython:: python

      psdslope

    References
    ----------
    * https://complexity-methods.github.io/book/power-spectral-density-psd-slope.html
    * Hasselman, F. (2013). When the blind curve is finite: dimension estimation and model
      inference based on empirical waveforms. Frontiers in Physiology, 4, 75. https://doi.org/10.3389/fphys.2013.00075
    * Voss, R. F. (1988). Fractals in nature: From characterization to simulation. The Science of
      Fractal Images, 21-70.
    * Eke, A., Hermán, P., Kocsis, L., and Kozak, L. R. (2002). Fractal characterization of
      complexity in temporal physiological signals. Physiol. Meas. 23, 1-38.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Translated from https://github.com/FredHasselman/casnet/blob/master/R/fd.R
    # Detrend
    signal = signal_detrend(signal)

    # Standardise using N instead of N-1
    signal = (signal - np.nanmean(signal)) / np.nanstd(signal)

    # Get psd with fourier transform, keeping only the lower frequency quartile
    psd = signal_psd(signal, sampling_rate=1000, method="fft", show=False, **kwargs)
    psd = psd[psd["Frequency"] < psd.quantile(0.25)[0]]
    psd = psd[psd["Frequency"] > 0]

    # Get slope of the log-log PSD
    slope, intercept = np.polyfit(np.log10(psd["Frequency"]), np.log10(psd["Power"]), 1)

    # "Check the global slope for anti-persistent noise (GT +0.20) and fit the line starting from
    # the highest frequency" in FredHasselman/casnet.
    # Not sure about that, commenting it out for now.
    # if slope > 0.2:
    #     slope, intercept = np.polyfit(np.log10(np.flip(psd["Frequency"])), np.log10(np.flip(psd["Power"])), 1)

    # Sanitize method name and convert slope to a fractal dimension estimate
    method = method.lower()
    if method in ["voss", "voss1988"]:
        fd = (5 - slope) / 2
    elif method in ["hasselman", "hasselman2013"]:
        # Convert from periodogram based self-affinity parameter estimate (`sa`) to an informed
        # estimate of fd
        fd = 3 / 2 + ((14 / 33) * np.tanh(slope * np.log(1 + np.sqrt(2))))
    else:
        # Fix: an unrecognized method previously fell through and raised a
        # confusing NameError on `fd`; fail early with a clear message instead.
        raise ValueError(
            "NeuroKit error: fractal_psdslope(): 'method' should be one of "
            "'voss1988' or 'hasselman2013'."
        )

    if show:
        _fractal_psdslope_plot(psd["Frequency"], psd["Power"], slope, intercept, fd, ax=None)

    return fd, {"Slope": slope, "Method": method}
# =============================================================================
# Plotting
# =============================================================================
def _fractal_psdslope_plot(frequency, psd, slope, intercept, fd, ax=None):
    """Plot log10(Power) against log10(Frequency) with the fitted slope line.

    If `ax` is None a new figure is created (and returned); otherwise the plot
    is drawn onto the provided axes and None is returned.
    """
    if ax is None:
        fig, ax = plt.subplots()
        fig.suptitle(
            "Power Spectral Density (PSD) slope analysis" + ", slope = " + str(np.round(slope, 2))
        )
    else:
        fig = None
        ax.set_title(
            "Power Spectral Density (PSD) slope analysis" + ", slope = " + str(np.round(slope, 2))
        )
    ax.set_ylabel(r"$\log_{10}$(Power)")
    ax.set_xlabel(r"$\log_{10}$(Frequency)")
    # ax.scatter(np.log10(frequency), np.log10(psd), marker="o", zorder=1)
    ax.plot(np.log10(frequency), np.log10(psd), zorder=1)
    # Fitted regression line in log-log space
    # fit_values = [slope * i + intercept for i in np.log10(frequency)]
    fit = np.polyval((slope, intercept), np.log10(frequency))
    ax.plot(
        np.log10(frequency),
        fit,
        color="#FF9800",
        zorder=2,
        label="Fractal Dimension = " + str(np.round(fd, 2)),
    )
    ax.legend(loc="lower right")
    return fig
| 5,899 | 37.815789 | 118 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_dispersion.py | import numpy as np
import pandas as pd
from .entropy_shannon import entropy_shannon
from .utils_complexity_embedding import complexity_embedding
from .utils_complexity_symbolize import complexity_symbolize
def entropy_dispersion(
    signal, delay=1, dimension=3, c=6, symbolize="NCDF", fluctuation=False, rho=1, **kwargs
):
    """**Dispersion Entropy (DispEn)**

    The Dispersion Entropy (DispEn). Also returns the Reverse Dispersion Entropy (RDEn).

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    delay : int
        Time delay (often denoted *Tau* :math:`\\tau`, sometimes referred to as *lag*) in samples.
        See :func:`complexity_delay` to estimate the optimal value for this parameter.
    dimension : int
        Embedding Dimension (*m*, sometimes referred to as *d* or *order*). See
        :func:`complexity_dimension` to estimate the optimal value for this parameter.
    c : int
        Number of symbols *c*. Rostaghi (2016) recommend in practice a *c* between 4 and 8.
    symbolize : str
        Method to convert a continuous signal input into a symbolic (discrete) signal. Can be one
        of ``"NCDF"`` (default), ``"finesort"``, or others. See :func:`complexity_symbolize` for
        details.
    fluctuation : bool
        Fluctuation-based Dispersion entropy.
    rho : float
        Tuning parameter of "finesort". Only when ``method="finesort"``.
    **kwargs : optional
        Other keyword arguments (currently not used).

    Returns
    -------
    DispEn : float
        Dispersion Entropy (DispEn) of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        (including the Reverse Dispersion Entropy under the key ``"RDEn"``).

    See Also
    --------
    entropy_shannon, entropy_multiscale, entropy_symbolicdynamic

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal
      signal = nk.signal_simulate(duration=2, sampling_rate=200, frequency=[5, 6], noise=0.5)

      # Compute Dispersion Entropy (DispEn)
      dispen, info = nk.entropy_dispersion(signal, c=3)
      dispen

      # Get Reverse Dispersion Entropy (RDEn)
      info["RDEn"]

      # Fluctuation-based DispEn with "finesort"
      dispen, info = nk.entropy_dispersion(signal, c=3, symbolize="finesort", fluctuation=True)
      dispen

    References
    ----------
    * Rostaghi, M., & Azami, H. (2016). Dispersion entropy: A measure for time-series analysis.
      IEEE Signal Processing Letters, 23(5), 610-614.

    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Store parameters
    info = {"Dimension": dimension, "Delay": delay, "c": c, "Symbolization": symbolize}

    # Symbolization and embedding
    if symbolize == "finesort":
        # "Finesort": symbolize with NCDF, then append an extra column encoding the
        # (rho-scaled) maximum local variation of each embedded vector
        symbolic = complexity_symbolize(signal, method="NCDF", c=c)
        Ym = np.zeros((len(signal) - (dimension - 1) * delay, dimension))
        for k in range(dimension):
            Ym[:, k] = symbolic[k * delay : len(signal) - ((dimension - k - 1) * delay)]
        Yi = np.floor(np.max(abs(np.diff(Ym)), axis=1) / (rho * np.std(abs(np.diff(signal)))))
        embedded = complexity_embedding(symbolic, dimension=dimension, delay=delay)
        Yi = np.expand_dims(Yi, axis=1)
        embedded = np.hstack((embedded, Yi))
    else:
        symbolic = complexity_symbolize(signal, method=symbolize, c=c)
        embedded = complexity_embedding(symbolic, dimension=dimension, delay=delay)

    # Fluctuation-based variant: work on differences between embedded coordinates
    if fluctuation is True:
        embedded = np.diff(embedded, axis=1)

    # Relative frequency of each unique dispersion pattern
    _, freq = np.unique(embedded, return_counts=True, axis=0)
    freq = freq / freq.sum()

    # Shannon entropy of the pattern distribution
    DispEn, _ = entropy_shannon(freq=freq, **kwargs)

    # Reverse Dispersion Entropy (RDEn): squared distance from the uniform distribution
    if fluctuation is True:
        rden = np.sum((freq - (1 / ((2 * c - 1) ** (dimension - 1)))) ** 2)
    else:
        rden = np.sum((freq - (1 / (c ** dimension))) ** 2)

    # Normalize
    DispEn = DispEn / np.log(c ** dimension)
    info["RDEn"] = rden / (1 - (1 / (c ** dimension)))

    return DispEn, info
| 4,292 | 35.07563 | 98 | py |
NeuroKit | NeuroKit-master/neurokit2/complexity/entropy_differential.py | import numpy as np
import pandas as pd
import scipy.stats
def entropy_differential(signal, base=2, **kwargs):
    """**Differential entropy (DiffEn)**

    Differential entropy (DiffEn; also referred to as continuous entropy) started as an
    attempt by Shannon to extend Shannon entropy. However, differential entropy presents some
    issues too, such as that it can be negative even for simple distributions (such as the uniform
    distribution).

    This function can be called either via ``entropy_differential()`` or ``complexity_diffen()``.

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.
    base: float
        The logarithmic base to use, defaults to ``2``, giving a unit in *bits*. Note that ``scipy.
        stats.entropy()`` uses Euler's number (``np.e``) as default (the natural logarithm), giving
        a measure of information expressed in *nats*.
    **kwargs : optional
        Other arguments passed to ``scipy.stats.differential_entropy()``, e.g. ``method``
        (the estimator to use, defaults to ``"vasicek"``).

    Returns
    --------
    diffen : float
        The Differential entropy of the signal.
    info : dict
        A dictionary containing additional information regarding the parameters used
        to compute Differential entropy.

    See Also
    --------
    entropy_shannon, entropy_cumulativeresidual, entropy_kl

    Examples
    ----------
    .. ipython:: python

      import neurokit2 as nk

      # Simulate a Signal with Laplace Noise
      signal = nk.signal_simulate(duration=2, frequency=5, noise=0.1)

      # Compute Differential Entropy
      diffen, info = nk.entropy_differential(signal)
      diffen

    References
    -----------
    * `scipy.stats.differential_entropy()
      <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.differential_entropy.html>`_
    * https://en.wikipedia.org/wiki/Differential_entropy

    """
    # Sanity checks: only 1D inputs are supported for now
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Normalize array-like inputs (e.g., pd.Series, np.ndarray) to a plain list;
    # strings are passed through unchanged (scipy will raise on non-numeric data).
    if not isinstance(signal, str):
        signal = list(signal)

    # Extract the estimator from kwargs (default: Vasicek's estimator), so the
    # remaining kwargs can be forwarded untouched to scipy.
    method = kwargs.pop("method", "vasicek")

    diffen = scipy.stats.differential_entropy(signal, method=method, base=base, **kwargs)

    return diffen, {"Method": method, "Base": base}
| 2,600 | 31.924051 | 100 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.