| signature | body | docstring | id |
|---|---|---|---|
def cvxEDA(eda, sampling_rate=<NUM_LIT:1000>, tau0=<NUM_LIT>, tau1=<NUM_LIT>, delta_knot=<NUM_LIT>, alpha=<NUM_LIT>, gamma=<NUM_LIT>, solver=None, verbose=False, options={'<STR_LIT>':<NUM_LIT>}):
|
frequency = <NUM_LIT:1>/sampling_rate<EOL>eda = z_score(eda)<EOL>eda = np.array(eda)[:,<NUM_LIT:0>]<EOL>n = len(eda)<EOL>eda = eda.astype('<STR_LIT>')<EOL>eda = cv.matrix(eda)<EOL>a1 = <NUM_LIT:1.>/min(tau1, tau0) <EOL>a0 = <NUM_LIT:1.>/max(tau1, tau0)<EOL>ar = np.array([(a1*frequency + <NUM_LIT>) * (a0*frequency + <NUM_LIT>), <NUM_LIT>*a1*a0*frequency**<NUM_LIT:2> - <NUM_LIT>,<EOL>(a1*frequency - <NUM_LIT>) * (a0*frequency - <NUM_LIT>)]) / ((a1 - a0) * frequency**<NUM_LIT:2>)<EOL>ma = np.array([<NUM_LIT:1.>, <NUM_LIT>, <NUM_LIT:1.>])<EOL>i = np.arange(<NUM_LIT:2>, n)<EOL>A = cv.spmatrix(np.tile(ar, (n-<NUM_LIT:2>,<NUM_LIT:1>)), np.c_[i,i,i], np.c_[i,i-<NUM_LIT:1>,i-<NUM_LIT:2>], (n,n))<EOL>M = cv.spmatrix(np.tile(ma, (n-<NUM_LIT:2>,<NUM_LIT:1>)), np.c_[i,i,i], np.c_[i,i-<NUM_LIT:1>,i-<NUM_LIT:2>], (n,n))<EOL>delta_knot_s = int(round(delta_knot / frequency))<EOL>spl = np.r_[np.arange(<NUM_LIT:1.>,delta_knot_s), np.arange(delta_knot_s, <NUM_LIT:0.>, -<NUM_LIT:1.>)] <EOL>spl = np.convolve(spl, spl, '<STR_LIT>')<EOL>spl /= max(spl)<EOL>i = np.c_[np.arange(-(len(spl)//<NUM_LIT:2>), (len(spl)+<NUM_LIT:1>)//<NUM_LIT:2>)] + np.r_[np.arange(<NUM_LIT:0>, n, delta_knot_s)]<EOL>nB = i.shape[<NUM_LIT:1>]<EOL>j = np.tile(np.arange(nB), (len(spl),<NUM_LIT:1>))<EOL>p = np.tile(spl, (nB,<NUM_LIT:1>)).T<EOL>valid = (i >= <NUM_LIT:0>) & (i < n)<EOL>B = cv.spmatrix(p[valid], i[valid], j[valid])<EOL>C = cv.matrix(np.c_[np.ones(n), np.arange(<NUM_LIT:1.>, n+<NUM_LIT:1.>)/n])<EOL>nC = C.size[<NUM_LIT:1>]<EOL>if verbose is False:<EOL><INDENT>options["<STR_LIT>"] = False<EOL><DEDENT>old_options = cv.solvers.options.copy()<EOL>cv.solvers.options.clear()<EOL>cv.solvers.options.update(options)<EOL>if solver == '<STR_LIT>':<EOL><INDENT>z = lambda m,n: cv.spmatrix([],[],[],(m,n))<EOL>G = cv.sparse([[-A,z(<NUM_LIT:2>,n),M,z(nB+<NUM_LIT:2>,n)],[z(n+<NUM_LIT:2>,nC),C,z(nB+<NUM_LIT:2>,nC)],<EOL>[z(n,<NUM_LIT:1>),-<NUM_LIT:1>,<NUM_LIT:1>,z(n+nB+<NUM_LIT:2>,<NUM_LIT:1>)],[z(<NUM_LIT:2>*n+<NUM_LIT:2>,<NUM_LIT:1>),-<NUM_LIT:1>,<NUM_LIT:1>,z(nB,<NUM_LIT:1>)],<EOL>[z(n+<NUM_LIT:2>,nB),B,z(<NUM_LIT:2>,nB),cv.spmatrix(<NUM_LIT:1.0>, range(nB), range(nB))]])<EOL>h = cv.matrix([z(n,<NUM_LIT:1>),<NUM_LIT>,<NUM_LIT>,eda,<NUM_LIT>,<NUM_LIT>,z(nB,<NUM_LIT:1>)])<EOL>c = cv.matrix([(cv.matrix(alpha, (<NUM_LIT:1>,n)) * A).T,z(nC,<NUM_LIT:1>),<NUM_LIT:1>,gamma,z(nB,<NUM_LIT:1>)])<EOL>res = cv.solvers.conelp(c, G, h, dims={'<STR_LIT:l>':n,'<STR_LIT:q>':[n+<NUM_LIT:2>,nB+<NUM_LIT:2>],'<STR_LIT:s>':[]})<EOL>obj = res['<STR_LIT>']<EOL><DEDENT>else:<EOL><INDENT>Mt, Ct, Bt = M.T, C.T, B.T<EOL>H = cv.sparse([[Mt*M, Ct*M, Bt*M], [Mt*C, Ct*C, Bt*C],<EOL>[Mt*B, Ct*B, Bt*B+gamma*cv.spmatrix(<NUM_LIT:1.0>, range(nB), range(nB))]])<EOL>f = cv.matrix([(cv.matrix(alpha, (<NUM_LIT:1>,n)) * A).T - Mt*eda, -(Ct*eda), -(Bt*eda)])<EOL>res = cv.solvers.qp(H, f, cv.spmatrix(-A.V, A.I, A.J, (n,len(f))),<EOL>cv.matrix(<NUM_LIT:0.>, (n,<NUM_LIT:1>)), solver=solver)<EOL>obj = res['<STR_LIT>'] + <NUM_LIT> * (eda.T * eda)<EOL><DEDENT>cv.solvers.options.clear()<EOL>cv.solvers.options.update(old_options)<EOL>l = res['<STR_LIT:x>'][-nB:]<EOL>d = res['<STR_LIT:x>'][n:n+nC]<EOL>tonic = B*l + C*d<EOL>q = res['<STR_LIT:x>'][:n]<EOL>p = A * q<EOL>phasic = M * q<EOL>e = eda - phasic - tonic<EOL>phasic = np.array(phasic)[:,<NUM_LIT:0>]<EOL><INDENT>results = (np.array(a).ravel() for a in (r, t, p, l, d, e, obj))<EOL><DEDENT>return(tonic, phasic)<EOL>
|
A convex optimization approach to electrodermal activity processing (CVXEDA).
This function implements the cvxEDA algorithm described in "cvxEDA: a
Convex Optimization Approach to Electrodermal Activity Processing" (Greco et al., 2015).
Parameters
----------
eda : list or array
raw EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
tau0 : float
Slow time constant of the Bateman function.
tau1 : float
Fast time constant of the Bateman function.
delta_knot : float
Time between knots of the tonic spline function.
alpha : float
Penalization for the sparse SMNA driver.
gamma : float
Penalization for the tonic spline coefficients.
solver : str
Sparse QP solver to be used, see cvxopt.solvers.qp.
verbose : bool
Print progress?
options : dict
Solver options, see http://cvxopt.org/userguide/coneprog.html#algorithm-parameters
Returns
----------
tonic, phasic : numpy.array
The tonic and phasic components.
Notes
----------
*Authors*
- Luca Citi (https://github.com/lciti)
- Alberto Greco
*Dependencies*
- cvxopt
- numpy
*See Also*
- cvxEDA: https://github.com/lciti/cvxEDA
References
-----------
- Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing.
- Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804.
|
f10896:m1
|
def eda_scr(signal, sampling_rate=<NUM_LIT:1000>, treshold=<NUM_LIT:0.1>, method="<STR_LIT>"):
|
<EOL>if method == "<STR_LIT>":<EOL><INDENT>gradient = np.gradient(signal)<EOL>size = int(<NUM_LIT:0.1> * sampling_rate)<EOL>smooth, _ = biosppy.tools.smoother(signal=gradient, kernel='<STR_LIT>', size=size, mirror=True)<EOL>zeros, = biosppy.tools.zero_cross(signal=smooth, detrend=True)<EOL>onsets = []<EOL>peaks = []<EOL>for i in zeros:<EOL><INDENT>if smooth[i+<NUM_LIT:1>] > smooth[i-<NUM_LIT:1>]:<EOL><INDENT>onsets.append(i)<EOL><DEDENT>else:<EOL><INDENT>peaks.append(i)<EOL><DEDENT><DEDENT>peaks = np.array(peaks)<EOL>onsets = np.array(onsets)<EOL><DEDENT>else:<EOL><INDENT>peaks, _ = biosppy.tools.find_extrema(signal=signal, mode='<STR_LIT>')<EOL>onsets, _ = biosppy.tools.find_extrema(signal=signal, mode='<STR_LIT>')<EOL><DEDENT>peaks = peaks[peaks > onsets[<NUM_LIT:0>]]<EOL>onsets = onsets[onsets < peaks[-<NUM_LIT:1>]]<EOL>risingtimes = peaks-onsets<EOL>risingtimes = risingtimes/sampling_rate*<NUM_LIT:1000><EOL>peaks = peaks[risingtimes > <NUM_LIT:100>]<EOL>onsets = onsets[risingtimes > <NUM_LIT:100>]<EOL>amplitudes = signal[peaks]-signal[onsets]<EOL>mask = amplitudes > np.std(signal)*treshold<EOL>peaks = peaks[mask]<EOL>onsets = onsets[mask]<EOL>amplitudes = amplitudes[mask]<EOL>recoveries = []<EOL>for x, peak in enumerate(peaks):<EOL><INDENT>try:<EOL><INDENT>window = signal[peak:onsets[x+<NUM_LIT:1>]]<EOL><DEDENT>except IndexError:<EOL><INDENT>window = signal[peak:]<EOL><DEDENT>recovery_amp = signal[peak]-amplitudes[x]/<NUM_LIT:2><EOL>try:<EOL><INDENT>smaller = find_closest_in_list(recovery_amp, window, "<STR_LIT>")<EOL>recovery_pos = peak + list(window).index(smaller)<EOL>recoveries.append(recovery_pos)<EOL><DEDENT>except ValueError:<EOL><INDENT>recoveries.append(np.nan)<EOL><DEDENT><DEDENT>recoveries = np.array(recoveries)<EOL>return(onsets, peaks, amplitudes, recoveries)<EOL>
|
Skin-Conductance Responses extraction algorithm.
Parameters
----------
signal : list or array
EDA signal array.
sampling_rate : int
Sampling rate (samples/second).
treshold : float
SCR minimum threshold (as a proportion of the signal's standard deviation).
method : str
"fast" or "slow". Either use a gradient-based approach or a local extrema one.
Returns
----------
onsets, peaks, amplitudes, recoveries : lists
SCR features: onset indices, peak indices, amplitudes and half-recovery indices.
Example
----------
>>> import neurokit as nk
>>>
>>> onsets, peaks, amplitudes, recoveries = nk.eda_scr(eda_signal)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPy: https://github.com/PIA-Group/BioSPPy
References
-----------
- Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term monitoring of physiological signals. Medical and biological engineering and computing, 42(3), 419-427.
- Gamboa, H. (2008). Multi-Modal Behavioral Biometrics Based on HCI and Electrophysiology (Doctoral dissertation, PhD thesis, Universidade Técnica de Lisboa, Instituto Superior Técnico).
|
f10896:m2
|
def eda_EventRelated(epoch, event_length, window_post=<NUM_LIT:4>):
|
<EOL>EDA_Response = {}<EOL>window_end = event_length + window_post<EOL>if epoch.index[-<NUM_LIT:1>]-event_length < <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>" %(epoch.index[-<NUM_LIT:1>]-event_length))<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>baseline = epoch["<STR_LIT>"][<NUM_LIT:0>:<NUM_LIT:1>].min()<EOL>eda_peak = epoch["<STR_LIT>"][<NUM_LIT:1>:window_end].max()<EOL>EDA_Response["<STR_LIT>"] = eda_peak - baseline<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>peak_onset = epoch["<STR_LIT>"][<NUM_LIT:1>:window_end].idxmax()<EOL>if pd.notnull(peak_onset):<EOL><INDENT>amplitude = epoch["<STR_LIT>"][peak_onset:window_end].max()<EOL>peak_time = epoch["<STR_LIT>"][peak_onset:window_end].idxmax()<EOL>if pd.isnull(amplitude):<EOL><INDENT>magnitude = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>magnitude = amplitude<EOL><DEDENT>risetime = peak_time - peak_onset<EOL>if risetime > <NUM_LIT:0>:<EOL><INDENT>strength = magnitude/risetime<EOL><DEDENT>else:<EOL><INDENT>strength = np.nan<EOL><DEDENT>if pd.isnull(peak_time) is False:<EOL><INDENT>recovery = epoch["<STR_LIT>"][peak_time:window_end].idxmax() - peak_time<EOL><DEDENT>else:<EOL><INDENT>recovery = np.nan<EOL><DEDENT><DEDENT>else:<EOL><INDENT>amplitude = np.nan<EOL>magnitude = <NUM_LIT:0><EOL>risetime = np.nan<EOL>strength = np.nan<EOL>peak_time = np.nan<EOL>recovery = np.nan<EOL><DEDENT>EDA_Response["<STR_LIT>"] = amplitude<EOL>EDA_Response["<STR_LIT>"] = magnitude<EOL>EDA_Response["<STR_LIT>"] = np.log(<NUM_LIT:1>+amplitude)<EOL>EDA_Response["<STR_LIT>"] = np.log(<NUM_LIT:1>+magnitude)<EOL>EDA_Response["<STR_LIT>"] = peak_onset<EOL>EDA_Response["<STR_LIT>"] = peak_time<EOL>EDA_Response["<STR_LIT>"] = risetime<EOL>EDA_Response["<STR_LIT>"] = strength <EOL>EDA_Response["<STR_LIT>"] = recovery<EOL>
|
Extract event-related EDA and Skin Conductance Response (SCR).
Parameters
----------
epoch : pandas.DataFrame
An epoch contained in the epochs dict returned by :function:`neurokit.create_epochs()` on a dataframe returned by :function:`neurokit.bio_process()`. Index must range from -4s to +4s (relative to event onset and end).
event_length : int
Event's length in seconds.
window_post : float
Post-stimulus window size (in seconds) to include late responses (usually 3 or 4).
Returns
----------
EDA_Response : dict
Event-related EDA response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> bio_response = nk.bio_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
**Looking for help**: *Experimental*: respiration artifact correction still needs to be implemented.
*Details*
- **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
- **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
- **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
- **SCR_Amplitude_Log**: log of 1+amplitude.
- **SCR_Magnitude_Log**: log of 1+magnitude.
- **SCR_PeakTime**: Time of peak.
- **SCR_Latency**: Time between stim onset and SCR onset.
- **SCR_RiseTime**: Time between SCR onset and peak.
- **SCR_Strength**: *Experimental*: magnitude divided by rise time. Reflects the angle of the line between onset and peak.
- **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
- https://www.biopac.com/wp-content/uploads/EDA-SCR-Analysis.pdf
References
-----------
- Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
- Leiner, D., Fahr, A., & Früh, H. (2012). EDA positive change: A simple algorithm for electrodermal activity to measure general audience arousal during media exposure. Communication Methods and Measures, 6(4), 237-250.
|
f10896:m3
|
def emg_process(emg, sampling_rate=<NUM_LIT:1000>, emg_names=None, envelope_freqs=[<NUM_LIT:10>, <NUM_LIT>], envelope_lfreq=<NUM_LIT:4>, activation_treshold="<STR_LIT:default>", activation_n_above=<NUM_LIT>, activation_n_below=<NUM_LIT:1>):
|
if emg_names is None:<EOL><INDENT>if isinstance(emg, pd.DataFrame):<EOL><INDENT>emg_names = emg.columns.values<EOL><DEDENT><DEDENT>emg = np.array(emg)<EOL>if len(np.shape(emg)) == <NUM_LIT:1>:<EOL><INDENT>emg = np.array(pd.DataFrame(emg))<EOL><DEDENT>if emg_names is None:<EOL><INDENT>if np.shape(emg)[<NUM_LIT:1>]><NUM_LIT:1>:<EOL><INDENT>emg_names = []<EOL>for index in range(np.shape(emg)[<NUM_LIT:1>]):<EOL><INDENT>emg_names.append("<STR_LIT>" + str(index))<EOL><DEDENT><DEDENT>else:<EOL><INDENT>emg_names = ["<STR_LIT>"]<EOL><DEDENT><DEDENT>processed_emg = {"<STR_LIT>": pd.DataFrame()}<EOL>for index, emg_chan in enumerate(emg.T):<EOL><INDENT>processed_emg["<STR_LIT>"][emg_names[index] + "<STR_LIT>"] = emg_chan<EOL>biosppy_emg = dict(biosppy.emg.emg(emg_chan, sampling_rate=sampling_rate, show=False))<EOL>pulse_onsets = np.array([np.nan]*len(emg))<EOL>if len(biosppy_emg['<STR_LIT>']) > <NUM_LIT:0>:<EOL><INDENT>pulse_onsets[biosppy_emg['<STR_LIT>']] = <NUM_LIT:1><EOL><DEDENT>processed_emg["<STR_LIT>"][emg_names[index] + "<STR_LIT>"] = pulse_onsets<EOL>processed_emg["<STR_LIT>"][emg_names[index] + "<STR_LIT>"] = biosppy_emg["<STR_LIT>"]<EOL>processed_emg[emg_names[index]] = {}<EOL>processed_emg[emg_names[index]]["<STR_LIT>"] = biosppy_emg['<STR_LIT>']<EOL>envelope = emg_linear_envelope(biosppy_emg["<STR_LIT>"], sampling_rate=sampling_rate, freqs=envelope_freqs, lfreq=envelope_lfreq)<EOL>processed_emg["<STR_LIT>"][emg_names[index] + "<STR_LIT>"] = envelope<EOL>if activation_treshold == "<STR_LIT:default>":<EOL><INDENT>activation_treshold = <NUM_LIT:1>*np.std(envelope)<EOL><DEDENT>processed_emg["<STR_LIT>"][emg_names[index] + "<STR_LIT>"] = emg_find_activation(envelope, sampling_rate=sampling_rate, threshold=<NUM_LIT:1>*np.std(envelope), n_above=activation_n_above, n_below=activation_n_below)<EOL><DEDENT>return(processed_emg)<EOL>
|
Automated processing of EMG signal.
Parameters
----------
emg : list, array or DataFrame
EMG signal array. Can include multiple channels.
sampling_rate : int
Sampling rate (samples/second).
emg_names : list
List of EMG channel names.
envelope_freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
envelope_lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
activation_treshold : float or "default"
Minimum amplitude of the linear envelope to detect muscle activation ("default" uses one standard deviation of the envelope).
activation_n_above : float
Minimum continuous time (in s) that the envelope must stay at or above the threshold for an activation to be detected (but see `activation_n_below`).
activation_n_below : float
Minimum time (in s) below the threshold that will be ignored in the detection (shorter dips are bridged).
Returns
----------
processed_emg : dict
Dict containing processed EMG features.
Contains the EMG raw signal, the filtered signal and pulse onsets.
This function is mainly a wrapper for the biosppy.emg.emg() function. Credits go to its authors.
Example
----------
>>> import neurokit as nk
>>>
>>> processed_emg = nk.emg_process(emg_signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPy: https://github.com/PIA-Group/BioSPPy
References
-----------
- None
|
f10897:m0
|
def emg_tkeo(emg):
|
emg = np.asarray(emg)<EOL>tkeo = np.copy(emg)<EOL>tkeo[<NUM_LIT:1>:-<NUM_LIT:1>] = emg[<NUM_LIT:1>:-<NUM_LIT:1>]*emg[<NUM_LIT:1>:-<NUM_LIT:1>] - emg[:-<NUM_LIT:2>]*emg[<NUM_LIT:2>:]<EOL>tkeo[<NUM_LIT:0>], tkeo[-<NUM_LIT:1>] = tkeo[<NUM_LIT:1>], tkeo[-<NUM_LIT:2>]<EOL>return(tkeo)<EOL>
|
Calculates the Teager–Kaiser Energy operator.
Parameters
----------
emg : array
raw EMG signal.
Returns
-------
tkeo : 1D array_like
signal processed by the Teager–Kaiser Energy operator.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
|
f10897:m1
|
def emg_linear_envelope(emg, sampling_rate=<NUM_LIT:1000>, freqs=[<NUM_LIT:10>, <NUM_LIT>], lfreq=<NUM_LIT:4>):
|
emg = emg_tkeo(emg)<EOL>if np.size(freqs) == <NUM_LIT:2>:<EOL><INDENT>b, a = scipy.signal.butter(<NUM_LIT:2>, np.array(freqs)/(sampling_rate/<NUM_LIT>), btype = '<STR_LIT>')<EOL>emg = scipy.signal.filtfilt(b, a, emg)<EOL><DEDENT>if np.size(lfreq) == <NUM_LIT:1>:<EOL><INDENT>envelope = abs(emg)<EOL>b, a = scipy.signal.butter(<NUM_LIT:2>, np.array(lfreq)/(sampling_rate/<NUM_LIT>), btype = '<STR_LIT>')<EOL>envelope = scipy.signal.filtfilt(b, a, envelope)<EOL><DEDENT>return (envelope)<EOL>
|
r"""Calculate the linear envelope of a signal.
Parameters
----------
emg : array
raw EMG signal.
sampling_rate : int
Sampling rate (samples/second).
freqs : list [fc_h, fc_l], optional
cutoff frequencies for the band-pass filter (in Hz).
lfreq : number, optional
cutoff frequency for the low-pass filter (in Hz).
Returns
-------
envelope : array
linear envelope of the signal.
Notes
-----
*Authors*
- Marcos Duarte
*See Also*
See this notebook [1]_.
References
----------
.. [1] https://github.com/demotu/BMC/blob/master/notebooks/Electromyography.ipynb
|
f10897:m2
|
def emg_find_activation(envelope, sampling_rate=<NUM_LIT:1000>, threshold=<NUM_LIT:0>, n_above=<NUM_LIT>, n_below=<NUM_LIT:1>):
|
n_above = n_above*sampling_rate<EOL>n_below = n_below*sampling_rate<EOL>envelope = np.atleast_1d(envelope).astype('<STR_LIT>')<EOL>envelope[np.isnan(envelope)] = -np.inf<EOL>inds = np.nonzero(envelope >= threshold)[<NUM_LIT:0>]<EOL>if inds.size:<EOL><INDENT>inds = np.vstack((inds[np.diff(np.hstack((-np.inf, inds))) > n_below+<NUM_LIT:1>],inds[np.diff(np.hstack((inds, np.inf))) > n_below+<NUM_LIT:1>])).T<EOL>inds = inds[inds[:, <NUM_LIT:1>]-inds[:, <NUM_LIT:0>] >= n_above-<NUM_LIT:1>, :]<EOL><DEDENT>if not inds.size:<EOL><INDENT>inds = np.array([]) <EOL><DEDENT>inds = np.array(inds)<EOL>activation = np.array([<NUM_LIT:0>]*len(envelope))<EOL>for i in inds:<EOL><INDENT>activation[i[<NUM_LIT:0>]:i[<NUM_LIT:1>]] = <NUM_LIT:1><EOL><DEDENT>return (activation)<EOL>
|
Detects onset in data based on amplitude threshold.
Parameters
----------
envelope : array
Linear envelope of EMG signal.
sampling_rate : int
Sampling rate (samples/second).
threshold : float
Minimum amplitude of the envelope to detect.
n_above : float
Minimum continuous time (in s) at or above `threshold` required to detect an activation (but see the parameter `n_below`).
n_below : float
Minimum time (in s) below `threshold` that will be ignored in the detection (shorter dips are bridged).
Returns
-------
activation : array
With 1 when muscle activated and 0 when not.
Notes
-----
You might have to tune the parameters according to the signal-to-noise
characteristic of the data.
See this IPython Notebook [1]_.
References
----------
.. [1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectOnset.ipynb
|
f10897:m3
|
def ecg_process(ecg, rsp=None, sampling_rate=<NUM_LIT:1000>, filter_type="<STR_LIT>", filter_band="<STR_LIT>", filter_frequency=[<NUM_LIT:3>, <NUM_LIT>], segmenter="<STR_LIT>", quality_model="<STR_LIT:default>", hrv_features=["<STR_LIT:time>", "<STR_LIT>"], age=None, sex=None, position=None):
|
<EOL>processed_ecg = ecg_preprocess(ecg,<EOL>sampling_rate=sampling_rate,<EOL>filter_type=filter_type,<EOL>filter_band=filter_band,<EOL>filter_frequency=filter_frequency,<EOL>segmenter=segmenter)<EOL>if quality_model is not None:<EOL><INDENT>quality = ecg_signal_quality(cardiac_cycles=processed_ecg["<STR_LIT>"]["<STR_LIT>"], sampling_rate=sampling_rate, rpeaks=processed_ecg["<STR_LIT>"]["<STR_LIT>"], quality_model=quality_model)<EOL>processed_ecg["<STR_LIT>"].update(quality)<EOL>processed_ecg["<STR_LIT>"] = pd.concat([processed_ecg["<STR_LIT>"], quality["<STR_LIT>"]], axis=<NUM_LIT:1>)<EOL><DEDENT>if hrv_features is not None:<EOL><INDENT>hrv = ecg_hrv(rpeaks=processed_ecg["<STR_LIT>"]["<STR_LIT>"], sampling_rate=sampling_rate, hrv_features=hrv_features)<EOL>try:<EOL><INDENT>processed_ecg["<STR_LIT>"] = pd.concat([processed_ecg["<STR_LIT>"], hrv.pop("<STR_LIT>")], axis=<NUM_LIT:1>)<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT>processed_ecg["<STR_LIT>"]["<STR_LIT>"] = hrv<EOL>if age is not None and sex is not None and position is not None:<EOL><INDENT>processed_ecg["<STR_LIT>"]["<STR_LIT>"] = ecg_hrv_assessment(hrv, age, sex, position)<EOL><DEDENT><DEDENT>if rsp is not None:<EOL><INDENT>rsp = rsp_process(rsp=rsp, sampling_rate=sampling_rate)<EOL>processed_ecg["<STR_LIT>"] = rsp["<STR_LIT>"]<EOL>processed_ecg["<STR_LIT>"] = pd.concat([processed_ecg["<STR_LIT>"], rsp["<STR_LIT>"]], axis=<NUM_LIT:1>)<EOL>rsa = ecg_rsa(processed_ecg["<STR_LIT>"]["<STR_LIT>"], rsp["<STR_LIT>"]["<STR_LIT>"], sampling_rate=sampling_rate)<EOL>processed_ecg["<STR_LIT>"]["<STR_LIT>"] = rsa<EOL>processed_ecg["<STR_LIT>"] = pd.concat([processed_ecg["<STR_LIT>"], rsa.pop("<STR_LIT>")], axis=<NUM_LIT:1>)<EOL><DEDENT>return(processed_ecg)<EOL>
|
Automated processing of ECG and RSP signals.
Parameters
----------
ecg : list or ndarray
ECG signal array.
rsp : list or ndarray
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
filter_type : str
Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel").
filter_band : str
Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
filter_frequency : int or list
Cutoff frequencies; format depends on the type of band: "lowpass" or "highpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
segmenter : str
The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov" or "ssf". See :func:`neurokit.ecg_preprocess()` for details.
quality_model : str
Path to model used to check signal quality. "default" uses the builtin model. None to skip this function.
hrv_features : list
What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. None to skip this function.
age : float
Subject's age for adjusted HRV.
sex : str
Subject's gender ("m" or "f") for adjusted HRV.
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
Returns
----------
processed_ecg : dict
Dict containing processed ECG features.
Contains the ECG raw signal, the filtered signal, the R peaks indexes, HRV features, all the heartbeats, the Heart Rate, the RSP filtered signal (if respiration provided) and the respiratory sinus arrhythmia (RSA).
Example
----------
>>> import neurokit as nk
>>> processed_ecg = nk.ecg_process(ecg_signal, resp_signal)
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents the wave of depolarization that spreads from the SA-node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart, in terms of mass, the QRS complex usually has a much larger amplitude than the P-wave. The T wave represents the repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave. The U wave is believed to be related to the last remnants of ventricular repolarization.
- **RSA**: Respiratory sinus arrhythmia (RSA) is a naturally occurring variation in heart rate that occurs during the breathing cycle, serving as a measure of parasympathetic nervous system activity. See :func:`neurokit.ecg_rsa()` for details.
- **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, HRV is influenced by several factors such as chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods. See :func:`neurokit.ecg_hrv()` for a description of indices.
- **Adjusted HRV**: The raw HRV features are normalized :math:`(raw - Mcluster) / sd` according to the participant's age and gender. In data from Voss et al. (2015), HRV analysis was performed on 5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz sample rate) obtained in supine position after a 5–10 minutes resting phase. The cohort of healthy subjects consisted of 782 women and 1124 men between the ages of 25 and 74 years, clustered into 4 groups: YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744), EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).
- **Systole/Diastole**: One prominent channel of body and brain communication is that conveyed by baroreceptors, pressure and stretch-sensitive receptors within the heart and surrounding arteries. Within each cardiac cycle, bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract. This is the principal route that communicates to the brain the dynamic state of the heart, enabling the representation of cardiovascular arousal within viscerosensory brain regions, and influence ascending neuromodulator systems implicated in emotional and motivational behaviour. Because arterial baroreceptors are activated by the arterial pulse pressure wave, their phasic discharge is maximal during and immediately after the cardiac systole, that is, when the blood is ejected from the heart, and minimal during cardiac diastole, that is, between heartbeats (Azevedo, 2017).
- **ECG Signal Quality**: Using the PTB-Diagnostic dataset available from PhysioNet, we extracted all the ECG signals from the healthy participants, which contained 15 recording leads/subject. We extracted all cardiac cycles, for each lead, and downsampled them from 600 to 200 datapoints. Note that we dropped the first 8 values, which were NaNs. Then, we fitted a neural network model on 2/3 of the dataset (that contains 134392 cardiac cycles) to predict the lead. Model evaluation was done on the remaining 1/3. The model shows good performance in predicting the correct recording lead (accuracy=0.91, precision=0.91). In this function, this model is applied to each cardiac cycle of the provided ECG signal. It returns the probable recording lead (the most common predicted lead), the signal quality of each cardiac cycle (the probability of belonging to the probable recording lead) and the overall signal quality (the mean of signal quality). See creation `scripts <https://github.com/neuropsychology/NeuroKit.py/tree/master/utils/ecg_signal_quality_model_creation>`_.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Rhenan Bartels (https://github.com/rhenanbartels)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
- hrv: https://github.com/rhenanbartels/hrv
- RHRV: http://rhrv.r-forge.r-project.org/
References
------------
- Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
- Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
- Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
|
f10898:m0
|
def ecg_rsa(rpeaks, rsp, sampling_rate=<NUM_LIT:1000>):
|
<EOL>rsp_cycles = rsp_find_cycles(rsp)<EOL>rsp_onsets = rsp_cycles["<STR_LIT>"]<EOL>rsp_cycle_center = rsp_cycles["<STR_LIT>"]<EOL>rsp_cycle_center = np.array(rsp_cycle_center)[rsp_cycle_center > rsp_onsets[<NUM_LIT:0>]]<EOL>if len(rsp_cycle_center) - len(rsp_onsets) == <NUM_LIT:0>:<EOL><INDENT>rsp_cycle_center = rsp_cycle_center[:-<NUM_LIT:1>]<EOL><DEDENT>if len(rsp_cycle_center) - len(rsp_onsets) != -<NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>")<EOL>return()<EOL><DEDENT>rsa = {}<EOL>cycles_rri = []<EOL>for idx in range(len(rsp_onsets) - <NUM_LIT:1>):<EOL><INDENT>cycle_init = rsp_onsets[idx]<EOL>cycle_end = rsp_onsets[idx + <NUM_LIT:1>]<EOL>cycles_rri.append(rpeaks[np.logical_and(rpeaks >= cycle_init,<EOL>rpeaks < cycle_end)])<EOL><DEDENT>rsa["<STR_LIT>"] = []<EOL>for cycle in cycles_rri:<EOL><INDENT>RRis = np.diff(cycle)/sampling_rate<EOL>if len(RRis) > <NUM_LIT:1>:<EOL><INDENT>rsa["<STR_LIT>"].append(np.max(RRis) - np.min(RRis))<EOL><DEDENT>else:<EOL><INDENT>rsa["<STR_LIT>"].append(np.nan)<EOL><DEDENT><DEDENT>rsa["<STR_LIT>"] = pd.Series(rsa["<STR_LIT>"]).mean()<EOL>rsa["<STR_LIT>"] = np.log(rsa["<STR_LIT>"])<EOL>rsa["<STR_LIT>"] = pd.Series(rsa["<STR_LIT>"]).std()<EOL>if len(rsp_cycle_center) - len(rsa["<STR_LIT>"]) != <NUM_LIT:0>:<EOL><INDENT>print("<STR_LIT>")<EOL>return()<EOL><DEDENT>values=pd.Series(rsa["<STR_LIT>"])<EOL>NaNs_indices = values.index[values.isnull()] <EOL>values = values.drop(NaNs_indices) <EOL>value_times=(np.array(rsp_cycle_center))<EOL>value_times = np.delete(value_times, NaNs_indices) <EOL>rsa_interpolated = interpolate(values=values, value_times=value_times, sampling_rate=sampling_rate)<EOL>current_rsa = np.nan<EOL>continuous_rsa = []<EOL>phase_counter = <NUM_LIT:0><EOL>for i in range(len(rsp)):<EOL><INDENT>if i == rsp_onsets[phase_counter]:<EOL><INDENT>current_rsa = rsa["<STR_LIT>"][phase_counter]<EOL>if phase_counter < len(rsp_onsets)-<NUM_LIT:2>:<EOL><INDENT>phase_counter += <NUM_LIT:1><EOL><DEDENT><DEDENT>continuous_rsa.append(current_rsa)<EOL><DEDENT>continuous_rsa = np.array(continuous_rsa)<EOL>continuous_rsa[max(rsp_onsets):] = np.nan<EOL>df = pd.DataFrame({"<STR_LIT>":rsp})<EOL>df["<STR_LIT>"] = continuous_rsa<EOL>df["<STR_LIT>"] = rsa_interpolated<EOL>rsa["<STR_LIT>"] = df<EOL>return(rsa)<EOL>
|
Returns Respiratory Sinus Arrhythmia (RSA) features. Only the Peak-to-trough (P2T) algorithm is currently implemented (see details).
Parameters
----------
rpeaks : list or ndarray
List of R peaks indices.
rsp : list or ndarray
Filtered RSP signal.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
rsa : dict
Contains RSA features.
Example
----------
>>> import neurokit as nk
>>> rsa = nk.ecg_rsa(rpeaks, rsp)
Notes
----------
*Details*
- **RSA**: Respiratory sinus arrhythmia (RSA) is a naturally occurring variation in heart rate that occurs during the breathing cycle, serving as a measure of parasympathetic nervous system activity. Neurophysiology informs us that the functional output of the myelinated vagus originating from the nucleus ambiguus has a respiratory rhythm. Thus, there would be a temporal relation between the respiratory rhythm being expressed in the firing of these efferent pathways and the functional effect on the heart rate rhythm manifested as RSA. Several methods exist to quantify RSA:
- **P2T**: The peak to trough (P2T) method measures the statistical range in ms of the heart period oscillation associated with synchronous respiration. Operationally, subtracting the shortest heart period during inspiration from the longest heart period during a breath cycle produces an estimate of RSA during each breath. The peak-to-trough method makes no statistical assumption or correction (e.g., adaptive filtering) regarding other sources of variance in the heart period time series that may confound, distort, or interact with the metric such as slower periodicities and baseline trend. Although it has been proposed that the P2T method "acts as a time-domain filter dynamically centered at the exact ongoing respiratory frequency" (Grossman, 1992), the method does not transform the time series in any way, as a filtering process would. Instead the method uses knowledge of the ongoing respiratory cycle to associate segments of the heart period time series with either inhalation or exhalation (Lewis, 2012).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Rhenan Bartels (https://github.com/rhenanbartels)
*Dependencies*
- numpy
- pandas
References
------------
- Lewis, G. F., Furman, S. A., McCool, M. F., & Porges, S. W. (2012). Statistical strategies to quantify respiratory sinus arrhythmia: Are commonly used metrics equivalent?. Biological psychology, 89(2), 349-364.
|
f10898:m1
|
def ecg_signal_quality(cardiac_cycles, sampling_rate, rpeaks=None, quality_model="<STR_LIT:default>"):
|
if len(cardiac_cycles) > <NUM_LIT:200>:<EOL><INDENT>cardiac_cycles = cardiac_cycles.rolling(<NUM_LIT:20>).mean().resample("<STR_LIT>").pad()<EOL><DEDENT>if len(cardiac_cycles) < <NUM_LIT:200>:<EOL><INDENT>cardiac_cycles = cardiac_cycles.resample("<STR_LIT>").pad()<EOL>cardiac_cycles = cardiac_cycles.rolling(<NUM_LIT:20>).mean().resample("<STR_LIT>").pad()<EOL><DEDENT>if len(cardiac_cycles) < <NUM_LIT:200>:<EOL><INDENT>fill_dict = {}<EOL>for i in cardiac_cycles.columns:<EOL><INDENT>fill_dict[i] = [np.nan] * (<NUM_LIT:200>-len(cardiac_cycles))<EOL><DEDENT>cardiac_cycles = pd.concat([pd.DataFrame(fill_dict), cardiac_cycles], ignore_index=True)<EOL><DEDENT>cardiac_cycles = cardiac_cycles.fillna(method="<STR_LIT>")<EOL>cardiac_cycles = cardiac_cycles.reset_index(drop=True)[<NUM_LIT:8>:<NUM_LIT:200>]<EOL>cardiac_cycles = z_score(cardiac_cycles).T<EOL>cardiac_cycles = np.array(cardiac_cycles)<EOL>if quality_model == "<STR_LIT:default>":<EOL><INDENT>model = sklearn.externals.joblib.load(Path.materials() + '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>model = sklearn.externals.joblib.load(quality_model)<EOL><DEDENT>quality = {}<EOL>lead = model.predict(cardiac_cycles)<EOL>lead = pd.Series(lead).value_counts().index[<NUM_LIT:0>]<EOL>quality["<STR_LIT>"] = lead<EOL>predict = pd.DataFrame(model.predict_proba(cardiac_cycles))<EOL>predict.columns = model.classes_<EOL>quality["<STR_LIT>"] = predict[lead].values<EOL>quality["<STR_LIT>"] = predict[lead].mean()<EOL>if rpeaks is not None:<EOL><INDENT>signal = quality["<STR_LIT>"]<EOL>signal = interpolate(signal, rpeaks, sampling_rate) <EOL>signal.name = "<STR_LIT>"<EOL>quality["<STR_LIT>"] = signal<EOL><DEDENT>return(quality)<EOL>
|
Attempts to identify the recording lead and the overall and per-heartbeat quality of the ECG signal. Although used as a routine, this feature is experimental.
Parameters
----------
cardiac_cycles : pd.DataFrame
DataFrame containing heartbeats. Computed by :function:`neurokit.ecg_process`.
sampling_rate : int
Sampling rate (samples/second).
rpeaks : None or ndarray
R-peak location indices. Used for computing an interpolated signal of quality.
quality_model : str
Path to model used to check signal quality. "default" uses the builtin model.
Returns
----------
classification : dict
Contains classification features.
Example
----------
>>> import neurokit as nk
>>> quality = nk.ecg_signal_quality(cardiac_cycles, sampling_rate=1000, rpeaks=rpeaks)
Notes
----------
*Details*
- **ECG Signal Quality**: Using the PTB-Diagnostic dataset available from PhysioNet, we extracted all the ECG signals from the healthy participants, which contained 15 recording leads/subject. We extracted all cardiac cycles, for each lead, and downsampled them from 600 to 200 datapoints. Note that we dropped the first 8 values, which were NaNs. Then, we fitted a neural network model on 2/3 of the dataset (that contains 134392 cardiac cycles) to predict the lead. Model evaluation was done on the remaining 1/3. The model shows good performance in predicting the correct recording lead (accuracy=0.91, precision=0.91). In this function, this model is applied to each cardiac cycle of the provided ECG signal. It returns the probable recording lead (the most common predicted lead), the signal quality of each cardiac cycle (the probability of belonging to the probable recording lead) and the overall signal quality (the mean of signal quality). See creation `scripts <https://github.com/neuropsychology/NeuroKit.py/tree/master/utils/ecg_signal_quality_model_creation>`_.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
|
f10898:m2
|
def ecg_hrv(rpeaks=None, rri=None, sampling_rate=<NUM_LIT:1000>, hrv_features=["<STR_LIT:time>", "<STR_LIT>", "<STR_LIT>"]):
|
<EOL>if rpeaks is None and rri is None:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if rpeaks is not None and rri is not None:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>hrv = {}<EOL>if rpeaks is not None:<EOL><INDENT>RRis = np.diff(rpeaks)<EOL><DEDENT>else:<EOL><INDENT>RRis = rri<EOL><DEDENT>RRis = RRis/sampling_rate<EOL>RRis = RRis.astype(float)<EOL>for index, rr in enumerate(RRis):<EOL><INDENT>if RRis[index] < RRis[index-<NUM_LIT:1>]*<NUM_LIT>:<EOL><INDENT>RRis[index] = np.nan<EOL><DEDENT>if RRis[index] > RRis[index-<NUM_LIT:1>]*<NUM_LIT>:<EOL><INDENT>RRis[index] = np.nan<EOL><DEDENT><DEDENT>RRis = pd.Series(RRis)<EOL>RRis[RRis < <NUM_LIT>] = np.nan<EOL>RRis[RRis > <NUM_LIT>] = np.nan<EOL>if len(RRis) <= <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>")<EOL>return(hrv)<EOL><DEDENT>hrv["<STR_LIT>"] = pd.isnull(RRis).sum()/len(RRis)<EOL>artifacts_indices = RRis.index[RRis.isnull()] <EOL>RRis = RRis.drop(artifacts_indices) <EOL>RRis = RRis*<NUM_LIT:1000><EOL>hrv["<STR_LIT>"] = RRis <EOL>if len(RRis) <= <NUM_LIT:1>:<EOL><INDENT>print("<STR_LIT>")<EOL>return(hrv)<EOL><DEDENT>if "<STR_LIT:time>" in hrv_features:<EOL><INDENT>hrv["<STR_LIT>"] = np.sqrt(np.mean(np.diff(RRis) ** <NUM_LIT:2>))<EOL>hrv["<STR_LIT>"] = np.mean(RRis)<EOL>hrv["<STR_LIT>"] = np.std(RRis, ddof=<NUM_LIT:1>) <EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"] / hrv["<STR_LIT>"]<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"] / hrv["<STR_LIT>"]<EOL>hrv["<STR_LIT>"] = np.median(abs(RRis))<EOL>hrv["<STR_LIT>"] = mad(RRis, constant=<NUM_LIT:1>)<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"] / hrv["<STR_LIT>"]<EOL>nn50 = sum(abs(np.diff(RRis)) > <NUM_LIT:50>)<EOL>nn20 = sum(abs(np.diff(RRis)) > <NUM_LIT:20>)<EOL>hrv["<STR_LIT>"] = nn50 / len(RRis) * <NUM_LIT:100><EOL>hrv["<STR_LIT>"] = nn20 / len(RRis) * <NUM_LIT:100><EOL><DEDENT>if "<STR_LIT>" in hrv_features:<EOL><INDENT>beats_times = rpeaks[<NUM_LIT:1>:].copy() <EOL>beats_times -= list(beats_times)[<NUM_LIT:0>] <EOL>beats_times = np.delete(list(beats_times), artifacts_indices) <EOL>try:<EOL><INDENT>RRi = interpolate(RRis, beats_times, sampling_rate) <EOL><DEDENT>except TypeError:<EOL><INDENT>print("<STR_LIT>")<EOL>return(hrv)<EOL><DEDENT>hrv["<STR_LIT>"] = RRi.to_frame("<STR_LIT>") <EOL>try:<EOL><INDENT>bin_number = <NUM_LIT:32> <EOL>for bin_number_current in range(<NUM_LIT:2>, <NUM_LIT:50>):<EOL><INDENT>bin_width = np.diff(np.histogram(RRi, bins=bin_number_current, density=True)[<NUM_LIT:1>])[<NUM_LIT:0>]<EOL>if abs(<NUM_LIT:8> - bin_width) < abs(<NUM_LIT:8> - np.diff(np.histogram(RRi, bins=bin_number, density=True)[<NUM_LIT:1>])[<NUM_LIT:0>]):<EOL><INDENT>bin_number = bin_number_current<EOL><DEDENT><DEDENT>hrv["<STR_LIT>"] = len(RRis)/np.max(np.histogram(RRi, bins=bin_number, density=True)[<NUM_LIT:0>])<EOL>hrv["<STR_LIT>"] = complexity_entropy_shannon(np.histogram(RRi, bins=bin_number, density=True)[<NUM_LIT:0>])<EOL><DEDENT>except ValueError:<EOL><INDENT>hrv["<STR_LIT>"] = np.nan<EOL>hrv["<STR_LIT>"] = np.nan<EOL><DEDENT>freq_bands = {<EOL>"<STR_LIT>": [<NUM_LIT>, <NUM_LIT>],<EOL>"<STR_LIT>": [<NUM_LIT>, <NUM_LIT>],<EOL>"<STR_LIT>": [<NUM_LIT>, <NUM_LIT>],<EOL>"<STR_LIT>": [<NUM_LIT>, <NUM_LIT>],<EOL>"<STR_LIT>": [<NUM_LIT>, <NUM_LIT:0.5>]}<EOL>freq_powers = {}<EOL>for band in freq_bands:<EOL><INDENT>freqs = freq_bands[band]<EOL>filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=RRi, ftype='<STR_LIT>', band='<STR_LIT>', order=<NUM_LIT:1>, frequency=freqs, sampling_rate=sampling_rate)<EOL>amplitude, phase = 
biosppy.signals.tools.analytic_signal(filtered)<EOL>freq_powers["<STR_LIT>" + band] = amplitude<EOL><DEDENT>freq_powers = pd.DataFrame.from_dict(freq_powers)<EOL>freq_powers.index = hrv["<STR_LIT>"].index<EOL>hrv["<STR_LIT>"] = pd.concat([hrv["<STR_LIT>"], freq_powers], axis=<NUM_LIT:1>)<EOL>power, freq = mne.time_frequency.psd_array_multitaper(RRi, sfreq=sampling_rate, fmin=<NUM_LIT:0>, fmax=<NUM_LIT:0.5>, adaptive=False, normalization='<STR_LIT>')<EOL>def power_in_band(power, freq, band):<EOL><INDENT>power = np.trapz(y=power[(freq >= band[<NUM_LIT:0>]) & (freq < band[<NUM_LIT:1>])], x=freq[(freq >= band[<NUM_LIT:0>]) & (freq < band[<NUM_LIT:1>])])<EOL>return(power)<EOL><DEDENT>hrv["<STR_LIT>"] = power_in_band(power, freq, freq_bands["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = power_in_band(power, freq, freq_bands["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = power_in_band(power, freq, freq_bands["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = power_in_band(power, freq, freq_bands["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = power_in_band(power, freq, freq_bands["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = power_in_band(power, freq, [<NUM_LIT:0>, <NUM_LIT:0.5>])<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"]/(hrv["<STR_LIT>"]+hrv["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"]/(hrv["<STR_LIT>"]+hrv["<STR_LIT>"])<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"]/hrv["<STR_LIT>"]<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"]/hrv["<STR_LIT>"]<EOL>hrv["<STR_LIT>"] = hrv["<STR_LIT>"]/hrv["<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in hrv_features:<EOL><INDENT>if len(RRis) > <NUM_LIT>:<EOL><INDENT>hrv["<STR_LIT>"] = nolds.dfa(RRis, range(<NUM_LIT:4>, <NUM_LIT>))<EOL><DEDENT>if len(RRis) > <NUM_LIT>:<EOL><INDENT>hrv["<STR_LIT>"] = nolds.dfa(RRis, range(<NUM_LIT:16>, <NUM_LIT>))<EOL><DEDENT>hrv["<STR_LIT>"] = complexity_entropy_shannon(RRis)<EOL>hrv["<STR_LIT>"] = nolds.sampen(RRis, emb_dim=<NUM_LIT:2>)<EOL>try:<EOL><INDENT>hrv["<STR_LIT>"] = nolds.corr_dim(RRis, emb_dim=<NUM_LIT:2>)<EOL><DEDENT>except AssertionError as error:<EOL><INDENT>print("<STR_LIT>" + str(error))<EOL>hrv["<STR_LIT>"] = np.nan<EOL><DEDENT>mse = complexity_entropy_multiscale(RRis, max_scale_factor=<NUM_LIT:20>, m=<NUM_LIT:2>)<EOL>hrv["<STR_LIT>"] = mse["<STR_LIT>"]<EOL>hrv["<STR_LIT>"] = complexity_entropy_svd(RRis, emb_dim=<NUM_LIT:2>)<EOL>hrv["<STR_LIT>"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>))<EOL>hrv["<STR_LIT>"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>))<EOL>hrv["<STR_LIT>"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(<NUM_LIT>, <NUM_LIT>, <NUM_LIT>))<EOL>hrv["<STR_LIT>"] = complexity_fisher_info(RRis, tau=<NUM_LIT:1>, emb_dim=<NUM_LIT:2>)<EOL><INDENT>lyap exp doesn't work for some reasons<EOL>hrv["<STR_LIT>"] = np.max(nolds.lyap_e(RRis, emb_dim=<NUM_LIT>, matrix_dim=<NUM_LIT:4>))<EOL><DEDENT>hrv["<STR_LIT>"] = complexity_fd_petrosian(RRis)<EOL>hrv["<STR_LIT>"] = complexity_fd_higushi(RRis, k_max=<NUM_LIT:16>)<EOL><DEDENT>return(hrv)<EOL>
|
Computes the Heart-Rate Variability (HRV). Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_ package by Rhenan Bartels. All credits go to him.
Parameters
----------
rpeaks : list or ndarray
R-peak location indices.
rri : list or ndarray
RR intervals (in samples). If this argument is passed, `rpeaks` should not be passed.
sampling_rate : int
Sampling rate (samples/second).
hrv_features : list
What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'.
Returns
----------
hrv : dict
Contains hrv features and percentage of detected artifacts.
Example
----------
>>> import neurokit as nk
>>> sampling_rate = 1000
>>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate)
Notes
----------
*Details*
- **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, HRV is influenced by several factors such as chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods.
- **sdNN**: The standard deviation of the time interval between successive normal heart beats (*i.e.*, the RR intervals). Reflects all influences on HRV including slow influences across the day, circadian variations, the effect of hormonal influences such as cortisol and epinephrine. It should be noted that total variance of HRV increases with the length of the analyzed recording.
- **meanNN**: The mean RR interval.
- **CVSD**: The coefficient of variation of successive differences (van Dellen et al., 1985), the RMSSD divided by meanNN.
- **cvNN**: The Coefficient of Variation, *i.e.* the ratio of sdNN divided by meanNN.
- **RMSSD** is the root mean square of the RR intervals (*i.e.*, square root of the mean of the squared differences in time between successive normal heart beats). Reflects high frequency (fast or parasympathetic) influences on HRV (*i.e.*, those influencing larger changes from one beat to the next).
- **medianNN**: Median of the absolute values of the RR intervals.
- **madNN**: Median Absolute Deviation (MAD) of the RR intervals.
- **mcvNN**: Median-based Coefficient of Variation, *i.e.* the ratio of madNN divided by medianNN.
- **pNN50**: The proportion derived by dividing NN50 (The number of interval differences of successive RR intervals greater than 50 ms) by the total number of RR intervals.
- **pNN20**: The proportion derived by dividing NN20 (The number of interval differences of successive RR intervals greater than 20 ms) by the total number of RR intervals.
- **Triang**: The HRV triangular index measurement is the integral of the density distribution (that is, the number of all RR intervals) divided by the maximum of the density distribution (class width of 8ms).
- **Shannon_h**: Shannon Entropy calculated on the basis of the class probabilities pi (i = 1,...,n with n—number of classes) of the NN interval density distribution (class width of 8 ms resulting in a smoothed histogram suitable for HRV analysis).
- **VLF** is the variance (*i.e.*, power) in HRV in the Very Low Frequency (.003 to .04 Hz). Reflects an intrinsic rhythm produced by the heart which is modulated primarily by sympathetic activity.
- **LF** is the variance (*i.e.*, power) in HRV in the Low Frequency (.04 to .15 Hz). Reflects a mixture of sympathetic and parasympathetic activity, but in long-term recordings like ours, it reflects sympathetic activity and can be reduced by the beta-adrenergic antagonist propranolol (McCraty & Atkinson, 1996).
- **HF** is the variance (*i.e.*, power) in HRV in the High Frequency (.15 to .40 Hz). Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. Sometimes called the respiratory band because it corresponds to HRV changes related to the respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per minute) (Kawachi et al., 1995) and decreased by anticholinergic drugs or vagal blockade (Hainsworth, 1995).
- **Total_Power**: Total power of the density spectra.
- **LFHF**: The LF/HF ratio is sometimes used by some investigators as a quantitative mirror of the sympatho/vagal balance.
- **LFn**: normalized LF power LFn = LF/(LF+HF).
- **HFn**: normalized HF power HFn = HF/(LF+HF).
- **LFp**: ratio between LF and Total_Power.
- **HFp**: ratio between HF and Total_Power.
- **DFA**: Detrended fluctuation analysis (DFA) introduced by Peng et al. (1995) quantifies the fractal scaling properties of time series. DFA_1 is the short-term fractal scaling exponent calculated over n = 4–16 beats, and DFA_2 is the long-term fractal scaling exponent calculated over n = 16–64 beats.
- **Shannon**: Shannon Entropy over the RR intervals array.
- **Sample_Entropy**: Sample Entropy (SampEn) over the RR intervals array with emb_dim=2.
- **Correlation_Dimension**: Correlation Dimension over the RR intervals array with emb_dim=2.
- **Entropy_Multiscale**: Multiscale Entropy over the RR intervals array with emb_dim=2.
- **Entropy_SVD**: SVD Entropy over the RR intervals array with emb_dim=2.
- **Entropy_Spectral_VLF**: Spectral Entropy over the RR intervals array in the very low frequency (0.003-0.04).
- **Entropy_Spectral_LF**: Spectral Entropy over the RR intervals array in the low frequency (0.04-0.15).
- **Entropy_Spectral_HF**: Spectral Entropy over the RR intervals array in the high frequency (0.15-0.40).
- **Fisher_Info**: Fisher information over the RR intervals array with tau=1 and emb_dim=2.
- **Lyapunov**: Lyapunov Exponent over the RR intervals array with emb_dim=58 and matrix_dim=4.
- **FD_Petrosian**: Petrosian's Fractal Dimension over the RR intervals.
- **FD_Higushi**: Higushi's Fractal Dimension over the RR intervals array with k_max=16.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Rhenan Bartels (https://github.com/rhenanbartels)
*Dependencies*
- scipy
- numpy
*See Also*
- RHRV: http://rhrv.r-forge.r-project.org/
References
-----------
- Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
- Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
- Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
- Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418.
- Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. Frontiers in physiology, 3.
|
f10898:m3
|
def ecg_hrv_assessment(hrv, age=None, sex=None, position=None):
|
hrv_adjusted = {}<EOL>if position == "<STR_LIT>":<EOL><INDENT>if sex == "<STR_LIT:m>":<EOL><INDENT>if age <= <NUM_LIT>:<EOL><INDENT>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL><DEDENT><DEDENT>if sex == "<STR_LIT:f>":<EOL><INDENT>if age <= <NUM_LIT>:<EOL><INDENT>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL>hrv_adjusted["<STR_LIT>"] = (hrv["<STR_LIT>"]-<NUM_LIT>)/<NUM_LIT><EOL><DEDENT><DEDENT><DEDENT>return(hrv_adjusted)<EOL>
|
Correct HRV features based on normative data from Voss et al. (2015).
Parameters
----------
hrv : dict
HRV features obtained by :function:`neurokit.ecg_hrv`.
age : float
Subject's age.
sex : str
Subject's gender ("m" or "f").
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
Returns
----------
hrv_adjusted : dict
Adjusted HRV features.
Example
----------
>>> import neurokit as nk
>>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks)
>>> ecg_hrv_assessment = nk.bio_ecg.ecg_hrv_assessment(hrv)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Adjusted HRV**: The raw HRV features are normalized :math:`(raw - Mcluster) / sd` according to the participant's age and gender. In data from Voss et al. (2015), HRV analysis was performed on 5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz sampling rate) obtained in supine position after a 5–10 minutes resting phase. The cohort of healthy subjects consisted of 782 women and 1124 men between the ages of 25 and 74 years, clustered into 4 groups: YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744), EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).
References
-----------
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
|
f10898:m4
|
def ecg_EventRelated(epoch, event_length=<NUM_LIT:1>, window_post=<NUM_LIT:0>, features=["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]):
|
def compute_features(variable, prefix, response):<EOL><INDENT>"""<STR_LIT>"""<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>]<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>:window_end].min()<EOL>response[prefix + "<STR_LIT>"] = response[prefix + "<STR_LIT>"] - response[prefix + "<STR_LIT>"]<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>:window_end].idxmin()<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>:window_end].max()<EOL>response[prefix + "<STR_LIT>"] = response[prefix + "<STR_LIT>"] - response[prefix + "<STR_LIT>"]<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>:window_end].idxmax()<EOL>response[prefix + "<STR_LIT>"] = epoch[variable][<NUM_LIT:0>:window_end].mean()<EOL>response[prefix + "<STR_LIT>"] = response[prefix + "<STR_LIT>"] - response[prefix + "<STR_LIT>"]<EOL>return(response)<EOL><DEDENT>ECG_Response = {}<EOL>window_end = event_length + window_post<EOL>if "<STR_LIT>" in features:<EOL><INDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in features:<EOL><INDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response["<STR_LIT>"] = epoch["<STR_LIT>"][<NUM_LIT:0>]<EOL>systole_beg = np.nan<EOL>systole_end = np.nan<EOL>for i in epoch[<NUM_LIT:0>:window_end].index:<EOL><INDENT>if epoch["<STR_LIT>"][i] != ECG_Response["<STR_LIT>"]:<EOL><INDENT>systole_end = i<EOL>break<EOL><DEDENT><DEDENT>for i in epoch[:<NUM_LIT:0>].index[::-<NUM_LIT:1>]:<EOL><INDENT>if epoch["<STR_LIT>"][i] != ECG_Response["<STR_LIT>"]:<EOL><INDENT>systole_beg = i<EOL>break<EOL><DEDENT><DEDENT>ECG_Response["<STR_LIT>"] = -<NUM_LIT:1>*systole_beg/(systole_end - systole_beg)*<NUM_LIT:100><EOL><DEDENT><DEDENT>if "<STR_LIT>" in features:<EOL><INDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in features:<EOL><INDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT><DEDENT>if "<STR_LIT>" in features:<EOL><INDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>rpeaks = epoch[epoch["<STR_LIT>"]==<NUM_LIT:1>][<NUM_LIT:0>:event_length].index*<NUM_LIT:1000><EOL>hrv = ecg_hrv(rpeaks=rpeaks, sampling_rate=<NUM_LIT:1000>, hrv_features=["<STR_LIT:time>"])<EOL>for key in hrv:<EOL><INDENT>if isinstance(hrv[key], float): <EOL><INDENT>ECG_Response["<STR_LIT>" + key] = hrv[key]<EOL><DEDENT><DEDENT>if epoch.index[<NUM_LIT:0>] > -<NUM_LIT:4>: <EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>rpeaks = epoch[epoch["<STR_LIT>"]==<NUM_LIT:1>][:<NUM_LIT:0>].index*<NUM_LIT:1000><EOL>hrv = ecg_hrv(rpeaks=rpeaks, sampling_rate=<NUM_LIT:1000>, hrv_features=["<STR_LIT:time>"])<EOL>for key in hrv:<EOL><INDENT>if isinstance(hrv[key], float): <EOL><INDENT>ECG_Response["<STR_LIT>" + key + "<STR_LIT>"] = hrv[key]<EOL><DEDENT><DEDENT>keys = [key for key in ECG_Response.keys() if '<STR_LIT>' in key] <EOL>keys = [key for key in keys if '<STR_LIT>' in key]<EOL>keys = [s.replace('<STR_LIT>', '<STR_LIT>') for s in keys] <EOL>for key in keys:<EOL><INDENT>try:<EOL><INDENT>ECG_Response[key + "<STR_LIT>"] = ECG_Response[key] - ECG_Response[key + "<STR_LIT>"]<EOL><DEDENT>except KeyError:<EOL><INDENT>ECG_Response[key + "<STR_LIT>"] = np.nan<EOL><DEDENT><DEDENT><DEDENT><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", 
"<STR_LIT>", ECG_Response)<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>ECG_Response = compute_features("<STR_LIT>", "<STR_LIT>", ECG_Response)<EOL><DEDENT><DEDENT>return(ECG_Response)<EOL>
|
Extract event-related ECG changes.
Parameters
----------
epoch : pandas.DataFrame
An epoch contained in the epochs dict returned by :function:`neurokit.create_epochs()` on the dataframe returned by :function:`neurokit.bio_process()`. Index should range from -4s to +4s (relative to event onset and end).
event_length : int
Event length in seconds.
window_post : float
Post-stimulus window size (in seconds) to include late responses (usually 3 or 4).
features : list
List of ECG features to compute, can contain "Heart_Rate", "Cardiac_Phase", "RR_Interval", "RSA", "HRV".
Returns
----------
ECG_Response : dict
Event-related ECG response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> ecg_response = nk.ecg_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
*Details*
- ***_Baseline**: Signal at onset.
- ***_Min**: Minimum of signal after stimulus onset.
- ***_MinDiff**: Signal minimum - baseline.
- ***_MinTime**: Time of signal minimum.
- ***_Max**: Maximum of signal after stimulus onset.
- ***_MaxDiff**: Signal maximum - baseline.
- ***_MaxTime**: Time of signal maximum.
- ***_Mean**: Mean signal after stimulus onset.
- ***_MeanDiff**: Mean signal - baseline.
- **ECG_Phase_Systole**: Cardiac phase on stimulus onset (1 = systole, 0 = diastole).
- **ECG_Phase_Systole_Completion**: Percentage of cardiac phase completion on stimulus onset.
- **ECG_HRV_***: Time-domain HRV features. See :func:`neurokit.ecg_hrv()`.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
References
-----------
|
f10898:m5
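The Details above all follow one pattern: a statistic over the post-onset window plus its difference from the signal value at onset. A minimal sketch of that pattern with pandas, on a hypothetical epoch indexed in seconds relative to onset (the data, the 100 Hz index and the 1 s window are illustrative assumptions, not NeuroKit's internals):

import numpy as np
import pandas as pd

# Hypothetical epoch: heart rate from -4 s to +4 s around stimulus onset, 100 Hz
index = np.arange(-400, 401) / 100.0
epoch = pd.Series(70 + 5 * np.sin(index), index=index)

window_end = 1                        # event_length + window_post, as in the function above
baseline = epoch.loc[0]               # signal value at onset
window = epoch.loc[0:window_end]      # post-onset window

features = {
    "Baseline": baseline,
    "Min": window.min(),   "MinDiff": window.min() - baseline,   "MinTime": window.idxmin(),
    "Max": window.max(),   "MaxDiff": window.max() - baseline,   "MaxTime": window.idxmax(),
    "Mean": window.mean(), "MeanDiff": window.mean() - baseline,
}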
|
def ecg_simulate(duration=<NUM_LIT:10>, sampling_rate=<NUM_LIT:1000>, bpm=<NUM_LIT>, noise=<NUM_LIT>):
|
<EOL>cardiac = scipy.signal.wavelets.daub(<NUM_LIT:10>)<EOL>cardiac = np.concatenate([cardiac, np.zeros(<NUM_LIT:10>)])<EOL>num_heart_beats = int(duration * bpm / <NUM_LIT>)<EOL>ecg = np.tile(cardiac , num_heart_beats)<EOL>noise = np.random.normal(<NUM_LIT:0>, noise, len(ecg))<EOL>ecg = noise + ecg<EOL>ecg = scipy.signal.resample(ecg, sampling_rate*duration)<EOL>return(ecg)<EOL>
|
Simulates an ECG signal.
Parameters
----------
duration : int
Desired recording length.
sampling_rate : int
Desired sampling rate.
bpm : int
Desired simulated heart rate.
noise : float
Desired noise level.
Returns
----------
ecg : ndarray
The simulated ECG signal.
Example
----------
>>> import neurokit as nk
>>> import pandas as pd
>>>
>>> ecg = nk.ecg_simulate(duration=10, bpm=60, sampling_rate=1000, noise=0.01)
>>> pd.Series(ecg).plot()
Notes
----------
*Authors*
- `Diarmaid O Cualain <https://github.com/diarmaidocualain>`_
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- scipy.signal
References
-----------
|
f10898:m6
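The body above suggests the simulation strategy: a Daubechies-10 wavelet (which loosely resembles a QRS complex) is padded with zeros, tiled once per expected heartbeat, corrupted with Gaussian noise and resampled to the requested length. A hedged restatement of that idea (the exact constants are masked in the body, so the literals below are assumptions; note that scipy.signal.wavelets.daub has been deprecated in recent SciPy releases):

import numpy as np
import scipy.signal

def simulate_ecg_sketch(duration=10, sampling_rate=1000, bpm=60, noise=0.01):
    # One template beat: Daubechies-10 wavelet followed by a short flat segment
    beat = np.concatenate([scipy.signal.wavelets.daub(10), np.zeros(10)])
    n_beats = int(duration * bpm / 60)                   # beats expected in the recording
    ecg = np.tile(beat, n_beats)                         # repeat the template once per beat
    ecg = ecg + np.random.normal(0, noise, len(ecg))     # additive Gaussian noise
    return scipy.signal.resample(ecg, int(sampling_rate * duration))

ecg = simulate_ecg_sketch()   # 10 s of synthetic ECG at 1000 Hz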
|
def read_acqknowledge(filename, path="<STR_LIT>", index="<STR_LIT>", sampling_rate="<STR_LIT>", resampling_method="<STR_LIT>", fill_interruptions=True, return_sampling_rate=True):
|
<EOL>file = path + filename<EOL>if "<STR_LIT>" not in file:<EOL><INDENT>file += "<STR_LIT>"<EOL><DEDENT>if os.path.exists(file) is False:<EOL><INDENT>print("<STR_LIT>" + filename)<EOL>return()<EOL><DEDENT>creation_date = find_creation_date(file)<EOL>creation_date = datetime.datetime.fromtimestamp(creation_date)<EOL>file = bioread.read(file)<EOL>freq_list = []<EOL>for channel in file.named_channels:<EOL><INDENT>freq_list.append(file.named_channels[channel].samples_per_second)<EOL><DEDENT>data = {}<EOL>data_else = {}<EOL>for channel in file.named_channels:<EOL><INDENT>if file.named_channels[channel].samples_per_second == max(freq_list):<EOL><INDENT>data[channel] = file.named_channels[channel].data<EOL><DEDENT>else:<EOL><INDENT>data_else[channel] = file.named_channels[channel].data<EOL><DEDENT><DEDENT>time = []<EOL>beginning_date = creation_date - datetime.timedelta(<NUM_LIT:0>, max(file.time_index))<EOL>for timestamps in file.time_index:<EOL><INDENT>time.append(beginning_date + datetime.timedelta(<NUM_LIT:0>, timestamps))<EOL><DEDENT>df = pd.DataFrame(data, index=time)<EOL>if len(data_else.keys()) > <NUM_LIT:0>: <EOL><INDENT>for channel in data_else:<EOL><INDENT>channel_frequency = file.named_channels[channel].samples_per_second<EOL>serie = data_else[channel]<EOL>index = list(np.arange(<NUM_LIT:0>, max(file.time_index), <NUM_LIT:1>/channel_frequency))<EOL>index = index[:len(serie)]<EOL>time = []<EOL>for timestamps in index:<EOL><INDENT>time.append(beginning_date + datetime.timedelta(<NUM_LIT:0>, timestamps))<EOL><DEDENT>data_else[channel] = pd.Series(serie, index=time)<EOL><DEDENT>df2 = pd.DataFrame(data_else)<EOL><DEDENT>if sampling_rate == "<STR_LIT>":<EOL><INDENT>sampling_rate = max(freq_list)<EOL><DEDENT>try:<EOL><INDENT>resampling_factor = str(int(<NUM_LIT:1000>/sampling_rate)) + "<STR_LIT:L>"<EOL><DEDENT>except TypeError:<EOL><INDENT>print("<STR_LIT>")<EOL>sampling_rate = max(freq_list)<EOL>resampling_factor = str(int(<NUM_LIT:1000>/sampling_rate)) + "<STR_LIT:L>"<EOL><DEDENT>if resampling_method not in ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>print("<STR_LIT>")<EOL>resampling_method = '<STR_LIT>'<EOL><DEDENT>if resampling_method == "<STR_LIT>":<EOL><INDENT>if len(data_else.keys()) > <NUM_LIT:0>:<EOL><INDENT>df2 = df2.resample(resampling_factor).mean()<EOL><DEDENT>if int(sampling_rate) != int(max(freq_list)):<EOL><INDENT>df = df.resample(resampling_factor).mean()<EOL><DEDENT><DEDENT>if resampling_method == "<STR_LIT>":<EOL><INDENT>if len(data_else.keys()) > <NUM_LIT:0>:<EOL><INDENT>df2 = df2.resample(resampling_factor).bfill()<EOL><DEDENT>if int(sampling_rate) != int(max(freq_list)):<EOL><INDENT>df = df.resample(resampling_factor).bfill()<EOL><DEDENT><DEDENT>if resampling_method == "<STR_LIT>":<EOL><INDENT>if len(data_else.keys()) > <NUM_LIT:0>:<EOL><INDENT>df2 = df2.resample(resampling_factor).pad()<EOL><DEDENT>if int(sampling_rate) != int(max(freq_list)):<EOL><INDENT>df = df.resample(resampling_factor).pad()<EOL><DEDENT><DEDENT>if len(data_else.keys()) > <NUM_LIT:0>:<EOL><INDENT>df = pd.concat([df, df2], <NUM_LIT:1>)<EOL><DEDENT>if index == "<STR_LIT>":<EOL><INDENT>df = df.reset_index()<EOL><DEDENT>if fill_interruptions is True:<EOL><INDENT>df = df.fillna(method="<STR_LIT>")<EOL><DEDENT>if return_sampling_rate is False:<EOL><INDENT>return(df)<EOL><DEDENT>else:<EOL><INDENT>return(df, sampling_rate)<EOL><DEDENT>
|
Read and format a BIOPAC AcqKnowledge file into a pandas dataframe.
Parameters
----------
filename : str
Filename (with or without the extension) of a BIOPAC's AcqKnowledge file.
path : str
Data directory.
index : str
How to index the dataframe. "datetime" for approximate datetime (based on the file creation/change) and "range" for a simple range index.
sampling_rate : int
Final sampling rate (samples/second).
resampling_method : str
The resampling method: "mean", "pad" or "bfill".
fill_interruptions : bool
Automatically fill any signal interruptions using a backfill method.
return_sampling_rate : bool
Should the sampling rate be returned in a tuple with the dataframe? Defaults to True.
Returns
----------
df, sampling_rate : pandas.DataFrame(), int
The AcqKnowledge file converted to a dataframe and its sampling_rate.
Example
----------
>>> import neurokit as nk
>>>
>>> df, sampling_rate = nk.read_acqknowledge('file.acq', return_sampling_rate=True)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
- bioread
- datetime
*See Also*
- bioread package: https://github.com/njvack/bioread
|
f10899:m0
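The core of the conversion is putting channels recorded at different rates onto one time base: channels at the highest rate form the main dataframe, slower channels are resampled onto the same grid and concatenated. A small pandas sketch of that step (the data, timestamps and column names are made up; the real function builds its index from the file's creation date):

import numpy as np
import pandas as pd

# Hypothetical channels: one at 1000 Hz, one at 100 Hz
fast = pd.Series(np.random.randn(1000),
                 index=pd.date_range("2020-01-01", periods=1000, freq="1ms"))
slow = pd.Series(np.random.randn(100),
                 index=pd.date_range("2020-01-01", periods=100, freq="10ms"))

# Resample both onto a 1 ms grid; backfilling fills the gaps of the slow channel
df = pd.concat([fast.resample("1ms").mean(),
                slow.resample("1ms").bfill()], axis=1)
df.columns = ["Fast_Channel", "Slow_Channel"]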
|
def rsp_process(rsp, sampling_rate=<NUM_LIT:1000>):
|
processed_rsp = {"<STR_LIT>": pd.DataFrame({"<STR_LIT>": np.array(rsp)})}<EOL>biosppy_rsp = dict(biosppy.signals.resp.resp(rsp, sampling_rate=sampling_rate, show=False))<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = biosppy_rsp["<STR_LIT>"]<EOL>RSP Rate<EOL>============<EOL>rsp_rate = biosppy_rsp["<STR_LIT>"]*<NUM_LIT> <EOL>rsp_times = biosppy_rsp["<STR_LIT>"] <EOL>rsp_times = np.round(rsp_times*sampling_rate).astype(int) <EOL>try:<EOL><INDENT>rsp_rate = interpolate(rsp_rate, rsp_times, sampling_rate) <EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = rsp_rate<EOL><DEDENT>except TypeError:<EOL><INDENT>print("<STR_LIT>")<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = np.nan<EOL><DEDENT>RSP Cycles<EOL>===========================<EOL>rsp_cycles = rsp_find_cycles(biosppy_rsp["<STR_LIT>"])<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = rsp_cycles["<STR_LIT>"]<EOL>processed_rsp["<STR_LIT>"] = {}<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = rsp_cycles["<STR_LIT>"]<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = rsp_cycles["<STR_LIT>"]<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = rsp_cycles["<STR_LIT>"]/sampling_rate<EOL>RSP Variability<EOL>===========================<EOL>rsp_diff = processed_rsp["<STR_LIT>"]["<STR_LIT>"]<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"] = {}<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"] = np.std(rsp_diff)<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"] = np.sqrt(np.mean(rsp_diff ** <NUM_LIT:2>))<EOL>processed_rsp["<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"] = np.log(processed_rsp["<STR_LIT>"]["<STR_LIT>"]["<STR_LIT>"])<EOL>return(processed_rsp)<EOL>
|
Automated processing of RSP signals.
Parameters
----------
rsp : list or array
Respiratory (RSP) signal array.
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
processed_rsp : dict
Dict containing processed RSP features.
Contains the RSP raw signal, the filtered signal, the respiratory cycles onsets, and respiratory phases (inspirations and expirations).
Example
----------
>>> import neurokit as nk
>>>
>>> processed_rsp = nk.rsp_process(rsp_signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
- numpy
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
|
f10900:m0
|
def rsp_find_cycles(signal):
|
<EOL>gradient = np.gradient(signal)<EOL>zeros, = biosppy.tools.zero_cross(signal=gradient, detrend=True)<EOL>phases_indices = []<EOL>for i in zeros:<EOL><INDENT>if gradient[i+<NUM_LIT:1>] > gradient[i-<NUM_LIT:1>]:<EOL><INDENT>phases_indices.append("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>phases_indices.append("<STR_LIT>")<EOL><DEDENT><DEDENT>inspiration_onsets = []<EOL>expiration_onsets = []<EOL>for index, onset in enumerate(zeros):<EOL><INDENT>if phases_indices[index] == "<STR_LIT>":<EOL><INDENT>inspiration_onsets.append(onset)<EOL><DEDENT>if phases_indices[index] == "<STR_LIT>":<EOL><INDENT>expiration_onsets.append(onset)<EOL><DEDENT><DEDENT>if phases_indices[<NUM_LIT:0>] == "<STR_LIT>":<EOL><INDENT>phase = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>phase = "<STR_LIT>"<EOL><DEDENT>inspiration = []<EOL>phase_counter = <NUM_LIT:0><EOL>for i, value in enumerate(signal):<EOL><INDENT>if i == zeros[phase_counter]:<EOL><INDENT>phase = phases_indices[phase_counter]<EOL>if phase_counter < len(zeros)-<NUM_LIT:1>:<EOL><INDENT>phase_counter += <NUM_LIT:1><EOL><DEDENT><DEDENT>inspiration.append(phase)<EOL><DEDENT>if phases_indices[len(phases_indices)-<NUM_LIT:1>] == "<STR_LIT>":<EOL><INDENT>last_phase = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>last_phase = "<STR_LIT>"<EOL><DEDENT>inspiration = np.array(inspiration)<EOL>inspiration[max(zeros):] = last_phase<EOL>inspiration[inspiration == "<STR_LIT>"] = <NUM_LIT:1><EOL>inspiration[inspiration == "<STR_LIT>"] = <NUM_LIT:0><EOL>inspiration = pd.to_numeric(inspiration)<EOL>cycles_length = np.diff(inspiration_onsets)<EOL>rsp_cycles = {"<STR_LIT>": inspiration,<EOL>"<STR_LIT>": expiration_onsets,<EOL>"<STR_LIT>": inspiration_onsets,<EOL>"<STR_LIT>": cycles_length}<EOL>return(rsp_cycles)<EOL>
|
Find Respiratory cycles onsets, durations and phases.
Parameters
----------
signal : list or array
Respiratory (RSP) signal (preferably filtered).
Returns
----------
rsp_cycles : dict
RSP cycles features.
Example
----------
>>> import neurokit as nk
>>> rsp_cycles = nk.rsp_find_cycles(signal)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- biosppy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
|
f10900:m1
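The cycle detection rests on one observation: zero-crossings of the signal's gradient are respiratory turning points, and a crossing where the gradient goes from falling to rising (a trough) marks an inspiration onset, while the opposite marks an expiration onset. A NumPy-only sketch of that classification on a synthetic breathing trace (biosppy's zero_cross is replaced here by a plain sign-change test, which is an assumption):

import numpy as np

t = np.arange(0, 30, 0.01)                  # 30 s at 100 Hz (hypothetical)
rsp = np.sin(2 * np.pi * 0.25 * t)          # roughly 15 breaths per minute

gradient = np.gradient(rsp)
zeros = np.where(np.diff(np.sign(gradient)) != 0)[0]      # turning points

inspiration_onsets = [i for i in zeros if gradient[i + 1] > gradient[i - 1]]   # troughs
expiration_onsets  = [i for i in zeros if gradient[i + 1] <= gradient[i - 1]]  # peaks
cycle_lengths = np.diff(inspiration_onsets) * 0.01         # seconds between inspirations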
|
def rsp_EventRelated(epoch, event_length, window_post=<NUM_LIT:4>):
|
<EOL>RSP_Response = {}<EOL>window_end = event_length + window_post<EOL>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>]<EOL>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>:window_end].min()<EOL>RSP_Response["<STR_LIT>"] = RSP_Response["<STR_LIT>"] - RSP_Response["<STR_LIT>"]<EOL>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>:window_end].idxmin()<EOL>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>:window_end].max()<EOL>RSP_Response["<STR_LIT>"] = RSP_Response["<STR_LIT>"] - RSP_Response["<STR_LIT>"]<EOL>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>:window_end].idxmax()<EOL>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>:window_end].mean()<EOL>RSP_Response["<STR_LIT>"] = RSP_Response["<STR_LIT>"] - RSP_Response["<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in epoch.columns:<EOL><INDENT>RSP_Response["<STR_LIT>"] = epoch["<STR_LIT>"].ix[<NUM_LIT:0>]<EOL>phase_beg = np.nan<EOL>phase_end = np.nan<EOL>for i in epoch[<NUM_LIT:0>:window_end].index:<EOL><INDENT>if epoch["<STR_LIT>"].ix[i] != RSP_Response["<STR_LIT>"]:<EOL><INDENT>phase_end = i<EOL>break<EOL><DEDENT><DEDENT>for i in epoch[:<NUM_LIT:0>].index[::-<NUM_LIT:1>]:<EOL><INDENT>if epoch["<STR_LIT>"].ix[i] != RSP_Response["<STR_LIT>"]:<EOL><INDENT>phase_beg = i<EOL>break<EOL><DEDENT><DEDENT>RSP_Response["<STR_LIT>"] = -<NUM_LIT:1>*phase_beg/(phase_end - phase_beg)*<NUM_LIT:100><EOL><DEDENT>return(RSP_Response)<EOL>
|
Extract event-related respiratory (RSP) changes.
Parameters
----------
epoch : pandas.DataFrame
An epoch contained in the epochs dict returned by :function:`neurokit.create_epochs()` on the dataframe returned by :function:`neurokit.bio_process()`.
event_length : int
Event length in seconds.
sampling_rate : int
Sampling rate (samples/second).
window_post : float
Post-stimulus window size (in seconds) to include late responses (usually 3 or 4).
Returns
----------
RSP_Response : dict
Event-locked RSP response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> rsp_response = nk.rsp_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
*Details*
- **RSP_Rate_Baseline**: mean RSP Rate before stimulus onset.
- **RSP_Rate_Min**: Min RSP Rate after stimulus onset.
- **RSP_Rate_MinDiff**: RSP Rate minimum - baseline.
- **RSP_Rate_MinTime**: Time of minimum.
- **RSP_Rate_Max**: Max RSP Rate after stimulus onset.
- **RSP_Rate_MaxDiff**: Max RSP Rate - baseline.
- **RSP_Rate_MaxTime**: Time of maximum.
- **RSP_Rate_Mean**: Mean RSP Rate after stimulus onset.
- **RSP_Rate_MeanDiff**: Mean RSP Rate - baseline.
- **RSP_Min**: Value in standard deviations (normalized by baseline) of the lowest point.
- **RSP_MinTime**: Time of RSP Min.
- **RSP_Max**: Value in standard deviations (normalized by baseline) of the highest point.
- **RSP_MaxTime**: Time of RSP Max.
- **RSP_Inspiration**: Respiration phase on stimulus onset (1 = inspiration, 0 = expiration).
- **RSP_Inspiration_Completion**: Percentage of respiratory phase completion on stimulus onset (a worked example of this calculation follows this entry).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
References
-----------
- Gomez, P., Stahel, W. A., & Danuser, B. (2004). Respiratory responses during affective picture viewing. Biological Psychology, 67(3), 359-373.
|
f10900:m2
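As a worked example of the completion value described above: the stimulus onset sits at time 0, the current phase started at a negative time phase_beg and ends at a positive time phase_end, so the elapsed fraction of the phase is -phase_beg / (phase_end - phase_beg):

phase_beg, phase_end = -1.2, 0.8        # phase started 1.2 s before onset, ends 0.8 s after
completion = -1 * phase_beg / (phase_end - phase_beg) * 100
# 1.2 / 2.0 * 100 = 60.0 -> onset occurred 60% of the way through the phase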
|
def bio_process(ecg=None, rsp=None, eda=None, emg=None, add=None, sampling_rate=<NUM_LIT:1000>, age=None, sex=None, position=None, ecg_filter_type="<STR_LIT>", ecg_filter_band="<STR_LIT>", ecg_filter_frequency=[<NUM_LIT:3>, <NUM_LIT>], ecg_segmenter="<STR_LIT>", ecg_quality_model="<STR_LIT:default>", ecg_hrv_features=["<STR_LIT:time>", "<STR_LIT>"], eda_alpha=<NUM_LIT>, eda_gamma=<NUM_LIT>, scr_method="<STR_LIT>", scr_treshold=<NUM_LIT:0.1>, emg_names=None, emg_envelope_freqs=[<NUM_LIT:10>, <NUM_LIT>], emg_envelope_lfreq=<NUM_LIT:4>, emg_activation_treshold="<STR_LIT:default>", emg_activation_n_above=<NUM_LIT>, emg_activation_n_below=<NUM_LIT:1>):
|
processed_bio = {}<EOL>bio_df = pd.DataFrame({})<EOL>if ecg is not None:<EOL><INDENT>ecg = ecg_process(ecg=ecg, rsp=rsp, sampling_rate=sampling_rate, filter_type=ecg_filter_type, filter_band=ecg_filter_band, filter_frequency=ecg_filter_frequency, segmenter=ecg_segmenter, quality_model=ecg_quality_model, hrv_features=ecg_hrv_features, age=age, sex=sex, position=position)<EOL>processed_bio["<STR_LIT>"] = ecg["<STR_LIT>"]<EOL>if rsp is not None:<EOL><INDENT>processed_bio["<STR_LIT>"] = ecg["<STR_LIT>"]<EOL><DEDENT>bio_df = pd.concat([bio_df, ecg["<STR_LIT>"]], axis=<NUM_LIT:1>)<EOL><DEDENT>if rsp is not None and ecg is None:<EOL><INDENT>rsp = rsp_process(rsp=rsp, sampling_rate=sampling_rate)<EOL>processed_bio["<STR_LIT>"] = rsp["<STR_LIT>"]<EOL>bio_df = pd.concat([bio_df, rsp["<STR_LIT>"]], axis=<NUM_LIT:1>)<EOL><DEDENT>if eda is not None:<EOL><INDENT>eda = eda_process(eda=eda, sampling_rate=sampling_rate, alpha=eda_alpha, gamma=eda_gamma, scr_method=scr_method, scr_treshold=scr_treshold)<EOL>processed_bio["<STR_LIT>"] = eda["<STR_LIT>"]<EOL>bio_df = pd.concat([bio_df, eda["<STR_LIT>"]], axis=<NUM_LIT:1>)<EOL><DEDENT>if emg is not None:<EOL><INDENT>emg = emg_process(emg=emg, sampling_rate=sampling_rate, emg_names=emg_names, envelope_freqs=emg_envelope_freqs, envelope_lfreq=emg_envelope_lfreq, activation_treshold=emg_activation_treshold, activation_n_above=emg_activation_n_above, activation_n_below=emg_activation_n_below)<EOL>bio_df = pd.concat([bio_df, emg.pop("<STR_LIT>")], axis=<NUM_LIT:1>)<EOL>for i in emg:<EOL><INDENT>processed_bio[i] = emg[i]<EOL><DEDENT><DEDENT>if add is not None:<EOL><INDENT>add = add.reset_index(drop=True)<EOL>bio_df = pd.concat([bio_df, add], axis=<NUM_LIT:1>)<EOL><DEDENT>processed_bio["<STR_LIT>"] = bio_df<EOL>return(processed_bio)<EOL>
|
Automated processing of bio signals. Wrapper for other bio processing functions.
Parameters
----------
ecg : list or array
ECG signal array.
rsp : list or array
Respiratory signal array.
eda : list or array
EDA signal array.
emg : list, array or DataFrame
EMG signal array. Can include multiple channels.
add : pandas.DataFrame
Dataframe or channels to add by concatenation to the processed dataframe.
sampling_rate : int
Sampling rate (samples/second).
age : float
Subject's age.
sex : str
Subject's gender ("m" or "f").
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
ecg_filter_type : str
Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel").
ecg_filter_band : str
Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
ecg_filter_frequency : int or list
Cutoff frequencies, format depends on type of band: "lowpass" or "bandpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
ecg_quality_model : str
Path to model used to check signal quality. "default" uses the builtin model. None to skip this function.
ecg_hrv_features : list
What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'. None to skip this function.
ecg_segmenter : str
The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov" or "ssf". See :func:`neurokit.ecg_preprocess()` for details.
eda_alpha : float
cvxEDA penalization for the sparse SMNA driver.
eda_gamma : float
cvxEDA penalization for the tonic spline coefficients.
scr_method : str
SCR extraction algorithm. "makowski" (default), "kim" (BioSPPy's default; see Kim et al., 2004) or "gamboa" (Gamboa, 2008).
scr_treshold : float
SCR minimum threshold (in terms of the signal's standard deviation).
emg_names : list
List of EMG channel names.
Returns
----------
processed_bio : dict
Dict containing processed bio features.
Contains the raw and filtered ECG signal, the R-peak indices, HRV characteristics, all heartbeats, the heart rate, the filtered RSP signal (if respiration is provided) with respiratory sinus arrhythmia (RSA) features, the raw and filtered EDA signal, the phasic component (if cvxEDA is True), the SCR onsets, peak indices and amplitudes, and the raw and filtered EMG signal with pulse onsets.
Example
----------
>>> import neurokit as nk
>>>
>>> bio_features = nk.bio_process(ecg=ecg_signal, rsp=rsp_signal, eda=eda_signal)
Notes
----------
*Details*
- **ECG Features**: See :func:`neurokit.ecg_process()`.
- **EDA Features**: See :func:`neurokit.eda_process()`.
- **RSP Features**: See :func:`neurokit.rsp_process()`.
- **EMG Features**: See :func:`neurokit.emg_process()`.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
- hrv: https://github.com/rhenanbartels/hrv
- cvxEDA: https://github.com/lciti/cvxEDA
References
-----------
- Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
- Greco, A., Valenza, G., & Scilingo, E. P. (2016). Evaluation of CDA and CvxEDA Models. In Advances in Electrodermal Activity Processing with Applications for Mental Health (pp. 35-43). Springer International Publishing.
- Greco, A., Valenza, G., Lanata, A., Scilingo, E. P., & Citi, L. (2016). cvxEDA: A convex optimization approach to electrodermal activity processing. IEEE Transactions on Biomedical Engineering, 63(4), 797-804.
- Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
- Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
- Kim, K. H., Bang, S. W., & Kim, S. R. (2004). Emotion recognition system using short-term monitoring of physiological signals. Medical and biological engineering and computing, 42(3), 419-427.
- Gamboa, H. (2008). Multi-Modal Behavioral Biometrics Based on HCI and Electrophysiology (Doctoral dissertation, PhD thesis, Universidade Técnica de Lisboa, Instituto Superior Técnico).
|
f10901:m0
|
def bio_EventRelated(epoch, event_length, window_post_ecg=<NUM_LIT:0>, window_post_rsp=<NUM_LIT:4>, window_post_eda=<NUM_LIT:4>, ecg_features=["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]):
|
bio_response = {}<EOL>ECG_Response = ecg_EventRelated(epoch, event_length, window_post=window_post_ecg, features=ecg_features)<EOL>bio_response.update(ECG_Response)<EOL>RSP_Response = rsp_EventRelated(epoch, event_length, window_post=window_post_rsp)<EOL>bio_response.update(RSP_Response)<EOL>EDA_Response = eda_EventRelated(epoch, event_length, window_post=window_post_eda)<EOL>bio_response.update(EDA_Response)<EOL>return(bio_response)<EOL>
|
Extract event-related bio (EDA, ECG and RSP) changes.
Parameters
----------
epoch : pandas.DataFrame
An epoch contained in the epochs dict returned by :function:`neurokit.create_epochs()` on the dataframe returned by :function:`neurokit.bio_process()`.
event_length : int
Event length in seconds.
sampling_rate : int
Sampling rate (samples/second).
window_post_ecg : float
Post-stimulus window size (in seconds) for ECG.
window_post_rsp : float
Post-stimulus window size (in seconds) for RSP.
window_post_eda : float
Post-stimulus window size (in seconds) for EDA.
ecg_features : list
List of ECG features to compute, can contain "Heart_Rate", "Cardiac_Phase", "RR_Interval", "RSA", "HRV".
Returns
----------
bio_response : dict
Event-locked bio response features.
Example
----------
>>> import neurokit as nk
>>> bio = nk.bio_process(ecg=data["ECG"], rsp=data["RSP"], eda=data["EDA"], sampling_rate=1000, add=data["Photosensor"])
>>> df = bio["df"]
>>> events = nk.find_events(df["Photosensor"], cut="lower")
>>> epochs = nk.create_epochs(df, events["onsets"], duration=7, onset=-0.5)
>>> for epoch in epochs:
>>> bio_response = nk.bio_EventRelated(epoch, event_length=4, window_post=3)
Notes
----------
*Details*
- **ECG Features**
- ***_Baseline**: Signal at onset.
- ***_Min**: Minimum of signal after stimulus onset.
- ***_MinDiff**: Signal minimum - baseline.
- ***_MinTime**: Time of signal minimum.
- ***_Max**: Maximum of signal after stimulus onset.
- ***_MaxDiff**: Signal maximum - baseline.
- ***_MaxTime**: Time of signal maximum.
- ***_Mean**: Mean signal after stimulus onset.
- ***_MeanDiff**: Mean signal - baseline.
- **ECG_Phase_Systole**: Cardiac phase on stimulus onset (1 = systole, 0 = diastole).
- **ECG_Phase_Systole_Completion**: Percentage of cardiac phase completion on stimulus onset.
- **ECG_HRV_***: Time-domain HRV features. See :func:`neurokit.ecg_hrv()`.
- **Respiration Features**
- **RSP_Rate_Baseline**: mean RSP Rate before stimulus onset.
- **RSP_Rate_Min**: Min RSP Rate after stimulus onset.
- **RSP_Rate_MinDiff**: RSP Rate minimum - baseline.
- **RSP_Rate_MinTime**: Time of minimum.
- **RSP_Rate_Max**: Max RSP Rate after stimulus onset.
- **RSP_Rate_MaxDiff**: Max RSP Rate - baseline.
- **RSP_Rate_MaxTime**: Time of maximum.
- **RSP_Rate_Mean**: Mean RSP Rate after stimulus onset.
- **RSP_Rate_MeanDiff**: Mean RSP Rate - baseline.
- **RSP_Min**: Value in standard deviations (normalized by baseline) of the lowest point.
- **RSP_MinTime**: Time of RSP Min.
- **RSP_Max**: Value in standard deviations (normalized by baseline) of the highest point.
- **RSP_MaxTime**: Time of RSP Max.
- **RSP_Inspiration**: Respiration phase on stimulus onset (1 = inspiration, 0 = expiration).
- **RSP_Inspiration_Completion**: Percentage of respiratory phase completion on stimulus onset.
- **EDA Features**
- **EDA_Peak**: Max of EDA (in a window starting 1s after the stim onset) minus baseline.
- **SCR_Amplitude**: Peak of SCR. If no SCR, returns NA.
- **SCR_Magnitude**: Peak of SCR. If no SCR, returns 0.
- **SCR_Amplitude_Log**: log of 1+amplitude.
- **SCR_Magnitude_Log**: log of 1+magnitude.
- **SCR_PeakTime**: Time of peak.
- **SCR_Latency**: Time between stim onset and SCR onset.
- **SCR_RiseTime**: Time between SCR onset and peak.
- **SCR_Strength**: *Experimental*: peak divided by latency. Angle of the line between peak and onset.
- **SCR_RecoveryTime**: Time between peak and recovery point (half of the amplitude).
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
*See Also*
References
-----------
- Gomez, P., Stahel, W. A., & Danuser, B. (2004). Respiratory responses during affective picture viewing. Biological Psychology, 67(3), 359-373.
- Schneider, R., Schmidt, S., Binder, M., Schäfer, F., & Walach, H. (2003). Respiration-related artifacts in EDA recordings: introducing a standardized method to overcome multiple interpretations. Psychological reports, 93(3), 907-920.
|
f10901:m1
|
def ecg_preprocess(ecg, sampling_rate=<NUM_LIT:1000>, filter_type="<STR_LIT>", filter_band="<STR_LIT>", filter_frequency=[<NUM_LIT:3>, <NUM_LIT>], filter_order=<NUM_LIT>, segmenter="<STR_LIT>"):
|
<EOL>ecg = np.array(ecg)<EOL>if filter_type in ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>order = int(filter_order * sampling_rate)<EOL>filtered, _, _ = biosppy.tools.filter_signal(signal=ecg,<EOL>ftype=filter_type,<EOL>band=filter_band,<EOL>order=order,<EOL>frequency=filter_frequency,<EOL>sampling_rate=sampling_rate)<EOL><DEDENT>else:<EOL><INDENT>filtered = ecg <EOL><DEDENT>if segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks, = biosppy.ecg.hamilton_segmenter(signal=filtered, sampling_rate=sampling_rate)<EOL><DEDENT>elif segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks, = biosppy.ecg.gamboa_segmenter(signal=filtered, sampling_rate=sampling_rate, tol=<NUM_LIT>)<EOL><DEDENT>elif segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks, = biosppy.ecg.engzee_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=<NUM_LIT>)<EOL><DEDENT>elif segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks, = biosppy.ecg.christov_segmenter(signal=filtered, sampling_rate=sampling_rate)<EOL><DEDENT>elif segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks, = biosppy.ecg.ssf_segmenter(signal=filtered, sampling_rate=sampling_rate, threshold=<NUM_LIT:20>, before=<NUM_LIT>, after=<NUM_LIT>)<EOL><DEDENT>elif segmenter == "<STR_LIT>":<EOL><INDENT>rpeaks = segmenter_pekkanen(ecg=filtered, sampling_rate=sampling_rate, window_size=<NUM_LIT>, lfreq=<NUM_LIT>, hfreq=<NUM_LIT>)<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>" % segmenter)<EOL><DEDENT>rpeaks, = biosppy.ecg.correct_rpeaks(signal=filtered,<EOL>rpeaks=rpeaks,<EOL>sampling_rate=sampling_rate,<EOL>tol=<NUM_LIT>)<EOL>cardiac_cycles, rpeaks = biosppy.ecg.extract_heartbeats(signal=filtered,<EOL>rpeaks=rpeaks,<EOL>sampling_rate=sampling_rate,<EOL>before=<NUM_LIT>,<EOL>after=<NUM_LIT>)<EOL>heart_rate_idx, heart_rate = biosppy.tools.get_heart_rate(beats=rpeaks,<EOL>sampling_rate=sampling_rate,<EOL>smooth=True,<EOL>size=<NUM_LIT:3>)<EOL>length = len(ecg)<EOL>T = (length - <NUM_LIT:1>) / float(sampling_rate)<EOL>ts = np.linspace(<NUM_LIT:0>, T, length, endpoint=False)<EOL>heart_rate_times = ts[heart_rate_idx]<EOL>heart_rate_times = np.round(heart_rate_times*sampling_rate).astype(int) <EOL><INDENT>cardiac_cycles_tmpl = np.linspace(-<NUM_LIT>, <NUM_LIT>, cardiac_cycles.shape[<NUM_LIT:1>], endpoint=False)<EOL><DEDENT>ecg_df = pd.DataFrame({"<STR_LIT>": np.array(ecg)}) <EOL>ecg_df["<STR_LIT>"] = filtered <EOL>rpeaks_signal = np.array([np.nan]*len(ecg))<EOL>rpeaks_signal[rpeaks] = <NUM_LIT:1><EOL>ecg_df["<STR_LIT>"] = rpeaks_signal<EOL>try:<EOL><INDENT>heart_rate = interpolate(heart_rate, heart_rate_times, sampling_rate) <EOL>ecg_df["<STR_LIT>"] = heart_rate<EOL><DEDENT>except TypeError:<EOL><INDENT>print("<STR_LIT>")<EOL>ecg_df["<STR_LIT>"] = np.nan<EOL><DEDENT>processed_ecg = {"<STR_LIT>": ecg_df,<EOL>"<STR_LIT>": {<EOL>"<STR_LIT>": rpeaks<EOL>}<EOL>}<EOL>heartbeats = pd.DataFrame(cardiac_cycles).T<EOL>heartbeats.index = pd.date_range(pd.datetime.today(), periods=len(heartbeats), freq=str(int(<NUM_LIT>/sampling_rate)) + "<STR_LIT>")<EOL>processed_ecg["<STR_LIT>"]["<STR_LIT>"] = heartbeats<EOL>waves = ecg_wave_detector(ecg_df["<STR_LIT>"], rpeaks)<EOL>processed_ecg["<STR_LIT>"].update(waves)<EOL>processed_ecg["<STR_LIT>"]["<STR_LIT>"] = ecg_systole(ecg_df["<STR_LIT>"], rpeaks, waves["<STR_LIT>"])<EOL>return(processed_ecg)<EOL>
|
ECG signal preprocessing.
Parameters
----------
ecg : list or ndarray
ECG signal array.
sampling_rate : int
Sampling rate (samples/second).
filter_type : str or None
Can be Finite Impulse Response filter ("FIR"), Butterworth filter ("butter"), Chebyshev filters ("cheby1" and "cheby2"), Elliptic filter ("ellip") or Bessel filter ("bessel").
filter_band : str
Band type, can be Low-pass filter ("lowpass"), High-pass filter ("highpass"), Band-pass filter ("bandpass"), Band-stop filter ("bandstop").
filter_frequency : int or list
Cutoff frequencies, format depends on type of band: "lowpass" or "bandpass": single frequency (int), "bandpass" or "bandstop": pair of frequencies (list).
filter_order : float
Filter order.
segmenter : str
The cardiac phase segmenter. Can be "hamilton", "gamboa", "engzee", "christov", "ssf" or "pekkanen".
Returns
----------
ecg_preprocessed : dict
Preprocessed ECG.
Example
----------
>>> import neurokit as nk
>>> ecg_preprocessed = nk.ecg_preprocess(signal)
Notes
----------
*Details*
- **segmenter**: Different methods of segmentation are implemented: **hamilton** (`Hamilton, 2002 <http://www.eplimited.com/osea13.pdf/>`_), **gamboa** (`Gamboa, 2008 <http://www.lx.it.pt/~afred/pub/thesisHugoGamboa.pdf/>`_), **engzee** (Engelse and Zeelenberg, 1979; Lourenco et al., 2012), **christov** (Christov, 2004), **ssf** (Slope Sum Function) and **pekkanen** (`Kathirvel et al., 2011 <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_).
*Authors*
- the BioSPPy dev team (https://github.com/PIA-Group/BioSPPy)
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- biosppy
- numpy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
References
-----------
- Hamilton, P. (2002, September). Open source ECG analysis. In Computers in Cardiology, 2002 (pp. 101-104). IEEE.
- Kathirvel, P., Manikandan, M. S., Prasanna, S. R. M., & Soman, K. P. (2011). An efficient R-peak detection based on new nonlinear transformation and first-order Gaussian differentiator. Cardiovascular Engineering and Technology, 2(4), 408-425.
- Canento, F., Lourenço, A., Silva, H., & Fred, A. (2013). Review and Comparison of Real Time Electrocardiogram Segmentation Algorithms for Biometric Applications. In Proceedings of the 6th Int’l Conference on Health Informatics (HEALTHINF).
- Christov, I. I. (2004). Real time electrocardiogram QRS detection using combined adaptive threshold. Biomedical engineering online, 3(1), 28.
- Engelse, W. A. H., & Zeelenberg, C. (1979). A single scan algorithm for QRS-detection and feature extraction. Computers in cardiology, 6(1979), 37-42.
- Lourenço, A., Silva, H., Leite, P., Lourenço, R., & Fred, A. L. (2012, February). Real Time Electrocardiogram Segmentation for Finger based ECG Biometrics. In Biosignals (pp. 49-54).
|
f10903:m0
|
def ecg_find_peaks(signal, sampling_rate=<NUM_LIT:1000>):
|
rpeaks, = biosppy.ecg.hamilton_segmenter(np.array(signal), sampling_rate=sampling_rate)<EOL>rpeaks, = biosppy.ecg.correct_rpeaks(signal=np.array(signal), rpeaks=rpeaks, sampling_rate=sampling_rate, tol=<NUM_LIT>)<EOL>return(rpeaks)<EOL>
|
Find R peaks indices on the ECG channel.
Parameters
----------
signal : list or ndarray
ECG signal (preferably filtered).
sampling_rate : int
Sampling rate (samples/second).
Returns
----------
rpeaks : list
List of R-peaks location indices.
Example
----------
>>> import neurokit as nk
>>> Rpeaks = nk.ecg_find_peaks(signal)
Notes
----------
*Authors*
- the BioSPPy dev team (https://github.com/PIA-Group/BioSPPy)
*Dependencies*
- biosppy
*See Also*
- BioSPPY: https://github.com/PIA-Group/BioSPPy
|
f10903:m1
|
def ecg_wave_detector(ecg, rpeaks):
|
q_waves = []<EOL>p_waves = []<EOL>q_waves_starts = []<EOL>s_waves = []<EOL>t_waves = []<EOL>t_waves_starts = []<EOL>t_waves_ends = []<EOL>for index, rpeak in enumerate(rpeaks[:-<NUM_LIT:3>]):<EOL><INDENT>try:<EOL><INDENT>epoch_before = np.array(ecg)[int(rpeaks[index-<NUM_LIT:1>]):int(rpeak)]<EOL>epoch_before = epoch_before[int(len(epoch_before)/<NUM_LIT:2>):len(epoch_before)]<EOL>epoch_before = list(reversed(epoch_before))<EOL>q_wave_index = np.min(find_peaks(epoch_before))<EOL>q_wave = rpeak - q_wave_index<EOL>p_wave_index = q_wave_index + np.argmax(epoch_before[q_wave_index:])<EOL>p_wave = rpeak - p_wave_index<EOL>inter_pq = epoch_before[q_wave_index:p_wave_index]<EOL>inter_pq_derivative = np.gradient(inter_pq, <NUM_LIT:2>)<EOL>q_start_index = find_closest_in_list(len(inter_pq_derivative)/<NUM_LIT:2>, find_peaks(inter_pq_derivative))<EOL>q_start = q_wave - q_start_index<EOL>q_waves.append(q_wave)<EOL>p_waves.append(p_wave)<EOL>q_waves_starts.append(q_start)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>except IndexError:<EOL><INDENT>pass<EOL><DEDENT>try:<EOL><INDENT>epoch_after = np.array(ecg)[int(rpeak):int(rpeaks[index+<NUM_LIT:1>])]<EOL>epoch_after = epoch_after[<NUM_LIT:0>:int(len(epoch_after)/<NUM_LIT:2>)]<EOL>s_wave_index = np.min(find_peaks(epoch_after))<EOL>s_wave = rpeak + s_wave_index<EOL>t_wave_index = s_wave_index + np.argmax(epoch_after[s_wave_index:])<EOL>t_wave = rpeak + t_wave_index<EOL>inter_st = epoch_after[s_wave_index:t_wave_index]<EOL>inter_st_derivative = np.gradient(inter_st, <NUM_LIT:2>)<EOL>t_start_index = find_closest_in_list(len(inter_st_derivative)/<NUM_LIT:2>, find_peaks(inter_st_derivative))<EOL>t_start = s_wave + t_start_index<EOL>t_end = np.min(find_peaks(epoch_after[t_wave_index:]))<EOL>t_end = t_wave + t_end<EOL>s_waves.append(s_wave)<EOL>t_waves.append(t_wave)<EOL>t_waves_starts.append(t_start)<EOL>t_waves_ends.append(t_end)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT>except IndexError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>.Series(epoch_before).plot()<EOL><INDENT>t_waves = []<EOL>for index, rpeak in enumerate(rpeaks[<NUM_LIT:0>:-<NUM_LIT:1>]):<EOL><INDENT>epoch = np.array(ecg)[int(rpeak):int(rpeaks[index+<NUM_LIT:1>])]<EOL>pd.Series(epoch).plot()<EOL>middle = (rpeaks[index+<NUM_LIT:1>] - rpeak) / <NUM_LIT:2><EOL>quarter = middle/<NUM_LIT:2><EOL>epoch = np.array(ecg)[int(rpeak+quarter):int(rpeak+middle)]<EOL>try:<EOL><INDENT>t_wave = int(rpeak+quarter) + np.argmax(epoch)<EOL>t_waves.append(t_wave)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>p_waves = []<EOL>for index, rpeak in enumerate(rpeaks[<NUM_LIT:1>:]):<EOL><INDENT>index += <NUM_LIT:1><EOL>middle = (rpeak - rpeaks[index-<NUM_LIT:1>]) / <NUM_LIT:2><EOL>quarter = middle/<NUM_LIT:2><EOL>epoch = np.array(ecg)[int(rpeak-middle):int(rpeak-quarter)]<EOL>try:<EOL><INDENT>p_wave = int(rpeak-quarter) + np.argmax(epoch)<EOL>p_waves.append(p_wave)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>q_waves = []<EOL>for index, p_wave in enumerate(p_waves):<EOL><INDENT>epoch = np.array(ecg)[int(p_wave):int(rpeaks[rpeaks>p_wave][<NUM_LIT:0>])]<EOL>try:<EOL><INDENT>q_wave = p_wave + np.argmin(epoch)<EOL>q_waves.append(q_wave)<EOL><DEDENT>except ValueError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT><DEDENT>ecg_waves = {"<STR_LIT>": t_waves,<EOL>"<STR_LIT>": p_waves,<EOL>"<STR_LIT>": q_waves,<EOL>"<STR_LIT>": s_waves,<EOL>"<STR_LIT>": q_waves_starts,<EOL>"<STR_LIT>": t_waves_starts,<EOL>"<STR_LIT>": t_waves_ends}<EOL>return(ecg_waves)<EOL>
|
Returns the localization of the P, Q, T waves. This function needs massive help!
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
Returns
----------
ecg_waves : dict
Contains wave peaks location indices.
Example
----------
>>> import neurokit as nk
>>> ecg = nk.ecg_simulate(duration=5, sampling_rate=1000)
>>> ecg = nk.ecg_preprocess(ecg=ecg, sampling_rate=1000)
>>> rpeaks = ecg["ECG"]["R_Peaks"]
>>> ecg = ecg["df"]["ECG_Filtered"]
>>> ecg_waves = nk.ecg_wave_detector(ecg=ecg, rpeaks=rpeaks)
>>> nk.plot_events_in_signal(ecg, [ecg_waves["P_Waves"], ecg_waves["Q_Waves_Onsets"], ecg_waves["Q_Waves"], list(rpeaks), ecg_waves["S_Waves"], ecg_waves["T_Waves_Onsets"], ecg_waves["T_Waves"], ecg_waves["T_Waves_Ends"]], color=["green", "yellow", "orange", "red", "black", "brown", "blue", "purple"])
Notes
----------
*Details*
- **Cardiac Cycle**: A typical ECG showing a heartbeat consists of a P wave, a QRS complex and a T wave. The P wave represents the wave of depolarization that spreads from the SA node throughout the atria. The QRS complex reflects the rapid depolarization of the right and left ventricles. Since the ventricles are the largest part of the heart in terms of mass, the QRS complex usually has a much larger amplitude than the P wave. The T wave represents the repolarization of the ventricles. On rare occasions, a U wave can be seen following the T wave; it is believed to be related to the last remnants of ventricular repolarization.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
|
f10903:m2
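The second, simpler strategy visible in the body locates waves by searching fixed fractions of the R-R interval: the T wave as the maximum shortly after the current R peak, the P wave as the maximum shortly before the next R peak, and the Q wave as the minimum between that P wave and the next R peak. A self-contained sketch of that heuristic (the window fractions are assumptions, since the literals are masked):

import numpy as np

def naive_wave_detector(ecg, rpeaks):
    """Very rough P/Q/T localization between consecutive R peaks (a sketch only)."""
    ecg = np.asarray(ecg)
    t_waves, p_waves, q_waves = [], [], []
    for r, r_next in zip(rpeaks[:-1], rpeaks[1:]):
        half, quarter = (r_next - r) // 2, (r_next - r) // 4
        # T wave: highest point in the second quarter after the current R peak
        t_waves.append(r + quarter + np.argmax(ecg[r + quarter:r + half]))
        # P wave: highest point in the quarter just before the next R peak
        p_waves.append(r_next - half + np.argmax(ecg[r_next - half:r_next - quarter]))
        # Q wave: deepest point between that P wave and the next R peak
        q_waves.append(p_waves[-1] + np.argmin(ecg[p_waves[-1]:r_next]))
    return {"T_Waves": t_waves, "P_Waves": p_waves, "Q_Waves": q_waves}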
|
def ecg_systole(ecg, rpeaks, t_waves_ends):
|
waves = np.array(["<STR_LIT>"]*len(ecg))<EOL>waves[rpeaks] = "<STR_LIT:R>"<EOL>waves[t_waves_ends] = "<STR_LIT:T>"<EOL>systole = [<NUM_LIT:0>]<EOL>current = <NUM_LIT:0><EOL>for index, value in enumerate(waves[<NUM_LIT:1>:]):<EOL><INDENT>if waves[index-<NUM_LIT:1>] == "<STR_LIT:R>":<EOL><INDENT>current = <NUM_LIT:1><EOL><DEDENT>if waves[index-<NUM_LIT:1>] == "<STR_LIT:T>":<EOL><INDENT>current = <NUM_LIT:0><EOL><DEDENT>systole.append(current)<EOL><DEDENT>return(systole)<EOL>
|
Returns the localization of systoles and diastoles.
Parameters
----------
ecg : list or ndarray
ECG signal (preferably filtered).
rpeaks : list or ndarray
R peaks localization.
t_waves_ends : list or ndarray
Locations of the T-wave ends.
Returns
----------
systole : ndarray
Array indicating systole (1) and diastole (0) samples.
Example
----------
>>> import neurokit as nk
>>> systole = nk.ecg_systole(ecg, rpeaks, t_waves_ends)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Systole/Diastole**: One prominent channel of body and brain communication is that conveyed by baroreceptors, pressure and stretch-sensitive receptors within the heart and surrounding arteries. Within each cardiac cycle, bursts of baroreceptor afferent activity encoding the strength and timing of each heartbeat are carried via the vagus and glossopharyngeal nerve afferents to the nucleus of the solitary tract. This is the principal route that communicates to the brain the dynamic state of the heart, enabling the representation of cardiovascular arousal within viscerosensory brain regions and influencing ascending neuromodulator systems implicated in emotional and motivational behaviour. Because arterial baroreceptors are activated by the arterial pulse pressure wave, their phasic discharge is maximal during and immediately after the cardiac systole, that is, when the blood is ejected from the heart, and minimal during cardiac diastole, that is, between heartbeats (Azevedo, 2017). A minimal sketch of the systole/diastole indicator follows this entry.
References
-----------
- Azevedo, R. T., Garfinkel, S. N., Critchley, H. D., & Tsakiris, M. (2017). Cardiac afferent activity modulates the expression of racial stereotypes. Nature communications, 8.
- Edwards, L., Ring, C., McIntyre, D., & Carroll, D. (2001). Modulation of the human nociceptive flexion reflex across the cardiac cycle. Psychophysiology, 38(4), 712-718.
- Gray, M. A., Rylander, K., Harrison, N. A., Wallin, B. G., & Critchley, H. D. (2009). Following one's heart: cardiac rhythms gate central initiation of sympathetic reflexes. Journal of Neuroscience, 29(6), 1817-1825.
|
f10903:m3
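In other words, every sample between an R peak and the end of the following T wave is flagged as systole (1), and everything else as diastole (0). A compact sketch of such an indicator (the inputs are illustrative sample indices, not NeuroKit's internal representation):

import numpy as np

def systole_indicator(n_samples, rpeaks, t_waves_ends):
    """1 from each R peak until the next T-wave end, 0 elsewhere (a sketch only)."""
    systole = np.zeros(n_samples, dtype=int)
    events = sorted([(i, 1) for i in rpeaks] + [(i, 0) for i in t_waves_ends])
    state, previous = 0, 0
    for index, new_state in events:
        systole[previous:index] = state      # carry the current phase forward
        state, previous = new_state, index
    systole[previous:] = state
    return systole

systole_indicator(20, rpeaks=[3, 12], t_waves_ends=[8, 17])
# -> [0 0 0 1 1 1 1 1 0 0 0 0 1 1 1 1 1 0 0 0]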
|
def segmenter_pekkanen(ecg, sampling_rate, window_size=<NUM_LIT>, lfreq=<NUM_LIT>, hfreq=<NUM_LIT>):
|
window_size = int(window_size*sampling_rate)<EOL>lowpass = scipy.signal.butter(<NUM_LIT:1>, hfreq/(sampling_rate/<NUM_LIT>), '<STR_LIT>')<EOL>highpass = scipy.signal.butter(<NUM_LIT:1>, lfreq/(sampling_rate/<NUM_LIT>), '<STR_LIT>')<EOL>ecg_low = scipy.signal.filtfilt(*lowpass, x=ecg)<EOL>ecg_band = scipy.signal.filtfilt(*highpass, x=ecg_low)<EOL>decg = np.diff(ecg_band)<EOL>decg_power = decg**<NUM_LIT:2><EOL>thresholds = []<EOL>max_powers = []<EOL>for i in range(int(len(decg_power)/window_size)):<EOL><INDENT>sample = slice(i*window_size, (i+<NUM_LIT:1>)*window_size)<EOL>d = decg_power[sample]<EOL>thresholds.append(<NUM_LIT:0.5>*np.std(d))<EOL>max_powers.append(np.max(d))<EOL><DEDENT>threshold = <NUM_LIT:0.5>*np.std(decg_power)<EOL>threshold = np.median(thresholds)<EOL>max_power = np.median(max_powers)<EOL>decg_power[decg_power < threshold] = <NUM_LIT:0><EOL>decg_power = decg_power/max_power<EOL>decg_power[decg_power > <NUM_LIT:1.0>] = <NUM_LIT:1.0><EOL>square_decg_power = decg_power**<NUM_LIT:2><EOL><INDENT>shannon_energy = -square_decg_power*np.log(square_decg_power) <EOL>shannon_energy[np.where(np.isfinite(shannon_energy) == False)] = <NUM_LIT:0.0><EOL><DEDENT>shannon_energy = -square_decg_power*np.log(square_decg_power.clip(min=<NUM_LIT>))<EOL>shannon_energy[np.where(shannon_energy <= <NUM_LIT:0>)] = <NUM_LIT:0.0><EOL>mean_window_len = int(sampling_rate*<NUM_LIT>+<NUM_LIT:1>)<EOL>lp_energy = np.convolve(shannon_energy, [<NUM_LIT:1.0>/mean_window_len]*mean_window_len, mode='<STR_LIT>')<EOL>lp_energy = scipy.ndimage.gaussian_filter1d(lp_energy, sampling_rate/<NUM_LIT>)<EOL>lp_energy_diff = np.diff(lp_energy)<EOL>rpeaks = (lp_energy_diff[:-<NUM_LIT:1>] > <NUM_LIT:0>) & (lp_energy_diff[<NUM_LIT:1>:] < <NUM_LIT:0>)<EOL>rpeaks = np.flatnonzero(rpeaks)<EOL>rpeaks -= <NUM_LIT:1><EOL>return(rpeaks)<EOL>
|
ECG R peak detection based on `Kathirvel et al. (2011) <http://link.springer.com/article/10.1007/s13239-011-0065-3/fulltext.html>`_ with some tweaks (mainly robust estimation of the rectified signal cutoff threshold).
Parameters
----------
ecg : list or ndarray
ECG signal array.
sampling_rate : int
Sampling rate (samples/second).
window_size : float
Ransac window size.
lfreq : float
Low frequency of the band pass filter.
hfreq : float
High frequency of the band pass filter.
Returns
----------
rpeaks : ndarray
R peaks location.
Example
----------
>>> import neurokit as nk
>>> rpeaks = nk.segmenter_pekkanen(ecg_signal, 1000)
Notes
----------
*Authors*
- `Jami Pekkanen <https://github.com/jampekka>`_
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
- numpy
*See Also*
- rpeakdetect: https://github.com/tru-hy/rpeakdetect
|
f10903:m4
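The pipeline in the body is: band-pass the ECG, square its first derivative, convert the result to Shannon energy, smooth it, and take local maxima of the smoothed envelope as R-peak candidates. A condensed sketch of the envelope computation (the filter orders and cut-offs below are illustrative, since the literals are masked):

import numpy as np
import scipy.signal

def shannon_envelope(ecg, sampling_rate=1000, lfreq=5.0, hfreq=15.0):
    # Band-pass: first-order low-pass then high-pass Butterworth, applied with filtfilt
    b_lo, a_lo = scipy.signal.butter(1, hfreq / (sampling_rate / 2.0), "low")
    b_hi, a_hi = scipy.signal.butter(1, lfreq / (sampling_rate / 2.0), "high")
    band = scipy.signal.filtfilt(b_hi, a_hi, scipy.signal.filtfilt(b_lo, a_lo, ecg))

    power = np.diff(band) ** 2                         # rectified slope energy
    power = power / power.max()                        # normalize to [0, 1]
    shannon = -power * np.log(power.clip(min=1e-10))   # Shannon energy per sample

    win = int(0.125 * sampling_rate) + 1               # ~125 ms moving average
    return np.convolve(shannon, np.ones(win) / win, mode="same")

R-peak candidates are then the indices where the envelope's first difference changes from positive to negative, as in the last lines of the body above.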
|
def mad(var, constant=<NUM_LIT:1>):
|
median = np.median(var)<EOL>mad = np.median(np.abs(var - median))<EOL>mad = mad*constant<EOL>return(mad)<EOL>
|
Median Absolute Deviation: a "robust" version of standard deviation.
Parameters
----------
var : list or ndarray
Value array.
constant : float
Scale factor. Use 1.4826 for results similar to R's default.
Returns
----------
mad : float
The MAD.
Example
----------
>>> import neurokit as nk
>>> mad = nk.mad([2, 8, 7, 5, 4, 12, 5, 1])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
References
-----------
- https://en.wikipedia.org/wiki/Median_absolute_deviation
|
f10904:m0
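A worked version of the docstring's example, to make the definition concrete (with constant=1):

import numpy as np

x = np.array([2, 8, 7, 5, 4, 12, 5, 1])
median = np.median(x)                    # 5.0
mad = np.median(np.abs(x - median))      # deviations 3, 3, 2, 0, 1, 7, 0, 4 -> MAD = 2.5
# With constant=1.4826 (consistency with the SD of a normal distribution): 2.5 * 1.4826 ≈ 3.71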
|
def z_score(raw_scores, center=True, scale=True):
|
df = pd.DataFrame(raw_scores)<EOL>mean = df.mean(axis=<NUM_LIT:0>)<EOL>sd = df.std(axis=<NUM_LIT:0>)<EOL>z_scores = (df - mean)/sd<EOL>return(z_scores)<EOL>
|
Transform an array, series or list into Z scores (scaled and centered scores).
Parameters
----------
raw_scores : list, ndarray or pandas.Series
The raw scores to standardize.
center : bool
Center the array (mean = 0).
scale : bool
Scale the array (SD = 1).
Returns
----------
z_scores : pandas.DataFrame
The normalized scores.
Example
----------
>>> import neurokit as nk
>>>
>>> nk.z_score([3, 1, 2, 4, 6])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
|
f10904:m1
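The transformation is simply (x - mean) / sd, applied column-wise; note that pandas' .std() uses the sample standard deviation (ddof=1). A worked example on the docstring's data:

import pandas as pd

x = pd.Series([3, 1, 2, 4, 6])
z = (x - x.mean()) / x.std()
# mean = 3.2, sd ≈ 1.92 -> z ≈ [-0.10, -1.14, -0.62, 0.42, 1.46]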
|
def find_outliers(data, treshold=<NUM_LIT>):
|
outliers = []<EOL>mean = np.mean(data)<EOL>std = np.std(data)<EOL>for i in data:<EOL><INDENT>if abs(i - mean)/std < treshold:<EOL><INDENT>outliers.append(False)<EOL><DEDENT>else:<EOL><INDENT>outliers.append(True)<EOL><DEDENT><DEDENT>outliers = np.array(outliers)<EOL>return (outliers)<EOL>
|
Identify outliers (abnormal values) using the standard deviation.
Parameters
----------
data : list or ndarray
Data array.
treshold : float
Maximum deviation (in terms of standard deviations). Rule of thumb for a Gaussian distribution: 2.58 = rejecting 1%, 2.33 = rejecting 2%, 1.96 = 5% and 1.28 = rejecting 10%.
Returns
----------
outliers : ndarray
A list of True/False with True being the outliers.
Example
----------
>>> import neurokit as nk
>>> outliers = nk.find_outliers([1, 2, 1, 5, 666, 4, 1 ,3, 5])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
|
f10904:m2
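Concretely, a point is flagged when its absolute deviation from the mean, divided by the standard deviation, reaches the threshold. Worked on the docstring's example (np.std uses the population SD, ddof=0, matching the function above):

import numpy as np

data = np.array([1, 2, 1, 5, 666, 4, 1, 3, 5])
z = np.abs(data - data.mean()) / data.std()
outliers = z >= 2.58
# Only 666 lies more than 2.58 standard deviations from the mean (z ≈ 2.83)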
|
def normal_range(mean, sd, treshold=<NUM_LIT>):
|
bottom = mean - sd*treshold<EOL>top = mean + sd*treshold<EOL>return(bottom, top)<EOL>
|
Returns the bottom and top limits of a normal distribution portion based on a threshold.
Parameters
----------
mean : float
Mean of the distribution.
sd : float
Standard deviation of the distribution.
treshold : float
Maximum deviation (in terms of standard deviations). Rule of thumb for a Gaussian distribution: 2.58 = keeping 99%, 2.33 = keeping 98%, 1.96 = 95% and 1.28 = keeping 90%.
Returns
----------
(bottom, top) : tuple
Lower and higher range.
Example
----------
>>> import neurokit as nk
>>> bottom, top = nk.normal_range(mean=100, sd=15, treshold=2)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
|
f10904:m3
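A worked instance of the docstring's example, spelling out the arithmetic:

bottom, top = 100 - 15 * 2, 100 + 15 * 2    # nk.normal_range(mean=100, sd=15, treshold=2)
# -> (70, 130)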
|
def find_following_duplicates(array):
|
array = array[:]<EOL>uniques = []<EOL>for i in range(len(array)):<EOL><INDENT>if i == <NUM_LIT:0>:<EOL><INDENT>uniques.append(True)<EOL><DEDENT>else:<EOL><INDENT>if array[i] == array[i-<NUM_LIT:1>]:<EOL><INDENT>uniques.append(False)<EOL><DEDENT>else:<EOL><INDENT>uniques.append(True)<EOL><DEDENT><DEDENT><DEDENT>return(uniques)<EOL>
|
Find duplicates that immediately follow one another.
Parameters
----------
array : list or ndarray
A list containing duplicates.
Returns
----------
uniques : list
A list containing True for each unique and False for following duplicates.
Example
----------
>>> import neurokit as nk
>>> mylist = ["a","a","b","a","a","a","c","c","b","b"]
>>> uniques = nk.find_following_duplicates(mylist)
>>> indices = np.where(uniques) # Find indices of uniques
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
|
f10904:m4
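A vectorized equivalent (assuming the input is a NumPy array) marks an element as unique when it differs from its immediate predecessor:

import numpy as np

array = np.array(["a", "a", "b", "a", "a", "a", "c", "c", "b", "b"])
uniques = np.concatenate([[True], array[1:] != array[:-1]])
# -> [ True False  True  True False False  True False  True False]
indices = np.where(uniques)[0]    # positions where a new value starts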
|
def find_closest_in_list(number, array, direction="<STR_LIT>", strictly=False):
|
if direction == "<STR_LIT>":<EOL><INDENT>closest = min(array, key=lambda x:abs(x-number))<EOL><DEDENT>if direction == "<STR_LIT>":<EOL><INDENT>if strictly is True:<EOL><INDENT>closest = max(x for x in array if x < number)<EOL><DEDENT>else:<EOL><INDENT>closest = max(x for x in array if x <= number)<EOL><DEDENT><DEDENT>if direction == "<STR_LIT>":<EOL><INDENT>if strictly is True:<EOL><INDENT>closest = min(filter(lambda x: x > number, array))<EOL><DEDENT>else:<EOL><INDENT>closest = min(filter(lambda x: x >= number, array))<EOL><DEDENT><DEDENT>return(closest)<EOL>
|
Find the number in the array closest to a given number.
Parameters
----------
number : float
The number.
array : list
The list to look in.
direction : str
"both" for smaller or greater, "greater" for only greater numbers and "smaller" for the closest smaller.
strictly : bool
True for strictly greater or smaller (excluding equality), False to also accept equal values.
Returns
----------
closest : int
The closest number in the array.
Example
----------
>>> import neurokit as nk
>>> nk.find_closest_in_list(1.8, [3, 5, 6, 1, 2])
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
|
f10904:m5
|
def plot_polarbar(scores, labels=None, labels_size=<NUM_LIT:15>, colors="<STR_LIT:default>", distribution_means=None, distribution_sds=None, treshold=<NUM_LIT>, fig_size=(<NUM_LIT:15>, <NUM_LIT:15>)):
|
<EOL>if isinstance(scores, dict):<EOL><INDENT>if labels is None:<EOL><INDENT>labels = list(scores.keys())<EOL><DEDENT>try:<EOL><INDENT>scores = [scores[key] for key in labels]<EOL><DEDENT>except KeyError:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT><DEDENT>if colors == "<STR_LIT:default>":<EOL><INDENT>if len(scores) < <NUM_LIT:9>:<EOL><INDENT>colors = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>","<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>else:<EOL><INDENT>colors = None<EOL><DEDENT><DEDENT>if labels is None:<EOL><INDENT>labels = range(len(scores))<EOL><DEDENT>N = len(scores)<EOL>theta = np.linspace(<NUM_LIT:0.0>, -<NUM_LIT:2> * np.pi, N, endpoint=False)<EOL>width = <NUM_LIT:2> * np.pi / N<EOL>plot = plt.figure(figsize=fig_size)<EOL>layer1 = plot.add_subplot(<NUM_LIT>, projection="<STR_LIT>")<EOL>bars1 = layer1.bar(theta+np.pi/len(scores), scores, width=width, bottom=<NUM_LIT:0.0>)<EOL>layer1.yaxis.set_ticks(range(<NUM_LIT:11>))<EOL>layer1.yaxis.set_ticklabels([])<EOL>layer1.xaxis.set_ticks(theta+np.pi/len(scores))<EOL>layer1.xaxis.set_ticklabels(labels, fontsize=labels_size)<EOL>for index, bar in enumerate(bars1):<EOL><INDENT>if colors is not None:<EOL><INDENT>bar.set_facecolor(colors[index])<EOL><DEDENT>bar.set_alpha(<NUM_LIT:1>)<EOL><DEDENT>if distribution_means is not None and distribution_sds is not None:<EOL><INDENT>if isinstance(distribution_means, int):<EOL><INDENT>distribution_means = [distribution_means]*N<EOL><DEDENT>if isinstance(distribution_sds, int):<EOL><INDENT>distribution_sds = [distribution_sds]*N<EOL><DEDENT>bottoms, tops = normal_range(np.array(distribution_means), np.array(distribution_sds), treshold=treshold)<EOL>tops = tops - bottoms<EOL>layer2 = plot.add_subplot(<NUM_LIT>, polar=True)<EOL>bars2 = layer2.bar(theta, tops, width=width, bottom=bottoms, linewidth=<NUM_LIT:0>)<EOL>layer2.xaxis.set_ticks(theta+np.pi/len(scores))<EOL>layer2.xaxis.set_ticklabels(labels, fontsize=labels_size)<EOL>for index, bar in enumerate(bars2):<EOL><INDENT>bar.set_facecolor("<STR_LIT>")<EOL>bar.set_alpha(<NUM_LIT>)<EOL><DEDENT><DEDENT>return(plot)<EOL>
|
Polar bar chart.
Parameters
----------
scores : list or dict
Scores to plot.
labels : list
List of labels to be used for ticks.
labels_size : int
Label's size.
colors : list or str
List of colors or "default".
distribution_means : int or list
List of means to add a range ribbon.
distribution_sds : int or list
List of SDs to add a range ribbon.
treshold : float
Limits of the range ribbon (in terms of standard deviations from the mean).
fig_size : tuple
Figure size.
Returns
----------
plot : matplotlib figure
The figure.
Example
----------
>>> import neurokit as nk
>>> fig = nk.plot_polarbar(scores=[1, 2, 3, 4, 5], labels=["A", "B", "C", "D", "E"], distribution_means=3, distribution_sds=1)
>>> fig.show()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- matplotlib
- numpy
|
f10905:m0
|
def compute_dprime(n_Hit=None, n_Miss=None, n_FA=None, n_CR=None):
|
<EOL>hit_rate = n_Hit/(n_Hit + n_Miss)<EOL>fa_rate = n_FA/(n_FA + n_CR)<EOL>hit_rate_adjusted = (n_Hit+ <NUM_LIT:0.5>)/((n_Hit+ <NUM_LIT:0.5>) + n_Miss + <NUM_LIT:1>)<EOL>fa_rate_adjusted = (n_FA+ <NUM_LIT:0.5>)/((n_FA+ <NUM_LIT:0.5>) + n_CR + <NUM_LIT:1>)<EOL>dprime = scipy.stats.norm.ppf(hit_rate_adjusted) - scipy.stats.norm.ppf(fa_rate_adjusted)<EOL>zhr = scipy.stats.norm.ppf(hit_rate_adjusted)<EOL>zfar = scipy.stats.norm.ppf(fa_rate_adjusted)<EOL>beta = np.exp(-zhr*zhr/<NUM_LIT:2> + zfar*zfar/<NUM_LIT:2>)<EOL>a = <NUM_LIT:1>/<NUM_LIT:2>+((hit_rate-fa_rate)*(<NUM_LIT:1>+hit_rate-fa_rate) / (<NUM_LIT:4>*hit_rate*(<NUM_LIT:1>-fa_rate)))<EOL>b = <NUM_LIT:1>/<NUM_LIT:2>-((fa_rate-hit_rate)*(<NUM_LIT:1>+fa_rate-hit_rate) / (<NUM_LIT:4>*fa_rate*(<NUM_LIT:1>-hit_rate)))<EOL>if fa_rate > hit_rate:<EOL><INDENT>aprime = b<EOL><DEDENT>elif fa_rate < hit_rate:<EOL><INDENT>aprime = a<EOL><DEDENT>else:<EOL><INDENT>aprime = <NUM_LIT:0.5><EOL><DEDENT>bppd = ((<NUM_LIT:1>-hit_rate)*(<NUM_LIT:1>-fa_rate)-hit_rate*fa_rate) / ((<NUM_LIT:1>-hit_rate)*(<NUM_LIT:1>-fa_rate)+hit_rate*fa_rate)<EOL>c = -(scipy.stats.norm.ppf(hit_rate_adjusted) + scipy.stats.norm.ppf(fa_rate_adjusted))/<NUM_LIT:2><EOL>parameters = dict(dprime=dprime, beta=beta, aprime=aprime, bppd=bppd, c=c)<EOL>return(parameters)<EOL>
|
Computes the d', beta, aprime, b''d and c parameters based on the signal detection theory (SDT). **Feel free to help me expand the documentation of this function with details and interpretation guides.**
Parameters
----------
n_Hit : int
Number of hits.
n_Miss : int
Number of misses.
n_FA : int
Number of false alarms.
n_CR : int
Number of correct rejections.
Returns
----------
parameters : dict
A dictionary with the parameters (see details).
Example
----------
>>> import neurokit as nk
>>>
>>> nk.compute_dprime(n_Hit=7, n_Miss=4, n_FA=6, n_CR=6)
Notes
----------
*Details*
The Signal Detection Theory (often abridged as SDT) is used in very different domains, from psychology (psychophysics, perception, memory) and medical diagnostics (do the symptoms match a known diagnosis, or can they be dismissed as irrelevant?) to statistical decision-making (do the data indicate that the experiment has an effect or not?). It grew out of the development of communications and radar equipment in the first half of the twentieth century and was imported into psychology as an attempt to understand features of human behavior that were not well explained by traditional models. SDT is, indeed, used to analyze data coming from experiments where the task is to categorize ambiguous stimuli which can be generated either by a known process (called the *signal*) or be obtained by chance (called the *noise* in the SDT framework). Based on the number of hits, misses, false alarms and correct rejections, it estimates two main parameters from the experimental data: **d' (d-prime, the discriminability index)** and **C** (a variant of which is called beta). Non-parametric variants are aprime and b''d (bppd). A minimal worked sketch follows the list below.
- **dprime**: The sensitivity index. Indicates the strength of the signal (relative to the noise). More specifically, it is the standardized difference between the means of the Signal Present and Signal Absent distributions.
- **beta**: Response bias index.
- **aprime**: Non-parametric sensitivity index.
- **bppd**: Non-parametric response bias index.
- **c**: Response bias index.
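A minimal worked sketch of the two parametric indices, using the adjusted-rate formulas visible in the function body (scipy only; counts match the example above):
>>> import scipy.stats
>>> n_Hit, n_Miss, n_FA, n_CR = 7, 4, 6, 6
>>> # Adjusted rates avoid infinite z-scores when a raw rate is 0 or 1
>>> hit_rate_adjusted = (n_Hit + 0.5) / ((n_Hit + 0.5) + n_Miss + 1)
>>> fa_rate_adjusted = (n_FA + 0.5) / ((n_FA + 0.5) + n_CR + 1)
>>> zhr = scipy.stats.norm.ppf(hit_rate_adjusted)
>>> zfar = scipy.stats.norm.ppf(fa_rate_adjusted)
>>> dprime = zhr - zfar            # sensitivity: separation of signal and noise distributions
>>> c = -(zhr + zfar) / 2          # criterion: overall response bias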
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- scipy
*See Also*
- `neuropsychology <https://www.rdocumentation.org/packages/neuropsychology/topics/dprime>`_
- http://lindeloev.net/calculating-d-in-python-and-php/
|
f10907:m0
|
def compute_BMI(height, weight, age, sex):
|
<EOL>height = height/<NUM_LIT:100><EOL>bmi = {}<EOL>bmi["<STR_LIT>"] = weight/(height**<NUM_LIT:2>)<EOL>bmi["<STR_LIT>"] = <NUM_LIT>*weight/height**<NUM_LIT><EOL>if bmi["<STR_LIT>"] < <NUM_LIT:15>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:15> < bmi["<STR_LIT>"] < <NUM_LIT:16>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:16> < bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> < bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> < bmi["<STR_LIT>"] < <NUM_LIT:30>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:30> < bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> < bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if bmi["<STR_LIT>"] > <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if sex.lower() == "<STR_LIT:m>":<EOL><INDENT>sex = <NUM_LIT:1><EOL><DEDENT>else:<EOL><INDENT>sex = <NUM_LIT:0><EOL><DEDENT>if age <= <NUM_LIT:15>:<EOL><INDENT>bmi["<STR_LIT>"] = <NUM_LIT>*bmi["<STR_LIT>"]-<NUM_LIT>*age-<NUM_LIT>*sex+<NUM_LIT><EOL><DEDENT>else:<EOL><INDENT>bmi["<STR_LIT>"] = <NUM_LIT>*bmi["<STR_LIT>"] + <NUM_LIT>*age-<NUM_LIT>*sex-<NUM_LIT><EOL><DEDENT>if sex == <NUM_LIT:1>:<EOL><INDENT>if bmi["<STR_LIT>"] < <NUM_LIT:2>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:2> <= bmi["<STR_LIT>"] < <NUM_LIT:6>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:6> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT:30>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if bmi["<STR_LIT>"] >= <NUM_LIT:30>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if bmi["<STR_LIT>"] < <NUM_LIT:10>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT:10> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if <NUM_LIT> <= bmi["<STR_LIT>"] < <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT>if bmi["<STR_LIT>"] >= <NUM_LIT>:<EOL><INDENT>bmi["<STR_LIT>"] = "<STR_LIT>"<EOL><DEDENT><DEDENT>return(bmi)<EOL>
|
Returns the traditional BMI, the 'new' Body Mass Index and estimates the Body Fat Percentage (BFP; Deurenberg et al., 1991).
Parameters
----------
height : float
Height in cm.
weight : float
Weight in kg.
age : float
Age in years.
sex : str
"m" or "f".
Returns
----------
bmi : dict
dict containing values and their interpretations.
Example
----------
>>> import neurokit as nk
>>>
>>> nk.compute_BMI(height=166, weight=54, age=22, sex="f")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*See Also*
- https://people.maths.ox.ac.uk/trefethen/bmi.html
References
-----------
- Deurenberg, P., Andreoli, A., Borg, P., & Kukkonen-Harjula, K. (2001). The validity of predicted body fat percentage from body mass index and from impedance in samples of five European populations. European Journal of Clinical Nutrition, 55(11), 973.
- Deurenberg, P., Weststrate, J. A., & Seidell, J. C. (1991). Body mass index as a measure of body fatness: age-and sex-specific prediction formulas. British journal of nutrition, 65(02), 105-114.
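A minimal sketch of the underlying formulas as recalled from the cited sources (Trefethen's "new BMI" exponent and the Deurenberg et al., 1991 adult equation); the exact constants used inside the function are not visible above, so treat these as illustrative:
>>> height_m, weight, age, sex = 1.66, 54, 22, 0       # sex: 1 = male, 0 = female
>>> bmi = weight / height_m ** 2                       # traditional BMI
>>> bmi_new = 1.3 * weight / height_m ** 2.5           # Trefethen's proposed index
>>> bfp = 1.2 * bmi + 0.23 * age - 10.8 * sex - 5.4    # Deurenberg (1991) adult body fat %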
|
f10907:m1
|
def compute_interoceptive_accuracy(nbeats_real, nbeats_reported):
|
<EOL>if isinstance(nbeats_real, list):<EOL><INDENT>nbeats_real = np.array(nbeats_real)<EOL>nbeats_reported = np.array(nbeats_reported)<EOL><DEDENT>accuracy = <NUM_LIT:1> - (abs(nbeats_real-nbeats_reported))/((nbeats_real+nbeats_reported)/<NUM_LIT:2>)<EOL>return(accuracy)<EOL>
|
Computes interoceptive accuracy according to Garfinkel et al., (2015).
Parameters
----------
nbeats_real : int or list
Real number of heartbeats.
nbeats_reported : int or list
Reported number of heartbeats.
Returns
----------
accuracy : float or list
Objective accuracy in detecting internal bodily sensations. It is the central construct underpinning other interoceptive measures (Garfinkel et al., 2015).
Example
----------
>>> import neurokit as nk
>>>
>>> nk.compute_interoceptive_accuracy(5, 3)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
References
-----------
- Garfinkel, S. N., Seth, A. K., Barrett, A. B., Suzuki, K., & Critchley, H. D. (2015). Knowing your own heart: distinguishing interoceptive accuracy from interoceptive awareness. Biological psychology, 104, 65-74.
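A minimal worked sketch of the formula used in the function body, with the value expected for the example above:
>>> nbeats_real, nbeats_reported = 5, 3
>>> accuracy = 1 - abs(nbeats_real - nbeats_reported) / ((nbeats_real + nbeats_reported) / 2)
>>> accuracy
0.5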
|
f10907:m2
|
def __init__(self, signal=[<NUM_LIT:0>, <NUM_LIT:100>], treshold=<NUM_LIT>, burn=<NUM_LIT:5>, stop_n_inversions=False, prior_signal=[], prior_response=[]):
|
self.treshold = treshold<EOL>self.signal_min = np.min(signal)<EOL>self.signal_max = np.max(signal)<EOL>self.signal_range = self.signal_max - self.signal_min<EOL>if len(signal) == <NUM_LIT:2>:<EOL><INDENT>self.signal = pd.DataFrame({"<STR_LIT>":np.linspace(self.signal_min, self.signal_max, <NUM_LIT:1000>)})<EOL><DEDENT>else:<EOL><INDENT>self.signal = pd.DataFrame({"<STR_LIT>": signal})<EOL><DEDENT>self.next_value = np.nan<EOL>self.data = np.nan<EOL>self.stop_n_inversions = stop_n_inversions<EOL>self.prior_signal = prior_signal<EOL>self.prior_response = prior_response<EOL>if isinstance(burn, int):<EOL><INDENT>self.burn_n = burn<EOL>self.burn = list(np.round(np.linspace(<NUM_LIT:0>, <NUM_LIT:100>, burn), <NUM_LIT:2>))<EOL><DEDENT>else:<EOL><INDENT>self.burn_n = len(burn)<EOL>self.burn = list(burn)<EOL><DEDENT>self.X = pd.DataFrame({"<STR_LIT>":prior_signal})<EOL>self.y = np.array(prior_response)<EOL>self.model = np.nan<EOL>
|
Staircase procedure handler to find a treshold. For now, it relies on a GLM-based (likelihood) method.
Parameters
----------
signal : list
Either list with min or max or range of possible signal values.
treshold : int or list
Treshold (between 0 and 1) to look for.
burn : int or list
Signal values to try at the beginning. If int, then it computes n equally spaced values.
stop_n_inversions : False or int
Stop generating new signal values after n inversions.
prior_signal : int or list
Range of signal values used as prior.
prior_response : int or list
Range of response values used as prior.
Example
----------
>>> # Let's imagine a perception task designed to find the treshold of
>>> # signal at which the participant detects the stimulus at 50% chance.
>>> # The signal ranges from 0 to 100. We set priors that at 100, the
>>> # stim is detected (1) and at 0, not detected (0).
>>>
>>> import neurokit as nk
>>> staircase = nk.staircase(signal=np.linspace(0, 100, 25),
>>> treshold=0.50,
>>> burn=5,
>>> stop_n_inversions=False,
>>> prior_signal=[0, 100],
>>> prior_response=[0, 1])
>>>
>>>
>>>
>>> # Run the experiment
>>> for trial in range(50):
>>> signal = staircase.predict_next_value()
>>> if signal != "stop":
>>> # Simulate response
>>> if signal > 50:
>>> response = 1
>>> else:
>>> response = 0
>>> staircase.add_response(response=response, value=signal)
>>>
>>> # Get data
>>> staircase.diagnostic_plot()
>>> data = staircase.get_data()
>>>
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- numpy
- pandas
- sklearn
|
f10907:c0:m0
|
def add_response(self, response, value):
|
if value != "<STR_LIT>":<EOL><INDENT>self.X = pd.concat([self.X, pd.DataFrame({"<STR_LIT>":[value]})])<EOL>self.y = np.array(list(self.y) + [response])<EOL>if len(set(list(self.y))) > <NUM_LIT:1>:<EOL><INDENT>self.model = self.fit_model(self.X , self.y)<EOL><DEDENT><DEDENT>
|
Add response to staircase.
Parameters
----------
response : int or bool
0 or 1.
value : int or float
Signal corresponding to response.
|
f10907:c0:m3
|
def save_nk_object(obj, filename="<STR_LIT:file>", path="<STR_LIT>", extension="<STR_LIT>", compress=False, compatibility=-<NUM_LIT:1>):
|
if compress is True:<EOL><INDENT>with gzip.open(path + filename + "<STR_LIT:.>" + extension, '<STR_LIT:wb>') as name:<EOL><INDENT>pickle.dump(obj, name, protocol=compatibility)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>with open(path + filename + "<STR_LIT:.>" + extension, '<STR_LIT:wb>') as name:<EOL><INDENT>pickle.dump(obj, name, protocol=compatibility)<EOL><DEDENT><DEDENT>
|
Save whatever python object to a pickled file.
Parameters
----------
obj : object
Any Python object (list, dict, ...).
filename : str
File's name.
path : str
File's path.
extension : str
File's extension. Default "nk" but can be whatever.
compress: bool
Enable compression using gzip.
compatibility : int
See :func:`pickle.dump`.
Example
----------
>>> import neurokit as nk
>>> obj = [1, 2]
>>> nk.save_nk_object(obj, filename="myobject")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pickle
- gzip
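A hypothetical round-trip with compression enabled (the file name and extension are arbitrary placeholders):
>>> import neurokit as nk
>>> obj = {"a": [1, 2, 3]}
>>> nk.save_nk_object(obj, filename="myobject", compress=True)  # written with gzip + pickle
>>> loaded = nk.read_nk_object("myobject.nk")  # read_nk_object should fall back to gzip when needed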
|
f10908:m0
|
def read_nk_object(filename, path="<STR_LIT>"):
|
filename = path + filename<EOL>try:<EOL><INDENT>with open(filename, '<STR_LIT:rb>') as name:<EOL><INDENT>file = pickle.load(name)<EOL><DEDENT><DEDENT>except pickle.UnpicklingError:<EOL><INDENT>with gzip.open(filename, '<STR_LIT:rb>') as name:<EOL><INDENT>file = pickle.load(name)<EOL><DEDENT><DEDENT>except ModuleNotFoundError: <EOL><INDENT>try:<EOL><INDENT>file = pd.read_pickle(filename)<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return(file)<EOL>
|
Read a pickled file.
Parameters
----------
filename : str
Full file's name (with extension).
path : str
File's path.
Example
----------
>>> import neurokit as nk
>>> obj = [1, 2]
>>> nk.save_nk_object(obj, filename="myobject")
>>> loaded_obj = nk.read_nk_object("myobject.nk")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pickle
- gzip
|
f10908:m1
|
def find_creation_date(path):
|
if platform.system() == '<STR_LIT>':<EOL><INDENT>return(os.path.getctime(path))<EOL><DEDENT>else:<EOL><INDENT>stat = os.stat(path)<EOL>try:<EOL><INDENT>return(stat.st_birthtime)<EOL><DEDENT>except AttributeError:<EOL><INDENT>print("<STR_LIT>")<EOL>return(stat.st_mtime)<EOL><DEDENT><DEDENT>
|
Try to get the date that a file was created, falling back to when it was last modified if that's not possible.
Parameters
----------
path : str
File's path.
Returns
----------
creation_date : float
Timestamp of file creation (falls back to the last modification time when the creation time is unavailable).
Example
----------
>>> import neurokit as nk
>>> import datetime
>>>
>>> creation_date = nk.find_creation_date(file)
>>> creation_date = datetime.datetime.fromtimestamp(creation_date)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Mark Amery
*Dependencies*
- platform
- os
*See Also*
- http://stackoverflow.com/a/39501288/1709587
|
f10908:m2
|
def reset(self):
|
self.clock = builtin_time.clock()<EOL>
|
Reset the clock of the Time object.
Parameters
----------
None
Returns
----------
None
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neuropsydia_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
|
f10910:c0:m1
|
def get(self, reset=True):
|
t = (builtin_time.clock()-self.clock)*<NUM_LIT:1000><EOL>if reset is True:<EOL><INDENT>self.reset()<EOL><DEDENT>return(t)<EOL>
|
Get time since last initialisation / reset.
Parameters
----------
reset : bool, optional
Should the clock be reset after returning time?
Returns
----------
float
Time passed in milliseconds.
Example
----------
>>> import neurokit as nk
>>> time_passed_since_neurobox_loading = nk.time.get()
>>> nk.time.reset()
>>> time_passed_since_reset = nk.time.get()
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- time
|
f10910:c0:m2
|
def eeg_add_channel(raw, channel, sync_index_eeg=<NUM_LIT:0>, sync_index_channel=<NUM_LIT:0>, channel_type=None, channel_name=None):
|
if channel_name is None:<EOL><INDENT>if isinstance(channel, pd.core.series.Series):<EOL><INDENT>if channel.name is not None:<EOL><INDENT>channel_name = channel.name<EOL><DEDENT>else:<EOL><INDENT>channel_name = "<STR_LIT>"<EOL><DEDENT><DEDENT>else:<EOL><INDENT>channel_name = "<STR_LIT>"<EOL><DEDENT><DEDENT>diff = sync_index_channel - sync_index_eeg<EOL>if diff > <NUM_LIT:0>:<EOL><INDENT>channel = list(channel)[diff:len(channel)]<EOL>channel = channel + [np.nan]*diff<EOL><DEDENT>if diff < <NUM_LIT:0>:<EOL><INDENT>channel = [np.nan]*diff + list(channel)<EOL>channel = list(channel)[<NUM_LIT:0>:len(channel)]<EOL><DEDENT>if len(channel) < len(raw):<EOL><INDENT>channel = list(channel) + [np.nan]*(len(raw)-len(channel))<EOL><DEDENT>else:<EOL><INDENT>channel = list(channel)[<NUM_LIT:0>:len(raw)] <EOL><DEDENT>info = mne.create_info([channel_name], raw.info["<STR_LIT>"], ch_types=channel_type)<EOL>channel = mne.io.RawArray([channel], info)<EOL>raw.add_channels([channel], force_update_info=True)<EOL>return(raw)<EOL>
|
Add a channel to an mne Raw m/eeg object. It synchronizes the channel with the EEG data using the two sync indices, then appends it as a new channel.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel : list or numpy.array
The channel to be added.
sync_index_eeg : int or list
An index, in the raw data, by which to align the two inputs.
sync_index_channel : int or list
An index, in the channel to add, by which to align the two inputs.
channel_type : str
Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'.
Returns
----------
raw : mne.io.Raw
Raw data in FIF format.
Example
----------
>>> import neurokit as nk
>>> event_index_in_eeg = 42
>>> event_index_in_ecg = 666
>>> raw = nk.eeg_add_channel(raw, ecg, sync_index_eeg=event_index_in_eeg, sync_index_channel=event_index_in_ecg, channel_type="ecg")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne: http://martinos.org/mne/dev/index.html
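The synchronization itself is plain index arithmetic; a standalone sketch of the shift-and-pad logic on a hypothetical list (independent of mne):
>>> import numpy as np
>>> channel = list(range(10))                         # external signal to add
>>> sync_index_eeg, sync_index_channel = 2, 5
>>> diff = sync_index_channel - sync_index_eeg        # event occurs 3 samples later in the channel
>>> if diff > 0:
...     channel = channel[diff:] + [np.nan] * diff    # shift left, pad the end with NaNs
>>> channel[:4]
[3, 4, 5, 6]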
|
f10912:m0
|
def eeg_select_channels(raw, channel_names):
|
if isinstance(channel_names, list) is False:<EOL><INDENT>channel_names = [channel_names]<EOL><DEDENT>channels, time_index = raw.copy().pick_channels(channel_names)[:]<EOL>if len(channel_names) > <NUM_LIT:1>:<EOL><INDENT>channels = pd.DataFrame(channels.T, columns=channel_names)<EOL><DEDENT>else:<EOL><INDENT>channels = pd.Series(channels[<NUM_LIT:0>])<EOL>channels.name = channel_names[<NUM_LIT:0>]<EOL><DEDENT>return(channels)<EOL>
|
Select one or several channels by name and returns them in a dataframe.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
channel_names : str or list
Channel's name(s).
Returns
----------
channels : pd.DataFrame or pd.Series
Selected channel(s): a DataFrame when several channels are requested, a Series for a single channel.
Example
----------
>>> import neurokit as nk
>>> channels = nk.eeg_select_channels(raw, "TP7")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- mne
*See Also*
- mne package: http://martinos.org/mne/dev/index.html
|
f10912:m1
|
def eeg_select_electrodes(eeg, include="<STR_LIT:all>", exclude=None, hemisphere="<STR_LIT>", central=True):
|
<EOL>eeg = eeg.copy().pick_types(meg=False, eeg=True)<EOL>channel_list = eeg.ch_names<EOL>if include == "<STR_LIT:all>":<EOL><INDENT>electrodes = channel_list<EOL><DEDENT>elif isinstance(include, str):<EOL><INDENT>electrodes = [s for s in channel_list if include in s]<EOL><DEDENT>elif isinstance(include, list):<EOL><INDENT>electrodes = []<EOL>for i in include:<EOL><INDENT>electrodes += [s for s in channel_list if i in s]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>if exclude is not None:<EOL><INDENT>if isinstance(exclude, str):<EOL><INDENT>to_remove = [s for s in channel_list if exclude in s]<EOL>electrodes = [s for s in electrodes if s not in to_remove]<EOL><DEDENT>elif isinstance(exclude, list):<EOL><INDENT>to_remove = []<EOL>for i in exclude:<EOL><INDENT>to_remove += [s for s in channel_list if i in s]<EOL><DEDENT>electrodes = [s for s in electrodes if s not in to_remove]<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT><DEDENT>if hemisphere != "<STR_LIT>":<EOL><INDENT>if hemisphere.lower() == "<STR_LIT:left>" or hemisphere.lower() == "<STR_LIT:l>":<EOL><INDENT>hemi = [s for s in electrodes if len(re.findall(r'<STR_LIT>', s)) > <NUM_LIT:0> and int(re.findall(r'<STR_LIT>', s)[<NUM_LIT:0>])%<NUM_LIT:2> > <NUM_LIT:0>]<EOL><DEDENT>elif hemisphere.lower() == "<STR_LIT:right>" or hemisphere.lower() == "<STR_LIT:r>":<EOL><INDENT>hemi = [s for s in electrodes if len(re.findall(r'<STR_LIT>', s)) > <NUM_LIT:0> and int(re.findall(r'<STR_LIT>', s)[<NUM_LIT:0>])%<NUM_LIT:2> == <NUM_LIT:0>]<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>if central is True:<EOL><INDENT>hemi += [s for s in electrodes if '<STR_LIT:z>' in s]<EOL><DEDENT>electrodes = hemi<EOL><DEDENT>return(electrodes)<EOL>
|
Returns electrodes/sensors names of selected region (according to a 10-20 EEG montage).
Parameters
----------
eeg : mne.Raw or mne.Epochs
EEG data.
include : str or list
Sensor area to include.
exclude : str or list or None
Sensor area to exclude.
hemisphere : str
Hemisphere(s) to select: "both", "left" or "right".
central : bool
Also include the central (midline, 'z') electrodes.
Returns
----------
electrodes : list
List of electrodes/sensors corresponding to the selected area.
Example
----------
>>> import neurokit as nk
>>> nk.eeg_select_electrodes(eeg, include="F", exclude="C")
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
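The hemisphere filter relies on the 10-20 naming convention (odd electrode numbers on the left, even on the right, 'z' on the midline); a standalone sketch of that rule on hypothetical channel names:
>>> import re
>>> channels = ["F3", "F4", "Fz", "C3", "C4", "Cz"]
>>> left = [ch for ch in channels
...         if re.findall(r"\d+", ch) and int(re.findall(r"\d+", ch)[0]) % 2 == 1]
>>> left
['F3', 'C3']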
|
f10912:m2
|
def eeg_create_mne_events(onsets, conditions=None):
|
event_id = {}<EOL>if conditions is None:<EOL><INDENT>conditions = ["<STR_LIT>"] * len(onsets)<EOL><DEDENT>if len(conditions) != len(onsets):<EOL><INDENT>print("<STR_LIT>")<EOL>return()<EOL><DEDENT>event_names = list(set(conditions))<EOL><INDENT>event_index = [<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:3>, <NUM_LIT:4>, <NUM_LIT:5>, <NUM_LIT:32>, <NUM_LIT:64>, <NUM_LIT>]<EOL><DEDENT>event_index = list(range(len(event_names)))<EOL>for i in enumerate(event_names):<EOL><INDENT>conditions = [event_index[i[<NUM_LIT:0>]] if x==i[<NUM_LIT:1>] else x for x in conditions]<EOL>event_id[i[<NUM_LIT:1>]] = event_index[i[<NUM_LIT:0>]]<EOL><DEDENT>events = np.array([onsets, [<NUM_LIT:0>]*len(onsets), conditions]).T<EOL>return(events, event_id)<EOL>
|
Create MNE compatible events.
Parameters
----------
onsets : list or array
Events onsets.
conditions : list
A list of equal length containing the stimuli types/conditions.
Returns
----------
(events, event_id) : tuple
MNE-formatted events and a dictionary mapping condition names to event codes.
Example
----------
>>> import neurokit as nk
>>> events, event_id = nk.eeg_create_mne_events(events_onset, conditions)
Authors
----------
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
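MNE expects events as an integer array with three columns (sample onset, previous value, event code); a standalone sketch of the construction performed inside the function, with hypothetical onsets and conditions:
>>> import numpy as np
>>> onsets = [100, 250, 400]
>>> conditions = ["A", "B", "A"]
>>> event_id = {name: code for code, name in enumerate(sorted(set(conditions)))}
>>> codes = [event_id[cond] for cond in conditions]
>>> events = np.array([onsets, [0] * len(onsets), codes]).T   # shape (n_events, 3)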
|
f10912:m3
|
def eeg_add_events(raw, events_channel, conditions=None, treshold="<STR_LIT>", cut="<STR_LIT>", time_index=None, number="<STR_LIT:all>", after=<NUM_LIT:0>, before=None, min_duration=<NUM_LIT:1>):
|
<EOL>if isinstance(events_channel, str):<EOL><INDENT>try:<EOL><INDENT>events_channel = eeg_select_channels(raw, events_channel)<EOL><DEDENT>except:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT><DEDENT>events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration)<EOL>events, event_id = eeg_create_mne_events(events["<STR_LIT>"], conditions)<EOL>raw.add_events(events)<EOL>return(raw, events, event_id)<EOL>
|
Find events on a channel, convert them into an MNE compatible format, and add them to the raw data.
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
events_channel : str or array
Name of the trigger channel if in the raw, or array of equal length if externally supplied.
conditions : list
List containing the stimuli types/conditions.
treshold : float
The treshold value by which to select the events. If "auto", takes the value between the max and the min.
cut : str
"higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower".
time_index : array or None
Add a corresponding datetime index; an additional array with the onsets as datetimes will be returned.
number : str or int
How many events should it select.
after : int
If number different than "all", then at what time should it start selecting the events.
before : int
If number different than "all", before what time should it select the events.
min_duration : int
The minimum duration of an event (in timepoints).
Returns
----------
(raw, events, event_id) : tuple
The raw file with events, the mne-formatted events and event_id.
Example
----------
>>> import neurokit as nk
>>>
>>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Dependencies*
- pandas
*See Also*
- mne: http://martinos.org/mne/dev/index.html
References
-----------
- None
|
f10912:m4
|
def eeg_to_all_evokeds(all_epochs, conditions=None):
|
if conditions is None:<EOL><INDENT>conditions = {}<EOL>for participant, epochs in all_epochs.items():<EOL><INDENT>conditions.update(epochs.event_id)<EOL><DEDENT><DEDENT>all_evokeds = {}<EOL>for participant, epochs in all_epochs.items():<EOL><INDENT>evokeds = {}<EOL>for cond in conditions:<EOL><INDENT>try:<EOL><INDENT>evokeds[cond] = epochs[cond].average()<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>all_evokeds[participant] = evokeds<EOL><DEDENT>return(all_evokeds)<EOL>
|
Convert all_epochs to all_evokeds.
DOCS INCOMPLETE :(
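A hypothetical call, assuming `all_epochs` is a dict mapping participant IDs to mne.Epochs objects (the keys shown are placeholders):
>>> import neurokit as nk
>>> all_evokeds = nk.eeg_to_all_evokeds(all_epochs)
>>> all_evokeds["participant1"]["condition1"]  # mne.Evoked: average of that condition's epochs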
|
f10912:m5
|
def eeg_to_df(eeg, index=None, include="<STR_LIT:all>", exclude=None, hemisphere="<STR_LIT>", central=True):
|
if isinstance(eeg, mne.Epochs):<EOL><INDENT>data = {}<EOL>if index is None:<EOL><INDENT>index = range(len(eeg))<EOL><DEDENT>for epoch_index, epoch in zip(index, eeg.get_data()):<EOL><INDENT>epoch = pd.DataFrame(epoch.T)<EOL>epoch.columns = eeg.ch_names<EOL>epoch.index = eeg.times<EOL>selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central)<EOL>data[epoch_index] = epoch[selection]<EOL><DEDENT><DEDENT>else: <EOL><INDENT>data = eeg.get_data().T<EOL>data = pd.DataFrame(data)<EOL>data.columns = eeg.ch_names<EOL>data.index = eeg.times<EOL><DEDENT>return(data)<EOL>
|
Convert mne Raw or Epochs object to dataframe or dict of dataframes.
DOCS INCOMPLETE :(
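A hypothetical usage sketch; for mne.Epochs input the function returns a dict of per-epoch DataFrames (channels as columns, times as index), otherwise a single DataFrame:
>>> import neurokit as nk
>>> data = nk.eeg_to_df(epochs, include="all")
>>> data[0].head()  # first epoch, one column per selected channel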
|
f10912:m6
|
def eeg_erp(eeg, times=None, index=None, include="<STR_LIT:all>", exclude=None, hemisphere="<STR_LIT>", central=True, verbose=True, names="<STR_LIT>", method="<STR_LIT>"):
|
erp = {}<EOL>data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)<EOL>for epoch_index, epoch in data.items():<EOL><INDENT>if isinstance(times, list):<EOL><INDENT>if isinstance(times[<NUM_LIT:0>], list):<EOL><INDENT>values = {}<EOL>for window_index, window in enumerate(times):<EOL><INDENT>df = epoch[window[<NUM_LIT:0>]:window[<NUM_LIT:1>]]<EOL>value = df.mean().mean()<EOL>values[window_index] = value<EOL><DEDENT>erp[epoch_index] = values<EOL><DEDENT>else:<EOL><INDENT>df = epoch[times[<NUM_LIT:0>]:times[<NUM_LIT:1>]]<EOL>value = df.mean().mean()<EOL>erp[epoch_index] = [value]<EOL><DEDENT><DEDENT>elif isinstance(times, tuple):<EOL><INDENT>values = {}<EOL>for window_index, window in enumerate(times):<EOL><INDENT>df = epoch[window[<NUM_LIT:0>]:window[<NUM_LIT:1>]]<EOL>if method == "<STR_LIT>":<EOL><INDENT>value = df.mean().max()<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>value = df.mean().min()<EOL><DEDENT>else:<EOL><INDENT>value = df.mean().mean()<EOL><DEDENT>values[window_index] = value<EOL><DEDENT>erp[epoch_index] = values<EOL><DEDENT>else:<EOL><INDENT>df = epoch[<NUM_LIT:0>:]<EOL>value = df.mean().mean()<EOL>erp[epoch_index] = [value]<EOL><DEDENT><DEDENT>erp = pd.DataFrame.from_dict(erp, orient="<STR_LIT:index>")<EOL>if isinstance(names, str):<EOL><INDENT>names = [names]<EOL><DEDENT>if len(names) == len(erp.columns):<EOL><INDENT>erp.columns = names<EOL><DEDENT>return(erp)<EOL>
|
DOCS INCOMPLETE :(
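A hypothetical usage sketch, assuming `epochs` is an mne.Epochs object; `times` is a [start, end] window (or a list/tuple of windows) over which the selected channels are averaged:
>>> import neurokit as nk
>>> erp = nk.eeg_erp(epochs, times=[0.3, 0.5], include="P", names="P300")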
|
f10914:m0
|
def plot_eeg_erp(all_epochs, conditions=None, times=None, include="<STR_LIT:all>", exclude=None, hemisphere="<STR_LIT>", central=True, name=None, colors=None, gfp=False, ci=<NUM_LIT>, ci_alpha=<NUM_LIT>, invert_y=False, linewidth=<NUM_LIT:1>, linestyle="<STR_LIT:->", filter_hfreq=None):
|
<EOL>all_epochs_current = all_epochs.copy()<EOL>if (filter_hfreq is not None) and (isinstance(filter_hfreq, int)):<EOL><INDENT>for participant, epochs in all_epochs_current.items():<EOL><INDENT>all_epochs_current[participant] = epochs.savgol_filter(filter_hfreq, copy=True)<EOL><DEDENT><DEDENT>if isinstance(times, list) and len(times) == <NUM_LIT:2>:<EOL><INDENT>for participant, epochs in all_epochs_current.items():<EOL><INDENT>all_epochs_current[participant] = epochs.copy().crop(times[<NUM_LIT:0>], times[<NUM_LIT:1>])<EOL><DEDENT><DEDENT>all_evokeds = eeg_to_all_evokeds(all_epochs_current, conditions=conditions)<EOL>data = {}<EOL>for participant, epochs in all_evokeds.items():<EOL><INDENT>for condition, epoch in epochs.items():<EOL><INDENT>data[condition] = []<EOL><DEDENT><DEDENT>for participant, epochs in all_evokeds.items():<EOL><INDENT>for condition, epoch in epochs.items():<EOL><INDENT>data[condition].append(epoch)<EOL><DEDENT><DEDENT>conditions = list(data.keys())<EOL>if isinstance(linestyle, str):<EOL><INDENT>linestyle = [linestyle] * len(conditions)<EOL><DEDENT>elif isinstance(linestyle, list) and len(linestyle) >= len(conditions):<EOL><INDENT>pass<EOL><DEDENT>elif isinstance(linestyle, dict) and len(linestyle.keys()) >= len(conditions):<EOL><INDENT>linestyle = [linestyle[cond] for cond in conditions]<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>if isinstance(colors, str):<EOL><INDENT>colors = {condition: colors for condition in conditions}<EOL><DEDENT>elif isinstance(colors, list) and len(colors) >= len(conditions):<EOL><INDENT>colors= {condition: colors[index] for index, condition in enumerate(conditions)}<EOL><DEDENT>elif isinstance(colors, dict) and len(colors.keys()) >= len(conditions):<EOL><INDENT>pass<EOL><DEDENT>elif colors is None:<EOL><INDENT>pass<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>styles = {}<EOL>for index, condition in enumerate(conditions):<EOL><INDENT>styles[condition] = {"<STR_LIT>": linewidth, "<STR_LIT>": linestyle[index]}<EOL><DEDENT>picks = mne.pick_types(epoch.info, eeg=True, selection=eeg_select_electrodes(epoch, include=include, exclude=exclude, hemisphere=hemisphere, central=central))<EOL>try:<EOL><INDENT>plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y, ci_alpha=ci_alpha)<EOL><DEDENT>except TypeError:<EOL><INDENT>print("<STR_LIT>")<EOL>plot = mne.viz.plot_compare_evokeds(data, picks=picks, colors=colors, styles=styles, title=name, gfp=gfp, ci=ci, invert_y=invert_y)<EOL><DEDENT>return(plot)<EOL>
|
DOCS INCOMPLETE :(
|
f10914:m1
|
def plot_eeg_erp_topo(all_epochs, colors=None):
|
all_evokeds = eeg_to_all_evokeds(all_epochs)<EOL>data = {}<EOL>for participant, epochs in all_evokeds.items():<EOL><INDENT>for cond, epoch in epochs.items():<EOL><INDENT>data[cond] = []<EOL><DEDENT><DEDENT>for participant, epochs in all_evokeds.items():<EOL><INDENT>for cond, epoch in epochs.items():<EOL><INDENT>data[cond].append(epoch)<EOL><DEDENT><DEDENT>if colors is not None:<EOL><INDENT>color_list = []<EOL><DEDENT>else:<EOL><INDENT>color_list = None<EOL><DEDENT>evokeds = []<EOL>for condition, evoked in data.items():<EOL><INDENT>grand_average = mne.grand_average(evoked)<EOL>grand_average.comment = condition<EOL>evokeds += [grand_average]<EOL>if colors is not None:<EOL><INDENT>color_list.append(colors[condition])<EOL><DEDENT><DEDENT>plot = mne.viz.plot_evoked_topo(evokeds, background_color="<STR_LIT:w>", color=color_list)<EOL>return(plot)<EOL>
|
Plot a topographic butterfly plot of the grand-averaged evoked responses.
DOCS INCOMPLETE :(
|
f10914:m2
|
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="<STR_LIT:all>", exclude=None, hemisphere="<STR_LIT>", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="<STR_LIT>"):
|
data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)<EOL>if isinstance(data, dict) is False:<EOL><INDENT>data = {<NUM_LIT:0>: data}<EOL><DEDENT>if isinstance(times, tuple):<EOL><INDENT>times = list(times)<EOL><DEDENT>if isinstance(times, list):<EOL><INDENT>if isinstance(times[<NUM_LIT:0>], list) is False:<EOL><INDENT>times = [times]<EOL><DEDENT><DEDENT>else:<EOL><INDENT>times = [[<NUM_LIT:0>, None]]<EOL><DEDENT>if isinstance(names, str):<EOL><INDENT>prefix = [names] * len(times)<EOL>if len(times) > <NUM_LIT:1>:<EOL><INDENT>for time_index, time_window in enumerate(times):<EOL><INDENT>prefix[time_index] = prefix[time_index] + "<STR_LIT>" %(time_window[<NUM_LIT:0>], time_window[<NUM_LIT:1>])<EOL><DEDENT><DEDENT><DEDENT>else:<EOL><INDENT>prefix = names<EOL><DEDENT>complexity_all = pd.DataFrame()<EOL>for time_index, time_window in enumerate(times):<EOL><INDENT>if len(times) > <NUM_LIT:1> and verbose is True:<EOL><INDENT>print("<STR_LIT>" + str(time_window) + "<STR_LIT:/>" + str(len(times)))<EOL><DEDENT>complexity_features = {}<EOL>index = <NUM_LIT:0><EOL>for epoch_index, epoch in data.items():<EOL><INDENT>if len(times) == <NUM_LIT:1> and verbose is True:<EOL><INDENT>print("<STR_LIT>" + str(round(index/len(data.items())*<NUM_LIT:100>, <NUM_LIT:2>)) + "<STR_LIT:%>")<EOL><DEDENT>index +=<NUM_LIT:1><EOL>df = epoch[time_window[<NUM_LIT:0>]:time_window[<NUM_LIT:1>]].copy()<EOL>complexity_features[epoch_index] = {}<EOL>for channel in df:<EOL><INDENT>signal = df[channel].values<EOL>features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e)<EOL>for key, feature in features.items():<EOL><INDENT>if key in complexity_features[epoch_index].keys():<EOL><INDENT>complexity_features[epoch_index][key].append(feature)<EOL><DEDENT>else:<EOL><INDENT>complexity_features[epoch_index][key] = [feature]<EOL><DEDENT><DEDENT><DEDENT><DEDENT>for epoch_index, epoch in complexity_features.items():<EOL><INDENT>for feature in epoch:<EOL><INDENT>complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean()<EOL><DEDENT><DEDENT>complexity_features = pd.DataFrame.from_dict(complexity_features, orient="<STR_LIT:index>")<EOL>complexity_features.columns = [prefix[time_index] + "<STR_LIT:_>" + s for s in complexity_features.columns]<EOL>complexity_all = pd.concat([complexity_all, complexity_features], axis=<NUM_LIT:1>)<EOL><DEDENT>return(complexity_all)<EOL>
|
Compute complexity indices of epochs or raw object.
DOCS INCOMPLETE :(
|
f10915:m0
|
def eeg_name_frequencies(freqs):
|
freqs = list(freqs)<EOL>freqs_names = []<EOL>for freq in freqs:<EOL><INDENT>if freq < <NUM_LIT:1>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:3>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:7>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:9>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:12>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:30>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>elif freq <= <NUM_LIT:50>:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT>else:<EOL><INDENT>freqs_names.append("<STR_LIT>")<EOL><DEDENT><DEDENT>return(freqs_names)<EOL>
|
Name frequencies according to standard classifications.
Parameters
----------
freqs : list or numpy.array
list of floats containing frequencies to classify.
Returns
----------
freqs_names : list
Named frequencies
Example
----------
>>> import neurokit as nk
>>>
>>> nk.eeg_name_frequencies([0.5, 1.5, 3, 5, 7, 15])
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
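A standalone sketch of the classification rule listed in the Details above (the band labels here are illustrative; the strings returned by the function may differ):
>>> def name_frequency(freq):
...     if freq < 1:
...         return "UltraLow"
...     for upper, name in [(3, "Delta"), (7, "Theta"), (9, "Alpha1"), (12, "Alpha2"),
...                         (17, "Beta1"), (30, "Beta2"), (40, "Gamma1"), (50, "Gamma2")]:
...         if freq <= upper:
...             return name
...     return "UltraHigh"
>>> [name_frequency(f) for f in [0.5, 1.5, 3, 5, 7, 15]]
['UltraLow', 'Delta', 'Delta', 'Theta', 'Theta', 'Beta1']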
|
f10917:m0
|
def eeg_psd(raw, sensors_include="<STR_LIT:all>", sensors_exclude=None, fmin=<NUM_LIT>, fmax=<NUM_LIT>, method="<STR_LIT>", proj=False):
|
picks = mne.pick_types(raw.info, include=eeg_select_electrodes(raw, include=sensors_include, exclude=sensors_exclude), exclude="<STR_LIT>")<EOL>if method == "<STR_LIT>":<EOL><INDENT>psds, freqs = mne.time_frequency.psd_multitaper(raw,<EOL>fmin=fmin,<EOL>fmax=fmax,<EOL>low_bias=True,<EOL>proj=proj,<EOL>picks=picks)<EOL><DEDENT>else:<EOL><INDENT>psds, freqs = mne.time_frequency.psd_welch(raw,<EOL>fmin=fmin,<EOL>fmax=fmax,<EOL>proj=proj,<EOL>picks=picks)<EOL><DEDENT>tf = pd.DataFrame(psds)<EOL>tf.columns = eeg_name_frequencies(freqs)<EOL>tf = tf.mean(axis=<NUM_LIT:0>)<EOL>mean_psd = {}<EOL>for freq in ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]:<EOL><INDENT>mean_psd[freq] = tf[[freq in s for s in tf.index]].mean()<EOL><DEDENT>mean_psd = pd.DataFrame.from_dict(mean_psd, orient="<STR_LIT:index>").T<EOL>return(mean_psd)<EOL>
|
Compute Power-Spectral Density (PSD).
Parameters
----------
raw : mne.io.Raw
Raw EEG data.
sensors_include : str
Sensor area to include. See :func:`neurokit.eeg_select_electrodes()`.
sensors_exclude : str
Sensor area to exclude. See :func:`neurokit.eeg_select_electrodes()`.
fmin : float
Min frequency of interest.
fmax: float
Max frequency of interest.
method : str
"multitaper" or "welch".
proj : bool
add projectors.
Returns
----------
mean_psd : pandas.DataFrame
Averaged PSDs.
Example
----------
>>> import neurokit as nk
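A hypothetical call, assuming `raw` is an mne.io.Raw object (the frequency limits shown are illustrative):
>>> mean_psd = nk.eeg_psd(raw, sensors_include="all", fmin=1, fmax=50, method="multitaper")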
Notes
----------
*Details*
- Delta: 1-3Hz
- Theta: 4-7Hz
- Alpha1: 8-9Hz
- Alpha2: 10-12Hz
- Beta1: 13-17Hz
- Beta2: 18-30Hz
- Gamma1: 31-40Hz
- Gamma2: 41-50Hz
- Mu: 8-13Hz
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
References
------------
- None
|
f10917:m1
|
def eeg_create_frequency_bands(bands="<STR_LIT:all>", step=<NUM_LIT:1>):
|
if bands == "<STR_LIT:all>" or bands == "<STR_LIT>":<EOL><INDENT>bands = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in bands:<EOL><INDENT>bands.remove("<STR_LIT>")<EOL>bands += ["<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in bands:<EOL><INDENT>bands.remove("<STR_LIT>")<EOL>bands += ["<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>if "<STR_LIT>" in bands:<EOL><INDENT>bands.remove("<STR_LIT>")<EOL>bands += ["<STR_LIT>", "<STR_LIT>"]<EOL><DEDENT>frequencies = {}<EOL>for band in bands:<EOL><INDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT:1>, <NUM_LIT:3>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT:4>, <NUM_LIT:7>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT:8>, <NUM_LIT:9>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT:10>, <NUM_LIT:12>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT>, <NUM_LIT>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT>, <NUM_LIT:30>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT>, <NUM_LIT>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT>, <NUM_LIT:50>+<NUM_LIT:0.1>, step)<EOL><DEDENT>if band == "<STR_LIT>":<EOL><INDENT>frequencies[band] = np.arange(<NUM_LIT:8>, <NUM_LIT>+<NUM_LIT:0.1>, step)<EOL><DEDENT><DEDENT>return(frequencies)<EOL>
|
Create a dictionary of frequency values (in Hz, spaced by `step`) for the requested bands:
Delta: 1-3Hz
Theta: 4-7Hz
Alpha1: 8-9Hz
Alpha2: 10-12Hz
Beta1: 13-17Hz
Beta2: 18-30Hz
Gamma1: 31-40Hz
Gamma2: 41-50Hz
Mu: 8-13Hz
|
f10917:m2
|
def eeg_gfp_peaks(data, gflp_method='<STR_LIT>', smoothing=False, smoothing_window=<NUM_LIT:100>, peak_method="<STR_LIT>", normalize=False):
|
ntf = data.shape[<NUM_LIT:0>]<EOL>gfp_curve = np.zeros((ntf, ))<EOL>if gflp_method == '<STR_LIT>':<EOL><INDENT>for i in range(ntf):<EOL><INDENT>x = data[i,:]<EOL>gfp_curve[i] = np.sqrt(np.sum((x - x.mean())**<NUM_LIT:2> / len(x) ))<EOL><DEDENT><DEDENT>elif gflp_method == '<STR_LIT>':<EOL><INDENT>for i in range(ntf):<EOL><INDENT>x = data[i,:]<EOL>gfp_curve[i] = np.sum(np.abs(x - x.mean())) / len(x)<EOL><DEDENT><DEDENT>if peak_method == "<STR_LIT>":<EOL><INDENT>gfp_peaks = np.asarray(scipy.signal.find_peaks_cwt(gfp_curve, np.arange(<NUM_LIT:1>, <NUM_LIT:10>))) <EOL><DEDENT>else:<EOL><INDENT>gfp_peaks = scipy.signal.argrelmax(gfp_curve)[<NUM_LIT:0>]<EOL><DEDENT>if smoothing == '<STR_LIT>':<EOL><INDENT>gfp_curve = scipy.signal.convolve(gfp_curve, scipy.signal.hamming(smoothing_window) )<EOL><DEDENT>elif smoothing == '<STR_LIT>':<EOL><INDENT>gfp_curve = scipy.signal.convolve(gfp_curve, scipy.signal.hanning(smoothing_window) )<EOL><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT>if normalize is True:<EOL><INDENT>for i in range(len(data)):<EOL><INDENT>data[i,:] = data[i,:]/gfp_curve[i]<EOL><DEDENT><DEDENT>return (data, gfp_curve, gfp_peaks)<EOL>
|
The Global Field Power (GFP) is a scalar measure of the strength of the scalp potential field and is calculated as the standard deviation of all electrodes at a given time point (Lehmann and Skrandies, 1980; Michel et al., 1993; Murray et al., 2008; Brunet et al., 2011). Between two GFP troughs, the strength of the potential field varies but the topography remains generally stable. The local maxima of the GFP are thus the best representative of a given microstate in terms of signal-to-noise ratio (Pascual-Marqui et al., 1995), corresponding to moments of high global neuronal synchronization (Skrandies, 2007).
Parameters
----------
data (ndarray):
Array containing values for all time frames and channels.
Dimension: number of time frames x number of channels
gflp_method ({'GFPL1', 'GFPL2'}):
`GFPL1` : use L1-Norm to compute GFP peaks
`GFPL2` : use L2-Norm to compute GFP peaks
smoothing ({'hamming', 'hanning'}):
`hamming` : use hamming window to smooth
`hanning` : use hanning window to smooth
smoothing_window = int
about 100
peak_method = str
"relative" or "wavelet"
Returns
----------
(data, gfp_curve, gfp_peaks) : tuple
The (optionally normalized) data, the GFP curve, and the indices of the GFP peaks.
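A standalone sketch of the L2 (GFPL2) computation on a hypothetical (time frames x channels) array, matching the formula used in the function body:
>>> import numpy as np
>>> import scipy.signal
>>> data = np.random.randn(1000, 32)                   # 1000 time frames, 32 channels
>>> gfp_curve = np.sqrt(((data - data.mean(axis=1, keepdims=True)) ** 2).mean(axis=1))
>>> gfp_peaks = scipy.signal.argrelmax(gfp_curve)[0]   # local maxima ("relative" peak method)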
|
f10918:m0
|
def eeg_gfp(raws, gflp_method="<STR_LIT>", scale=True, normalize=True, smoothing=None):
|
<EOL><INDENT>if isinstance(raws, str):<EOL><INDENT>raws = load_object(filename=raws)<EOL><DEDENT><DEDENT>gfp = {}<EOL>for participant in raws:<EOL><INDENT>gfp[participant] = {}<EOL>for run in raws[participant]:<EOL><INDENT>gfp[participant][run] = {}<EOL>raw = raws[participant][run].copy()<EOL>if True in set(["<STR_LIT>" in ch for ch in raw.info["<STR_LIT>"]]):<EOL><INDENT>meg = True<EOL>eeg = False<EOL><DEDENT>else:<EOL><INDENT>meg = False<EOL>eeg = True<EOL><DEDENT>try:<EOL><INDENT>gfp[participant][run]["<STR_LIT>"] = np.array(raw.copy().pick_types(meg=False, eeg=False, ecg=True).to_data_frame())<EOL><DEDENT>except ValueError:<EOL><INDENT>gfp[participant][run]["<STR_LIT>"] = np.nan<EOL><DEDENT>data = raw.copy().pick_types(meg=meg, eeg=eeg)<EOL>gfp[participant][run]["<STR_LIT>"] = data.info<EOL>gfp[participant][run]["<STR_LIT>"] = data.info["<STR_LIT>"]<EOL>gfp[participant][run]["<STR_LIT>"] = len(data) / data.info["<STR_LIT>"]<EOL>data = np.array(data.to_data_frame())<EOL>data, gfp_curve, gfp_peaks = eeg_gfp_peaks(data,<EOL>gflp_method=gflp_method,<EOL>smoothing=smoothing,<EOL>smoothing_window=<NUM_LIT:100>,<EOL>peak_method="<STR_LIT>",<EOL>normalize=normalize)<EOL>gfp[participant][run]["<STR_LIT>"] = gfp_peaks<EOL>data_peaks = data[gfp_peaks]<EOL>if scale is True:<EOL><INDENT>gfp[participant][run]["<STR_LIT:data>"] = z_score(data_peaks)<EOL><DEDENT>else:<EOL><INDENT>gfp[participant][run]["<STR_LIT:data>"] = data_peaks<EOL><DEDENT>gfp[participant][run]["<STR_LIT>"] = scale<EOL>gfp[participant][run]["<STR_LIT>"] = normalize<EOL>gfp[participant][run]["<STR_LIT>"] = smoothing<EOL><DEDENT><DEDENT>return(gfp)<EOL>
|
Run the GFP analysis.
|
f10918:m1
|
def eeg_microstates_clustering(data, n_microstates=<NUM_LIT:4>, clustering_method="<STR_LIT>", n_jobs=<NUM_LIT:1>, n_init=<NUM_LIT>, occurence_rejection_treshold=<NUM_LIT>, max_refitting=<NUM_LIT:5>, verbose=True):
|
<EOL>training_set = data.copy()<EOL>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>if clustering_method == "<STR_LIT>":<EOL><INDENT>algorithm = sklearn.cluster.KMeans(init='<STR_LIT>', n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)<EOL><DEDENT>elif clustering_method == "<STR_LIT>":<EOL><INDENT>algorithm = sklearn.cluster.SpectralClustering(n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs)<EOL><DEDENT>elif clustering_method == "<STR_LIT>":<EOL><INDENT>algorithm = sklearn.cluster.AgglomerativeClustering(n_clusters=n_microstates, linkage="<STR_LIT>")<EOL><DEDENT>elif clustering_method == "<STR_LIT>":<EOL><INDENT>algorithm = sklearn.cluster.DBSCAN(min_samples=<NUM_LIT:100>)<EOL><DEDENT>elif clustering_method == "<STR_LIT>":<EOL><INDENT>algorithm = sklearn.cluster.AffinityPropagation(damping=<NUM_LIT:0.5>)<EOL><DEDENT>else:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>refitting = <NUM_LIT:0> <EOL>good_fit_achieved = False<EOL>while good_fit_achieved is False:<EOL><INDENT>good_fit_achieved = True<EOL>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>algorithm.fit(training_set)<EOL>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>predicted = algorithm.fit_predict(training_set)<EOL>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>occurences = dict(collections.Counter(predicted))<EOL>masks = [np.array([True]*len(training_set))]<EOL>for microstate in occurences:<EOL><INDENT>if occurences[microstate] < len(data)*occurence_rejection_treshold:<EOL><INDENT>good_fit_achieved = False<EOL>refitting += <NUM_LIT:1> <EOL>print("<STR_LIT>" + str(refitting) + "<STR_LIT>")<EOL>masks.append(predicted!=microstate)<EOL><DEDENT><DEDENT>mask = np.all(masks, axis=<NUM_LIT:0>)<EOL>training_set = training_set[mask]<EOL><DEDENT>return(algorithm)<EOL>
|
Fit the clustering algorithm.
|
f10918:m2
|
def eeg_microstates_features(results, method, ecg=True, nonlinearity=True, verbose=True):
|
for participant in results:<EOL><INDENT>for run in results[participant]:<EOL><INDENT>if verbose is True:<EOL><INDENT>print("<STR_LIT>" + participant)<EOL><DEDENT>occurences = dict(collections.Counter(results[participant][run]["<STR_LIT>"]))<EOL>if nonlinearity is True:<EOL><INDENT>results[participant][run]["<STR_LIT>"] = complexity(results[participant][run]["<STR_LIT>"])<EOL>
|
Compute statistics and features of the microstates.
|
f10918:m3
|
def eeg_microstates(gfp, n_microstates=<NUM_LIT:4>, clustering_method="<STR_LIT>", n_jobs=<NUM_LIT:1>, n_init=<NUM_LIT>, occurence_rejection_treshold=<NUM_LIT>, max_refitting=<NUM_LIT:5>, clustering_metrics=True, good_fit_treshold=<NUM_LIT:0>, feature_reduction_method="<STR_LIT>", n_features=<NUM_LIT:32>, nonlinearity=True, verbose=True):
|
<EOL><INDENT>if verbose is True:<EOL><INDENT>print("""<STR_LIT>""")<EOL><DEDENT>method = {}<EOL>data_all = []<EOL>for participant in results:<EOL><INDENT>for run in results[participant]:<EOL><INDENT>data_all.append(results[participant][run]["<STR_LIT:data>"])<EOL>method["<STR_LIT>"] = results[participant][run]["<STR_LIT>"]<EOL>method["<STR_LIT>"] = results[participant][run]["<STR_LIT>"]<EOL>method["<STR_LIT>"] = results[participant][run]["<STR_LIT>"]<EOL><DEDENT><DEDENT>data_all = np.concatenate(data_all, axis=<NUM_LIT:0>)<EOL>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>data_processed = feature_reduction(data_all,<EOL>method=feature_reduction_method,<EOL>n_features=n_features)<EOL>try:<EOL><INDENT>algorithm = eeg_microstates_clustering(data=data_processed,<EOL>n_microstates=n_microstates,<EOL>clustering_method=clustering_method,<EOL>n_jobs=n_jobs,<EOL>n_init=n_init,<EOL>occurence_rejection_treshold=occurence_rejection_treshold,<EOL>max_refitting=max_refitting,<EOL>verbose=verbose)<EOL><DEDENT>except:<EOL><INDENT>print("<STR_LIT>")<EOL>return(data_processed, method)<EOL><DEDENT>if verbose is True:<EOL><INDENT>print("<STR_LIT>")<EOL><DEDENT>method["<STR_LIT>"] = algorithm<EOL>method["<STR_LIT>"] = results[participant][run]["<STR_LIT>"] <EOL>method["<STR_LIT>"] = feature_reduction_method<EOL>method["<STR_LIT>"] = n_features<EOL>method["<STR_LIT:data>"] = data_all<EOL>method["<STR_LIT>"] = clustering_method<EOL>method["<STR_LIT>"] = len(data_all)<EOL>if verbose is True:<EOL><INDENT>print("""<STR_LIT>""")<EOL><DEDENT>predicted = algorithm.fit_predict(data_processed)<EOL>method["<STR_LIT>"] = predicted<EOL>if clustering_metrics is True:<EOL><INDENT>if verbose is True:<EOL><INDENT>print("""<STR_LIT>""")<EOL><DEDENT>method["<STR_LIT>"] = sklearn.metrics.silhouette_samples(data_processed, predicted) <EOL>method["<STR_LIT>"] = sklearn.metrics.calinski_harabaz_score(data_processed, predicted)<EOL>method["<STR_LIT>"] = np.where(method["<STR_LIT>"]>good_fit_treshold, predicted, "<STR_LIT>")<EOL>method["<STR_LIT>"] = dict(collections.Counter(method["<STR_LIT>"]))["<STR_LIT>"]/len(predicted)<EOL><DEDENT>if verbose is True:<EOL><INDENT>print("""<STR_LIT>""")<EOL><DEDENT>index_participant = []<EOL>index_run = []<EOL>for participant in results:<EOL><INDENT>for run in results[participant]:<EOL><INDENT>index_participant += [participant] * len(results[participant][run]["<STR_LIT:data>"])<EOL>index_run += [run] * len(results[participant][run]["<STR_LIT:data>"])<EOL><DEDENT><DEDENT>for participant in results:<EOL><INDENT>for run in results[participant]:<EOL><INDENT>mask1 = np.array(index_participant)==participant<EOL>mask2 = np.array(index_run)==run<EOL>mask = np.all([mask1, mask2], axis=<NUM_LIT:0>)<EOL>results[participant][run]["<STR_LIT>"] = method["<STR_LIT>"][mask]<EOL><DEDENT><DEDENT>results = eeg_microstates_features(results, method, nonlinearity=nonlinearity, verbose=verbose)<EOL>if verbose is True:<EOL><INDENT>print("""<STR_LIT>""")<EOL><DEDENT>return(results, method)<EOL><DEDENT>
|
Run the full microstates analysis.
Parameters
----------
gfp = dict
Output of the GFP step (see :func:`eeg_gfp`): a two-level dictionary (participant, then run) containing the GFP-peak data and its metadata.
n_microstates = int
Number of microstates (clusters) to extract.
clustering_method = str
Clustering algorithm used to group the GFP-peak maps.
feature_reduction_method = str
Feature reduction applied before clustering (see :func:`feature_reduction`).
n_features = int
Number of features to keep after reduction.
Returns
----------
(results, method) : tuple
The per-participant/run results enriched with the predicted microstates, and a dict describing the data, the fitted clustering algorithm and the analysis settings.
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
- sklearn
- numpy
|
f10918:m4
|
def eeg_microstates_plot(method, path="<STR_LIT>", extension="<STR_LIT>", show_sensors_position=False, show_sensors_name=False, plot=True, save=True, dpi=<NUM_LIT>, contours=<NUM_LIT:0>, colorbar=False, separate=False):
|
<EOL>figures = []<EOL>names = []<EOL>try:<EOL><INDENT>microstates = method["<STR_LIT>"]<EOL><DEDENT>except KeyError:<EOL><INDENT>microstates = method["<STR_LIT>"]<EOL><DEDENT>for microstate in set(microstates):<EOL><INDENT>if microstate != "<STR_LIT>":<EOL><INDENT>values = np.mean(method["<STR_LIT:data>"][np.where(microstates == microstate)], axis=<NUM_LIT:0>)<EOL>values = np.array(values, ndmin=<NUM_LIT:2>).T<EOL>evoked = mne.EvokedArray(values, method["<STR_LIT>"], <NUM_LIT:0>)<EOL>fig = evoked.plot_topomap(times=<NUM_LIT:0>, title=microstate, size=<NUM_LIT:6>, contours=contours, time_format="<STR_LIT>", show=plot, colorbar=colorbar, show_names=show_sensors_name, sensors=show_sensors_position)<EOL>figures.append(fig)<EOL>name = path + "<STR_LIT>" %(microstate, method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], extension)<EOL>fig.savefig(name, dpi=dpi)<EOL>names.append(name)<EOL><DEDENT><DEDENT>if save is True:<EOL><INDENT>image_template = PIL.Image.open(names[<NUM_LIT:0>])<EOL>X, Y = image_template.size<EOL>image_template.close()<EOL>combined = PIL.Image.new('<STR_LIT>', (int(X*len(set(microstates))/<NUM_LIT:2>), int( Y*len(set(microstates))/<NUM_LIT:2>)))<EOL>fig = <NUM_LIT:0><EOL>for x in np.arange(<NUM_LIT:0>, len(set(microstates))/<NUM_LIT:2>*int(X), int(X)):<EOL><INDENT>for y in np.arange(<NUM_LIT:0>, len(set(microstates))/<NUM_LIT:2>*int(Y), int(Y)):<EOL><INDENT>try:<EOL><INDENT>newfig = PIL.Image.open(names[fig])<EOL>combined.paste(newfig, (int(x), int(y)))<EOL>newfig.close()<EOL><DEDENT>except:<EOL><INDENT>pass<EOL><DEDENT>fig += <NUM_LIT:1><EOL><DEDENT><DEDENT>combined.show()<EOL>combined_name = path + "<STR_LIT>" %(method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], method["<STR_LIT>"], extension)<EOL>combined.save(combined_name)<EOL><DEDENT>if separate is False or save is False:<EOL><INDENT>for name in names:<EOL><INDENT>os.remove(name)<EOL><DEDENT><DEDENT>return(figures)<EOL>
|
Plot the microstates.
|
f10918:m5
|
def eeg_microstates_relabel(method, results, microstates_labels, reverse_microstates=None):
|
microstates = list(method['<STR_LIT>'])<EOL>for index, microstate in enumerate(method['<STR_LIT>']):<EOL><INDENT>if microstate in list(reverse_microstates.keys()):<EOL><INDENT>microstates[index] = reverse_microstates[microstate]<EOL>method["<STR_LIT:data>"][index] = -<NUM_LIT:1>*method["<STR_LIT:data>"][index]<EOL><DEDENT>if microstate in list(microstates_labels.keys()):<EOL><INDENT>microstates[index] = microstates_labels[microstate]<EOL><DEDENT><DEDENT>method['<STR_LIT>'] = np.array(microstates)<EOL>return(results, method)<EOL>
|
Relabel the microstates.
|
f10918:m6
|
def feature_reduction(data, method, n_features):
|
if method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.PCA(n_components=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.cluster.FeatureAgglomeration(n_clusters=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.FastICA(n_components=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.KernelPCA(n_components=n_features, kernel='<STR_LIT>')<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.KernelPCA(n_components=n_features, kernel='<STR_LIT>')<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.SparsePCA(n_components=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>feature_red_method = sklearn.decomposition.IncrementalPCA(n_components=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>elif method == "<STR_LIT>":<EOL><INDENT>if np.min(data) < <NUM_LIT:0>:<EOL><INDENT>data -= np.min(data)<EOL><DEDENT>feature_red_method = sklearn.decomposition.NMF(n_components=n_features)<EOL>data_processed = feature_red_method.fit_transform(data)<EOL><DEDENT>else:<EOL><INDENT>feature_red_method = None<EOL>data_processed = data.copy()<EOL><DEDENT>return(data_processed)<EOL>
|
Feature reduction.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
Dominique Makowski
Dependencies
----------
- sklearn
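A minimal sketch of the PCA branch (the accepted method strings are not shown above, so this only illustrates the fit/transform pattern):
>>> import numpy as np
>>> import sklearn.decomposition
>>> data = np.random.randn(500, 64)                    # e.g. 500 GFP-peak maps x 64 sensors
>>> pca = sklearn.decomposition.PCA(n_components=32)
>>> data_processed = pca.fit_transform(data)           # 500 x 32 reduced feature matrix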
|
f10919:m0
|
def __aiter__(self):
|
return self<EOL>
|
We are our own iterator.
|
f10931:c0:m1
|
@asyncio.coroutine<EOL><INDENT>def __anext__(self):<DEDENT>
|
line = yield from self.readline()<EOL>if line:<EOL><INDENT>return line<EOL><DEDENT>else:<EOL><INDENT>raise StopAsyncIteration<EOL><DEDENT>
|
Simulate normal file iteration.
|
f10931:c0:m2
|
@asyncio.coroutine<EOL>def _open(file, mode='<STR_LIT:r>', buffering=-<NUM_LIT:1>, encoding=None, errors=None, newline=None,<EOL>closefd=True, opener=None, *, loop=None, executor=None):
|
if loop is None:<EOL><INDENT>loop = asyncio.get_event_loop()<EOL><DEDENT>cb = partial(sync_open, file, mode=mode, buffering=buffering,<EOL>encoding=encoding, errors=errors, newline=newline,<EOL>closefd=closefd, opener=opener)<EOL>f = yield from loop.run_in_executor(executor, cb)<EOL>return wrap(f, loop=loop, executor=executor)<EOL>
|
Open an asyncio file.
|
f10934:m1
|
def price_str(raw_price, default=_not_defined, dec_point='<STR_LIT:.>'):
|
def _error_or_default(err_msg):<EOL><INDENT>if default == _not_defined:<EOL><INDENT>raise ValueError(err_msg)<EOL><DEDENT>return default<EOL><DEDENT>if not isinstance(raw_price, str):<EOL><INDENT>return _error_or_default(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(price_type=type(raw_price)))<EOL><DEDENT>price = re.sub('<STR_LIT>', '<STR_LIT>', raw_price)<EOL>cleaned_price = _CLEANED_PRICE_RE.findall(price)<EOL>if len(cleaned_price) == <NUM_LIT:0>:<EOL><INDENT>return _error_or_default(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(price=raw_price))<EOL><DEDENT>if len(cleaned_price) > <NUM_LIT:1>:<EOL><INDENT>return _error_or_default(<EOL>'<STR_LIT>'<EOL>'<STR_LIT>'.format(price=raw_price))<EOL><DEDENT>price = cleaned_price[<NUM_LIT:0>]<EOL>price = price.rstrip('<STR_LIT>')<EOL>sign = '<STR_LIT>'<EOL>if price[<NUM_LIT:0>] in {'<STR_LIT:->', '<STR_LIT:+>'}:<EOL><INDENT>sign, price = price[<NUM_LIT:0>], price[<NUM_LIT:1>:]<EOL>sign = '<STR_LIT:->' if sign == '<STR_LIT:->' else '<STR_LIT>'<EOL><DEDENT>fractional = _FRACTIONAL_PRICE_RE.match(price)<EOL>if fractional:<EOL><INDENT>integer, fraction = fractional.groups()<EOL><DEDENT>else:<EOL><INDENT>integer, fraction = price, '<STR_LIT>'<EOL><DEDENT>integer = re.sub('<STR_LIT>', '<STR_LIT>', integer)<EOL>integer = integer.lstrip('<STR_LIT:0>')<EOL>if integer == '<STR_LIT>':<EOL><INDENT>integer = '<STR_LIT:0>'<EOL><DEDENT>price = sign + integer<EOL>if fraction:<EOL><INDENT>price = '<STR_LIT>'.join((price, dec_point, fraction))<EOL><DEDENT>return price<EOL>
|
Search and clean price value.
Convert a raw price string presented in any localization
into a valid number string with an optional decimal point.
If the raw price does not contain a valid price value or contains
more than one price value, then return the default value.
If the default value is not set, then raise ValueError.
Examples:
12.007 => 12007
00012,33 => 12.33
+1 => 1
- 520.05 => -520.05
1,000,777.5 => 1000777.5
1.777.000,99 => 1777000.99
1 234 567.89 => 1234567.89
99.77.11.000,1 => 997711000.1
NIO5,242 => 5242
Not a MINUS-.45 => 45
42 \t \n => 42
=> <default>
1...2 => <default>
:param str raw_price: string that contains the price value.
:param default: value that will be returned if the raw price is not valid.
:param str dec_point: symbol that separates the integer and fractional parts.
:return: cleaned price string.
:raise ValueError: error if the raw price is not valid and the default value is not set.
|
f10939:m0
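Given the conversion table in the docstring, usage of the cleaner looks like the following. The import path is a placeholder, since the defining module's name is elided in this dump:

from price_cleaning_module import price_str  # hypothetical import path

assert price_str("00012,33") == "12.33"
assert price_str("- 520.05") == "-520.05"
assert price_str("1.777.000,99") == "1777000.99"
assert price_str("1 234 567.89") == "1234567.89"
assert price_str("not a price", default=None) is None  # invalid input falls back to the default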
|
def price_dec(raw_price, default=_not_defined):
|
try:<EOL><INDENT>price = price_str(raw_price)<EOL>return decimal.Decimal(price)<EOL><DEDENT>except ValueError as err:<EOL><INDENT>if default == _not_defined:<EOL><INDENT>raise err<EOL><DEDENT><DEDENT>return default<EOL>
|
Price decimal value from raw string.
Extract the price value from the input raw string and
present it as a Decimal number.
If the raw price does not contain a valid price value or contains
more than one price value, then return the default value.
If the default value is not set, then raise ValueError.
:param str raw_price: string that contains the price value.
:param default: value that will be returned if the raw price is not valid.
:return: Decimal price value.
:raise ValueError: error if the raw price is not valid and the default value is not set.
|
f10939:m1
|
def __init__(self, wd_item_id='<STR_LIT>', new_item=False, data=None,<EOL>mediawiki_api_url='<STR_LIT>',<EOL>sparql_endpoint_url='<STR_LIT>',<EOL>append_value=None, fast_run=False, fast_run_base_filter=None, fast_run_use_refs=False,<EOL>ref_handler=None, global_ref_mode='<STR_LIT>', good_refs=None, keep_good_ref_statements=False,<EOL>search_only=False, item_data=None, user_agent=config['<STR_LIT>'],<EOL>core_props=None, core_prop_match_thresh=<NUM_LIT>):
|
self.core_prop_match_thresh = core_prop_match_thresh<EOL>self.wd_item_id = wd_item_id<EOL>self.new_item = new_item<EOL>self.mediawiki_api_url = mediawiki_api_url<EOL>self.sparql_endpoint_url = sparql_endpoint_url<EOL>self.data = [] if data is None else data<EOL>self.append_value = [] if append_value is None else append_value<EOL>self.fast_run = fast_run<EOL>self.fast_run_base_filter = fast_run_base_filter<EOL>self.fast_run_use_refs = fast_run_use_refs<EOL>self.ref_handler = ref_handler<EOL>self.global_ref_mode = global_ref_mode<EOL>self.good_refs = good_refs<EOL>self.keep_good_ref_statements = keep_good_ref_statements<EOL>self.search_only = search_only<EOL>self.item_data = item_data<EOL>self.user_agent = user_agent<EOL>self.create_new_item = False<EOL>self.wd_json_representation = {}<EOL>self.statements = []<EOL>self.original_statements = []<EOL>self.entity_metadata = {}<EOL>self.fast_run_container = None<EOL>self.require_write = True<EOL>self.sitelinks = dict()<EOL>self.lastrevid = None <EOL>if self.ref_handler:<EOL><INDENT>assert callable(self.ref_handler)<EOL><DEDENT>if self.global_ref_mode == "<STR_LIT>" and self.ref_handler is None:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if (core_props is None) and (self.sparql_endpoint_url not in self.DISTINCT_VALUE_PROPS):<EOL><INDENT>self.get_distinct_value_props(self.sparql_endpoint_url)<EOL><DEDENT>self.core_props = core_props if core_props is not None else self.DISTINCT_VALUE_PROPS[self.sparql_endpoint_url]<EOL>try:<EOL><INDENT>self.mrh = MappingRelationHelper(self.sparql_endpoint_url)<EOL><DEDENT>except Exception as e:<EOL><INDENT>self.mrh = None<EOL>warnings.warn("<STR_LIT>")<EOL><DEDENT>if self.fast_run:<EOL><INDENT>self.init_fastrun()<EOL><DEDENT>if not __debug__:<EOL><INDENT>if self.require_write and self.fast_run:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT>elif not self.require_write and self.fast_run:<EOL><INDENT>print('<STR_LIT>')<EOL><DEDENT><DEDENT>if self.wd_item_id != '<STR_LIT>' and self.create_new_item == True:<EOL><INDENT>raise IDMissingError('<STR_LIT>')<EOL><DEDENT>elif self.new_item == True and len(self.data) > <NUM_LIT:0>:<EOL><INDENT>self.create_new_item = True<EOL>self.__construct_claim_json()<EOL><DEDENT>elif self.wd_item_id and self.require_write:<EOL><INDENT>self.init_data_load()<EOL><DEDENT>elif self.require_write:<EOL><INDENT>self.init_data_load()<EOL><DEDENT>
|
constructor
:param wd_item_id: Wikidata item id
:param new_item: This parameter lets the user indicate if a new item should be created
:type new_item: True or False
:param data: a dictionary with WD property strings as keys and the data which should be written to
a WD item as the property values
:type data: List[WDBaseDataType]
:param append_value: a list of properties where potential existing values should not be overwritten by the data
passed in via the data parameter.
:type append_value: list of property number strings
:param fast_run: True if this item should be run in fastrun mode, otherwise False. Users setting this to True
should also specify the fast_run_base_filter for these item types
:type fast_run: bool
:param fast_run_base_filter: A property value dict determining the Wikidata property and the corresponding value
which should be used as a filter for this item type. Several filter criteria can be specified. The values
can be either Wikidata item QIDs, strings or empty strings if the value should be a variable in SPARQL.
Example: {'P352': '', 'P703': 'Q15978631'} if the basic common type of things this bot runs on is
human proteins (specified by Uniprot IDs (P352) and 'found in taxon' homo sapiens 'Q15978631').
:type fast_run_base_filter: dict
:param fast_run_use_refs: If `True`, fastrun mode will consider references in determining if a statement should
be updated and written to Wikidata. Otherwise, only the value and qualifiers are used. Default: False
:type fast_run_use_refs: bool
:param ref_handler: This parameter defines a function that will manage the reference handling in a custom
manner. This argument should be a function handle that accepts two arguments, the old/current statement
(first argument) and new/proposed/to be written statement (second argument), both of type: a subclass of
WDBaseDataType. The function should return a new item that is the item to be written. The item's value,
properties or qualifiers should not be modified; only its references. This function is also used in fastrun mode.
This will only be used if the ref_mode is set to "CUSTOM".
:type ref_handler: function
:param global_ref_mode: sets the reference handling mode for an item. Five modes are possible: 'STRICT_KEEP'
keeps all references as they are, 'STRICT_KEEP_APPEND' keeps the references as they are and appends
new ones, 'STRICT_OVERWRITE' overwrites all existing references for a given statement, 'KEEP_GOOD' keeps only
references considered good (see good_refs), and 'CUSTOM' will use the function defined in ref_handler
:type global_ref_mode: str of value 'STRICT_KEEP', 'STRICT_KEEP_APPEND', 'STRICT_OVERWRITE', 'KEEP_GOOD', 'CUSTOM'
:param good_refs: This parameter lets the user define blocks of good references. It is a list of dictionaries.
One block is a dictionary with Wikidata properties as keys and potential values as the required value for
a property. There can be arbitrarily many key: value pairs in one reference block.
Example: [{'P248': 'Q905695', 'P352': None, 'P407': None, 'P1476': None, 'P813': None}]
This example contains one good reference block, stated in: Uniprot, Uniprot ID, title of Uniprot entry,
language of work and date when the information has been retrieved. A None type indicates that the value
varies from reference to reference. In this case, only the value for the Wikidata item for the
Uniprot database stays stable over all of these references. Key value pairs work here, as Wikidata
references can hold only one value for one property. The number of good reference blocks is not limited.
This parameter OVERRIDES any other reference mode set!!
:type good_refs: list containing dictionaries.
:param keep_good_ref_statements: Do not delete any statement which has a good reference, either defined in the
good_refs list or by any other referencing mode.
:type keep_good_ref_statements: bool
:param search_only: If this flag is set to True, the data provided will only be used to search for the
corresponding Wikidata item, but no actual data updates will be performed. This is useful if certain states or
values on the target item need to be checked before certain data is written to it. In order to write new
data to the item, the method update() will take data, modify the Wikidata item and a write() call will
then perform the actual write to Wikidata.
:type search_only: bool
:param item_data: A Python JSON object corresponding to the Wikidata item in wd_item_id. This can be used in
conjunction with wd_item_id in order to provide raw data.
:param user_agent: The user agent string to use when making http requests
:type user_agent: str
:param core_props: Core properties are used to retrieve a Wikidata item based on `data` if a `wd_item_id` is
not given. This is a set of PIDs to use. If None, all Wikidata properties with a distinct values
constraint will be used. (see: get_core_props)
:param core_prop_match_thresh: The proportion of core props that must match during retrieval of an item
when the wd_item_id is not specified.
:type core_prop_match_thresh: float
|
f10941:c0:m0
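The constructor signature above matches wikidataintegrator's WDItemEngine; assuming that package, a typical fast-run setup using the docstring's own base-filter example (UniProt ID P352 plus found-in-taxon P703) could look like the sketch below. The UniProt accession is a placeholder value:

from wikidataintegrator import wdi_core

data = [
    wdi_core.WDExternalID(value="P00533", prop_nr="P352"),   # UniProt ID (placeholder accession)
    wdi_core.WDItemID(value="Q15978631", prop_nr="P703"),    # found in taxon: Homo sapiens
]
item = wdi_core.WDItemEngine(
    data=data,
    fast_run=True,
    fast_run_base_filter={"P352": "", "P703": "Q15978631"},
)
print(item.wd_item_id, item.require_write)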
|
@classmethod<EOL><INDENT>def get_distinct_value_props(cls, sparql_endpoint_url='<STR_LIT>'):<DEDENT>
|
pcpid = config['<STR_LIT>']<EOL>dvcqid = config['<STR_LIT>']<EOL>try:<EOL><INDENT>h = WikibaseHelper(sparql_endpoint_url)<EOL>pcpid = h.get_pid(pcpid)<EOL>dvcqid = h.get_qid(dvcqid)<EOL><DEDENT>except Exception:<EOL><INDENT>warnings.warn("<STR_LIT>" +<EOL>"<STR_LIT>" +<EOL>"<STR_LIT>")<EOL>cls.DISTINCT_VALUE_PROPS[sparql_endpoint_url] = set()<EOL>return None<EOL><DEDENT>query = "<STR_LIT>".format(pcpid, dvcqid)<EOL>df = cls.execute_sparql_query(query, endpoint=sparql_endpoint_url, as_dataframe=True)<EOL>if df.empty:<EOL><INDENT>warnings.warn("<STR_LIT>")<EOL>cls.DISTINCT_VALUE_PROPS[sparql_endpoint_url] = set()<EOL>return None<EOL><DEDENT>df.p = df.p.str.rsplit("<STR_LIT:/>", <NUM_LIT:1>).str[-<NUM_LIT:1>]<EOL>cls.DISTINCT_VALUE_PROPS[sparql_endpoint_url] = set(df.p)<EOL>
|
On wikidata, the default core IDs will be the properties with a distinct values constraint
select ?p where {?p wdt:P2302 wd:Q21502410}
See: https://www.wikidata.org/wiki/Help:Property_constraints_portal
https://www.wikidata.org/wiki/Help:Property_constraints_portal/Unique_value
|
f10941:c0:m1
|
def get_wd_entity(self):
|
params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': self.wd_item_id,<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': self.user_agent<EOL>}<EOL>json_data = self.mediawiki_api_call("<STR_LIT:GET>", self.mediawiki_api_url, params=params, headers=headers)<EOL>return self.parse_wd_json(wd_json=json_data['<STR_LIT>'][self.wd_item_id])<EOL>
|
retrieve a WD item in json representation from Wikidata
:rtype: dict
:return: python complex dictionary representation of a json
|
f10941:c0:m4
|
def parse_wd_json(self, wd_json):
|
wd_data = {x: wd_json[x] for x in ('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>') if x in wd_json}<EOL>wd_data['<STR_LIT>'] = dict()<EOL>self.entity_metadata = {x: wd_json[x] for x in wd_json if x not in<EOL>('<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>')}<EOL>self.sitelinks = wd_json.get('<STR_LIT>', dict())<EOL>self.statements = []<EOL>for prop in wd_data['<STR_LIT>']:<EOL><INDENT>for z in wd_data['<STR_LIT>'][prop]:<EOL><INDENT>data_type = [x for x in WDBaseDataType.__subclasses__() if x.DTYPE == z['<STR_LIT>']['<STR_LIT>']][<NUM_LIT:0>]<EOL>statement = data_type.from_json(z)<EOL>self.statements.append(statement)<EOL><DEDENT><DEDENT>self.wd_json_representation = wd_data<EOL>self.original_statements = copy.deepcopy(self.statements)<EOL>return wd_data<EOL>
|
Parses a WD entity json and generates the datatype objects, sets self.wd_json_representation
:param wd_json: the json of a WD entity
:type wd_json: A Python Json representation of a WD item
:return: returns the json representation containing 'labels', 'descriptions', 'claims', 'aliases', 'sitelinks'.
|
f10941:c0:m5
|
@staticmethod<EOL><INDENT>def get_wd_search_results(search_string='<STR_LIT>', mediawiki_api_url='<STR_LIT>',<EOL>user_agent=config['<STR_LIT>'],<EOL>max_results=<NUM_LIT>, language='<STR_LIT>'):<DEDENT>
|
params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': language,<EOL>'<STR_LIT>': search_string,<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': <NUM_LIT:50><EOL>}<EOL>headers = {<EOL>'<STR_LIT>': user_agent<EOL>}<EOL>cont_count = <NUM_LIT:1><EOL>id_list = []<EOL>id_labels = []<EOL>while cont_count > <NUM_LIT:0>:<EOL><INDENT>params.update({'<STR_LIT>': <NUM_LIT:0> if cont_count == <NUM_LIT:1> else cont_count})<EOL>reply = requests.get(mediawiki_api_url, params=params, headers=headers)<EOL>reply.raise_for_status()<EOL>search_results = reply.json()<EOL>if search_results['<STR_LIT:success>'] != <NUM_LIT:1>:<EOL><INDENT>raise WDSearchError('<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>for i in search_results['<STR_LIT>']:<EOL><INDENT>id_list.append(i['<STR_LIT:id>'])<EOL>id_labels.append(i['<STR_LIT:label>'])<EOL><DEDENT><DEDENT>if '<STR_LIT>' not in search_results:<EOL><INDENT>cont_count = <NUM_LIT:0><EOL><DEDENT>else:<EOL><INDENT>cont_count = search_results['<STR_LIT>']<EOL><DEDENT>if cont_count > max_results:<EOL><INDENT>break<EOL><DEDENT><DEDENT>return id_list<EOL>
|
Performs a search in WD for a certain WD search string
:param search_string: a string which should be searched for in WD
:type search_string: str
:param mediawiki_api_url: Specify the mediawiki_api_url.
:type mediawiki_api_url: str
:param user_agent: The user agent string transmitted in the http header
:type user_agent: str
:param max_results: The maximum number of search results returned. Default 500
:type max_results: int
:param language: The language in which to perform the search. Default 'en'
:type language: str
:return: returns a list of QIDs found in the search
|
f10941:c0:m6
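The loop above pages through search results using the API's continuation offset. A standalone sketch against the public MediaWiki action API (wbsearchentities); the literals in the body are anonymized, so the parameter names below follow the documented API rather than the original code:

import requests

def search_entities(search, language="en", max_results=500,
                    api_url="https://www.wikidata.org/w/api.php"):
    params = {"action": "wbsearchentities", "search": search,
              "language": language, "format": "json", "limit": 50, "continue": 0}
    qids = []
    while True:
        reply = requests.get(api_url, params=params)
        reply.raise_for_status()
        result = reply.json()
        qids.extend(hit["id"] for hit in result.get("search", []))
        cont = result.get("search-continue")
        if cont is None or cont > max_results:
            break
        params["continue"] = cont
    return qids

print(search_entities("insulin")[:5])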
|
def get_property_list(self):
|
property_list = set()<EOL>for x in self.statements:<EOL><INDENT>property_list.add(x.get_prop_nr())<EOL><DEDENT>return list(property_list)<EOL>
|
List of properties on the current item
:return: a list of WD property ID strings (Pxxxx).
|
f10941:c0:m7
|
def __select_wd_item(self):
|
qid_list = set()<EOL>conflict_source = {}<EOL>if self.mrh:<EOL><INDENT>exact_qid = self.mrh.mrt_qids['<STR_LIT>']<EOL>mrt_pid = self.mrh.mrt_pid<EOL><DEDENT>else:<EOL><INDENT>exact_qid = "<STR_LIT>"<EOL>mrt_pid = "<STR_LIT>"<EOL><DEDENT>for statement in self.data:<EOL><INDENT>wd_property = statement.get_prop_nr()<EOL>mrt_qualifiers = [q for q in statement.get_qualifiers() if q.get_prop_nr() == mrt_pid]<EOL>if (len(mrt_qualifiers) == <NUM_LIT:1>) and (mrt_qualifiers[<NUM_LIT:0>].get_value() != int(exact_qid[<NUM_LIT:1>:])):<EOL><INDENT>continue<EOL><DEDENT>data_point = statement.get_value()<EOL>if isinstance(data_point, tuple):<EOL><INDENT>data_point = data_point[<NUM_LIT:0>]<EOL><DEDENT>core_props = self.core_props<EOL>if wd_property in core_props:<EOL><INDENT>tmp_qids = set()<EOL>query = statement.sparql_query.format(mrt_pid=mrt_pid, pid=wd_property, value=data_point)<EOL>results = WDItemEngine.execute_sparql_query(query=query, endpoint=self.sparql_endpoint_url)<EOL>for i in results['<STR_LIT>']['<STR_LIT>']:<EOL><INDENT>qid = i['<STR_LIT>']['<STR_LIT:value>'].split('<STR_LIT:/>')[-<NUM_LIT:1>]<EOL>if ('<STR_LIT>' not in i) or ('<STR_LIT>' in i and i['<STR_LIT>']['<STR_LIT:value>'].split('<STR_LIT:/>')[-<NUM_LIT:1>] == exact_qid):<EOL><INDENT>tmp_qids.add(qid)<EOL><DEDENT><DEDENT>qid_list.update(tmp_qids)<EOL>if wd_property in conflict_source:<EOL><INDENT>conflict_source[wd_property].append(tmp_qids)<EOL><DEDENT>else:<EOL><INDENT>conflict_source[wd_property] = [tmp_qids]<EOL><DEDENT>if len(tmp_qids) > <NUM_LIT:1>:<EOL><INDENT>raise ManualInterventionReqException(<EOL>'<STR_LIT>', wd_property, tmp_qids)<EOL><DEDENT><DEDENT><DEDENT>if len(qid_list) == <NUM_LIT:0>:<EOL><INDENT>self.create_new_item = True<EOL>return '<STR_LIT>'<EOL><DEDENT>if not __debug__:<EOL><INDENT>print(qid_list)<EOL><DEDENT>unique_qids = set(qid_list)<EOL>if len(unique_qids) > <NUM_LIT:1>:<EOL><INDENT>raise ManualInterventionReqException('<STR_LIT>',<EOL>conflict_source, unique_qids)<EOL><DEDENT>elif len(unique_qids) == <NUM_LIT:1>:<EOL><INDENT>return list(unique_qids)[<NUM_LIT:0>]<EOL><DEDENT>
|
The most likely WD item QID should be returned, after querying WDQ for all values in core_id properties
:return: Either a single WD QID is returned, or an empty string if no suitable item in WD
|
f10941:c0:m8
|
def __construct_claim_json(self):
|
def handle_qualifiers(old_item, new_item):<EOL><INDENT>if not new_item.check_qualifier_equality:<EOL><INDENT>old_item.set_qualifiers(new_item.get_qualifiers())<EOL><DEDENT><DEDENT>def is_good_ref(ref_block):<EOL><INDENT>if len(WDItemEngine.databases) == <NUM_LIT:0>:<EOL><INDENT>WDItemEngine._init_ref_system()<EOL><DEDENT>prop_nrs = [x.get_prop_nr() for x in ref_block]<EOL>values = [x.get_value() for x in ref_block]<EOL>good_ref = True<EOL>prop_value_map = dict(zip(prop_nrs, values))<EOL>if self.good_refs and len(self.good_refs) > <NUM_LIT:0>:<EOL><INDENT>found_good = True<EOL>for rblock in self.good_refs:<EOL><INDENT>if not all([k in prop_value_map for k, v in rblock.items()]):<EOL><INDENT>found_good = False<EOL><DEDENT>if not all([v in prop_value_map[k] for k, v in rblock.items() if v]):<EOL><INDENT>found_good = False<EOL><DEDENT>if found_good:<EOL><INDENT>return True<EOL><DEDENT><DEDENT>return False<EOL><DEDENT>ref_properties = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>'] <EOL>for v in values:<EOL><INDENT>if prop_nrs[values.index(v)] == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT>elif v == '<STR_LIT>':<EOL><INDENT>return True<EOL><DEDENT><DEDENT>for p in ref_properties:<EOL><INDENT>if p not in prop_nrs:<EOL><INDENT>return False<EOL><DEDENT><DEDENT>for ref in ref_block:<EOL><INDENT>pn = ref.get_prop_nr()<EOL>value = ref.get_value()<EOL>if pn == '<STR_LIT>' and value not in WDItemEngine.databases and '<STR_LIT>' not in prop_nrs:<EOL><INDENT>return False<EOL><DEDENT>elif pn == '<STR_LIT>' and value in WDItemEngine.databases:<EOL><INDENT>db_props = WDItemEngine.databases[value]<EOL>if not any([False if x not in prop_nrs else True for x in db_props]) and '<STR_LIT>' not in prop_nrs:<EOL><INDENT>return False<EOL><DEDENT><DEDENT><DEDENT>return good_ref<EOL><DEDENT>def handle_references(old_item, new_item):<EOL><INDENT>"""<STR_LIT>"""<EOL>ref_properties = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>new_references = new_item.get_references()<EOL>old_references = old_item.get_references()<EOL>if any([z.overwrite_references for y in new_references for z in y])or sum(map(lambda z: len(z), old_references)) == <NUM_LIT:0>or self.global_ref_mode == '<STR_LIT>':<EOL><INDENT>old_item.set_references(new_references)<EOL><DEDENT>elif self.global_ref_mode == '<STR_LIT>' or new_item.statement_ref_mode == '<STR_LIT>':<EOL><INDENT>pass<EOL><DEDENT>elif self.global_ref_mode == '<STR_LIT>' or new_item.statement_ref_mode == '<STR_LIT>':<EOL><INDENT>old_references.extend(new_references)<EOL>old_item.set_references(old_references)<EOL><DEDENT>elif self.global_ref_mode == '<STR_LIT>' or new_item.statement_ref_mode == '<STR_LIT>':<EOL><INDENT>self.ref_handler(old_item, new_item)<EOL><DEDENT>elif self.global_ref_mode == '<STR_LIT>' or new_item.statement_ref_mode == '<STR_LIT>':<EOL><INDENT>keep_block = [False for x in old_references]<EOL>for count, ref_block in enumerate(old_references):<EOL><INDENT>stated_in_value = [x.get_value() for x in ref_block if x.get_prop_nr() == '<STR_LIT>']<EOL>if is_good_ref(ref_block):<EOL><INDENT>keep_block[count] = True<EOL><DEDENT>new_ref_si_values = [x.get_value() if x.get_prop_nr() == '<STR_LIT>' else None<EOL>for z in new_references for x in z]<EOL>for si in stated_in_value:<EOL><INDENT>if si in new_ref_si_values:<EOL><INDENT>keep_block[count] = False<EOL><DEDENT><DEDENT><DEDENT>refs = [x for c, x in enumerate(old_references) if keep_block[c]]<EOL>refs.extend(new_references)<EOL>old_item.set_references(refs)<EOL><DEDENT><DEDENT>self.data.sort(key=lambda z: 
z.get_prop_nr().lower())<EOL>statements_for_deletion = []<EOL>for item in self.data:<EOL><INDENT>if item.get_value() == '<STR_LIT>' and isinstance(item, WDBaseDataType):<EOL><INDENT>statements_for_deletion.append(item.get_prop_nr())<EOL><DEDENT><DEDENT>if self.create_new_item:<EOL><INDENT>self.statements = copy.copy(self.data)<EOL><DEDENT>else:<EOL><INDENT>for stat in self.data:<EOL><INDENT>prop_nr = stat.get_prop_nr()<EOL>prop_data = [x for x in self.statements if x.get_prop_nr() == prop_nr]<EOL>prop_pos = [x.get_prop_nr() == prop_nr for x in self.statements]<EOL>prop_pos.reverse()<EOL>insert_pos = len(prop_pos) - (prop_pos.index(True) if any(prop_pos) else <NUM_LIT:0>)<EOL>if prop_nr in self.append_value:<EOL><INDENT>equal_items = [stat == x for x in prop_data]<EOL>if True not in equal_items:<EOL><INDENT>self.statements.insert(insert_pos + <NUM_LIT:1>, stat)<EOL><DEDENT>else:<EOL><INDENT>current_item = prop_data[equal_items.index(True)]<EOL>current_item.set_rank(stat.get_rank())<EOL>handle_references(old_item=current_item, new_item=stat)<EOL>handle_qualifiers(old_item=current_item, new_item=stat)<EOL><DEDENT>continue<EOL><DEDENT>for x in prop_data:<EOL><INDENT>if hasattr(stat, '<STR_LIT>'):<EOL><INDENT>break<EOL><DEDENT>elif x.get_id() and not hasattr(x, '<STR_LIT>'):<EOL><INDENT>if self.keep_good_ref_statements:<EOL><INDENT>if any([is_good_ref(r) for r in x.get_references()]):<EOL><INDENT>setattr(x, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>setattr(x, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT><DEDENT><DEDENT>match = []<EOL>for i in prop_data:<EOL><INDENT>if stat == i and hasattr(stat, '<STR_LIT>'):<EOL><INDENT>match.append(True)<EOL>setattr(i, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif stat == i:<EOL><INDENT>match.append(True)<EOL>setattr(i, '<STR_LIT>', '<STR_LIT>')<EOL>if hasattr(i, '<STR_LIT>'):<EOL><INDENT>delattr(i, '<STR_LIT>')<EOL><DEDENT>handle_references(old_item=i, new_item=stat)<EOL>handle_qualifiers(old_item=i, new_item=stat)<EOL>i.set_rank(rank=stat.get_rank())<EOL><DEDENT>elif i.get_value():<EOL><INDENT>match.append(False)<EOL><DEDENT><DEDENT>if True not in match and not hasattr(stat, '<STR_LIT>'):<EOL><INDENT>self.statements.insert(insert_pos + <NUM_LIT:1>, stat)<EOL><DEDENT><DEDENT><DEDENT>for item in copy.deepcopy(self.statements):<EOL><INDENT>if item.get_prop_nr() in statements_for_deletion and item.get_id() != '<STR_LIT>':<EOL><INDENT>setattr(item, '<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>elif item.get_prop_nr() in statements_for_deletion:<EOL><INDENT>self.statements.remove(item)<EOL><DEDENT><DEDENT>self.wd_json_representation['<STR_LIT>'] = {}<EOL>for stat in self.statements:<EOL><INDENT>prop_nr = stat.get_prop_nr()<EOL>if prop_nr not in self.wd_json_representation['<STR_LIT>']:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'][prop_nr] = []<EOL><DEDENT>self.wd_json_representation['<STR_LIT>'][prop_nr].append(stat.get_json_representation())<EOL><DEDENT>
|
Writes the properties from self.data to a new or existing json in self.wd_json_representation
:return: None
|
f10941:c0:m9
|
def update(self, data, append_value=None):
|
assert type(data) == list<EOL>if append_value:<EOL><INDENT>assert type(append_value) == list<EOL>self.append_value.extend(append_value)<EOL><DEDENT>self.data.extend(data)<EOL>self.statements = copy.deepcopy(self.original_statements)<EOL>if not __debug__:<EOL><INDENT>print(self.data)<EOL><DEDENT>if self.fast_run:<EOL><INDENT>self.init_fastrun()<EOL><DEDENT>if self.require_write and self.fast_run:<EOL><INDENT>self.init_data_load()<EOL>self.__construct_claim_json()<EOL>self.__check_integrity()<EOL><DEDENT>elif not self.fast_run:<EOL><INDENT>self.__construct_claim_json()<EOL>self.__check_integrity()<EOL><DEDENT>
|
This method takes data, and modifies the Wikidata item. This works together with the data already provided via
the constructor or if the constructor is being instantiated with search_only=True. In the latter case, this
allows for checking the item data before deciding which new data should be written to the Wikidata item.
The actual write to Wikidata only happens on calling of the write() method. If data has been provided already
via the constructor, data provided via the update() method will be appended to these data.
:param data: A list of Wikidata statement items inheriting from WDBaseDataType
:type data: list
:param append_value: list with Wikidata property strings where the values should only be appended,
not overwritten.
:type: list
|
f10941:c0:m10
|
def get_wd_json_representation(self):
|
return self.wd_json_representation<EOL>
|
A method to access the internal json representation of the WD item, mainly for testing
:return: returns a Python json representation object of the WD item at the current state of the instance
|
f10941:c0:m11
|
def __check_integrity(self):
|
<EOL>wdi_core_props = self.core_props<EOL>cp_statements = [x for x in self.statements if x.get_prop_nr() in wdi_core_props]<EOL>item_core_props = set(x.get_prop_nr() for x in cp_statements)<EOL>cp_data = [x for x in self.data if x.get_prop_nr() in wdi_core_props]<EOL>count_existing_ids = len([x for x in self.data if x.get_prop_nr() in item_core_props])<EOL>core_prop_match_count = <NUM_LIT:0><EOL>for new_stat in self.data:<EOL><INDENT>for stat in self.statements:<EOL><INDENT>if (new_stat.get_prop_nr() == stat.get_prop_nr()) and (new_stat.get_value() == stat.get_value())and (new_stat.get_prop_nr() in item_core_props):<EOL><INDENT>core_prop_match_count += <NUM_LIT:1><EOL><DEDENT><DEDENT><DEDENT>if core_prop_match_count < count_existing_ids * self.core_prop_match_thresh:<EOL><INDENT>existing_core_pv = defaultdict(set)<EOL>for s in cp_statements:<EOL><INDENT>existing_core_pv[s.get_prop_nr()].add(s.get_value())<EOL><DEDENT>new_core_pv = defaultdict(set)<EOL>for s in cp_data:<EOL><INDENT>new_core_pv[s.get_prop_nr()].add(s.get_value())<EOL><DEDENT>nomatch_existing = {k: v - new_core_pv[k] for k, v in existing_core_pv.items()}<EOL>nomatch_existing = {k: v for k, v in nomatch_existing.items() if v}<EOL>nomatch_new = {k: v - existing_core_pv[k] for k, v in new_core_pv.items()}<EOL>nomatch_new = {k: v for k, v in nomatch_new.items() if v}<EOL>raise CorePropIntegrityException('<STR_LIT>'<EOL>'<STR_LIT>'<EOL>.format(self.wd_item_id, core_prop_match_count,<EOL>count_existing_ids - core_prop_match_count) +<EOL>'<STR_LIT>'.format(nomatch_existing) +<EOL>'<STR_LIT>'.format(nomatch_new))<EOL><DEDENT>else:<EOL><INDENT>return True<EOL><DEDENT>
|
A method to check that, when __select_wd_item() is invoked and the WD item does not exist yet but another item
has a property of the current domain with a value like the one submitted in the data dict, that other item does
not get selected; instead a ManualInterventionReqException() is raised. This check is dependent on the core
identifiers of a certain domain.
:return: boolean True if test passed
|
f10941:c0:m12
|
def get_label(self, lang='<STR_LIT>'):
|
if self.fast_run:<EOL><INDENT>return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, '<STR_LIT:label>'))[<NUM_LIT:0>]<EOL><DEDENT>try:<EOL><INDENT>return self.wd_json_representation['<STR_LIT>'][lang]['<STR_LIT:value>']<EOL><DEDENT>except KeyError:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>
|
Returns the label for a certain language
:param lang:
:type lang: str
:return: returns the label in the specified language, an empty string if the label does not exist
|
f10941:c0:m13
|
def set_label(self, label, lang='<STR_LIT>'):
|
if self.fast_run and not self.require_write:<EOL><INDENT>self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,<EOL>lang_data=[label], lang=lang,<EOL>lang_data_type='<STR_LIT:label>')<EOL>if self.require_write:<EOL><INDENT>self.init_data_load()<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>if '<STR_LIT>' not in self.wd_json_representation:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'] = {}<EOL><DEDENT>self.wd_json_representation['<STR_LIT>'][lang] = {<EOL>'<STR_LIT>': lang,<EOL>'<STR_LIT:value>': label<EOL>}<EOL>
|
Set the label for a WD item in a certain language
:param label: The label of the item in a certain language
:type label: str
:param lang: The language a label should be set for.
:type lang: str
:return: None
|
f10941:c0:m14
|
def get_aliases(self, lang='<STR_LIT>'):
|
if self.fast_run:<EOL><INDENT>return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, '<STR_LIT>'))<EOL><DEDENT>alias_list = []<EOL>if '<STR_LIT>' in self.wd_json_representation and lang in self.wd_json_representation['<STR_LIT>']:<EOL><INDENT>for alias in self.wd_json_representation['<STR_LIT>'][lang]:<EOL><INDENT>alias_list.append(alias['<STR_LIT:value>'])<EOL><DEDENT><DEDENT>return alias_list<EOL>
|
Retrieve the aliases in a certain language
:param lang: The Wikidata language the aliases should be retrieved for
:return: Returns a list of aliases, an empty list if none exist for the specified language
|
f10941:c0:m15
|
def set_aliases(self, aliases, lang='<STR_LIT>', append=True):
|
if self.fast_run and not self.require_write:<EOL><INDENT>self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,<EOL>lang_data=aliases, lang=lang,<EOL>lang_data_type='<STR_LIT>')<EOL>if self.require_write:<EOL><INDENT>self.init_data_load()<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>if '<STR_LIT>' not in self.wd_json_representation:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'] = {}<EOL><DEDENT>if not append or lang not in self.wd_json_representation['<STR_LIT>']:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'][lang] = []<EOL><DEDENT>for alias in aliases:<EOL><INDENT>found = False<EOL>for current_aliases in self.wd_json_representation['<STR_LIT>'][lang]:<EOL><INDENT>if alias.strip().lower() != current_aliases['<STR_LIT:value>'].strip().lower():<EOL><INDENT>continue<EOL><DEDENT>else:<EOL><INDENT>found = True<EOL>break<EOL><DEDENT><DEDENT>if not found:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'][lang].append({<EOL>'<STR_LIT>': lang,<EOL>'<STR_LIT:value>': alias<EOL>})<EOL><DEDENT><DEDENT>
|
set the aliases for a WD item
:param aliases: a list of strings representing the aliases of a WD item
:param lang: The language the aliases should be set for
:param append: If true, append a new alias to the list of existing aliases, else, overwrite. Default: True
:return: None
|
f10941:c0:m16
|
def get_description(self, lang='<STR_LIT>'):
|
if self.fast_run:<EOL><INDENT>return list(self.fast_run_container.get_language_data(self.wd_item_id, lang, '<STR_LIT:description>'))[<NUM_LIT:0>]<EOL><DEDENT>if '<STR_LIT>' not in self.wd_json_representation or lang not in self.wd_json_representation['<STR_LIT>']:<EOL><INDENT>return '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>return self.wd_json_representation['<STR_LIT>'][lang]['<STR_LIT:value>']<EOL><DEDENT>
|
Retrieve the description in a certain language
:param lang: The Wikidata language the description should be retrieved for
:return: Returns the description string
|
f10941:c0:m17
|
def set_description(self, description, lang='<STR_LIT>'):
|
if self.fast_run and not self.require_write:<EOL><INDENT>self.require_write = self.fast_run_container.check_language_data(qid=self.wd_item_id,<EOL>lang_data=[description], lang=lang,<EOL>lang_data_type='<STR_LIT:description>')<EOL>if self.require_write:<EOL><INDENT>self.init_data_load()<EOL><DEDENT>else:<EOL><INDENT>return<EOL><DEDENT><DEDENT>if '<STR_LIT>' not in self.wd_json_representation:<EOL><INDENT>self.wd_json_representation['<STR_LIT>'] = {}<EOL><DEDENT>self.wd_json_representation['<STR_LIT>'][lang] = {<EOL>'<STR_LIT>': lang,<EOL>'<STR_LIT:value>': description<EOL>}<EOL>
|
Set the description for a WD item in a certain language
:param description: The description of the item in a certain language
:type description: str
:param lang: The language a description should be set for.
:type lang: str
:return: None
|
f10941:c0:m18
|
def set_sitelink(self, site, title, badges=()):
|
sitelink = {<EOL>'<STR_LIT>': site,<EOL>'<STR_LIT:title>': title,<EOL>'<STR_LIT>': badges<EOL>}<EOL>self.wd_json_representation['<STR_LIT>'][site] = sitelink<EOL>self.sitelinks[site] = sitelink<EOL>
|
Set sitelinks to corresponding Wikipedia pages
:param site: The Wikipedia site the sitelink is directed to (e.g. 'enwiki')
:param title: The title of the Wikipedia page the sitelink is directed to
:param badges: An iterable containing Wikipedia badge strings.
:return:
|
f10941:c0:m19
|
def get_sitelink(self, site):
|
if site in self.sitelinks:<EOL><INDENT>return self.sitelinks[site]<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>
|
A method to access the interwiki links in the JSON model
:param site: The Wikipedia site the interwiki/sitelink should be returned for
:return: The interwiki/sitelink string for the specified Wikipedia will be returned.
|
f10941:c0:m20
|
def write(self, login, bot_account=True, edit_summary='<STR_LIT>', entity_type='<STR_LIT>', property_datatype='<STR_LIT:string>',<EOL>max_retries=<NUM_LIT:10>, retry_after=<NUM_LIT:30>):
|
if not self.require_write:<EOL><INDENT>return self.wd_item_id<EOL><DEDENT>if entity_type == '<STR_LIT>':<EOL><INDENT>self.wd_json_representation['<STR_LIT>'] = property_datatype<EOL>if '<STR_LIT>' in self.wd_json_representation:<EOL><INDENT>del self.wd_json_representation['<STR_LIT>']<EOL><DEDENT><DEDENT>payload = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:data>': json.JSONEncoder().encode(self.wd_json_representation),<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': login.get_edit_token(),<EOL>'<STR_LIT>': edit_summary,<EOL>'<STR_LIT>': config['<STR_LIT>']<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:utf-8>'<EOL>}<EOL>if bot_account:<EOL><INDENT>payload.update({'<STR_LIT>': '<STR_LIT>'})<EOL><DEDENT>if self.create_new_item:<EOL><INDENT>payload.update({u'<STR_LIT>': entity_type})<EOL><DEDENT>else:<EOL><INDENT>payload.update({u'<STR_LIT:id>': self.wd_item_id})<EOL><DEDENT>try:<EOL><INDENT>json_data = self.mediawiki_api_call('<STR_LIT:POST>', self.mediawiki_api_url, session=login.get_session(),<EOL>max_retries=max_retries, retry_after=retry_after,<EOL>headers=headers, data=payload)<EOL>if '<STR_LIT:error>' in json_data and '<STR_LIT>' in json_data['<STR_LIT:error>']:<EOL><INDENT>error_msg_names = set(x.get('<STR_LIT:name>') for x in json_data["<STR_LIT:error>"]['<STR_LIT>'])<EOL>if '<STR_LIT>' in error_msg_names:<EOL><INDENT>raise NonUniqueLabelDescriptionPairError(json_data)<EOL><DEDENT>else:<EOL><INDENT>raise WDApiError(json_data)<EOL><DEDENT><DEDENT>elif '<STR_LIT:error>' in json_data.keys():<EOL><INDENT>raise WDApiError(json_data)<EOL><DEDENT><DEDENT>except Exception:<EOL><INDENT>print('<STR_LIT>')<EOL>raise<EOL><DEDENT>self.create_new_item = False<EOL>self.wd_item_id = json_data['<STR_LIT>']['<STR_LIT:id>']<EOL>self.parse_wd_json(wd_json=json_data['<STR_LIT>'])<EOL>self.data = []<EOL>if "<STR_LIT:success>" in json_data and "<STR_LIT>" in json_data and "<STR_LIT>" in json_data["<STR_LIT>"]:<EOL><INDENT>self.lastrevid = json_data["<STR_LIT>"]["<STR_LIT>"]<EOL><DEDENT>return self.wd_item_id<EOL>
|
Writes the WD item Json to WD and after successful write, updates the object with new ids and hashes generated
by WD. For new items, also returns the new QIDs.
:param login: an instance of the class PBB_login which provides edit-cookies and edit-tokens
:param bot_account: Tell the Wikidata API whether the script should be run as part of a bot account or not.
:type bot_account: bool
:param edit_summary: A short (max 250 characters) summary of the purpose of the edit. This will be displayed as
the revision summary of the Wikidata item.
:type edit_summary: str
:param entity_type: Decides whether the object will become an item (default) or a property (with 'property')
:type entity_type: str
:param property_datatype: When entity_type is 'property', this parameter sets the datatype for the property
:type property_datatype: str
:param max_retries: If api request fails due to rate limiting, maxlag, or readonly mode, retry up to
`max_retries` times
:type max_retries: int
:param retry_after: Number of seconds to wait before retrying request (see max_retries)
:type retry_after: int
:return: the WD QID on successful write
|
f10941:c0:m21
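End to end, a write consists of constructing the engine with statements and calling write() with a login object. A hedged sketch assuming wikidataintegrator's wdi_core and wdi_login modules; the credentials are placeholders and the target is the Wikidata sandbox item:

from wikidataintegrator import wdi_core, wdi_login

login = wdi_login.WDLogin(user="BOT_USERNAME", pwd="BOT_PASSWORD")      # placeholder credentials
statement = wdi_core.WDItemID(value="Q5", prop_nr="P31")                # instance of: human
item = wdi_core.WDItemEngine(wd_item_id="Q4115189", data=[statement])   # Wikidata sandbox item
qid = item.write(login, edit_summary="usage sketch edit")
print(qid)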
|
@staticmethod<EOL><INDENT>def mediawiki_api_call(method, mediawiki_api_url='<STR_LIT>',<EOL>session=None, max_retries=<NUM_LIT:10>, retry_after=<NUM_LIT:30>, **kwargs):<DEDENT>
|
response = None<EOL>session = session if session else requests.session()<EOL>for n in range(max_retries):<EOL><INDENT>try:<EOL><INDENT>response = session.request(method, mediawiki_api_url, **kwargs)<EOL><DEDENT>except requests.exceptions.ConnectionError as e:<EOL><INDENT>print("<STR_LIT>".format(e, retry_after))<EOL>time.sleep(retry_after)<EOL>continue<EOL><DEDENT>if response.status_code == <NUM_LIT>:<EOL><INDENT>print("<STR_LIT>".format(retry_after))<EOL>time.sleep(retry_after)<EOL>continue<EOL><DEDENT>response.raise_for_status()<EOL>json_data = response.json()<EOL>"""<STR_LIT>"""<EOL>if '<STR_LIT:error>' in json_data:<EOL><INDENT>error_msg_names = set()<EOL>if '<STR_LIT>' in json_data['<STR_LIT:error>']:<EOL><INDENT>error_msg_names = set(x.get('<STR_LIT:name>') for x in json_data["<STR_LIT:error>"]['<STR_LIT>'])<EOL><DEDENT>if '<STR_LIT>' in error_msg_names:<EOL><INDENT>sleep_sec = int(response.headers.get('<STR_LIT>', retry_after))<EOL>print("<STR_LIT>".format(datetime.datetime.utcnow(), sleep_sec))<EOL>time.sleep(sleep_sec)<EOL>continue<EOL><DEDENT>if '<STR_LIT:code>' in json_data['<STR_LIT:error>'] and json_data['<STR_LIT:error>']['<STR_LIT:code>'] == '<STR_LIT>':<EOL><INDENT>sleep_sec = json_data['<STR_LIT:error>'].get('<STR_LIT>', retry_after)<EOL>print("<STR_LIT>".format(datetime.datetime.utcnow(), sleep_sec))<EOL>time.sleep(sleep_sec)<EOL>continue<EOL><DEDENT>if '<STR_LIT:code>' in json_data['<STR_LIT:error>'] and json_data['<STR_LIT:error>']['<STR_LIT:code>'] == '<STR_LIT>':<EOL><INDENT>print('<STR_LIT>'.format(retry_after))<EOL>time.sleep(retry_after)<EOL>continue<EOL><DEDENT><DEDENT>break<EOL><DEDENT>else:<EOL><INDENT>raise WDApiError(response.json() if response else dict())<EOL><DEDENT>return json_data<EOL>
|
:param method: 'GET' or 'POST'
:param mediawiki_api_url:
:param session: If a session is passed, it will be used. Otherwise a new requests session is created
:param max_retries: If api request fails due to rate limiting, maxlag, or readonly mode, retry up to
`max_retries` times
:type max_retries: int
:param retry_after: Number of seconds to wait before retrying request (see max_retries)
:type retry_after: int
:param kwargs: Passed to requests.request
:return:
|
f10941:c0:m22
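Condensed, the retry behaviour above is: retry on connection errors, on HTTP 503, and on API-reported maxlag or rate limiting, sleeping between attempts. A generic requests sketch of that pattern, not the anonymized method verbatim:

import time
import requests

def api_call_with_retry(method, url, max_retries=10, retry_after=30, **kwargs):
    session = requests.session()
    for _ in range(max_retries):
        try:
            response = session.request(method, url, **kwargs)
        except requests.exceptions.ConnectionError:
            time.sleep(retry_after)
            continue
        if response.status_code == 503:                 # server overloaded or readonly
            time.sleep(retry_after)
            continue
        response.raise_for_status()
        data = response.json()
        if data.get("error", {}).get("code") == "maxlag":
            # The Retry-After header indicates how long the replicas are lagging.
            time.sleep(int(response.headers.get("Retry-After", retry_after)))
            continue
        return data
    raise RuntimeError("API request failed after {} retries".format(max_retries))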
|
@classmethod<EOL><INDENT>def setup_logging(cls, log_dir="<STR_LIT>", log_name=None, header=None, names=None,<EOL>delimiter="<STR_LIT:;>", logger_name='<STR_LIT>'):<DEDENT>
|
names = ["<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>", "<STR_LIT>",<EOL>"<STR_LIT>"] if names is None else names<EOL>if not os.path.exists(log_dir):<EOL><INDENT>os.makedirs(log_dir)<EOL><DEDENT>if not log_name:<EOL><INDENT>run_id = time.strftime('<STR_LIT>', time.localtime())<EOL>log_name = "<STR_LIT>".format(run_id)<EOL><DEDENT>logger = logging.getLogger(logger_name)<EOL>logger.setLevel(logging.DEBUG)<EOL>log_file_name = os.path.join(log_dir, log_name)<EOL>file_handler = logging.FileHandler(log_file_name, mode='<STR_LIT:a>')<EOL>file_handler.setLevel(logging.DEBUG)<EOL>fmt = '<STR_LIT>'.format(delimiter=delimiter)<EOL>if header:<EOL><INDENT>header = header if header.startswith("<STR_LIT:#>") else "<STR_LIT:#>" + header<EOL>header += "<STR_LIT:\n>" + delimiter.join(names)<EOL>formatter = FormatterWithHeader(header, fmt=fmt, datefmt='<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>formatter = FormatterWithHeader(delimiter.join(names), fmt=fmt, datefmt='<STR_LIT>')<EOL><DEDENT>file_handler.setFormatter(formatter)<EOL>logger.addHandler(file_handler)<EOL>cls.logger = logger<EOL>
|
A class method which initializes a log file in a .csv-compatible format, allowing for easy further analysis.
:param log_dir: allows for setting relative or absolute path for logging, default is ./logs.
:type log_dir: str
:param log_name: File name of log file to be written. e.g. "WD_bot_run-20160204.log". Default is "WD_bot_run"
and a timestamp of the current time
:type log_name: str
:param header: Log file will be prepended with header if given
:type header: str
:param names: Column names for the log file
:type names: list
:param delimiter: Log file will be delimited with `delimiter`
:type delimiter: str
|
f10941:c0:m23
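A stdlib-only sketch of the csv-style logging that setup_logging() configures; the original's header-writing FormatterWithHeader is replaced by a plain Formatter here, so this shows the idea rather than the exact class used:

import logging
import os
import time

log_dir = "./logs"
os.makedirs(log_dir, exist_ok=True)
log_name = "WD_bot_run-{}.log".format(time.strftime("%Y%m%d_%H%M", time.localtime()))

logger = logging.getLogger("WD_logger")
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(os.path.join(log_dir, log_name), mode="a")
handler.setFormatter(logging.Formatter(fmt="%(levelname)s;%(asctime)s;%(message)s",
                                       datefmt="%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)

# One delimiter-separated record per log call, ready for csv-style analysis.
logger.info('Q42;"no exception";"item up to date";0.3')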
|
@classmethod<EOL><INDENT>def log(cls, level, message):<DEDENT>
|
if cls.logger is None:<EOL><INDENT>cls.setup_logging()<EOL><DEDENT>log_levels = {'<STR_LIT>': logging.DEBUG, '<STR_LIT>': logging.ERROR, '<STR_LIT>': logging.INFO, '<STR_LIT>': logging.WARNING,<EOL>'<STR_LIT>': logging.CRITICAL}<EOL>cls.logger.log(level=log_levels[level], msg=message)<EOL>
|
:param level: The log level as in the Python logging documentation, 5 different possible values with increasing
severity
:type level: String of value 'DEBUG', 'INFO', 'WARNING', 'ERROR' or 'CRITICAL'.
:param message: The logging data which should be written to the log file. In order to achieve a csv-file
compatible format, all fields must be separated by a colon. Furthermore, all strings which could contain
colons, spaces or other special characters must be enclosed in double-quotes.
e.g. '{main_data_id}, "{exception_type}", "{message}", {wd_id}, {duration}'.format(
main_data_id=<main_id>,
exception_type=<exception type>,
message=<exception message>,
wd_id=<wikidata id>,
duration=<duration of action>)
:type message: str
|
f10941:c0:m24
|
@classmethod<EOL><INDENT>def generate_item_instances(cls, items, mediawiki_api_url='<STR_LIT>', login=None,<EOL>user_agent=config['<STR_LIT>']):<DEDENT>
|
assert type(items) == list<EOL>url = mediawiki_api_url<EOL>params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:|>'.join(items),<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': user_agent<EOL>}<EOL>if login:<EOL><INDENT>reply = login.get_session().get(url, params=params, headers=headers)<EOL><DEDENT>else:<EOL><INDENT>reply = requests.get(url, params=params)<EOL><DEDENT>item_instances = []<EOL>for qid, v in reply.json()['<STR_LIT>'].items():<EOL><INDENT>ii = cls(wd_item_id=qid, item_data=v)<EOL>ii.mediawiki_api_url = mediawiki_api_url<EOL>item_instances.append((qid, ii))<EOL><DEDENT>return item_instances<EOL>
|
A method which allows for retrieval of a list of Wikidata items or properties. The method generates a list of
tuples where the first value in the tuple is the QID or property ID, whereas the second is the new instance of
WDItemEngine containing all the data of the item. This is most useful for mass retrieval of WD items.
:param items: A list of QIDs or property IDs
:type items: list
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param login: An object of type WDLogin, which holds the credentials/session cookies required for >50 item bulk
retrieval of items.
:type login: wdi_login.WDLogin
:return: A list of tuples, first value in the tuple is the QID or property ID string, second value is the
instance of WDItemEngine with the corresponding item data.
|
f10941:c0:m25
|
@staticmethod<EOL><INDENT>@wdi_backoff()<EOL>def execute_sparql_query(query, prefix=None, endpoint='<STR_LIT>',<EOL>user_agent=config['<STR_LIT>'], as_dataframe=False):<DEDENT>
|
if not endpoint:<EOL><INDENT>endpoint = '<STR_LIT>'<EOL><DEDENT>if prefix:<EOL><INDENT>query = prefix + '<STR_LIT:\n>' + query<EOL><DEDENT>params = {<EOL>'<STR_LIT>': '<STR_LIT>' + query,<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': user_agent<EOL>}<EOL>response = requests.get(endpoint, params=params, headers=headers)<EOL>response.raise_for_status()<EOL>results = response.json()<EOL>if as_dataframe:<EOL><INDENT>return WDItemEngine._sparql_query_result_to_df(results)<EOL><DEDENT>else:<EOL><INDENT>return results<EOL><DEDENT>
|
Static method which can be used to execute any SPARQL query
:param prefix: The URI prefixes required for an endpoint, default is the Wikidata specific prefixes
:param query: The actual SPARQL query string
:param endpoint: The URL string for the SPARQL endpoint. Default is the URL for the Wikidata SPARQL endpoint
:param user_agent: Set a user agent string for the HTTP header to let the WDQS know who you are.
:param as_dataframe: Return result as pandas dataframe
:type user_agent: str
:return: The results of the query are returned in JSON format
|
f10941:c0:m26
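Assuming the static helper above is exposed as wikidataintegrator's WDItemEngine.execute_sparql_query, a call returns the standard SPARQL JSON result layout (results -> bindings). The query below is purely illustrative (count of items that are instance of human):

from wikidataintegrator import wdi_core

query = """
SELECT (COUNT(?item) AS ?count) WHERE {
  ?item wdt:P31 wd:Q5 .
}
"""
results = wdi_core.WDItemEngine.execute_sparql_query(query)
for row in results["results"]["bindings"]:
    print(row["count"]["value"])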
|
@staticmethod<EOL><INDENT>def run_shex_manifest(manifest_url, index=<NUM_LIT:0>, debug=False):<DEDENT>
|
manifest = json.loads(manifest_url, debug=False)<EOL>manifest_results = dict()<EOL>for case in manifest[index]:<EOL><INDENT>if case.data.startswith("<STR_LIT>"):<EOL><INDENT>sparql_endpoint = case.data.replace("<STR_LIT>", "<STR_LIT>")<EOL>schema = requests.get(case.schemaURL).text<EOL>shex = ShExC(schema).schema<EOL>evaluator = ShExEvaluator(schema=shex, debug=debug)<EOL>sparql_query = case.queryMap.replace("<STR_LIT>", "<STR_LIT>").replace("<STR_LIT>", "<STR_LIT>")<EOL>df = WDItemEngine.execute_sparql_query(sparql_query)<EOL>for row in df["<STR_LIT>"]["<STR_LIT>"]:<EOL><INDENT>wdid = row["<STR_LIT>"]["<STR_LIT:value>"]<EOL>if wdid not in manifest_results.keys():<EOL><INDENT>manifest_results[wdid] = dict()<EOL><DEDENT>slurpeddata = SlurpyGraph(sparql_endpoint)<EOL>results = evaluator.evaluate(rdf=slurpeddata, focus=wdid, debug=debug)<EOL>for result in results:<EOL><INDENT>if result.result:<EOL><INDENT>manifest_results[wdid]["<STR_LIT:status>"] = "<STR_LIT>"<EOL><DEDENT>else:<EOL><INDENT>manifest_results[wdid]["<STR_LIT:status>"] = "<STR_LIT>"<EOL>manifest_results[wdid]["<STR_LIT>"] = result.reason<EOL><DEDENT><DEDENT><DEDENT><DEDENT><DEDENT>return manifest_results<EOL>
|
:param manifest_url: A URL to a manifest that contains all the ingredients to run a ShEx conformance test
:param index: Manifests are stored in lists. This method only handles one manifest, hence by default the first
manifest is going to be selected
:return:
|
f10941:c0:m29
|
@staticmethod<EOL><INDENT>def merge_items(from_id, to_id, login_obj, mediawiki_api_url='<STR_LIT>',<EOL>ignore_conflicts='<STR_LIT>', user_agent=config['<STR_LIT>']):<DEDENT>
|
url = mediawiki_api_url<EOL>headers = {<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:utf-8>',<EOL>'<STR_LIT>': user_agent<EOL>}<EOL>params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': from_id,<EOL>'<STR_LIT>': to_id,<EOL>'<STR_LIT>': login_obj.get_edit_token(),<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': ignore_conflicts<EOL>}<EOL>try:<EOL><INDENT>merge_reply = requests.post(url=url, data=params, headers=headers, cookies=login_obj.get_edit_cookie())<EOL>merge_reply.raise_for_status()<EOL>if '<STR_LIT:error>' in merge_reply.json():<EOL><INDENT>raise MergeError(merge_reply.json())<EOL><DEDENT><DEDENT>except requests.HTTPError as e:<EOL><INDENT>print(e)<EOL>return {'<STR_LIT:error>': '<STR_LIT>'}<EOL><DEDENT>return merge_reply.json()<EOL>
|
A static method to merge two Wikidata items
:param from_id: The QID which should be merged into another item
:type from_id: string with 'Q' prefix
:param to_id: The QID into which another item should be merged
:type to_id: string with 'Q' prefix
:param login_obj: The object containing the login credentials and cookies
:type login_obj: instance of PBB_login.WDLogin
:param mediawiki_api_url: The MediaWiki url which should be used
:type mediawiki_api_url: str
:param ignore_conflicts: A string with the values 'description', 'statement' or 'sitelink', separated
by a pipe ('|') if using more than one of those.
:type ignore_conflicts: str
|
f10941:c0:m30
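The merge goes through the action API's wbmergeitems module. A standalone requests sketch that makes the request shape explicit; CSRF-token and cookie handling is simplified and the arguments are placeholders:

import requests

def merge_items(from_id, to_id, csrf_token, cookies,
                api_url="https://www.wikidata.org/w/api.php"):
    params = {
        "action": "wbmergeitems",
        "fromid": from_id,
        "toid": to_id,
        "token": csrf_token,
        "format": "json",
        "ignoreconflicts": "description",
    }
    reply = requests.post(api_url, data=params, cookies=cookies)
    reply.raise_for_status()
    return reply.json()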
|
@staticmethod<EOL><INDENT>def delete_items(item_list, reason, login, mediawiki_api_url='<STR_LIT>',<EOL>user_agent=config['<STR_LIT>']):<DEDENT>
|
url = mediawiki_api_url<EOL>bulk_deletion_string = '<STR_LIT>'<EOL>bulk_deletion_string += '<STR_LIT>'.format('<STR_LIT>'.join(item_list), reason)<EOL>params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:content>',<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>headers = {<EOL>'<STR_LIT>': user_agent<EOL>}<EOL>page_text = [x['<STR_LIT>'][<NUM_LIT:0>]['<STR_LIT:*>']<EOL>for x in requests.get(url=url, params=params, headers=headers).json()['<STR_LIT>']['<STR_LIT>'].values()][<EOL><NUM_LIT:0>]<EOL>if not login:<EOL><INDENT>print(page_text)<EOL>print(bulk_deletion_string)<EOL><DEDENT>else:<EOL><INDENT>params = {<EOL>'<STR_LIT:action>': '<STR_LIT>',<EOL>'<STR_LIT:title>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT:0>',<EOL>'<STR_LIT:text>': page_text + bulk_deletion_string,<EOL>'<STR_LIT>': login.get_edit_token(),<EOL>'<STR_LIT>': '<STR_LIT>'<EOL>}<EOL>r = requests.post(url=url, data=params, cookies=login.get_edit_cookie(), headers=headers)<EOL>print(r.json())<EOL><DEDENT>
|
Takes a list of items and posts them for deletion by Wikidata moderators, appending them at the end of the
deletion request page.
:param item_list: a list of QIDs which should be deleted
:type item_list: list
:param reason: short text about the reason for the deletion request
:type reason: str
:param login: A WDI login object which contains username and password the edit should be performed with.
:type login: wdi_login.WDLogin
|
f10941:c0:m32
|
@classmethod<EOL><INDENT>def wikibase_item_engine_factory(cls, mediawiki_api_url, sparql_endpoint_url, name='<STR_LIT>'):<DEDENT>
|
class SubCls(cls):<EOL><INDENT>def __init__(self, *args, **kwargs):<EOL><INDENT>kwargs['<STR_LIT>'] = mediawiki_api_url<EOL>kwargs['<STR_LIT>'] = sparql_endpoint_url<EOL>super(SubCls, self).__init__(*args, **kwargs)<EOL><DEDENT><DEDENT>SubCls.__name__ = name<EOL>return SubCls<EOL>
|
Helper function for creating a WDItemEngine class with arguments set for a different Wikibase instance than
Wikidata.
:param mediawiki_api_url: Mediawiki api url. For wikidata, this is: 'https://www.wikidata.org/w/api.php'
:param sparql_endpoint_url: sparql endpoint url. For wikidata, this is: 'https://query.wikidata.org/sparql'
:param name: name of the resulting class
:return: a subclass of WDItemEngine with the mediawiki_api_url and sparql_endpoint_url arguments set
|
f10941:c0:m33
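Assuming the class is wikidataintegrator's WDItemEngine, the factory is typically used to bind a subclass to a private Wikibase once and then instantiate it like the stock class; the URLs below are placeholders:

from wikidataintegrator import wdi_core

MyWikibaseItem = wdi_core.WDItemEngine.wikibase_item_engine_factory(
    mediawiki_api_url="https://wikibase.example.org/w/api.php",
    sparql_endpoint_url="https://wikibase.example.org/query/sparql",
    name="MyWikibaseItem",
)
item = MyWikibaseItem(wd_item_id="Q1")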
|
def __init__(self, value, snak_type, data_type, is_reference, is_qualifier, references, qualifiers, rank, prop_nr,<EOL>check_qualifier_equality):
|
self.value = value<EOL>self.snak_type = snak_type<EOL>self.data_type = data_type<EOL>if not references:<EOL><INDENT>self.references = []<EOL><DEDENT>else:<EOL><INDENT>self.references = references<EOL><DEDENT>self.qualifiers = qualifiers<EOL>self.is_reference = is_reference<EOL>self.is_qualifier = is_qualifier<EOL>self.rank = rank<EOL>self.check_qualifier_equality = check_qualifier_equality<EOL>self._statement_ref_mode = '<STR_LIT>'<EOL>if not references:<EOL><INDENT>self.references = list()<EOL><DEDENT>if not self.qualifiers:<EOL><INDENT>self.qualifiers = list()<EOL><DEDENT>if type(prop_nr) is int:<EOL><INDENT>self.prop_nr = '<STR_LIT:P>' + str(prop_nr)<EOL><DEDENT>elif prop_nr.startswith('<STR_LIT:P>'):<EOL><INDENT>self.prop_nr = prop_nr<EOL><DEDENT>else:<EOL><INDENT>self.prop_nr = '<STR_LIT:P>' + prop_nr<EOL><DEDENT>self._overwrite_references = False<EOL>self.id = '<STR_LIT>'<EOL>self.hash = '<STR_LIT>'<EOL>self.json_representation = {<EOL>"<STR_LIT>": self.snak_type,<EOL>"<STR_LIT>": self.prop_nr,<EOL>"<STR_LIT>": {},<EOL>"<STR_LIT>": self.data_type<EOL>}<EOL>self.snak_types = ['<STR_LIT:value>', '<STR_LIT>', '<STR_LIT>']<EOL>if snak_type not in self.snak_types:<EOL><INDENT>raise ValueError('<STR_LIT>'.format(snak_type))<EOL><DEDENT>if self.is_qualifier and self.is_reference:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>if (len(self.references) > <NUM_LIT:0> or len(self.qualifiers) > <NUM_LIT:0>) and (self.is_qualifier or self.is_reference):<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>
|
Constructor, will be called by all data types.
:param value: Data value of the WD data snak
:type value: str or int or tuple
:param snak_type: The snak type of the WD data snak, three values possible, depending if the value is a
known (value), not existent (novalue) or unknown (somevalue). See WD documentation.
:type snak_type: a str of either 'value', 'novalue' or 'somevalue'
:param data_type: The WD data type declaration of this snak
:type data_type: str
:param is_reference: States if the snak is a reference, mutually exclusive with qualifier
:type is_reference: boolean
:param is_qualifier: States if the snak is a qualifier, mutually exclusive with reference
:type is_qualifier: boolean
:param references: A one level nested list with reference WD snaks of base type WDBaseDataType, e.g.
references=[[<WDBaseDataType>, <WDBaseDataType>], [<WDBaseDataType>]]
This will create two references, the first one with two statements, the second with one
:type references: A one level nested list with instances of WDBaseDataType or children of it.
:param qualifiers: A list of qualifiers for the WD mainsnak
:type qualifiers: A list with instances of WDBaseDataType or children of it.
:param rank: The rank of a WD mainsnak, should determine the status of a value
:type rank: A string of one of three allowed values: 'normal', 'deprecated', 'preferred'
:param prop_nr: The WD property number a WD snak belongs to
:type prop_nr: A string with a prefixed 'P' and several digits e.g. 'P715' (Drugbank ID)
:return:
|
f10941:c2:m0
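Concretely, references are passed as a one-level nested list of blocks and qualifiers as a flat list, as the docstring describes. A hedged sketch assuming wikidataintegrator's concrete datatype subclasses of the base type above; property and item IDs are taken from the docstring's good_refs example where possible and are otherwise illustrative:

from wikidataintegrator import wdi_core

reference_block = [
    wdi_core.WDItemID(value="Q905695", prop_nr="P248", is_reference=True),        # stated in: UniProt
    wdi_core.WDTime("+2024-01-01T00:00:00Z", prop_nr="P813", is_reference=True),  # retrieved
]
qualifiers = [
    wdi_core.WDItemID(value="Q1860", prop_nr="P407", is_qualifier=True),          # language of work: English
]
statement = wdi_core.WDExternalID(
    value="P00533",                 # placeholder UniProt accession
    prop_nr="P352",
    references=[reference_block],   # list of reference blocks, each a list of snaks
    qualifiers=qualifiers,
)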
|
@statement_ref_mode.setter<EOL><INDENT>def statement_ref_mode(self, value):<DEDENT>
|
valid_values = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']<EOL>if value not in valid_values:<EOL><INDENT>raise ValueError('<STR_LIT>'.format('<STR_LIT:U+0020>'.join(valid_values)))<EOL><DEDENT>self._statement_ref_mode = value<EOL>
|
Set the reference mode for a statement, always overrides the global reference state.
|
f10941:c2:m7
|