def do_ica(self, random_state=None):
    if self.data_ is None:
        raise RuntimeError("ICA requires data to be set")
    result = plainica(x=self.data_[self.trial_mask_, :, :],
                      reducedim=self.reducedim_, backend=self.backend_,
                      random_state=random_state)
    self.mixing_ = result.mixing
    self.unmixing_ = result.unmixing
    self.activations_ = dot_special(self.unmixing_.T, self.data_)
    self.var_model_ = None
    self.var_cov_ = None
    self.connectivity_ = None
    self.mixmaps_ = []
    self.unmixmaps_ = []
    return self

Perform ICA.

Perform plain ICA source decomposition.

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain data.
f10412:c0:m8
def remove_sources(self, sources):
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")
    self.mixing_ = np.delete(self.mixing_, sources, 0)
    self.unmixing_ = np.delete(self.unmixing_, sources, 1)
    if self.activations_ is not None:
        self.activations_ = np.delete(self.activations_, sources, 1)
    self.var_model_ = None
    self.var_cov_ = None
    self.connectivity_ = None
    self.mixmaps_ = []
    self.unmixmaps_ = []
    return self

Remove sources from the decomposition.

This function removes sources from the decomposition. Doing so
invalidates currently fitted VAR models and connectivity estimates.

Parameters
----------
sources : {slice, int, array of ints}
    Indices of components to remove.

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain a source
    decomposition.
f10412:c0:m9
def keep_sources(self, keep):
    if self.unmixing_ is None or self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")
    n_sources = self.mixing_.shape[0]
    self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
    return self
Keep only the specified sources in the decomposition.
f10412:c0:m10
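A minimal NumPy sketch (independent of the Workspace class, with a made-up 4x3 mixing matrix) of how keep_sources reduces to remove_sources via the set complement:

    import numpy as np

    mixing = np.arange(12).reshape(4, 3)   # 4 hypothetical sources x 3 channels
    keep = [0, 2]                          # sources to retain

    # keep_sources computes the complement and delegates to remove_sources
    drop = np.setdiff1d(np.arange(mixing.shape[0]), np.array(keep))
    print(drop)                            # [1 3]
    print(np.delete(mixing, drop, 0))      # rows 0 and 2 remain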
def fit_var(self):
    if self.activations_ is None:
        raise RuntimeError("VAR fitting requires source activations "
                           "(run do_mvarica first)")
    self.var_.fit(data=self.activations_[self.trial_mask_, :, :])
    self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov,
                                      self.nfft_)
    return self

Fit a VAR model to the source activations.

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain source
    activations.
f10412:c0:m11
def optimize_var(self):
    if self.activations_ is None:
        raise RuntimeError("VAR fitting requires source activations "
                           "(run do_mvarica first)")
    self.var_.optimize(self.activations_[self.trial_mask_, :, :])
    return self

Optimize the VAR model's hyperparameters (such as regularization).

Returns
-------
self : Workspace
    The Workspace object.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain source
    activations.
f10412:c0:m12
def get_connectivity(self, measure_name, plot=False):
    if self.connectivity_ is None:
        raise RuntimeError("Connectivity requires a fitted VAR model "
                           "(run do_mvarica or fit_var first)")
    cm = getattr(self.connectivity_, measure_name)()
    cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm
    if plot is None or plot:
        fig = plot
        if self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            diagonal = -1
            sm = np.abs(self.connectivity_.S())
            sm /= np.max(sm)  # scale to 1 (sources are scaled arbitrarily anyway)
            fig = self.plotting.plot_connectivity_spectrum(
                sm, fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        fig = self.plotting.plot_connectivity_spectrum(
            cm, fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return cm, fig
    return cm

Calculate spectral connectivity measure.

Parameters
----------
measure_name : str
    Name of the connectivity measure to calculate. See
    :class:`Connectivity` for supported measures.
plot : {False, None, Figure object}, optional
    Whether and where to plot the connectivity. If set to **False**,
    nothing is plotted. Otherwise set to the Figure object. If set to
    **None**, a new figure is created.

Returns
-------
measure : array, shape = [n_channels, n_channels, nfft]
    Values of the connectivity measure.
fig : Figure object
    Instance of the figure in which the plot was created. This is only
    returned if `plot` is not **False**.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain a fitted VAR
    model.
f10412:c0:m13
def get_surrogate_connectivity(self, measure_name, repeats=100, plot=False, random_state=None):
    cs = surrogate_connectivity(measure_name,
                                self.activations_[self.trial_mask_, :, :],
                                self.var_, self.nfft_, repeats,
                                random_state=random_state)
    if plot is None or plot:
        fig = plot
        if self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            diagonal = -1
            sb = self.get_surrogate_connectivity('absS', repeats)
            sb /= np.max(sb)  # scale to 1 (sources are scaled arbitrarily anyway)
            su = np.percentile(sb, 95, axis=0)
            fig = self.plotting.plot_connectivity_spectrum(
                [su], fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        cu = np.percentile(cs, 95, axis=0)
        fig = self.plotting.plot_connectivity_spectrum(
            [cu], fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return cs, fig
    return cs

Calculate spectral connectivity measure under the assumption of no
actual connectivity.

Repeatedly samples connectivity from phase-randomized data. This
provides estimates of the connectivity distribution if there was no
causal structure in the data.

Parameters
----------
measure_name : str
    Name of the connectivity measure to calculate. See
    :class:`Connectivity` for supported measures.
repeats : int, optional
    How many surrogate samples to take.

Returns
-------
measure : array, shape = [`repeats`, n_channels, n_channels, nfft]
    Values of the connectivity measure for each surrogate.

See Also
--------
:func:`scot.connectivity_statistics.surrogate_connectivity` :
    Calculates surrogate connectivity
f10412:c0:m14
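A quick NumPy sketch of how such surrogates are typically used: the 95th percentile across repeats (as in the plotting code above) gives a pointwise null threshold. All values here are random stand-ins, not real connectivity:

    import numpy as np

    rng = np.random.RandomState(0)
    repeats, m, nfft = 100, 4, 32
    cs = rng.rand(repeats, m, m, nfft)     # surrogate connectivity (stand-in)
    con = rng.rand(m, m, nfft)             # actual estimate (stand-in)

    # pointwise null threshold at the 95th percentile over surrogates
    threshold = np.percentile(cs, 95, axis=0)
    significant = con > threshold
    print(significant.mean())              # fraction of (i, j, f) bins above the null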
def get_bootstrap_connectivity(self, measure_names, repeats=100, num_samples=None, plot=False, random_state=None):
    if num_samples is None:
        num_samples = np.sum(self.trial_mask_)
    cb = bootstrap_connectivity(measure_names,
                                self.activations_[self.trial_mask_, :, :],
                                self.var_, self.nfft_, repeats, num_samples,
                                random_state=random_state)
    if plot is None or plot:
        fig = plot
        if self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            diagonal = -1
            sb = self.get_bootstrap_connectivity('absS', repeats, num_samples)
            sb /= np.max(sb)  # scale to 1 (sources are scaled arbitrarily anyway)
            sm = np.median(sb, axis=0)
            sl = np.percentile(sb, 2.5, axis=0)
            su = np.percentile(sb, 97.5, axis=0)
            fig = self.plotting.plot_connectivity_spectrum(
                [sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        cm = np.median(cb, axis=0)
        cl = np.percentile(cb, 2.5, axis=0)
        cu = np.percentile(cb, 97.5, axis=0)
        fig = self.plotting.plot_connectivity_spectrum(
            [cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return cb, fig
    return cb

Calculate bootstrap estimates of spectral connectivity measures.

Bootstrapping is performed on trial level.

Parameters
----------
measure_names : {str, list of str}
    Name(s) of the connectivity measure(s) to calculate. See
    :class:`Connectivity` for supported measures.
repeats : int, optional
    How many bootstrap estimates to take.
num_samples : int, optional
    How many samples to take for each bootstrap estimate. Defaults to
    the same number of trials as present in the data.

Returns
-------
measure : array, shape = [`repeats`, n_channels, n_channels, nfft]
    Values of the connectivity measure for each bootstrap estimate. If
    `measure_names` is a list of strings a dictionary is returned,
    where each key is the name of the measure, and the corresponding
    values are ndarrays of shape [`repeats`, n_channels, n_channels,
    nfft].

See Also
--------
:func:`scot.connectivity_statistics.bootstrap_connectivity` :
    Calculates bootstrap connectivity
f10412:c0:m15
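The median plus the 2.5/97.5 percentiles across bootstrap repeats (assumed from the plotting code above) form a 95% percentile interval. A self-contained sketch with stand-in values:

    import numpy as np

    rng = np.random.RandomState(42)
    cb = rng.rand(100, 4, 4, 32)           # bootstrap estimates (stand-in)

    # median and 95% percentile interval, as used for the plotted spectra
    cm = np.median(cb, axis=0)
    cl = np.percentile(cb, 2.5, axis=0)
    cu = np.percentile(cb, 97.5, axis=0)
    assert np.all(cl <= cm) and np.all(cm <= cu)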
def get_tf_connectivity(self, measure_name, winlen, winstep, plot=False, baseline=None, crange='default'):
    if self.activations_ is None:
        raise RuntimeError("Time/frequency connectivity requires source "
                           "activations (run do_mvarica first)")
    _, m, n = self.activations_.shape
    steps = list(range(0, n - winlen, winstep))
    nstep = len(steps)
    result = np.zeros((m, m, self.nfft_, nstep), np.complex64)
    for i, j in enumerate(steps):
        win = np.arange(winlen) + j
        data = self.activations_[:, :, win]
        data = data[self.trial_mask_, :, :]
        self.var_.fit(data)
        con = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
        result[:, :, :, i] = getattr(con, measure_name)()
    if baseline:
        inref = np.zeros(nstep, bool)
        for i, j in enumerate(steps):
            a, b = j, j + winlen - 1
            inref[i] = b >= baseline[0] and a <= baseline[1]
        if np.any(inref):
            ref = np.mean(result[:, :, :, inref], axis=3, keepdims=True)
            result -= ref
    if plot is None or plot:
        fig = plot
        t0 = 0.5 * winlen / self.fs_ + self.time_offset_
        t1 = self.data_.shape[2] / self.fs_ - 0.5 * winlen / self.fs_ + self.time_offset_
        if self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            diagonal = -1
            s = np.abs(self.get_tf_connectivity('S', winlen, winstep))
            if crange == 'default':
                crange = [np.min(s), np.max(s)]
            fig = self.plotting.plot_connectivity_timespectrum(
                s, fs=self.fs_, crange=[np.min(s), np.max(s)],
                freq_range=self.plot_f_range, time_range=[t0, t1],
                diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        tfc = self._clean_measure(measure_name, result)
        if crange == 'default':
            if diagonal == -1:
                for m in range(tfc.shape[0]):
                    tfc[m, m, :, :] = 0
            crange = [np.min(tfc), np.max(tfc)]
        fig = self.plotting.plot_connectivity_timespectrum(
            tfc, fs=self.fs_, crange=crange,
            freq_range=self.plot_f_range, time_range=[t0, t1],
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return result, fig
    return result

Calculate estimate of time-varying connectivity.

Connectivity is estimated in a sliding window approach on the current
data set. The window is stepped
`n_steps` = (`n_samples` - `winlen`) // `winstep` times.

Parameters
----------
measure_name : str
    Name of the connectivity measure to calculate. See
    :class:`Connectivity` for supported measures.
winlen : int
    Length of the sliding window (in samples).
winstep : int
    Step size for sliding window (in samples).
plot : {False, None, Figure object}, optional
    Whether and where to plot the connectivity. If set to **False**,
    nothing is plotted. Otherwise set to the Figure object. If set to
    **None**, a new figure is created.
baseline : [int, int] or None
    Start and end of the baseline period in samples. The baseline is
    subtracted from the connectivity. It is computed as the average of
    all windows that contain start or end, or fall between start and
    end. If set to None no baseline is subtracted.

Returns
-------
result : array, shape = [n_channels, n_channels, nfft, n_steps]
    Values of the connectivity measure.
fig : Figure object, optional
    Instance of the figure in which the plot was created. This is only
    returned if `plot` is not **False**.

Raises
------
RuntimeError
    If the :class:`Workspace` instance does not contain a fitted VAR
    model.
f10412:c0:m16
def compare_conditions(self, labels1, labels2, measure_name, alpha=0.01, repeats=100, num_samples=None, plot=False, random_state=None):
    self.set_used_labels(labels1)
    ca = self.get_bootstrap_connectivity(measure_name, repeats, num_samples,
                                         random_state=random_state)
    self.set_used_labels(labels2)
    cb = self.get_bootstrap_connectivity(measure_name, repeats, num_samples,
                                         random_state=random_state)
    p = test_bootstrap_difference(ca, cb)
    s = significance_fdr(p, alpha)
    if plot is None or plot:
        fig = plot
        if self.plot_diagonal == 'topo':
            diagonal = -1
        elif self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            diagonal = -1
            self.set_used_labels(labels1)
            sa = self.get_bootstrap_connectivity('absS', repeats, num_samples)
            sm = np.median(sa, axis=0)
            sl = np.percentile(sa, 2.5, axis=0)
            su = np.percentile(sa, 97.5, axis=0)
            fig = self.plotting.plot_connectivity_spectrum(
                [sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
            self.set_used_labels(labels2)
            sb = self.get_bootstrap_connectivity('absS', repeats, num_samples)
            sm = np.median(sb, axis=0)
            sl = np.percentile(sb, 2.5, axis=0)
            su = np.percentile(sb, 97.5, axis=0)
            fig = self.plotting.plot_connectivity_spectrum(
                [sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
            # significance of the spectral difference goes on the diagonal
            p_s = test_bootstrap_difference(sa, sb)
            s_s = significance_fdr(p_s, alpha)
            self.plotting.plot_connectivity_significance(
                s_s, fs=self.fs_, freq_range=self.plot_f_range,
                diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        cm = np.median(ca, axis=0)
        cl = np.percentile(ca, 2.5, axis=0)
        cu = np.percentile(ca, 97.5, axis=0)
        fig = self.plotting.plot_connectivity_spectrum(
            [cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        cm = np.median(cb, axis=0)
        cl = np.percentile(cb, 2.5, axis=0)
        cu = np.percentile(cb, 97.5, axis=0)
        fig = self.plotting.plot_connectivity_spectrum(
            [cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        self.plotting.plot_connectivity_significance(
            s, fs=self.fs_, freq_range=self.plot_f_range,
            diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return p, s, fig
    return p, s

Test for significant difference in connectivity of two sets of class
labels.

Connectivity estimates are obtained by bootstrapping. Correction for
multiple testing is performed by controlling the false discovery rate
(FDR).

Parameters
----------
labels1, labels2 : list of class labels
    The two sets of class labels to compare. Each set may contain more
    than one label.
measure_name : str
    Name of the connectivity measure to calculate. See
    :class:`Connectivity` for supported measures.
alpha : float, optional
    Maximum allowed FDR. The ratio of falsely detected significant
    differences is guaranteed to be less than `alpha`.
repeats : int, optional
    How many bootstrap estimates to take.
num_samples : int, optional
    How many samples to take for each bootstrap estimate. Defaults to
    the same number of trials as present in the data.
plot : {False, None, Figure object}, optional
    Whether and where to plot the connectivity. If set to **False**,
    nothing is plotted. Otherwise set to the Figure object. If set to
    **None**, a new figure is created.

Returns
-------
p : array, shape = [n_channels, n_channels, nfft]
    Uncorrected p-values.
s : array, dtype=bool, shape = [n_channels, n_channels, nfft]
    FDR corrected significance. True means the difference is
    significant in this location.
fig : Figure object, optional
    Instance of the figure in which the plot was created. This is only
    returned if `plot` is not **False**.
f10412:c0:m17
def show_plots(self):
    self.plotting.show_plots()

Show current plots.

This is only a convenience wrapper around :func:`matplotlib.pyplot.show`.
f10412:c0:m18
def plot_source_topos(self, common_scale=None):
    if self.unmixing_ is None and self.mixing_ is None:
        raise RuntimeError("No sources available (run do_mvarica first)")
    self._prepare_plots(True, True)
    self.plotting.plot_sources(self.topo_, self.mixmaps_, self.unmixmaps_,
                               common_scale)

Plot topography of the source decomposition.

Parameters
----------
common_scale : float, optional
    If set to None, each topoplot's color axis is scaled individually.
    Otherwise specifies the percentile (1-99) of values in all plots.
    This value is taken as the maximum color scale.
f10412:c0:m19
def plot_connectivity_topos(self, fig=None):
    self._prepare_plots(True, False)
    if self.plot_outside_topo:
        fig = self.plotting.plot_connectivity_topos('outside', self.topo_,
                                                    self.mixmaps_, fig)
    elif self.plot_diagonal == 'topo':
        fig = self.plotting.plot_connectivity_topos('diagonal', self.topo_,
                                                    self.mixmaps_, fig)
    return fig

Plot scalp projections of the sources.

This function only plots the topos. Use in combination with
connectivity plotting.

Parameters
----------
fig : {None, Figure object}, optional
    Where to plot the topos. If set to **None**, a new figure is
    created. Otherwise plot into the provided figure object.

Returns
-------
fig : Figure object
    Instance of the figure in which the plot was created.
f10412:c0:m20
def plot_connectivity_surrogate(self, measure_name, repeats=100, fig=None):
    cb = self.get_surrogate_connectivity(measure_name, repeats)
    self._prepare_plots(True, False)
    cu = np.percentile(cb, 95, axis=0)
    fig = self.plotting.plot_connectivity_spectrum(
        [cu], self.fs_, freq_range=self.plot_f_range, fig=fig)
    return fig

Plot spectral connectivity measure under the assumption of no actual
connectivity.

Repeatedly samples connectivity from phase-randomized data. This
provides estimates of the connectivity distribution if there was no
causal structure in the data.

Parameters
----------
measure_name : str
    Name of the connectivity measure to calculate. See
    :class:`Connectivity` for supported measures.
repeats : int, optional
    How many surrogate samples to take.
fig : {None, Figure object}, optional
    Where to plot the topos. If set to **None**, a new figure is
    created. Otherwise plot into the provided figure object.

Returns
-------
fig : Figure object
    Instance of the figure in which the plot was created.
f10412:c0:m21
@classmethod
def fromvector(cls, v):
    w = v.normalized()
    return cls(w.x, w.y, w.z)

Initialize from Euclidean vector
f10414:c0:m1
@property
def vector(self):
    return self._pos3d
position in 3d space
f10414:c0:m2
@property
def list(self):
    return [self._pos3d.x, self._pos3d.y, self._pos3d.z]
position in 3d space
f10414:c0:m3
def distance(self, other):
    return math.acos(self._pos3d.dot(other.vector))
Distance to another point on the sphere
f10414:c0:m6
def distances(self, points):
    return [math.acos(self._pos3d.dot(p.vector)) for p in points]
Distance to other points on the sphere
f10414:c0:m7
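Both distance methods use the fact that for unit vectors the angle between them equals the arc cosine of their dot product. A self-contained check on plain tuples (not the Point class itself):

    import math

    # quarter of a great circle between two orthogonal unit vectors
    a = (1.0, 0.0, 0.0)
    b = (0.0, 1.0, 0.0)
    dot = sum(ai * bi for ai, bi in zip(a, b))
    print(math.acos(dot))                  # pi/2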
@staticmethod
def midpoint(a, b):
    return Point.fromvector((a.vector + b.vector) / 2)
Point exactly between a and b
f10414:c3:m0
def topoplot(values, locations, axes=None, offset=(0, 0), plot_locations=True,
             plot_head=True, **kwargs):
    topo = Topoplot(**kwargs)
    topo.set_locations(locations)
    topo.set_values(values)
    topo.create_map()
    topo.plot_map(axes=axes, offset=offset)
    if plot_locations:
        topo.plot_locations(axes=axes, offset=offset)
    if plot_head:
        topo.plot_head(axes=axes, offset=offset)
    return topo

Wrapper function for :class:`Topoplot`.
f10415:m0
def warp_locations(locations, y_center=None, return_ellipsoid=False, verbose=False):
    locations = np.asarray(locations)
    if y_center is None:
        c, r = _fit_ellipsoid_full(locations)
    else:
        c, r = _fit_ellipsoid_partial(locations, y_center)
    elliptic_locations = _project_on_ellipsoid(c, r, locations)
    if verbose:
        print('ellipsoid center:', c)
        print('ellipsoid radii:', r)
        distance = np.sqrt(np.sum((locations - elliptic_locations)**2, axis=1))
        print('minimum displacement:', np.min(distance))
        print('mean displacement:', np.mean(distance))
        print('maximum displacement:', np.max(distance))
    spherical_locations = (elliptic_locations - c) / r
    if return_ellipsoid:
        return spherical_locations, c, r
    return spherical_locations

Warp EEG electrode locations to spherical layout.

EEG electrodes are warped to a spherical layout in three steps:

1. An ellipsoid is least-squares-fitted to the electrode locations.
2. Electrodes are displaced to the nearest point on the ellipsoid's
   surface.
3. The ellipsoid is transformed to a sphere, causing the new locations
   to lie exactly on a spherical surface with unit radius.

This procedure intends to minimize electrode displacement in the
original coordinate space. Simply projecting electrodes on a sphere
(e.g. by normalizing the x/y/z coordinates) typically gives much larger
displacements.

Parameters
----------
locations : array-like, shape = [n_electrodes, 3]
    Each row of `locations` corresponds to the location of an EEG
    electrode in cartesian x/y/z coordinates.
y_center : float, optional
    Fix the y-coordinate of the ellipsoid's center to this value. This
    is useful to align the ellipsoid with the central electrodes.
return_ellipsoid : bool, optional
    If `True`, center and radii of the ellipsoid are returned.

Returns
-------
newlocs : array-like, shape = [n_electrodes, 3]
    Electrode locations on unit sphere.
c : array-like, shape = [3]
    Center of the ellipsoid in the original location's coordinate
    space (only returned if `return_ellipsoid` evaluates to `True`).
r : array-like, shape = [3]
    Radii (x, y, z) of the ellipsoid in the original location's
    coordinate space (only returned if `return_ellipsoid` evaluates to
    `True`).
f10416:m0
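Step 3 above is a plain affine map: subtracting the center and dividing by the radii takes points on the ellipsoid onto the unit sphere. A minimal check with made-up ellipsoid parameters:

    import numpy as np

    c = np.array([0.0, 10.0, 0.0])         # made-up ellipsoid center
    r = np.array([80.0, 90.0, 75.0])       # made-up radii (x, y, z)
    # two points that lie exactly on the ellipsoid surface
    on_ellipsoid = c + r * np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])

    spherical = (on_ellipsoid - c) / r
    print(np.linalg.norm(spherical, axis=1))   # [1. 1.]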
def _fit_ellipsoid_full(locations):
    a = np.hstack([locations*2, locations**2])
    lsq = sp.linalg.lstsq(a, np.ones(locations.shape[0]))
    x = lsq[0]
    c = -x[:3] / x[3:]
    gam = 1 + np.sum(x[:3]**2 / x[3:])
    r = np.sqrt(gam / x[3:])
    return c, r

identify all 6 ellipsoid parameters (center, radii)
f10416:m1
def _fit_ellipsoid_partial(locations, cy):
    a = np.vstack([locations[:, 0]**2,
                   locations[:, 1]**2 - 2 * locations[:, 1] * cy,
                   locations[:, 2]**2,
                   locations[:, 0]*2,
                   locations[:, 2]*2]).T
    x = sp.linalg.lstsq(a, np.ones(locations.shape[0]))[0]
    c = [-x[3] / x[0], cy, -x[4] / x[2]]
    gam = 1 + x[3]**2 / x[0] + x[4]**2 / x[2]
    r = np.sqrt([gam / x[0], gam / x[1], gam / x[2]])
    return c, r
identify only 5 ellipsoid parameters (y-center determined by e.g. Cz)
f10416:m2
def _project_on_ellipsoid(c, r, locations):
    p0 = locations - c  # centered locations
    l2 = 1 / np.sum(p0**2 / r**2, axis=1, keepdims=True)
    p = p0 * np.sqrt(l2)  # initial guess: radial projection onto the ellipsoid
    fun = lambda x: np.sum((x.reshape(p0.shape) - p0)**2)  # minimize displacement
    con = lambda x: np.sum(x.reshape(p0.shape)**2 / r**2, axis=1) - 1  # stay on surface
    res = sp.optimize.minimize(fun, p, constraints={'type': 'eq', 'fun': con},
                               method='SLSQP')
    return res['x'].reshape(p0.shape) + c
displace locations to the nearest point on ellipsoid surface
f10416:m3
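The same constrained-minimization pattern, reduced to projecting a single point onto the unit sphere (all radii 1), runs standalone with SciPy; the point is made up:

    import numpy as np
    from scipy.optimize import minimize

    p0 = np.array([1.5, 0.5, 0.2])         # point to project (made up)
    fun = lambda x: np.sum((x - p0) ** 2)  # minimize displacement
    con = lambda x: np.sum(x ** 2) - 1     # equality constraint: stay on sphere

    res = minimize(fun, p0 / np.linalg.norm(p0),
                   constraints={'type': 'eq', 'fun': con}, method='SLSQP')
    print(res.x, np.linalg.norm(res.x))    # projected point, norm ~1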
def __init__(self, x=0.0, y=0.0, z=0.0):
    self.x, self.y, self.z = float(x), float(y), float(z)
Initialize from three numbers
f10419:c0:m0
@classmethod
def fromiterable(cls, itr):
    x, y, z = itr
    return cls(x, y, z)
Initialize from iterable
f10419:c0:m1
@classmethod
def fromvector(cls, v):
    return cls(v.x, v.y, v.z)
Copy another vector
f10419:c0:m2
def copy(self):
    return Vector(self.x, self.y, self.z)
return a copy of this vector
f10419:c0:m6
def dot(self, other):
    return self.x * other.x + self.y * other.y + self.z * other.z
Dot product with another vector
f10419:c0:m18
def cross(self, other):
    x = self.y * other.z - self.z * other.y
    y = self.z * other.x - self.x * other.z
    z = self.x * other.y - self.y * other.x
    return Vector(x, y, z)
Cross product with another vector
f10419:c0:m19
def norm2(self):
    return self.x * self.x + self.y * self.y + self.z * self.z
Squared norm of the vector
f10419:c0:m20
def norm(self):
    return math.sqrt(self.norm2())
Length of the vector
f10419:c0:m21
def normalize(self):
    self /= self.norm()
    return self
Normalize vector to length 1
f10419:c0:m22
def normalized(self):
    return self / self.norm()
Return normalized vector, but don't change original
f10419:c0:m23
def rotate(self, l, u):
    cl = math.cos(l)
    sl = math.sin(l)
    x = (cl + u.x * u.x * (1 - cl)) * self.x + (u.x * u.y * (1 - cl) - u.z * sl) * self.y + (
        u.x * u.z * (1 - cl) + u.y * sl) * self.z
    y = (u.y * u.x * (1 - cl) + u.z * sl) * self.x + (cl + u.y * u.y * (1 - cl)) * self.y + (
        u.y * u.z * (1 - cl) - u.x * sl) * self.z
    z = (u.z * u.x * (1 - cl) - u.y * sl) * self.x + (u.z * u.y * (1 - cl) + u.x * sl) * self.y + (
        cl + u.z * u.z * (1 - cl)) * self.z
    self.x, self.y, self.z = x, y, z
    return self
rotate l radians around axis u
f10419:c0:m24
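The body above expands the Rodrigues rotation matrix for angle l around unit axis u. A standalone sanity check of the same expansion on plain tuples: a quarter turn around the z axis maps the x axis onto the y axis.

    import math

    def rotate(v, l, u):
        # same rotation-matrix expansion as Vector.rotate, on plain tuples
        cl, sl = math.cos(l), math.sin(l)
        vx, vy, vz = v
        ux, uy, uz = u
        x = (cl + ux*ux*(1 - cl))*vx + (ux*uy*(1 - cl) - uz*sl)*vy + (ux*uz*(1 - cl) + uy*sl)*vz
        y = (uy*ux*(1 - cl) + uz*sl)*vx + (cl + uy*uy*(1 - cl))*vy + (uy*uz*(1 - cl) - ux*sl)*vz
        z = (uz*ux*(1 - cl) - uy*sl)*vx + (uz*uy*(1 - cl) + ux*sl)*vy + (cl + uz*uz*(1 - cl))*vz
        return x, y, z

    print(rotate((1, 0, 0), math.pi / 2, (0, 0, 1)))   # ~(0, 1, 0)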
def rotated(self, l, u):
    return self.copy().rotate(l, u)
rotate l radians around axis, but don't change original
f10419:c0:m25
def csp(x, cl, numcomp=None):
    x = np.asarray(x)
    cl = np.asarray(cl).ravel()
    if x.ndim != 3 or x.shape[0] < 2:
        raise AttributeError('CSP requires at least two trials of 3D data.')
    t, m, n = x.shape
    if t != cl.size:
        raise AttributeError('CSP requires one class label per trial: got {} '
                             'labels for {} trials.'.format(cl.size, t))
    labels = np.unique(cl)
    if labels.size != 2:
        raise AttributeError('CSP currently supports exactly two classes '
                             '(got {}).'.format(labels.size))
    x1 = x[cl == labels[0], :, :]
    x2 = x[cl == labels[1], :, :]
    sigma1 = np.zeros((m, m))
    for t in range(x1.shape[0]):
        sigma1 += np.cov(x1[t, :, :]) / x1.shape[0]
    sigma1 /= sigma1.trace()
    sigma2 = np.zeros((m, m))
    for t in range(x2.shape[0]):
        sigma2 += np.cov(x2[t, :, :]) / x2.shape[0]
    sigma2 /= sigma2.trace()
    e, w = eigh(sigma1, sigma1 + sigma2, overwrite_a=True, overwrite_b=True,
                check_finite=False)
    order = np.argsort(e)[::-1]
    w = w[:, order]
    v = np.linalg.inv(w)
    if numcomp is None:
        numcomp = w.shape[1]
    while w.shape[1] > numcomp:
        i = int(np.floor(w.shape[1]/2))
        w = np.delete(w, i, 1)
        v = np.delete(v, i, 0)
    return w, v

Calculate common spatial patterns (CSP).

Parameters
----------
x : array, shape (trials, channels, samples) or (channels, samples)
    EEG data set.
cl : list of valid dict keys
    Class labels associated with each trial. Currently, only two
    classes are supported.
numcomp : int, optional
    Number of patterns to keep after applying CSP. If `numcomp` is
    greater than channels or None, all patterns are returned.

Returns
-------
w : array, shape (channels, components)
    CSP weight matrix.
v : array, shape (components, channels)
    CSP projection matrix.
f10421:m0
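A compact NumPy/SciPy sketch of the core CSP computation on synthetic two-class data (all numbers made up); it mirrors the generalized eigendecomposition above but is not the library function itself:

    import numpy as np
    from scipy.linalg import eigh

    rng = np.random.RandomState(0)
    # two classes of trials, shape (trials, channels, samples);
    # class 1 has extra variance on the first channel
    x = np.concatenate([rng.randn(10, 3, 100),
                        rng.randn(10, 3, 100) * [[2], [1], [1]]])
    cl = np.array([0] * 10 + [1] * 10)

    # per-class trial-averaged, trace-normalized covariance matrices
    sigma = [np.mean([np.cov(t) for t in x[cl == c]], axis=0) for c in (0, 1)]
    sigma = [s / s.trace() for s in sigma]

    # generalized eigenproblem; extreme eigenvalues = most discriminative filters
    e, w = eigh(sigma[0], sigma[0] + sigma[1])
    print(e)    # eigenvalues in (0, 1): variance ratio of class 0 vs class 1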
def surrogate_connectivity(measure_names, data, var, nfft=512, repeats=100,
                           n_jobs=1, verbose=0, random_state=None):
    par, func = parallel_loop(_calc_surrogate, n_jobs=n_jobs, verbose=verbose)
    output = par(func(randomize_phase(data, random_state=random_state), var,
                      measure_names, nfft) for _ in range(repeats))
    return convert_output_(output, measure_names)

Calculate surrogate connectivity for a multivariate time series by
phase randomization [1]_.

.. note:: Parameter `var` will be modified by the function. Treat as
          undefined after the function returns.

Parameters
----------
measure_names : str or list of str
    Name(s) of the connectivity measure(s) to calculate. See
    :class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples) or (channels, samples)
    Time series data (2D or 3D for multiple trials).
var : VARBase-like object
    Instance of a VAR model.
nfft : int, optional
    Number of frequency bins to calculate. Note that these points cover
    the range between 0 and half the sampling rate.
repeats : int, optional
    Number of surrogate samples to take.
n_jobs : int | None, optional
    Number of jobs to run in parallel. If set to None, joblib is not
    used at all. See `joblib.Parallel` for details.
verbose : int, optional
    Verbosity level passed to joblib.

Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
    Values of the connectivity measure for each surrogate. If
    `measure_names` is a list of strings a dictionary is returned,
    where each key is the name of the measure, and the corresponding
    values are arrays of shape (`repeats`, n_channels, n_channels,
    nfft).

References
----------
.. [1] J. Theiler et al. Testing for nonlinearity in time series: the
       method of surrogate data. Physica D, 58: 77-94, 1992.
f10423:m0
def jackknife_connectivity(measures, data, var, nfft=512, leaveout=1,
                           n_jobs=1, verbose=0):
    data = atleast_3d(data)
    t, m, n = data.shape
    assert t > 1
    if leaveout < 1:
        leaveout = int(leaveout * t)
    num_blocks = t // leaveout
    mask = lambda block: [i for i in range(t) if i < block*leaveout or
                          i >= (block + 1) * leaveout]
    par, func = parallel_loop(_calc_jackknife, n_jobs=n_jobs, verbose=verbose)
    output = par(func(data[mask(b), :, :], var, measures, nfft)
                 for b in range(num_blocks))
    return convert_output_(output, measures)

Calculate jackknife estimates of connectivity.

For each jackknife estimate a block of trials is left out. This is
repeated until each trial was left out exactly once. The number of
estimates depends on the number of trials and the value of `leaveout`.
It is calculated by repeats = `n_trials` // `leaveout`.

.. note:: Parameter `var` will be modified by the function. Treat as
          undefined after the function returns.

Parameters
----------
measures : str or list of str
    Name(s) of the connectivity measure(s) to calculate. See
    :class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
    Time series data (multiple trials).
var : VARBase-like object
    Instance of a VAR model.
nfft : int, optional
    Number of frequency bins to calculate. Note that these points cover
    the range between 0 and half the sampling rate.
leaveout : int, optional
    Number of trials to leave out in each estimate.
n_jobs : int | None, optional
    Number of jobs to run in parallel. If set to None, joblib is not
    used at all. See `joblib.Parallel` for details.
verbose : int, optional
    Verbosity level passed to joblib.

Returns
-------
result : array, shape (`repeats`, n_channels, n_channels, nfft)
    Values of the connectivity measure for each estimate. If
    `measure_names` is a list of strings a dictionary is returned,
    where each key is the name of the measure, and the corresponding
    values are arrays of shape (`repeats`, n_channels, n_channels,
    nfft).
f10423:m2
def bootstrap_connectivity(measures, data, var, nfft=512, repeats=100,
                           num_samples=None, n_jobs=1, verbose=0,
                           random_state=None):
    rng = check_random_state(random_state)
    data = atleast_3d(data)
    t, m, n = data.shape
    assert t > 1
    if num_samples is None:
        num_samples = t
    # random_integers is deprecated in modern NumPy; rng.randint(0, t) is
    # the equivalent modern call
    mask = lambda r: rng.random_integers(0, data.shape[0] - 1, num_samples)
    par, func = parallel_loop(_calc_bootstrap, n_jobs=n_jobs, verbose=verbose)
    output = par(func(data[mask(r), :, :], var, measures, nfft)
                 for r in range(repeats))
    return convert_output_(output, measures)

Calculate bootstrap estimates of connectivity.

To obtain a bootstrap estimate trials are sampled randomly with
replacement from the data set.

.. note:: Parameter `var` will be modified by the function. Treat as
          undefined after the function returns.

Parameters
----------
measures : str or list of str
    Name(s) of the connectivity measure(s) to calculate. See
    :class:`Connectivity` for supported measures.
data : array, shape (trials, channels, samples)
    Time series data (multiple trials).
var : VARBase-like object
    Instance of a VAR model.
nfft : int, optional
    Number of frequency bins to calculate. Note that these points cover
    the range between 0 and half the sampling rate.
repeats : int, optional
    Number of bootstrap estimates to take.
num_samples : int, optional
    Number of samples to take for each bootstrap estimate. Defaults to
    the same number of trials as present in the data.
n_jobs : int | None, optional
    Number of jobs to run in parallel. If set to None, joblib is not
    used at all. See `joblib.Parallel` for details.
verbose : int, optional
    Verbosity level passed to joblib.

Returns
-------
measure : array, shape (`repeats`, n_channels, n_channels, nfft)
    Values of the connectivity measure for each bootstrap estimate. If
    `measure_names` is a list of strings a dictionary is returned,
    where each key is the name of the measure, and the corresponding
    values are arrays of shape (`repeats`, n_channels, n_channels,
    nfft).
f10423:m4
def significance_fdr(p, alpha):
    i = np.argsort(p, axis=None)
    m = i.size - np.sum(np.isnan(p))
    j = np.empty(p.shape, int)
    j.flat[i] = np.arange(1, i.size + 1)
    mask = p <= alpha * j / m
    if np.sum(mask) == 0:
        return mask
    k = np.max(j[mask])
    s = j <= k
    return s

Calculate significance by controlling for the false discovery rate.

This function determines which of the p-values in `p` can be considered
significant. Correction for multiple comparisons is performed by
controlling the false discovery rate (FDR). The FDR is the maximum
fraction of p-values that are wrongly considered significant [1]_.

Parameters
----------
p : array, shape (channels, channels, nfft)
    p-values.
alpha : float
    Maximum false discovery rate.

Returns
-------
s : array, dtype=bool, shape (channels, channels, nfft)
    Significance of each p-value.

References
----------
.. [1] Y. Benjamini, Y. Hochberg. Controlling the false discovery rate:
       a practical and powerful approach to multiple testing. J. Royal
       Stat. Soc. Series B 57(1): 289-300, 1995.
f10423:m7
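A worked Benjamini-Hochberg example on a made-up 1-D p-value array, following the same steps as the function body (rank each p-value, compare against alpha * rank / m, and accept everything ranked at or below the largest passing rank):

    import numpy as np

    p = np.array([0.001, 0.008, 0.039, 0.041, 0.2, 0.7])
    alpha = 0.05
    m = p.size

    rank = np.empty(m, int)
    rank[np.argsort(p)] = np.arange(1, m + 1)
    passed = p <= alpha * rank / m
    print(passed)          # [ True  True False False False False]
    k = np.max(rank[passed])
    print(rank <= k)       # final significance: the two smallest p-values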
def cut_segments(x2d, tr, start, stop):
    if start != int(start):
        raise ValueError("start index must be an integer")
    if stop != int(stop):
        raise ValueError("stop index must be an integer")
    x2d = np.atleast_2d(x2d)
    tr = np.asarray(tr, dtype=int).ravel()
    win = np.arange(start, stop, dtype=int)
    return np.concatenate([x2d[np.newaxis, :, t + win] for t in tr])

Cut continuous signal into segments.

Parameters
----------
x2d : array, shape (m, n)
    Input data with m signals and n samples.
tr : list of int
    Trigger positions.
start : int
    Window start (offset relative to trigger).
stop : int
    Window end (offset relative to trigger).

Returns
-------
x3d : array, shape (len(tr), m, stop-start)
    Segments cut from data. Individual segments are stacked along the
    first dimension.

See also
--------
cat_trials : Concatenate segments.

Examples
--------
>>> data = np.random.randn(5, 1000)  # 5 channels, 1000 samples
>>> tr = [750, 500, 250]  # three segments
>>> x3d = cut_segments(data, tr, 50, 100)  # each segment is 50 samples
>>> x3d.shape
(3, 5, 50)
f10424:m1
def cat_trials(x3d):
    x3d = atleast_3d(x3d)
    t = x3d.shape[0]
    return np.concatenate(np.split(x3d, t, 0), axis=2).squeeze(0)

Concatenate trials along time axis.

Parameters
----------
x3d : array, shape (t, m, n)
    Segmented input data with t trials, m signals, and n samples.

Returns
-------
x2d : array, shape (m, t * n)
    Trials are concatenated along the second axis.

See also
--------
cut_segments : Cut segments from continuous data.

Examples
--------
>>> x = np.random.randn(6, 4, 150)
>>> y = cat_trials(x)
>>> y.shape
(4, 900)
f10424:m2
def dot_special(x2d, x3d):
    x3d = atleast_3d(x3d)
    x2d = np.atleast_2d(x2d)
    return np.concatenate([x2d.dot(x3d[i, ...])[np.newaxis, ...]
                           for i in range(x3d.shape[0])])

Segment-wise dot product.

This function calculates the dot product of x2d with each trial of x3d.

Parameters
----------
x2d : array, shape (p, m)
    Input argument.
x3d : array, shape (t, m, n)
    Segmented input data with t trials, m signals, and n samples. The
    dot product with x2d is calculated for each trial.

Returns
-------
out : array, shape (t, p, n)
    Dot product of x2d with each trial of x3d.

Examples
--------
>>> x = np.random.randn(6, 40, 150)
>>> a = np.ones((7, 40))
>>> y = dot_special(a, x)
>>> y.shape
(6, 7, 150)
f10424:m3
def randomize_phase(data, random_state=None):
    rng = check_random_state(random_state)
    data = np.asarray(data)
    data_freq = np.fft.rfft(data)
    data_freq = np.abs(data_freq) * np.exp(1j * rng.random_sample(data_freq.shape) * 2 * np.pi)
    return np.fft.irfft(data_freq, data.shape[-1])

Phase randomization.

This function randomizes the spectral phase of the input data along the
last dimension.

Parameters
----------
data : array
    Input array.

Returns
-------
out : array
    Array of same shape as data.

Notes
-----
The algorithm randomizes the phase component of the input's complex
Fourier transform.

Examples
--------
.. plot::
    :include-source:

    from pylab import *
    from scot.datatools import randomize_phase
    np.random.seed(1234)
    s = np.sin(np.linspace(0, 10 * np.pi, 1000))
    x = np.vstack([s, np.sign(s)])
    y = randomize_phase(x)
    subplot(2, 1, 1)
    title('Phase randomization of sine wave and rectangular function')
    plot(x.T + [1.5, -1.5]), axis([0, 1000, -3, 3])
    subplot(2, 1, 2)
    plot(y.T + [1.5, -1.5]), axis([0, 1000, -3, 3])
    plt.show()
f10424:m4
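A standalone check (same construction as the body above, not the library call) that phase randomization preserves the amplitude spectrum; the interior bins round-trip exactly, while irfft forces the DC and Nyquist bins to be real:

    import numpy as np

    rng = np.random.RandomState(1234)
    x = np.sin(np.linspace(0, 10 * np.pi, 1000))

    xf = np.fft.rfft(x)
    yf = np.abs(xf) * np.exp(1j * rng.random_sample(xf.shape) * 2 * np.pi)
    y = np.fft.irfft(yf, x.shape[-1])

    # amplitude spectrum preserved on all interior frequency bins
    print(np.allclose(np.abs(np.fft.rfft(y))[1:-1], np.abs(xf)[1:-1]))  # True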
def acm(x, l):
    x = atleast_3d(x)
    if l > x.shape[2] - 1:
        raise AttributeError("lag exceeds data length")
    if l == 0:
        a, b = x, x
    else:
        a = x[:, :, l:]
        b = x[:, :, 0:-l]
    c = np.zeros((x.shape[1], x.shape[1]))
    for t in range(x.shape[0]):
        c += a[t, :, :].dot(b[t, :, :].T) / a.shape[2]
    c /= x.shape[0]
    return c.T

Compute autocovariance matrix at lag l.

This function calculates the autocovariance matrix of `x` at lag `l`.

Parameters
----------
x : array, shape (n_trials, n_channels, n_samples)
    Signal data (2D or 3D for multiple trials).
l : int
    Lag.

Returns
-------
c : ndarray, shape = [n_channels, n_channels]
    Autocovariance matrix of `x` at lag `l`.
f10424:m5
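For zero-mean data the lag-0 autocovariance is close to the (biased) sample covariance, since np.cov additionally subtracts the sample mean. A quick standalone check with made-up data:

    import numpy as np

    rng = np.random.RandomState(0)
    x = rng.randn(1, 2, 10000)             # one trial, two zero-mean channels

    # lag-0 autocovariance accumulated as in acm: x(t) x(t)' / n_samples
    c0 = x[0].dot(x[0].T) / x.shape[2]
    print(np.allclose(c0, np.cov(x[0], bias=True), atol=1e-2))   # True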
def mvarica(x, var, cl=None, reducedim=0.99, optimize_var=False, backend=None,
            varfit='ensemble', random_state=None):
    x = atleast_3d(x)
    t, m, l = np.shape(x)
    if backend is None:
        backend = scotbackend
    if reducedim == 'no_pca':
        c = np.eye(m)
        d = np.eye(m)
        xpca = x
    else:
        c, d, xpca = backend['pca'](x, reducedim)
    if optimize_var:
        var.optimize(xpca)
    if varfit == 'trial':
        r = np.zeros(xpca.shape)
        for i in range(t):
            a = var.fit(xpca[i, :, :])
            r[i, :, :] = xpca[i, :, :] - var.predict(xpca[i, :, :])[0, :, :]
    elif varfit == 'class':
        r = np.zeros(xpca.shape)
        for i in np.unique(cl):
            mask = cl == i
            a = var.fit(xpca[mask, :, :])
            r[mask, :, :] = xpca[mask, :, :] - var.predict(xpca[mask, :, :])
    elif varfit == 'ensemble':
        a = var.fit(xpca)
        r = xpca - var.predict(xpca)
    else:
        raise ValueError('unknown varfit mode: {}'.format(varfit))
    mx, ux = backend['ica'](cat_trials(r), random_state=random_state)
    e = dot_special(ux.T, r)
    b = a.copy()
    for k in range(0, a.p):
        b.coef[:, k::a.p] = mx.dot(a.coef[:, k::a.p].transpose()).dot(ux).transpose()
    mx = mx.dot(d)
    ux = c.dot(ux)

    class Result:
        unmixing = ux
        mixing = mx
        residuals = e
        var_residuals = r
        c = np.cov(cat_trials(e).T, rowvar=False)
    Result.b = b
    Result.a = a
    Result.xpca = xpca
    return Result

Performs joint VAR model fitting and ICA source separation.

This function implements the MVARICA procedure [1]_.

Parameters
----------
x : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples]
    Data set.
var : :class:`~scot.var.VARBase`-like object
    Vector autoregressive model (VAR) object that is used for model
    fitting.
cl : list of valid dict keys, optional
    Class labels associated with each trial.
reducedim : {int, float, 'no_pca', None}, optional
    A number less than 1 is interpreted as the fraction of variance
    that should remain in the data. All components that describe in
    total less than `1-reducedim` of the variance are removed by the
    PCA step. An integer number of 1 or greater is interpreted as the
    number of components to keep after applying PCA. If set to None,
    all PCA components are retained. If set to 'no_pca', the PCA step
    is skipped.
optimize_var : bool, optional
    Whether to call automatic optimization of the VAR fitting routine.
backend : dict-like, optional
    Specify backend to use. When set to None the backend configured in
    config.backend is used.
varfit : string
    Determines how to calculate the residuals for source decomposition.
    'ensemble' (default) fits one model to the whole data set, 'class'
    fits a new model for each class, and 'trial' fits a new model for
    each individual trial.

Returns
-------
result : class
    A class with the following attributes is returned:

    +---------------+--------------------------------------------------------+
    | mixing        | Source mixing matrix                                   |
    +---------------+--------------------------------------------------------+
    | unmixing      | Source unmixing matrix                                 |
    +---------------+--------------------------------------------------------+
    | residuals     | Residuals of the VAR model(s) in source space          |
    +---------------+--------------------------------------------------------+
    | var_residuals | Residuals of the VAR model(s) in EEG space (before ICA)|
    +---------------+--------------------------------------------------------+
    | c             | Noise covariance of the VAR model(s) in source space   |
    +---------------+--------------------------------------------------------+
    | b             | VAR model coefficients (source space)                  |
    +---------------+--------------------------------------------------------+
    | a             | VAR model coefficients (EEG space)                     |
    +---------------+--------------------------------------------------------+

Notes
-----
MVARICA is performed with the following steps:

1. Optional dimensionality reduction with PCA
2. Fitting a VAR model to the data
3. Decomposing the VAR model residuals with ICA
4. Correcting the VAR coefficients

References
----------
.. [1] G. Gomez-Herrero et al. "Measuring directional coupling between
       EEG sources", NeuroImage, 2008
f10425:m0
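A hedged usage sketch on random data, assuming SCoT's package layout (scot.var.VAR and scot.varica.mvarica; verify against the installed version before relying on these paths):

    import numpy as np
    from scot.var import VAR          # assumed module path
    from scot.varica import mvarica   # assumed module path

    x = np.random.randn(20, 8, 500)   # 20 trials, 8 channels, 500 samples
    result = mvarica(x, VAR(2), reducedim='no_pca', varfit='ensemble')
    print(result.mixing.shape)        # source mixing matrix
    print(result.b.coef.shape)        # corrected VAR coefficients (source space)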
def cspvarica(x, var, cl, reducedim=None, optimize_var=False, backend=None,
              varfit='ensemble', random_state=None):
    x = atleast_3d(x)
    t, m, l = np.shape(x)
    if backend is None:
        backend = scotbackend
    c, d, xcsp = backend['csp'](x, cl, reducedim)
    if optimize_var:
        var.optimize(xcsp)
    if varfit == 'trial':
        r = np.zeros(xcsp.shape)
        for i in range(t):
            a = var.fit(xcsp[i, :, :])
            r[i, :, :] = xcsp[i, :, :] - var.predict(xcsp[i, :, :])[0, :, :]
    elif varfit == 'class':
        r = np.zeros(xcsp.shape)
        for i in np.unique(cl):
            mask = cl == i
            a = var.fit(xcsp[mask, :, :])
            r[mask, :, :] = xcsp[mask, :, :] - var.predict(xcsp[mask, :, :])
    elif varfit == 'ensemble':
        a = var.fit(xcsp)
        r = xcsp - var.predict(xcsp)
    else:
        raise ValueError('unknown varfit mode: {}'.format(varfit))
    mx, ux = backend['ica'](r, random_state=random_state)
    e = dot_special(ux.T, r)
    b = a.copy()
    for k in range(0, a.p):
        b.coef[:, k::a.p] = mx.dot(a.coef[:, k::a.p].transpose()).dot(ux).transpose()
    mx = mx.dot(d)
    ux = c.dot(ux)

    class Result:
        unmixing = ux
        mixing = mx
        residuals = e
        var_residuals = r
        c = np.cov(cat_trials(e))
    Result.b = b
    Result.a = a
    Result.xcsp = xcsp
    return Result

Performs joint VAR model fitting and ICA source separation.

This function implements the CSPVARICA procedure [1]_.

Parameters
----------
x : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples]
    Data set.
var : :class:`~scot.var.VARBase`-like object
    Vector autoregressive model (VAR) object that is used for model
    fitting.
cl : list of valid dict keys
    Class labels associated with each trial.
reducedim : {int}, optional
    Number of (most discriminative) components to keep after applying
    the CSP. If set to None, retain all components.
optimize_var : bool, optional
    Whether to call automatic optimization of the VAR fitting routine.
backend : dict-like, optional
    Specify backend to use. When set to None the backend configured in
    config.backend is used.
varfit : string
    Determines how to calculate the residuals for source decomposition.
    'ensemble' (default) fits one model to the whole data set, 'class'
    fits a new model for each class, and 'trial' fits a new model for
    each individual trial.

Returns
-------
Result : class
    A class with the following attributes is returned:

    +---------------+--------------------------------------------------------+
    | mixing        | Source mixing matrix                                   |
    +---------------+--------------------------------------------------------+
    | unmixing      | Source unmixing matrix                                 |
    +---------------+--------------------------------------------------------+
    | residuals     | Residuals of the VAR model(s) in source space          |
    +---------------+--------------------------------------------------------+
    | var_residuals | Residuals of the VAR model(s) in EEG space (before ICA)|
    +---------------+--------------------------------------------------------+
    | c             | Noise covariance of the VAR model(s) in source space   |
    +---------------+--------------------------------------------------------+
    | b             | VAR model coefficients (source space)                  |
    +---------------+--------------------------------------------------------+
    | a             | VAR model coefficients (EEG space)                     |
    +---------------+--------------------------------------------------------+

Notes
-----
CSPVARICA is performed with the following steps:

1. Dimensionality reduction with CSP
2. Fitting a VAR model to the data
3. Decomposing the VAR model residuals with ICA
4. Correcting the VAR coefficients

References
----------
.. [1] M. Billinger et al. "SCoT: A Python Toolbox for EEG Source
       Connectivity", Frontiers in Neuroinformatics, 2014
f10425:m1
def check_random_state(seed):
    if seed is None or seed is np.random:
        return np.random.mtrand._rand
    if isinstance(seed, (int, np.integer)):
        return np.random.RandomState(seed)
    if isinstance(seed, np.random.RandomState):
        return seed
    raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
                     ' instance' % seed)

Turn seed into a np.random.RandomState instance.

If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
f10426:m0
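A short usage sketch; the function is re-declared here verbatim so the snippet runs standalone:

    import numpy as np

    def check_random_state(seed):
        # condensed copy of the function above
        if seed is None or seed is np.random:
            return np.random.mtrand._rand
        if isinstance(seed, (int, np.integer)):
            return np.random.RandomState(seed)
        if isinstance(seed, np.random.RandomState):
            return seed
        raise ValueError('%r cannot be used to seed a RandomState instance'
                         % seed)

    # the same integer seed always yields the same stream ...
    assert check_random_state(42).rand() == check_random_state(42).rand()
    # ... and an existing RandomState instance is passed through unchanged
    rng = np.random.RandomState(7)
    assert check_random_state(rng) is rng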
def cuthill_mckee(matrix):
    matrix = np.atleast_2d(matrix)
    n, m = matrix.shape
    assert n == m
    matrix = np.logical_or(matrix, matrix.T)
    degree = np.sum(matrix, 0)
    order = [np.argmin(degree)]
    for i in range(n):
        adj = np.nonzero(matrix[order[i]])[0]
        adj = [a for a in adj if a not in order]
        if not adj:
            idx = [i for i in range(n) if i not in order]
            order.append(idx[np.argmin(degree[idx])])
        else:
            if len(adj) == 1:
                order.append(adj[0])
            else:
                adj = np.asarray(adj)
                i = adj[np.argsort(degree[adj])]
                order.extend(i.tolist())
        if len(order) == n:
            break
    return order

Implementation of the Cuthill-McKee algorithm.

Permute a symmetric binary matrix into a band matrix form with a small
bandwidth.

Parameters
----------
matrix : ndarray, dtype=bool, shape = [n, n]
    The matrix is internally converted to a symmetric matrix by setting
    each element [i,j] to True if either [i,j] or [j,i] evaluates to
    true.

Returns
-------
order : list of int
    Permutation indices.

Examples
--------
>>> A = np.array([[0,0,1,1], [0,0,0,0], [1,0,1,0], [1,0,0,0]])
>>> p = cuthill_mckee(A)
>>> A
array([[0, 0, 1, 1],
       [0, 0, 0, 0],
       [1, 0, 1, 0],
       [1, 0, 0, 0]])
>>> A[p,:][:,p]
array([[0, 0, 0, 0],
       [0, 0, 1, 0],
       [0, 1, 0, 1],
       [0, 0, 1, 1]])
f10426:m1
def cartesian(arrays, out=None):
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        cartesian(arrays[1:], out=out[0:m, 1:])
        for j in range(1, arrays[0].size):
            out[j * m: (j + 1) * m, 1:] = out[0:m, 1:]
    return out

Generate a cartesian product of input arrays.

Parameters
----------
arrays : list of array-like
    1-D arrays to form the cartesian product of.
out : ndarray
    Array to place the cartesian product in.

Returns
-------
out : ndarray
    2-D array of shape (M, len(arrays)) containing cartesian products
    formed of input arrays.

Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
       [1, 4, 7],
       [1, 5, 6],
       [1, 5, 7],
       [2, 4, 6],
       [2, 4, 7],
       [2, 5, 6],
       [2, 5, 7],
       [3, 4, 6],
       [3, 4, 7],
       [3, 5, 6],
       [3, 5, 7]])

References
----------
http://stackoverflow.com/a/1235363/3005167
f10426:m2
def connectivity(measure_names, b, c=None, nfft=512):
    con = Connectivity(b, c, nfft)
    try:
        return getattr(con, measure_names)()
    except TypeError:
        return dict((m, getattr(con, m)()) for m in measure_names)

Calculate connectivity measures.

Parameters
----------
measure_names : str or list of str
    Name(s) of the connectivity measure(s) to calculate. See
    :class:`Connectivity` for supported measures.
b : array, shape (n_channels, n_channels * model_order)
    VAR model coefficients. See :ref:`var-model-coefficients` for
    details about the arrangement of coefficients.
c : array, shape (n_channels, n_channels), optional
    Covariance matrix of the driving noise process. Identity matrix is
    used if set to None (default).
nfft : int, optional
    Number of frequency bins to calculate. Note that these points cover
    the range between 0 and half the sampling rate.

Returns
-------
result : array, shape (n_channels, n_channels, `nfft`)
    An array of shape (m, m, nfft) is returned if measures is a string.
    If measures is a list of strings, a dictionary is returned, where
    each key is the name of the measure, and the corresponding values
    are arrays of shape (m, m, nfft).

Notes
-----
When using this function, it is more efficient to get several measures
at once than calling the function multiple times.

Examples
--------
>>> c = connectivity(['DTF', 'PDC'], [[0.3, 0.6], [0.0, 0.9]])
f10428:m0
@memoize
def Cinv(self):
    try:
        return np.linalg.inv(self.c)
    except np.linalg.linalg.LinAlgError:
        print('Warning: non-invertible noise covariance matrix c')
        return np.eye(self.c.shape[0])
Inverse of the noise covariance.
f10428:c0:m1
@memoize
def A(self):
    return fft(np.dstack([np.eye(self.m), -self.b]),
               self.nfft * 2 - 1)[:, :, :self.nfft]

Spectral VAR coefficients.

.. math:: \mathbf{A}(f) = \mathbf{I} - \sum_{k=1}^{p} \mathbf{a}^{(k)}
          \mathrm{e}^{-2\pi i k f}
f10428:c0:m2
@memoize
def H(self):
    return _inv3(self.A())

VAR transfer function.

.. math:: \mathbf{H}(f) = \mathbf{A}(f)^{-1}
f10428:c0:m3
@memoize
def S(self):
    if self.c is None:
        raise RuntimeError('Cross-spectral density requires noise '
                           'covariance matrix c')
    H = self.H()
    S = np.empty(H.shape, dtype=H.dtype)
    for f in range(H.shape[2]):
        S[:, :, f] = H[:, :, f].dot(self.c).dot(H[:, :, f].conj().T)
    return S

Cross-spectral density.

.. math:: \mathbf{S}(f) = \mathbf{H}(f) \mathbf{C} \mathbf{H}'(f)
f10428:c0:m4
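The loop above assembles S one frequency bin at a time. A self-contained NumPy check at a single made-up bin that S(f) = H(f) C H'(f) is Hermitian:

    import numpy as np

    m = 3
    H_f = np.eye(m) + 0.1j * np.ones((m, m))   # made-up transfer function value
    C = np.diag([1.0, 2.0, 0.5])               # made-up noise covariance

    S_f = H_f.dot(C).dot(H_f.conj().T)
    print(np.allclose(S_f, S_f.conj().T))      # True: S(f) is Hermitian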
@memoize
def logS(self):
    return np.log10(np.abs(self.S()))

Logarithmic cross-spectral density.

.. math:: \mathrm{logS}(f) = \log | \mathbf{S}(f) |
f10428:c0:m5
@memoize
def absS(self):
    return np.abs(self.S())

Absolute cross-spectral density.

.. math:: \mathrm{absS}(f) = | \mathbf{S}(f) |
f10428:c0:m6
@memoize
def G(self):
    if self.c is None:
        raise RuntimeError('Inverse cross-spectral density requires noise '
                           'covariance matrix c')
    A = self.A()
    # G = A' C^-1 A, evaluated for each frequency bin
    G = np.einsum('ji..., jk... ->ik...', A.conj(), self.Cinv())
    G = np.einsum('ij..., jk... ->ik...', G, A)
    return G

Inverse cross-spectral density.

.. math:: \mathbf{G}(f) = \mathbf{A}(f) \mathbf{C}^{-1} \mathbf{A}'(f)
f10428:c0:m7
@memoize
def logG(self):
    return np.log10(np.abs(self.G()))

Logarithmic inverse cross-spectral density.

.. math:: \mathrm{logG}(f) = \log | \mathbf{G}(f) |
f10428:c0:m8
@memoize
def COH(self):
    S = self.S()
    # normalize by the outer product of the diagonal (auto-spectra)
    return S / np.sqrt(np.einsum('ii..., jj... ->ij...', S, S.conj()))

Coherence.

.. math:: \mathrm{COH}_{ij}(f) = \frac{S_{ij}(f)}
          {\sqrt{S_{ii}(f) S_{jj}(f)}}

References
----------
P. L. Nunez, R. Srinivasan, A. F. Westdorp, R. S. Wijesinghe,
D. M. Tucker, R. B. Silverstein, P. J. Cadusch. EEG coherency I:
statistics, reference electrode, volume conduction, Laplacians,
cortical imaging, and interpretation at multiple scales. Electroenceph.
Clin. Neurophysiol. 103(5): 499-515, 1997.
f10428:c0:m9
@memoize
def PHI(self):
    return np.angle(self.S())

Phase angle.

Phase angle of complex :func:`S`.
f10428:c0:m10
@memoize
def pCOH(self):
    G = self.G()
    return G / np.sqrt(np.einsum('ii..., jj... ->ij...', G, G))

Partial coherence.

.. math:: \mathrm{pCOH}_{ij}(f) = \frac{G_{ij}(f)}
          {\sqrt{G_{ii}(f) G_{jj}(f)}}

References
----------
P. J. Franaszczuk, K. J. Blinowska, M. Kowalczyk. The application of
parametric multichannel spectral estimates in the study of electrical
brain activity. Biol. Cybernetics 51(4): 239-247, 1985.
f10428:c0:m11
@memoize<EOL><INDENT>def PDC(self):<DEDENT>
A = self.A()<EOL>return np.abs(A / np.sqrt(np.sum(A.conj() * A, axis=<NUM_LIT:0>, keepdims=True)))<EOL>
Partial directed coherence. .. math:: \mathrm{PDC}_{ij}(f) = \frac{|A_{ij}(f)|}{\sqrt{A_{:j}'(f) A_{:j}(f)}} References ---------- L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6): 463-474, 2001.
f10428:c0:m12
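The normalization runs over the column (source) index j; a standalone sketch given spectral coefficients A of shape (m, m, nfft), equivalent to the body above:

    import numpy as np

    def pdc(A):
        # normalize each entry by the norm of its column A_{:j}(f)
        colnorm = np.sqrt(np.sum(np.abs(A) ** 2, axis=0, keepdims=True))
        return np.abs(A) / colnorm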
@memoize<EOL><INDENT>def sPDC(self):<DEDENT>
return self.PDC()**<NUM_LIT:2><EOL>
Squared partial directed coherence. .. math:: \mathrm{sPDC}_{ij}(f) = \frac{|A_{ij}(f)|^2}{\mathbf{1}^T |A_{:j}(f)|^2} References ---------- L. Astolfi, F. Cincotti, D. Mattia, M. G. Marciani, L. Baccala, F. D. Fallani, S. Salinari, M. Ursino, M. Zavaglia, F. Babiloni. Assessing cortical functional connectivity by partial directed coherence: simulations and application to real data. IEEE Trans. Biomed. Eng. 53(9): 1802-1812, 2006.
f10428:c0:m13
@memoize<EOL><INDENT>def ffPDC(self):<DEDENT>
A = self.A()<EOL>return np.abs(A * self.nfft / np.sqrt(np.sum(A.conj() * A, axis=(<NUM_LIT:0>, <NUM_LIT:2>),<EOL>keepdims=True)))<EOL>
Full frequency partial directed coherence. .. math:: \mathrm{ffPDC}_{ij}(f) = \frac{|A_{ij}(f)|}{\sqrt{\sum_f A_{:j}'(f) A_{:j}(f)}}
f10428:c0:m14
@memoize<EOL><INDENT>def PDCF(self):<DEDENT>
A = self.A()<EOL>return np.abs(A / np.sqrt(np.einsum('<STR_LIT>',<EOL>A.conj(), self.Cinv(), A)))<EOL>
Partial directed coherence factor. .. math:: \mathrm{PDCF}_{ij}(f) = \frac{|A_{ij}(f)|}{\sqrt{A_{:j}'(f) \mathbf{C}^{-1} A_{:j}(f)}} References ---------- L. A. Baccalá, K. Sameshima. Partial directed coherence: a new concept in neural structure determination. Biol. Cybernetics 84(6): 463-474, 2001.
f10428:c0:m15
@memoize<EOL><INDENT>def GPDC(self):<DEDENT>
A = self.A()<EOL>tmp = A / np.sqrt(np.einsum('<STR_LIT>',<EOL>A.conj(), <NUM_LIT:1> / np.diag(self.c), A, self.c))<EOL>return np.abs(tmp)<EOL>
Generalized partial directed coherence. .. math:: \mathrm{GPDC}_{ij}(f) = \frac{|A_{ij}(f)|}{\sigma_i \sqrt{A_{:j}'(f) \mathrm{diag}(\mathbf{C})^{-1} A_{:j}(f)}} References ---------- L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012.
f10428:c0:m16
@memoize<EOL><INDENT>def DTF(self):<DEDENT>
H = self.H()<EOL>return np.abs(H / np.sqrt(np.sum(H * H.conj(), axis=<NUM_LIT:1>, keepdims=True)))<EOL>
Directed transfer function. .. math:: \mathrm{DTF}_{ij}(f) = \frac{|H_{ij}(f)|}{\sqrt{H_{i:}(f) H_{i:}'(f)}} References ---------- M. J. Kaminski, K. J. Blinowska. A new method of the description of the information flow in the brain structures. Biol. Cybernetics 65(3): 203-210, 1991.
f10428:c0:m17
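DTF mirrors PDC with the normalization taken over rows (sinks) of the transfer function instead of columns of A; a standalone sketch given H of shape (m, m, nfft):

    import numpy as np

    def dtf(H):
        # normalize each entry by the norm of its row H_{i:}(f)
        rownorm = np.sqrt(np.sum(np.abs(H) ** 2, axis=1, keepdims=True))
        return np.abs(H) / rownorm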
@memoize<EOL><INDENT>def ffDTF(self):<DEDENT>
H = self.H()<EOL>return np.abs(H * self.nfft / np.sqrt(np.sum(H * H.conj(), axis=(<NUM_LIT:1>, <NUM_LIT:2>),<EOL>keepdims=True)))<EOL>
Full frequency directed transfer function. .. math:: \mathrm{ffDTF}_{ij}(f) = \frac{|H_{ij}(f)|}{\sqrt{\sum_f H_{i:}(f) H_{i:}'(f)}} References ---------- A. Korzeniewska, M. Mańczak, M. Kaminski, K. J. Blinowska, S. Kasicki. Determination of information flow direction among brain structures by a modified directed transfer function (dDTF) method. J. Neurosci. Meth. 125(1-2): 195-207, 2003.
f10428:c0:m18
@memoize<EOL><INDENT>def dDTF(self):<DEDENT>
return np.abs(self.pCOH()) * self.ffDTF()<EOL>
Direct directed transfer function. .. math:: \mathrm{dDTF}_{ij}(f) = |\mathrm{pCOH}_{ij}(f)| \mathrm{ffDTF}_{ij}(f) References ---------- A. Korzeniewska, M. Mańczak, M. Kaminski, K. J. Blinowska, S. Kasicki. Determination of information flow direction among brain structures by a modified directed transfer function (dDTF) method. J. Neurosci. Meth. 125(1-2): 195-207, 2003.
f10428:c0:m19
@memoize<EOL><INDENT>def GDTF(self):<DEDENT>
H = self.H()<EOL>tmp = H / np.sqrt(np.einsum('<STR_LIT>',<EOL>H.conj(), self.c, H,<EOL><NUM_LIT:1> / self.c.diagonal()))<EOL>return np.abs(tmp)<EOL>
Generalized directed transfer function. .. math:: \mathrm{GDTF}_{ij}(f) = \frac{\sigma_j |H_{ij}(f)|}{\sqrt{H_{i:}(f) \mathrm{diag}(\mathbf{C}) H_{i:}'(f)}} References ---------- L. Faes, S. Erla, G. Nollo. Measuring connectivity in linear multivariate processes: definitions, interpretation, and practical analysis. Comput. Math. Meth. Med. 2012: 140513, 2012.
f10428:c0:m20
def pca_svd(x):
w, s, _ = np.linalg.svd(x, full_matrices=False)<EOL>return w, s ** <NUM_LIT:2><EOL>
Calculate PCA using SVD. Parameters ---------- x : ndarray, shape (channels, samples) Two-dimensional input data. Returns ------- w : ndarray, shape (channels, channels) Eigenvectors (principal components) (in columns). s : ndarray, shape (channels,) Eigenvalues.
f10429:m0
def pca_eig(x):
s, w = np.linalg.eigh(x.dot(x.T))<EOL>return w, s<EOL>
Calculate PCA using eigenvalue decomposition. Parameters ---------- x : ndarray, shape (channels, samples) Two-dimensional input data. Returns ------- w : ndarray, shape (channels, channels) Eigenvectors (principal components) (in columns). s : ndarray, shape (channels,) Eigenvalues.
f10429:m1
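Both backends diagonalize x x^T, so their eigenvalues must agree up to ordering (eigh returns them ascending, SVD descending) and the eigenvectors up to sign. A quick consistency sketch:

    import numpy as np

    x = np.random.randn(5, 200)
    w_svd, s_svd = pca_svd(x)
    w_eig, s_eig = pca_eig(x)
    assert np.allclose(np.sort(s_svd), np.sort(s_eig))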
def pca(x, subtract_mean=False, normalize=False, sort_components=True,<EOL>reducedim=None, algorithm=pca_eig):
x = np.asarray(x)<EOL>if x.ndim == <NUM_LIT:3>:<EOL><INDENT>x = cat_trials(x)<EOL><DEDENT>if reducedim:<EOL><INDENT>sort_components = True<EOL><DEDENT>if subtract_mean:<EOL><INDENT>x = x - np.mean(x, axis=<NUM_LIT:1>, keepdims=True)<EOL><DEDENT>k, l = None, None<EOL>if normalize:<EOL><INDENT>l = np.std(x, axis=<NUM_LIT:1>, ddof=<NUM_LIT:1>)<EOL>k = np.diag(<NUM_LIT:1.0> / l)<EOL>l = np.diag(l)<EOL>x = np.dot(k, x)<EOL><DEDENT>w, latent = algorithm(x)<EOL>v = w.T<EOL>if normalize:<EOL><INDENT>w = np.dot(k, w)<EOL>v = np.dot(v, l)<EOL><DEDENT>latent /= sum(latent)<EOL>if sort_components:<EOL><INDENT>order = np.argsort(latent)[::-<NUM_LIT:1>]<EOL>w = w[:, order]<EOL>v = v[order, :]<EOL>latent = latent[order]<EOL><DEDENT>if reducedim is not None:<EOL><INDENT>if reducedim < <NUM_LIT:1>:<EOL><INDENT>selected = np.nonzero(np.cumsum(latent) < reducedim)[<NUM_LIT:0>]<EOL>try:<EOL><INDENT>selected = np.concatenate([selected, [selected[-<NUM_LIT:1>] + <NUM_LIT:1>]])<EOL><DEDENT>except IndexError:<EOL><INDENT>selected = [<NUM_LIT:0>]<EOL><DEDENT>if selected[-<NUM_LIT:1>] >= w.shape[<NUM_LIT:1>]:<EOL><INDENT>selected = selected[<NUM_LIT:0>:-<NUM_LIT:1>]<EOL><DEDENT>w = w[:, selected]<EOL>v = v[selected, :]<EOL><DEDENT>else:<EOL><INDENT>w = w[:, :reducedim]<EOL>v = v[:reducedim, :]<EOL><DEDENT><DEDENT>return w, v<EOL>
Calculate principal component analysis (PCA). Parameters ---------- x : ndarray, shape (trials, channels, samples) or (channels, samples) Input data. subtract_mean : bool, optional Subtract sample mean from x. normalize : bool, optional Normalize variances before applying PCA. sort_components : bool, optional Sort principal components in order of decreasing eigenvalues. reducedim : float or int or None, optional A value less than 1 is interpreted as the fraction of variance that should be retained in the data: components are kept (in order of decreasing eigenvalue) until their cumulative variance reaches `reducedim`, and the remaining components are removed. An integer value of 1 or greater is interpreted as the number of (sorted) components to retain. If None, do not reduce dimensionality (i.e. keep all components). algorithm : func, optional Function to use for eigenvalue decomposition (:func:`pca_eig` or :func:`pca_svd`). Returns ------- w : ndarray, shape (channels, components) PCA transformation matrix. v : ndarray, shape (components, channels) Inverse PCA transformation matrix.
f10429:m2
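A hypothetical usage sketch: keep the components explaining 95 % of the variance. With the default settings v equals w.T, so v.T (w.T x) is an orthogonal projection of the data onto the retained subspace; the projection convention shown here is an assumption.

    import numpy as np

    x = np.random.randn(10, 1000)        # (channels, samples)
    x -= x.mean(axis=1, keepdims=True)   # center, as subtract_mean would
    w, v = pca(x, reducedim=0.95)
    y = np.dot(w.T, x)                   # component activations
    x_hat = np.dot(v.T, y)               # rank-reduced reconstruction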
def _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p):
t, m, l = data.shape<EOL>d = None<EOL>j, k = <NUM_LIT:0>, <NUM_LIT:0><EOL>nt = np.ceil(t / skipstep)<EOL>for trainset, testset in xvschema(t, skipstep):<EOL><INDENT>a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)<EOL>c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)<EOL>e = sp.linalg.inv(np.eye(a.shape[<NUM_LIT:0>]) * delta ** <NUM_LIT:2> + a.dot(a.T))<EOL>cc = c.transpose().dot(c)<EOL>be = b.transpose().dot(e)<EOL>bee = be.dot(e)<EOL>bea = be.dot(a)<EOL>beea = bee.dot(a)<EOL>beacc = bea.dot(cc)<EOL>dc = d.transpose().dot(c)<EOL>j += np.sum(beacc * bea - <NUM_LIT:2> * bea * dc) + np.sum(d ** <NUM_LIT:2>)<EOL>k += np.sum(beea * dc - beacc * beea) * <NUM_LIT:4> * delta<EOL><DEDENT>return j / (nt * d.size), k / (nt * d.size)<EOL>
Calculate the mean squared generalization error and its gradient for an underdetermined equation system.
f10430:m0
def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
t, m, l = data.shape<EOL>d = None<EOL>j, k = <NUM_LIT:0>, <NUM_LIT:0><EOL>nt = np.ceil(t / skipstep)<EOL>for trainset, testset in xvschema(t, skipstep):<EOL><INDENT>a, b = _construct_var_eqns(atleast_3d(data[trainset, :, :]), p)<EOL>c, d = _construct_var_eqns(atleast_3d(data[testset, :, :]), p)<EOL>e = sp.linalg.inv(np.eye(a.shape[<NUM_LIT:1>]) * delta ** <NUM_LIT:2> + a.T.dot(a))<EOL>ba = b.transpose().dot(a)<EOL>dc = d.transpose().dot(c)<EOL>bae = ba.dot(e)<EOL>baee = bae.dot(e)<EOL>baecc = bae.dot(c.transpose().dot(c))<EOL>j += np.sum(baecc * bae - <NUM_LIT:2> * bae * dc) + np.sum(d ** <NUM_LIT:2>)<EOL>k += np.sum(baee * dc - baecc * baee) * <NUM_LIT:4> * delta<EOL><DEDENT>return j / (nt * d.size), k / (nt * d.size)<EOL>
Calculate the mean squared generalization error and its gradient for an overdetermined equation system.
f10430:m1
def _get_msge_with_gradient_func(shape, p):
t, m, l = shape<EOL>n = (l - p) * t<EOL>underdetermined = n < m * p<EOL>if underdetermined:<EOL><INDENT>return _msge_with_gradient_underdetermined<EOL><DEDENT>else:<EOL><INDENT>return _msge_with_gradient_overdetermined<EOL><DEDENT>
Select which function to use for MSGE calculation (over- or underdetermined).
f10430:m2
def _get_msge_with_gradient(data, delta, xvschema, skipstep, p):
t, m, l = data.shape<EOL>n = (l - p) * t<EOL>underdetermined = n < m * p<EOL>if underdetermined:<EOL><INDENT>return _msge_with_gradient_underdetermined(data, delta, xvschema,<EOL>skipstep, p)<EOL><DEDENT>else:<EOL><INDENT>return _msge_with_gradient_overdetermined(data, delta, xvschema,<EOL>skipstep, p)<EOL><DEDENT>
Calculate the mean squared generalization error and its gradient, dispatching to the under- or overdetermined variant as appropriate.
f10430:m3
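The dispatch compares the number of available equations n = (l - p) * t with the m * p unknowns per channel; a worked instance with illustrative values:

    t, m, l, p = 10, 32, 100, 3
    n = (l - p) * t        # 970 equations
    print(n < m * p)       # 970 < 96 is False -> overdetermined path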
def fit(self, data):
data = atleast_3d(data)<EOL>if self.delta == <NUM_LIT:0> or self.delta is None:<EOL><INDENT>x, y = self._construct_eqns(data)<EOL><DEDENT>else:<EOL><INDENT>x, y = self._construct_eqns_rls(data)<EOL><DEDENT>b, res, rank, s = sp.linalg.lstsq(x, y)<EOL>self.coef = b.transpose()<EOL>self.residuals = data - self.predict(data)<EOL>self.rescov = np.cov(cat_trials(self.residuals[:, :, self.p:]))<EOL>return self<EOL>
Fit VAR model to data. Parameters ---------- data : array, shape (trials, channels, samples) or (channels, samples) Epoched or continuous data set. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example).
f10430:c0:m1
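A hedged usage sketch (assuming the class is importable as scot.var.VAR and takes the model order as its first constructor argument):

    import numpy as np
    from scot.var import VAR   # assumed import path

    data = np.random.randn(20, 4, 500)   # (trials, channels, samples)
    var = VAR(5).fit(data)               # VAR model of order p = 5
    print(var.coef.shape)                # (4, 20), i.e. (m, m * p)
    print(var.rescov.shape)              # (4, 4)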
def optimize_order(self, data, min_p=<NUM_LIT:1>, max_p=None):
data = np.asarray(data)<EOL>if data.shape[<NUM_LIT:0>] < <NUM_LIT:2>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>msge, prange = [], []<EOL>par, func = parallel_loop(_get_msge_with_gradient, n_jobs=self.n_jobs,<EOL>verbose=self.verbose)<EOL>if self.n_jobs is None:<EOL><INDENT>npar = <NUM_LIT:1><EOL><DEDENT>elif self.n_jobs < <NUM_LIT:0>:<EOL><INDENT>npar = <NUM_LIT:4> <EOL><DEDENT>else:<EOL><INDENT>npar = self.n_jobs<EOL><DEDENT>p = min_p<EOL>while True:<EOL><INDENT>result = par(func(data, self.delta, self.xvschema, <NUM_LIT:1>, p_)<EOL>for p_ in range(p, p + npar))<EOL>j, k = zip(*result)<EOL>prange.extend(range(p, p + npar))<EOL>msge.extend(j)<EOL>p += npar<EOL>if max_p is None:<EOL><INDENT>if len(msge) >= <NUM_LIT:2> and msge[-<NUM_LIT:1>] > msge[-<NUM_LIT:2>]:<EOL><INDENT>break<EOL><DEDENT><DEDENT>else:<EOL><INDENT>if prange[-<NUM_LIT:1>] >= max_p:<EOL><INDENT>i = prange.index(max_p) + <NUM_LIT:1><EOL>prange = prange[:i]<EOL>msge = msge[:i]<EOL>break<EOL><DEDENT><DEDENT><DEDENT>self.p = prange[np.argmin(msge)]<EOL>return zip(prange, msge)<EOL>
Determine optimal model order by minimizing the mean squared generalization error. Parameters ---------- data : array, shape (n_trials, n_channels, n_samples) Epoched data set on which to optimize the model order. At least two trials are required. min_p : int Minimal model order to check. max_p : int, optional Maximum model order to check. If None (default), the search stops as soon as the error starts to increase. Returns ------- result : iterable of (int, float) pairs Tested model orders and their mean squared generalization errors.
f10430:c0:m2
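Continuing the sketch above: order selection requires at least two trials and yields the (order, error) pairs that were evaluated.

    results = list(var.optimize_order(data, min_p=1, max_p=30))
    print(var.p)     # order with the smallest generalization error
    var.fit(data)    # refit with the selected order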
def optimize_delta_bisection(self, data, skipstep=<NUM_LIT:1>, verbose=None):
data = atleast_3d(data)<EOL>if data.shape[<NUM_LIT:0>] < <NUM_LIT:2>:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if verbose is None:<EOL><INDENT>verbose = config.getboolean('<STR_LIT>', '<STR_LIT>')<EOL><DEDENT>maxsteps = <NUM_LIT:10><EOL>maxdelta = <NUM_LIT><EOL>a = -<NUM_LIT:10><EOL>b = <NUM_LIT:10><EOL>trform = lambda x: np.sqrt(np.exp(x))<EOL>msge = _get_msge_with_gradient_func(data.shape, self.p)<EOL>ja, ka = msge(data, trform(a), self.xvschema, skipstep, self.p)<EOL>jb, kb = msge(data, trform(b), self.xvschema, skipstep, self.p)<EOL>while np.sign(ka) == np.sign(kb):<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>'<EOL>'<STR_LIT>' % (a, b, a * <NUM_LIT:2>, b * <NUM_LIT:2>))<EOL><DEDENT>a *= <NUM_LIT:2><EOL>b *= <NUM_LIT:2><EOL>ja, ka = msge(data, trform(a), self.xvschema, skipstep, self.p)<EOL>jb, kb = msge(data, trform(b), self.xvschema, skipstep, self.p)<EOL>if trform(b) >= maxdelta:<EOL><INDENT>if verbose:<EOL><INDENT>print('<STR_LIT>')<EOL>print('<STR_LIT>')<EOL><DEDENT>return <NUM_LIT:0><EOL><DEDENT><DEDENT>nsteps = <NUM_LIT:0><EOL>while nsteps < maxsteps:<EOL><INDENT>c = (a + b) / <NUM_LIT:2><EOL>j, k = msge(data, trform(c), self.xvschema, skipstep, self.p)<EOL>if np.sign(k) == np.sign(ka):<EOL><INDENT>a, ka = c, k<EOL><DEDENT>else:<EOL><INDENT>b, kb = c, k<EOL><DEDENT>nsteps += <NUM_LIT:1><EOL>tmp = trform([a, b, a + (b - a) * np.abs(ka) / np.abs(kb - ka)])<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' %<EOL>(nsteps, tmp[<NUM_LIT:0>], tmp[<NUM_LIT:1>], tmp[<NUM_LIT:2>]))<EOL><DEDENT><DEDENT>self.delta = trform(a + (b - a) * np.abs(ka) / np.abs(kb - ka))<EOL>if verbose:<EOL><INDENT>print('<STR_LIT>' % self.delta)<EOL><DEDENT>return self<EOL>
Find optimal ridge penalty with bisection search. Parameters ---------- data : array, shape (n_trials, n_channels, n_samples) Epoched data set. At least two trials are required. skipstep : int, optional Speed up calculation by skipping samples during cost function calculation. Returns ------- self : :class:`VAR` The :class:`VAR` object to facilitate method chaining (see usage example).
f10430:c0:m3
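The search runs on a log scale (delta = sqrt(exp(x)), with x widened from [-10, 10] until the gradient changes sign). Since the method normally returns self it chains with fit, but note that it returns 0 instead when the penalty grows without bound; the delta keyword is an assumption:

    var = VAR(5, delta=1.0)    # nonzero ridge penalty enables the RLS equations
    var.optimize_delta_bisection(data).fit(data)
    print(var.delta)           # optimized penalty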
def _construct_eqns_rls(self, data):
return _construct_var_eqns(data, self.p, self.delta)<EOL>
Construct VAR equation system with RLS constraint.
f10430:c0:m4
def fetch(dataset="<STR_LIT>", datadir=datadir):
if dataset not in datasets:<EOL><INDENT>raise ValueError("<STR_LIT>".format(dataset))<EOL><DEDENT>else:<EOL><INDENT>files = datasets[dataset]["<STR_LIT>"]<EOL>url = datasets[dataset]["<STR_LIT:url>"]<EOL>md5 = datasets[dataset]["<STR_LIT>"]<EOL><DEDENT>if not isdir(datadir):<EOL><INDENT>makedirs(datadir)<EOL><DEDENT>data = []<EOL>for n, filename in enumerate(files):<EOL><INDENT>fullfile = join(datadir, filename)<EOL>if not isfile(fullfile):<EOL><INDENT>with open(fullfile, "<STR_LIT:wb>") as f:<EOL><INDENT>response = get(join(url, filename))<EOL>f.write(response.content)<EOL><DEDENT><DEDENT>with open(fullfile, "<STR_LIT:rb>") as f: <EOL><INDENT>hash = hashlib.md5(f.read()).hexdigest()<EOL><DEDENT>if hash != md5[n]:<EOL><INDENT>raise MD5MismatchError("<STR_LIT>".format(fullfile, md5[n]))<EOL><DEDENT>data.append(convert(dataset, loadmat(fullfile)))<EOL><DEDENT>return data<EOL>
Fetch example dataset. If the requested dataset is not found in the location specified by `datadir`, the function attempts to download it. Parameters ---------- dataset : str Which dataset to load. Currently only 'mi' is supported. datadir : str Path to the storage location of example datasets. Datasets are downloaded to this location if they cannot be found. If the directory does not exist, it is created. Returns ------- data : list of dicts The data set is stored in a list, where each list element corresponds to data from one subject. Each list element is a dictionary with the following keys: "eeg" ... EEG signals "triggers" ... Trigger latencies "labels" ... Class labels "fs" ... Sample rate "locations" ... Channel locations
f10432:m0
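A usage sketch; 'mi' (motor imagery) is the only dataset the docstring documents, and the keys follow its description:

    data = fetch(dataset="mi")
    subject = data[0]                     # one dict per subject
    print(subject["fs"])                  # sample rate
    print(subject["eeg"].shape, len(subject["triggers"]))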
def __init__(self,<EOL>package_name,<EOL>rst_extension='<STR_LIT>',<EOL>package_skip_patterns=None,<EOL>module_skip_patterns=None,<EOL>):
if package_skip_patterns is None:<EOL><INDENT>package_skip_patterns = ['<STR_LIT>']<EOL><DEDENT>if module_skip_patterns is None:<EOL><INDENT>module_skip_patterns = ['<STR_LIT>', '<STR_LIT>']<EOL><DEDENT>self.package_name = package_name<EOL>self.rst_extension = rst_extension<EOL>self.package_skip_patterns = package_skip_patterns<EOL>self.module_skip_patterns = module_skip_patterns<EOL>
Initialize package for parsing Parameters ---------- package_name : string Name of the top-level package. *package_name* must be the name of an importable package rst_extension : string, optional Extension for reST files, default '.rst' package_skip_patterns : None or sequence of {strings, regexps} Sequence of strings giving URIs of packages to be excluded Operates on the package path, starting at (including) the first dot in the package path, after *package_name* - so, if *package_name* is ``sphinx``, then ``sphinx.util`` will result in ``.util`` being passed for searching by these regexps. If None, the default is used. Default is: ['\.tests$'] module_skip_patterns : None or sequence Sequence of strings giving URIs of modules to be excluded Operates on the module name including preceding URI path, back to the first dot after *package_name*. For example ``sphinx.util.console`` results in the string to search of ``.util.console`` If None, the default is used. Default is: ['\.setup$', '\._']
f10435:c0:m0
def set_package_name(self, package_name):
<EOL>self._package_name = package_name<EOL>self.root_module = __import__(package_name)<EOL>self.root_path = self.root_module.__path__[<NUM_LIT:0>]<EOL>self.written_modules = None<EOL>
Set package_name >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> docwriter.root_path == sphinx.__path__[0] True >>> docwriter.package_name = 'docutils' >>> import docutils >>> docwriter.root_path == docutils.__path__[0] True
f10435:c0:m2
def _get_object_name(self, line):
name = line.split()[<NUM_LIT:1>].split('<STR_LIT:(>')[<NUM_LIT:0>].strip()<EOL>return name.rstrip('<STR_LIT::>')<EOL>
Get second token in line >>> docwriter = ApiDocWriter('sphinx') >>> docwriter._get_object_name(" def func(): ") 'func' >>> docwriter._get_object_name(" class Klass(object): ") 'Klass' >>> docwriter._get_object_name(" class Klass: ") 'Klass'
f10435:c0:m3
def _uri2path(self, uri):
if uri == self.package_name:<EOL><INDENT>return os.path.join(self.root_path, '<STR_LIT>')<EOL><DEDENT>path = uri.replace('<STR_LIT:.>', os.path.sep)<EOL>path = path.replace(self.package_name + os.path.sep, '<STR_LIT>')<EOL>path = os.path.join(self.root_path, path)<EOL>if os.path.exists(path + '<STR_LIT>'): <EOL><INDENT>path += '<STR_LIT>'<EOL><DEDENT>elif os.path.exists(os.path.join(path, '<STR_LIT>')):<EOL><INDENT>path = os.path.join(path, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>return None<EOL><DEDENT>return path<EOL>
Convert uri to absolute filepath Parameters ---------- uri : string URI of python module to return path for Returns ------- path : None or string Returns None if there is no valid path for this URI Otherwise returns absolute file system path for URI Examples -------- >>> docwriter = ApiDocWriter('sphinx') >>> import sphinx >>> modpath = sphinx.__path__[0] >>> res = docwriter._uri2path('sphinx.builder') >>> res == os.path.join(modpath, 'builder.py') True >>> res = docwriter._uri2path('sphinx') >>> res == os.path.join(modpath, '__init__.py') True >>> docwriter._uri2path('sphinx.does_not_exist')
f10435:c0:m4
def _path2uri(self, dirpath):
relpath = dirpath.replace(self.root_path, self.package_name)<EOL>if relpath.startswith(os.path.sep):<EOL><INDENT>relpath = relpath[<NUM_LIT:1>:]<EOL><DEDENT>return relpath.replace(os.path.sep, '<STR_LIT:.>')<EOL>
Convert directory path to uri
f10435:c0:m5
def _parse_module(self, uri):
filename = self._uri2path(uri)<EOL>if filename is None:<EOL><INDENT>return ([],[])<EOL><DEDENT>f = open(filename, '<STR_LIT>')<EOL>functions, classes = self._parse_lines(f)<EOL>f.close()<EOL>return functions, classes<EOL>
Parse module defined in *uri*
f10435:c0:m6
def _parse_lines(self, linesource):
functions = []<EOL>classes = []<EOL>for line in linesource:<EOL><INDENT>if line.startswith('<STR_LIT>') and line.count('<STR_LIT:(>'):<EOL><INDENT>name = self._get_object_name(line)<EOL>if not name.startswith('<STR_LIT:_>'):<EOL><INDENT>functions.append(name)<EOL><DEDENT><DEDENT>elif line.startswith('<STR_LIT>'):<EOL><INDENT>name = self._get_object_name(line)<EOL>if not name.startswith('<STR_LIT:_>'):<EOL><INDENT>classes.append(name)<EOL><DEDENT><DEDENT>else:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>functions.sort()<EOL>classes.sort()<EOL>return functions, classes<EOL>
Parse lines of text for functions and classes
f10435:c0:m7
def generate_api_doc(self, uri):
<EOL>functions, classes = self._parse_module(uri)<EOL>if not len(functions) and not len(classes):<EOL><INDENT>print('<STR_LIT>',uri) <EOL>return '<STR_LIT>'<EOL><DEDENT>uri_short = re.sub(r'<STR_LIT>' % self.package_name,'<STR_LIT>',uri)<EOL>ad = '<STR_LIT>'<EOL>chap_title = uri_short<EOL>ad += (chap_title+'<STR_LIT:\n>'+ self.rst_section_levels[<NUM_LIT:1>] * len(chap_title)<EOL>+ '<STR_LIT>')<EOL>if '<STR_LIT:.>' in uri:<EOL><INDENT>title = '<STR_LIT>' + uri_short + '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>title = '<STR_LIT>' + uri_short + '<STR_LIT>'<EOL><DEDENT>ad += title + '<STR_LIT:\n>' + self.rst_section_levels[<NUM_LIT:2>] * len(title)<EOL>if len(classes):<EOL><INDENT>ad += '<STR_LIT>' % uri<EOL>ad += '<STR_LIT>' % uri<EOL>ad += '<STR_LIT>'<EOL><DEDENT>ad += '<STR_LIT>' + uri + '<STR_LIT:\n>'<EOL>ad += '<STR_LIT>' + uri + '<STR_LIT:\n>'<EOL>multi_class = len(classes) > <NUM_LIT:1><EOL>multi_fx = len(functions) > <NUM_LIT:1><EOL>if multi_class:<EOL><INDENT>ad += '<STR_LIT:\n>' + '<STR_LIT>' + '<STR_LIT:\n>' +self.rst_section_levels[<NUM_LIT:2>] * <NUM_LIT:7> + '<STR_LIT:\n>'<EOL><DEDENT>elif len(classes) and multi_fx:<EOL><INDENT>ad += '<STR_LIT:\n>' + '<STR_LIT>' + '<STR_LIT:\n>' +self.rst_section_levels[<NUM_LIT:2>] * <NUM_LIT:5> + '<STR_LIT:\n>'<EOL><DEDENT>for c in classes:<EOL><INDENT>ad += '<STR_LIT>' + c + '<STR_LIT>'+ self.rst_section_levels[multi_class + <NUM_LIT:2> ] *(len(c)+<NUM_LIT:9>) + '<STR_LIT>'<EOL>ad += '<STR_LIT>' + c + '<STR_LIT:\n>'<EOL>ad += '<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT>''<STR_LIT:\n>''<STR_LIT>'<EOL><DEDENT>if multi_fx:<EOL><INDENT>ad += '<STR_LIT:\n>' + '<STR_LIT>' + '<STR_LIT:\n>' +self.rst_section_levels[<NUM_LIT:2>] * <NUM_LIT:9> + '<STR_LIT>'<EOL><DEDENT>elif len(functions) and multi_class:<EOL><INDENT>ad += '<STR_LIT:\n>' + '<STR_LIT>' + '<STR_LIT:\n>' +self.rst_section_levels[<NUM_LIT:2>] * <NUM_LIT:8> + '<STR_LIT>'<EOL><DEDENT>for f in functions:<EOL><INDENT>ad += '<STR_LIT>' + uri + '<STR_LIT:.>' + f + '<STR_LIT>'<EOL><DEDENT>return ad<EOL>
Make autodoc documentation template string for a module Parameters ---------- uri : string Python location of module, e.g. 'sphinx.builder' Returns ------- S : string Contents of API doc
f10435:c0:m8
def _survives_exclude(self, matchstr, match_type):
if match_type == '<STR_LIT>':<EOL><INDENT>patterns = self.module_skip_patterns<EOL><DEDENT>elif match_type == '<STR_LIT>':<EOL><INDENT>patterns = self.package_skip_patterns<EOL><DEDENT>else:<EOL><INDENT>raise ValueError('<STR_LIT>' <EOL>% match_type)<EOL><DEDENT>L = len(self.package_name)<EOL>if matchstr[:L] == self.package_name:<EOL><INDENT>matchstr = matchstr[L:]<EOL><DEDENT>for pat in patterns:<EOL><INDENT>try:<EOL><INDENT>pat.search<EOL><DEDENT>except AttributeError:<EOL><INDENT>pat = re.compile(pat)<EOL><DEDENT>if pat.search(matchstr):<EOL><INDENT>return False<EOL><DEDENT><DEDENT>return True<EOL>
Returns True if *matchstr* does not match patterns ``self.package_name`` removed from front of string if present Examples -------- >>> dw = ApiDocWriter('sphinx') >>> dw._survives_exclude('sphinx.okpkg', 'package') True >>> dw.package_skip_patterns.append('^\\.badpkg$') >>> dw._survives_exclude('sphinx.badpkg', 'package') False >>> dw._survives_exclude('sphinx.badpkg', 'module') True >>> dw._survives_exclude('sphinx.badmod', 'module') True >>> dw.module_skip_patterns.append('^\\.badmod$') >>> dw._survives_exclude('sphinx.badmod', 'module') False
f10435:c0:m9
def discover_modules(self):
modules = [self.package_name]<EOL>for dirpath, dirnames, filenames in os.walk(self.root_path):<EOL><INDENT>root_uri = self._path2uri(os.path.join(self.root_path,<EOL>dirpath))<EOL>for dirname in dirnames[:]: <EOL><INDENT>package_uri = '<STR_LIT:.>'.join((root_uri, dirname))<EOL>if (self._uri2path(package_uri) and<EOL>self._survives_exclude(package_uri, '<STR_LIT>')):<EOL><INDENT>modules.append(package_uri)<EOL><DEDENT>else:<EOL><INDENT>dirnames.remove(dirname)<EOL><DEDENT><DEDENT>for filename in filenames:<EOL><INDENT>module_name = filename[:-<NUM_LIT:3>]<EOL>module_uri = '<STR_LIT:.>'.join((root_uri, module_name))<EOL>if (self._uri2path(module_uri) and<EOL>self._survives_exclude(module_uri, '<STR_LIT>')):<EOL><INDENT>modules.append(module_uri)<EOL><DEDENT><DEDENT><DEDENT>return sorted(modules)<EOL>
Return module sequence discovered from ``self.package_name`` Parameters ---------- None Returns ------- mods : sequence Sequence of module names within ``self.package_name`` Examples -------- >>> dw = ApiDocWriter('sphinx') >>> mods = dw.discover_modules() >>> 'sphinx.util' in mods True >>> dw.package_skip_patterns.append('\.util$') >>> 'sphinx.util' in dw.discover_modules() False >>>
f10435:c0:m10
def write_api_docs(self, outdir):
if not os.path.exists(outdir):<EOL><INDENT>os.mkdir(outdir)<EOL><DEDENT>modules = self.discover_modules()<EOL>self.write_modules_api(modules,outdir)<EOL>
Generate API reST files. Parameters ---------- outdir : string Directory name in which to store files We create automatic filenames for each module Returns ------- None Notes ----- Sets self.written_modules to list of written modules
f10435:c0:m12
def write_index(self, outdir, froot='<STR_LIT>', relative_to=None):
if self.written_modules is None:<EOL><INDENT>raise ValueError('<STR_LIT>')<EOL><DEDENT>path = os.path.join(outdir, froot+self.rst_extension)<EOL>if relative_to is not None:<EOL><INDENT>relpath = outdir.replace(relative_to + os.path.sep, '<STR_LIT>')<EOL><DEDENT>else:<EOL><INDENT>relpath = outdir<EOL><DEDENT>idx = open(path,'<STR_LIT>')<EOL>w = idx.write<EOL>w('<STR_LIT>')<EOL>w('<STR_LIT>')<EOL>for f in self.written_modules:<EOL><INDENT>w('<STR_LIT>' % os.path.join(relpath,f))<EOL><DEDENT>idx.close()<EOL>
Make a reST API index file from written files Parameters ---------- outdir : string Directory to which to write the generated index file froot : string, optional Root (filename without extension) of the filename to write to. Defaults to 'gen'. We add ``self.rst_extension``. relative_to : string Path to which written filenames are relative. This component of the written file path will be removed from outdir in the generated index. Default is None, meaning leave the path as it is.
f10435:c0:m13
def my_import(name):
mod = __import__(name)<EOL>components = name.split('<STR_LIT:.>')<EOL>for comp in components[<NUM_LIT:1>:]:<EOL><INDENT>mod = getattr(mod, comp)<EOL><DEDENT>return mod<EOL>
Module importer - taken from the Python documentation. This function allows importing names with dots in them.
f10436:m0
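The loop matters because a bare __import__('os.path') returns the top-level package os; walking the dotted components yields the leaf module:

    os_path = my_import('os.path')
    print(os_path is __import__('os.path'))   # False: bare import yields 'os'
    print(os_path.join('a', 'b'))             # 'a/b' on POSIX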
def inheritance_diagram_directive(name, arguments, options, content, lineno,<EOL>content_offset, block_text, state,<EOL>state_machine):
node = inheritance_diagram()<EOL>class_names = arguments<EOL>graph = InheritanceGraph(class_names)<EOL>for name in graph.get_all_class_names():<EOL><INDENT>refnodes, x = xfileref_role(<EOL>'<STR_LIT:class>', '<STR_LIT>' % name, name, <NUM_LIT:0>, state)<EOL>node.extend(refnodes)<EOL><DEDENT>node['<STR_LIT>'] = graph<EOL>node['<STR_LIT>'] = options.get('<STR_LIT>', <NUM_LIT:0>)<EOL>node['<STR_LIT:content>'] = "<STR_LIT:U+0020>".join(class_names)<EOL>return [node]<EOL>
Run when the inheritance_diagram directive is first encountered.
f10436:m1
def html_output_graph(self, node):
graph = node['<STR_LIT>']<EOL>parts = node['<STR_LIT>']<EOL>graph_hash = get_graph_hash(node)<EOL>name = "<STR_LIT>" % graph_hash<EOL>path = '<STR_LIT>'<EOL>dest_path = os.path.join(setup.app.builder.outdir, path)<EOL>if not os.path.exists(dest_path):<EOL><INDENT>os.makedirs(dest_path)<EOL><DEDENT>png_path = os.path.join(dest_path, name + "<STR_LIT>")<EOL>path = setup.app.builder.imgpath<EOL>urls = {}<EOL>for child in node:<EOL><INDENT>if child.get('<STR_LIT>') is not None:<EOL><INDENT>urls[child['<STR_LIT>']] = child.get('<STR_LIT>')<EOL><DEDENT>elif child.get('<STR_LIT>') is not None:<EOL><INDENT>urls[child['<STR_LIT>']] = '<STR_LIT:#>' + child.get('<STR_LIT>')<EOL><DEDENT><DEDENT>image_map = graph.run_dot(['<STR_LIT>', '<STR_LIT>' % png_path, '<STR_LIT>'],<EOL>name, parts, urls)<EOL>return ('<STR_LIT>' %<EOL>(path, name, name, image_map))<EOL>
Output the graph for HTML. This will insert a PNG with a clickable image map.
f10436:m3