idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
15,600
def from_sym_2_tri(symm):
    """Convert a 2-D symmetric matrix to its upper-triangular part in 1-D form.

    The returned vector contains the upper triangle (including the
    diagonal) read in row-major order.
    """
    # triu_indices_from yields the (row, col) index arrays of the upper
    # triangle; fancy indexing flattens the selected entries to 1-D.
    return symm[np.triu_indices_from(symm)]
convert a 2D symmetric matrix to an upper triangular matrix in 1D format
15,601
def sumexp_stable(data):
    """Compute the column-wise sum of exponentials in a numerically stable way.

    Subtracts each column's maximum before exponentiating so np.exp
    cannot overflow.

    Returns
    -------
    (column sums of exp, per-column maxima, shifted exponentials)
    """
    col_max = data.max(axis=0)
    shifted_exp = np.exp(data - col_max)
    return np.sum(shifted_exp, axis=0), col_max, shifted_exp
Compute the sum of exponents for a list of samples
15,602
def concatenate_not_none(l, axis=0):
    """Construct a numpy array by stacking the non-None arrays in a list."""
    # Drop the None placeholders, then concatenate what remains.
    present = [arr for arr in l if arr is not None]
    return np.concatenate(present, axis=axis)
Construct a numpy array by stacking not - None arrays in a list
15,603
def cov2corr(cov):
    """Calculate the correlation matrix corresponding to a covariance matrix.

    Each entry is normalized by the product of the two standard
    deviations taken from the diagonal of ``cov``.
    """
    assert cov.ndim == 2, 'covariance matrix should be 2D array'
    # Reciprocal standard deviations from the diagonal variances.
    inv_sd = 1 / np.sqrt(np.diag(cov))
    # Outer scaling: corr[i, j] = cov[i, j] / (sd[i] * sd[j]).
    return cov * inv_sd[None, :] * inv_sd[:, None]
Calculate the correlation matrix based on a covariance matrix
15,604
def usable_cpu_count():
    """Get number of CPUs usable by the current process.

    Prefers the scheduler affinity mask (Linux), then psutil's affinity
    where available, and finally the total CPU count.
    """
    try:
        # os.sched_getaffinity is not available on every platform.
        return len(os.sched_getaffinity(0))
    except AttributeError:
        pass
    try:
        return len(psutil.Process().cpu_affinity())
    except AttributeError:
        return os.cpu_count()
Get number of CPUs usable by the current process .
15,605
def phase_randomize ( data , voxelwise = False , random_state = None ) : data_ndim = data . ndim data , n_TRs , n_voxels , n_subjects = _check_timeseries_input ( data ) if isinstance ( random_state , np . random . RandomState ) : prng = random_state else : prng = np . random . RandomState ( random_state ) if n_TRs % 2 ...
Randomize phase of time series across subjects
15,606
def p_from_null ( observed , distribution , side = 'two-sided' , exact = False , axis = None ) : if side not in ( 'two-sided' , 'left' , 'right' ) : raise ValueError ( "The value for 'side' must be either " "'two-sided', 'left', or 'right', got {0}" . format ( side ) ) n_samples = len ( distribution ) logger . info ( "...
Compute p - value from null distribution
15,607
def array_correlation ( x , y , axis = 0 ) : if not isinstance ( x , np . ndarray ) : x = np . asarray ( x ) if not isinstance ( y , np . ndarray ) : y = np . asarray ( y ) if x . shape != y . shape : raise ValueError ( "Input arrays must be the same shape" ) if axis == 1 : x , y = x . T , y . T x_demean = x - np . mea...
Column - or row - wise Pearson correlation between two arrays
15,608
def _prepare_corerelation_data ( self , X1 , X2 , start_voxel = 0 , num_processed_voxels = None ) : num_samples = len ( X1 ) assert num_samples > 0 , 'at least one sample is needed for correlation computation' num_voxels1 = X1 [ 0 ] . shape [ 1 ] num_voxels2 = X2 [ 0 ] . shape [ 1 ] assert num_voxels1 * num_voxels2 == ...
Compute auto - correlation for the input data X1 and X2 .
15,609
def _normalize_correlation_data ( self , corr_data , norm_unit ) : if norm_unit > 1 : num_samples = len ( corr_data ) [ _ , d2 , d3 ] = corr_data . shape second_dimension = d2 * d3 normalized_corr_data = corr_data . reshape ( 1 , num_samples , second_dimension ) fcma_extension . normalization ( normalized_corr_data , n...
Normalize the correlation data if necessary .
15,610
def _compute_kernel_matrix_in_portion ( self , X1 , X2 ) : kernel_matrix = np . zeros ( ( self . num_samples_ , self . num_samples_ ) , np . float32 , order = 'C' ) sr = 0 row_length = self . num_processed_voxels num_voxels2 = X2 [ 0 ] . shape [ 1 ] normalized_corr_data = None while sr < self . num_voxels_ : if row_len...
Compute kernel matrix for sklearn . svm . SVC with precomputed kernel .
15,611
def _generate_training_data ( self , X1 , X2 , num_training_samples ) : if not ( isinstance ( self . clf , sklearn . svm . SVC ) and self . clf . kernel == 'precomputed' ) : corr_data = self . _prepare_corerelation_data ( X1 , X2 ) normalized_corr_data = self . _normalize_correlation_data ( corr_data , self . epochs_pe...
Generate training data for the classifier .
15,612
def fit ( self , X , y , num_training_samples = None ) : time1 = time . time ( ) assert len ( X ) == len ( y ) , 'the number of samples must be equal to the number of labels' for x in X : assert len ( x ) == 2 , 'there must be two parts for each correlation computation' X1 , X2 = zip ( * X ) if not ( isinstance ( self ...
Use correlation data to train a model .
15,613
def predict ( self , X = None ) : time1 = time . time ( ) if X is not None : for x in X : assert len ( x ) == 2 , 'there must be two parts for each correlation computation' X1 , X2 = zip ( * X ) num_voxels1 = X1 [ 0 ] . shape [ 1 ] num_voxels2 = X2 [ 0 ] . shape [ 1 ] if num_voxels1 < num_voxels2 : X1 , X2 = X2 , X1 nu...
Use a trained model to predict correlation data .
15,614
def decision_function ( self , X = None ) : if X is not None and not self . _is_equal_to_test_raw_data ( X ) : for x in X : assert len ( x ) == 2 , 'there must be two parts for each correlation computation' X1 , X2 = zip ( * X ) num_voxels1 = X1 [ 0 ] . shape [ 1 ] num_voxels2 = X2 [ 0 ] . shape [ 1 ] assert len ( X1 )...
Output the decision value of the prediction .
15,615
def _check_isc_input ( iscs , pairwise = False ) : if type ( iscs ) == list : iscs = np . array ( iscs ) [ : , np . newaxis ] elif isinstance ( iscs , np . ndarray ) : if iscs . ndim == 1 : iscs = iscs [ : , np . newaxis ] if pairwise : try : test_square = squareform ( iscs [ : , 0 ] ) n_subjects = test_square . shape ...
Checks ISC inputs for statistical tests
15,616
def _check_targets_input ( targets , data ) : if isinstance ( targets , np . ndarray ) or isinstance ( targets , list ) : targets , n_TRs , n_voxels , n_subjects = ( _check_timeseries_input ( targets ) ) if data . shape [ 0 ] != n_TRs : raise ValueError ( "Targets array must have same number of " "TRs as input data" ) ...
Checks ISFC targets input array
15,617
def compute_summary_statistic ( iscs , summary_statistic = 'mean' , axis = None ) : if summary_statistic not in ( 'mean' , 'median' ) : raise ValueError ( "Summary statistic must be 'mean' or 'median'" ) if summary_statistic == 'mean' : statistic = np . tanh ( np . nanmean ( np . arctanh ( iscs ) , axis = axis ) ) elif...
Computes summary statistics for ISCs
15,618
def _threshold_nans ( data , tolerate_nans ) : nans = np . all ( np . any ( np . isnan ( data ) , axis = 0 ) , axis = 1 ) if tolerate_nans is True : logger . info ( "ISC computation will tolerate all NaNs when averaging" ) elif type ( tolerate_nans ) is float : if not 0.0 <= tolerate_nans <= 1.0 : raise ValueError ( "I...
Thresholds data based on proportion of subjects with NaNs
15,619
def bootstrap_isc ( iscs , pairwise = False , summary_statistic = 'median' , n_bootstraps = 1000 , ci_percentile = 95 , random_state = None ) : iscs , n_subjects , n_voxels = _check_isc_input ( iscs , pairwise = pairwise ) if summary_statistic not in ( 'mean' , 'median' ) : raise ValueError ( "Summary statistic must be...
One - sample group - level bootstrap hypothesis test for ISCs
15,620
def _permute_one_sample_iscs ( iscs , group_parameters , i , pairwise = False , summary_statistic = 'median' , group_matrix = None , exact_permutations = None , prng = None ) : if exact_permutations : sign_flipper = np . array ( exact_permutations [ i ] ) else : sign_flipper = prng . choice ( [ - 1 , 1 ] , size = group...
Applies one - sample permutations to ISC data
15,621
def _permute_two_sample_iscs ( iscs , group_parameters , i , pairwise = False , summary_statistic = 'median' , exact_permutations = None , prng = None ) : if exact_permutations : group_shuffler = np . array ( exact_permutations [ i ] ) elif not exact_permutations and pairwise : group_shuffler = prng . permutation ( np ...
Applies two - sample permutations to ISC data
15,622
def timeshift_isc ( data , pairwise = False , summary_statistic = 'median' , n_shifts = 1000 , tolerate_nans = True , random_state = None ) : data , n_TRs , n_voxels , n_subjects = _check_timeseries_input ( data ) observed = isc ( data , pairwise = pairwise , summary_statistic = summary_statistic , tolerate_nans = tole...
Circular time - shift randomization for one - sample ISC test
15,623
def phaseshift_isc ( data , pairwise = False , summary_statistic = 'median' , n_shifts = 1000 , tolerate_nans = True , random_state = None ) : data , n_TRs , n_voxels , n_subjects = _check_timeseries_input ( data ) observed = isc ( data , pairwise = pairwise , summary_statistic = summary_statistic , tolerate_nans = tol...
Phase randomization for one - sample ISC test
15,624
def init_prior(self, R):
    """Initialize the prior for the subject from coordinate matrix R.

    Seeds centers and widths, packs them into one flat parameter
    vector, stores it as the prior, and returns self for chaining.
    """
    loc, scale = self.init_centers_widths(R)
    # Flat layout: K centers of n_dim values each, followed by K widths.
    packed = np.zeros(self.K * (self.n_dim + 1))
    self.set_centers(packed, loc)
    self.set_widths(packed, scale)
    self.set_prior(packed)
    return self
initialize prior for the subject
15,625
def _assign_posterior ( self ) : prior_centers = self . get_centers ( self . local_prior ) posterior_centers = self . get_centers ( self . local_posterior_ ) posterior_widths = self . get_widths ( self . local_posterior_ ) cost = distance . cdist ( prior_centers , posterior_centers , 'euclidean' ) _ , col_ind = linear_...
assign posterior to prior based on Hungarian algorithm
15,626
def _mse_converged ( self ) : mse = mean_squared_error ( self . local_prior , self . local_posterior_ , multioutput = 'uniform_average' ) if mse > self . threshold : return False , mse else : return True , mse
Check convergence based on mean squared error
15,627
def init_centers_widths(self, R):
    """Initialize the prior of centers (via k-means) and widths.

    Centers come from a deterministic k-means clustering of the
    coordinates; every width starts at the maximal sigma for R.
    """
    km = KMeans(init='k-means++', n_clusters=self.K, n_init=10,
                random_state=100)
    km.fit(R)
    widths = self._get_max_sigma(R) * np.ones((self.K, 1))
    return km.cluster_centers_, widths
Initialize prior of centers and widths
15,628
def get_template ( self , R ) : centers , widths = self . init_centers_widths ( R ) template_prior = np . zeros ( self . K * ( self . n_dim + 2 + self . cov_vec_size ) ) template_centers_cov = np . cov ( R . T ) * math . pow ( self . K , - 2 / 3.0 ) template_widths_var = self . _get_max_sigma ( R ) centers_cov_all = np...
Compute a template on latent factors
15,629
def set_widths(self, estimation, widths):
    """Write the widths block of a flat estimation vector in place."""
    # map_offset[1]:map_offset[2] is the widths segment of the layout.
    lo, hi = self.map_offset[1], self.map_offset[2]
    estimation[lo:hi] = widths.ravel()
Set estimation on widths
15,630
def set_centers_mean_cov(self, estimation, centers_mean_cov):
    """Write the centers-mean-covariance block of the estimation vector."""
    # map_offset[2]:map_offset[3] is the centers-mean-cov segment.
    lo, hi = self.map_offset[2], self.map_offset[3]
    estimation[lo:hi] = centers_mean_cov.ravel()
Set estimation on centers
15,631
def get_centers(self, estimation):
    """Extract the K x n_dim centers matrix from a flat estimation vector."""
    # The centers occupy the head of the vector, up to map_offset[1].
    flat = estimation[0:self.map_offset[1]]
    return flat.reshape(self.K, self.n_dim)
Get estimation on centers
15,632
def get_widths(self, estimation):
    """Extract the K x 1 widths column from a flat estimation vector."""
    seg = estimation[self.map_offset[1]:self.map_offset[2]]
    return seg.reshape(self.K, 1)
Get estimation on widths
15,633
def get_centers_mean_cov(self, estimation):
    """Extract the K x cov_vec_size covariance-of-centers-mean block."""
    seg = estimation[self.map_offset[2]:self.map_offset[3]]
    return seg.reshape(self.K, self.cov_vec_size)
Get estimation on the covariance of centers mean
15,634
def get_widths_mean_var(self, estimation):
    """Extract the K x 1 variance-of-widths-mean block (vector tail)."""
    tail = estimation[self.map_offset[3]:]
    return tail.reshape(self.K, 1)
Get estimation on the variance of widths mean
15,635
def get_factors(self, unique_R, inds, centers, widths):
    """Calculate RBF factors at all coordinates via the C extension.

    unique_R/inds hold, per dimension, the unique coordinate values and
    the inverse indices (as produced by get_unique_R); the extension
    fills the factor matrix in place.
    """
    n_voxel = len(inds[0])
    factors = np.zeros((n_voxel, self.K))
    tfa_extension.factor(factors, centers, widths,
                         unique_R[0], unique_R[1], unique_R[2],
                         inds[0], inds[1], inds[2])
    return factors
Calculate factors based on centers and widths
15,636
def get_weights ( self , data , F ) : beta = np . var ( data ) trans_F = F . T . copy ( ) W = np . zeros ( ( self . K , data . shape [ 1 ] ) ) if self . weight_method == 'rr' : W = np . linalg . solve ( trans_F . dot ( F ) + beta * np . identity ( self . K ) , trans_F . dot ( data ) ) else : W = np . linalg . solve ( t...
Calculate weight matrix based on fMRI data and factors
15,637
def _get_max_sigma ( self , R ) : max_sigma = 2.0 * math . pow ( np . nanmax ( np . std ( R , axis = 0 ) ) , 2 ) return max_sigma
Calculate maximum sigma of scanner RAS coordinates
15,638
def get_bounds ( self , R ) : max_sigma = self . _get_max_sigma ( R ) final_lower = np . zeros ( self . K * ( self . n_dim + 1 ) ) final_lower [ 0 : self . K * self . n_dim ] = np . tile ( np . nanmin ( R , axis = 0 ) , self . K ) final_lower [ self . K * self . n_dim : ] = np . repeat ( self . lower_ratio * max_sigma ...
Calculate lower and upper bounds for centers and widths
15,639
def _residual_multivariate ( self , estimate , unique_R , inds , X , W , template_centers , template_centers_mean_cov , template_widths , template_widths_mean_var_reci , data_sigma ) : centers = self . get_centers ( estimate ) widths = self . get_widths ( estimate ) recon = X . size other_err = 0 if template_centers is...
Residual function for estimating centers and widths
15,640
def _estimate_centers_widths ( self , unique_R , inds , X , W , init_centers , init_widths , template_centers , template_widths , template_centers_mean_cov , template_widths_mean_var_reci ) : init_estimate = np . hstack ( ( init_centers . ravel ( ) , init_widths . ravel ( ) ) ) data_sigma = 1.0 / math . sqrt ( 2.0 ) * ...
Estimate centers and widths
15,641
def _fit_tfa ( self , data , R , template_prior = None ) : if template_prior is None : template_centers = None template_widths = None template_centers_mean_cov = None template_widths_mean_var_reci = None else : template_centers = self . get_centers ( template_prior ) template_widths = self . get_widths ( template_prior...
TFA main algorithm
15,642
def get_unique_R(self, R):
    """Get unique values per dimension of coordinate matrix R.

    Returns (unique_R, inds): for each of the n_dim columns, the sorted
    unique values and the inverse indices mapping each row back into
    that unique array.
    """
    unique_R, inds = [], []
    for d in np.arange(self.n_dim):
        uniques, inverse = np.unique(R[:, d], return_inverse=True)
        unique_R.append(uniques)
        inds.append(inverse)
    return unique_R, inds
Get unique values from the coordinate matrix
15,643
def _fit_tfa_inner ( self , data , R , template_centers , template_widths , template_centers_mean_cov , template_widths_mean_var_reci ) : nfeature = data . shape [ 0 ] nsample = data . shape [ 1 ] feature_indices = np . random . choice ( nfeature , self . max_num_voxel , replace = False ) sample_features = np . zeros (...
Fit TFA model the inner loop part
15,644
def recon_err(data, F, W):
    """Calculate the root-mean-square reconstruction error of data ~ F @ W."""
    recon = F.dot(W).ravel()
    # Direct numpy MSE; equivalent to sklearn.metrics.mean_squared_error
    # with multioutput='uniform_average', without the sklearn dependency.
    err = np.mean((data.ravel() - recon) ** 2)
    return math.sqrt(err)
Calculate reconstruction error
15,645
def get_train_err(htfa, data, F):
    """Calculate the training reconstruction error for fitted factors F."""
    # Solve for the weights implied by the data, then score the fit.
    weights = htfa.get_weights(data, F)
    return recon_err(data, F, weights)
Calculate training error
15,646
def _sfn ( l , mask , myrad , bcast_var ) : clf = bcast_var [ 2 ] data = l [ 0 ] [ mask , : ] . T skf = model_selection . StratifiedKFold ( n_splits = bcast_var [ 1 ] , shuffle = False ) accuracy = np . mean ( model_selection . cross_val_score ( clf , data , y = bcast_var [ 0 ] , cv = skf , n_jobs = 1 ) ) return accura...
Score classifier on searchlight data using cross - validation .
15,647
def run ( self , clf ) : rank = MPI . COMM_WORLD . Get_rank ( ) if rank == 0 : logger . info ( 'running activity-based voxel selection via Searchlight' ) self . sl . distribute ( [ self . data ] , self . mask ) self . sl . broadcast ( ( self . labels , self . num_folds , clf ) ) if rank == 0 : logger . info ( 'data pre...
run activity - based voxel selection
15,648
def _cross_validation_for_one_voxel ( clf , vid , num_folds , subject_data , labels ) : skf = model_selection . StratifiedKFold ( n_splits = num_folds , shuffle = False ) scores = model_selection . cross_val_score ( clf , subject_data , y = labels , cv = skf , n_jobs = 1 ) logger . debug ( 'cross validation for voxel %...
Score classifier on data using cross validation .
15,649
def run(self, clf):
    """Run correlation-based voxel selection in a master-worker model.

    The master rank collects (voxel, score) tuples and returns them
    sorted by score in descending order; worker ranks process tasks and
    return an empty list.
    """
    my_rank = MPI.COMM_WORLD.Get_rank()
    if my_rank != self.master_rank:
        self._worker(clf)
        return []
    results = self._master()
    results.sort(key=lambda tup: tup[1], reverse=True)
    return results
Run correlation - based voxel selection in master - worker model .
15,650
def _master ( self ) : logger . info ( 'Master at rank %d starts to allocate tasks' , MPI . COMM_WORLD . Get_rank ( ) ) results = [ ] comm = MPI . COMM_WORLD size = comm . Get_size ( ) sending_voxels = self . voxel_unit if self . voxel_unit < self . num_voxels else self . num_voxels current_task = ( 0 , sending_voxels ...
Master node's operation.
15,651
def _worker ( self , clf ) : logger . debug ( 'worker %d is running, waiting for tasks from master at rank %d' % ( MPI . COMM_WORLD . Get_rank ( ) , self . master_rank ) ) comm = MPI . COMM_WORLD status = MPI . Status ( ) while 1 : task = comm . recv ( source = self . master_rank , tag = MPI . ANY_TAG , status = status...
Worker node's operation.
15,652
def _correlation_normalization ( self , corr ) : time1 = time . time ( ) ( sv , e , av ) = corr . shape for i in range ( sv ) : start = 0 while start < e : cur_val = corr [ i , start : start + self . epochs_per_subj , : ] cur_val = .5 * np . log ( ( cur_val + 1 ) / ( 1 - cur_val ) ) corr [ i , start : start + self . ep...
Do within - subject normalization .
15,653
def _prepare_for_cross_validation ( self , corr , clf ) : time1 = time . time ( ) ( num_processed_voxels , num_epochs , _ ) = corr . shape if isinstance ( clf , sklearn . svm . SVC ) and clf . kernel == 'precomputed' : kernel_matrices = np . zeros ( ( num_processed_voxels , num_epochs , num_epochs ) , np . float32 , or...
Prepare data for voxelwise cross validation .
15,654
def _do_cross_validation ( self , clf , data , task ) : time1 = time . time ( ) if isinstance ( clf , sklearn . svm . SVC ) and clf . kernel == 'precomputed' and self . use_multiprocessing : inlist = [ ( clf , i + task [ 0 ] , self . num_folds , data [ i , : , : ] , self . labels ) for i in range ( task [ 1 ] ) ] with ...
Run voxelwise cross validation based on correlation vectors .
15,655
def _voxel_scoring ( self , task , clf ) : time1 = time . time ( ) corr = self . _correlation_computation ( task ) time3 = time . time ( ) fcma_extension . normalization ( corr , self . epochs_per_subj ) time4 = time . time ( ) logger . debug ( 'within-subject normalization for %d voxels ' 'using C++, takes %.2f s' % (...
The voxel selection process done in the worker node .
15,656
def fit ( self , X , y , Z ) : logger . info ( 'Starting SS-SRM' ) if 0.0 >= self . alpha or self . alpha >= 1.0 : raise ValueError ( "Alpha parameter should be in range (0.0, 1.0)" ) if 0.0 >= self . gamma : raise ValueError ( "Gamma parameter should be positive." ) if len ( X ) <= 1 or len ( y ) <= 1 or len ( Z ) <= ...
Compute the Semi - Supervised Shared Response Model
15,657
def predict ( self , X ) : if hasattr ( self , 'w_' ) is False : raise NotFittedError ( "The model fit has not been run yet." ) if len ( X ) != len ( self . w_ ) : raise ValueError ( "The number of subjects does not match the one" " in the model." ) X_shared = self . transform ( X ) p = [ None ] * len ( X_shared ) for ...
Classify the output for given data
15,658
def _sssrm ( self , data_align , data_sup , labels ) : classes = self . classes_ . size self . random_state_ = np . random . RandomState ( self . rand_seed ) random_states = [ np . random . RandomState ( self . random_state_ . randint ( 2 ** 32 ) ) for i in range ( len ( data_align ) ) ] w , _ = srm . _init_w_transform...
Block - Coordinate Descent algorithm for fitting SS - SRM .
15,659
def _update_classifier ( self , data , labels , w , classes ) : data_stacked , labels_stacked , weights = SSSRM . _stack_list ( data , labels , w ) features = w [ 0 ] . shape [ 1 ] total_samples = weights . size data_th = S . shared ( data_stacked . astype ( theano . config . floatX ) ) val_ = S . shared ( labels_stack...
Update the classifier parameters theta and bias
15,660
def _compute_shared_response ( data , w ) : s = np . zeros ( ( w [ 0 ] . shape [ 1 ] , data [ 0 ] . shape [ 1 ] ) ) for m in range ( len ( w ) ) : s = s + w [ m ] . T . dot ( data [ m ] ) s /= len ( w ) return s
Compute the shared response S
15,661
def _objective_function ( self , data_align , data_sup , labels , w , s , theta , bias ) : subjects = len ( data_align ) f_val = 0.0 for subject in range ( subjects ) : samples = data_align [ subject ] . shape [ 1 ] f_val += ( 1 - self . alpha ) * ( 0.5 / samples ) * np . linalg . norm ( data_align [ subject ] - w [ su...
Compute the objective function of the Semi - Supervised SRM
15,662
def _objective_function_subject ( self , data_align , data_sup , labels , w , s , theta , bias ) : f_val = 0.0 samples = data_align . shape [ 1 ] f_val += ( 1 - self . alpha ) * ( 0.5 / samples ) * np . linalg . norm ( data_align - w . dot ( s ) , 'fro' ) ** 2 f_val += self . _loss_lr_subject ( data_sup , labels , w , ...
Compute the objective function for one subject .
15,663
def _stack_list ( data , data_labels , w ) : labels_stacked = utils . concatenate_not_none ( data_labels ) weights = np . empty ( ( labels_stacked . size , ) ) data_shared = [ None ] * len ( data ) curr_samples = 0 for s in range ( len ( data ) ) : if data [ s ] is not None : subject_samples = data [ s ] . shape [ 1 ] ...
Construct a numpy array by stacking arrays in a list
15,664
def _singlenode_searchlight ( l , msk , mysl_rad , bcast_var , extra_params ) : voxel_fn = extra_params [ 0 ] shape_mask = extra_params [ 1 ] min_active_voxels_proportion = extra_params [ 2 ] outmat = np . empty ( msk . shape , dtype = np . object ) [ mysl_rad : - mysl_rad , mysl_rad : - mysl_rad , mysl_rad : - mysl_ra...
Run searchlight function on block data in parallel .
15,665
def _get_ownership ( self , data ) : rank = self . comm . rank B = [ ( rank , idx ) for ( idx , c ) in enumerate ( data ) if c is not None ] C = self . comm . allreduce ( B ) ownership = [ None ] * len ( data ) for c in C : ownership [ c [ 1 ] ] = c [ 0 ] return ownership
Determine on which rank each subject currently resides
15,666
def _get_blocks ( self , mask ) : blocks = [ ] outerblk = self . max_blk_edge + 2 * self . sl_rad for i in range ( 0 , mask . shape [ 0 ] , self . max_blk_edge ) : for j in range ( 0 , mask . shape [ 1 ] , self . max_blk_edge ) : for k in range ( 0 , mask . shape [ 2 ] , self . max_blk_edge ) : block_shape = mask [ i :...
Divide the volume into a set of blocks
15,667
def _get_block_data ( self , mat , block ) : ( pt , sz ) = block if len ( mat . shape ) == 3 : return mat [ pt [ 0 ] : pt [ 0 ] + sz [ 0 ] , pt [ 1 ] : pt [ 1 ] + sz [ 1 ] , pt [ 2 ] : pt [ 2 ] + sz [ 2 ] ] . copy ( ) elif len ( mat . shape ) == 4 : return mat [ pt [ 0 ] : pt [ 0 ] + sz [ 0 ] , pt [ 1 ] : pt [ 1 ] + sz...
Retrieve a block from a 3D or 4D volume
15,668
def _split_volume ( self , mat , blocks ) : return [ self . _get_block_data ( mat , block ) for block in blocks ]
Convert a volume into a list of block data
15,669
def _scatter_list ( self , data , owner ) : rank = self . comm . rank size = self . comm . size subject_submatrices = [ ] nblocks = self . comm . bcast ( len ( data ) if rank == owner else None , root = owner ) for idx in range ( 0 , nblocks , size ) : padded = None extra = max ( 0 , idx + size - nblocks ) if data is n...
Distribute a list from one rank to other ranks in a cyclic manner
15,670
def distribute ( self , subjects , mask ) : if mask . ndim != 3 : raise ValueError ( 'mask should be a 3D array' ) for ( idx , subj ) in enumerate ( subjects ) : if subj is not None : if subj . ndim != 4 : raise ValueError ( 'subjects[{}] must be 4D' . format ( idx ) ) self . mask = mask rank = self . comm . rank owner...
Distribute data to MPI ranks
15,671
def run_block_function ( self , block_fn , extra_block_fn_params = None , pool_size = None ) : rank = self . comm . rank results = [ ] usable_cpus = usable_cpu_count ( ) if pool_size is None : processes = usable_cpus else : processes = min ( pool_size , usable_cpus ) if processes > 1 : with Pool ( processes ) as pool :...
Perform a function for each block in a volume .
15,672
def run_searchlight(self, voxel_fn, pool_size=None):
    """Apply voxel_fn at every masked voxel via the block runner.

    Bundles the user function, the searchlight shape, and the minimum
    active-voxel proportion as extra parameters for each block worker.
    """
    extra = (voxel_fn, self.shape, self.min_active_voxels_proportion)
    return self.run_block_function(_singlenode_searchlight, extra,
                                   pool_size)
Perform a function at each voxel which is set to True in the user - provided mask . The mask passed to the searchlight function will be further masked by the user - provided searchlight shape .
15,673
def _normalize_for_correlation ( data , axis , return_nans = False ) : shape = data . shape data = zscore ( data , axis = axis , ddof = 0 ) if not return_nans : data = np . nan_to_num ( data ) data = data / math . sqrt ( shape [ axis ] ) return data
normalize the data before computing correlation
15,674
def compute_correlation ( matrix1 , matrix2 , return_nans = False ) : matrix1 = matrix1 . astype ( np . float32 ) matrix2 = matrix2 . astype ( np . float32 ) [ r1 , d1 ] = matrix1 . shape [ r2 , d2 ] = matrix2 . shape if d1 != d2 : raise ValueError ( 'Dimension discrepancy' ) matrix1 = _normalize_for_correlation ( matr...
compute correlation between two sets of variables
15,675
def _zscore ( a ) : assert a . ndim > 1 , 'a must have more than one dimensions' zscore = scipy . stats . zscore ( a , axis = 0 ) zscore [ : , np . logical_not ( np . all ( np . isfinite ( zscore ) , axis = 0 ) ) ] = 0 return zscore
Calculating z - score of data on the first axis . If the numbers in any column are all equal scipy . stats . zscore will return NaN for this column . We shall correct them all to be zeros .
15,676
def score ( self , X , design , scan_onsets = None ) : assert X . ndim == 2 and X . shape [ 1 ] == self . beta_ . shape [ 1 ] , 'The shape of X is not consistent with the shape of data ' 'used in the fitting step. They should have the same number ' 'of voxels' assert scan_onsets is None or ( scan_onsets . ndim == 1 and...
Use the model and parameters estimated by fit function from some data of a participant to evaluate the log likelihood of some new data of the same participant . Design matrix of the same set of experimental conditions in the testing data should be provided with each column corresponding to the same condition as that co...
15,677
def _prepare_data_XY ( self , X , Y , D , F ) : XTY , XTDY , XTFY = self . _make_templates ( D , F , X , Y ) YTY_diag = np . sum ( Y * Y , axis = 0 ) YTDY_diag = np . sum ( Y * np . dot ( D , Y ) , axis = 0 ) YTFY_diag = np . sum ( Y * np . dot ( F , Y ) , axis = 0 ) XTX , XTDX , XTFX = self . _make_templates ( D , F ,...
Prepares different forms of products of design matrix X and data Y or between themselves . These products are re - used a lot during fitting . So we pre - calculate them . Because these are reused it is in principle possible to update the fitting as new data come in by just incrementally adding the products of new data...
15,678
def _prepare_data_XYX0 ( self , X , Y , X_base , X_res , D , F , run_TRs , no_DC = False ) : X_DC = self . _gen_X_DC ( run_TRs ) reg_sol = np . linalg . lstsq ( X_DC , X ) if np . any ( np . isclose ( reg_sol [ 1 ] , 0 ) ) : raise ValueError ( 'Your design matrix appears to have ' 'included baseline time series.' 'Eith...
Prepares different forms of products between design matrix X or data Y or nuisance regressors X0 . These products are re - used a lot during fitting . So we pre - calculate them . no_DC means not inserting regressors for DC components into nuisance regressor . It will only take effect if X_base is not None .
15,679
def _merge_DC_to_base ( self , X_DC , X_base , no_DC ) : if X_base is not None : reg_sol = np . linalg . lstsq ( X_DC , X_base ) if not no_DC : if not np . any ( np . isclose ( reg_sol [ 1 ] , 0 ) ) : X_base = np . concatenate ( ( X_DC , X_base ) , axis = 1 ) idx_DC = np . arange ( 0 , X_DC . shape [ 1 ] ) else : logge...
Merge DC components X_DC to the baseline time series X_base ( By baseline this means any fixed nuisance regressors not updated during fitting including DC components and any nuisance regressors provided by the user . X_DC is always in the first few columns of X_base .
15,680
def _build_index_param ( self , n_l , n_V , n_smooth ) : idx_param_sing = { 'Cholesky' : np . arange ( n_l ) , 'a1' : n_l } idx_param_fitU = { 'Cholesky' : np . arange ( n_l ) , 'a1' : np . arange ( n_l , n_l + n_V ) } idx_param_fitV = { 'log_SNR2' : np . arange ( n_V - 1 ) , 'c_space' : n_V - 1 , 'c_inten' : n_V , 'c_...
Build dictionaries to retrieve each parameter from the combined parameters .
15,681
def _score ( self , Y , design , beta , scan_onsets , beta0 , rho_e , sigma_e , rho_X0 , sigma2_X0 ) : logger . info ( 'Estimating cross-validated score for new data.' ) n_T = Y . shape [ 0 ] if design is not None : Y = Y - np . dot ( design , beta ) T_X = np . diag ( rho_X0 ) Var_X = sigma2_X0 / ( 1 - rho_X0 ** 2 ) Va...
Given the data Y and the spatial pattern beta0 of nuisance time series return the cross - validated score of the data Y given all parameters of the subject estimated during the first step . It is assumed that the user has design matrix built for the data Y . Both beta and beta0 are posterior expectation estimated from ...
15,682
def _backward_step ( self , deltaY , deltaY_sigma2inv_rho_weightT , sigma2_e , weight , mu , mu_Gamma_inv , Gamma_inv , Lambda_0 , Lambda_1 , H ) : n_T = len ( Gamma_inv ) Gamma_inv_hat = [ None ] * n_T mu_Gamma_inv_hat = [ None ] * n_T mu_hat = [ None ] * n_T mu_hat [ - 1 ] = mu [ - 1 ] . copy ( ) mu_Gamma_inv_hat [ -...
backward step for HMM assuming both the hidden state and noise have 1 - step dependence on the previous value .
15,683
def _set_SNR_grids ( self ) : if self . SNR_prior == 'unif' : SNR_grids = np . linspace ( 0 , 1 , self . SNR_bins ) SNR_weights = np . ones ( self . SNR_bins ) / ( self . SNR_bins - 1 ) SNR_weights [ 0 ] = SNR_weights [ 0 ] / 2.0 SNR_weights [ - 1 ] = SNR_weights [ - 1 ] / 2.0 elif self . SNR_prior == 'lognorm' : dist ...
Set the grids and weights for SNR used in numerical integration of SNR parameters .
15,684
def _matrix_flattened_grid ( self , X0TAX0 , X0TAX0_i , SNR_grids , XTAcorrX , YTAcorrY_diag , XTAcorrY , X0TAY , XTAX0 , n_C , n_V , n_X0 , n_grid ) : half_log_det_X0TAX0 = np . reshape ( np . repeat ( self . _half_log_det ( X0TAX0 ) [ None , : ] , self . SNR_bins , axis = 0 ) , n_grid ) X0TAX0 = np . reshape ( np . r...
We need to integrate parameters SNR and rho on 2 - d discrete grids . This function generates matrices which have only one dimension for these two parameters with each slice in that dimension corresponding to each combination of the discrete grids of SNR and discrete grids of rho .
15,685
def fit ( self , X ) : logger . info ( 'Starting RSRM' ) if 0.0 >= self . lam : raise ValueError ( "Gamma parameter should be positive." ) if len ( X ) <= 1 : raise ValueError ( "There are not enough subjects in the input " "data to train the model." ) if X [ 0 ] . shape [ 1 ] < self . features : raise ValueError ( "Th...
Compute the Robust Shared Response Model
15,686
def transform ( self , X ) : if hasattr ( self , 'w_' ) is False : raise NotFittedError ( "The model fit has not been run yet." ) if len ( X ) != len ( self . w_ ) : raise ValueError ( "The number of subjects does not match the one" " in the model." ) r = [ None ] * len ( X ) s = [ None ] * len ( X ) for subject in ran...
Use the model to transform new data to Shared Response space
15,687
def _transform_new_data ( self , X , subject ) : S = np . zeros_like ( X ) R = None for i in range ( self . n_iter ) : R = self . w_ [ subject ] . T . dot ( X - S ) S = self . _shrink ( X - self . w_ [ subject ] . dot ( R ) , self . lam ) return R , S
Transform new data for a subjects by projecting to the shared subspace and computing the individual information .
15,688
def transform_subject ( self , X ) : if hasattr ( self , 'w_' ) is False : raise NotFittedError ( "The model fit has not been run yet." ) if X . shape [ 1 ] != self . r_ . shape [ 1 ] : raise ValueError ( "The number of timepoints(TRs) does not match the" "one in the model." ) s = np . zeros_like ( X ) for i in range (...
Transform a new subject using the existing model
15,689
def _rsrm ( self , X ) : subjs = len ( X ) voxels = [ X [ i ] . shape [ 0 ] for i in range ( subjs ) ] TRs = X [ 0 ] . shape [ 1 ] features = self . features W = self . _init_transforms ( subjs , voxels , features , self . random_state_ ) S = self . _init_individual ( subjs , voxels , TRs ) R = self . _update_shared_re...
Block - Coordinate Descent algorithm for fitting RSRM .
15,690
def _objective_function ( X , W , R , S , gamma ) : subjs = len ( X ) func = .0 for i in range ( subjs ) : func += 0.5 * np . sum ( ( X [ i ] - W [ i ] . dot ( R ) - S [ i ] ) ** 2 ) + gamma * np . sum ( np . abs ( S [ i ] ) ) return func
Evaluate the objective function .
15,691
def _update_individual(X, W, R, gamma):
    """Update the individual (sparse) components S_i by soft-shrinkage."""
    return [RSRM._shrink(X[i] - W[i].dot(R), gamma)
            for i in range(len(X))]
Update the individual components S_i .
15,692
def _update_shared_response ( X , S , W , features ) : subjs = len ( X ) TRs = X [ 0 ] . shape [ 1 ] R = np . zeros ( ( features , TRs ) ) for i in range ( subjs ) : R += W [ i ] . T . dot ( X [ i ] - S [ i ] ) R /= subjs return R
Update the shared response R .
15,693
def _update_transforms(X, S, R):
    """Update the orthogonal mappings W_i for each subject."""
    return [RSRM._update_transform_subject(X[i], S[i], R)
            for i in range(len(X))]
Updates the mappings W_i for each subject .
15,694
def _shrink ( v , gamma ) : pos = v > gamma neg = v < - gamma v [ pos ] -= gamma v [ neg ] += gamma v [ np . logical_and ( ~ pos , ~ neg ) ] = .0 return v
Soft - shrinkage of an array with parameter gamma .
15,695
def plot_confusion_matrix ( cm , title = "Confusion Matrix" ) : import matplotlib . pyplot as plt import math plt . figure ( ) subjects = len ( cm ) root_subjects = math . sqrt ( subjects ) cols = math . ceil ( root_subjects ) rows = math . ceil ( subjects / cols ) classes = cm [ 0 ] . shape [ 0 ] for subject in range ...
Plots a confusion matrix for each subject
15,696
def mask_image ( image : SpatialImage , mask : np . ndarray , data_type : type = None ) -> np . ndarray : image_data = image . get_data ( ) if image_data . shape [ : 3 ] != mask . shape : raise ValueError ( "Image data and mask have different shapes." ) if data_type is not None : cast_data = image_data . astype ( data_...
Mask image after optionally casting its type .
15,697
def multimask_images(images: Iterable[SpatialImage],
                     masks: Sequence[np.ndarray],
                     image_type: type = None
                     ) -> Iterable[Sequence[np.ndarray]]:
    """Mask images with multiple masks.

    Yields, for each image, the list of that image masked by every mask.
    """
    for image in images:
        yield [mask_image(image, m, image_type) for m in masks]
Mask images with multiple masks .
15,698
def mask_images(images: Iterable[SpatialImage], mask: np.ndarray,
                image_type: type = None) -> Iterable[np.ndarray]:
    """Mask images with a single mask.

    Delegates to multimask_images with a one-element mask tuple and
    unwraps the singleton result lists.
    """
    for masked in multimask_images(images, (mask,), image_type):
        yield masked[0]
Mask images .
15,699
def from_masked_images ( cls : Type [ T ] , masked_images : Iterable [ np . ndarray ] , n_subjects : int ) -> T : images_iterator = iter ( masked_images ) first_image = next ( images_iterator ) first_image_shape = first_image . T . shape result = np . empty ( ( first_image_shape [ 0 ] , first_image_shape [ 1 ] , n_subj...
Create a new instance of MaskedMultiSubjectData from masked images.