idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
51,700
def _get_axis_label(self, dim):
    """Return the channel id from a dimension if applicable."""
    # A dimension like '3A' encodes a relative channel index plus a letter.
    if u(dim[:-1]).isdecimal():
        n_channels = len(self.channel_ids)
        channel = self.channel_ids[int(dim[:-1]) % n_channels]
        return str(channel) + dim[-1]
    return dim
Return the channel id from a dimension if applicable .
51,701
def _get_axis_data(self, bunch, dim, cluster_id=None, load_all=None):
    """Extract the points from the data on a given dimension."""
    # Attribute dimensions (e.g. 'time') are computed by a registered callable.
    if dim in self.attributes:
        return self.attributes[dim](cluster_id, load_all=load_all)
    masks = bunch.get('masks', None)
    assert dim not in self.attributes
    letters = 'ABCDEFGHIJ'
    c_rel = int(dim[:-1])
    channel_id = self.channel_ids[c_rel % len(self.channel_ids)]
    if channel_id not in bunch.channel_ids:
        return None
    c = list(bunch.channel_ids).index(channel_id)
    d = letters.index(dim[-1])
    if masks is not None:
        masks = masks[:, c]
    return Bunch(data=bunch.data[:, c, d], masks=masks)
Extract the points from the data on a given dimension .
51,702
def _plot_labels(self):
    """Plot feature labels along left and bottom edge of subplots."""
    last = self.n_cols - 1
    for k in range(0, self.n_cols):
        dim_x, _ = self.grid_dim[0][k].split(',')
        _, dim_y = self.grid_dim[k][last].split(',')
        dim_x = self._get_axis_label(dim_x)
        dim_y = self._get_axis_label(dim_y)
        # y labels on the left column, x labels on the bottom row.
        self[k, 0].text(pos=[-1., 0.], text=dim_y, anchor=[-1.03, 0.], data_bounds=None)
        self[last, k].text(pos=[0., -1.], text=dim_x, anchor=[0., -1.04], data_bounds=None)
Plot feature labels along left and bottom edge of subplots
51,703
def on_channel_click(self, channel_id=None, key=None, button=None):
    """Respond to the click on a channel."""
    channels = self.channel_ids
    if channels is None:
        return
    if len(channels) == 1:
        self.on_select()
        return
    assert len(channels) >= 2
    # Left button changes the first dimension, any other the second.
    d = 0 if button == 1 else 1
    old = channels[d]
    if channel_id == old:
        return
    channels[d] = channel_id
    # Swap if the other slot already held the clicked channel.
    if channels[1 - d] == channel_id:
        channels[1 - d] = old
    assert channels[0] != channels[1]
    self.channel_ids = _uniq(channels)
    logger.debug("Choose channels %d and %d in feature view.", *channels[:2])
    self.on_select(fixed_channels=True)
Respond to the click on a channel .
51,704
def on_request_split(self):
    """Return the spikes enclosed by the lasso."""
    # A polygon needs at least 3 points and some selected clusters.
    if self.lasso.count < 3 or not len(self.cluster_ids):
        return np.array([], dtype=np.int64)
    assert len(self.channel_ids)
    i, j = self.lasso.box
    dim = self.grid_dim[i][j]
    dim_x, dim_y = dim.split(',')
    pos = []
    spike_ids = []
    for cluster_id in self.cluster_ids:
        bunch = self.features(cluster_id, channel_ids=self.channel_ids, load_all=True)
        px = self._get_axis_data(bunch, dim_x, cluster_id=cluster_id, load_all=True)
        py = self._get_axis_data(bunch, dim_y, cluster_id=cluster_id, load_all=True)
        points = np.c_[px.data, py.data]
        # Normalize the points into the subplot's data bounds.
        xmin, xmax = self._get_axis_bounds(dim_x, px)
        ymin, ymax = self._get_axis_bounds(dim_y, py)
        bounds = Range((xmin, ymin, xmax, ymax))
        pos.append(bounds.apply(points))
        spike_ids.append(bunch.spike_ids)
    pos = np.vstack(pos)
    spike_ids = np.concatenate(spike_ids)
    inside = self.lasso.in_polygon(pos)
    self.lasso.clear()
    return np.unique(spike_ids[inside])
Return the spikes enclosed by the lasso .
51,705
def get_closest_box(self, pos):
    """Get the box closest to some position."""
    pos = np.atleast_2d(pos)
    # Squared Euclidean distance from each box position to `pos`.
    sq_dist = np.sum((np.array(self.box_pos) - pos) ** 2, axis=1)
    return np.argmin(sq_dist)
Get the box closest to some position .
51,706
def update_boxes(self, box_pos, box_size):
    """Set the box bounds from specified box positions and sizes."""
    assert box_pos.shape == (self.n_boxes, 2)
    assert len(box_size) == 2
    self.box_bounds = _get_boxes(
        box_pos,
        size=box_size,
        keep_aspect_ratio=self.keep_aspect_ratio,
    )
Set the box bounds from specified box positions and sizes .
51,707
def require_qt(func):
    """Specify that a function requires a Qt application."""
    @wraps(func)
    def wrapped(*args, **kwargs):
        # Fail fast if no QApplication has been created yet.
        if not QApplication.instance():
            raise RuntimeError("A Qt application must be created.")
        return func(*args, **kwargs)
    return wrapped
Specify that a function requires a Qt application .
51,708
def create_app():
    """Create a Qt application, reusing the existing one if any."""
    global QT_APP
    QT_APP = QApplication.instance()
    if QT_APP is None:
        QT_APP = QApplication(sys.argv)
    return QT_APP
Create a Qt application .
51,709
def set(self, f):
    """Call a function after a delay unless another function is set in the meantime."""
    # Cancel any pending call before scheduling the new one.
    self.stop()
    self._create_timer(f)
    self.start()
Call a function after a delay unless another function is set in the meantime .
51,710
def stop(self):
    """Stop the current timer if there is one and cancel the async call."""
    if self._timer:
        self._timer.stop()
        # Schedule Qt-side deletion of the timer object.
        self._timer.deleteLater()
Stop the current timer if there is one and cancel the async call .
51,711
def _wrap_callback_args(f, docstring=None):
    """Display a Qt dialog when a function has arguments."""
    def wrapped(checked, *args):
        # Explicit arguments bypass the dialog entirely.
        if args:
            return f(*args)
        if isinstance(f, partial):
            argspec = inspect.getargspec(f.func)
        else:
            argspec = inspect.getargspec(f)
        f_args = argspec.args
        if 'self' in f_args:
            f_args.remove('self')
        # Drop parameters that have default values.
        if len(argspec.defaults or ()):
            f_args = f_args[:-len(argspec.defaults)]
        # Drop parameters already bound by the partial.
        if isinstance(f, partial):
            f_args = f_args[len(f.args):]
            f_args = [arg for arg in f_args if arg not in f.keywords]
        if not f_args:
            return f()
        # Extract an optional usage example from the docstring.
        r = re.search('Example: `([^`]+)`', docstring)
        docstring_ = docstring[:r.start()].strip() if r else docstring
        text = r.group(1) if r else None
        s, ok = _input_dialog(getattr(f, '__name__', 'action'), docstring_, text)
        if not ok or not s:
            return
        args = _parse_snippet(s)
        return f(*args)
    return wrapped
Display a Qt dialog when a function has arguments .
51,712
def _get_shortcut_string(shortcut):
    """Return a string representation of a shortcut."""
    if shortcut is None:
        return ''
    # A sequence of shortcuts becomes a comma-separated list.
    if isinstance(shortcut, (tuple, list)):
        return ', '.join([_get_shortcut_string(s) for s in shortcut])
    if isinstance(shortcut, string_types):
        # Named standard key (e.g. 'Save') vs. literal key string.
        if hasattr(QKeySequence, shortcut):
            shortcut = QKeySequence(getattr(QKeySequence, shortcut))
        else:
            return shortcut.lower()
    assert isinstance(shortcut, QKeySequence)
    return str(shortcut.toString() or '').lower()
Return a string representation of a shortcut .
51,713
def _get_qkeysequence(shortcut):
    """Return a QKeySequence or list of QKeySequence from a shortcut string."""
    if shortcut is None:
        return []
    if isinstance(shortcut, (tuple, list)):
        return [_get_qkeysequence(s) for s in shortcut]
    assert isinstance(shortcut, string_types)
    # Named standard key takes precedence over literal parsing.
    if hasattr(QKeySequence, shortcut):
        return QKeySequence(getattr(QKeySequence, shortcut))
    sequence = QKeySequence.fromString(shortcut)
    assert not sequence.isEmpty()
    return sequence
Return a QKeySequence or list of QKeySequence from a shortcut string .
51,714
def _show_shortcuts(shortcuts, name=None):
    """Display shortcuts.

    Parameters
    ----------
    shortcuts : dict
        Mapping of action name to shortcut.
    name : str or None
        Optional title suffix.
    """
    name = name or ''
    print('')
    if name:
        name = ' for ' + name
    print('Keyboard shortcuts' + name)
    # Fix: the original reused `name` as the loop variable, shadowing the
    # title parameter; use a distinct variable for clarity.
    for action_name in sorted(shortcuts):
        shortcut = _get_shortcut_string(shortcuts[action_name])
        # Private actions (leading underscore) are not displayed.
        if not action_name.startswith('_'):
            print('- {0:<40}: {1:s}'.format(action_name, shortcut))
Display shortcuts .
51,715
def add(self, callback=None, name=None, shortcut=None, alias=None,
        docstring=None, menu=None, verbose=True):
    """Add an action with a keyboard shortcut."""
    # Support decorator usage: @actions.add(name=..., shortcut=...)
    if callback is None:
        return partial(self.add, name=name, shortcut=shortcut, alias=alias, menu=menu)
    assert callback
    name = name or callback.__name__
    alias = alias or _alias(name)
    name = name.replace('&', '')
    shortcut = shortcut or self._default_shortcuts.get(name, None)
    # Do not register the same action twice.
    if name in self._actions_dict:
        return
    docstring = docstring or callback.__doc__ or name
    docstring = re.sub(r'[ \t\r\f\v]{2,}', ' ', docstring.strip())
    action = _create_qaction(self.gui, name, callback, shortcut,
                             docstring=docstring, alias=alias)
    entry = Bunch(qaction=action, name=name, alias=alias,
                  shortcut=shortcut, callback=callback, menu=menu)
    if verbose and not name.startswith('_'):
        logger.log(5, "Add action `%s` (%s).", name,
                   _get_shortcut_string(action.shortcut()))
    self.gui.addAction(action)
    menu = menu or self.menu
    if menu and not name.startswith('_'):
        self.gui.get_menu(menu).addAction(action)
    self._actions_dict[name] = entry
    self._aliases[alias] = name
    # Expose the callback as an attribute on the Actions object.
    if callback:
        setattr(self, name, callback)
Add an action with a keyboard shortcut .
51,716
def separator(self, menu=None):
    """Add a separator."""
    target = menu or self.menu
    self.gui.get_menu(target).addSeparator()
Add a separator
51,717
def disable(self, name=None):
    """Disable one or all actions."""
    # With no name, recurse over every registered action.
    if name is None:
        for action_name in self._actions_dict:
            self.disable(action_name)
        return
    self._actions_dict[name].qaction.setEnabled(False)
Disable one or all actions .
51,718
def enable(self, name=None):
    """Enable one or all actions."""
    # With no name, recurse over every registered action.
    if name is None:
        for action_name in self._actions_dict:
            self.enable(action_name)
        return
    self._actions_dict[name].qaction.setEnabled(True)
Enable one or all actions .
51,719
def run(self, name, *args):
    """Run an action as specified by its name."""
    assert isinstance(name, string_types)
    # Aliases resolve to full action names first.
    name = self._aliases.get(name, name)
    action = self._actions_dict.get(name, None)
    if not action:
        raise ValueError("Action `{}` doesn't exist.".format(name))
    if not name.startswith('_'):
        logger.debug("Execute action `%s`.", name)
    return action.callback(*args)
Run an action as specified by its name .
51,720
def remove(self, name):
    """Remove an action."""
    # Detach from the GUI, the registry, and the Actions attribute.
    self.gui.removeAction(self._actions_dict[name].qaction)
    del self._actions_dict[name]
    delattr(self, name)
Remove an action .
51,721
def remove_all(self):
    """Remove all actions."""
    # Snapshot the keys first: self.remove() mutates the dict.
    for action_name in sorted(self._actions_dict.keys()):
        self.remove(action_name)
Remove all actions .
51,722
def shortcuts(self):
    """A dictionary of action shortcuts."""
    return {name: entry.shortcut
            for name, entry in self._actions_dict.items()}
A dictionary of action shortcuts .
51,723
def show_shortcuts(self):
    """Print all shortcuts."""
    gui_name = self.gui.name
    actions_name = self.name
    title = ('{} - {}'.format(gui_name, actions_name)
             if actions_name else gui_name)
    _show_shortcuts(self.shortcuts, title)
Print all shortcuts .
51,724
def command(self):
    """This is used to write a snippet message in the status bar.

    Returns the status message with the trailing cursor stripped.
    """
    msg = self.gui.status_message
    return msg[:len(msg) - len(self.cursor)]
This is used to write a snippet message in the status bar .
51,725
def _backspace(self):
    """Erase the last character in the snippet command."""
    # Never erase the leading ':' snippet marker.
    if self.command == ':':
        return
    logger.log(5, "Snippet keystroke `Backspace`.")
    self.command = self.command[:-1]
Erase the last character in the snippet command .
51,726
def _enter(self):
    """Disable the snippet mode and execute the command."""
    # Capture the command before mode_off() may reset the status bar.
    command = self.command
    logger.log(5, "Snippet keystroke `Enter`.")
    self.mode_off()
    self.run(command)
Disable the snippet mode and execute the command .
51,727
def _create_snippet_actions(self):
    """Add mock Qt actions for snippet keystrokes."""
    for i, char in enumerate(self._snippet_chars):
        # Factory binds `char` by value, avoiding the late-binding
        # closure pitfall inside the loop.
        def _make_func(char):
            def callback():
                logger.log(5, "Snippet keystroke `%s`.", char)
                self.command += char
            return callback
        self.actions.add(name='_snippet_{}'.format(i),
                         shortcut=char,
                         callback=_make_func(char))
    self.actions.add(name='_snippet_backspace',
                     shortcut='backspace',
                     callback=self._backspace)
    self.actions.add(name='_snippet_activate',
                     shortcut=('enter', 'return'),
                     callback=self._enter)
    self.actions.add(name='_snippet_disable',
                     shortcut='escape',
                     callback=self.mode_off)
Add mock Qt actions for snippet keystrokes .
51,728
def run(self, snippet):
    """Executes a snippet command."""
    assert snippet[0] == ':'
    snippet = snippet[1:]
    snippet_args = _parse_snippet(snippet)
    name = snippet_args[0]
    logger.info("Processing snippet `%s`.", snippet)
    try:
        # Try every Actions instance attached to the GUI; the first
        # one that knows the action wins.
        for actions in self.gui.actions:
            try:
                actions.run(name, *snippet_args[1:])
                return
            except ValueError:
                pass
        logger.warn("Couldn't find action `%s`.", name)
    except Exception as e:
        logger.warn("Error when executing snippet: \"%s\".", str(e))
        logger.debug(''.join(traceback.format_exception(*sys.exc_info())))
Executes a snippet command .
51,729
def _before_after ( n_samples ) : if not isinstance ( n_samples , ( tuple , list ) ) : before = n_samples // 2 after = n_samples - before else : assert len ( n_samples ) == 2 before , after = n_samples n_samples = before + after assert before >= 0 assert after >= 0 assert before + after == n_samples return before , after
Get the number of samples before and after .
51,730
def _slice ( index , n_samples , margin = None ) : if margin is None : margin = ( 0 , 0 ) assert isinstance ( n_samples , ( tuple , list ) ) assert len ( n_samples ) == 2 before , after = n_samples assert isinstance ( margin , ( tuple , list ) ) assert len ( margin ) == 2 margin_before , margin_after = margin before += margin_before after += margin_after index = int ( index ) before = int ( before ) after = int ( after ) return slice ( max ( 0 , index - before ) , index + after , None )
Return a waveform slice .
51,731
def _load_at(self, time, channels=None):
    """Load a waveform at a given time."""
    if channels is None:
        channels = slice(None, None, None)
    time = int(time)
    time_o = time
    ns = self.n_samples_trace
    if not (0 <= time_o < ns):
        raise ValueError("Invalid time {0:d}/{1:d}.".format(time_o, ns))
    slice_extract = _slice(time_o, self.n_samples_before_after, self._filter_margin)
    extract = self._traces[slice_extract][:, channels].astype(np.float32)
    # Pad with zeros when the slice was clipped at either trace edge.
    if slice_extract.start <= 0:
        extract = _pad(extract, self._n_samples_extract, 'left')
    elif slice_extract.stop >= ns - 1:
        extract = _pad(extract, self._n_samples_extract, 'right')
    assert extract.shape[0] == self._n_samples_extract
    return extract
Load a waveform at a given time .
51,732
def get(self, spike_ids, channels=None):
    """Load the waveforms of the specified spikes."""
    if isinstance(spike_ids, slice):
        spike_ids = _range_from_slice(spike_ids, start=0, stop=self.n_spikes)
    if not hasattr(spike_ids, '__len__'):
        spike_ids = [spike_ids]
    if channels is None:
        channels = slice(None, None, None)
        nc = self.n_channels
    else:
        channels = np.asarray(channels, dtype=np.int32)
        assert np.all(channels < self.n_channels)
        nc = len(channels)
    spike_ids = _as_array(spike_ids)
    n_spikes = len(spike_ids)
    shape = (n_spikes, nc, self._n_samples_extract)
    waveforms = np.zeros(shape, dtype=np.float32)
    # Empty trace: return all-zero waveforms with the expected layout.
    if self.n_samples_trace == 0:
        return np.transpose(waveforms, (0, 2, 1))
    for i, spike_id in enumerate(spike_ids):
        assert 0 <= spike_id < self.n_spikes
        time = self._spike_samples[spike_id]
        try:
            w = self._load_at(time, channels)
        except ValueError as e:
            # Best-effort: leave the zero waveform in place.
            logger.warn("Error while loading waveform: %s", str(e))
            continue
        assert w.shape == (self._n_samples_extract, nc)
        waveforms[i, :, :] = w.T
    # Filter only the rows that contain actual data.
    waveforms_f = waveforms.reshape((-1, self._n_samples_extract))
    unmasked = waveforms_f.max(axis=1) != 0
    waveforms_f[unmasked] = self._filter(waveforms_f[unmasked], axis=1)
    waveforms_f = waveforms_f.reshape((n_spikes, nc, self._n_samples_extract))
    # Trim the filter margins added around the extract.
    margin_before, margin_after = self._filter_margin
    if margin_after > 0:
        assert margin_before >= 0
        waveforms_f = waveforms_f[:, :, margin_before:-margin_after]
    assert waveforms_f.shape == (n_spikes, nc, self.n_samples_waveforms)
    return np.transpose(waveforms_f, (0, 2, 1))
Load the waveforms of the specified spikes .
51,733
def get_waveform_amplitude(mean_masks, mean_waveforms):
    """Return the amplitude of the waveforms on all channels.

    The amplitude is the masked peak-to-peak range per channel.
    """
    assert mean_waveforms.ndim == 2
    n_samples, n_channels = mean_waveforms.shape
    assert mean_masks.ndim == 1
    assert mean_masks.shape == (n_channels,)
    masked = mean_waveforms * mean_masks
    assert masked.shape == (n_samples, n_channels)
    return masked.max(axis=0) - masked.min(axis=0)
Return the amplitude of the waveforms on all channels .
51,734
def get_mean_masked_features_distance(mean_features_0, mean_features_1,
                                      mean_masks_0, mean_masks_1,
                                      n_features_per_channel=None):
    """Compute the distance between the mean masked features.

    Each channel mask is repeated over its features, the features are
    mask-weighted, and the Euclidean distance is returned.
    """
    assert n_features_per_channel > 0
    mu_0 = mean_features_0.ravel()
    mu_1 = mean_features_1.ravel()
    # Expand per-channel masks to per-feature weights.
    w_0 = np.repeat(mean_masks_0, n_features_per_channel)
    w_1 = np.repeat(mean_masks_1, n_features_per_channel)
    return np.linalg.norm(mu_0 * w_0 - mu_1 * w_1)
Compute the distance between the mean masked features .
51,735
def _extend_spikes(spike_ids, spike_clusters):
    """Return all spikes belonging to the clusters containing the specified spikes."""
    clusters = _unique(spike_clusters[spike_ids])
    all_spikes = _spikes_in_clusters(spike_clusters, clusters)
    # Keep only the spikes that were not in the initial selection.
    return np.setdiff1d(all_spikes, spike_ids, assume_unique=True)
Return all spikes belonging to the clusters containing the specified spikes .
51,736
def reset(self):
    """Reset the clustering to the original clustering."""
    self._undo_stack.clear()
    # Restore the initial assignments and cluster-id counter.
    self._spike_clusters = self._spike_clusters_base
    self._new_cluster_id = self._new_cluster_id_0
Reset the clustering to the original clustering .
51,737
def _do_assign(self, spike_ids, new_spike_clusters):
    """Make spike-cluster assignments after the spike selection has been
    extended to full clusters."""
    spike_ids = _as_array(spike_ids)
    # Broadcast a single target cluster over all selected spikes.
    if len(new_spike_clusters) == 1 and len(spike_ids) > 1:
        new_spike_clusters = (np.ones(len(spike_ids), dtype=np.int64) *
                              new_spike_clusters[0])
    old_spike_clusters = self._spike_clusters[spike_ids]
    assert len(spike_ids) == len(old_spike_clusters)
    assert len(new_spike_clusters) == len(spike_ids)
    old_clusters = _unique(old_spike_clusters)
    new_clusters = _unique(new_spike_clusters)
    # A single destination cluster is just a merge.
    if len(new_clusters) == 1:
        return self._do_merge(spike_ids, old_clusters, new_clusters[0])
    up = _assign_update_info(spike_ids, old_spike_clusters, new_spike_clusters)
    self._new_cluster_id = max(self._new_cluster_id, max(up.added) + 1)
    self._spike_clusters[spike_ids] = new_spike_clusters
    new_spc = _spikes_per_cluster(new_spike_clusters, spike_ids)
    self._update_cluster_ids(to_remove=old_clusters, to_add=new_spc)
    return up
Make spike - cluster assignments after the spike selection has been extended to full clusters .
51,738
def merge(self, cluster_ids, to=None):
    """Merge several clusters to a new cluster."""
    if not _is_array_like(cluster_ids):
        raise ValueError("The first argument should be a list or "
                         "an array.")
    cluster_ids = sorted(cluster_ids)
    if not set(cluster_ids) <= set(self.cluster_ids):
        raise ValueError("Some clusters do not exist.")
    # The target defaults to the next available cluster id.
    if to is None:
        to = self.new_cluster_id()
    if to < self.new_cluster_id():
        raise ValueError("The new cluster numbers should be higher than "
                         "{0}.".format(self.new_cluster_id()))
    spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)
    up = self._do_merge(spike_ids, cluster_ids, to)
    undo_state = self.emit('request_undo_state', up)
    self._undo_stack.add((spike_ids, [to], undo_state))
    self.emit('cluster', up)
    return up
Merge several clusters to a new cluster .
51,739
def assign(self, spike_ids, spike_clusters_rel=0):
    """Make new spike cluster assignments."""
    assert not isinstance(spike_ids, slice)
    # Broadcast a scalar relative cluster over all spikes.
    if not hasattr(spike_clusters_rel, '__len__'):
        spike_clusters_rel = spike_clusters_rel * np.ones(len(spike_ids),
                                                          dtype=np.int64)
    spike_ids = _as_array(spike_ids)
    if len(spike_ids) == 0:
        return UpdateInfo()
    assert len(spike_ids) == len(spike_clusters_rel)
    assert spike_ids.min() >= 0
    assert spike_ids.max() < self._n_spikes, "Some spikes don't exist."
    spike_ids, cluster_ids = _extend_assignment(spike_ids,
                                                self._spike_clusters,
                                                spike_clusters_rel,
                                                self.new_cluster_id())
    up = self._do_assign(spike_ids, cluster_ids)
    undo_state = self.emit('request_undo_state', up)
    self._undo_stack.add((spike_ids, cluster_ids, undo_state))
    self.emit('cluster', up)
    return up
Make new spike cluster assignments .
51,740
def undo(self):
    """Undo the last cluster assignment operation."""
    _, _, undo_state = self._undo_stack.back()
    # Replay the remaining undo-stack items on top of the base clustering.
    spike_clusters_new = self._spike_clusters_base.copy()
    for spike_ids, cluster_ids, _ in self._undo_stack:
        if spike_ids is not None:
            spike_clusters_new[spike_ids] = cluster_ids
    # Only reassign the spikes whose cluster actually changed.
    changed = np.nonzero(self._spike_clusters != spike_clusters_new)[0]
    clusters_changed = spike_clusters_new[changed]
    up = self._do_assign(changed, clusters_changed)
    up.history = 'undo'
    up.undo_state = undo_state
    self.emit('cluster', up)
    return up
Undo the last cluster assignment operation .
51,741
def redo(self):
    """Redo the last cluster assignment operation."""
    item = self._undo_stack.forward()
    # Nothing to redo.
    if item is None:
        return
    spike_ids, cluster_ids, undo_state = item
    assert spike_ids is not None
    up = self._do_assign(spike_ids, cluster_ids)
    up.history = 'redo'
    self.emit('cluster', up)
    return up
Redo the last cluster assignment operation .
51,742
def _increment(arr, indices):
    """Increment some indices in a 1D vector of non-negative integers.
    Repeated indices are taken into account."""
    arr = _as_array(arr)
    indices = _as_array(indices)
    # bincount yields, for each index value, its number of occurrences.
    counts = np.bincount(indices)
    arr[:len(counts)] += counts
    return arr
Increment some indices in a 1D vector of non - negative integers . Repeated indices are taken into account .
51,743
def _symmetrize_correlograms ( correlograms ) : n_clusters , _ , n_bins = correlograms . shape assert n_clusters == _ correlograms [ ... , 0 ] = np . maximum ( correlograms [ ... , 0 ] , correlograms [ ... , 0 ] . T ) sym = correlograms [ ... , 1 : ] [ ... , : : - 1 ] sym = np . transpose ( sym , ( 1 , 0 , 2 ) ) return np . dstack ( ( sym , correlograms ) )
Return the symmetrized version of the CCG arrays .
51,744
def correlograms(spike_times, spike_clusters, cluster_ids=None,
                 sample_rate=1., bin_size=None, window_size=None,
                 symmetrize=True):
    """Compute all pairwise cross-correlograms among the clusters appearing
    in spike_clusters.

    Parameters
    ----------
    spike_times : array-like
        Increasing spike times (in seconds).
    spike_clusters : array-like
        Cluster label of each spike; same shape as `spike_times`.
    cluster_ids : array-like or None
        Restrict the computation to these clusters (all by default).
    sample_rate : float
        Sampling rate used to convert times to samples.
    bin_size, window_size : float
        CCG bin and window durations (clipped to [1e-5, 1e5]).
    symmetrize : bool
        Whether to return the symmetrized CCG array.
    """
    assert sample_rate > 0.
    assert np.all(np.diff(spike_times) >= 0), ("The spike times must be "
                                               "increasing.")
    spike_times = np.asarray(spike_times, dtype=np.float64)
    spike_samples = (spike_times * sample_rate).astype(np.int64)
    spike_clusters = _as_array(spike_clusters)
    assert spike_samples.ndim == 1
    assert spike_samples.shape == spike_clusters.shape
    bin_size = np.clip(bin_size, 1e-5, 1e5)
    binsize = int(sample_rate * bin_size)
    assert binsize >= 1
    window_size = np.clip(window_size, 1e-5, 1e5)
    winsize_bins = 2 * int(.5 * window_size / bin_size) + 1
    assert winsize_bins >= 1
    assert winsize_bins % 2 == 1
    if cluster_ids is None:
        clusters = _unique(spike_clusters)
    else:
        clusters = _as_array(cluster_ids)
    n_clusters = len(clusters)
    spike_clusters_i = _index_of(spike_clusters, clusters)
    shift = 1
    # Fix: `np.bool` is a removed deprecated alias; use the builtin bool.
    mask = np.ones_like(spike_samples, dtype=bool)
    correlograms = _create_correlograms_array(n_clusters, winsize_bins)
    # Increasing shift loop: at each step, consider pairs of spikes
    # separated by `shift` positions, until all pairs fall outside the window.
    while mask[:-shift].any():
        spike_diff = _diff_shifted(spike_samples, shift)
        spike_diff_b = spike_diff // binsize
        # Spikes whose pair is beyond the window will also be beyond it
        # for any larger shift.
        mask[:-shift][spike_diff_b > (winsize_bins // 2)] = False
        m = mask[:-shift].copy()
        # Fix: the original assigned `d = spike_diff_b[m]` twice; once is enough.
        d = spike_diff_b[m]
        indices = np.ravel_multi_index((spike_clusters_i[:-shift][m],
                                        spike_clusters_i[+shift:][m],
                                        d),
                                       correlograms.shape)
        _increment(correlograms.ravel(), indices)
        shift += 1
    # Zero out the zero-lag auto-correlogram bins.
    correlograms[np.arange(n_clusters), np.arange(n_clusters), 0] = 0
    if symmetrize:
        return _symmetrize_correlograms(correlograms)
    return correlograms
Compute all pairwise cross - correlograms among the clusters appearing in spike_clusters .
51,745
def set_bin_window(self, bin_size=None, window_size=None):
    """Set the bin and window sizes (in seconds)."""
    bin_size = bin_size or self.bin_size
    window_size = window_size or self.window_size
    assert 1e-6 < bin_size < 1e3
    assert 1e-6 < window_size < 1e3
    assert bin_size < window_size
    self.bin_size = bin_size
    self.window_size = window_size
    # Status bar displays the values in milliseconds.
    b, w = self.bin_size * 1000, self.window_size * 1000
    self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w))
Set the bin and window sizes .
51,746
def _md5 ( path , blocksize = 2 ** 20 ) : m = hashlib . md5 ( ) with open ( path , 'rb' ) as f : while True : buf = f . read ( blocksize ) if not buf : break m . update ( buf ) return m . hexdigest ( )
Compute the checksum of a file .
51,747
def download_file(url, output_path):
    """Download a binary file from an URL.

    Parameters
    ----------
    url : str
        Source URL.
    output_path : str
        Destination path on disk.

    Returns
    -------
    output_path : str
        The real path of the downloaded (or already-valid) file.
    """
    output_path = op.realpath(output_path)
    assert output_path is not None
    if op.exists(output_path):
        checked = _check_md5_of_url(output_path, url)
        if checked is False:
            logger.debug("The file `%s` already exists "
                         "but is invalid: redownloading.", output_path)
        elif checked is True:
            logger.debug("The file `%s` already exists: skipping.",
                         output_path)
            return output_path
    r = _download(url, stream=True)
    _save_stream(r, output_path)
    # Retry once if the checksum does not match.
    if _check_md5_of_url(output_path, url) is False:
        logger.debug("The checksum doesn't match: retrying the download.")
        r = _download(url, stream=True)
        _save_stream(r, output_path)
        if _check_md5_of_url(output_path, url) is False:
            raise RuntimeError("The checksum of the downloaded file "
                               "doesn't match the provided checksum.")
    # Fix: the original returned None here while the early-exit path
    # returned the path; return it consistently on success.
    return output_path
Download a binary file from an URL .
51,748
def _make_class(cls, **kwargs):
    """Return a custom Visual class with given parameters."""
    # None values fall back to the attribute already defined on `cls`.
    kwargs = {k: (v if v is not None else getattr(cls, k, None))
              for k, v in kwargs.items()}
    name = cls.__name__ + '_' + _hash(kwargs)
    # Memoize generated classes by (base name, parameter hash).
    if name not in _CLASSES:
        logger.log(5, "Create class %s %s.", name, kwargs)
        _CLASSES[name] = type(name, (cls,), kwargs)
    return _CLASSES[name]
Return a custom Visual class with given parameters .
51,749
def _add_item(self, cls, *args, **kwargs):
    """Add a plot item."""
    box_index = kwargs.pop('box_index', self._default_box_index)
    data = cls.validate(*args, **kwargs)
    n = cls.vertex_count(**data)
    # Expand a scalar/tuple box index to one row per vertex.
    if not isinstance(box_index, np.ndarray):
        k = len(self._default_box_index)
        box_index = _get_array(box_index, (n, k))
    data['box_index'] = box_index
    self._items.setdefault(cls, []).append(data)
    return data
Add a plot item .
51,750
def scatter(self, *args, **kwargs):
    """Add a scatter plot."""
    # Bake the marker into a specialized visual class.
    cls = _make_class(ScatterVisual,
                      _default_marker=kwargs.pop('marker', None))
    return self._add_item(cls, *args, **kwargs)
Add a scatter plot .
51,751
def build(self):
    """Build all added items."""
    for cls, data_list in self._items.items():
        data = _accumulate(data_list, cls.allow_list)
        box_index = data.pop('box_index')
        visual = cls()
        self.add_visual(visual)
        visual.set_data(**data)
        # Only upload the box index if the shader declares it.
        if 'a_box_index' in visual.program._code_variables:
            visual.program['a_box_index'] = box_index.astype(np.float32)
    if self.lasso:
        self.lasso.create_visual()
    self.update()
Build all added items .
51,752
def _range_from_slice ( myslice , start = None , stop = None , step = None , length = None ) : assert isinstance ( myslice , slice ) step = myslice . step if myslice . step is not None else step if step is None : step = 1 start = myslice . start if myslice . start is not None else start if start is None : start = 0 stop = myslice . stop if myslice . stop is not None else stop if length is not None : stop_inferred = floor ( start + step * length ) if stop is not None and stop < stop_inferred : raise ValueError ( "'stop' ({stop}) and " . format ( stop = stop ) + "'length' ({length}) " . format ( length = length ) + "are not compatible." ) stop = stop_inferred if stop is None and length is None : raise ValueError ( "'stop' and 'length' cannot be both unspecified." ) myrange = np . arange ( start , stop , step ) if length is not None : assert len ( myrange ) == length return myrange
Convert a slice to an array of integers .
51,753
def _index_of ( arr , lookup ) : lookup = np . asarray ( lookup , dtype = np . int32 ) m = ( lookup . max ( ) if len ( lookup ) else 0 ) + 1 tmp = np . zeros ( m + 1 , dtype = np . int ) tmp [ - 1 ] = - 1 if len ( lookup ) : tmp [ lookup ] = np . arange ( len ( lookup ) ) return tmp [ arr ]
Replace scalars in an array by their indices in a lookup table .
51,754
def _pad ( arr , n , dir = 'right' ) : assert dir in ( 'left' , 'right' ) if n < 0 : raise ValueError ( "'n' must be positive: {0}." . format ( n ) ) elif n == 0 : return np . zeros ( ( 0 , ) + arr . shape [ 1 : ] , dtype = arr . dtype ) n_arr = arr . shape [ 0 ] shape = ( n , ) + arr . shape [ 1 : ] if n_arr == n : assert arr . shape == shape return arr elif n_arr < n : out = np . zeros ( shape , dtype = arr . dtype ) if dir == 'left' : out [ - n_arr : , ... ] = arr elif dir == 'right' : out [ : n_arr , ... ] = arr assert out . shape == shape return out else : if dir == 'left' : out = arr [ - n : , ... ] elif dir == 'right' : out = arr [ : n , ... ] assert out . shape == shape return out
Pad an array with zeros along the first axis .
51,755
def _in_polygon(points, polygon):
    """Return the points that are inside a polygon."""
    from matplotlib.path import Path
    points = _as_array(points)
    polygon = _as_array(polygon)
    assert points.ndim == 2
    assert polygon.ndim == 2
    # Close the polygon by repeating its first vertex.
    if len(polygon):
        polygon = np.vstack((polygon, polygon[0]))
    path = Path(polygon, closed=True)
    return path.contains_points(points)
Return the points that are inside a polygon .
51,756
def read_array(path, mmap_mode=None):
    """Read a .npy array.

    Raises NotImplementedError for any other file extension.
    """
    file_ext = op.splitext(path)[1]
    if file_ext != '.npy':
        raise NotImplementedError("The file extension `{}` ".format(file_ext) +
                                  "is not currently supported.")
    return np.load(path, mmap_mode=mmap_mode)
Read a . npy array .
51,757
def write_array(path, arr):
    """Write an array to a .npy file.

    Raises NotImplementedError for any other file extension.
    """
    file_ext = op.splitext(path)[1]
    if file_ext != '.npy':
        raise NotImplementedError("The file extension `{}` ".format(file_ext) +
                                  "is not currently supported.")
    return np.save(path, arr)
Write an array to a . npy file .
51,758
def _concatenate_virtual_arrays(arrs, cols=None, scaling=None):
    """Return a virtual concatenate of several NumPy arrays."""
    if not len(arrs):
        return None
    return ConcatenatedArrays(arrs, cols, scaling=scaling)
Return a virtual concatenate of several NumPy arrays .
51,759
def _excerpt_step ( n_samples , n_excerpts = None , excerpt_size = None ) : assert n_excerpts >= 2 step = max ( ( n_samples - excerpt_size ) // ( n_excerpts - 1 ) , excerpt_size ) return step
Compute the step of an excerpt set as a function of the number of excerpts or their sizes .
51,760
def chunk_bounds(n_samples, chunk_size, overlap=0):
    """Return chunk bounds.

    Yields ``(s_start, s_end, keep_start, keep_end)`` tuples where the
    ``keep`` range excludes half the overlap on each side.
    """
    s_start = 0
    s_end = chunk_size
    keep_start = s_start
    keep_end = s_end - overlap // 2
    yield s_start, s_end, keep_start, keep_end
    # Middle chunks, each overlapping the previous one.
    while s_end - overlap + chunk_size < n_samples:
        s_start = s_end - overlap
        s_end = s_start + chunk_size
        keep_start = keep_end
        keep_end = s_end - overlap // 2
        if s_start < s_end:
            yield s_start, s_end, keep_start, keep_end
    # Last (possibly shorter) chunk up to n_samples.
    s_start = s_end - overlap
    s_end = n_samples
    keep_start = keep_end
    keep_end = s_end
    if s_start < s_end:
        yield s_start, s_end, keep_start, keep_end
Return chunk bounds .
51,761
def data_chunk(data, chunk, with_overlap=False):
    """Get a data chunk.

    `chunk` is either ``(start, end)`` or a 4-tuple from chunk_bounds();
    in the latter case `with_overlap` selects the full vs. kept range.
    """
    assert isinstance(chunk, tuple)
    if len(chunk) == 2:
        i, j = chunk
    elif len(chunk) == 4:
        i, j = chunk[:2] if with_overlap else chunk[2:]
    else:
        raise ValueError("'chunk' should have 2 or 4 elements, "
                         "not {0:d}".format(len(chunk)))
    return data[i:j, ...]
Get a data chunk .
51,762
def _spikes_in_clusters ( spike_clusters , clusters ) : if len ( spike_clusters ) == 0 or len ( clusters ) == 0 : return np . array ( [ ] , dtype = np . int ) return np . nonzero ( np . in1d ( spike_clusters , clusters ) ) [ 0 ]
Return the ids of all spikes belonging to the specified clusters .
51,763
def grouped_mean(arr, spike_clusters):
    """Compute the mean of a spike-dependent quantity for every cluster.

    `arr` is a 1D array with one value per spike; `spike_clusters` maps each
    spike to a cluster id. Returns one mean per (sorted) unique cluster id.
    """
    arr = np.asarray(arr)
    spike_clusters = np.asarray(spike_clusters)
    assert arr.ndim == 1
    assert arr.shape[0] == len(spike_clusters)
    # `return_inverse` gives, for every spike, the index of its cluster among
    # the sorted unique cluster ids (replaces the _unique/_index_of helpers).
    cluster_ids, spike_clusters_rel = np.unique(spike_clusters,
                                                return_inverse=True)
    spike_counts = np.bincount(spike_clusters_rel)
    assert len(spike_counts) == len(cluster_ids)
    # Sum per cluster, then normalize by the spike count.
    t = np.zeros(len(cluster_ids))
    np.add.at(t, spike_clusters_rel, arr)
    return t / spike_counts
Compute the mean of a spike - dependent quantity for every cluster .
51,764
def regular_subset(spikes, n_spikes_max=None, offset=0):
    """Prune the current selection to get at most `n_spikes_max` spikes.

    Takes a regularly-spaced subset starting at `offset`; returns `spikes`
    unchanged when no pruning is needed.
    """
    assert spikes is not None
    if n_spikes_max is None or len(spikes) <= n_spikes_max:
        return spikes
    # Regular step so that roughly n_spikes_max spikes are kept.
    step = int(math.ceil(np.clip(len(spikes) / float(n_spikes_max),
                                 1, len(spikes))))
    out = spikes[offset::step][:n_spikes_max]
    assert len(out) <= len(spikes)
    assert len(out) <= n_spikes_max
    return out
Prune the current selection to get at most n_spikes_max spikes .
51,765
def select_spikes(cluster_ids=None, max_n_spikes_per_cluster=None,
                  spikes_per_cluster=None, batch_size=None, subset=None):
    """Return a selection of spikes belonging to the specified clusters.

    `subset` is either 'regular' (default, evenly-spaced subsampling, with
    optional excerpt batches) or 'random'. `spikes_per_cluster` is a callable
    returning the spike ids of a given cluster.
    """
    subset = subset or 'regular'
    assert _is_array_like(cluster_ids)
    if not len(cluster_ids):
        return np.array([], dtype=np.int64)
    if max_n_spikes_per_cluster in (None, 0):
        # No limit: take every spike of every cluster.
        selection = {c: spikes_per_cluster(c) for c in cluster_ids}
    else:
        assert max_n_spikes_per_cluster > 0
        selection = {}
        n_clusters = len(cluster_ids)
        for cluster in cluster_ids:
            # The per-cluster budget decreases with the number of clusters.
            n = max(1, int(max_n_spikes_per_cluster *
                           exp(-.1 * (n_clusters - 1))))
            spike_ids = spikes_per_cluster(cluster)
            if subset == 'regular':
                # Regular subselection, or excerpt batches for large clusters.
                if batch_size is None or len(spike_ids) <= max(batch_size, n):
                    spike_ids = regular_subset(spike_ids, n_spikes_max=n)
                else:
                    spike_ids = get_excerpts(spike_ids, n // batch_size,
                                             batch_size)
            elif subset == 'random' and len(spike_ids) > n:
                spike_ids = np.random.choice(spike_ids, n, replace=False)
            selection[cluster] = np.unique(spike_ids)
    return _flatten_per_cluster(selection)
Return a selection of spikes belonging to the specified clusters .
51,766
def _get_recording ( self , index ) : assert index >= 0 recs = np . nonzero ( ( index - self . offsets [ : - 1 ] ) >= 0 ) [ 0 ] if len ( recs ) == 0 : return len ( self . arrs ) - 1 return recs [ - 1 ]
Return the recording that contains a given index .
51,767
def bandpass_filter(rate=None, low=None, high=None, order=None):
    """Butterworth bandpass filter.

    Cutoffs `low`/`high` are in Hz and normalized by the Nyquist frequency.
    Returns the `(b, a)` filter coefficients.
    """
    assert low < high
    assert order >= 1
    nyquist = rate / 2.
    return signal.butter(order, (low / nyquist, high / nyquist), 'pass')
Butterworth bandpass filter .
51,768
def apply_filter(x, filter=None, axis=0):
    """Apply a zero-phase filter (`filtfilt`) to an array along `axis`.

    `filter` is a `(b, a)` coefficient pair, e.g. from `bandpass_filter`.
    """
    x = _as_array(x)
    if x.shape[axis] == 0:
        # Nothing to filter.
        return x
    b, a = filter
    return signal.filtfilt(b, a, x, axis=axis)
Apply a filter to an array .
51,769
def fit(self, x, fudge=1e-18):
    """Compute and store the whitening matrix of 2D data `x`.

    Uses ZCA whitening: ``W = V diag(1/sqrt(d + fudge)) V^T`` where
    `(d, V)` is the eigendecomposition of the data covariance.
    """
    assert x.ndim == 2
    ns, nc = x.shape
    x_cov = np.cov(x, rowvar=0)
    assert x_cov.shape == (nc, nc)
    d, v = np.linalg.eigh(x_cov)
    # `fudge` regularizes near-zero eigenvalues.
    d_inv_sqrt = np.diag(1. / np.sqrt(d + fudge))
    w = np.dot(np.dot(v, d_inv_sqrt), v.T)
    self._matrix = w
    return w
Compute the whitening matrix .
51,770
def current_item(self):
    """Return the current element, or None if the history is empty."""
    if not self._history or self._index < 0:
        return None
    self._check_index()
    return self._history[self._index]
Return the current element .
51,771
def _check_index ( self ) : assert 0 <= self . _index <= len ( self . _history ) - 1 assert len ( self . _history ) >= 1
Check that the index is within the bounds of _history.
51,772
def iter(self, start=0, end=None):
    """Iterate through successive history items between `start` and `end`.

    `end` defaults to just past the current index; `end == 0` or an empty
    range yields nothing.

    BUGFIX: the original raised `StopIteration()` inside this generator,
    which PEP 479 (Python 3.7+) converts into a RuntimeError; a plain
    `return` is the correct way to end a generator early.
    """
    if end is None:
        end = self._index + 1
    elif end == 0:
        return
    if start >= end:
        return
    assert 0 <= end <= len(self._history)
    assert 0 <= start <= end - 1
    for i in range(start, end):
        yield self._history[i]
Iterate through successive history items .
51,773
def add(self, item):
    """Add an item in the history, discarding any redo-able future items."""
    self._check_index()
    # Destroy the history after the current point, then append.
    self._history = self._history[:self._index + 1]
    self._history.append(item)
    self._index += 1
    self._check_index()
    # The new item must now be the current one.
    assert id(self.current_item) == id(item)
Add an item in the history .
51,774
def back(self):
    """Go back in history if possible; return the undone item or None."""
    if self._index <= 0:
        return None
    undone = self.current_item
    self._index -= 1
    self._check_index()
    return undone
Go back in history if possible .
51,775
def forward(self):
    """Go forward in history if possible; return the new current item or None."""
    if self._index >= len(self._history) - 1:
        return None
    self._index += 1
    self._check_index()
    return self.current_item
Go forward in history if possible .
51,776
def add_to_current_action(self, controller):
    """Add a controller to the current action (stored as a tuple)."""
    self._history[self._index] = self.current_item + (controller,)
Add a controller to the current action .
51,777
def redo(self):
    """Redo the last undone action and return the (possibly processed) updates."""
    controllers = self.forward()
    ups = (() if controllers is None
           else tuple(c.redo() for c in controllers))
    # Post-process the updates if a processing function is set.
    return self.process_ups(ups) if self.process_ups is not None else ups
Redo the last action .
51,778
def _insert_glsl(vertex, fragment, to_insert):
    """Insert snippets in a shader.

    Looks for the `gl_Position = transform(...);` placeholder in the vertex
    shader and replaces it with the transform-chain GLSL, also inserting
    headers and fragment snippets.
    """
    # Find the transform placeholder in the vertex shader.
    vs_regex = re.compile(r'gl_Position = transform\(([\S]+)\);')
    match = vs_regex.search(vertex)
    if not match:
        logger.debug("The vertex shader doesn't contain the transform "
                     "placeholder: skipping the transform chain "
                     "GLSL insertion.")
        return vertex, fragment
    logger.log(5, "Found transform placeholder in vertex code: `%s`",
               match.group(0))
    # Name of the variable being transformed.
    var = match.group(1)
    assert var and var in vertex
    # Prepend the headers.
    vertex = to_insert['vert', 'header'] + '\n\n' + vertex
    fragment = to_insert['frag', 'header'] + '\n\n' + fragment
    # Replace the placeholder with the full (indented) transform chain.
    vs_insert = (to_insert['vert', 'before_transforms'] +
                 to_insert['vert', 'transforms'] +
                 to_insert['vert', 'after_transforms'])
    vertex = vs_regex.sub(indent(vs_insert), vertex)
    # Insert the fragment snippets at the start of `main()`.
    fs_regex = re.compile(r'(void main\(\)\s*\{)')
    fs_insert = '\\1\n' + to_insert['frag', 'before_transforms']
    fragment = fs_regex.sub(fs_insert, fragment)
    # Replace the transformed-variable placeholder.
    vertex = vertex.replace('{{ var }}', var)
    return vertex, fragment
Insert snippets in a shader .
51,779
def on_draw(self):
    """Draw the visual, skipping it if the program is not built yet."""
    if not self.program:
        logger.debug("Skipping drawing visual `%s` because the program "
                     "has not been built yet.", self)
        return
    self.program.draw(self.gl_primitive_type)
Draw the visual .
51,780
def add_transform_chain(self, tc):
    """Insert the GLSL snippets of a transform chain."""
    # Generate the vertex-shader snippets for every GPU transform.
    for t in tc.gpu_transforms:
        if isinstance(t, Clip):
            # The Clip transform is handled in the fragment shader via a
            # varying; only set it here.
            self.insert_vert('v_temp_pos_tr = temp_pos_tr;')
            continue
        self.insert_vert(t.glsl('temp_pos_tr'))
    # Clipping itself is performed in the fragment shader.
    clip = tc.get('Clip')
    if clip:
        self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms')
Insert the GLSL snippets of a transform chain .
51,781
def insert_into_shaders(self, vertex, fragment):
    """Apply the accumulated insertions to vertex and fragment shader code."""
    to_insert = defaultdict(str)
    # Join every snippet list into a newline-terminated string.
    to_insert.update({key: '\n'.join(snippets) + '\n'
                      for key, snippets in self._to_insert.items()})
    return _insert_glsl(vertex, fragment, to_insert)
Apply the insertions to shader code .
51,782
def add_visual(self, visual):
    """Add a visual to the canvas and build its program by the same occasion."""
    inserter = visual.inserter
    # Insert the visual's own transforms, then the canvas transforms
    # (filtered by the visual), then the canvas inserter.
    inserter.add_transform_chain(visual.transforms)
    inserter.add_transform_chain(visual.canvas_transforms_filter(self.transforms))
    inserter += self.inserter
    # Build the final shaders and the program.
    vs, fs = inserter.insert_into_shaders(visual.vertex_shader,
                                          visual.fragment_shader)
    visual.program = gloo.Program(vs, fs)
    logger.log(5, "Vertex shader: %s", vs)
    logger.log(5, "Fragment shader: %s", fs)
    # Initialize the visual's size and register it.
    visual.on_resize(self.size)
    self.visuals.append(visual)
    self.events.visual_added(visual=visual)
Add a visual to the canvas and build its program by the same occasion .
51,783
def on_resize(self, event):
    """Resize the OpenGL context and propagate the new size to all visuals."""
    width, height = event.size[0], event.size[1]
    self.context.set_viewport(0, 0, width, height)
    for visual in self.visuals:
        visual.on_resize(event.size)
    self.update()
Resize the OpenGL context .
51,784
def on_draw(self, e):
    """Clear the canvas and draw all visuals in order."""
    gloo.clear()
    for visual in self.visuals:
        logger.log(5, "Draw visual `%s`.", visual)
        visual.on_draw()
Draw all visuals .
51,785
def update(self):
    """Update all visuals in the attached canvas, if any."""
    if not self.canvas:
        return
    for visual in self.canvas.visuals:
        self.update_program(visual.program)
    self.canvas.update()
Update all visuals in the attached canvas .
51,786
def _add_log_file(filename):
    """Create a `phy.log` log file with DEBUG level in the current directory."""
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    handler.setFormatter(_Formatter(fmt=_logger_fmt,
                                    datefmt='%Y-%m-%d %H:%M:%S'))
    # Attach to the root logger so every module logs to the file.
    logging.getLogger().addHandler(handler)
Create a phy . log log file with DEBUG level in the current directory .
51,787
def _run_cmd(cmd, ctx, glob, loc):
    """Run a command, optionally under pdb, IPython, or the profiler."""
    if PDB:
        _enable_pdb()
    if IPYTHON:
        from IPython import start_ipython
        # Interactive Qt-enabled IPython shell with the merged namespaces.
        args_ipy = ['-i', '--gui=qt']
        ns = glob.copy()
        ns.update(loc)
        return start_ipython(args_ipy, user_ns=ns)
    # Run under the line profiler when `profile` is injected in builtins.
    prof = __builtins__.get('profile', None)
    if prof:
        prof = __builtins__['profile']
        return _profile(prof, cmd, glob, loc)
    return exec_(cmd, glob, loc)
Run a command with optionally a debugger IPython or profiling .
51,788
def load_cli_plugins(cli, config_dir=None):
    """Load all plugins and attach them to a CLI object."""
    from .config import load_master_config
    config = load_master_config(config_dir=config_dir)
    for plugin in discover_plugins(config.Plugins.dirs):
        if not hasattr(plugin, 'attach_to_cli'):
            continue
        logger.debug("Attach plugin `%s` to CLI.", _fullname(plugin))
        # A faulty plugin must not crash the CLI: log the error and move on.
        try:
            plugin().attach_to_cli(cli)
        except Exception as e:
            logger.error("Error when loading plugin `%s`: %s", plugin, e)
Load all plugins and attach them to a CLI object .
51,789
def get_mouse_pos(self, pos):
    """Return the mouse coordinates in NDC, taking pan & zoom into account."""
    position = np.asarray(self._normalize(pos))
    zoom = np.asarray(self._zoom_aspect())
    pan = np.asarray(self.pan)
    # Invert the pan-zoom transform: world = screen / zoom - pan.
    return (position / zoom) - pan
Return the mouse coordinates in NDC taking panzoom into account .
51,790
def pan(self, value):
    """Pan translation."""
    assert len(value) == 2
    # In-place slice assignment keeps external references to `_pan` valid.
    self._pan[:] = value
    self._constrain_pan()
    self.update()
Pan translation .
51,791
def zoom(self, value):
    """Zoom level."""
    if isinstance(value, (int, float)):
        # A scalar means the same zoom on both axes.
        value = (value, value)
    assert len(value) == 2
    # Clamp to the allowed zoom range.
    self._zoom = np.clip(value, self._zmin, self._zmax)
    self._constrain_pan()
    self._constrain_zoom()
    self.update()
Zoom level .
51,792
def pan_delta(self, d):
    """Pan the view by a given amount."""
    dx, dy = d
    pan_x, pan_y = self.pan
    zx, zy = self._zoom_aspect(self._zoom)
    # The pan amount is scaled by the current (aspect-corrected) zoom.
    self.pan = (pan_x + dx / zx, pan_y + dy / zy)
    self.update()
Pan the view by a given amount .
51,793
def zoom_delta(self, d, p=(0., 0.), c=1.):
    """Zoom the view by a given amount, optionally centered on point `p`."""
    dx, dy = d
    x0, y0 = p
    pan_x, pan_y = self._pan
    zx, zy = self._zoom
    # Multiplicative zoom update, clamped to the allowed range.
    zx_new = zx * math.exp(c * self._zoom_coeff * dx)
    zy_new = zy * math.exp(c * self._zoom_coeff * dy)
    zx_new = max(min(zx_new, self._zmax), self._zmin)
    zy_new = max(min(zy_new, self._zmax), self._zmin)
    self.zoom = zx_new, zy_new
    if self._zoom_to_pointer:
        # Adjust the pan so that the zoom is centered on the pointer.
        zx, zy = self._zoom_aspect((zx, zy))
        zx_new, zy_new = self._zoom_aspect((zx_new, zy_new))
        self.pan = (pan_x - x0 * (1. / zx - 1. / zx_new),
                    pan_y - y0 * (1. / zy - 1. / zy_new))
    self.update()
Zoom the view by a given amount .
51,794
def set_range(self, bounds, keep_aspect=False):
    """Zoom to fit a box given as ``(x0, y0, x1, y1)`` bounds."""
    bounds = np.asarray(bounds, dtype=np.float64)
    v0, v1 = bounds[:2], bounds[2:]
    # Center the view on the box and scale to fill [-1, 1].
    pan = -.5 * (v0 + v1)
    zoom = 2. / (v1 - v0)
    if keep_aspect:
        # Use the smallest zoom on both axes to preserve the aspect ratio.
        zoom = zoom.min() * np.ones(2)
    self.set_pan_zoom(pan=pan, zoom=zoom)
Zoom to fit a box .
51,795
def get_range(self):
    """Return the bounds currently visible as ``(x0, y0, x1, y1)``."""
    pan, zoom = np.asarray(self.pan), np.asarray(self.zoom)
    # Invert the pan-zoom transform at the two NDC corners.
    x0, y0 = -1. / zoom - pan
    x1, y1 = +1. / zoom - pan
    return (x0, y0, x1, y1)
Return the bounds currently visible .
51,796
def on_mouse_move(self, event):
    """Pan and zoom with the mouse (left button pans, right button zooms)."""
    if event.modifiers:
        return
    if not event.is_dragging:
        return
    x0, y0 = self._normalize(event.press_event.pos)
    x1, y1 = self._normalize(event.last_event.pos)
    x, y = self._normalize(event.pos)
    dx, dy = x - x1, y - y1
    if event.button == 1:
        # Left button: pan.
        self.pan_delta((dx, dy))
    elif event.button == 2:
        # Right button: zoom, centered on the press position; the zoom
        # coefficient scales with the canvas width.
        c = np.sqrt(self.size[0]) * .03
        self.zoom_delta((dx, dy), (x0, y0), c=c)
Pan and zoom with the mouse .
51,797
def on_mouse_wheel(self, event):
    """Zoom with the mouse wheel, centered on the cursor position."""
    if event.modifiers:
        return
    # Only the wheel direction matters, scaled by the wheel coefficient.
    dx = np.sign(event.delta[1]) * self._wheel_coeff
    x0, y0 = self._normalize(event.pos)
    self.zoom_delta((dx, dx), (x0, y0))
Zoom with the mouse wheel .
51,798
def on_key_press(self, event):
    """Pan and zoom with the keyboard (arrows, +/-, and R to reset)."""
    if event.modifiers:
        return
    key = event.key
    if self.enable_keyboard_pan and key in self._arrows:
        self._pan_keyboard(key)
    if key in self._pm:
        self._zoom_keyboard(key)
    if key == 'R':
        self.reset()
Pan and zoom with the keyboard .
51,799
def _replace_docstring_header(paragraph):
    """Process NumPy-like function docstrings.

    Turns section headers into emphasized titles and parameter declarations
    into bullet items, using the module-level regex patterns.
    """
    # Emphasize section headers.
    paragraph = re.sub(_docstring_header_pattern, r'*\1*', paragraph)
    # Convert "name : type" parameter lines into bullet items.
    return re.sub(_docstring_parameters_pattern, r'\n* `\1` (\2)\n', paragraph)
Process NumPy - like function docstrings .