idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
13,500 | def _update_all_devices ( self ) : self . all_devices = [ ] self . all_devices . extend ( self . keyboards ) self . all_devices . extend ( self . mice ) self . all_devices . extend ( self . gamepads ) self . all_devices . extend ( self . other_devices ) | Update the all_devices list . |
13,501 | def _parse_device_path ( self , device_path , char_path_override = None ) : try : device_type = device_path . rsplit ( '-' , 1 ) [ 1 ] except IndexError : warn ( "The following device path was skipped as it could " "not be parsed: %s" % device_path , RuntimeWarning ) return realpath = os . path . realpath ( device_path ) if realpath in self . _raw : return self . _raw . append ( realpath ) if device_type == 'kbd' : self . keyboards . append ( Keyboard ( self , device_path , char_path_override ) ) elif device_type == 'mouse' : self . mice . append ( Mouse ( self , device_path , char_path_override ) ) elif device_type == 'joystick' : self . gamepads . append ( GamePad ( self , device_path , char_path_override ) ) else : self . other_devices . append ( OtherDevice ( self , device_path , char_path_override ) ) | Parse each device and add to the appropriate list . |
13,502 | def _find_xinput ( self ) : for dll in XINPUT_DLL_NAMES : try : self . xinput = getattr ( ctypes . windll , dll ) except OSError : pass else : self . xinput_dll = dll break else : warn ( "No xinput driver dll found, gamepads not supported." , RuntimeWarning ) | Find most recent xinput library . |
13,503 | def _find_devices_win ( self ) : self . _find_xinput ( ) self . _detect_gamepads ( ) self . _count_devices ( ) if self . _raw_device_counts [ 'keyboards' ] > 0 : self . keyboards . append ( Keyboard ( self , "/dev/input/by-id/usb-A_Nice_Keyboard-event-kbd" ) ) if self . _raw_device_counts [ 'mice' ] > 0 : self . mice . append ( Mouse ( self , "/dev/input/by-id/usb-A_Nice_Mouse_called_Arthur-event-mouse" ) ) | Find devices on Windows . |
13,504 | def _find_devices_mac ( self ) : self . keyboards . append ( Keyboard ( self ) ) self . mice . append ( MightyMouse ( self ) ) self . mice . append ( Mouse ( self ) ) | Find devices on Mac . |
13,505 | def _detect_gamepads ( self ) : state = XinputState ( ) for device_number in range ( 4 ) : res = self . xinput . XInputGetState ( device_number , ctypes . byref ( state ) ) if res == XINPUT_ERROR_SUCCESS : device_path = ( "/dev/input/by_id/" + "usb-Microsoft_Corporation_Controller_%s-event-joystick" % device_number ) self . gamepads . append ( GamePad ( self , device_path ) ) continue if res != XINPUT_ERROR_DEVICE_NOT_CONNECTED : raise RuntimeError ( "Unknown error %d attempting to get state of device %d" % ( res , device_number ) ) | Find gamepads . |
13,506 | def _count_devices ( self ) : number_of_devices = ctypes . c_uint ( ) if ctypes . windll . user32 . GetRawInputDeviceList ( ctypes . POINTER ( ctypes . c_int ) ( ) , ctypes . byref ( number_of_devices ) , ctypes . sizeof ( RawInputDeviceList ) ) == - 1 : warn ( "Call to GetRawInputDeviceList was unsuccessful." "We have no idea if a mouse or keyboard is attached." , RuntimeWarning ) return devices_found = ( RawInputDeviceList * number_of_devices . value ) ( ) if ctypes . windll . user32 . GetRawInputDeviceList ( devices_found , ctypes . byref ( number_of_devices ) , ctypes . sizeof ( RawInputDeviceList ) ) == - 1 : warn ( "Call to GetRawInputDeviceList was unsuccessful." "We have no idea if a mouse or keyboard is attached." , RuntimeWarning ) return for device in devices_found : if device . dwType == 0 : self . _raw_device_counts [ 'mice' ] += 1 elif device . dwType == 1 : self . _raw_device_counts [ 'keyboards' ] += 1 elif device . dwType == 2 : self . _raw_device_counts [ 'otherhid' ] += 1 else : self . _raw_device_counts [ 'unknown' ] += 1 | See what Windows GetRawInputDeviceList wants to tell us . |
13,507 | def _find_by ( self , key ) : by_path = glob . glob ( '/dev/input/by-{key}/*-event-*' . format ( key = key ) ) for device_path in by_path : self . _parse_device_path ( device_path ) | Find devices . |
13,508 | def _find_special ( self ) : charnames = self . _get_char_names ( ) for eventdir in glob . glob ( '/sys/class/input/event*' ) : char_name = os . path . split ( eventdir ) [ 1 ] if char_name in charnames : continue name_file = os . path . join ( eventdir , 'device' , 'name' ) with open ( name_file ) as name_file : device_name = name_file . read ( ) . strip ( ) if device_name in self . codes [ 'specials' ] : self . _parse_device_path ( self . codes [ 'specials' ] [ device_name ] , os . path . join ( '/dev/input' , char_name ) ) | Look for special devices . |
13,509 | def get_event_string ( self , evtype , code ) : if WIN and evtype == 'Key' : try : code = self . codes [ 'wincodes' ] [ code ] except KeyError : pass try : return self . codes [ evtype ] [ code ] except KeyError : raise UnknownEventCode ( "We don't know this event." , evtype , code ) | Get the string name of the event . |
13,510 | def detect_microbit ( self ) : try : gpad = MicroBitPad ( self ) except ModuleNotFoundError : warn ( "The microbit library could not be found in the pythonpath. \n" "For more information, please visit \n" "https://inputs.readthedocs.io/en/latest/user/microbit.html" , RuntimeWarning ) else : self . microbits . append ( gpad ) self . gamepads . append ( gpad ) | Detect a microbit . |
13,511 | def set_display ( self , index = None ) : if index : image = self . microbit . Image . STD_IMAGES [ index ] else : image = self . default_image self . microbit . display . show ( image ) | Show an image on the display . |
13,512 | def _setup_rumble ( self ) : self . left_rumble = self . _get_ready_to ( '99500' ) self . right_rumble = self . _get_ready_to ( '00599' ) self . double_rumble = self . _get_ready_to ( '99599' ) | Setup the three animations which simulate a rumble . |
13,513 | def _get_ready_to ( self , rumble ) : return [ self . microbit . Image ( ':' . join ( [ rumble if char == '1' else '00500' for char in code ] ) ) for code in SPIN_UP_MOTOR ] | Watch us wreck the mike! PSYCHE! |
13,514 | def _full_speed_rumble ( self , images , duration ) : while duration > 0 : self . microbit . display . show ( images [ 0 ] ) time . sleep ( 0.04 ) self . microbit . display . show ( images [ 1 ] ) time . sleep ( 0.04 ) duration -= 0.08 | Simulate the motors running at full . |
13,515 | def _spin_up ( self , images , duration ) : total = 0 for image in images : self . microbit . display . show ( image ) time . sleep ( 0.05 ) total += 0.05 if total >= duration : return remaining = duration - total self . _full_speed_rumble ( images [ - 2 : ] , remaining ) self . set_display ( ) | Simulate the motors getting warmed up . |
13,516 | def handle_new_events ( self , events ) : for event in events : self . events . append ( self . create_event_object ( event [ 0 ] , event [ 1 ] , int ( event [ 2 ] ) ) ) | Add each new events to the event queue . |
13,517 | def handle_abs ( self ) : x_raw = self . microbit . accelerometer . get_x ( ) y_raw = self . microbit . accelerometer . get_y ( ) x_abs = ( 'Absolute' , 0x00 , x_raw ) y_abs = ( 'Absolute' , 0x01 , y_raw ) return x_abs , y_abs | Gets the state as the raw absolute numbers . |
13,518 | def handle_dpad ( self ) : x_raw = self . microbit . accelerometer . get_x ( ) y_raw = self . microbit . accelerometer . get_y ( ) minus_sens = self . sensitivity * - 1 if x_raw < minus_sens : x_state = ( 'Absolute' , 0x10 , - 1 ) elif x_raw > self . sensitivity : x_state = ( 'Absolute' , 0x10 , 1 ) else : x_state = ( 'Absolute' , 0x10 , 0 ) if y_raw < minus_sens : y_state = ( 'Absolute' , 0x11 , - 1 ) elif y_raw > self . sensitivity : y_state = ( 'Absolute' , 0x11 , 1 ) else : y_state = ( 'Absolute' , 0x11 , 1 ) return x_state , y_state | Gets the state of the virtual dpad . |
13,519 | def check_state ( self ) : if self . dpad : x_state , y_state = self . handle_dpad ( ) else : x_state , y_state = self . handle_abs ( ) new_state = set ( ( x_state , y_state , ( 'Key' , 0x130 , int ( self . microbit . button_a . is_pressed ( ) ) ) , ( 'Key' , 0x131 , int ( self . microbit . button_b . is_pressed ( ) ) ) , ( 'Key' , 0x13a , int ( self . microbit . pin0 . is_touched ( ) ) ) , ( 'Key' , 0x133 , int ( self . microbit . pin1 . is_touched ( ) ) ) , ( 'Key' , 0x134 , int ( self . microbit . pin2 . is_touched ( ) ) ) , ) ) events = new_state - self . state self . state = new_state return events | Tracks differences in the device state . |
13,520 | def handle_input ( self ) : difference = self . check_state ( ) if not difference : return self . events = [ ] self . handle_new_events ( difference ) self . update_timeval ( ) self . events . append ( self . sync_marker ( self . timeval ) ) self . write_to_pipe ( self . events ) | Sends differences in the device state to the MicroBitPad as events . |
13,521 | def main ( ) : while 1 : events = get_mouse ( ) for event in events : print ( event . ev_type , event . code , event . state ) | Just print out some event information when the mouse is used . |
13,522 | def main ( ) : while 1 : events = get_key ( ) if events : for event in events : print ( event . ev_type , event . code , event . state ) | Just print out some event information when keys are pressed . |
13,523 | def main ( ) : while 1 : events = get_gamepad ( ) for event in events : print ( event . ev_type , event . code , event . state ) | Just print out some event information when the gamepad is used . |
13,524 | def main ( gamepad = None ) : if not gamepad : gamepad = inputs . devices . gamepads [ 0 ] gamepad . set_vibration ( 1 , 0 , 1000 ) time . sleep ( 2 ) gamepad . set_vibration ( 0 , 1 , 1000 ) time . sleep ( 2 ) gamepad . set_vibration ( 1 , 1 , 2000 ) time . sleep ( 2 ) | Vibrate the gamepad . |
13,525 | def validate ( self , value ) : errors = [ ] valid = self . _is_valid ( value ) if not valid : errors . append ( self . fail ( value ) ) return errors for constraint in self . _constraints_inst : error = constraint . is_valid ( value ) if error : errors . append ( error ) return errors | Check if value is valid . |
13,526 | def _process_schema ( self , schema_dict , validators ) : schema_flat = util . flatten ( schema_dict ) for key , expression in schema_flat . items ( ) : try : schema_flat [ key ] = syntax . parse ( expression , validators ) except SyntaxError as e : error = str ( e ) + ' at node \'%s\'' % key raise SyntaxError ( error ) return schema_flat | Go through a schema and construct validators . |
13,527 | def _validate ( self , validator , data , key , position = None , includes = None ) : errors = [ ] if position : position = '%s.%s' % ( position , key ) else : position = key try : data_item = util . get_value ( data , key ) except KeyError : if validator . is_optional : return errors errors . append ( '%s: Required field missing' % position ) return errors return self . _validate_item ( validator , data_item , position , includes ) | Run through a schema and a data structure validating along the way . |
13,528 | def _validate_item ( self , validator , data_item , position , includes ) : errors = [ ] if data_item is None and validator . is_optional and validator . can_be_none : return errors errors += self . _validate_primitive ( validator , data_item , position ) if errors : return errors if isinstance ( validator , val . Include ) : errors += self . _validate_include ( validator , data_item , includes , position ) elif isinstance ( validator , ( val . Map , val . List ) ) : errors += self . _validate_map_list ( validator , data_item , includes , position ) elif isinstance ( validator , val . Any ) : errors += self . _validate_any ( validator , data_item , includes , position ) return errors | Validates a single data item against validator . |
13,529 | def _find_data_path_schema ( data_path , schema_name ) : if not data_path or data_path == '/' or data_path == '.' : return None directory = os . path . dirname ( data_path ) path = glob . glob ( os . path . join ( directory , schema_name ) ) if not path : return _find_schema ( directory , schema_name ) return path [ 0 ] | Starts in the data file folder and recursively looks in parents for schema_name |
13,530 | def _find_schema ( data_path , schema_name ) : path = glob . glob ( schema_name ) for p in path : if os . path . isfile ( p ) : return p return _find_data_path_schema ( data_path , schema_name ) | Checks if schema_name is a valid file if not searches in data_path for it . |
13,531 | def flatten ( dic , keep_iter = False , position = None ) : child = { } if not dic : return { } for k , v in get_iter ( dic ) : if isstr ( k ) : k = k . replace ( '.' , '_' ) if position : item_position = '%s.%s' % ( position , k ) else : item_position = '%s' % k if is_iter ( v ) : child . update ( flatten ( dic [ k ] , keep_iter , item_position ) ) if keep_iter : child [ item_position ] = v else : child [ item_position ] = v return child | Returns a flattened dictionary from a dictionary of nested dictionaries and lists . keep_iter will treat iterables as valid values while also flattening them . |
13,532 | def to_representation ( self , obj ) : value = self . model_field . __get__ ( obj , None ) return smart_text ( value , strings_only = True ) | convert value to representation . |
13,533 | def run_validators ( self , value ) : try : self . model_field . validate ( value ) except MongoValidationError as e : raise ValidationError ( e . message ) super ( DocumentField , self ) . run_validators ( value ) | validate value . |
13,534 | def get_object_or_404 ( queryset , * args , ** kwargs ) : try : return queryset . get ( * args , ** kwargs ) except ( ValueError , TypeError , DoesNotExist , ValidationError ) : raise Http404 ( ) | replacement of rest_framework . generics and django . shortcuts analogues |
13,535 | def recursive_save ( self , validated_data , instance = None ) : me_data = dict ( ) for key , value in validated_data . items ( ) : try : field = self . fields [ key ] if isinstance ( field , EmbeddedDocumentSerializer ) : me_data [ key ] = field . recursive_save ( value ) elif ( ( isinstance ( field , serializers . ListSerializer ) or isinstance ( field , serializers . ListField ) ) and isinstance ( field . child , EmbeddedDocumentSerializer ) ) : me_data [ key ] = [ ] for datum in value : me_data [ key ] . append ( field . child . recursive_save ( datum ) ) elif ( isinstance ( field , drfm_fields . DictField ) and hasattr ( field , "child" ) and isinstance ( field . child , EmbeddedDocumentSerializer ) ) : me_data [ key ] = { } for datum_key , datum_value in value . items ( ) : me_data [ key ] [ datum_key ] = field . child . recursive_save ( datum_value ) else : me_data [ key ] = value except KeyError : me_data [ key ] = value if not instance : instance = self . Meta . model ( ** me_data ) else : for key , value in me_data . items ( ) : setattr ( instance , key , value ) if self . _saving_instances : instance . save ( ) return instance | Recursively traverses validated_data and creates EmbeddedDocuments of the appropriate subtype from them . |
13,536 | def apply_customization ( self , serializer , customization ) : if customization . fields is not None : if len ( customization . fields ) == 0 : serializer . Meta . fields = ALL_FIELDS else : serializer . Meta . fields = customization . fields if customization . exclude is not None : serializer . Meta . exclude = customization . exclude if customization . extra_kwargs is not None : serializer . Meta . extra_kwargs = customization . extra_kwargs for method_name , method in customization . validate_methods . items ( ) : setattr ( serializer , method_name , method ) | Applies fields customization to a nested or embedded DocumentSerializer . |
13,537 | def to_internal_value ( self , data ) : ret = super ( DynamicDocumentSerializer , self ) . to_internal_value ( data ) dynamic_data = self . _get_dynamic_data ( ret ) ret . update ( dynamic_data ) return ret | Updates _validated_data with dynamic data i . e . data not listed in fields . |
13,538 | def get_field_kwargs ( field_name , model_field ) : kwargs = { } kwargs [ 'model_field' ] = model_field if hasattr ( model_field , 'verbose_name' ) and needs_label ( model_field , field_name ) : kwargs [ 'label' ] = capfirst ( model_field . verbose_name ) if hasattr ( model_field , 'help_text' ) : kwargs [ 'help_text' ] = model_field . help_text if isinstance ( model_field , me_fields . DecimalField ) : precision = model_field . precision max_value = getattr ( model_field , 'max_value' , None ) if max_value is not None : max_length = len ( str ( max_value ) ) + precision else : max_length = 65536 kwargs [ 'decimal_places' ] = precision kwargs [ 'max_digits' ] = max_length if isinstance ( model_field , me_fields . GeoJsonBaseField ) : kwargs [ 'geo_type' ] = model_field . _type if isinstance ( model_field , me_fields . SequenceField ) or model_field . primary_key or model_field . db_field == '_id' : kwargs [ 'read_only' ] = True return kwargs if model_field . default and not isinstance ( model_field , me_fields . ComplexBaseField ) : kwargs [ 'default' ] = model_field . default if model_field . null : kwargs [ 'allow_null' ] = True if model_field . null and isinstance ( model_field , me_fields . StringField ) : kwargs [ 'allow_blank' ] = True if 'default' not in kwargs : kwargs [ 'required' ] = model_field . required if kwargs [ 'required' ] is True : if isinstance ( model_field , me_fields . ListField ) or isinstance ( model_field , me_fields . DictField ) : kwargs [ 'allow_empty' ] = False if model_field . choices : kwargs [ 'choices' ] = model_field . choices return kwargs if isinstance ( model_field , me_fields . StringField ) : if model_field . regex : kwargs [ 'regex' ] = model_field . regex max_length = getattr ( model_field , 'max_length' , None ) if max_length is not None and isinstance ( model_field , me_fields . 
StringField ) : kwargs [ 'max_length' ] = max_length min_length = getattr ( model_field , 'min_length' , None ) if min_length is not None and isinstance ( model_field , me_fields . StringField ) : kwargs [ 'min_length' ] = min_length max_value = getattr ( model_field , 'max_value' , None ) if max_value is not None and isinstance ( model_field , NUMERIC_FIELD_TYPES ) : kwargs [ 'max_value' ] = max_value min_value = getattr ( model_field , 'min_value' , None ) if min_value is not None and isinstance ( model_field , NUMERIC_FIELD_TYPES ) : kwargs [ 'min_value' ] = min_value return kwargs | Creating a default instance of a basic non - relational field . |
13,539 | def get_relation_kwargs ( field_name , relation_info ) : model_field , related_model = relation_info kwargs = { } if related_model and not issubclass ( related_model , EmbeddedDocument ) : kwargs [ 'queryset' ] = related_model . objects if model_field : if hasattr ( model_field , 'verbose_name' ) and needs_label ( model_field , field_name ) : kwargs [ 'label' ] = capfirst ( model_field . verbose_name ) if hasattr ( model_field , 'help_text' ) : kwargs [ 'help_text' ] = model_field . help_text kwargs [ 'required' ] = model_field . required if model_field . null : kwargs [ 'allow_null' ] = True if getattr ( model_field , 'unique' , False ) : validator = UniqueValidator ( queryset = related_model . objects ) kwargs [ 'validators' ] = [ validator ] return kwargs | Creating a default instance of a flat relational field . |
13,540 | def get_nested_relation_kwargs ( field_name , relation_info ) : kwargs = get_relation_kwargs ( field_name , relation_info ) kwargs . pop ( 'queryset' ) kwargs . pop ( 'required' ) kwargs [ 'read_only' ] = True return kwargs | Creating a default instance of a nested serializer |
13,541 | def degrees_dir ( CIJ ) : CIJ = binarize ( CIJ , copy = True ) id = np . sum ( CIJ , axis = 0 ) od = np . sum ( CIJ , axis = 1 ) deg = id + od return id , od , deg | Node degree is the number of links connected to the node . The indegree is the number of inward links and the outdegree is the number of outward links . |
13,542 | def degrees_und ( CIJ ) : CIJ = binarize ( CIJ , copy = True ) return np . sum ( CIJ , axis = 0 ) | Node degree is the number of links connected to the node . |
13,543 | def strengths_dir ( CIJ ) : istr = np . sum ( CIJ , axis = 0 ) ostr = np . sum ( CIJ , axis = 1 ) return istr + ostr | Node strength is the sum of weights of links connected to the node . The instrength is the sum of inward link weights and the outstrength is the sum of outward link weights . |
13,544 | def strengths_und_sign ( W ) : W = W . copy ( ) n = len ( W ) np . fill_diagonal ( W , 0 ) Spos = np . sum ( W * ( W > 0 ) , axis = 0 ) Sneg = np . sum ( W * ( W < 0 ) , axis = 0 ) vpos = np . sum ( W [ W > 0 ] ) vneg = np . sum ( W [ W < 0 ] ) return Spos , Sneg , vpos , vneg | Node strength is the sum of weights of links connected to the node . |
13,545 | def edge_nei_overlap_bu ( CIJ ) : ik , jk = np . where ( CIJ ) lel = len ( CIJ [ ik , jk ] ) n = len ( CIJ ) deg = degrees_und ( CIJ ) ec = np . zeros ( ( lel , ) ) degij = np . zeros ( ( 2 , lel ) ) for e in range ( lel ) : neiik = np . setdiff1d ( np . union1d ( np . where ( CIJ [ ik [ e ] , : ] ) , np . where ( CIJ [ : , ik [ e ] ] ) ) , ( ik [ e ] , jk [ e ] ) ) neijk = np . setdiff1d ( np . union1d ( np . where ( CIJ [ jk [ e ] , : ] ) , np . where ( CIJ [ : , jk [ e ] ] ) ) , ( ik [ e ] , jk [ e ] ) ) ec [ e ] = len ( np . intersect1d ( neiik , neijk ) ) / len ( np . union1d ( neiik , neijk ) ) degij [ : , e ] = ( deg [ ik [ e ] ] , deg [ jk [ e ] ] ) EC = np . tile ( np . inf , ( n , n ) ) EC [ ik , jk ] = ec return EC , ec , degij | This function determines the neighbors of two nodes that are linked by an edge and then computes their overlap . Connection matrix must be binary and directed . Entries of EC that are inf indicate that no edge is present . Entries of EC that are 0 denote local bridges i . e . edges that link completely non - overlapping neighborhoods . Low values of EC indicate edges that are weak ties . |
13,546 | def matching_ind ( CIJ ) : n = len ( CIJ ) Min = np . zeros ( ( n , n ) ) Mout = np . zeros ( ( n , n ) ) Mall = np . zeros ( ( n , n ) ) for i in range ( n - 1 ) : for j in range ( i + 1 , n ) : c1i = CIJ [ : , i ] c2i = CIJ [ : , j ] usei = np . logical_or ( c1i , c2i ) usei [ i ] = 0 usei [ j ] = 0 nconi = np . sum ( c1i [ usei ] ) + np . sum ( c2i [ usei ] ) if not nconi : Min [ i , j ] = 0 else : Min [ i , j ] = 2 * np . sum ( np . logical_and ( c1i [ usei ] , c2i [ usei ] ) ) / nconi c1o = CIJ [ i , : ] c2o = CIJ [ j , : ] useo = np . logical_or ( c1o , c2o ) useo [ i ] = 0 useo [ j ] = 0 ncono = np . sum ( c1o [ useo ] ) + np . sum ( c2o [ useo ] ) if not ncono : Mout [ i , j ] = 0 else : Mout [ i , j ] = 2 * np . sum ( np . logical_and ( c1o [ useo ] , c2o [ useo ] ) ) / ncono c1a = np . ravel ( ( c1i , c1o ) ) c2a = np . ravel ( ( c2i , c2o ) ) usea = np . logical_or ( c1a , c2a ) usea [ i ] = 0 usea [ i + n ] = 0 usea [ j ] = 0 usea [ j + n ] = 0 ncona = np . sum ( c1a [ usea ] ) + np . sum ( c2a [ usea ] ) if not ncona : Mall [ i , j ] = 0 else : Mall [ i , j ] = 2 * np . sum ( np . logical_and ( c1a [ usea ] , c2a [ usea ] ) ) / ncona Min = Min + Min . T Mout = Mout + Mout . T Mall = Mall + Mall . T return Min , Mout , Mall | For any two nodes u and v the matching index computes the amount of overlap in the connection patterns of u and v . Self - connections and u - v connections are ignored . The matching index is a symmetric quantity similar to a correlation or a dot product . |
13,547 | def dice_pairwise_und ( a1 , a2 ) : a1 = binarize ( a1 , copy = True ) a2 = binarize ( a2 , copy = True ) n = len ( a1 ) np . fill_diagonal ( a1 , 0 ) np . fill_diagonal ( a2 , 0 ) d = np . zeros ( ( n , ) ) for i in range ( n ) : d [ i ] = 2 * ( np . sum ( np . logical_and ( a1 [ : , i ] , a2 [ : , i ] ) ) / ( np . sum ( a1 [ : , i ] ) + np . sum ( a2 [ : , i ] ) ) ) return d | Calculates pairwise dice similarity for each vertex between two matrices . Treats the matrices as binary and undirected . |
13,548 | def corr_flat_und ( a1 , a2 ) : n = len ( a1 ) if len ( a2 ) != n : raise BCTParamError ( "Cannot calculate flattened correlation on " "matrices of different size" ) triu_ix = np . where ( np . triu ( np . ones ( ( n , n ) ) , 1 ) ) return np . corrcoef ( a1 [ triu_ix ] . flat , a2 [ triu_ix ] . flat ) [ 0 ] [ 1 ] | Returns the correlation coefficient between two flattened adjacency matrices . Only the upper triangular part is used to avoid double counting undirected matrices . Similarity metric for weighted matrices . |
13,549 | def corr_flat_dir ( a1 , a2 ) : n = len ( a1 ) if len ( a2 ) != n : raise BCTParamError ( "Cannot calculate flattened correlation on " "matrices of different size" ) ix = np . logical_not ( np . eye ( n ) ) return np . corrcoef ( a1 [ ix ] . flat , a2 [ ix ] . flat ) [ 0 ] [ 1 ] | Returns the correlation coefficient between two flattened adjacency matrices . Similarity metric for weighted matrices . |
13,550 | def adjacency_plot_und ( A , coor , tube = False ) : from mayavi import mlab n = len ( A ) nr_edges = ( n * n - 1 ) // 2 ixes , = np . where ( np . triu ( np . ones ( ( n , n ) ) , 1 ) . flat ) adjdat = A . flat [ ixes ] A_r = np . tile ( coor , ( n , 1 , 1 ) ) starts = np . reshape ( A_r , ( n * n , 3 ) ) [ ixes , : ] vecs = np . reshape ( A_r - np . transpose ( A_r , ( 1 , 0 , 2 ) ) , ( n * n , 3 ) ) [ ixes , : ] fig = mlab . figure ( ) nodesource = mlab . pipeline . scalar_scatter ( coor [ : , 0 ] , coor [ : , 1 ] , coor [ : , 2 ] , figure = fig ) nodes = mlab . pipeline . glyph ( nodesource , scale_mode = 'none' , scale_factor = 3. , mode = 'sphere' , figure = fig ) nodes . glyph . color_mode = 'color_by_scalar' vectorsrc = mlab . pipeline . vector_scatter ( starts [ : , 0 ] , starts [ : , 1 ] , starts [ : , 2 ] , vecs [ : , 0 ] , vecs [ : , 1 ] , vecs [ : , 2 ] , figure = fig ) vectorsrc . mlab_source . dataset . point_data . scalars = adjdat thres = mlab . pipeline . threshold ( vectorsrc , low = 0.0001 , up = np . max ( A ) , figure = fig ) vectors = mlab . pipeline . vectors ( thres , colormap = 'YlOrRd' , scale_mode = 'vector' , figure = fig ) vectors . glyph . glyph . clamping = False vectors . glyph . glyph . color_mode = 'color_by_scalar' vectors . glyph . color_mode = 'color_by_scalar' vectors . glyph . glyph_source . glyph_position = 'head' vectors . actor . property . opacity = .7 if tube : vectors . glyph . glyph_source . glyph_source = ( vectors . glyph . glyph_source . glyph_dict [ 'cylinder_source' ] ) vectors . glyph . glyph_source . glyph_source . radius = 0.015 else : vectors . glyph . glyph_source . glyph_source . glyph_type = 'dash' return fig | This function in matlab is a visualization helper which translates an adjacency matrix and an Nx3 matrix of spatial coordinates and plots a 3D isometric network connecting the undirected unweighted nodes using a specific plotting format . 
Including the formatted output is not useful at all for bctpy since matplotlib will not be able to plot it in quite the same way . |
13,551 | def backbone_wu ( CIJ , avgdeg ) : n = len ( CIJ ) if not np . all ( CIJ == CIJ . T ) : raise BCTParamError ( 'backbone_wu can only be computed for undirected ' 'matrices. If your matrix is has noise, correct it with np.around' ) CIJtree = np . zeros ( ( n , n ) ) i , j = np . where ( np . max ( CIJ ) == CIJ ) im = [ i [ 0 ] , i [ 1 ] ] jm = [ j [ 0 ] , j [ 1 ] ] CIJtree [ im , jm ] = CIJ [ im , jm ] in_ = im out = np . setdiff1d ( range ( n ) , in_ ) for ix in range ( n - 2 ) : CIJ_io = CIJ [ np . ix_ ( in_ , out ) ] i , j = np . where ( np . max ( CIJ_io ) == CIJ_io ) print ( i , j ) im = in_ [ i [ 0 ] ] jm = out [ j [ 0 ] ] CIJtree [ im , jm ] = CIJ [ im , jm ] CIJtree [ jm , im ] = CIJ [ jm , im ] in_ = np . append ( in_ , jm ) out = np . setdiff1d ( range ( n ) , in_ ) CIJnotintree = CIJ * np . logical_not ( CIJtree ) ix , = np . where ( CIJnotintree . flat ) a = np . sort ( CIJnotintree . flat [ ix ] ) [ : : - 1 ] cutoff = avgdeg * n - 2 * ( n - 1 ) - 1 if cutoff >= np . size ( a ) : CIJclus = CIJtree . copy ( ) else : thr = a [ cutoff ] CIJclus = CIJtree + CIJnotintree * ( CIJnotintree >= thr ) return CIJtree , CIJclus | The network backbone contains the dominant connections in the network and may be used to aid network visualization . This function computes the backbone of a given weighted and undirected connection matrix CIJ using a minimum - spanning - tree based algorithm . |
13,552 | def reorderMAT ( m , H = 5000 , cost = 'line' ) : from scipy import linalg , stats m = m . copy ( ) n = len ( m ) np . fill_diagonal ( m , 0 ) if cost == 'line' : profile = stats . norm . pdf ( range ( 1 , n + 1 ) , 0 , n / 2 ) [ : : - 1 ] elif cost == 'circ' : profile = stats . norm . pdf ( range ( 1 , n + 1 ) , n / 2 , n / 4 ) [ : : - 1 ] else : raise BCTParamError ( 'dfun must be line or circ' ) costf = linalg . toeplitz ( profile , r = profile ) lowcost = np . sum ( costf * m ) m_start = m . copy ( ) starta = np . arange ( n ) for h in range ( H ) : a = np . arange ( n ) r1 , r2 = rng . randint ( n , size = ( 2 , ) ) a [ r1 ] = r2 a [ r2 ] = r1 costnew = np . sum ( ( m [ np . ix_ ( a , a ) ] ) * costf ) if costnew < lowcost : m = m [ np . ix_ ( a , a ) ] r2_swap = starta [ r2 ] r1_swap = starta [ r1 ] starta [ r1 ] = r2_swap starta [ r2 ] = r1_swap lowcost = costnew M_reordered = m_start [ np . ix_ ( starta , starta ) ] m_indices = starta cost = lowcost return M_reordered , m_indices , cost | This function reorders the connectivity matrix in order to place more edges closer to the diagonal . This often helps in displaying community structure clusters etc . |
13,553 | def reorder_matrix ( m1 , cost = 'line' , verbose = False , H = 1e4 , Texp = 10 , T0 = 1e-3 , Hbrk = 10 ) : from scipy import linalg , stats n = len ( m1 ) if n < 2 : raise BCTParamError ( "align_matrix will infinite loop on a singleton " "or null matrix." ) if cost == 'line' : profile = stats . norm . pdf ( range ( 1 , n + 1 ) , loc = 0 , scale = n / 2 ) [ : : - 1 ] elif cost == 'circ' : profile = stats . norm . pdf ( range ( 1 , n + 1 ) , loc = n / 2 , scale = n / 4 ) [ : : - 1 ] else : raise BCTParamError ( 'cost must be line or circ' ) costf = linalg . toeplitz ( profile , r = profile ) * np . logical_not ( np . eye ( n ) ) costf /= np . sum ( costf ) maxcost = np . sum ( np . sort ( costf . flat ) * np . sort ( m1 . flat ) ) lowcost = np . sum ( m1 * costf ) / maxcost mincost = lowcost anew = np . arange ( n ) amin = np . arange ( n ) h = 0 hcnt = 0 Texp = 1 - Texp / H Hbrk = H / Hbrk while h < H : h += 1 hcnt += 1 if hcnt > Hbrk : break T = T0 * Texp ** h atmp = anew . copy ( ) r1 , r2 = rng . randint ( n , size = ( 2 , ) ) while r1 == r2 : r2 = rng . randint ( n ) atmp [ r1 ] = anew [ r2 ] atmp [ r2 ] = anew [ r1 ] costnew = np . sum ( ( m1 [ np . ix_ ( atmp , atmp ) ] ) * costf ) / maxcost if costnew < lowcost or rng . random_sample ( ) < np . exp ( - ( costnew - lowcost ) / T ) : anew = atmp lowcost = costnew if lowcost < mincost : amin = anew mincost = lowcost if verbose : print ( 'step %i ... current lowest cost = %f' % ( h , mincost ) ) hcnt = 0 if verbose : print ( 'step %i ... final lowest cost = %f' % ( h , mincost ) ) M_reordered = m1 [ np . ix_ ( amin , amin ) ] M_indices = amin cost = mincost return M_reordered , M_indices , cost | This function rearranges the nodes in matrix M1 such that the matrix elements are squeezed along the main diagonal . The function uses a version of simulated annealing . |
13,554 | def writetoPAJ ( CIJ , fname , directed ) : n = np . size ( CIJ , axis = 0 ) with open ( fname , 'w' ) as fd : fd . write ( '*vertices %i \r' % n ) for i in range ( 1 , n + 1 ) : fd . write ( '%i "%i" \r' % ( i , i ) ) if directed : fd . write ( '*arcs \r' ) else : fd . write ( '*edges \r' ) for i in range ( n ) : for j in range ( n ) : if CIJ [ i , j ] != 0 : fd . write ( '%i %i %.6f \r' % ( i + 1 , j + 1 , CIJ [ i , j ] ) ) | This function writes a Pajek . net file from a numpy matrix |
13,555 | def makeevenCIJ ( n , k , sz_cl , seed = None ) : rng = get_rng ( seed ) mx_lvl = int ( np . floor ( np . log2 ( n ) ) ) sz_cl -= 1 t = np . ones ( ( 2 , 2 ) ) * 2 Nlvl = 2 ** mx_lvl if Nlvl != n : print ( "Warning: n must be a power of 2" ) n = Nlvl for lvl in range ( 1 , mx_lvl ) : s = 2 ** ( lvl + 1 ) CIJ = np . ones ( ( s , s ) ) grp1 = range ( int ( s / 2 ) ) grp2 = range ( int ( s / 2 ) , s ) ix1 = np . add . outer ( np . array ( grp1 ) * s , grp1 ) . flatten ( ) ix2 = np . add . outer ( np . array ( grp2 ) * s , grp2 ) . flatten ( ) CIJ . flat [ ix1 ] = t CIJ . flat [ ix2 ] = t CIJ += 1 t = CIJ . copy ( ) CIJ -= ( np . ones ( ( s , s ) ) + mx_lvl * np . eye ( s ) ) CIJp = ( CIJ >= ( mx_lvl - sz_cl ) ) rem_k = k - np . size ( np . where ( CIJp . flatten ( ) ) ) if rem_k < 0 : print ( "Warning: K is too small, output matrix contains clusters only" ) return CIJp a , b = np . where ( np . logical_not ( CIJp + np . eye ( n ) ) ) rp = rng . permutation ( len ( a ) ) a = a [ rp [ : rem_k ] ] b = b [ rp [ : rem_k ] ] for ai , bi in zip ( a , b ) : CIJp [ ai , bi ] = 1 return np . array ( CIJp , dtype = int ) | This function generates a random directed network with a specified number of fully connected modules linked together by evenly distributed remaining random connections . |
13,556 | def makerandCIJdegreesfixed ( inv , outv , seed = None ) : rng = get_rng ( seed ) n = len ( inv ) k = np . sum ( inv ) in_inv = np . zeros ( ( k , ) ) out_inv = np . zeros ( ( k , ) ) i_in = 0 i_out = 0 for i in range ( n ) : in_inv [ i_in : i_in + inv [ i ] ] = i out_inv [ i_out : i_out + outv [ i ] ] = i i_in += inv [ i ] i_out += outv [ i ] CIJ = np . eye ( n ) edges = np . array ( ( out_inv , in_inv [ rng . permutation ( k ) ] ) ) for i in range ( k ) : if CIJ [ edges [ 0 , i ] , edges [ 1 , i ] ] : tried = set ( ) while True : if len ( tried ) == k : raise BCTParamError ( 'Could not resolve the given ' 'in and out vectors' ) switch = rng . randint ( k ) while switch in tried : switch = rng . randint ( k ) if not ( CIJ [ edges [ 0 , i ] , edges [ 1 , switch ] ] or CIJ [ edges [ 0 , switch ] , edges [ 1 , i ] ] ) : CIJ [ edges [ 0 , switch ] , edges [ 1 , switch ] ] = 0 CIJ [ edges [ 0 , switch ] , edges [ 1 , i ] ] = 1 if switch < i : CIJ [ edges [ 0 , switch ] , edges [ 1 , switch ] ] = 0 CIJ [ edges [ 0 , switch ] , edges [ 1 , i ] ] = 1 t = edges [ 1 , i ] edges [ 1 , i ] = edges [ 1 , switch ] edges [ 1 , switch ] = t break tried . add ( switch ) else : CIJ [ edges [ 0 , i ] , edges [ 1 , i ] ] = 1 CIJ -= np . eye ( n ) return CIJ | This function generates a directed random network with a specified in - degree and out - degree sequence . |
13,557 | def makerandCIJ_dir ( n , k , seed = None ) : rng = get_rng ( seed ) ix , = np . where ( np . logical_not ( np . eye ( n ) ) . flat ) rp = rng . permutation ( np . size ( ix ) ) CIJ = np . zeros ( ( n , n ) ) CIJ . flat [ ix [ rp ] [ : k ] ] = 1 return CIJ | This function generates a directed random network |
13,558 | def randmio_dir ( R , itr , seed = None ) : rng = get_rng ( seed ) R = R . copy ( ) n = len ( R ) i , j = np . where ( R ) k = len ( i ) itr *= k max_attempts = np . round ( n * k / ( n * ( n - 1 ) ) ) eff = 0 for it in range ( int ( itr ) ) : att = 0 while att <= max_attempts : while True : e1 = rng . randint ( k ) e2 = rng . randint ( k ) while e1 == e2 : e2 = rng . randint ( k ) a = i [ e1 ] b = j [ e1 ] c = i [ e2 ] d = j [ e2 ] if a != c and a != d and b != c and b != d : break if not ( R [ a , d ] or R [ c , b ] ) : R [ a , d ] = R [ a , b ] R [ a , b ] = 0 R [ c , b ] = R [ c , d ] R [ c , d ] = 0 i . setflags ( write = True ) j . setflags ( write = True ) i [ e1 ] = d j [ e2 ] = b eff += 1 break att += 1 return R , eff | This function randomizes a directed network while preserving the in - and out - degree distributions . In weighted networks the function preserves the out - strength but not the in - strength distributions . |
13,559 | def randmio_und ( R , itr , seed = None ) : if not np . all ( R == R . T ) : raise BCTParamError ( "Input must be undirected" ) rng = get_rng ( seed ) R = R . copy ( ) n = len ( R ) i , j = np . where ( np . tril ( R ) ) k = len ( i ) itr *= k max_attempts = np . round ( n * k / ( n * ( n - 1 ) ) ) eff = 0 for it in range ( int ( itr ) ) : att = 0 while att <= max_attempts : while True : e1 , e2 = rng . randint ( k , size = ( 2 , ) ) while e1 == e2 : e2 = rng . randint ( k ) a = i [ e1 ] b = j [ e1 ] c = i [ e2 ] d = j [ e2 ] if a != c and a != d and b != c and b != d : break if rng . random_sample ( ) > .5 : i . setflags ( write = True ) j . setflags ( write = True ) i [ e2 ] = d j [ e2 ] = c c = i [ e2 ] d = j [ e2 ] if not ( R [ a , d ] or R [ c , b ] ) : R [ a , d ] = R [ a , b ] R [ a , b ] = 0 R [ d , a ] = R [ b , a ] R [ b , a ] = 0 R [ c , b ] = R [ c , d ] R [ c , d ] = 0 R [ b , c ] = R [ d , c ] R [ d , c ] = 0 j . setflags ( write = True ) j [ e1 ] = d j [ e2 ] = b eff += 1 break att += 1 return R , eff | This function randomizes an undirected network while preserving the degree distribution . The function does not preserve the strength distribution in weighted networks . |
13,560 | def randmio_und_signed ( R , itr , seed = None ) : rng = get_rng ( seed ) R = R . copy ( ) n = len ( R ) itr *= int ( n * ( n - 1 ) / 2 ) max_attempts = int ( np . round ( n / 2 ) ) eff = 0 for it in range ( int ( itr ) ) : att = 0 while att <= max_attempts : a , b , c , d = pick_four_unique_nodes_quickly ( n , rng ) r0_ab = R [ a , b ] r0_cd = R [ c , d ] r0_ad = R [ a , d ] r0_cb = R [ c , b ] if ( np . sign ( r0_ab ) == np . sign ( r0_cd ) and np . sign ( r0_ad ) == np . sign ( r0_cb ) and np . sign ( r0_ab ) != np . sign ( r0_ad ) ) : R [ a , d ] = R [ d , a ] = r0_ab R [ a , b ] = R [ b , a ] = r0_ad R [ c , b ] = R [ b , c ] = r0_cd R [ c , d ] = R [ d , c ] = r0_cb eff += 1 break att += 1 return R , eff | This function randomizes an undirected weighted network with positive and negative weights while simultaneously preserving the degree distribution of positive and negative weights . The function does not preserve the strength distribution in weighted networks . |
13,561 | def evaluate_generative_model ( A , Atgt , D , eta , gamma = None , model_type = 'matching' , model_var = 'powerlaw' , epsilon = 1e-6 , seed = None ) : m = np . size ( np . where ( Atgt . flat ) ) // 2 n = len ( Atgt ) xk = np . sum ( Atgt , axis = 1 ) xc = clustering_coef_bu ( Atgt ) xb = betweenness_bin ( Atgt ) xe = D [ np . triu ( Atgt , 1 ) > 0 ] B = generative_model ( A , D , m , eta , gamma , model_type = model_type , model_var = model_var , epsilon = epsilon , copy = True , seed = seed ) nB = len ( eta ) if nB == 1 : B = np . reshape ( B , np . append ( np . shape ( B ) , 1 ) ) K = np . zeros ( ( nB , 4 ) ) def kstats ( x , y ) : bin_edges = np . concatenate ( [ [ - np . inf ] , np . sort ( np . concatenate ( ( x , y ) ) ) , [ np . inf ] ] ) bin_x , _ = np . histogram ( x , bin_edges ) bin_y , _ = np . histogram ( y , bin_edges ) sum_x = np . cumsum ( bin_x ) / np . sum ( bin_x ) sum_y = np . cumsum ( bin_y ) / np . sum ( bin_y ) cdfsamp_x = sum_x [ : - 1 ] cdfsamp_y = sum_y [ : - 1 ] delta_cdf = np . abs ( cdfsamp_x - cdfsamp_y ) print ( np . shape ( delta_cdf ) ) print ( np . argmax ( delta_cdf ) , np . max ( delta_cdf ) ) return np . max ( delta_cdf ) for ib in range ( nB ) : Bc = B [ : , : , ib ] yk = np . sum ( Bc , axis = 1 ) yc = clustering_coef_bu ( Bc ) yb = betweenness_bin ( Bc ) ye = D [ np . triu ( Bc , 1 ) > 0 ] K [ ib , 0 ] = kstats ( xk , yk ) K [ ib , 1 ] = kstats ( xc , yc ) K [ ib , 2 ] = kstats ( xb , yb ) K [ ib , 3 ] = kstats ( xe , ye ) return np . max ( K , axis = 1 ) | Generates synthetic networks with parameters provided and evaluates their energy function . The energy function is defined as in Betzel et al . 2016 . Basically it takes the Kolmogorov - Smirnov statistics of 4 network measures ; comparing the degree distributions clustering coefficients betweenness centrality and Euclidean distances between connected regions . The energy is globally low if the synthetic network matches the target . 
Energy is defined as the maximum difference across the four statistics . |
13,562 | def diversity_coef_sign ( W , ci ) : n = len ( W ) _ , ci = np . unique ( ci , return_inverse = True ) ci += 1 m = np . max ( ci ) def entropy ( w_ ) : S = np . sum ( w_ , axis = 1 ) Snm = np . zeros ( ( n , m ) ) for i in range ( m ) : Snm [ : , i ] = np . sum ( w_ [ : , ci == i + 1 ] , axis = 1 ) pnm = Snm / ( np . tile ( S , ( m , 1 ) ) . T ) pnm [ np . isnan ( pnm ) ] = 0 pnm [ np . logical_not ( pnm ) ] = 1 return - np . sum ( pnm * np . log ( pnm ) , axis = 1 ) / np . log ( m ) with np . errstate ( invalid = 'ignore' ) : Hpos = entropy ( W * ( W > 0 ) ) Hneg = entropy ( - W * ( W < 0 ) ) return Hpos , Hneg | The Shannon - entropy based diversity coefficient measures the diversity of intermodular connections of individual nodes and ranges from 0 to 1 . |
13,563 | def edge_betweenness_bin ( G ) : n = len ( G ) BC = np . zeros ( ( n , ) ) EBC = np . zeros ( ( n , n ) ) for u in range ( n ) : D = np . zeros ( ( n , ) ) D [ u ] = 1 NP = np . zeros ( ( n , ) ) NP [ u ] = 1 P = np . zeros ( ( n , n ) ) Q = np . zeros ( ( n , ) , dtype = int ) q = n - 1 Gu = G . copy ( ) V = np . array ( [ u ] ) while V . size : Gu [ : , V ] = 0 for v in V : Q [ q ] = v q -= 1 W , = np . where ( Gu [ v , : ] ) for w in W : if D [ w ] : NP [ w ] += NP [ v ] P [ w , v ] = 1 else : D [ w ] = 1 NP [ w ] = NP [ v ] P [ w , v ] = 1 V , = np . where ( np . any ( Gu [ V , : ] , axis = 0 ) ) if np . any ( np . logical_not ( D ) ) : Q [ : q ] , = np . where ( np . logical_not ( D ) ) DP = np . zeros ( ( n , ) ) for w in Q [ : n - 1 ] : BC [ w ] += DP [ w ] for v in np . where ( P [ w , : ] ) [ 0 ] : DPvw = ( 1 + DP [ w ] ) * NP [ v ] / NP [ w ] DP [ v ] += DPvw EBC [ v , w ] += DPvw return EBC , BC | Edge betweenness centrality is the fraction of all shortest paths in the network that contain a given edge . Edges with high values of betweenness centrality participate in a large number of shortest paths . |
13,564 | def erange ( CIJ ) : N = len ( CIJ ) K = np . size ( np . where ( CIJ ) [ 1 ] ) Erange = np . zeros ( ( N , N ) ) i , j = np . where ( CIJ ) for c in range ( len ( i ) ) : CIJcut = CIJ . copy ( ) CIJcut [ i [ c ] , j [ c ] ] = 0 R , D = reachdist ( CIJcut ) Erange [ i [ c ] , j [ c ] ] = D [ i [ c ] , j [ c ] ] eta = ( np . sum ( Erange [ np . logical_and ( Erange > 0 , Erange < np . inf ) ] ) / len ( Erange [ np . logical_and ( Erange > 0 , Erange < np . inf ) ] ) ) Eshort = Erange > 2 fs = len ( np . where ( Eshort ) ) / K return Erange , eta , Eshort , fs | Shortcuts are central edges which significantly reduce the characteristic path length in the network . |
13,565 | def module_degree_zscore ( W , ci , flag = 0 ) : _ , ci = np . unique ( ci , return_inverse = True ) ci += 1 if flag == 2 : W = W . copy ( ) W = W . T elif flag == 3 : W = W . copy ( ) W = W + W . T n = len ( W ) Z = np . zeros ( ( n , ) ) for i in range ( 1 , int ( np . max ( ci ) + 1 ) ) : Koi = np . sum ( W [ np . ix_ ( ci == i , ci == i ) ] , axis = 1 ) Z [ np . where ( ci == i ) ] = ( Koi - np . mean ( Koi ) ) / np . std ( Koi ) Z [ np . where ( np . isnan ( Z ) ) ] = 0 return Z | The within - module degree z - score is a within - module version of degree centrality . |
13,566 | def pagerank_centrality ( A , d , falff = None ) : from scipy import linalg N = len ( A ) if falff is None : norm_falff = np . ones ( ( N , ) ) / N else : norm_falff = falff / np . sum ( falff ) deg = np . sum ( A , axis = 0 ) deg [ deg == 0 ] = 1 D1 = np . diag ( 1 / deg ) B = np . eye ( N ) - d * np . dot ( A , D1 ) b = ( 1 - d ) * norm_falff r = linalg . solve ( B , b ) r /= np . sum ( r ) return r | The PageRank centrality is a variant of eigenvector centrality . This function computes the PageRank centrality of each vertex in a graph . |
13,567 | def subgraph_centrality ( CIJ ) : from scipy import linalg vals , vecs = linalg . eig ( CIJ ) Cs = np . real ( np . dot ( vecs * vecs , np . exp ( vals ) ) ) return Cs | The subgraph centrality of a node is a weighted sum of closed walks of different lengths in the network starting and ending at the node . This function returns a vector of subgraph centralities for each node of the network . |
13,568 | def invert ( W , copy = True ) : if copy : W = W . copy ( ) E = np . where ( W ) W [ E ] = 1. / W [ E ] return W | Inverts elementwise the weights in an input connection matrix . In other words change the from the matrix of internode strengths to the matrix of internode distances . |
13,569 | def ci2ls ( ci ) : if not np . size ( ci ) : return ci _ , ci = np . unique ( ci , return_inverse = True ) ci += 1 nr_indices = int ( max ( ci ) ) ls = [ ] for c in range ( nr_indices ) : ls . append ( [ ] ) for i , x in enumerate ( ci ) : ls [ ci [ i ] - 1 ] . append ( i ) return ls | Convert from a community index vector to a 2D python list of modules The list is a pure python list not requiring numpy . |
13,570 | def ls2ci ( ls , zeroindexed = False ) : if ls is None or np . size ( ls ) == 0 : return ( ) nr_indices = sum ( map ( len , ls ) ) ci = np . zeros ( ( nr_indices , ) , dtype = int ) z = int ( not zeroindexed ) for i , x in enumerate ( ls ) : for j , y in enumerate ( ls [ i ] ) : ci [ ls [ i ] [ j ] ] = i + z return ci | Convert from a 2D python list of modules to a community index vector . The list is a pure python list not requiring numpy . |
13,571 | def _safe_squeeze ( arr , * args , ** kwargs ) : out = np . squeeze ( arr , * args , ** kwargs ) if np . ndim ( out ) == 0 : out = out . reshape ( ( 1 , ) ) return out | numpy . squeeze will reduce a 1 - item array down to a zero - dimensional array which is not necessarily desirable . This function does the squeeze operation but ensures that there is at least 1 dimension in the output . |
13,572 | def modularity_und_sign ( W , ci , qtype = 'sta' ) : n = len ( W ) _ , ci = np . unique ( ci , return_inverse = True ) ci += 1 W0 = W * ( W > 0 ) W1 = - W * ( W < 0 ) s0 = np . sum ( W0 ) s1 = np . sum ( W1 ) Knm0 = np . zeros ( ( n , n ) ) Knm1 = np . zeros ( ( n , n ) ) for m in range ( int ( np . max ( ci ) ) ) : Knm0 [ : , m ] = np . sum ( W0 [ : , ci == m + 1 ] , axis = 1 ) Knm1 [ : , m ] = np . sum ( W1 [ : , ci == m + 1 ] , axis = 1 ) Kn0 = np . sum ( Knm0 , axis = 1 ) Kn1 = np . sum ( Knm1 , axis = 1 ) Km0 = np . sum ( Knm0 , axis = 0 ) Km1 = np . sum ( Knm1 , axis = 0 ) if qtype == 'smp' : d0 = 1 / s0 d1 = 1 / s1 elif qtype == 'gja' : d0 = 1 / ( s0 + s1 ) d1 = 1 / ( s0 + s1 ) elif qtype == 'sta' : d0 = 1 / s0 d1 = 1 / ( s0 + s1 ) elif qtype == 'pos' : d0 = 1 / s0 d1 = 0 elif qtype == 'neg' : d0 = 0 d1 = 1 / s1 else : raise KeyError ( 'modularity type unknown' ) if not s0 : s0 = 1 d0 = 0 if not s1 : s1 = 1 d1 = 0 m = np . tile ( ci , ( n , 1 ) ) q0 = ( W0 - np . outer ( Kn0 , Kn0 ) / s0 ) * ( m == m . T ) q1 = ( W1 - np . outer ( Kn1 , Kn1 ) / s1 ) * ( m == m . T ) q = d0 * np . sum ( q0 ) - d1 * np . sum ( q1 ) return ci , q | This function simply calculates the signed modularity for a given partition . It does not do automatic partition generation right now . |
13,573 | def partition_distance ( cx , cy ) : n = np . size ( cx ) _ , cx = np . unique ( cx , return_inverse = True ) _ , cy = np . unique ( cy , return_inverse = True ) _ , cxy = np . unique ( cx + cy * 1j , return_inverse = True ) cx += 1 cy += 1 cxy += 1 Px = np . histogram ( cx , bins = np . max ( cx ) ) [ 0 ] / n Py = np . histogram ( cy , bins = np . max ( cy ) ) [ 0 ] / n Pxy = np . histogram ( cxy , bins = np . max ( cxy ) ) [ 0 ] / n Hx = - np . sum ( Px * np . log ( Px ) ) Hy = - np . sum ( Py * np . log ( Py ) ) Hxy = - np . sum ( Pxy * np . log ( Pxy ) ) Vin = ( 2 * Hxy - Hx - Hy ) / np . log ( n ) Min = 2 * ( Hx + Hy - Hxy ) / ( Hx + Hy ) return Vin , Min | This function quantifies the distance between pairs of community partitions with information theoretic measures . |
13,574 | def breadth ( CIJ , source ) : n = len ( CIJ ) white = 0 gray = 1 black = 2 color = np . zeros ( ( n , ) ) distance = np . inf * np . ones ( ( n , ) ) branch = np . zeros ( ( n , ) ) color [ source ] = gray distance [ source ] = 0 branch [ source ] = - 1 Q = [ source ] while Q : u = Q [ 0 ] ns , = np . where ( CIJ [ u , : ] ) for v in ns : if distance [ v ] == 0 : distance [ v ] = distance [ u ] + 1 if color [ v ] == white : color [ v ] = gray distance [ v ] = distance [ u ] + 1 branch [ v ] = u Q . append ( v ) Q = Q [ 1 : ] color [ u ] = black return distance , branch | Implementation of breadth - first search . |
13,575 | def charpath ( D , include_diagonal = False , include_infinite = True ) : D = D . copy ( ) if not include_diagonal : np . fill_diagonal ( D , np . nan ) if not include_infinite : D [ np . isinf ( D ) ] = np . nan Dv = D [ np . logical_not ( np . isnan ( D ) ) ] . ravel ( ) lambda_ = np . mean ( Dv ) efficiency = np . mean ( 1 / Dv ) ecc = np . array ( np . ma . masked_where ( np . isnan ( D ) , D ) . max ( axis = 1 ) ) radius = np . min ( ecc ) diameter = np . max ( ecc ) return lambda_ , efficiency , ecc , radius , diameter | The characteristic path length is the average shortest path length in the network . The global efficiency is the average inverse shortest path length in the network . |
13,576 | def cycprob ( Pq ) : fcyc = np . zeros ( np . size ( Pq , axis = 2 ) ) for q in range ( np . size ( Pq , axis = 2 ) ) : if np . sum ( Pq [ : , : , q ] ) > 0 : fcyc [ q ] = np . sum ( np . diag ( Pq [ : , : , q ] ) ) / np . sum ( Pq [ : , : , q ] ) else : fcyc [ q ] = 0 pcyc = np . zeros ( np . size ( Pq , axis = 2 ) ) for q in range ( np . size ( Pq , axis = 2 ) ) : if np . sum ( Pq [ : , : , q - 1 ] ) - np . sum ( np . diag ( Pq [ : , : , q - 1 ] ) ) > 0 : pcyc [ q ] = ( np . sum ( np . diag ( Pq [ : , : , q - 1 ] ) ) / np . sum ( Pq [ : , : , q - 1 ] ) - np . sum ( np . diag ( Pq [ : , : , q - 1 ] ) ) ) else : pcyc [ q ] = 0 return fcyc , pcyc | Cycles are paths which begin and end at the same node . Cycle probability for path length d is the fraction of all paths of length d - 1 that may be extended to form cycles of length d . |
13,577 | def distance_wei_floyd ( adjacency , transform = None ) : if transform is not None : if transform == 'log' : if np . logical_or ( adjacency > 1 , adjacency < 0 ) . any ( ) : raise ValueError ( "Connection strengths must be in the " + "interval [0,1) to use the transform " + "-log(w_ij)." ) SPL = - np . log ( adjacency ) elif transform == 'inv' : SPL = 1. / adjacency else : raise ValueError ( "Unexpected transform type. Only 'log' and " + "'inv' are accepted" ) else : SPL = adjacency . copy ( ) . astype ( 'float' ) SPL [ SPL == 0 ] = np . inf n = adjacency . shape [ 1 ] flag_find_paths = True hops = np . array ( adjacency != 0 ) . astype ( 'float' ) Pmat = np . repeat ( np . atleast_2d ( np . arange ( 0 , n ) ) , n , 0 ) for k in range ( n ) : i2k_k2j = np . repeat ( SPL [ : , [ k ] ] , n , 1 ) + np . repeat ( SPL [ [ k ] , : ] , n , 0 ) if flag_find_paths : path = SPL > i2k_k2j i , j = np . where ( path ) hops [ path ] = hops [ i , k ] + hops [ k , j ] Pmat [ path ] = Pmat [ i , k ] SPL = np . min ( np . stack ( [ SPL , i2k_k2j ] , 2 ) , 2 ) I = np . eye ( n ) > 0 SPL [ I ] = 0 if flag_find_paths : hops [ I ] , Pmat [ I ] = 0 , 0 return SPL , hops , Pmat | Computes the topological length of the shortest possible path connecting every pair of nodes in the network . |
13,578 | def findwalks ( CIJ ) : CIJ = binarize ( CIJ , copy = True ) n = len ( CIJ ) Wq = np . zeros ( ( n , n , n ) ) CIJpwr = CIJ . copy ( ) Wq [ : , : , 1 ] = CIJ for q in range ( n ) : CIJpwr = np . dot ( CIJpwr , CIJ ) Wq [ : , : , q ] = CIJpwr twalk = np . sum ( Wq ) wlq = np . sum ( np . sum ( Wq , axis = 0 ) , axis = 0 ) return Wq , twalk , wlq | Walks are sequences of linked nodes that may visit a single node more than once . This function finds the number of walks of a given length between any two nodes . |
13,579 | def mean_first_passage_time ( adjacency ) : P = np . linalg . solve ( np . diag ( np . sum ( adjacency , axis = 1 ) ) , adjacency ) n = len ( P ) D , V = np . linalg . eig ( P . T ) aux = np . abs ( D - 1 ) index = np . where ( aux == aux . min ( ) ) [ 0 ] if aux [ index ] > 10e-3 : raise ValueError ( "Cannot find eigenvalue of 1. Minimum eigenvalue " + "value is {0}. Tolerance was " . format ( aux [ index ] + 1 ) + "set at 10e-3." ) w = V [ : , index ] . T w = w / np . sum ( w ) W = np . real ( np . repeat ( w , n , 0 ) ) I = np . eye ( n ) Z = np . linalg . inv ( I - P + W ) mfpt = ( np . repeat ( np . atleast_2d ( np . diag ( Z ) ) , n , 0 ) - Z ) / W return mfpt | Calculates mean first passage time of adjacency |
13,580 | def teachers_round ( x ) : if ( ( x > 0 ) and ( x % 1 >= 0.5 ) ) or ( ( x < 0 ) and ( x % 1 > 0.5 ) ) : return int ( np . ceil ( x ) ) else : return int ( np . floor ( x ) ) | Do rounding such that . 5 always rounds to 1 and not bankers rounding . This is for compatibility with matlab functions and ease of testing . |
13,581 | def dummyvar ( cis , return_sparse = False ) : n = np . size ( cis , axis = 0 ) m = np . size ( cis , axis = 1 ) r = np . sum ( ( np . max ( len ( np . unique ( cis [ : , i ] ) ) ) ) for i in range ( m ) ) nnz = np . prod ( cis . shape ) ix = np . argsort ( cis , axis = 0 ) s_cis = cis [ ix ] [ : , range ( m ) , range ( m ) ] mask = np . hstack ( ( ( ( True , ) , ) * m , ( s_cis [ : - 1 , : ] != s_cis [ 1 : , : ] ) . T ) ) indptr , = np . where ( mask . flat ) indptr = np . append ( indptr , nnz ) import scipy . sparse as sp dv = sp . csc_matrix ( ( np . repeat ( ( 1 , ) , nnz ) , ix . T . flat , indptr ) , shape = ( n , r ) ) return dv . toarray ( ) | This is an efficient implementation of matlab s dummyvar command using sparse matrices . |
13,582 | def assortativity_bin ( CIJ , flag = 0 ) : if flag == 0 : deg = degrees_und ( CIJ ) i , j = np . where ( np . triu ( CIJ , 1 ) > 0 ) K = len ( i ) degi = deg [ i ] degj = deg [ j ] else : id , od , deg = degrees_dir ( CIJ ) i , j = np . where ( CIJ > 0 ) K = len ( i ) if flag == 1 : degi = od [ i ] degj = id [ j ] elif flag == 2 : degi = id [ i ] degj = od [ j ] elif flag == 3 : degi = od [ i ] degj = od [ j ] elif flag == 4 : degi = id [ i ] degj = id [ j ] else : raise ValueError ( 'Flag must be 0-4' ) term1 = np . sum ( degi * degj ) / K term2 = np . square ( np . sum ( .5 * ( degi + degj ) ) / K ) term3 = np . sum ( .5 * ( degi * degi + degj * degj ) ) / K r = ( term1 - term2 ) / ( term3 - term2 ) return r | The assortativity coefficient is a correlation coefficient between the degrees of all nodes on two opposite ends of a link . A positive assortativity coefficient indicates that nodes tend to link to other nodes with the same or similar degree . |
13,583 | def kcore_bd ( CIJ , k , peel = False ) : if peel : peelorder , peellevel = ( [ ] , [ ] ) iter = 0 CIJkcore = CIJ . copy ( ) while True : id , od , deg = degrees_dir ( CIJkcore ) ff , = np . where ( np . logical_and ( deg < k , deg > 0 ) ) if ff . size == 0 : break iter += 1 CIJkcore [ ff , : ] = 0 CIJkcore [ : , ff ] = 0 if peel : peelorder . append ( ff ) if peel : peellevel . append ( iter * np . ones ( ( len ( ff ) , ) ) ) kn = np . sum ( deg > 0 ) if peel : return CIJkcore , kn , peelorder , peellevel else : return CIJkcore , kn | The k - core is the largest subnetwork comprising nodes of degree at least k . This function computes the k - core for a given binary directed connection matrix by recursively peeling off nodes with degree lower than k until no such nodes remain . |
13,584 | def kcore_bu ( CIJ , k , peel = False ) : if peel : peelorder , peellevel = ( [ ] , [ ] ) iter = 0 CIJkcore = CIJ . copy ( ) while True : deg = degrees_und ( CIJkcore ) ff , = np . where ( np . logical_and ( deg < k , deg > 0 ) ) if ff . size == 0 : break iter += 1 CIJkcore [ ff , : ] = 0 CIJkcore [ : , ff ] = 0 if peel : peelorder . append ( ff ) if peel : peellevel . append ( iter * np . ones ( ( len ( ff ) , ) ) ) kn = np . sum ( deg > 0 ) if peel : return CIJkcore , kn , peelorder , peellevel else : return CIJkcore , kn | The k - core is the largest subnetwork comprising nodes of degree at least k . This function computes the k - core for a given binary undirected connection matrix by recursively peeling off nodes with degree lower than k until no such nodes remain . |
13,585 | def score_wu ( CIJ , s ) : CIJscore = CIJ . copy ( ) while True : str = strengths_und ( CIJscore ) ff , = np . where ( np . logical_and ( str < s , str > 0 ) ) if ff . size == 0 : break CIJscore [ ff , : ] = 0 CIJscore [ : , ff ] = 0 sn = np . sum ( str > 0 ) return CIJscore , sn | The s - core is the largest subnetwork comprising nodes of strength at least s . This function computes the s - core for a given weighted undirected connection matrix . Computation is analogous to the more widely used k - core but is based on node strengths instead of node degrees . |
13,586 | def find_pad_index ( self , array ) : try : return list ( array ) . index ( self . pad_value ) except ValueError : return len ( array ) | Find padding index . |
13,587 | def get_length ( self , y ) : lens = [ self . find_pad_index ( row ) for row in y ] return lens | Get true length of y . |
13,588 | def convert_idx_to_name ( self , y , lens ) : y = [ [ self . id2label [ idx ] for idx in row [ : l ] ] for row , l in zip ( y , lens ) ] return y | Convert label index to name . |
13,589 | def predict ( self , X , y ) : y_pred = self . model . predict_on_batch ( X ) y_true = np . argmax ( y , - 1 ) y_pred = np . argmax ( y_pred , - 1 ) lens = self . get_length ( y_true ) y_true = self . convert_idx_to_name ( y_true , lens ) y_pred = self . convert_idx_to_name ( y_pred , lens ) return y_true , y_pred | Predict sequences . |
13,590 | def score ( self , y_true , y_pred ) : score = f1_score ( y_true , y_pred ) print ( ' - f1: {:04.2f}' . format ( score * 100 ) ) print ( classification_report ( y_true , y_pred , digits = 4 ) ) return score | Calculate f1 score . |
13,591 | def get_entities ( seq , suffix = False ) : if any ( isinstance ( s , list ) for s in seq ) : seq = [ item for sublist in seq for item in sublist + [ 'O' ] ] prev_tag = 'O' prev_type = '' begin_offset = 0 chunks = [ ] for i , chunk in enumerate ( seq + [ 'O' ] ) : if suffix : tag = chunk [ - 1 ] type_ = chunk . split ( '-' ) [ 0 ] else : tag = chunk [ 0 ] type_ = chunk . split ( '-' ) [ - 1 ] if end_of_chunk ( prev_tag , tag , prev_type , type_ ) : chunks . append ( ( prev_type , begin_offset , i - 1 ) ) if start_of_chunk ( prev_tag , tag , prev_type , type_ ) : begin_offset = i prev_tag = tag prev_type = type_ return chunks | Gets entities from sequence . |
13,592 | def end_of_chunk ( prev_tag , tag , prev_type , type_ ) : chunk_end = False if prev_tag == 'E' : chunk_end = True if prev_tag == 'S' : chunk_end = True if prev_tag == 'B' and tag == 'B' : chunk_end = True if prev_tag == 'B' and tag == 'S' : chunk_end = True if prev_tag == 'B' and tag == 'O' : chunk_end = True if prev_tag == 'I' and tag == 'B' : chunk_end = True if prev_tag == 'I' and tag == 'S' : chunk_end = True if prev_tag == 'I' and tag == 'O' : chunk_end = True if prev_tag != 'O' and prev_tag != '.' and prev_type != type_ : chunk_end = True return chunk_end | Checks if a chunk ended between the previous and current word . |
13,593 | def start_of_chunk ( prev_tag , tag , prev_type , type_ ) : chunk_start = False if tag == 'B' : chunk_start = True if tag == 'S' : chunk_start = True if prev_tag == 'E' and tag == 'E' : chunk_start = True if prev_tag == 'E' and tag == 'I' : chunk_start = True if prev_tag == 'S' and tag == 'E' : chunk_start = True if prev_tag == 'S' and tag == 'I' : chunk_start = True if prev_tag == 'O' and tag == 'E' : chunk_start = True if prev_tag == 'O' and tag == 'I' : chunk_start = True if tag != 'O' and tag != '.' and prev_type != type_ : chunk_start = True return chunk_start | Checks if a chunk started between the previous and current word . |
13,594 | def f1_score ( y_true , y_pred , average = 'micro' , suffix = False ) : true_entities = set ( get_entities ( y_true , suffix ) ) pred_entities = set ( get_entities ( y_pred , suffix ) ) nb_correct = len ( true_entities & pred_entities ) nb_pred = len ( pred_entities ) nb_true = len ( true_entities ) p = nb_correct / nb_pred if nb_pred > 0 else 0 r = nb_correct / nb_true if nb_true > 0 else 0 score = 2 * p * r / ( p + r ) if p + r > 0 else 0 return score | Compute the F1 score . |
13,595 | def precision_score ( y_true , y_pred , average = 'micro' , suffix = False ) : true_entities = set ( get_entities ( y_true , suffix ) ) pred_entities = set ( get_entities ( y_pred , suffix ) ) nb_correct = len ( true_entities & pred_entities ) nb_pred = len ( pred_entities ) score = nb_correct / nb_pred if nb_pred > 0 else 0 return score | Compute the precision . |
13,596 | def recall_score ( y_true , y_pred , average = 'micro' , suffix = False ) : true_entities = set ( get_entities ( y_true , suffix ) ) pred_entities = set ( get_entities ( y_pred , suffix ) ) nb_correct = len ( true_entities & pred_entities ) nb_true = len ( true_entities ) score = nb_correct / nb_true if nb_true > 0 else 0 return score | Compute the recall . |
13,597 | def classification_report ( y_true , y_pred , digits = 2 , suffix = False ) : true_entities = set ( get_entities ( y_true , suffix ) ) pred_entities = set ( get_entities ( y_pred , suffix ) ) name_width = 0 d1 = defaultdict ( set ) d2 = defaultdict ( set ) for e in true_entities : d1 [ e [ 0 ] ] . add ( ( e [ 1 ] , e [ 2 ] ) ) name_width = max ( name_width , len ( e [ 0 ] ) ) for e in pred_entities : d2 [ e [ 0 ] ] . add ( ( e [ 1 ] , e [ 2 ] ) ) last_line_heading = 'macro avg' width = max ( name_width , len ( last_line_heading ) , digits ) headers = [ "precision" , "recall" , "f1-score" , "support" ] head_fmt = u'{:>{width}s} ' + u' {:>9}' * len ( headers ) report = head_fmt . format ( u'' , * headers , width = width ) report += u'\n\n' row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n' ps , rs , f1s , s = [ ] , [ ] , [ ] , [ ] for type_name , true_entities in d1 . items ( ) : pred_entities = d2 [ type_name ] nb_correct = len ( true_entities & pred_entities ) nb_pred = len ( pred_entities ) nb_true = len ( true_entities ) p = nb_correct / nb_pred if nb_pred > 0 else 0 r = nb_correct / nb_true if nb_true > 0 else 0 f1 = 2 * p * r / ( p + r ) if p + r > 0 else 0 report += row_fmt . format ( * [ type_name , p , r , f1 , nb_true ] , width = width , digits = digits ) ps . append ( p ) rs . append ( r ) f1s . append ( f1 ) s . append ( nb_true ) report += u'\n' report += row_fmt . format ( 'micro avg' , precision_score ( y_true , y_pred , suffix = suffix ) , recall_score ( y_true , y_pred , suffix = suffix ) , f1_score ( y_true , y_pred , suffix = suffix ) , np . sum ( s ) , width = width , digits = digits ) report += row_fmt . format ( last_line_heading , np . average ( ps , weights = s ) , np . average ( rs , weights = s ) , np . average ( f1s , weights = s ) , np . sum ( s ) , width = width , digits = digits ) return report | Build a text report showing the main classification metrics . |
13,598 | def _timedelta_to_seconds ( td ) : if isinstance ( td , numbers . Real ) : td = datetime . timedelta ( seconds = td ) return td . total_seconds ( ) | Convert a datetime . timedelta object into a seconds interval for rotating file ouput . |
13,599 | def getLogger ( name = None , ** kwargs ) : adapter = _LOGGERS . get ( name ) if not adapter : adapter = KeywordArgumentAdapter ( logging . getLogger ( name ) , kwargs ) _LOGGERS [ name ] = adapter return adapter | Build a logger with the given name . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.