idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
9,200 | def avl_release_kids ( node ) : left , right = node . left , node . right if left is not None : left . parent = None if right is not None : right . parent = None node . balance = 0 node . left = None node . right = None return node , left , right | splits a node from its kids maintaining parent pointers |
9,201 | def avl_release_parent ( node ) : parent = node . parent if parent is not None : if parent . right is node : parent . right = None elif parent . left is node : parent . left = None else : raise AssertionError ( 'impossible state' ) node . parent = None parent . balance = max ( height ( parent . right ) , height ( parent . left ) ) + 1 return node , parent | removes the parent of a child |
9,202 | def avl_join ( t1 , t2 , node ) : if DEBUG_JOIN : print ( '-- JOIN node=%r' % ( node , ) ) if t1 is None and t2 is None : if DEBUG_JOIN : print ( 'Join Case 1' ) top = node elif t1 is None : if DEBUG_JOIN : print ( 'Join Case 2' ) top = avl_insert_dir ( t2 , node , 0 ) elif t2 is None : if DEBUG_JOIN : print ( 'Join Case 3' ) top = avl_insert_dir ( t1 , node , 1 ) else : h1 = height ( t1 ) h2 = height ( t2 ) if h1 > h2 + 1 : if DEBUG_JOIN : print ( 'Join Case 4' ) top = avl_join_dir_recursive ( t1 , t2 , node , 1 ) if DEBUG_JOIN : ascii_tree ( t1 , 'top' ) elif h2 > h1 + 1 : if DEBUG_JOIN : print ( 'Join Case 5' ) ascii_tree ( t1 ) ascii_tree ( t2 ) top = avl_join_dir_recursive ( t1 , t2 , node , 0 ) if DEBUG_JOIN : ascii_tree ( top ) else : if DEBUG_JOIN : print ( 'Join Case 6' ) top = avl_new_top ( t1 , t2 , node , 0 ) return top | Joins two trees t1 and t1 with an intermediate key - value pair |
9,203 | def avl_split_last ( root ) : if root is None : raise IndexError ( 'Empty tree has no maximum element' ) root , left , right = avl_release_kids ( root ) if right is None : new_root , last_node = left , root else : new_right , last_node = avl_split_last ( right ) new_root = avl_join ( left , new_right , root ) return ( new_root , last_node ) | Removes the maximum element from the tree |
9,204 | def avl_split_first ( root ) : if root is None : raise IndexError ( 'Empty tree has no maximum element' ) root , left , right = avl_release_kids ( root ) if left is None : new_root , first_node = right , root else : new_left , first_node = avl_split_first ( left ) new_root = avl_join ( new_left , right , root ) return ( new_root , first_node ) | Removes the minimum element from the tree |
9,205 | def avl_join2 ( t1 , t2 ) : if t1 is None and t2 is None : new_root = None elif t2 is None : new_root = t1 elif t1 is None : new_root = t2 else : new_left , last_node = avl_split_last ( t1 ) debug = 0 if debug : EulerTourTree ( root = new_left ) . _assert_nodes ( 'new_left' ) EulerTourTree ( root = last_node ) . _assert_nodes ( 'last_node' ) EulerTourTree ( root = t2 ) . _assert_nodes ( 't2' ) print ( 'new_left' ) EulerTourTree ( root = new_left ) . print_tree ( ) print ( 'last_node' ) EulerTourTree ( root = last_node ) . print_tree ( ) print ( 't2' ) EulerTourTree ( root = t2 ) . print_tree ( ) new_root = avl_join ( new_left , t2 , last_node ) if debug : print ( 'new_root' ) EulerTourTree ( root = new_root ) . print_tree ( ) EulerTourTree ( root = last_node ) . _assert_nodes ( 'new_root' ) return new_root | join two trees without any intermediate key |
9,206 | def to_networkx ( self , labels = None , edge_labels = False ) : import networkx as nx graph = nx . DiGraph ( ) for node in self . _traverse_nodes ( ) : u = node . key graph . add_node ( u ) graph . nodes [ u ] [ 'value' ] = node . value if labels is not None : label = ',' . join ( [ str ( getattr ( node , k ) ) for k in labels ] ) graph . nodes [ u ] [ 'label' ] = label if node . left is not None : v = node . left . key graph . add_node ( v ) graph . add_edge ( u , v ) if edge_labels : graph . edge [ u ] [ v ] [ 'label' ] = 'L' if node . right is not None : v = node . right . key graph . add_node ( v ) graph . add_edge ( u , v ) if edge_labels : graph . edge [ u ] [ v ] [ 'label' ] = 'R' return graph | Get a networkx representation of the binary search tree . |
9,207 | def repr_tree ( self ) : import utool as ut import networkx as nx repr_tree = nx . DiGraph ( ) for u , v in ut . itertwo ( self . values ( ) ) : if not repr_tree . has_edge ( v , u ) : repr_tree . add_edge ( u , v ) return repr_tree | reconstruct represented tree as a DiGraph to preserve the current rootedness |
9,208 | def unixjoin ( * args ) : isabs_list = list ( map ( isabs , args ) ) if any ( isabs_list ) : poslist = [ count for count , flag in enumerate ( isabs_list ) if flag ] pos = poslist [ - 1 ] return '/' . join ( args [ pos : ] ) else : return '/' . join ( args ) | Like os . path . join but uses forward slashes on win32 |
9,209 | def create_merge_psm_map ( peptides , ns ) : psmmap = { } for peptide in peptides : seq = reader . get_peptide_seq ( peptide , ns ) psm_ids = reader . get_psm_ids_from_peptide ( peptide , ns ) for psm_id in psm_ids : try : psmmap [ seq ] [ psm_id . text ] = 1 except KeyError : psmmap [ seq ] = { psm_id . text : 2 } for seq , psm_id_dict in psmmap . items ( ) : psmmap [ seq ] = [ x for x in psm_id_dict ] return psmmap | Loops through peptides stores sequences mapped to PSM ids . |
9,210 | def create_pool_b ( dsn = None , * , min_size = 10 , max_size = 10 , max_queries = 50000 , max_inactive_connection_lifetime = 300.0 , setup = None , init = None , loop = None , connection_class = BuildPgConnection , ** connect_kwargs , ) : return BuildPgPool ( dsn , connection_class = connection_class , min_size = min_size , max_size = max_size , max_queries = max_queries , loop = loop , setup = setup , init = init , max_inactive_connection_lifetime = max_inactive_connection_lifetime , ** connect_kwargs , ) | Create a connection pool . |
9,211 | def add_runnable ( self , runnable ) : if runnable . id in self . runnables : raise SimError ( 'Duplicate runnable component {0}' . format ( runnable . id ) ) self . runnables [ runnable . id ] = runnable | Adds a runnable component to the list of runnable components in this simulation . |
9,212 | def run ( self ) : self . init_run ( ) if self . debug : self . dump ( "AfterInit: " ) while self . step ( ) : pass | Runs the simulation . |
9,213 | def controller_creatr ( filename ) : if not check ( ) : click . echo ( Fore . RED + 'ERROR: Ensure you are in a bast app to run the create:controller command' ) return path = os . path . abspath ( '.' ) + '/controller' if not os . path . exists ( path ) : os . makedirs ( path ) file_name = str ( filename + '.py' ) if os . path . isfile ( path + "/" + file_name ) : click . echo ( Fore . WHITE + Back . RED + "ERROR: Controller file exists" ) return controller_file = open ( os . path . abspath ( '.' ) + '/controller/' + file_name , 'w+' ) compose = "from bast import Controller\n\nclass " + filename + "(Controller):\n pass" controller_file . write ( compose ) controller_file . close ( ) click . echo ( Fore . GREEN + "Controller " + filename + " created successfully" ) | Name of the controller file to be created |
9,214 | def view_creatr ( filename ) : if not check ( ) : click . echo ( Fore . RED + 'ERROR: Ensure you are in a bast app to run the create:view command' ) return path = os . path . abspath ( '.' ) + '/public/templates' if not os . path . exists ( path ) : os . makedirs ( path ) filename_ = str ( filename + ".html" ) . lower ( ) view_file = open ( path + "/" + filename_ , 'w+' ) view_file . write ( "" ) view_file . close ( ) click . echo ( Fore . GREEN + "View file " + filename_ + "created in public/template folder" ) | Name of the View File to be created |
9,215 | def migration_creatr ( migration_file , create , table ) : if not check ( ) : click . echo ( Fore . RED + 'ERROR: Ensure you are in a bast app to run the create:migration command' ) return migration = CreateMigration ( ) if table is None : table = snake_case ( migration_file ) file = migration . create_file ( snake_case ( migration_file ) , table = table , create = create ) click . echo ( Fore . GREEN + 'Migration file created at %s' % file ) | Name of the migration file |
9,216 | def quit ( self ) : logging . info ( "quiting sock server" ) if self . __quit is not None : self . __quit . set ( ) self . join ( ) return | Quit socket server |
9,217 | def get_quantcols ( pattern , oldheader , coltype ) : if pattern is None : return False if coltype == 'precur' : return reader . get_cols_in_file ( pattern , oldheader , single_col = True ) | Searches for quantification columns using pattern and header list . Calls reader function to do regexp . Returns a single column for precursor quant . |
9,218 | def get_peptide_quant ( quantdata , quanttype ) : parsefnx = { 'precur' : max } quantfloats = [ ] for q in quantdata : try : quantfloats . append ( float ( q ) ) except ( TypeError , ValueError ) : pass if not quantfloats : return 'NA' return str ( parsefnx [ quanttype ] ( quantfloats ) ) | Parses lists of quantdata and returns maxvalue from them . Strips NA |
9,219 | def read_csv ( fpath ) : import csv import utool as ut with open ( fpath , 'rb' ) as csvfile : row_iter = csv . reader ( csvfile , delimiter = str ( ',' ) , quotechar = str ( '|' ) ) row_list = [ ut . lmap ( ut . ensure_unicode , row ) for row in row_iter ] return row_list | reads csv in unicode |
9,220 | def get_caller_name ( N = 0 , strict = True ) : if isinstance ( N , ( list , tuple ) ) : name_list = [ ] for N_ in N : try : name_list . append ( get_caller_name ( N_ ) ) except AssertionError : name_list . append ( 'X' ) return '[' + '][' . join ( name_list ) + ']' parent_frame = get_stack_frame ( N = N + 2 , strict = strict ) caller_name = parent_frame . f_code . co_name if caller_name == '<module>' : co_filename = parent_frame . f_code . co_filename caller_name = splitext ( split ( co_filename ) [ 1 ] ) [ 0 ] if caller_name == '__init__' : co_filename = parent_frame . f_code . co_filename caller_name = basename ( dirname ( co_filename ) ) + '.' + caller_name return caller_name | Standalone version of get_caller_name |
9,221 | def _handle_ping ( self , packet , protocol ) : if 'payload' in packet : is_valid_node = True node_ids = list ( packet [ 'payload' ] . values ( ) ) for node_id in node_ids : if self . _repository . get_node ( node_id ) is None : is_valid_node = False break if is_valid_node : self . _pong ( packet , protocol ) else : self . _pong ( packet , protocol ) | Responds to pings from registry_client only if the node_ids present in the ping payload are registered |
9,222 | def set_features ( self ) : allpsms_str = readers . generate_psms_multiple_fractions_strings ( self . mergefiles , self . ns ) allpeps = preparation . merge_peptides ( self . mergefiles , self . ns ) self . features = { 'psm' : allpsms_str , 'peptide' : allpeps } | Merge all psms and peptides |
9,223 | def git_sequence_editor_squash ( fpath ) : r import utool as ut text = ut . read_from ( fpath ) print ( text ) prev_msg = None prev_dt = None new_lines = [ ] def get_commit_date ( hashid ) : out , err , ret = ut . cmd ( 'git show -s --format=%ci ' + hashid , verbose = False , quiet = True , pad_stdout = False ) from dateutil import parser stamp = out . strip ( '\n' ) dt = parser . parse ( stamp ) return dt for line in text . split ( '\n' ) : commit_line = line . split ( ' ' ) if len ( commit_line ) < 3 : prev_msg = None prev_dt = None new_lines += [ line ] continue action = commit_line [ 0 ] hashid = commit_line [ 1 ] msg = ' ' . join ( commit_line [ 2 : ] ) try : dt = get_commit_date ( hashid ) except ValueError : prev_msg = None prev_dt = None new_lines += [ line ] continue orig_msg = msg can_squash = action == 'pick' and msg == 'wip' and prev_msg == 'wip' if prev_dt is not None and prev_msg == 'wip' : tdelta = dt - prev_dt threshold_minutes = 45 td_min = ( tdelta . total_seconds ( ) / 60. ) can_squash &= td_min < threshold_minutes msg = msg + ' -- tdelta=%r' % ( ut . get_timedelta_str ( tdelta ) , ) if can_squash : new_line = ' ' . join ( [ 'squash' , hashid , msg ] ) new_lines += [ new_line ] else : new_lines += [ line ] prev_msg = orig_msg prev_dt = dt new_text = '\n' . join ( new_lines ) def get_commit_date ( hashid ) : out = ut . cmd ( 'git show -s --format=%ci ' + hashid , verbose = False ) print ( 'out = %r' % ( out , ) ) print ( new_text ) ut . write_to ( fpath , new_text , n = None ) | r squashes wip messages |
9,224 | def std_build_command ( repo = '.' ) : import utool as ut print ( '+**** stdbuild *******' ) print ( 'repo = %r' % ( repo , ) ) if sys . platform . startswith ( 'win32' ) : scriptname = 'mingw_build.bat' else : scriptname = './unix_build.sh' if repo == '' : repo = '.' else : os . chdir ( repo ) ut . assert_exists ( scriptname ) normbuild_flag = '--no-rmbuild' if ut . get_argflag ( normbuild_flag ) : scriptname += ' ' + normbuild_flag ut . cmd ( scriptname ) print ( 'L**** stdbuild *******' ) | DEPRICATE My standard build script names . |
9,225 | def wait_for_import ( self , connection_id , wait_interval ) : self . stdout . write ( self . style . NOTICE ( 'Waiting for import' ) , ending = '' ) state = utils . ConnectionStates . IMPORT_CONFIGURATION while state == utils . ConnectionStates . IMPORT_CONFIGURATION : self . stdout . write ( self . style . NOTICE ( '.' ) , ending = '' ) time . sleep ( wait_interval ) try : connection = utils . get_connection ( connection_id ) except requests . HTTPError as e : raise CommandError ( "Failed to fetch connection information." ) from e else : state = connection [ 'state' ] self . stdout . write ( self . style . NOTICE ( ' Done!' ) ) | Wait until connection state is no longer IMPORT_CONFIGURATION . |
9,226 | def setup ( self ) : if self . dry_run is not True : self . client = self . _get_client ( ) self . _disable_access_key ( ) | Method runs the plugin |
9,227 | def validate ( self ) : try : response = self . client . get_access_key_last_used ( AccessKeyId = self . access_key_id ) username = response [ 'UserName' ] access_keys = self . client . list_access_keys ( UserName = username ) for key in access_keys [ 'AccessKeyMetadata' ] : if ( key [ 'AccessKeyId' ] == self . access_key_id ) and ( key [ 'Status' ] == 'Inactive' ) : return True return False except Exception as e : logger . info ( "Failed to validate key disable for " "key {id} due to: {e}." . format ( e = e , id = self . access_key_id ) ) return False | Returns whether this plugin does what it claims to have done |
9,228 | def _disable_access_key ( self , force_disable_self = False ) : client = self . client if self . validate is True : return else : try : client . update_access_key ( UserName = self . _search_user_for_key ( ) , AccessKeyId = self . access_key_id , Status = 'Inactive' ) logger . info ( "Access key {id} has " "been disabled." . format ( id = self . access_key_id ) ) except Exception as e : logger . info ( "Access key {id} could not " "be disabled due to: {e}." . format ( e = e , id = self . access_key_id ) ) | This function first checks to see if the key is already disabled \ |
9,229 | def generate_master_proteins ( psms , protcol ) : master_proteins = { } if not protcol : protcol = mzidtsvdata . HEADER_MASTER_PROT for psm in psms : protacc = psm [ protcol ] if ';' in protacc : continue master_proteins [ protacc ] = 1 if 'NA' in master_proteins : master_proteins . pop ( 'NA' ) if '' in master_proteins : master_proteins . pop ( '' ) for protacc in master_proteins : yield { prottabledata . HEADER_PROTEIN : protacc } | Fed with a psms generator this returns the master proteins present in the PSM table . PSMs with multiple master proteins are excluded . |
9,230 | def prepare_percolator_output ( self , fn ) : ns = xml . get_namespace ( fn ) static = readers . get_percolator_static_xml ( fn , ns ) return ns , static | Returns namespace and static xml from percolator output file |
9,231 | def git_available ( func ) : def inner ( * args ) : os . chdir ( APISettings . GIT_DIR ) if call ( [ 'git' , 'rev-parse' ] ) == 0 : return func ( * args ) Shell . fail ( 'There is no git repository!' ) return exit ( 1 ) return inner | Check if a git repository exists in the given folder . |
9,232 | def _cuda_get_gpu_spec_string ( gpu_ids = None ) : if gpu_ids is None : return '' if isinstance ( gpu_ids , list ) : return ',' . join ( str ( gpu_id ) for gpu_id in gpu_ids ) if isinstance ( gpu_ids , int ) : return str ( gpu_ids ) return gpu_ids | Build a GPU id string to be used for CUDA_VISIBLE_DEVICES . |
9,233 | def write_error ( self , status_code , ** kwargs ) : reason = self . _reason if self . settings . get ( "serve_traceback" ) and "exc_info" in kwargs : error = [ ] for line in traceback . format_exception ( * kwargs [ "exc_info" ] ) : error . append ( line ) else : error = None data = { '_traceback' : error , 'message' : reason , 'code' : status_code } content = self . render_exception ( ** data ) self . write ( content ) | Handle Exceptions from the server . Formats the HTML into readable form |
9,234 | def view ( self , template_name , kwargs = None ) : if kwargs is None : kwargs = dict ( ) self . add_ ( 'session' , self . session ) content = self . render_template ( template_name , ** kwargs ) self . write ( content ) | Used to render template to view |
9,235 | def initialize ( self , method , middleware , request_type ) : self . method = method self . middleware = middleware self . request_type = request_type | Overridden initialize method from Tornado . Assigns the controller method and middleware attached to the route being executed to global variables to be used |
9,236 | def only ( self , arguments ) : data = { } if not isinstance ( arguments , list ) : arguments = list ( arguments ) for i in arguments : data [ i ] = self . get_argument ( i ) return data | returns the key value pair of the arguments passed as a dict object |
9,237 | def all ( self ) : data = { } args = self . request . arguments for key , value in args . items ( ) : data [ key ] = self . get_argument ( key ) return data | Returns all the arguments passed with the request |
9,238 | def except_ ( self , arguments ) : if not isinstance ( arguments , list ) : arguments = list ( arguments ) args = self . request . arguments data = { } for key , value in args . items ( ) : if key not in arguments : data [ key ] = self . get_argument ( key ) return data | returns the arguments passed to the route except that set by user |
9,239 | def get_one ( cls , db , * args , ** kwargs ) : data = db [ cls . collection ] . find_one ( * args , ** kwargs ) if data : return cls . wrap_incoming ( data , db ) else : return None | Returns an object that corresponds to given query or None . |
9,240 | def get_id ( self ) : import warnings warnings . warn ( '{0}.get_id() is deprecated, ' 'use {0}.id instead' . format ( type ( self ) . __name__ ) , DeprecationWarning ) return self . get ( '_id' ) | Returns object id or None . |
9,241 | def get_ref ( self ) : _id = self . id if _id is None : return None else : return DBRef ( self . collection , _id ) | Returns a DBRef for this object or None . |
9,242 | def formatTime ( self , record , datefmt = None ) : if datefmt : s = datetime . datetime . now ( ) . strftime ( datefmt ) else : t = datetime . datetime . now ( ) . strftime ( self . default_time_format ) s = self . default_msec_format % ( t , record . msecs ) return s | Overrides formatTime method to use datetime module instead of time module to display time in microseconds . Time module by default does not resolve time to microseconds . |
9,243 | def related_to ( self , instance ) : return self . filter ( table_name = instance . table_name , record_id = instance . record_id ) | Filter for all log objects of the same connected model as the given instance . |
9,244 | def capture_insert_from_model ( cls , table_name , record_id , * , exclude_fields = ( ) ) : exclude_cols = ( ) if exclude_fields : model_cls = get_connected_model_for_table_name ( table_name ) exclude_cols = cls . _fieldnames_to_colnames ( model_cls , exclude_fields ) raw_query = sql . SQL ( ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , exclude_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in exclude_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs ) | Create a fresh insert record from the current model state in the database . |
9,245 | def capture_update_from_model ( cls , table_name , record_id , * , update_fields = ( ) ) : include_cols = ( ) if update_fields : model_cls = get_connected_model_for_table_name ( table_name ) include_cols = cls . _fieldnames_to_colnames ( model_cls , update_fields ) raw_query = sql . SQL ( ) . format ( schema = sql . Identifier ( settings . HEROKU_CONNECT_SCHEMA ) , table_name = sql . Identifier ( table_name ) , include_cols = sql . SQL ( ', ' ) . join ( sql . Identifier ( col ) for col in include_cols ) , ) params = { 'record_id' : record_id , 'table_name' : table_name } result_qs = TriggerLog . objects . raw ( raw_query , params ) return list ( result_qs ) | Create a fresh update record from the current model state in the database . |
9,246 | def get_model ( self ) : model_cls = get_connected_model_for_table_name ( self . table_name ) return model_cls . _default_manager . filter ( id = self . record_id ) . first ( ) | Fetch the instance of the connected model referenced by this log record . |
9,247 | def related ( self , * , exclude_self = False ) : manager = type ( self ) . _default_manager queryset = manager . related_to ( self ) if exclude_self : queryset = queryset . exclude ( id = self . id ) return queryset | Get a QuerySet for all trigger log objects for the same connected model . |
9,248 | def _fieldnames_to_colnames ( model_cls , fieldnames ) : get_field = model_cls . _meta . get_field fields = map ( get_field , fieldnames ) return { f . column for f in fields } | Get the names of columns referenced by the given model fields . |
9,249 | def redo ( self ) : trigger_log = self . _to_live_trigger_log ( state = TRIGGER_LOG_STATE [ 'NEW' ] ) trigger_log . save ( force_insert = True ) self . state = TRIGGER_LOG_STATE [ 'REQUEUED' ] self . save ( update_fields = [ 'state' ] ) return trigger_log | Re - sync the change recorded in this trigger log . |
9,250 | def add_isoquant_data ( proteins , quantproteins , quantacc , quantfields ) : for protein in base_add_isoquant_data ( proteins , quantproteins , prottabledata . HEADER_PROTEIN , quantacc , quantfields ) : yield protein | Runs through a protein table and adds quant data from ANOTHER protein table that contains that data . |
9,251 | def add_isoquant_data ( peptides , quantpeptides , quantacc , quantfields ) : for peptide in base_add_isoquant_data ( peptides , quantpeptides , peptabledata . HEADER_PEPTIDE , quantacc , quantfields ) : yield peptide | Runs through a peptide table and adds quant data from ANOTHER peptide table that contains that data . |
9,252 | def map ( self , fn ) : return TimeSeries ( [ ( x , fn ( y ) ) for x , y in self . points ] ) | Run a map function across all y points in the series |
9,253 | def build_proteintable ( pqdb , headerfields , mergecutoff , isobaric = False , precursor = False , probability = False , fdr = False , pep = False , genecentric = False ) : pdmap = create_featuredata_map ( pqdb , genecentric = genecentric , psm_fill_fun = pinfo . add_psms_to_proteindata , pgene_fill_fun = pinfo . add_protgene_to_protdata , count_fun = pinfo . count_peps_psms , get_uniques = True ) empty_return = lambda x , y , z : { } iso_fun = { True : get_isobaric_quant , False : empty_return } [ isobaric ] ms1_fun = { True : get_precursor_quant , False : empty_return } [ precursor ] prob_fun = { True : get_prot_probability , False : empty_return } [ probability ] fdr_fun = { True : get_prot_fdr , False : empty_return } [ fdr ] pep_fun = { True : get_prot_pep , False : empty_return } [ pep ] pdata_fun = { True : get_protein_data_genecentric , False : get_protein_data } [ genecentric is not False ] protein_sql , sqlfieldmap = pqdb . prepare_mergetable_sql ( precursor , isobaric , probability , fdr , pep ) accession_field = prottabledata . ACCESSIONS [ genecentric ] proteins = pqdb . get_merged_features ( protein_sql ) protein = next ( proteins ) outprotein = { accession_field : protein [ sqlfieldmap [ 'p_acc' ] ] } check_prot = { k : v for k , v in outprotein . items ( ) } if not mergecutoff or protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) for protein in proteins : if mergecutoff and not protein_pool_fdr_cutoff ( protein , sqlfieldmap , mergecutoff ) : continue p_acc = protein [ sqlfieldmap [ 'p_acc' ] ] if p_acc != outprotein [ accession_field ] : if outprotein != check_prot : yield outprotein outprotein = { accession_field : p_acc } check_prot = { k : v for k , v in outprotein . 
items ( ) } fill_mergefeature ( outprotein , iso_fun , ms1_fun , prob_fun , fdr_fun , pep_fun , pdata_fun , protein , sqlfieldmap , headerfields , pdmap , accession_field ) if outprotein != check_prot : yield outprotein | Fetches proteins and quants from joined lookup table loops through them and when all of a protein s quants have been collected yields the protein quant information . |
9,254 | def count_protein_group_hits ( lineproteins , groups ) : hits = [ ] for group in groups : hits . append ( 0 ) for protein in lineproteins : if protein in group : hits [ - 1 ] += 1 return [ str ( x ) for x in hits ] | Takes a list of protein accessions and a list of protein groups content from DB . Counts for each group in list how many proteins are found in lineproteins . Returns list of str amounts . |
9,255 | def get_logging_dir ( appname = 'default' ) : from utool . _internal import meta_util_cache from utool . _internal import meta_util_cplat from utool import util_cache if appname is None or appname == 'default' : appname = util_cache . get_default_appname ( ) resource_dpath = meta_util_cplat . get_resource_dir ( ) default = join ( resource_dpath , appname , 'logs' ) log_dir = meta_util_cache . global_cache_read ( logdir_cacheid , appname = appname , default = default ) log_dir_realpath = realpath ( log_dir ) return log_dir_realpath | The default log dir is in the system resource directory But the utool global cache allows for the user to override where the logs for a specific app should be stored . |
9,256 | def add_logging_handler ( handler , format_ = 'file' ) : global __UTOOL_ROOT_LOGGER__ if __UTOOL_ROOT_LOGGER__ is None : builtins . print ( '[WARNING] logger not started, cannot add handler' ) return timeformat = '%H:%M:%S' if format_ == 'file' : logformat = '[%(asctime)s]%(message)s' elif format_ == 'stdout' : logformat = '%(message)s' else : raise AssertionError ( 'unknown logging format_: %r' % format_ ) formatter = logging . Formatter ( logformat , timeformat ) handler . setLevel ( logging . DEBUG ) handler . setFormatter ( formatter ) __UTOOL_ROOT_LOGGER__ . addHandler ( handler ) | mostly for util_logging internals |
9,257 | def start_logging ( log_fpath = None , mode = 'a' , appname = 'default' , log_dir = None ) : r global __UTOOL_ROOT_LOGGER__ global __UTOOL_PRINT__ global __UTOOL_WRITE__ global __UTOOL_FLUSH__ global __CURRENT_LOG_FPATH__ if LOGGING_VERBOSE : print ( '[utool] start_logging()' ) if __UTOOL_ROOT_LOGGER__ is None and __IN_MAIN_PROCESS__ and not __inside_doctest ( ) : if LOGGING_VERBOSE : print ( '[utool] start_logging()... rootcheck OK' ) if log_fpath is None : log_fpath = get_log_fpath ( num = 'next' , appname = appname , log_dir = log_dir ) __CURRENT_LOG_FPATH__ = log_fpath if VERBOSE or LOGGING_VERBOSE : startmsg = ( 'logging to log_fpath=%r' % log_fpath ) _utool_print ( ) ( startmsg ) __UTOOL_ROOT_LOGGER__ = logging . getLogger ( 'root' ) __UTOOL_ROOT_LOGGER__ . setLevel ( 'DEBUG' ) logfile_handler = logging . FileHandler ( log_fpath , mode = mode ) stdout_handler = CustomStreamHandler ( __UTOOL_STDOUT__ ) stdout_handler . terminator = '' add_logging_handler ( logfile_handler , format_ = 'file' ) add_logging_handler ( stdout_handler , format_ = 'stdout' ) __UTOOL_ROOT_LOGGER__ . propagate = False __UTOOL_ROOT_LOGGER__ . setLevel ( logging . DEBUG ) def utool_flush ( * args ) : stdout_handler . flush ( ) def utool_write ( * args ) : msg = ', ' . join ( map ( six . text_type , args ) ) __UTOOL_ROOT_LOGGER__ . info ( msg ) if not PRINT_ALL_CALLERS : def utool_print ( * args ) : endline = '\n' try : msg = ', ' . join ( map ( six . text_type , args ) ) return __UTOOL_ROOT_LOGGER__ . info ( msg + endline ) except UnicodeDecodeError : new_msg = ', ' . join ( map ( meta_util_six . ensure_unicode , args ) ) return __UTOOL_ROOT_LOGGER__ . info ( new_msg + endline ) else : def utool_print ( * args ) : import utool as ut utool_flush ( ) endline = '\n' __UTOOL_ROOT_LOGGER__ . info ( '\n\n----------' ) __UTOOL_ROOT_LOGGER__ . info ( ut . get_caller_name ( range ( 0 , 20 ) ) ) return __UTOOL_ROOT_LOGGER__ . info ( ', ' . join ( map ( six . 
text_type , args ) ) + endline ) def utool_printdbg ( * args ) : return __UTOOL_ROOT_LOGGER__ . debug ( ', ' . join ( map ( six . text_type , args ) ) ) __UTOOL_WRITE__ = utool_write __UTOOL_FLUSH__ = utool_flush __UTOOL_PRINT__ = utool_print if VERBOSE or LOGGING_VERBOSE : __UTOOL_PRINT__ ( '<__LOG_START__>' ) __UTOOL_PRINT__ ( startmsg ) else : if LOGGING_VERBOSE : print ( '[utool] start_logging()... FAILED TO START' ) print ( 'DEBUG INFO' ) print ( '__inside_doctest() = %r' % ( __inside_doctest ( ) , ) ) print ( '__IN_MAIN_PROCESS__ = %r' % ( __IN_MAIN_PROCESS__ , ) ) print ( '__UTOOL_ROOT_LOGGER__ = %r' % ( __UTOOL_ROOT_LOGGER__ , ) ) | r Overwrites utool print functions to use a logger |
9,258 | def stop_logging ( ) : global __UTOOL_ROOT_LOGGER__ global __UTOOL_PRINT__ global __UTOOL_WRITE__ global __UTOOL_FLUSH__ if __UTOOL_ROOT_LOGGER__ is not None : if VERBOSE or LOGGING_VERBOSE : _utool_print ( ) ( ) ( '<__LOG_STOP__>' ) _utool_flush ( ) ( ) for h in __UTOOL_ROOT_LOGGER__ . handlers [ : ] : __UTOOL_ROOT_LOGGER__ . removeHandler ( h ) __UTOOL_ROOT_LOGGER__ = None __UTOOL_PRINT__ = None __UTOOL_WRITE__ = None __UTOOL_FLUSH__ = None | Restores utool print functions to python defaults |
9,259 | def replace_nones ( list_ , repl = - 1 ) : r repl_list = [ repl if item is None else ( replace_nones ( item , repl ) if isinstance ( item , list ) else item ) for item in list_ ] return repl_list | r Recursively removes Nones in all lists and sublists and replaces them with the repl variable |
9,260 | def recursive_replace ( list_ , target , repl = - 1 ) : r repl_list = [ recursive_replace ( item , target , repl ) if isinstance ( item , ( list , np . ndarray ) ) else ( repl if item == target else item ) for item in list_ ] return repl_list | r Recursively removes target in all lists and sublists and replaces them with the repl variable |
9,261 | def ensure_list_size ( list_ , size_ ) : lendiff = ( size_ ) - len ( list_ ) if lendiff > 0 : extension = [ None for _ in range ( lendiff ) ] list_ . extend ( extension ) | Allocates more space if needbe . |
9,262 | def multi_replace ( instr , search_list = [ ] , repl_list = None ) : repl_list = [ '' ] * len ( search_list ) if repl_list is None else repl_list for ser , repl in zip ( search_list , repl_list ) : instr = instr . replace ( ser , repl ) return instr | Does a string replace with a list of search and replacements |
9,263 | def invertible_flatten1 ( unflat_list ) : r nextnum = functools . partial ( six . next , itertools . count ( 0 ) ) reverse_list = [ [ nextnum ( ) for _ in tup ] for tup in unflat_list ] flat_list = flatten ( unflat_list ) return flat_list , reverse_list | r Flattens unflat_list but remember how to reconstruct the unflat_list Returns flat_list and the reverse_list with indexes into the flat_list |
def invertible_flatten2(unflat_list):
    """An alternative to invertible_flatten1 which uses cumsum.

    Fix: the numpy-availability check was inverted — the original called
    ``np.cumsum`` precisely when ``util_type.HAVE_NUMPY`` was False (i.e.
    when numpy is unavailable), which would raise. The branches are swapped
    so the pure-python ``accumulate`` path is the fallback.

    Returns:
        tuple: (flat_list, cumlen_list) — cumulative sublist lengths allow
        reconstructing the nested structure.
    """
    sublen_list = list(map(len, unflat_list))
    if util_type.HAVE_NUMPY:
        cumlen_list = np.cumsum(sublen_list)
    else:
        cumlen_list = list(accumulate(sublen_list))
    flat_list = flatten(unflat_list)
    return flat_list, cumlen_list
def invertible_flatten2_numpy(unflat_arrs, axis=0):
    """Numpy variant of invertible_flatten2: concatenate arrays along
    ``axis`` and return the cumulative lengths needed to split them back."""
    lengths = [arr.shape[axis] for arr in unflat_arrs]
    cumlen_list = np.cumsum(lengths)
    flat_list = np.concatenate(unflat_arrs, axis=axis)
    return flat_list, cumlen_list
def unflat_unique_rowid_map(func, unflat_rowids, **kwargs):
    """Performs only one call to the underlying ``func`` with unique rowids.
    ``func`` must be some lookup function mapping rowids to values.

    The nested ``unflat_rowids`` are flattened, deduplicated, looked up once,
    then the results are broadcast back to the original nested structure.
    """
    import utool as ut
    flat_rowids, reverse_list = ut.invertible_flatten2(unflat_rowids)
    flat_rowids_arr = np.array(flat_rowids)
    # inverse_unique maps each flat position back to its unique-value index
    unique_flat_rowids, inverse_unique = np.unique(flat_rowids_arr,
                                                   return_inverse=True)
    # single call on the deduplicated rowids
    unique_flat_vals = func(unique_flat_rowids, **kwargs)
    # fan the unique results back out to every original (duplicated) position
    flat_vals_ = np.array(unique_flat_vals)[inverse_unique]
    # keep any trailing value dimensions from func's output
    output_shape = tuple(list(flat_rowids_arr.shape) +
                         list(flat_vals_.shape[1:]))
    flat_vals = np.array(flat_vals_).reshape(output_shape)
    unflat_vals = ut.unflatten2(flat_vals, reverse_list)
    return unflat_vals
def allsame(list_, strict=True):
    """Check whether every element of ``list_`` equals the first one.
    An empty list counts as all-same."""
    if len(list_) == 0:
        return True
    reference = list_[0]
    return list_all_eq_to(list_, reference, strict)
def list_all_eq_to(list_, val, strict=True):
    """Checks to see if list is equal everywhere to a value.

    Handles numpy arrays both as ``val`` and as list members: elementwise
    comparisons are collapsed with ``np.all``. With ``strict=False`` a
    ValueError from an ambiguous comparison falls back to repr equality.
    """
    # fast path: ndarray val compares elementwise; collapse each with np.all
    if util_type.HAVE_NUMPY and isinstance(val, np.ndarray):
        return all([np.all(item == val) for item in list_])
    try:
        with warnings.catch_warnings():
            # elementwise == on mixed types can emit FutureWarning; silence it
            warnings.filterwarnings('ignore', category=FutureWarning)
            flags = [item == val for item in list_]
            # flags may mix bools and arrays; collapse array-likes with np.all
            return all([np.all(flag) if hasattr(flag, '__array__') else flag
                        for flag in flags])
    except ValueError:
        # "truth value of an array is ambiguous" — optionally fall back to repr
        if not strict:
            return all([repr(item) == repr(val) for item in list_])
        else:
            raise
def get_dirty_items(item_list, flag_list):
    """Return the items of ``item_list`` whose corresponding flag is falsy."""
    assert len(item_list) == len(flag_list)
    dirty = []
    for item, flag in zip(item_list, flag_list):
        if not flag:
            dirty.append(item)
    return dirty
def filterfalse_items(item_list, flag_list):
    """Return the items of ``item_list`` kept by ``ifilterfalse_items``
    (i.e. the complement selection), materialized as a list."""
    assert len(item_list) == len(flag_list)
    kept = util_iter.ifilterfalse_items(item_list, flag_list)
    return list(kept)
def isect(list1, list2):
    """Return the elements of ``list1`` that also occur in ``list2``,
    preserving the order (and duplicates) of ``list1``."""
    members = set(list2)
    common = []
    for candidate in list1:
        if candidate in members:
            common.append(candidate)
    return common
def is_subset_of_any(set_, other_sets):
    """Return True if ``set_`` is a subset of at least one set in
    ``other_sets``; False when ``other_sets`` is empty."""
    query = set(set_)
    for other in other_sets:
        if query.issubset(set(other)):
            return True
    return False
def unique_ordered(list_):
    """Return the unique items of ``list_`` in first-seen order."""
    list_ = list(list_)
    keep_flags = flag_unique_items(list_)
    return compress(list_, keep_flags)
def setdiff(list1, list2):
    """Return the elements of ``list1`` not present in ``list2``,
    preserving the order of ``list1``."""
    exclude = set(list2)
    kept = []
    for candidate in list1:
        if candidate not in exclude:
            kept.append(candidate)
    return kept
def isetdiff_flags(list1, list2):
    """Lazily yield, for each item of ``list1``, whether it is absent
    from ``list2``. (Candidate for util_iter.)"""
    exclude = set(list2)
    return (candidate not in exclude for candidate in list1)
def unflat_take(items_list, unflat_index_list):
    """Return a nested subset of ``items_list`` mirroring the nesting of
    ``unflat_index_list``."""
    result = []
    for indexes in unflat_index_list:
        if isinstance(indexes, list):
            result.append(unflat_take(items_list, indexes))
        else:
            result.append(take(items_list, indexes))
    return result
def argsort(*args, **kwargs):
    """Like np.argsort but for lists; a single dict argument is sorted
    by value and its keys are returned."""
    if len(args) == 1 and isinstance(args[0], dict):
        dict_ = args[0]
        keys = list(dict_.keys())
        values = list(dict_.values())
        return sortedby2(keys, values)
    indexes = list(range(len(args[0])))
    return sortedby2(indexes, *args, **kwargs)
def argsort2(indexable, key=None, reverse=False):
    """Return the indices (or dict keys) that would sort ``indexable``."""
    if isinstance(indexable, dict):
        pairs = ((v, k) for k, v in indexable.items())
    else:
        pairs = ((v, k) for k, v in enumerate(indexable))
    if key is None:
        ordered = sorted(pairs, reverse=reverse)
    else:
        ordered = sorted(pairs, key=lambda vk: key(vk[0]), reverse=reverse)
    return [k for v, k in ordered]
def index_complement(index_list, len_=None):
    """Return the indices of a length-``len_`` list NOT in ``index_list``."""
    keep_mask = not_list(index_to_boolmask(index_list, len_))
    return list_where(keep_mask)
def take_complement(list_, index_list):
    """Return the items of ``list_`` whose positions are NOT in
    ``index_list``."""
    drop_mask = index_to_boolmask(index_list, len(list_))
    return compress(list_, not_list(drop_mask))
def take(list_, index_list):
    """Select a subset of ``list_`` by indices (pure-python np.take).
    A non-iterable index falls back to direct indexing."""
    try:
        taken = []
        for ix in index_list:
            taken.append(list_[ix])
        return taken
    except TypeError:
        # index_list is a scalar (or otherwise non-iterable)
        return list_[index_list]
def take_percentile(arr, percent):
    """Return the leading ``percent`` fraction of ``arr`` (clamped to its
    full length)."""
    total = len(arr)
    stop = min(int(total * percent), total)
    return arr[:stop]
def snapped_slice(size, frac, n):
    """Creates a slice spanning ``n`` items in a list of length ``size``
    centered at fractional position ``frac``, shifted ("snapped") inward
    so it stays within bounds.

    Fix: the out-of-bounds assertion message formatted its operands as
    ``(stop, start)`` while the text reads ``[start, stop]``; the order is
    corrected. No behavioral change on the success path.

    Args:
        size (int): length of the underlying list
        frac (float): center position as a fraction in [0, 1]
        n (int): number of items the slice should span

    Returns:
        slice: a slice of exactly min(n, size) items
    """
    if size < n:
        n = size
    start = int(size * frac - ceil(n / 2)) + 1
    stop = int(size * frac + floor(n / 2)) + 1
    # shift the window back inside [0, size) if it overhangs either end
    buf = 0
    if stop >= size:
        buf = (size - stop)
    elif start < 0:
        buf = 0 - start
    stop += buf
    start += buf
    assert stop <= size, 'out of bounds [%r, %r]' % (start, stop)
    return slice(start, stop)
def take_percentile_parts(arr, front=None, mid=None, back=None):
    """Take ``front``/``mid``/``back`` items from the start, middle, and
    end of ``arr`` and return them flattened into one list."""
    slices = []
    if front:
        slices.append(snapped_slice(len(arr), 0.0, front))
    if mid:
        slices.append(snapped_slice(len(arr), 0.5, mid))
    if back:
        slices.append(snapped_slice(len(arr), 1.0, back))
    return flatten([arr[sl] for sl in slices])
def broadcast_zip(list1, list2):
    """Zip elementwise pairs of ``list1`` and ``list2``; a length-1 side is
    broadcast against the other. Raises ValueError on any other mismatch."""
    try:
        len(list1)
    except TypeError:
        list1 = list(list1)
    try:
        len(list2)
    except TypeError:
        list2 = list(list2)
    n1, n2 = len(list1), len(list2)
    if n1 == 1 and n2 > 1:
        list1 = list1 * n2
    elif n1 > 1 and n2 == 1:
        list2 = list2 * n1
    elif n1 != n2:
        raise ValueError('out of alignment len(list1)=%r, len(list2)=%r'
                         % (n1, n2))
    return zip(list1, list2)
def equal(list1, list2):
    """Elementwise equality of two lists, broadcasting length-1 inputs
    via broadcast_zip."""
    return [lhs == rhs for lhs, rhs in broadcast_zip(list1, list2)]
def scalar_input_map(func, input_):
    """Map ``func`` over ``input_`` when it is iterable; otherwise apply
    ``func`` directly to the scalar."""
    if not util_iter.isiterable(input_):
        return func(input_)
    return list(map(func, input_))
def partial_imap_1to1(func, si_func):
    """Wrap scalar/iterable-aware function ``si_func`` so that ``func`` is
    applied to its output: scalar results get ``func`` directly, iterable
    results get it mapped elementwise. ("a bit messy" per the original.)
    """
    @functools.wraps(si_func)
    def wrapper(input_):
        if not util_iter.isiterable(input_):
            return func(si_func(input_))
        else:
            return list(map(func, si_func(input_)))
    # rename the wrapper to reflect both composed functions, e.g.
    # "<func>_mapper_<si_func>"
    set_funcname(wrapper, util_str.get_callable_name(func) +
                 '_mapper_' + get_funcname(si_func))
    return wrapper
def sample_zip(items_list, num_samples, allow_overflow=False, per_bin=1):
    """Helper for sampling: round-robin draws across ``items_list`` into
    ``num_samples`` bins, ``per_bin`` draws per bin.

    Fix: the overflow check used the Python-2-only iterator method
    ``samples_iter.next()``, which raises AttributeError on Python 3;
    replaced with the builtin ``next()``.

    Args:
        items_list (list): list of lists to sample from
        num_samples (int): number of draw rounds
        allow_overflow (bool): if True, also return leftover samples
        per_bin (int): draws placed in each bin before advancing

    Returns:
        list (or (list, list) when allow_overflow): the sample bins
        (and overflow samples)

    Raises:
        AssertionError: if items remain and overflow is not allowed
    """
    samples_list = [[] for _ in range(num_samples)]
    samples_iter = zip_longest(*items_list)
    sx = 0
    for ix, samples_ in zip(range(num_samples), samples_iter):
        samples = filter_Nones(samples_)
        samples_list[sx].extend(samples)
        # advance to the next bin every per_bin rounds
        if (ix + 1) % per_bin == 0:
            sx += 1
    if allow_overflow:
        overflow_samples = flatten([filter_Nones(samples_)
                                    for samples_ in samples_iter])
        return samples_list, overflow_samples
    else:
        try:
            next(samples_iter)  # py3: iterators have no .next() method
        except StopIteration:
            pass
        else:
            raise AssertionError('Overflow occured')
        return samples_list
def issorted(list_, op=operator.le):
    """Return True when consecutive elements of ``list_`` all satisfy
    ``op`` (non-decreasing by default). Empty and 1-element lists are
    sorted."""
    return all(op(a, b) for a, b in zip(list_, list_[1:]))
def list_depth(list_, func=max, _depth=0):
    """Return the deepest nesting level within a list of lists,
    aggregated by ``func`` (max by default)."""
    child_depths = [list_depth(child, func=func, _depth=_depth + 1)
                    for child in list_ if util_type.is_listlike(child)]
    if child_depths:
        return func(child_depths)
    return _depth
def depth(sequence, func=max, _depth=0):
    """Find the nesting depth of a nested sequence; dicts are descended
    through their values."""
    if isinstance(sequence, dict):
        sequence = list(sequence.values())
    sub_depths = [depth(member, func=func, _depth=_depth + 1)
                  for member in sequence
                  if isinstance(member, dict) or util_type.is_listlike(member)]
    if sub_depths:
        return func(sub_depths)
    return _depth
def list_deep_types(list_):
    """Return the types of every leaf item in a (possibly nested) list."""
    types = []
    for member in list_:
        if util_type.is_listlike(member):
            types.extend(list_deep_types(member))
        else:
            types.append(type(member))
    return types
def depth_profile(list_, max_depth=None, compress_homogenous=True,
                  compress_consecutive=False, new_depth=False):
    """Returns a nested structure describing the SHAPE of ``list_``:
    lists represent depth, tuples represent shape. Only lengths matter,
    not values.

    Args:
        list_: nested list (or dict, descended via values)
        max_depth: recurse this many levels; deeper levels become "len" strings
        compress_homogenous: collapse identical sibling shapes into a tuple
        compress_consecutive: render runs of equal shapes as a summary string
        new_depth: reserved; the corresponding branch is disabled below
    """
    if isinstance(list_, dict):
        list_ = list(list_.values())
    level_shape_list = []
    # a flat list profiles as just its length
    if not any(map(util_type.is_listlike, list_)):
        return len(list_)
    if False and new_depth:
        # disabled experimental path (kept verbatim)
        pass
    else:
        for item in list_:
            if isinstance(item, dict):
                item = list(item.values())
            if util_type.is_listlike(item):
                if max_depth is None:
                    level_shape_list.append(depth_profile(item, None))
                else:
                    if max_depth >= 0:
                        level_shape_list.append(
                            depth_profile(item, max_depth - 1))
                    else:
                        # beyond max_depth: summarize as a length string
                        level_shape_list.append(str(len(item)))
            else:
                level_shape_list.append(1)
    if compress_homogenous:
        # all siblings share a shape -> collapse to (count, *shape)
        if allsame(level_shape_list):
            dim_ = level_shape_list[0]
            len_ = len(level_shape_list)
            if isinstance(dim_, tuple):
                level_shape_list = tuple([len_] + list(dim_))
            else:
                level_shape_list = tuple([len_, dim_])
    if compress_consecutive:
        # group equal consecutive shapes (compared via hash of str form)
        hash_list = list(map(hash, map(str, level_shape_list)))
        consec_list = group_consecutives(hash_list, 0)
        if len(consec_list) != len(level_shape_list):
            len_list = list(map(len, consec_list))
            cumsum_list = np.cumsum(len_list)
            consec_str = '['
            thresh = 1
            for len_, cumsum in zip(len_list, cumsum_list):
                value = level_shape_list[cumsum - 1]
                if len_ > thresh:
                    # runs longer than thresh render as "[shape] * count"
                    consec_str += str(value) + '] * ' + str(len_)
                    consec_str += ' + ['
                else:
                    consec_str += str(value) + ', '
            if consec_str.endswith(', '):
                consec_str = consec_str[:-2]
            consec_str += ']'
            level_shape_list = consec_str
    return level_shape_list
def list_cover(list1, list2):
    """For each position in ``list1``, return True if that item occurs
    anywhere in ``list2``."""
    lookup = set(list2)
    return [member in lookup for member in list1]
def list_alignment(list1, list2, missing=False):
    """Return, for each item of ``list2``, its index in ``list1``.
    Assumes list items are unique; ``missing=True`` maps absent items
    to None instead of raising."""
    import utool as ut
    item1_to_idx = make_index_lookup(list1)
    if missing:
        return ut.dict_take(item1_to_idx, list2, None)
    return ut.take(item1_to_idx, list2)
def list_transpose(list_, shape=None):
    """Swaps rows and columns. ``shape`` (a 2-tuple) should be specified
    if the initial list is empty, so the number of output columns is known.

    Raises:
        ValueError: on an empty transpose without ``shape``, or when row
        lengths are inconsistent.
    """
    # set of distinct row lengths; exactly one means a proper matrix
    num_cols_set = unique([len(x) for x in list_])
    if shape is None:
        if len(num_cols_set) == 0:
            raise ValueError(
                'listT does not support empty transpose without shapes')
    else:
        assert len(shape) == 2, 'shape must be a 2-tuple'
        # NOTE(review): nesting reconstructed from a flattened source —
        # these empty-input early returns are presumed to live in the
        # shape-given branch (shape[1] is only safe here); TODO confirm.
        if len(num_cols_set) == 0:
            return [[] for _ in range(shape[1])]
        elif num_cols_set[0] == 0:
            return []
    if len(num_cols_set) != 1:
        raise ValueError('inconsistent column lengths=%r' % (num_cols_set,))
    return list(zip(*list_))
def delete_items_by_index(list_, index_list, copy=False):
    """Remove the items of ``list_`` at the positions in ``index_list``
    (negative indices allowed). Mutates in place unless ``copy`` is True."""
    if copy:
        list_ = list_[:]
    # normalize negatives, then delete from the back so positions stay valid
    normalized = [len(list_) + ix if ix < 0 else ix for ix in index_list]
    for ix in sorted(normalized, reverse=True):
        del list_[ix]
    return list_
def delete_list_items(list_, item_list, copy=False):
    """Remove (by value) each item of ``item_list`` from ``list_``.
    Mutates in place unless ``copy`` is True; raises ValueError if an
    item is absent (list.remove semantics)."""
    if copy:
        list_ = list_[:]
    for unwanted in item_list:
        list_.remove(unwanted)
    return list_
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.