idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
17,900 | def filter_pmids ( pmid_list , source_type ) : global pmids_fulltext_dict if source_type not in ( 'fulltext' , 'oa_xml' , 'oa_txt' , 'auth_xml' ) : raise ValueError ( "source_type must be one of: 'fulltext', 'oa_xml', " "'oa_txt', or 'auth_xml'." ) if pmids_fulltext_dict . get ( source_type ) is None : fulltext_list_path = os . path . join ( os . path . dirname ( __file__ ) , 'pmids_%s.txt' % source_type ) with open ( fulltext_list_path , 'rb' ) as f : fulltext_list = set ( [ line . strip ( ) . decode ( 'utf-8' ) for line in f . readlines ( ) ] ) pmids_fulltext_dict [ source_type ] = fulltext_list return list ( set ( pmid_list ) . intersection ( pmids_fulltext_dict . get ( source_type ) ) ) | Filter a list of PMIDs for ones with full text from PMC . |
17,901 | def get_example_extractions ( fname ) : "Get extractions from one of the examples in `cag_examples`." with open ( fname , 'r' ) as f : sentences = f . read ( ) . splitlines ( ) rdf_xml_dict = { } for sentence in sentences : logger . info ( "Reading \"%s\"..." % sentence ) html = tc . send_query ( sentence , 'cwms' ) try : rdf_xml_dict [ sentence ] = tc . get_xml ( html , 'rdf:RDF' , fail_if_empty = True ) except AssertionError as e : logger . error ( "Got error for %s." % sentence ) logger . exception ( e ) return rdf_xml_dict | Get extractions from one of the examples in cag_examples . |
17,902 | def make_example_graphs ( ) : "Make graphs from all the examples in cag_examples." cag_example_rdfs = { } for i , fname in enumerate ( os . listdir ( 'cag_examples' ) ) : cag_example_rdfs [ i + 1 ] = get_example_extractions ( fname ) return make_cag_graphs ( cag_example_rdfs ) | Make graphs from all the examples in cag_examples . |
17,903 | def _join_list ( lst , oxford = False ) : if len ( lst ) > 2 : s = ', ' . join ( lst [ : - 1 ] ) if oxford : s += ',' s += ' and ' + lst [ - 1 ] elif len ( lst ) == 2 : s = lst [ 0 ] + ' and ' + lst [ 1 ] elif len ( lst ) == 1 : s = lst [ 0 ] else : s = '' return s | Join a list of words in a grammatically correct way . |
17,904 | def _assemble_activeform ( stmt ) : subj_str = _assemble_agent_str ( stmt . agent ) if stmt . is_active : is_active_str = 'active' else : is_active_str = 'inactive' if stmt . activity == 'activity' : stmt_str = subj_str + ' is ' + is_active_str elif stmt . activity == 'kinase' : stmt_str = subj_str + ' is kinase-' + is_active_str elif stmt . activity == 'phosphatase' : stmt_str = subj_str + ' is phosphatase-' + is_active_str elif stmt . activity == 'catalytic' : stmt_str = subj_str + ' is catalytically ' + is_active_str elif stmt . activity == 'transcription' : stmt_str = subj_str + ' is transcriptionally ' + is_active_str elif stmt . activity == 'gtpbound' : stmt_str = subj_str + ' is GTP-bound ' + is_active_str return _make_sentence ( stmt_str ) | Assemble ActiveForm statements into text . |
17,905 | def _assemble_modification ( stmt ) : sub_str = _assemble_agent_str ( stmt . sub ) if stmt . enz is not None : enz_str = _assemble_agent_str ( stmt . enz ) if _get_is_direct ( stmt ) : mod_str = ' ' + _mod_process_verb ( stmt ) + ' ' else : mod_str = ' leads to the ' + _mod_process_noun ( stmt ) + ' of ' stmt_str = enz_str + mod_str + sub_str else : stmt_str = sub_str + ' is ' + _mod_state_stmt ( stmt ) if stmt . residue is not None : if stmt . position is None : mod_str = 'on ' + ist . amino_acids [ stmt . residue ] [ 'full_name' ] else : mod_str = 'on ' + stmt . residue + stmt . position else : mod_str = '' stmt_str += ' ' + mod_str return _make_sentence ( stmt_str ) | Assemble Modification statements into text . |
17,906 | def _assemble_association ( stmt ) : member_strs = [ _assemble_agent_str ( m . concept ) for m in stmt . members ] stmt_str = member_strs [ 0 ] + ' is associated with ' + _join_list ( member_strs [ 1 : ] ) return _make_sentence ( stmt_str ) | Assemble Association statements into text . |
17,907 | def _assemble_complex ( stmt ) : member_strs = [ _assemble_agent_str ( m ) for m in stmt . members ] stmt_str = member_strs [ 0 ] + ' binds ' + _join_list ( member_strs [ 1 : ] ) return _make_sentence ( stmt_str ) | Assemble Complex statements into text . |
17,908 | def _assemble_autophosphorylation ( stmt ) : enz_str = _assemble_agent_str ( stmt . enz ) stmt_str = enz_str + ' phosphorylates itself' if stmt . residue is not None : if stmt . position is None : mod_str = 'on ' + ist . amino_acids [ stmt . residue ] [ 'full_name' ] else : mod_str = 'on ' + stmt . residue + stmt . position else : mod_str = '' stmt_str += ' ' + mod_str return _make_sentence ( stmt_str ) | Assemble Autophosphorylation statements into text . |
17,909 | def _assemble_regulate_activity ( stmt ) : subj_str = _assemble_agent_str ( stmt . subj ) obj_str = _assemble_agent_str ( stmt . obj ) if stmt . is_activation : rel_str = ' activates ' else : rel_str = ' inhibits ' stmt_str = subj_str + rel_str + obj_str return _make_sentence ( stmt_str ) | Assemble RegulateActivity statements into text . |
17,910 | def _assemble_regulate_amount ( stmt ) : obj_str = _assemble_agent_str ( stmt . obj ) if stmt . subj is not None : subj_str = _assemble_agent_str ( stmt . subj ) if isinstance ( stmt , ist . IncreaseAmount ) : rel_str = ' increases the amount of ' elif isinstance ( stmt , ist . DecreaseAmount ) : rel_str = ' decreases the amount of ' stmt_str = subj_str + rel_str + obj_str else : if isinstance ( stmt , ist . IncreaseAmount ) : stmt_str = obj_str + ' is produced' elif isinstance ( stmt , ist . DecreaseAmount ) : stmt_str = obj_str + ' is degraded' return _make_sentence ( stmt_str ) | Assemble RegulateAmount statements into text . |
17,911 | def _assemble_translocation ( stmt ) : agent_str = _assemble_agent_str ( stmt . agent ) stmt_str = agent_str + ' translocates' if stmt . from_location is not None : stmt_str += ' from the ' + stmt . from_location if stmt . to_location is not None : stmt_str += ' to the ' + stmt . to_location return _make_sentence ( stmt_str ) | Assemble Translocation statements into text . |
17,912 | def _assemble_gap ( stmt ) : subj_str = _assemble_agent_str ( stmt . gap ) obj_str = _assemble_agent_str ( stmt . ras ) stmt_str = subj_str + ' is a GAP for ' + obj_str return _make_sentence ( stmt_str ) | Assemble Gap statements into text . |
17,913 | def _assemble_gef ( stmt ) : subj_str = _assemble_agent_str ( stmt . gef ) obj_str = _assemble_agent_str ( stmt . ras ) stmt_str = subj_str + ' is a GEF for ' + obj_str return _make_sentence ( stmt_str ) | Assemble Gef statements into text . |
17,914 | def _assemble_conversion ( stmt ) : reactants = _join_list ( [ _assemble_agent_str ( r ) for r in stmt . obj_from ] ) products = _join_list ( [ _assemble_agent_str ( r ) for r in stmt . obj_to ] ) if stmt . subj is not None : subj_str = _assemble_agent_str ( stmt . subj ) stmt_str = '%s catalyzes the conversion of %s into %s' % ( subj_str , reactants , products ) else : stmt_str = '%s is converted into %s' % ( reactants , products ) return _make_sentence ( stmt_str ) | Assemble a Conversion statement into text . |
17,915 | def _assemble_influence ( stmt ) : subj_str = _assemble_agent_str ( stmt . subj . concept ) obj_str = _assemble_agent_str ( stmt . obj . concept ) if stmt . subj . delta [ 'polarity' ] is not None : subj_delta_str = ' decrease' if stmt . subj . delta [ 'polarity' ] == - 1 else 'n increase' subj_str = 'a%s in %s' % ( subj_delta_str , subj_str ) if stmt . obj . delta [ 'polarity' ] is not None : obj_delta_str = ' decrease' if stmt . obj . delta [ 'polarity' ] == - 1 else 'n increase' obj_str = 'a%s in %s' % ( obj_delta_str , obj_str ) stmt_str = '%s causes %s' % ( subj_str , obj_str ) return _make_sentence ( stmt_str ) | Assemble an Influence statement into text . |
17,916 | def _make_sentence ( txt ) : txt = txt . strip ( ' ' ) txt = txt [ 0 ] . upper ( ) + txt [ 1 : ] + '.' return txt | Make a sentence from a piece of text . |
17,917 | def _get_is_hypothesis ( stmt ) : for ev in stmt . evidence : if not ev . epistemics . get ( 'hypothesis' ) is True : return True return False | Returns true if there is evidence that the statement is only hypothetical . If all of the evidences associated with the statement indicate a hypothetical interaction then we assume the interaction is hypothetical . |
17,918 | def make_model ( self ) : stmt_strs = [ ] for stmt in self . statements : if isinstance ( stmt , ist . Modification ) : stmt_strs . append ( _assemble_modification ( stmt ) ) elif isinstance ( stmt , ist . Autophosphorylation ) : stmt_strs . append ( _assemble_autophosphorylation ( stmt ) ) elif isinstance ( stmt , ist . Association ) : stmt_strs . append ( _assemble_association ( stmt ) ) elif isinstance ( stmt , ist . Complex ) : stmt_strs . append ( _assemble_complex ( stmt ) ) elif isinstance ( stmt , ist . Influence ) : stmt_strs . append ( _assemble_influence ( stmt ) ) elif isinstance ( stmt , ist . RegulateActivity ) : stmt_strs . append ( _assemble_regulate_activity ( stmt ) ) elif isinstance ( stmt , ist . RegulateAmount ) : stmt_strs . append ( _assemble_regulate_amount ( stmt ) ) elif isinstance ( stmt , ist . ActiveForm ) : stmt_strs . append ( _assemble_activeform ( stmt ) ) elif isinstance ( stmt , ist . Translocation ) : stmt_strs . append ( _assemble_translocation ( stmt ) ) elif isinstance ( stmt , ist . Gef ) : stmt_strs . append ( _assemble_gef ( stmt ) ) elif isinstance ( stmt , ist . Gap ) : stmt_strs . append ( _assemble_gap ( stmt ) ) elif isinstance ( stmt , ist . Conversion ) : stmt_strs . append ( _assemble_conversion ( stmt ) ) else : logger . warning ( 'Unhandled statement type: %s.' % type ( stmt ) ) if stmt_strs : return ' ' . join ( stmt_strs ) else : return '' | Assemble text from the set of collected INDRA Statements . |
17,919 | def add_statements ( self , stmts ) : for stmt in stmts : if not self . statement_exists ( stmt ) : self . statements . append ( stmt ) | Add INDRA Statements to the assembler s list of statements . |
17,920 | def make_model ( self ) : ppa = PysbPreassembler ( self . statements ) ppa . replace_activities ( ) self . statements = ppa . statements self . sbgn = emaker . sbgn ( ) self . _map = emaker . map ( ) self . sbgn . append ( self . _map ) for stmt in self . statements : if isinstance ( stmt , Modification ) : self . _assemble_modification ( stmt ) elif isinstance ( stmt , RegulateActivity ) : self . _assemble_regulateactivity ( stmt ) elif isinstance ( stmt , RegulateAmount ) : self . _assemble_regulateamount ( stmt ) elif isinstance ( stmt , Complex ) : self . _assemble_complex ( stmt ) elif isinstance ( stmt , ActiveForm ) : pass else : logger . warning ( "Unhandled Statement type %s" % type ( stmt ) ) continue sbgn_str = self . print_model ( ) return sbgn_str | Assemble the SBGN model from the collected INDRA Statements . |
17,921 | def print_model ( self , pretty = True , encoding = 'utf8' ) : return lxml . etree . tostring ( self . sbgn , pretty_print = pretty , encoding = encoding , xml_declaration = True ) | Return the assembled SBGN model as an XML string . |
17,922 | def save_model ( self , file_name = 'model.sbgn' ) : model = self . print_model ( ) with open ( file_name , 'wb' ) as fh : fh . write ( model ) | Save the assembled SBGN model in a file . |
17,923 | def _glyph_for_complex_pattern ( self , pattern ) : monomer_glyphs = [ ] for monomer_pattern in pattern . monomer_patterns : glyph = self . _glyph_for_monomer_pattern ( monomer_pattern ) monomer_glyphs . append ( glyph ) if len ( monomer_glyphs ) > 1 : pattern . matches_key = lambda : str ( pattern ) agent_id = self . _make_agent_id ( pattern ) complex_glyph = emaker . glyph ( emaker . bbox ( ** self . complex_style ) , class_ ( 'complex' ) , id = agent_id ) for glyph in monomer_glyphs : glyph . attrib [ 'id' ] = agent_id + glyph . attrib [ 'id' ] complex_glyph . append ( glyph ) return complex_glyph return monomer_glyphs [ 0 ] | Add glyph and member glyphs for a PySB ComplexPattern . |
17,924 | def _glyph_for_monomer_pattern ( self , pattern ) : pattern . matches_key = lambda : str ( pattern ) agent_id = self . _make_agent_id ( pattern ) if pattern . monomer . name in ( '__source' , '__sink' ) : return None glyph = emaker . glyph ( emaker . label ( text = pattern . monomer . name ) , emaker . bbox ( ** self . monomer_style ) , class_ ( 'macromolecule' ) , id = agent_id ) for site , value in pattern . site_conditions . items ( ) : if value is None or isinstance ( value , int ) : continue if site == 'phospho' : site = 'p' elif site == 'activity' : site = 'act' if value == 'active' : value = 'a' elif value == 'inactive' : value = 'i' state = emaker . state ( variable = site , value = value ) state_glyph = emaker . glyph ( state , emaker . bbox ( ** self . entity_state_style ) , class_ ( 'state variable' ) , id = self . _make_id ( ) ) glyph . append ( state_glyph ) return glyph | Add glyph for a PySB MonomerPattern . |
17,925 | def load_go_graph ( go_fname ) : global _go_graph if _go_graph is None : _go_graph = rdflib . Graph ( ) logger . info ( "Parsing GO OWL file" ) _go_graph . parse ( os . path . abspath ( go_fname ) ) return _go_graph | Load the GO data from an OWL file and parse into an RDF graph . |
17,926 | def update_id_mappings ( g ) : g = load_go_graph ( go_owl_path ) query = _prefixes + logger . info ( "Querying for GO ID mappings" ) res = g . query ( query ) mappings = [ ] for id_lit , label_lit in sorted ( res , key = lambda x : x [ 0 ] ) : mappings . append ( ( id_lit . value , label_lit . value ) ) write_unicode_csv ( go_mappings_file , mappings , delimiter = '\t' ) | Compile all ID - > label mappings and save to a TSV file . |
17,927 | def get_default_ndex_cred ( ndex_cred ) : if ndex_cred : username = ndex_cred . get ( 'user' ) password = ndex_cred . get ( 'password' ) if username is not None and password is not None : return username , password username = get_config ( 'NDEX_USERNAME' ) password = get_config ( 'NDEX_PASSWORD' ) return username , password | Gets the NDEx credentials from the dict or tries the environment if None |
17,928 | def send_request ( ndex_service_url , params , is_json = True , use_get = False ) : if use_get : res = requests . get ( ndex_service_url , json = params ) else : res = requests . post ( ndex_service_url , json = params ) status = res . status_code if status == 200 : if is_json : return res . json ( ) else : return res . text elif status != 300 : logger . error ( 'Request returned with code %d' % status ) return None task_id = res . json ( ) . get ( 'task_id' ) logger . info ( 'NDEx task submitted...' ) time_used = 0 try : while status != 200 : res = requests . get ( ndex_base_url + '/task/' + task_id ) status = res . status_code if status != 200 : time . sleep ( 5 ) time_used += 5 except KeyError : next return None logger . info ( 'NDEx task complete.' ) if is_json : return res . json ( ) else : return res . text | Send a request to the NDEx server . |
17,929 | def update_network ( cx_str , network_id , ndex_cred = None ) : server = 'http://public.ndexbio.org' username , password = get_default_ndex_cred ( ndex_cred ) nd = ndex2 . client . Ndex2 ( server , username , password ) try : logger . info ( 'Getting network summary...' ) summary = nd . get_network_summary ( network_id ) except Exception as e : logger . error ( 'Could not get NDEx network summary.' ) logger . error ( e ) return try : logger . info ( 'Updating network...' ) cx_stream = io . BytesIO ( cx_str . encode ( 'utf-8' ) ) nd . update_cx_network ( cx_stream , network_id ) except Exception as e : logger . error ( 'Could not update NDEx network.' ) logger . error ( e ) return ver_str = summary . get ( 'version' ) new_ver = _increment_ndex_ver ( ver_str ) profile = { 'name' : summary . get ( 'name' ) , 'description' : summary . get ( 'description' ) , 'version' : new_ver , } logger . info ( 'Updating NDEx network (%s) profile to %s' , network_id , profile ) profile_retries = 5 for _ in range ( profile_retries ) : try : time . sleep ( 5 ) nd . update_network_profile ( network_id , profile ) break except Exception as e : logger . error ( 'Could not update NDEx network profile.' ) logger . error ( e ) set_style ( network_id , ndex_cred ) | Update an existing CX network on NDEx with new CX content . |
17,930 | def set_style ( network_id , ndex_cred = None , template_id = None ) : if not template_id : template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf" server = 'http://public.ndexbio.org' username , password = get_default_ndex_cred ( ndex_cred ) source_network = ndex2 . create_nice_cx_from_server ( username = username , password = password , uuid = network_id , server = server ) source_network . apply_template ( server , template_id ) source_network . update_to ( network_id , server = server , username = username , password = password ) | Set the style of the network to a given template network s style |
17,931 | def initialize ( self , cfg_file = None , mode = None ) : self . sim = ScipyOdeSimulator ( self . model ) self . state = numpy . array ( copy . copy ( self . sim . initials ) [ 0 ] ) self . time = numpy . array ( 0.0 ) self . status = 'initialized' | Initialize the model for simulation possibly given a config file . |
17,932 | def update ( self , dt = None ) : dt = dt if ( dt is not None and dt > 0 ) else self . dt tspan = [ 0 , dt ] res = self . sim . run ( tspan = tspan , initials = self . state ) self . state = res . species [ - 1 ] self . time += dt if self . time > self . stop_time : self . DONE = True print ( ( self . time , self . state ) ) self . time_course . append ( ( self . time . copy ( ) , self . state . copy ( ) ) ) | Simulate the model for a given time interval . |
17,933 | def set_value ( self , var_name , value ) : if var_name in self . outside_name_map : var_name = self . outside_name_map [ var_name ] print ( '%s=%.5f' % ( var_name , 1e9 * value ) ) if var_name == 'Precipitation' : value = 1e9 * value species_idx = self . species_name_map [ var_name ] self . state [ species_idx ] = value | Set the value of a given variable to a given value . |
17,934 | def get_value ( self , var_name ) : if var_name in self . outside_name_map : var_name = self . outside_name_map [ var_name ] species_idx = self . species_name_map [ var_name ] return self . state [ species_idx ] | Return the value of a given variable . |
17,935 | def get_input_var_names ( self ) : in_vars = copy . copy ( self . input_vars ) for idx , var in enumerate ( in_vars ) : if self . _map_in_out ( var ) is not None : in_vars [ idx ] = self . _map_in_out ( var ) return in_vars | Return a list of variables names that can be set as input . |
17,936 | def get_output_var_names ( self ) : all_vars = list ( self . species_name_map . keys ( ) ) output_vars = list ( set ( all_vars ) - set ( self . input_vars ) ) for idx , var in enumerate ( output_vars ) : if self . _map_in_out ( var ) is not None : output_vars [ idx ] = self . _map_in_out ( var ) return output_vars | Return a list of variables names that can be read as output . |
17,937 | def make_repository_component ( self ) : component = etree . Element ( 'component' ) comp_name = etree . Element ( 'comp_name' ) comp_name . text = self . model . name component . append ( comp_name ) mod_path = etree . Element ( 'module_path' ) mod_path . text = os . getcwd ( ) component . append ( mod_path ) mod_name = etree . Element ( 'module_name' ) mod_name . text = self . model . name component . append ( mod_name ) class_name = etree . Element ( 'class_name' ) class_name . text = 'model_class' component . append ( class_name ) model_name = etree . Element ( 'model_name' ) model_name . text = self . model . name component . append ( model_name ) lang = etree . Element ( 'language' ) lang . text = 'python' component . append ( lang ) ver = etree . Element ( 'version' ) ver . text = self . get_attribute ( 'version' ) component . append ( ver ) au = etree . Element ( 'author' ) au . text = self . get_attribute ( 'author_name' ) component . append ( au ) hu = etree . Element ( 'help_url' ) hu . text = 'http://github.com/sorgerlab/indra' component . append ( hu ) for tag in ( 'cfg_template' , 'time_step_type' , 'time_units' , 'grid_type' , 'description' , 'comp_type' , 'uses_types' ) : elem = etree . Element ( tag ) elem . text = tag component . append ( elem ) return etree . tounicode ( component , pretty_print = True ) | Return an XML string representing this BMI in a workflow . |
17,938 | def _map_in_out ( self , inside_var_name ) : for out_name , in_name in self . outside_name_map . items ( ) : if inside_var_name == in_name : return out_name return None | Return the external name of a variable mapped from inside . |
17,939 | def read_pmid ( pmid , source , cont_path , sparser_version , outbuf = None , cleanup = True ) : "Run sparser on a single pmid." signal . signal ( signal . SIGALRM , _timeout_handler ) signal . alarm ( 60 ) try : if ( source is 'content_not_found' or source . startswith ( 'unhandled_content_type' ) or source . endswith ( 'failure' ) ) : logger . info ( 'No content read for %s.' % pmid ) return if cont_path . endswith ( '.nxml' ) and source . startswith ( 'pmc' ) : new_fname = 'PMC%s%d.nxml' % ( pmid , mp . current_process ( ) . pid ) os . rename ( cont_path , new_fname ) try : sp = sparser . process_nxml_file ( new_fname , outbuf = outbuf , cleanup = cleanup ) finally : if cleanup and os . path . exists ( new_fname ) : os . remove ( new_fname ) elif cont_path . endswith ( '.txt' ) : content_str = '' with open ( cont_path , 'r' ) as f : content_str = f . read ( ) sp = sparser . process_text ( content_str , outbuf = outbuf , cleanup = cleanup ) signal . alarm ( 0 ) except Exception as e : logger . error ( 'Failed to process data for %s.' % pmid ) logger . exception ( e ) signal . alarm ( 0 ) return if sp is None : logger . error ( 'Failed to run sparser on pmid: %s.' % pmid ) return sp . set_statements_pmid ( pmid ) s3_client . put_reader_output ( 'sparser' , sp . json_stmts , pmid , sparser_version , source ) return sp . statements | Run sparser on a single pmid . |
17,940 | def get_stmts ( pmids_unread , cleanup = True , sparser_version = None ) : "Run sparser on the pmids in pmids_unread." if sparser_version is None : sparser_version = sparser . get_version ( ) stmts = { } now = datetime . now ( ) outbuf_fname = 'sparser_%s_%s.log' % ( now . strftime ( '%Y%m%d-%H%M%S' ) , mp . current_process ( ) . pid , ) outbuf = open ( outbuf_fname , 'wb' ) try : for pmid , result in pmids_unread . items ( ) : logger . info ( 'Reading %s' % pmid ) source = result [ 'content_source' ] cont_path = result [ 'content_path' ] outbuf . write ( ( '\nReading pmid %s from %s located at %s.\n' % ( pmid , source , cont_path ) ) . encode ( 'utf-8' ) ) outbuf . flush ( ) some_stmts = read_pmid ( pmid , source , cont_path , sparser_version , outbuf , cleanup ) if some_stmts is not None : stmts [ pmid ] = some_stmts else : continue except KeyboardInterrupt as e : logger . exception ( e ) logger . info ( 'Caught keyboard interrupt...stopping. \n' 'Results so far will be pickled unless ' 'Keyboard interupt is hit again.' ) finally : outbuf . close ( ) print ( "Sparser logs may be found in %s" % outbuf_fname ) return stmts | Run sparser on the pmids in pmids_unread . |
17,941 | def run_sparser ( pmid_list , tmp_dir , num_cores , start_index , end_index , force_read , force_fulltext , cleanup = True , verbose = True ) : 'Run the sparser reader on the pmids in pmid_list.' reader_version = sparser . get_version ( ) _ , _ , _ , pmids_read , pmids_unread , _ = get_content_to_read ( pmid_list , start_index , end_index , tmp_dir , num_cores , force_fulltext , force_read , 'sparser' , reader_version ) logger . info ( 'Adjusting num cores to length of pmid_list.' ) num_cores = min ( len ( pmid_list ) , num_cores ) logger . info ( 'Adjusted...' ) if num_cores is 1 : stmts = get_stmts ( pmids_unread , cleanup = cleanup ) stmts . update ( { pmid : get_stmts_from_cache ( pmid ) [ pmid ] for pmid in pmids_read . keys ( ) } ) elif num_cores > 1 : logger . info ( "Starting a pool with %d cores." % num_cores ) pool = mp . Pool ( num_cores ) pmids_to_read = list ( pmids_unread . keys ( ) ) N = len ( pmids_unread ) dn = int ( N / num_cores ) logger . info ( "Breaking pmids into batches." ) batches = [ ] for i in range ( num_cores ) : batches . append ( { k : pmids_unread [ k ] for k in pmids_to_read [ i * dn : min ( ( i + 1 ) * dn , N ) ] } ) get_stmts_func = functools . partial ( get_stmts , cleanup = cleanup , sparser_version = reader_version ) logger . info ( "Mapping get_stmts onto pool." ) unread_res = pool . map ( get_stmts_func , batches ) logger . info ( 'len(unread_res)=%d' % len ( unread_res ) ) read_res = pool . map ( get_stmts_from_cache , pmids_read . keys ( ) ) logger . info ( 'len(read_res)=%d' % len ( read_res ) ) pool . close ( ) logger . info ( 'Multiprocessing pool closed.' ) pool . join ( ) logger . info ( 'Multiprocessing pool joined.' ) stmts = { pmid : stmt_list for res_dict in unread_res + read_res for pmid , stmt_list in res_dict . items ( ) } logger . info ( 'len(stmts)=%d' % len ( stmts ) ) return ( stmts , pmids_unread ) | Run the sparser reader on the pmids in pmid_list . |
17,942 | def get_all_descendants ( parent ) : children = parent . __subclasses__ ( ) descendants = children [ : ] for child in children : descendants += get_all_descendants ( child ) return descendants | Get all the descendants of a parent class recursively . |
17,943 | def get_type_hierarchy ( s ) : tp = type ( s ) if not isinstance ( s , type ) else s p_list = [ tp ] for p in tp . __bases__ : if p is not Statement : p_list . extend ( get_type_hierarchy ( p ) ) else : p_list . append ( p ) return p_list | Get the sequence of parents from s to Statement . |
17,944 | def get_statement_by_name ( stmt_name ) : stmt_classes = get_all_descendants ( Statement ) for stmt_class in stmt_classes : if stmt_class . __name__ . lower ( ) == stmt_name . lower ( ) : return stmt_class raise NotAStatementName ( '\"%s\" is not recognized as a statement type!' % stmt_name ) | Get a statement class given the name of the statement class . |
17,945 | def get_unresolved_support_uuids ( stmts ) : return { s . uuid for stmt in stmts for s in stmt . supports + stmt . supported_by if isinstance ( s , Unresolved ) } | Get uuids unresolved in support from stmts from stmts_from_json . |
17,946 | def stmt_type ( obj , mk = True ) : if isinstance ( obj , Statement ) and mk : return type ( obj ) else : return type ( obj ) . __name__ | Return standardized backwards compatible object type String . |
17,947 | def get_hash ( self , shallow = True , refresh = False ) : if shallow : if not hasattr ( self , '_shallow_hash' ) or self . _shallow_hash is None or refresh : self . _shallow_hash = make_hash ( self . matches_key ( ) , 14 ) ret = self . _shallow_hash else : if not hasattr ( self , '_full_hash' ) or self . _full_hash is None or refresh : ev_mk_list = sorted ( [ ev . matches_key ( ) for ev in self . evidence ] ) self . _full_hash = make_hash ( self . matches_key ( ) + str ( ev_mk_list ) , 16 ) ret = self . _full_hash return ret | Get a hash for this Statement . |
17,948 | def _tag_evidence ( self ) : h = self . get_hash ( shallow = False ) for ev in self . evidence : ev . stmt_tag = h return | Set all the Evidence stmt_tag to my deep matches - key hash . |
17,949 | def agent_list ( self , deep_sorted = False ) : ag_list = [ ] for ag_name in self . _agent_order : ag_attr = getattr ( self , ag_name ) if isinstance ( ag_attr , Concept ) or ag_attr is None : ag_list . append ( ag_attr ) elif isinstance ( ag_attr , list ) : if not all ( [ isinstance ( ag , Concept ) for ag in ag_attr ] ) : raise TypeError ( "Expected all elements of list to be Agent " "and/or Concept, but got: %s" % { type ( ag ) for ag in ag_attr } ) if deep_sorted : ag_attr = sorted_agents ( ag_attr ) ag_list . extend ( ag_attr ) else : raise TypeError ( "Expected type Agent, Concept, or list, got " "type %s." % type ( ag_attr ) ) return ag_list | Get the canonicallized agent list . |
17,950 | def to_json ( self , use_sbo = False ) : stmt_type = type ( self ) . __name__ all_stmts = [ self ] + self . supports + self . supported_by for st in all_stmts : if not hasattr ( st , 'uuid' ) : st . uuid = '%s' % uuid . uuid4 ( ) json_dict = _o ( type = stmt_type ) json_dict [ 'belief' ] = self . belief if self . evidence : evidence = [ ev . to_json ( ) for ev in self . evidence ] json_dict [ 'evidence' ] = evidence json_dict [ 'id' ] = '%s' % self . uuid if self . supports : json_dict [ 'supports' ] = [ '%s' % st . uuid for st in self . supports ] if self . supported_by : json_dict [ 'supported_by' ] = [ '%s' % st . uuid for st in self . supported_by ] def get_sbo_term ( cls ) : sbo_term = stmt_sbo_map . get ( cls . __name__ . lower ( ) ) while not sbo_term : cls = cls . __bases__ [ 0 ] sbo_term = stmt_sbo_map . get ( cls . __name__ . lower ( ) ) return sbo_term if use_sbo : sbo_term = get_sbo_term ( self . __class__ ) json_dict [ 'sbo' ] = 'http://identifiers.org/sbo/SBO:%s' % sbo_term return json_dict | Return serialized Statement as a JSON dict . |
17,951 | def to_graph ( self ) : def json_node ( graph , element , prefix ) : if not element : return None node_id = '|' . join ( prefix ) if isinstance ( element , list ) : graph . add_node ( node_id , label = '' ) for i , sub_element in enumerate ( element ) : sub_id = json_node ( graph , sub_element , prefix + [ '%s' % i ] ) if sub_id : graph . add_edge ( node_id , sub_id , label = '' ) elif isinstance ( element , dict ) : graph . add_node ( node_id , label = '' ) for k , v in element . items ( ) : if k == 'id' : continue elif k == 'name' : graph . node [ node_id ] [ 'label' ] = v continue elif k == 'type' : graph . node [ node_id ] [ 'label' ] = v continue sub_id = json_node ( graph , v , prefix + [ '%s' % k ] ) if sub_id : graph . add_edge ( node_id , sub_id , label = ( '%s' % k ) ) else : if isinstance ( element , basestring ) and element . startswith ( 'http' ) : element = element . split ( '/' ) [ - 1 ] graph . add_node ( node_id , label = ( '%s' % str ( element ) ) ) return node_id jd = self . to_json ( ) graph = networkx . DiGraph ( ) json_node ( graph , jd , [ '%s' % self . uuid ] ) return graph | Return Statement as a networkx graph . |
17,952 | def make_generic_copy ( self , deeply = False ) : if deeply : kwargs = deepcopy ( self . __dict__ ) else : kwargs = self . __dict__ . copy ( ) for attr in [ 'evidence' , 'belief' , 'uuid' , 'supports' , 'supported_by' , 'is_activation' ] : kwargs . pop ( attr , None ) for attr in [ '_full_hash' , '_shallow_hash' ] : my_hash = kwargs . pop ( attr , None ) my_shallow_hash = kwargs . pop ( attr , None ) for attr in self . _agent_order : attr_value = kwargs . get ( attr ) if isinstance ( attr_value , list ) : kwargs [ attr ] = sorted_agents ( attr_value ) new_instance = self . __class__ ( ** kwargs ) new_instance . _full_hash = my_hash new_instance . _shallow_hash = my_shallow_hash return new_instance | Make a new matching Statement with no provenance . |
17,953 | def load_lincs_csv ( url ) : resp = requests . get ( url , params = { 'output_type' : '.csv' } , timeout = 120 ) resp . raise_for_status ( ) if sys . version_info [ 0 ] < 3 : csv_io = BytesIO ( resp . content ) else : csv_io = StringIO ( resp . text ) data_rows = list ( read_unicode_csv_fileobj ( csv_io , delimiter = ',' ) ) headers = data_rows [ 0 ] return [ { header : val for header , val in zip ( headers , line_elements ) } for line_elements in data_rows [ 1 : ] ] | Helper function to turn csv rows into dicts . |
17,954 | def get_small_molecule_name ( self , hms_lincs_id ) : entry = self . _get_entry_by_id ( self . _sm_data , hms_lincs_id ) if not entry : return None name = entry [ 'Name' ] return name | Get the name of a small molecule from the LINCS sm metadata . |
17,955 | def get_small_molecule_refs ( self , hms_lincs_id ) : refs = { 'HMS-LINCS' : hms_lincs_id } entry = self . _get_entry_by_id ( self . _sm_data , hms_lincs_id ) if not entry : return refs mappings = dict ( chembl = 'ChEMBL ID' , chebi = 'ChEBI ID' , pubchem = 'PubChem CID' , lincs = 'LINCS ID' ) for k , v in mappings . items ( ) : if entry . get ( v ) : refs [ k . upper ( ) ] = entry . get ( v ) return refs | Get the id refs of a small molecule from the LINCS sm metadata . |
17,956 | def get_protein_refs ( self , hms_lincs_id ) : refs = { 'HMS-LINCS' : hms_lincs_id } entry = self . _get_entry_by_id ( self . _prot_data , hms_lincs_id ) if not entry : return refs mappings = dict ( egid = 'Gene ID' , up = 'UniProt ID' ) for k , v in mappings . items ( ) : if entry . get ( v ) : refs [ k . upper ( ) ] = entry . get ( v ) return refs | Get the refs for a protein from the LINCs protein metadata . |
17,957 | def get_bel_stmts ( self , filter = False ) : if self . basename is not None : bel_stmt_path = '%s_bel_stmts.pkl' % self . basename if self . basename is not None and os . path . isfile ( bel_stmt_path ) : logger . info ( "Loading BEL statements from %s" % bel_stmt_path ) with open ( bel_stmt_path , 'rb' ) as f : bel_statements = pickle . load ( f ) else : bel_proc = bel . process_pybel_neighborhood ( self . gene_list , network_file = self . bel_corpus ) bel_statements = bel_proc . statements if self . basename is not None : with open ( bel_stmt_path , 'wb' ) as f : pickle . dump ( bel_statements , f ) if filter : if len ( self . gene_list ) > 1 : bel_statements = ac . filter_gene_list ( bel_statements , self . gene_list , 'all' ) return bel_statements | Get relevant statements from the BEL large corpus . |
17,958 | def get_biopax_stmts ( self , filter = False , query = 'pathsbetween' , database_filter = None ) : if self . basename is not None : biopax_stmt_path = '%s_biopax_stmts.pkl' % self . basename biopax_ras_owl_path = '%s_pc_pathsbetween.owl' % self . basename if self . basename is not None and os . path . isfile ( biopax_stmt_path ) : logger . info ( "Loading Biopax statements from %s" % biopax_stmt_path ) with open ( biopax_stmt_path , 'rb' ) as f : bp_statements = pickle . load ( f ) return bp_statements if self . basename is not None and os . path . isfile ( biopax_ras_owl_path ) : logger . info ( "Loading Biopax from OWL file %s" % biopax_ras_owl_path ) bp = biopax . process_owl ( biopax_ras_owl_path ) else : if ( len ( self . gene_list ) < 2 ) and ( query == 'pathsbetween' ) : logger . warning ( 'Using neighborhood query for one gene.' ) query = 'neighborhood' if query == 'pathsbetween' : if len ( self . gene_list ) > 60 : block_size = 60 else : block_size = None bp = biopax . process_pc_pathsbetween ( self . gene_list , database_filter = database_filter , block_size = block_size ) elif query == 'neighborhood' : bp = biopax . process_pc_neighborhood ( self . gene_list , database_filter = database_filter ) else : logger . error ( 'Invalid query type: %s' % query ) return [ ] if self . basename is not None : bp . save_model ( biopax_ras_owl_path ) if self . basename is not None : with open ( biopax_stmt_path , 'wb' ) as f : pickle . dump ( bp . statements , f ) if filter : policy = 'one' if len ( self . gene_list ) > 1 else 'all' stmts = ac . filter_gene_list ( bp . statements , self . gene_list , policy ) else : stmts = bp . statements return stmts | Get relevant statements from Pathway Commons . |
17,959 | def get_statements ( self , filter = False ) : bp_stmts = self . get_biopax_stmts ( filter = filter ) bel_stmts = self . get_bel_stmts ( filter = filter ) return bp_stmts + bel_stmts | Return the combined list of statements from BEL and Pathway Commons . |
17,960 | def run_preassembly ( self , stmts , print_summary = True ) : pa1 = Preassembler ( hierarchies , stmts ) logger . info ( "Combining duplicates" ) pa1 . combine_duplicates ( ) logger . info ( "Mapping sites" ) ( valid , mapped ) = sm . map_sites ( pa1 . unique_stmts ) correctly_mapped_stmts = [ ] for ms in mapped : if all ( [ True if mm [ 1 ] is not None else False for mm in ms . mapped_mods ] ) : correctly_mapped_stmts . append ( ms . mapped_stmt ) mapped_stmts = valid + correctly_mapped_stmts pa2 = Preassembler ( hierarchies , mapped_stmts ) logger . info ( "Combining duplicates again" ) pa2 . combine_duplicates ( ) pa2 . combine_related ( ) self . results = { } self . results [ 'raw' ] = stmts self . results [ 'duplicates1' ] = pa1 . unique_stmts self . results [ 'valid' ] = valid self . results [ 'mapped' ] = mapped self . results [ 'mapped_stmts' ] = mapped_stmts self . results [ 'duplicates2' ] = pa2 . unique_stmts self . results [ 'related2' ] = pa2 . related_stmts if print_summary : logger . info ( "\nStarting number of statements: %d" % len ( stmts ) ) logger . info ( "After duplicate removal: %d" % len ( pa1 . unique_stmts ) ) logger . info ( "Unique statements with valid sites: %d" % len ( valid ) ) logger . info ( "Unique statements with invalid sites: %d" % len ( mapped ) ) logger . info ( "After post-mapping duplicate removal: %d" % len ( pa2 . unique_stmts ) ) logger . info ( "After combining related statements: %d" % len ( pa2 . related_stmts ) ) if self . basename is not None : results_filename = '%s_results.pkl' % self . basename with open ( results_filename , 'wb' ) as f : pickle . dump ( self . results , f ) return self . results | Run complete preassembly procedure on the given statements . |
17,961 | def _get_grounding ( entity ) : db_refs = { 'TEXT' : entity [ 'text' ] } groundings = entity . get ( 'grounding' ) if not groundings : return db_refs def get_ont_concept ( concept ) : if concept . startswith ( '/' ) : concept = concept [ 1 : ] concept = concept . replace ( ' ' , '_' ) while concept not in hume_onto_entries : parts = concept . split ( '/' ) if len ( parts ) == 1 : break concept = '/' . join ( parts [ : - 1 ] ) return concept raw_grounding_entries = [ ( get_ont_concept ( g [ 'ontologyConcept' ] ) , g [ 'value' ] ) for g in groundings ] grounding_dict = { } for cat , score in raw_grounding_entries : if ( cat not in grounding_dict ) or ( score > grounding_dict [ cat ] ) : grounding_dict [ cat ] = score grounding_entries = sorted ( list ( set ( grounding_dict . items ( ) ) ) , key = lambda x : ( x [ 1 ] , x [ 0 ] . count ( '/' ) , x [ 0 ] ) , reverse = True ) if grounding_entries : db_refs [ 'HUME' ] = grounding_entries return db_refs | Return Hume grounding . |
17,962 | def _find_relations ( self ) : extractions = list ( self . tree . execute ( "$.extractions[(@.@type is 'Extraction')]" ) ) relations = [ ] for e in extractions : label_set = set ( e . get ( 'labels' , [ ] ) ) if 'DirectedRelation' in label_set : self . relation_dict [ e [ '@id' ] ] = e subtype = e . get ( 'subtype' ) if any ( t in subtype for t in polarities . keys ( ) ) : relations . append ( ( subtype , e ) ) if { 'Event' , 'Entity' } & label_set : self . concept_dict [ e [ '@id' ] ] = e if not relations and not self . relation_dict : logger . info ( "No relations found." ) else : logger . info ( '%d relations of types %s found' % ( len ( relations ) , ', ' . join ( polarities . keys ( ) ) ) ) logger . info ( '%d relations in dict.' % len ( self . relation_dict ) ) logger . info ( '%d concepts found.' % len ( self . concept_dict ) ) return relations | Find all relevant relation elements and return them in a list . |
17,963 | def _get_documents ( self ) : documents = self . tree . execute ( "$.documents" ) for doc in documents : sentences = { s [ '@id' ] : s [ 'text' ] for s in doc . get ( 'sentences' , [ ] ) } self . document_dict [ doc [ '@id' ] ] = { 'sentences' : sentences , 'location' : doc [ 'location' ] } | Populate sentences attribute with a dict keyed by document id . |
17,964 | def _make_context ( self , entity ) : loc_context = None time_context = None for argument in entity [ "arguments" ] : if argument [ "type" ] == "place" : entity_id = argument [ "value" ] [ "@id" ] loc_entity = self . concept_dict [ entity_id ] place = loc_entity . get ( "canonicalName" ) if not place : place = loc_entity [ 'text' ] geo_id = loc_entity . get ( 'geoname_id' ) loc_context = RefContext ( name = place , db_refs = { "GEOID" : geo_id } ) if argument [ "type" ] == "time" : entity_id = argument [ "value" ] [ "@id" ] temporal_entity = self . concept_dict [ entity_id ] text = temporal_entity [ 'mentions' ] [ 0 ] [ 'text' ] if len ( temporal_entity . get ( "timeInterval" , [ ] ) ) < 1 : time_context = TimeContext ( text = text ) continue time = temporal_entity [ "timeInterval" ] [ 0 ] start = datetime . strptime ( time [ 'start' ] , '%Y-%m-%dT%H:%M' ) end = datetime . strptime ( time [ 'end' ] , '%Y-%m-%dT%H:%M' ) duration = int ( time [ 'duration' ] ) time_context = TimeContext ( text = text , start = start , end = end , duration = duration ) context = None if loc_context or time_context : context = WorldContext ( time = time_context , geo_location = loc_context ) return context | Get place and time info from the json for this entity . |
17,965 | def _make_concept ( self , entity ) : name = self . _sanitize ( entity [ 'canonicalName' ] ) db_refs = _get_grounding ( entity ) concept = Concept ( name , db_refs = db_refs ) metadata = { arg [ 'type' ] : arg [ 'value' ] [ '@id' ] for arg in entity [ 'arguments' ] } return concept , metadata | Return Concept from a Hume entity . |
17,966 | def _get_event_and_context ( self , event , arg_type ) : eid = _choose_id ( event , arg_type ) ev = self . concept_dict [ eid ] concept , metadata = self . _make_concept ( ev ) ev_delta = { 'adjectives' : [ ] , 'states' : get_states ( ev ) , 'polarity' : get_polarity ( ev ) } context = self . _make_context ( ev ) event_obj = Event ( concept , delta = ev_delta , context = context ) return event_obj | Return an INDRA Event based on an event entry . |
17,967 | def _get_evidence ( self , event , adjectives ) : provenance = event . get ( 'provenance' ) doc_id = provenance [ 0 ] [ 'document' ] [ '@id' ] sent_id = provenance [ 0 ] [ 'sentence' ] text = self . document_dict [ doc_id ] [ 'sentences' ] [ sent_id ] text = self . _sanitize ( text ) bounds = [ provenance [ 0 ] [ 'documentCharPositions' ] [ k ] for k in [ 'start' , 'end' ] ] annotations = { 'found_by' : event . get ( 'rule' ) , 'provenance' : provenance , 'event_type' : os . path . basename ( event . get ( 'type' ) ) , 'adjectives' : adjectives , 'bounds' : bounds } location = self . document_dict [ doc_id ] [ 'location' ] ev = Evidence ( source_api = 'hume' , text = text , annotations = annotations , pmid = location ) return [ ev ] | Return the Evidence object for the INDRA Statement . |
17,968 | def _is_statement_in_list ( new_stmt , old_stmt_list ) : for old_stmt in old_stmt_list : if old_stmt . equals ( new_stmt ) : return True elif old_stmt . evidence_equals ( new_stmt ) and old_stmt . matches ( new_stmt ) : if isinstance ( new_stmt , Complex ) : agent_pairs = zip ( old_stmt . sorted_members ( ) , new_stmt . sorted_members ( ) ) else : agent_pairs = zip ( old_stmt . agent_list ( ) , new_stmt . agent_list ( ) ) for ag_old , ag_new in agent_pairs : s_old = set ( ag_old . db_refs . items ( ) ) s_new = set ( ag_new . db_refs . items ( ) ) if s_old == s_new : continue if s_old > s_new : return True if s_new > s_old : ag_old . db_refs . update ( ag_new . db_refs ) return True if _fix_different_refs ( ag_old , ag_new , 'CHEBI' ) : return _is_statement_in_list ( new_stmt , old_stmt_list ) if _fix_different_refs ( ag_old , ag_new , 'UMLS' ) : return _is_statement_in_list ( new_stmt , old_stmt_list ) logger . warning ( "Found an unexpected kind of duplicate. " "Ignoring it." ) return True return True elif old_stmt . get_hash ( True , True ) == new_stmt . get_hash ( True , True ) : e_old = old_stmt . evidence [ 0 ] e_new = new_stmt . evidence [ 0 ] if e_old . annotations [ 'last_verb' ] is None : e_old . annotations [ 'last_verb' ] = e_new . annotations [ 'last_verb' ] if e_old . get_source_hash ( True ) == e_new . get_source_hash ( True ) : return True return False | Return True of given statement is equivalent to on in a list |
17,969 | def normalize_medscan_name ( name ) : suffix = ' complex' for i in range ( 2 ) : if name . endswith ( suffix ) : name = name [ : - len ( suffix ) ] return name | Removes the complex and complex complex suffixes from a medscan agent name so that it better corresponds with the grounding map . |
17,970 | def _urn_to_db_refs ( urn ) : if urn is None : return { } , None m = URN_PATT . match ( urn ) if m is None : return None , None urn_type , urn_id = m . groups ( ) db_refs = { } db_name = None if urn_type == 'agi-cas' : chebi_id = get_chebi_id_from_cas ( urn_id ) if chebi_id : db_refs [ 'CHEBI' ] = 'CHEBI:%s' % chebi_id db_name = get_chebi_name_from_id ( chebi_id ) elif urn_type == 'agi-llid' : hgnc_id = get_hgnc_from_entrez ( urn_id ) if hgnc_id is not None : db_refs [ 'HGNC' ] = hgnc_id uniprot_id = get_uniprot_id ( hgnc_id ) if uniprot_id is not None : db_refs [ 'UP' ] = uniprot_id db_name = get_hgnc_name ( hgnc_id ) elif urn_type in [ 'agi-meshdis' , 'agi-ncimorgan' , 'agi-ncimtissue' , 'agi-ncimcelltype' ] : if urn_id . startswith ( 'C' ) and urn_id [ 1 : ] . isdigit ( ) : db_refs [ 'UMLS' ] = urn_id else : urn_mesh_name = unquote ( urn_id ) mesh_id , mesh_name = mesh_client . get_mesh_id_name ( urn_mesh_name ) if mesh_id : db_refs [ 'MESH' ] = mesh_id db_name = mesh_name else : db_name = urn_mesh_name elif urn_type == 'agi-gocomplex' : db_refs [ 'GO' ] = 'GO:%s' % urn_id elif urn_type == 'agi-go' : db_refs [ 'GO' ] = 'GO:%s' % urn_id db_sometimes_maps_to_famplex = [ 'GO' , 'MESH' ] for db in db_sometimes_maps_to_famplex : if db in db_refs : key = ( db , db_refs [ db ] ) if key in famplex_map : db_refs [ 'FPLX' ] = famplex_map [ key ] if urn . startswith ( 'urn:agi-enz' ) : tokens = urn . split ( ':' ) eccode = tokens [ 2 ] key = ( 'ECCODE' , eccode ) if key in famplex_map : db_refs [ 'FPLX' ] = famplex_map [ key ] key = ( 'MEDSCAN' , urn ) if key in famplex_map : db_refs [ 'FPLX' ] = famplex_map [ key ] if 'FPLX' in db_refs : db_name = db_refs [ 'FPLX' ] elif 'GO' in db_refs : db_name = go_client . get_go_label ( db_refs [ 'GO' ] ) return db_refs , db_name | Converts a Medscan URN to an INDRA db_refs dictionary with grounding information . |
17,971 | def _untag_sentence ( tagged_sentence ) : untagged_sentence = TAG_PATT . sub ( '\\2' , tagged_sentence ) clean_sentence = JUNK_PATT . sub ( '' , untagged_sentence ) return clean_sentence . strip ( ) | Removes all tags in the sentence returning the original sentence without Medscan annotations . |
17,972 | def _extract_sentence_tags ( tagged_sentence ) : untagged_sentence = _untag_sentence ( tagged_sentence ) decluttered_sentence = JUNK_PATT . sub ( '' , tagged_sentence ) tags = { } endpos = 0 while True : match = TAG_PATT . search ( decluttered_sentence , pos = endpos ) if not match : break endpos = match . end ( ) text = match . group ( 2 ) text = text . replace ( 'CONTEXT' , '' ) text = text . replace ( 'GLOSSARY' , '' ) text = text . strip ( ) start = untagged_sentence . index ( text ) stop = start + len ( text ) tag_key = match . group ( 1 ) if ',' in tag_key : for sub_key in tag_key . split ( ',' ) : if sub_key == '0' : continue tags [ sub_key ] = { 'text' : text , 'bounds' : ( start , stop ) } else : tags [ tag_key ] = { 'text' : text , 'bounds' : ( start , stop ) } return tags | Given a tagged sentence extracts a dictionary mapping tags to the words or phrases that they tag . |
17,973 | def get_sites ( self ) : st = self . site_text suffixes = [ ' residue' , ' residues' , ',' , '/' ] for suffix in suffixes : if st . endswith ( suffix ) : st = st [ : - len ( suffix ) ] assert ( not st . endswith ( ',' ) ) st = st . replace ( '(' , '' ) st = st . replace ( ')' , '' ) st = st . replace ( ' or ' , ' and ' ) sites = [ ] parts = st . split ( ' and ' ) for part in parts : if part . endswith ( ',' ) : part = part [ : - 1 ] if len ( part . strip ( ) ) > 0 : sites . extend ( ReachProcessor . _parse_site_text ( part . strip ( ) ) ) return sites | Parse the site - text string and return a list of sites . |
17,974 | def process_csxml_file ( self , filename , interval = None , lazy = False ) : if interval is None : interval = ( None , None ) tmp_fname = tempfile . mktemp ( os . path . basename ( filename ) ) fix_character_encoding ( filename , tmp_fname ) self . __f = open ( tmp_fname , 'rb' ) self . _gen = self . _iter_through_csxml_file_from_handle ( * interval ) if not lazy : for stmt in self . _gen : self . statements . append ( stmt ) return | Processes a filehandle to MedScan csxml input into INDRA statements . |
17,975 | def get_parser ( description , input_desc ) : parser = ArgumentParser ( description = description ) parser . add_argument ( dest = 'input_file' , help = input_desc ) parser . add_argument ( '-r' , '--readers' , choices = [ 'reach' , 'sparser' , 'trips' ] , help = 'List of readers to be used.' , nargs = '+' ) parser . add_argument ( '-n' , '--num_procs' , dest = 'n_proc' , help = 'Select the number of processes to use.' , type = int , default = 1 ) parser . add_argument ( '-s' , '--sample' , dest = 'n_samp' , help = 'Read a random sample of size N_SAMP of the inputs.' , type = int ) parser . add_argument ( '-I' , '--in_range' , dest = 'range_str' , help = 'Only read input lines in the range given as <start>:<end>.' ) parser . add_argument ( '-v' , '--verbose' , help = 'Include output from the readers.' , action = 'store_true' ) parser . add_argument ( '-q' , '--quiet' , help = 'Suppress most output. Overrides -v and -d options.' , action = 'store_true' ) parser . add_argument ( '-d' , '--debug' , help = 'Set the logging to debug level.' , action = 'store_true' ) return parser | Get a parser that is generic to reading scripts . |
17,976 | def send_request ( endpoint , ** kwargs ) : if api_key is None : logger . error ( 'NewsAPI cannot be used without an API key' ) return None url = '%s/%s' % ( newsapi_url , endpoint ) if 'apiKey' not in kwargs : kwargs [ 'apiKey' ] = api_key if 'pageSize' not in kwargs : kwargs [ 'pageSize' ] = 100 res = requests . get ( url , params = kwargs ) res . raise_for_status ( ) res_json = res . json ( ) return res_json | Return the response to a query as JSON from the NewsAPI web service . |
17,977 | def process_cx_file ( file_name , require_grounding = True ) : with open ( file_name , 'rt' ) as fh : json_list = json . load ( fh ) return process_cx ( json_list , require_grounding = require_grounding ) | Process a CX JSON file into Statements . |
17,978 | def process_ndex_network ( network_id , username = None , password = None , require_grounding = True ) : nd = ndex2 . client . Ndex2 ( username = username , password = password ) res = nd . get_network_as_cx_stream ( network_id ) if res . status_code != 200 : logger . error ( 'Problem downloading network: status code %s' % res . status_code ) logger . error ( 'Response: %s' % res . text ) return None json_list = res . json ( ) summary = nd . get_network_summary ( network_id ) return process_cx ( json_list , summary = summary , require_grounding = require_grounding ) | Process an NDEx network into Statements . |
17,979 | def process_cx ( cx_json , summary = None , require_grounding = True ) : ncp = NdexCxProcessor ( cx_json , summary = summary , require_grounding = require_grounding ) ncp . get_statements ( ) return ncp | Process a CX JSON object into Statements . |
17,980 | def read_files ( files , readers , ** kwargs ) : reading_content = [ Content . from_file ( filepath ) for filepath in files ] output_list = [ ] for reader in readers : res_list = reader . read ( reading_content , ** kwargs ) if res_list is None : logger . info ( "Nothing read by %s." % reader . name ) else : logger . info ( "Successfully read %d content entries with %s." % ( len ( res_list ) , reader . name ) ) output_list += res_list logger . info ( "Read %s text content entries in all." % len ( output_list ) ) return output_list | Read the files in files with the reader objects in readers . |
17,981 | def expand_families ( self , stmts ) : new_stmts = [ ] for stmt in stmts : families_list = [ ] for ag in stmt . agent_list ( ) : ag_children = self . get_children ( ag ) if len ( ag_children ) == 0 : families_list . append ( [ ag ] ) else : families_list . append ( ag_children ) for ag_combo in itertools . product ( * families_list ) : child_agents = [ ] for ag_entry in ag_combo : if ag_entry is None or isinstance ( ag_entry , Agent ) : new_agent = ag_entry elif isinstance ( ag_entry , tuple ) : ag_ns , ag_id = ag_entry new_agent = _agent_from_ns_id ( ag_ns , ag_id ) else : raise Exception ( 'Unrecognized agent entry type.' ) child_agents . append ( new_agent ) new_stmt = deepcopy ( stmt ) new_stmt . set_agent_list ( child_agents ) new_stmts . append ( new_stmt ) return new_stmts | Generate statements by expanding members of families and complexes . |
17,982 | def update_ontology ( ont_url , rdf_path ) : yaml_root = load_yaml_from_url ( ont_url ) G = rdf_graph_from_yaml ( yaml_root ) save_hierarchy ( G , rdf_path ) | Load an ontology formatted like Eidos from github . |
17,983 | def rdf_graph_from_yaml ( yaml_root ) : G = Graph ( ) for top_entry in yaml_root : assert len ( top_entry ) == 1 node = list ( top_entry . keys ( ) ) [ 0 ] build_relations ( G , node , top_entry [ node ] , None ) return G | Convert the YAML object into an RDF Graph object . |
17,984 | def load_yaml_from_url ( ont_url ) : res = requests . get ( ont_url ) if res . status_code != 200 : raise Exception ( 'Could not load ontology from %s' % ont_url ) root = yaml . load ( res . content ) return root | Return a YAML object loaded from a YAML file URL . |
17,985 | def register_preprocessed_file ( self , infile , pmid , extra_annotations ) : infile_base = os . path . basename ( infile ) outfile = os . path . join ( self . preprocessed_dir , infile_base ) shutil . copyfile ( infile , outfile ) infile_key = os . path . splitext ( infile_base ) [ 0 ] self . pmids [ infile_key ] = pmid self . extra_annotations [ infile_key ] = extra_annotations | Set up already preprocessed text file for reading with ISI reader . |
17,986 | def preprocess_plain_text_string ( self , text , pmid , extra_annotations ) : output_file = '%s.txt' % self . next_file_id output_file = os . path . join ( self . preprocessed_dir , output_file ) sentences = nltk . sent_tokenize ( text ) first_sentence = True with codecs . open ( output_file , 'w' , encoding = 'utf-8' ) as f : for sentence in sentences : if not first_sentence : f . write ( '\n' ) f . write ( sentence . rstrip ( ) ) first_sentence = False self . pmids [ str ( self . next_file_id ) ] = pmid self . extra_annotations [ str ( self . next_file_id ) ] = extra_annotations self . next_file_id += 1 | Preprocess plain text string for use by ISI reader . |
17,987 | def preprocess_plain_text_file ( self , filename , pmid , extra_annotations ) : with codecs . open ( filename , 'r' , encoding = 'utf-8' ) as f : content = f . read ( ) self . preprocess_plain_text_string ( content , pmid , extra_annotations ) | Preprocess a plain text file for use with ISI reder . |
17,988 | def preprocess_nxml_file ( self , filename , pmid , extra_annotations ) : tmp_dir = tempfile . mkdtemp ( 'indra_isi_nxml2txt_output' ) if nxml2txt_path is None : logger . error ( 'NXML2TXT_PATH not specified in config file or ' + 'environment variable' ) return if python2_path is None : logger . error ( 'PYTHON2_PATH not specified in config file or ' + 'environment variable' ) return else : txt_out = os . path . join ( tmp_dir , 'out.txt' ) so_out = os . path . join ( tmp_dir , 'out.so' ) command = [ python2_path , os . path . join ( nxml2txt_path , 'nxml2txt' ) , filename , txt_out , so_out ] ret = subprocess . call ( command ) if ret != 0 : logger . warning ( 'nxml2txt returned non-zero error code' ) with open ( txt_out , 'r' ) as f : txt_content = f . read ( ) shutil . rmtree ( tmp_dir ) cmd1 = '[^ \{\}]+\{[^\{\}]+\}\{[^\{\}]+\}' cmd2 = '[^ \{\}]+\{[^\{\}]+\}' txt_content = re . sub ( cmd1 , '' , txt_content ) txt_content = re . sub ( cmd2 , '' , txt_content ) with open ( 'tmp.txt' , 'w' ) as f : f . write ( txt_content ) self . preprocess_plain_text_string ( txt_content , pmid , extra_annotations ) | Preprocess an NXML file for use with the ISI reader . |
17,989 | def preprocess_abstract_list ( self , abstract_list ) : for abstract_struct in abstract_list : abs_format = abstract_struct [ 'format' ] content_type = abstract_struct [ 'text_type' ] content_zipped = abstract_struct [ 'content' ] tcid = abstract_struct [ 'tcid' ] trid = abstract_struct [ 'trid' ] assert ( abs_format == 'text' ) assert ( content_type == 'abstract' ) pmid = None extra_annotations = { 'tcid' : tcid , 'trid' : trid } content = zlib . decompress ( content_zipped , zlib . MAX_WBITS + 16 ) . decode ( 'utf-8' ) self . preprocess_plain_text_string ( content , pmid , extra_annotations ) | Preprocess abstracts in database pickle dump format for ISI reader . |
17,990 | def process_geneways_files ( input_folder = data_folder , get_evidence = True ) : gp = GenewaysProcessor ( input_folder , get_evidence ) return gp | Reads in Geneways data and returns a list of statements . |
17,991 | def post_flag_create ( self , post_id , reason ) : params = { 'post_flag[post_id]' : post_id , 'post_flag[reason]' : reason } return self . _get ( 'post_flags.json' , params , 'POST' , auth = True ) | Function to flag a post . |
17,992 | def post_versions_list ( self , updater_name = None , updater_id = None , post_id = None , start_id = None ) : params = { 'search[updater_name]' : updater_name , 'search[updater_id]' : updater_id , 'search[post_id]' : post_id , 'search[start_id]' : start_id } return self . _get ( 'post_versions.json' , params ) | Get list of post versions . |
17,993 | def artist_list ( self , query = None , artist_id = None , creator_name = None , creator_id = None , is_active = None , is_banned = None , empty_only = None , order = None ) : params = { 'search[name]' : query , 'search[id]' : artist_id , 'search[creator_name]' : creator_name , 'search[creator_id]' : creator_id , 'search[is_active]' : is_active , 'search[is_banned]' : is_banned , 'search[empty_only]' : empty_only , 'search[order]' : order } return self . _get ( 'artists.json' , params ) | Get an artist of a list of artists . |
17,994 | def artist_commentary_list ( self , text_matches = None , post_id = None , post_tags_match = None , original_present = None , translated_present = None ) : params = { 'search[text_matches]' : text_matches , 'search[post_id]' : post_id , 'search[post_tags_match]' : post_tags_match , 'search[original_present]' : original_present , 'search[translated_present]' : translated_present } return self . _get ( 'artist_commentaries.json' , params ) | list artist commentary . |
17,995 | def artist_commentary_versions ( self , post_id , updater_id ) : params = { 'search[updater_id]' : updater_id , 'search[post_id]' : post_id } return self . _get ( 'artist_commentary_versions.json' , params ) | Return list of artist commentary versions . |
17,996 | def note_list ( self , body_matches = None , post_id = None , post_tags_match = None , creator_name = None , creator_id = None , is_active = None ) : params = { 'search[body_matches]' : body_matches , 'search[post_id]' : post_id , 'search[post_tags_match]' : post_tags_match , 'search[creator_name]' : creator_name , 'search[creator_id]' : creator_id , 'search[is_active]' : is_active } return self . _get ( 'notes.json' , params ) | Return list of notes . |
17,997 | def note_versions ( self , updater_id = None , post_id = None , note_id = None ) : params = { 'search[updater_id]' : updater_id , 'search[post_id]' : post_id , 'search[note_id]' : note_id } return self . _get ( 'note_versions.json' , params ) | Get list of note versions . |
17,998 | def user_list ( self , name = None , name_matches = None , min_level = None , max_level = None , level = None , user_id = None , order = None ) : params = { 'search[name]' : name , 'search[name_matches]' : name_matches , 'search[min_level]' : min_level , 'search[max_level]' : max_level , 'search[level]' : level , 'search[id]' : user_id , 'search[order]' : order } return self . _get ( 'users.json' , params ) | Function to get a list of users or a specific user . |
17,999 | def pool_list ( self , name_matches = None , pool_ids = None , category = None , description_matches = None , creator_name = None , creator_id = None , is_deleted = None , is_active = None , order = None ) : params = { 'search[name_matches]' : name_matches , 'search[id]' : pool_ids , 'search[description_matches]' : description_matches , 'search[creator_name]' : creator_name , 'search[creator_id]' : creator_id , 'search[is_active]' : is_active , 'search[is_deleted]' : is_deleted , 'search[order]' : order , 'search[category]' : category } return self . _get ( 'pools.json' , params ) | Get a list of pools . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.