idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
17,300 | def download_article ( id_val , id_type = 'doi' , on_retry = False ) : if id_type == 'pmid' : id_type = 'pubmed_id' url = '%s/%s' % ( elsevier_article_url_fmt % id_type , id_val ) params = { 'httpAccept' : 'text/xml' } res = requests . get ( url , params , headers = ELSEVIER_KEYS ) if res . status_code == 404 : logger . info ( "Resource for %s not available on elsevier." % url ) return None elif res . status_code == 429 : if not on_retry : logger . warning ( "Broke the speed limit. Waiting half a second then " "trying again..." ) sleep ( 0.5 ) return download_article ( id_val , id_type , True ) else : logger . error ( "Still breaking speed limit after waiting." ) logger . error ( "Elsevier response: %s" % res . text ) return None elif res . status_code != 200 : logger . error ( 'Could not download article %s: status code %d' % ( url , res . status_code ) ) logger . error ( 'Elsevier response: %s' % res . text ) return None else : content_str = res . content . decode ( 'utf-8' ) if content_str . startswith ( '<service-error>' ) : logger . error ( 'Got a service error with 200 status: %s' % content_str ) return None return content_str | Low level function to get an XML article for a particular id . |
17,301 | def download_article_from_ids ( ** id_dict ) : valid_id_types = [ 'eid' , 'doi' , 'pmid' , 'pii' ] assert all ( [ k in valid_id_types for k in id_dict . keys ( ) ] ) , ( "One of these id keys is invalid: %s Valid keys are: %s." % ( list ( id_dict . keys ( ) ) , valid_id_types ) ) if 'doi' in id_dict . keys ( ) and id_dict [ 'doi' ] . lower ( ) . startswith ( 'doi:' ) : id_dict [ 'doi' ] = id_dict [ 'doi' ] [ 4 : ] content = None for id_type in valid_id_types : if id_type in id_dict . keys ( ) : content = download_article ( id_dict [ id_type ] , id_type ) if content is not None : break else : logger . error ( "Could not download article with any of the ids: %s." % str ( id_dict ) ) return content | Download an article in XML format from Elsevier matching the set of ids . |
17,302 | def get_abstract ( doi ) : xml_string = download_article ( doi ) if xml_string is None : return None assert isinstance ( xml_string , str ) xml_tree = ET . XML ( xml_string . encode ( 'utf-8' ) , parser = UTB ( ) ) if xml_tree is None : return None coredata = xml_tree . find ( 'article:coredata' , elsevier_ns ) abstract = coredata . find ( 'dc:description' , elsevier_ns ) abs_text = abstract . text return abs_text | Get the abstract text of an article from Elsevier given a doi . |
17,303 | def get_article ( doi , output_format = 'txt' ) : xml_string = download_article ( doi ) if output_format == 'txt' and xml_string is not None : text = extract_text ( xml_string ) return text return xml_string | Get the full body of an article from Elsevier . |
17,304 | def extract_paragraphs ( xml_string ) : assert isinstance ( xml_string , str ) xml_tree = ET . XML ( xml_string . encode ( 'utf-8' ) , parser = UTB ( ) ) full_text = xml_tree . find ( 'article:originalText' , elsevier_ns ) if full_text is None : logger . info ( 'Could not find full text element article:originalText' ) return None article_body = _get_article_body ( full_text ) if article_body : return article_body raw_text = _get_raw_text ( full_text ) if raw_text : return [ raw_text ] return None | Get paragraphs from the body of the given Elsevier xml . |
17,305 | def get_dois ( query_str , count = 100 ) : url = '%s/%s' % ( elsevier_search_url , query_str ) params = { 'query' : query_str , 'count' : count , 'httpAccept' : 'application/xml' , 'sort' : '-coverdate' , 'field' : 'doi' } res = requests . get ( url , params ) if not res . status_code == 200 : return None tree = ET . XML ( res . content , parser = UTB ( ) ) doi_tags = tree . findall ( 'atom:entry/prism:doi' , elsevier_ns ) dois = [ dt . text for dt in doi_tags ] return dois | Search ScienceDirect through the API for articles . |
17,306 | def get_piis ( query_str ) : dates = range ( 1960 , datetime . datetime . now ( ) . year ) all_piis = flatten ( [ get_piis_for_date ( query_str , date ) for date in dates ] ) return all_piis | Search ScienceDirect through the API for articles and return PIIs . |
17,307 | def get_piis_for_date ( query_str , date ) : count = 200 params = { 'query' : query_str , 'count' : count , 'start' : 0 , 'sort' : '-coverdate' , 'date' : date , 'field' : 'pii' } all_piis = [ ] while True : res = requests . get ( elsevier_search_url , params , headers = ELSEVIER_KEYS ) if not res . status_code == 200 : logger . info ( 'Got status code: %d' % res . status_code ) break res_json = res . json ( ) entries = res_json [ 'search-results' ] [ 'entry' ] logger . info ( res_json [ 'search-results' ] [ 'opensearch:totalResults' ] ) if entries == [ { '@_fa' : 'true' , 'error' : 'Result set was empty' } ] : logger . info ( 'Search result was empty' ) return [ ] piis = [ entry [ 'pii' ] for entry in entries ] all_piis += piis links = res_json [ 'search-results' ] . get ( 'link' , [ ] ) cont = False for link in links : if link . get ( '@ref' ) == 'next' : logger . info ( 'Found link to next batch of results.' ) params [ 'start' ] += count cont = True break if not cont : break return all_piis | Search ScienceDirect with a query string constrained to a given year . |
17,308 | def download_from_search ( query_str , folder , do_extract_text = True , max_results = None ) : piis = get_piis ( query_str ) for pii in piis [ : max_results ] : if os . path . exists ( os . path . join ( folder , '%s.txt' % pii ) ) : continue logger . info ( 'Downloading %s' % pii ) xml = download_article ( pii , 'pii' ) sleep ( 1 ) if do_extract_text : txt = extract_text ( xml ) if not txt : continue with open ( os . path . join ( folder , '%s.txt' % pii ) , 'wb' ) as fh : fh . write ( txt . encode ( 'utf-8' ) ) else : with open ( os . path . join ( folder , '%s.xml' % pii ) , 'wb' ) as fh : fh . write ( xml . encode ( 'utf-8' ) ) return | Save raw text files based on a search for papers on ScienceDirect . |
17,309 | def extract_statement_from_query_result ( self , res ) : agent_start , agent_end , affected_start , affected_end = res agent_start = int ( agent_start ) agent_end = int ( agent_end ) affected_start = int ( affected_start ) affected_end = int ( affected_end ) agent = self . text [ agent_start : agent_end ] affected = self . text [ affected_start : affected_end ] agent = agent . lstrip ( ) . rstrip ( ) affected = affected . lstrip ( ) . rstrip ( ) subj = Agent ( agent , db_refs = { 'TEXT' : agent } ) obj = Agent ( affected , db_refs = { 'TEXT' : affected } ) statement = Influence ( subj = subj , obj = obj ) self . statements . append ( statement ) | Adds a statement based on one element of an rdflib SPARQL query . |
17,310 | def extract_statements ( self ) : query = prefixes + results = self . graph . query ( query ) for res in results : self . extract_statement_from_query_result ( res ) query = query . replace ( 'role:AFFECTED' , 'role:RESULT' ) results = self . graph . query ( query ) for res in results : self . extract_statement_from_query_result ( res ) | Extracts INDRA statements from the RDF graph via SPARQL queries . |
17,311 | def _recursively_lookup_complex ( self , complex_id ) : assert complex_id in self . complex_map expanded_agent_strings = [ ] expand_these_next = [ complex_id ] while len ( expand_these_next ) > 0 : c = expand_these_next [ 0 ] expand_these_next = expand_these_next [ 1 : ] assert c in self . complex_map for s in self . complex_map [ c ] : if s in self . complex_map : expand_these_next . append ( s ) else : expanded_agent_strings . append ( s ) return expanded_agent_strings | Looks up the constituents of a complex . If any constituent is itself a complex recursively expands until all constituents are not complexes . |
17,312 | def _get_complex_agents ( self , complex_id ) : agents = [ ] components = self . _recursively_lookup_complex ( complex_id ) for c in components : db_refs = { } name = uniprot_client . get_gene_name ( c ) if name is None : db_refs [ 'SIGNOR' ] = c else : db_refs [ 'UP' ] = c hgnc_id = hgnc_client . get_hgnc_id ( name ) if hgnc_id : db_refs [ 'HGNC' ] = hgnc_id famplex_key = ( 'SIGNOR' , c ) if famplex_key in famplex_map : db_refs [ 'FPLX' ] = famplex_map [ famplex_key ] if not name : name = db_refs [ 'FPLX' ] elif not name : logger . info ( 'Have neither a Uniprot nor Famplex grounding ' + 'for ' + c ) if not name : name = db_refs [ 'SIGNOR' ] assert ( name is not None ) agents . append ( Agent ( name , db_refs = db_refs ) ) return agents | Returns a list of agents corresponding to each of the constituents in a SIGNOR complex . |
17,313 | def stmts_from_json ( json_in , on_missing_support = 'handle' ) : stmts = [ ] uuid_dict = { } for json_stmt in json_in : try : st = Statement . _from_json ( json_stmt ) except Exception as e : logger . warning ( "Error creating statement: %s" % e ) continue stmts . append ( st ) uuid_dict [ st . uuid ] = st for st in stmts : _promote_support ( st . supports , uuid_dict , on_missing_support ) _promote_support ( st . supported_by , uuid_dict , on_missing_support ) return stmts | Get a list of Statements from Statement jsons . |
17,314 | def stmts_to_json_file ( stmts , fname ) : with open ( fname , 'w' ) as fh : json . dump ( stmts_to_json ( stmts ) , fh , indent = 1 ) | Serialize a list of INDRA Statements into a JSON file . |
17,315 | def stmts_to_json ( stmts_in , use_sbo = False ) : if not isinstance ( stmts_in , list ) : json_dict = stmts_in . to_json ( use_sbo = use_sbo ) return json_dict else : json_dict = [ st . to_json ( use_sbo = use_sbo ) for st in stmts_in ] return json_dict | Return the JSON - serialized form of one or more INDRA Statements . |
17,316 | def _promote_support ( sup_list , uuid_dict , on_missing = 'handle' ) : valid_handling_choices = [ 'handle' , 'error' , 'ignore' ] if on_missing not in valid_handling_choices : raise InputError ( 'Invalid option for `on_missing_support`: \'%s\'\n' 'Choices are: %s.' % ( on_missing , str ( valid_handling_choices ) ) ) for idx , uuid in enumerate ( sup_list ) : if uuid in uuid_dict . keys ( ) : sup_list [ idx ] = uuid_dict [ uuid ] elif on_missing == 'handle' : sup_list [ idx ] = Unresolved ( uuid ) elif on_missing == 'ignore' : sup_list . remove ( uuid ) elif on_missing == 'error' : raise UnresolvedUuidError ( "Uuid %s not found in stmt jsons." % uuid ) return | Promote the list of support - related uuids to Statements if possible . |
17,317 | def draw_stmt_graph ( stmts ) : import networkx try : import matplotlib . pyplot as plt except Exception : logger . error ( 'Could not import matplotlib, not drawing graph.' ) return try : import pygraphviz except Exception : logger . error ( 'Could not import pygraphviz, not drawing graph.' ) return import numpy g = networkx . compose_all ( [ stmt . to_graph ( ) for stmt in stmts ] ) plt . figure ( ) plt . ion ( ) g . graph [ 'graph' ] = { 'rankdir' : 'LR' } pos = networkx . drawing . nx_agraph . graphviz_layout ( g , prog = 'dot' ) g = g . to_undirected ( ) options = { 'marker' : 'o' , 's' : 200 , 'c' : [ 0.85 , 0.85 , 1 ] , 'facecolor' : '0.5' , 'lw' : 0 , } ax = plt . gca ( ) nodelist = list ( g ) xy = numpy . asarray ( [ pos [ v ] for v in nodelist ] ) node_collection = ax . scatter ( xy [ : , 0 ] , xy [ : , 1 ] , ** options ) node_collection . set_zorder ( 2 ) networkx . draw_networkx_edges ( g , pos , arrows = False , edge_color = '0.5' ) edge_labels = { ( e [ 0 ] , e [ 1 ] ) : e [ 2 ] . get ( 'label' ) for e in g . edges ( data = True ) } networkx . draw_networkx_edge_labels ( g , pos , edge_labels = edge_labels ) node_labels = { n [ 0 ] : n [ 1 ] . get ( 'label' ) for n in g . nodes ( data = True ) } for key , label in node_labels . items ( ) : if len ( label ) > 25 : parts = label . split ( ' ' ) parts . insert ( int ( len ( parts ) / 2 ) , '\n' ) label = ' ' . join ( parts ) node_labels [ key ] = label networkx . draw_networkx_labels ( g , pos , labels = node_labels ) ax . get_xaxis ( ) . set_visible ( False ) ax . get_yaxis ( ) . set_visible ( False ) plt . show ( ) | Render the attributes of a list of Statements as directed graphs . |
17,318 | def _fix_json_agents ( ag_obj ) : if isinstance ( ag_obj , str ) : logger . info ( "Fixing string agent: %s." % ag_obj ) ret = { 'name' : ag_obj , 'db_refs' : { 'TEXT' : ag_obj } } elif isinstance ( ag_obj , list ) : ret = [ _fix_json_agents ( ag ) for ag in ag_obj ] elif isinstance ( ag_obj , dict ) and 'TEXT' in ag_obj . keys ( ) : ret = deepcopy ( ag_obj ) text = ret . pop ( 'TEXT' ) ret [ 'db_refs' ] [ 'TEXT' ] = text else : ret = ag_obj return ret | Fix the json representation of an agent . |
17,319 | def set_statements_pmid ( self , pmid ) : for stmt in self . json_stmts : evs = stmt . get ( 'evidence' , [ ] ) for ev in evs : ev [ 'pmid' ] = pmid for stmt in self . statements : for ev in stmt . evidence : ev . pmid = pmid | Set the evidence PMID of Statements that have been extracted . |
17,320 | def get_args ( node ) : arg_roles = { } args = node . findall ( 'arg' ) + [ node . find ( 'arg1' ) , node . find ( 'arg2' ) , node . find ( 'arg3' ) ] for arg in args : if arg is not None : id = arg . attrib . get ( 'id' ) if id is not None : arg_roles [ arg . attrib [ 'role' ] ] = ( arg . attrib [ 'id' ] , arg ) if node . find ( 'features' ) is not None : inevents = node . findall ( 'features/inevent' ) for inevent in inevents : if 'id' in inevent . attrib : arg_roles [ 'inevent' ] = ( inevent . attrib [ 'id' ] , inevent ) ptms = node . findall ( 'features/ptm' ) + node . findall ( 'features/no-ptm' ) for ptm in ptms : if 'id' in inevent . attrib : arg_roles [ 'ptm' ] = ( inevent . attrib [ 'id' ] , ptm ) aw = node . find ( 'assoc-with' ) if aw is not None : aw_id = aw . attrib [ 'id' ] arg_roles [ 'assoc-with' ] = ( aw_id , aw ) return arg_roles | Return the arguments of a node in the event graph . |
17,321 | def type_match ( a , b ) : if a [ 'type' ] == b [ 'type' ] : return True eq_groups = [ { 'ONT::GENE-PROTEIN' , 'ONT::GENE' , 'ONT::PROTEIN' } , { 'ONT::PHARMACOLOGIC-SUBSTANCE' , 'ONT::CHEMICAL' } ] for eq_group in eq_groups : if a [ 'type' ] in eq_group and b [ 'type' ] in eq_group : return True return False | Return True if the types of a and b are compatible False otherwise . |
17,322 | def add_graph ( patterns , G ) : if not patterns : patterns . append ( [ G ] ) return for i , graphs in enumerate ( patterns ) : if networkx . is_isomorphic ( graphs [ 0 ] , G , node_match = type_match , edge_match = type_match ) : patterns [ i ] . append ( G ) return patterns . append ( [ G ] ) | Add a graph to a set of unique patterns . |
17,323 | def draw ( graph , fname ) : ag = networkx . nx_agraph . to_agraph ( graph ) ag . draw ( fname , prog = 'dot' ) | Draw a graph and save it into a file |
17,324 | def build_event_graph ( graph , tree , node ) : if node_key ( node ) in graph : return type = get_type ( node ) text = get_text ( node ) label = '%s (%s)' % ( type , text ) graph . add_node ( node_key ( node ) , type = type , label = label , text = text ) args = get_args ( node ) for arg_role , ( arg_id , arg_tag ) in args . items ( ) : arg = get_node_by_id ( tree , arg_id ) if arg is None : arg = arg_tag build_event_graph ( graph , tree , arg ) graph . add_edge ( node_key ( node ) , node_key ( arg ) , type = arg_role , label = arg_role ) | Return a DiGraph of a specific event structure built recursively |
17,325 | def get_extracted_events ( fnames ) : event_list = [ ] for fn in fnames : tp = trips . process_xml_file ( fn ) ed = tp . extracted_events for k , v in ed . items ( ) : event_list += v return event_list | Get a full list of all extracted event IDs from a list of EKB files |
17,326 | def check_event_coverage ( patterns , event_list ) : proportions = [ ] for pattern_list in patterns : proportion = 0 for pattern in pattern_list : for node in pattern . nodes ( ) : if node in event_list : proportion += 1.0 / len ( pattern_list ) break proportions . append ( proportion ) return proportions | Calculate the ratio of patterns that were extracted . |
17,327 | def map_statements ( self ) : for stmt in self . statements : for agent in stmt . agent_list ( ) : if agent is None : continue all_mappings = [ ] for db_name , db_id in agent . db_refs . items ( ) : if isinstance ( db_id , list ) : db_id = db_id [ 0 ] [ 0 ] mappings = self . _map_id ( db_name , db_id ) all_mappings += mappings for map_db_name , map_db_id , score , orig_db_name in all_mappings : if map_db_name in agent . db_refs : continue if self . scored : try : orig_score = agent . db_refs [ orig_db_name ] [ 0 ] [ 1 ] except Exception : orig_score = 1.0 agent . db_refs [ map_db_name ] = [ ( map_db_id , score * orig_score ) ] else : if map_db_name in ( 'UN' , 'HUME' ) : agent . db_refs [ map_db_name ] = [ ( map_db_id , 1.0 ) ] else : agent . db_refs [ map_db_name ] = map_db_id | Run the ontology mapping on the statements . |
17,328 | def load_grounding_map ( grounding_map_path , ignore_path = None , lineterminator = '\r\n' ) : g_map = { } map_rows = read_unicode_csv ( grounding_map_path , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' ) if ignore_path and os . path . exists ( ignore_path ) : ignore_rows = read_unicode_csv ( ignore_path , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = lineterminator ) else : ignore_rows = [ ] csv_rows = chain ( map_rows , ignore_rows ) for row in csv_rows : key = row [ 0 ] db_refs = { 'TEXT' : key } keys = [ entry for entry in row [ 1 : : 2 ] if entry != '' ] values = [ entry for entry in row [ 2 : : 2 ] if entry != '' ] if len ( keys ) != len ( values ) : logger . info ( 'ERROR: Mismatched keys and values in row %s' % str ( row ) ) continue else : db_refs . update ( dict ( zip ( keys , values ) ) ) if len ( db_refs . keys ( ) ) > 1 : g_map [ key ] = db_refs else : g_map [ key ] = None return g_map | Return a grounding map dictionary loaded from a csv file . |
17,329 | def all_agents ( stmts ) : agents = [ ] for stmt in stmts : for agent in stmt . agent_list ( ) : if agent is not None and agent . db_refs . get ( 'TEXT' ) is not None : agents . append ( agent ) return agents | Return a list of all of the agents from a list of statements . |
17,330 | def get_sentences_for_agent ( text , stmts , max_sentences = None ) : sentences = [ ] for stmt in stmts : for agent in stmt . agent_list ( ) : if agent is not None and agent . db_refs . get ( 'TEXT' ) == text : sentences . append ( ( stmt . evidence [ 0 ] . pmid , stmt . evidence [ 0 ] . text ) ) if max_sentences is not None and len ( sentences ) >= max_sentences : return sentences return sentences | Returns evidence sentences with a given agent text from a list of statements |
17,331 | def agent_texts_with_grounding ( stmts ) : allag = all_agents ( stmts ) for ag in allag : pfam_def = ag . db_refs . get ( 'PFAM-DEF' ) if pfam_def is not None : ag . db_refs [ 'PFAM-DEF' ] = tuple ( pfam_def ) refs = [ tuple ( ag . db_refs . items ( ) ) for ag in allag ] refs_counter = Counter ( refs ) refs_counter_dict = [ ( dict ( entry [ 0 ] ) , entry [ 1 ] ) for entry in refs_counter . items ( ) ] refs_counter_dict . sort ( key = lambda x : x [ 0 ] . get ( 'TEXT' ) ) grouped_by_text = [ ] for k , g in groupby ( refs_counter_dict , key = lambda x : x [ 0 ] . get ( 'TEXT' ) ) : total = 0 entry = [ k ] db_ref_list = [ ] for db_refs , count in g : if list ( db_refs . keys ( ) ) == [ 'TEXT' ] : db_ref_list . append ( ( None , None , count ) ) for db , db_id in db_refs . items ( ) : if db == 'TEXT' : continue else : db_ref_list . append ( ( db , db_id , count ) ) total += count entry . append ( tuple ( sorted ( db_ref_list , key = lambda x : x [ 2 ] , reverse = True ) ) ) entry . append ( total ) grouped_by_text . append ( tuple ( entry ) ) grouped_by_text . sort ( key = lambda x : x [ 2 ] , reverse = True ) return grouped_by_text | Return agent text groundings in a list of statements with their counts |
17,332 | def ungrounded_texts ( stmts ) : ungrounded = [ ag . db_refs [ 'TEXT' ] for s in stmts for ag in s . agent_list ( ) if ag is not None and list ( ag . db_refs . keys ( ) ) == [ 'TEXT' ] ] ungroundc = Counter ( ungrounded ) ungroundc = ungroundc . items ( ) ungroundc = sorted ( ungroundc , key = lambda x : x [ 1 ] , reverse = True ) return ungroundc | Return a list of all ungrounded entities ordered by number of mentions |
17,333 | def get_agents_with_name ( name , stmts ) : return [ ag for stmt in stmts for ag in stmt . agent_list ( ) if ag is not None and ag . name == name ] | Return all agents within a list of statements with a particular name . |
17,334 | def save_base_map ( filename , grouped_by_text ) : rows = [ ] for group in grouped_by_text : text_string = group [ 0 ] for db , db_id , count in group [ 1 ] : if db == 'UP' : name = uniprot_client . get_mnemonic ( db_id ) else : name = '' row = [ text_string , db , db_id , count , name ] rows . append ( row ) write_unicode_csv ( filename , rows , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' ) | Dump a list of agents along with groundings and counts into a csv file |
17,335 | def protein_map_from_twg ( twg ) : protein_map = { } unmatched = 0 matched = 0 logger . info ( 'Building grounding map for human proteins' ) for agent_text , grounding_list , _ in twg : if 'UP' not in [ entry [ 0 ] for entry in grounding_list ] : continue uniprot_ids = [ entry [ 1 ] for entry in grounding_list if entry [ 0 ] == 'UP' ] for uniprot_id in uniprot_ids : mnemonic = uniprot_client . get_mnemonic ( uniprot_id ) if mnemonic is None or not mnemonic . endswith ( '_HUMAN' ) : continue gene_name = uniprot_client . get_gene_name ( uniprot_id ) if gene_name is None : unmatched += 1 continue if agent_text . upper ( ) == gene_name . upper ( ) : matched += 1 protein_map [ agent_text ] = { 'TEXT' : agent_text , 'UP' : uniprot_id } else : unmatched += 1 logger . info ( 'Exact matches for %d proteins' % matched ) logger . info ( 'No match (or no gene name) for %d proteins' % unmatched ) return protein_map | Build map of entity texts to validate protein grounding . |
17,336 | def save_sentences ( twg , stmts , filename , agent_limit = 300 ) : sentences = [ ] unmapped_texts = [ t [ 0 ] for t in twg ] counter = 0 logger . info ( 'Getting sentences for top %d unmapped agent texts.' % agent_limit ) for text in unmapped_texts : agent_sentences = get_sentences_for_agent ( text , stmts ) sentences += map ( lambda tup : ( text , ) + tup , agent_sentences ) counter += 1 if counter >= agent_limit : break write_unicode_csv ( filename , sentences , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\r\n' ) | Write evidence sentences for stmts with ungrounded agents to csv file . |
17,337 | def _get_text_for_grounding ( stmt , agent_text ) : text = None try : from indra_db . util . content_scripts import get_text_content_from_text_refs from indra . literature . deft_tools import universal_extract_text refs = stmt . evidence [ 0 ] . text_refs if stmt . evidence [ 0 ] . pmid : refs [ 'PMID' ] = stmt . evidence [ 0 ] . pmid logger . info ( 'Obtaining text for disambiguation with refs: %s' % refs ) content = get_text_content_from_text_refs ( refs ) text = universal_extract_text ( content , contains = agent_text ) if text : return text except Exception as e : logger . info ( 'Could not get text for disambiguation from DB.' ) if text is None : from indra . literature import pubmed_client pmid = stmt . evidence [ 0 ] . pmid if pmid : logger . info ( 'Obtaining abstract for disambiguation for PMID%s' % pmid ) text = pubmed_client . get_abstract ( pmid ) if text : return text if text is None : logger . info ( 'Falling back on sentence-based disambiguation' ) text = stmt . evidence [ 0 ] . text return text return None | Get text context for Deft disambiguation |
17,338 | def update_agent_db_refs ( self , agent , agent_text , do_rename = True ) : map_db_refs = deepcopy ( self . gm . get ( agent_text ) ) self . standardize_agent_db_refs ( agent , map_db_refs , do_rename ) | Update db_refs of agent using the grounding map |
17,339 | def map_agents_for_stmt ( self , stmt , do_rename = True ) : mapped_stmt = deepcopy ( stmt ) agent_list = mapped_stmt . agent_list ( ) for idx , agent in enumerate ( agent_list ) : if agent is None : continue agent_txt = agent . db_refs . get ( 'TEXT' ) if agent_txt is None : continue new_agent , maps_to_none = self . map_agent ( agent , do_rename ) if self . use_deft and agent_txt in deft_disambiguators : try : run_deft_disambiguation ( mapped_stmt , agent_list , idx , new_agent , agent_txt ) except Exception as e : logger . error ( 'There was an error during Deft' ' disambiguation.' ) logger . error ( e ) if maps_to_none : return None if new_agent is not None and len ( new_agent . bound_conditions ) == 0 : new_agent . bound_conditions = agent . bound_conditions agent_list [ idx ] = new_agent mapped_stmt . set_agent_list ( agent_list ) for agent in agent_list : if agent is not None : for bc in agent . bound_conditions : bc . agent , maps_to_none = self . map_agent ( bc . agent , do_rename ) if maps_to_none : return None return mapped_stmt | Return a new Statement whose agents have been grounding mapped . |
17,340 | def map_agent ( self , agent , do_rename ) : agent_text = agent . db_refs . get ( 'TEXT' ) mapped_to_agent_json = self . agent_map . get ( agent_text ) if mapped_to_agent_json : mapped_to_agent = Agent . _from_json ( mapped_to_agent_json [ 'agent' ] ) return mapped_to_agent , False if agent_text in self . gm . keys ( ) : map_db_refs = self . gm [ agent_text ] else : return agent , False if map_db_refs is None : logger . debug ( "Skipping %s" % agent_text ) return None , True else : self . update_agent_db_refs ( agent , agent_text , do_rename ) return agent , False | Return the given Agent with its grounding mapped . |
17,341 | def map_agents ( self , stmts , do_rename = True ) : mapped_stmts = [ ] num_skipped = 0 for stmt in stmts : mapped_stmt = self . map_agents_for_stmt ( stmt , do_rename ) if mapped_stmt is not None : mapped_stmts . append ( mapped_stmt ) else : num_skipped += 1 logger . info ( '%s statements filtered out' % num_skipped ) return mapped_stmts | Return a new list of statements whose agents have been mapped |
17,342 | def rename_agents ( self , stmts ) : mapped_stmts = deepcopy ( stmts ) for _ , stmt in enumerate ( mapped_stmts ) : for agent in stmt . agent_list ( ) : if agent is None : continue if agent . db_refs . get ( 'FPLX' ) : agent . name = agent . db_refs . get ( 'FPLX' ) elif agent . db_refs . get ( 'UP' ) : gene_name = uniprot_client . get_gene_name ( agent . db_refs . get ( 'UP' ) , web_fallback = False ) if gene_name : agent . name = gene_name hgnc_id = hgnc_client . get_hgnc_id ( gene_name ) if hgnc_id : agent . db_refs [ 'HGNC' ] = hgnc_id return mapped_stmts | Return a list of mapped statements with updated agent names . |
17,343 | def get_complexes ( self , cplx_df ) : logger . info ( 'Processing complexes...' ) for cplx_id , this_cplx in cplx_df . groupby ( 'CPLX_ID' ) : agents = [ ] for hprd_id in this_cplx . HPRD_ID : ag = self . _make_agent ( hprd_id ) if ag is not None : agents . append ( ag ) if not agents : continue row0 = this_cplx . iloc [ 0 ] isoform_id = '%s_1' % row0 . HPRD_ID ev_list = self . _get_evidence ( row0 . HPRD_ID , isoform_id , row0 . PMIDS , row0 . EVIDENCE , 'interactions' ) stmt = Complex ( agents , evidence = ev_list ) self . statements . append ( stmt ) | Generate Complex Statements from the HPRD protein complexes data . |
17,344 | def get_ptms ( self , ptm_df ) : logger . info ( 'Processing PTMs...' ) for ix , row in ptm_df . iterrows ( ) : ptm_class = _ptm_map [ row [ 'MOD_TYPE' ] ] if ptm_class is None : continue sub_ag = self . _make_agent ( row [ 'HPRD_ID' ] , refseq_id = row [ 'REFSEQ_PROTEIN' ] ) if sub_ag is None : continue enz_id = _nan_to_none ( row [ 'ENZ_HPRD_ID' ] ) enz_ag = self . _make_agent ( enz_id ) res = _nan_to_none ( row [ 'RESIDUE' ] ) pos = _nan_to_none ( row [ 'POSITION' ] ) if pos is not None and ';' in pos : pos , dash = pos . split ( ';' ) assert dash == '-' assert res assert pos motif_dict = self . _get_seq_motif ( row [ 'REFSEQ_PROTEIN' ] , res , pos ) ev_list = self . _get_evidence ( row [ 'HPRD_ID' ] , row [ 'HPRD_ISOFORM' ] , row [ 'PMIDS' ] , row [ 'EVIDENCE' ] , 'ptms' , motif_dict ) stmt = ptm_class ( enz_ag , sub_ag , res , pos , evidence = ev_list ) self . statements . append ( stmt ) | Generate Modification statements from the HPRD PTM data . |
17,345 | def get_ppis ( self , ppi_df ) : logger . info ( 'Processing PPIs...' ) for ix , row in ppi_df . iterrows ( ) : agA = self . _make_agent ( row [ 'HPRD_ID_A' ] ) agB = self . _make_agent ( row [ 'HPRD_ID_B' ] ) if agA is None or agB is None : continue isoform_id = '%s_1' % row [ 'HPRD_ID_A' ] ev_list = self . _get_evidence ( row [ 'HPRD_ID_A' ] , isoform_id , row [ 'PMIDS' ] , row [ 'EVIDENCE' ] , 'interactions' ) stmt = Complex ( [ agA , agB ] , evidence = ev_list ) self . statements . append ( stmt ) | Generate Complex Statements from the HPRD PPI data . |
17,346 | def _build_verb_statement_mapping ( ) : path_this = os . path . dirname ( os . path . abspath ( __file__ ) ) map_path = os . path . join ( path_this , 'isi_verb_to_indra_statement_type.tsv' ) with open ( map_path , 'r' ) as f : first_line = True verb_to_statement_type = { } for line in f : if not first_line : line = line [ : - 1 ] tokens = line . split ( '\t' ) if len ( tokens ) == 2 and len ( tokens [ 1 ] ) > 0 : verb = tokens [ 0 ] s_type = tokens [ 1 ] try : statement_class = getattr ( ist , s_type ) verb_to_statement_type [ verb ] = statement_class except Exception : pass else : first_line = False return verb_to_statement_type | Build the mapping between ISI verb strings and INDRA statement classes . |
17,347 | def get_statements ( self ) : for k , v in self . reader_output . items ( ) : for interaction in v [ 'interactions' ] : self . _process_interaction ( k , interaction , v [ 'text' ] , self . pmid , self . extra_annotations ) | Process reader output to produce INDRA Statements . |
17,348 | def _process_interaction ( self , source_id , interaction , text , pmid , extra_annotations ) : verb = interaction [ 0 ] . lower ( ) subj = interaction [ - 2 ] obj = interaction [ - 1 ] subj = self . _make_agent ( subj ) obj = self . _make_agent ( obj ) annotations = deepcopy ( extra_annotations ) if 'interaction' in extra_annotations : logger . warning ( "'interaction' key of extra_annotations ignored" + " since this is reserved for storing the raw ISI " + "input." ) annotations [ 'source_id' ] = source_id annotations [ 'interaction' ] = interaction ev = ist . Evidence ( source_api = 'isi' , pmid = pmid , text = text . rstrip ( ) , annotations = annotations ) cataylst_specified = False if len ( interaction ) == 4 : catalyst = interaction [ 1 ] if catalyst is not None : cataylst_specified = True self . verbs . add ( verb ) statement = None if verb in verb_to_statement_type : statement_class = verb_to_statement_type [ verb ] if statement_class == ist . Complex : statement = ist . Complex ( [ subj , obj ] , evidence = ev ) else : statement = statement_class ( subj , obj , evidence = ev ) if statement is not None : already_have = False if type ( statement ) == ist . Complex : for old_s in self . statements : old_id = statement . evidence [ 0 ] . source_id new_id = old_s . evidence [ 0 ] . source_id if type ( old_s ) == ist . Complex and old_id == new_id : old_statement_members = [ m . db_refs [ 'TEXT' ] for m in old_s . members ] old_statement_members = sorted ( old_statement_members ) new_statement_members = [ m . db_refs [ 'TEXT' ] for m in statement . members ] new_statement_members = sorted ( new_statement_members ) if old_statement_members == new_statement_members : already_have = True break if not already_have : self . statements . append ( statement ) | Process an interaction JSON tuple from the ISI output and adds up to one statement to the list of extracted statements . |
17,349 | def make_annotation ( self ) : annotation = dict ( ) for item in dir ( self ) : if len ( item ) > 0 and item [ 0 ] != '_' and not inspect . ismethod ( getattr ( self , item ) ) : annotation [ item ] = getattr ( self , item ) return annotation | Returns a dictionary with all properties of the action mention . |
17,350 | def _match_to_array ( m ) : return [ _cast_biopax_element ( m . get ( i ) ) for i in range ( m . varSize ( ) ) ] | Returns an array consisting of the elements obtained from a pattern search cast into their appropriate classes . |
17,351 | def _is_complex ( pe ) : val = isinstance ( pe , _bp ( 'Complex' ) ) or isinstance ( pe , _bpimpl ( 'Complex' ) ) return val | Return True if the physical entity is a complex |
17,352 | def _is_protein ( pe ) : val = isinstance ( pe , _bp ( 'Protein' ) ) or isinstance ( pe , _bpimpl ( 'Protein' ) ) or isinstance ( pe , _bp ( 'ProteinReference' ) ) or isinstance ( pe , _bpimpl ( 'ProteinReference' ) ) return val | Return True if the element is a protein |
17,353 | def _is_rna ( pe ) : val = isinstance ( pe , _bp ( 'Rna' ) ) or isinstance ( pe , _bpimpl ( 'Rna' ) ) return val | Return True if the element is an RNA |
17,354 | def _is_small_molecule ( pe ) : val = isinstance ( pe , _bp ( 'SmallMolecule' ) ) or isinstance ( pe , _bpimpl ( 'SmallMolecule' ) ) or isinstance ( pe , _bp ( 'SmallMoleculeReference' ) ) or isinstance ( pe , _bpimpl ( 'SmallMoleculeReference' ) ) return val | Return True if the element is a small molecule |
17,355 | def _is_physical_entity ( pe ) : val = isinstance ( pe , _bp ( 'PhysicalEntity' ) ) or isinstance ( pe , _bpimpl ( 'PhysicalEntity' ) ) return val | Return True if the element is a physical entity |
17,356 | def _is_modification_or_activity ( feature ) : if not ( isinstance ( feature , _bp ( 'ModificationFeature' ) ) or isinstance ( feature , _bpimpl ( 'ModificationFeature' ) ) ) : return None mf_type = feature . getModificationType ( ) if mf_type is None : return None mf_type_terms = mf_type . getTerm ( ) . toArray ( ) for term in mf_type_terms : if term in ( 'residue modification, active' , 'residue modification, inactive' , 'active' , 'inactive' ) : return 'activity' return 'modification' | Return 'activity' or 'modification' depending on the feature's modification type terms, or None if the feature is not a ModificationFeature or has no type .
17,357 | def _is_reference ( bpe ) : if isinstance ( bpe , _bp ( 'ProteinReference' ) ) or isinstance ( bpe , _bpimpl ( 'ProteinReference' ) ) or isinstance ( bpe , _bp ( 'SmallMoleculeReference' ) ) or isinstance ( bpe , _bpimpl ( 'SmallMoleculeReference' ) ) or isinstance ( bpe , _bp ( 'RnaReference' ) ) or isinstance ( bpe , _bpimpl ( 'RnaReference' ) ) or isinstance ( bpe , _bp ( 'EntityReference' ) ) or isinstance ( bpe , _bpimpl ( 'EntityReference' ) ) : return True else : return False | Return True if the element is an entity reference . |
17,358 | def _is_entity ( bpe ) : if isinstance ( bpe , _bp ( 'Protein' ) ) or isinstance ( bpe , _bpimpl ( 'Protein' ) ) or isinstance ( bpe , _bp ( 'SmallMolecule' ) ) or isinstance ( bpe , _bpimpl ( 'SmallMolecule' ) ) or isinstance ( bpe , _bp ( 'Complex' ) ) or isinstance ( bpe , _bpimpl ( 'Complex' ) ) or isinstance ( bpe , _bp ( 'Rna' ) ) or isinstance ( bpe , _bpimpl ( 'Rna' ) ) or isinstance ( bpe , _bp ( 'RnaRegion' ) ) or isinstance ( bpe , _bpimpl ( 'RnaRegion' ) ) or isinstance ( bpe , _bp ( 'DnaRegion' ) ) or isinstance ( bpe , _bpimpl ( 'DnaRegion' ) ) or isinstance ( bpe , _bp ( 'PhysicalEntity' ) ) or isinstance ( bpe , _bpimpl ( 'PhysicalEntity' ) ) : return True else : return False | Return True if the element is a physical entity . |
17,359 | def _is_catalysis ( bpe ) : if isinstance ( bpe , _bp ( 'Catalysis' ) ) or isinstance ( bpe , _bpimpl ( 'Catalysis' ) ) : return True else : return False | Return True if the element is Catalysis . |
17,360 | def print_statements ( self ) : for i , stmt in enumerate ( self . statements ) : print ( "%s: %s" % ( i , stmt ) ) | Print all INDRA Statements collected by the processors . |
17,361 | def save_model ( self , file_name = None ) : if file_name is None : logger . error ( 'Missing file name' ) return pcc . model_to_owl ( self . model , file_name ) | Save the BioPAX model object in an OWL file . |
17,362 | def eliminate_exact_duplicates ( self ) : self . statements = list ( { stmt . get_hash ( shallow = False , refresh = True ) : stmt for stmt in self . statements } . values ( ) ) | Eliminate Statements that were extracted multiple times . |
17,363 | def get_complexes ( self ) : for obj in self . model . getObjects ( ) . toArray ( ) : bpe = _cast_biopax_element ( obj ) if not _is_complex ( bpe ) : continue ev = self . _get_evidence ( bpe ) members = self . _get_complex_members ( bpe ) if members is not None : if len ( members ) > 10 : logger . debug ( 'Skipping complex with more than 10 members.' ) continue complexes = _get_combinations ( members ) for c in complexes : self . statements . append ( decode_obj ( Complex ( c , ev ) , encoding = 'utf-8' ) ) | Extract INDRA Complex Statements from the BioPAX model . |
17,364 | def get_modifications ( self ) : for modtype , modclass in modtype_to_modclass . items ( ) : if modtype == 'modification' : continue stmts = self . _get_generic_modification ( modclass ) self . statements += stmts | Extract INDRA Modification Statements from the BioPAX model . |
17,365 | def get_activity_modification ( self ) : mod_filter = 'residue modification, active' for is_active in [ True , False ] : p = self . _construct_modification_pattern ( ) rel = mcct . GAIN if is_active else mcct . LOSS p . add ( mcc ( rel , mod_filter ) , "input simple PE" , "output simple PE" ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : reaction = r [ p . indexOf ( 'Conversion' ) ] activity = 'activity' input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] mod_in = BiopaxProcessor . _get_entity_mods ( input_spe ) mod_out = BiopaxProcessor . _get_entity_mods ( output_spe ) mod_shared = _get_mod_intersection ( mod_in , mod_out ) gained_mods = _get_mod_difference ( mod_out , mod_in ) ev = self . _get_evidence ( reaction ) agents = self . _get_agents_from_entity ( output_spe ) for agent in _listify ( agents ) : static_mods = _get_mod_difference ( agent . mods , gained_mods ) if gained_mods : agent . mods = gained_mods stmt = ActiveForm ( agent , activity , is_active , evidence = ev ) self . statements . append ( decode_obj ( stmt , encoding = 'utf-8' ) ) | Extract INDRA ActiveForm statements from the BioPAX model . |
17,366 | def get_regulate_amounts ( self ) : p = pb . controlsExpressionWithTemplateReac ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] stmts = [ ] for res in res_array : controller = self . _get_agents_from_entity ( res [ 2 ] ) controlled_pe = res [ 6 ] controlled = self . _get_agents_from_entity ( controlled_pe ) conversion = res [ 5 ] direction = conversion . getTemplateDirection ( ) if direction is not None : direction = direction . name ( ) if direction != 'FORWARD' : logger . warning ( 'Unhandled conversion direction %s' % direction ) continue int_type = conversion . getInteractionType ( ) . toArray ( ) if int_type : for it in int_type : for term in it . getTerm ( ) . toArray ( ) : pass control = res [ 4 ] control_type = control . getControlType ( ) if control_type : control_type = control_type . name ( ) ev = self . _get_evidence ( control ) for subj , obj in itertools . product ( _listify ( controller ) , _listify ( controlled ) ) : subj_act = ActivityCondition ( 'transcription' , True ) subj . activity = subj_act if control_type == 'ACTIVATION' : st = IncreaseAmount ( subj , obj , evidence = ev ) elif control_type == 'INHIBITION' : st = DecreaseAmount ( subj , obj , evidence = ev ) else : logger . warning ( 'Unhandled control type %s' % control_type ) continue st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec ) | Extract INDRA RegulateAmount Statements from the BioPAX model . |
17,367 | def get_gef ( self ) : p = self . _gef_gap_base ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_pe = r [ p . indexOf ( 'output PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] if _is_complex ( controller_pe ) : continue members_in = self . _get_complex_members ( input_pe ) members_out = self . _get_complex_members ( output_pe ) if not ( members_in and members_out ) : continue if len ( members_out ) != 2 : continue gdp_in = False for member in members_in : if isinstance ( member , Agent ) and member . name == 'GDP' : gdp_in = True gtp_out = False for member in members_out : if isinstance ( member , Agent ) and member . name == 'GTP' : gtp_out = True if not ( gdp_in and gtp_out ) : continue ras_list = self . _get_agents_from_entity ( input_spe ) gef_list = self . _get_agents_from_entity ( controller_pe ) ev = self . _get_evidence ( control ) for gef , ras in itertools . product ( _listify ( gef_list ) , _listify ( ras_list ) ) : st = Gef ( gef , ras , evidence = ev ) st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec ) | Extract Gef INDRA Statements from the BioPAX model . |
17,368 | def get_gap ( self ) : p = self . _gef_gap_base ( ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_pe = r [ p . indexOf ( 'output PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] if _is_complex ( controller_pe ) : continue members_in = self . _get_complex_members ( input_pe ) members_out = self . _get_complex_members ( output_pe ) if not ( members_in and members_out ) : continue if len ( members_out ) != 2 : continue gtp_in = False for member in members_in : if isinstance ( member , Agent ) and member . name == 'GTP' : gtp_in = True gdp_out = False for member in members_out : if isinstance ( member , Agent ) and member . name == 'GDP' : gdp_out = True if not ( gtp_in and gdp_out ) : continue ras_list = self . _get_agents_from_entity ( input_spe ) gap_list = self . _get_agents_from_entity ( controller_pe ) ev = self . _get_evidence ( control ) for gap , ras in itertools . product ( _listify ( gap_list ) , _listify ( ras_list ) ) : st = Gap ( gap , ras , evidence = ev ) st_dec = decode_obj ( st , encoding = 'utf-8' ) self . statements . append ( st_dec ) | Extract Gap INDRA Statements from the BioPAX model . |
17,369 | def _get_entity_mods ( bpe ) : if _is_entity ( bpe ) : features = bpe . getFeature ( ) . toArray ( ) else : features = bpe . getEntityFeature ( ) . toArray ( ) mods = [ ] for feature in features : if not _is_modification ( feature ) : continue mc = BiopaxProcessor . _extract_mod_from_feature ( feature ) if mc is not None : mods . append ( mc ) return mods | Get all the modifications of an entity in INDRA format |
17,370 | def _get_generic_modification ( self , mod_class ) : mod_type = modclass_to_modtype [ mod_class ] if issubclass ( mod_class , RemoveModification ) : mod_gain_const = mcct . LOSS mod_type = modtype_to_inverse [ mod_type ] else : mod_gain_const = mcct . GAIN mod_filter = mod_type [ : 5 ] p = BiopaxProcessor . _construct_modification_pattern ( ) p . add ( mcc ( mod_gain_const , mod_filter ) , "input simple PE" , "output simple PE" ) s = _bpp ( 'Searcher' ) res = s . searchPlain ( self . model , p ) res_array = [ _match_to_array ( m ) for m in res . toArray ( ) ] stmts = [ ] for r in res_array : controller_pe = r [ p . indexOf ( 'controller PE' ) ] input_pe = r [ p . indexOf ( 'input PE' ) ] input_spe = r [ p . indexOf ( 'input simple PE' ) ] output_spe = r [ p . indexOf ( 'output simple PE' ) ] reaction = r [ p . indexOf ( 'Conversion' ) ] control = r [ p . indexOf ( 'Control' ) ] if not _is_catalysis ( control ) : continue cat_dir = control . getCatalysisDirection ( ) if cat_dir is not None and cat_dir . name ( ) != 'LEFT_TO_RIGHT' : logger . debug ( 'Unexpected catalysis direction: %s.' % control . getCatalysisDirection ( ) ) continue enzs = BiopaxProcessor . _get_primary_controller ( controller_pe ) if not enzs : continue subs = BiopaxProcessor . _get_agents_from_entity ( input_spe , expand_pe = False ) ev = self . _get_evidence ( control ) for enz , sub in itertools . product ( _listify ( enzs ) , _listify ( subs ) ) : mod_in = BiopaxProcessor . _get_entity_mods ( input_spe ) mod_out = BiopaxProcessor . _get_entity_mods ( output_spe ) sub . mods = _get_mod_intersection ( mod_in , mod_out ) if issubclass ( mod_class , AddModification ) : gained_mods = _get_mod_difference ( mod_out , mod_in ) else : gained_mods = _get_mod_difference ( mod_in , mod_out ) for mod in gained_mods : if mod . mod_type not in ( mod_type , modtype_to_inverse [ mod_type ] ) : continue stmt = mod_class ( enz , sub , mod . residue , mod . position , evidence = ev ) stmts . 
append ( decode_obj ( stmt , encoding = 'utf-8' ) ) return stmts | Get all modification reactions given a Modification class . |
17,371 | def _construct_modification_pattern ( ) : p = _bpp ( 'Pattern' ) ( _bpimpl ( 'PhysicalEntity' ) ( ) . getModelInterface ( ) , 'controller PE' ) p . add ( cb . peToControl ( ) , "controller PE" , "Control" ) p . add ( cb . controlToConv ( ) , "Control" , "Conversion" ) p . add ( _bpp ( 'constraint.NOT' ) ( cb . participant ( ) ) , "Conversion" , "controller PE" ) p . add ( pt ( rt . INPUT , True ) , "Control" , "Conversion" , "input PE" ) p . add ( cb . linkToSpecific ( ) , "input PE" , "input simple PE" ) p . add ( cb . peToER ( ) , "input simple PE" , "input simple ER" ) p . add ( tp ( _bpimpl ( 'Protein' ) ( ) . getModelInterface ( ) ) , "input simple PE" ) p . add ( cs ( cst . OTHER_SIDE ) , "input PE" , "Conversion" , "output PE" ) p . add ( _bpp ( 'constraint.Equality' ) ( False ) , "input PE" , "output PE" ) p . add ( cb . linkToSpecific ( ) , "output PE" , "output simple PE" ) p . add ( cb . peToER ( ) , "output simple PE" , "output simple ER" ) p . add ( _bpp ( 'constraint.Equality' ) ( True ) , "input simple ER" , "output simple ER" ) p . add ( tp ( _bpimpl ( 'Protein' ) ( ) . getModelInterface ( ) ) , "output simple PE" ) p . add ( _bpp ( 'constraint.NOT' ) ( cb . linkToSpecific ( ) ) , "input PE" , "output simple PE" ) p . add ( _bpp ( 'constraint.NOT' ) ( cb . linkToSpecific ( ) ) , "output PE" , "input simple PE" ) return p | Construct the BioPAX pattern to extract modification reactions . |
17,372 | def _extract_mod_from_feature ( mf ) : mf_type = mf . getModificationType ( ) if mf_type is None : return None mf_type_terms = mf_type . getTerm ( ) . toArray ( ) known_mf_type = None for t in mf_type_terms : if t . startswith ( 'MOD_RES ' ) : t = t [ 8 : ] mf_type_indra = _mftype_dict . get ( t ) if mf_type_indra is not None : known_mf_type = mf_type_indra break if not known_mf_type : logger . debug ( 'Skipping modification with unknown terms: %s' % ', ' . join ( mf_type_terms ) ) return None mod_type , residue = known_mf_type mf_pos = mf . getFeatureLocation ( ) if mf_pos is not None : if not mf_pos . modelInterface . getName ( ) == 'org.biopax.paxtools.model.level3.SequenceSite' : mod_pos = None else : mf_site = cast ( _bp ( 'SequenceSite' ) , mf_pos ) mf_pos_status = mf_site . getPositionStatus ( ) if mf_pos_status is None : mod_pos = None elif mf_pos_status and mf_pos_status . toString ( ) != 'EQUAL' : logger . debug ( 'Modification site position is %s' % mf_pos_status . toString ( ) ) else : mod_pos = mf_site . getSequencePosition ( ) mod_pos = '%s' % mod_pos else : mod_pos = None mc = ModCondition ( mod_type , residue , mod_pos , True ) return mc | Extract the type of modification and the position from a ModificationFeature object in the INDRA format . |
17,373 | def _get_entref ( bpe ) : if not _is_reference ( bpe ) : try : er = bpe . getEntityReference ( ) except AttributeError : return None return er else : return bpe | Return the entity reference of an entity if it exists , or return the element itself if it is already an entity reference .
17,374 | def _stmt_location_to_agents ( stmt , location ) : if location is None : return agents = stmt . agent_list ( ) for a in agents : if a is not None : a . location = location | Apply an event location to the Agents in the corresponding Statement . |
17,375 | def get_all_events ( self ) : self . all_events = { } events = self . tree . findall ( 'EVENT' ) events += self . tree . findall ( 'CC' ) for e in events : event_id = e . attrib [ 'id' ] if event_id in self . _static_events : continue event_type = e . find ( 'type' ) . text try : self . all_events [ event_type ] . append ( event_id ) except KeyError : self . all_events [ event_type ] = [ event_id ] | Make a list of all events in the TRIPS EKB . |
17,376 | def get_activations ( self ) : act_events = self . tree . findall ( "EVENT/[type='ONT::ACTIVATE']" ) inact_events = self . tree . findall ( "EVENT/[type='ONT::DEACTIVATE']" ) inact_events += self . tree . findall ( "EVENT/[type='ONT::INHIBIT']" ) for event in ( act_events + inact_events ) : event_id = event . attrib [ 'id' ] if event_id in self . _static_events : continue agent = event . find ( ".//*[@role=':AGENT']" ) if agent is None : continue agent_id = agent . attrib . get ( 'id' ) if agent_id is None : logger . debug ( 'Skipping activation with missing activator agent' ) continue activator_agent = self . _get_agent_by_id ( agent_id , event_id ) if activator_agent is None : continue affected = event . find ( ".//*[@role=':AFFECTED']" ) if affected is None : logger . debug ( 'Skipping activation with missing affected agent' ) continue affected_id = affected . attrib . get ( 'id' ) if affected_id is None : logger . debug ( 'Skipping activation with missing affected agent' ) continue affected_agent = self . _get_agent_by_id ( affected_id , event_id ) if affected_agent is None : logger . debug ( 'Skipping activation with missing affected agent' ) continue is_activation = True if _is_type ( event , 'ONT::ACTIVATE' ) : self . _add_extracted ( 'ONT::ACTIVATE' , event . attrib [ 'id' ] ) elif _is_type ( event , 'ONT::INHIBIT' ) : is_activation = False self . _add_extracted ( 'ONT::INHIBIT' , event . attrib [ 'id' ] ) elif _is_type ( event , 'ONT::DEACTIVATE' ) : is_activation = False self . _add_extracted ( 'ONT::DEACTIVATE' , event . attrib [ 'id' ] ) ev = self . _get_evidence ( event ) location = self . _get_event_location ( event ) for a1 , a2 in _agent_list_product ( ( activator_agent , affected_agent ) ) : if is_activation : st = Activation ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) else : st = Inhibition ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) _stmt_location_to_agents ( st , location ) self . statements . 
append ( st ) | Extract direct Activation INDRA Statements . |
17,377 | def get_activations_causal ( self ) : ccs = self . tree . findall ( "CC/[type='ONT::CAUSE']" ) for cc in ccs : factor = cc . find ( "arg/[@role=':FACTOR']" ) outcome = cc . find ( "arg/[@role=':OUTCOME']" ) if factor is None or outcome is None : continue factor_id = factor . attrib . get ( 'id' ) factor_term = self . tree . find ( "TERM/[@id='%s']" % factor_id ) outcome_id = outcome . attrib . get ( 'id' ) outcome_event = self . tree . find ( "EVENT/[@id='%s']" % outcome_id ) if factor_term is None or outcome_event is None : continue factor_term_type = factor_term . find ( 'type' ) if factor_term_type is None or factor_term_type . text not in molecule_types : continue factor_agent = self . _get_agent_by_id ( factor_id , None ) if factor_agent is None : continue outcome_event_type = outcome_event . find ( 'type' ) if outcome_event_type is None : continue ev = self . _get_evidence ( cc ) ev . epistemics [ 'direct' ] = False location = self . _get_event_location ( outcome_event ) if outcome_event_type . text in [ 'ONT::ACTIVATE' , 'ONT::ACTIVITY' , 'ONT::DEACTIVATE' ] : if outcome_event_type . text in [ 'ONT::ACTIVATE' , 'ONT::DEACTIVATE' ] : agent_tag = outcome_event . find ( ".//*[@role=':AFFECTED']" ) elif outcome_event_type . text == 'ONT::ACTIVITY' : agent_tag = outcome_event . find ( ".//*[@role=':AGENT']" ) if agent_tag is None or agent_tag . attrib . get ( 'id' ) is None : continue outcome_agent = self . _get_agent_by_id ( agent_tag . attrib [ 'id' ] , outcome_id ) if outcome_agent is None : continue if outcome_event_type . text == 'ONT::DEACTIVATE' : is_activation = False else : is_activation = True for a1 , a2 in _agent_list_product ( ( factor_agent , outcome_agent ) ) : if is_activation : st = Activation ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) else : st = Inhibition ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) _stmt_location_to_agents ( st , location ) self . statements . append ( st ) | Extract causal Activation INDRA Statements . |
17,378 | def get_activations_stimulate ( self ) : stim_events = self . tree . findall ( "EVENT/[type='ONT::STIMULATE']" ) for event in stim_events : event_id = event . attrib . get ( 'id' ) if event_id in self . _static_events : continue controller = event . find ( "arg1/[@role=':AGENT']" ) affected = event . find ( "arg2/[@role=':AFFECTED']" ) if controller is None or affected is None : continue controller_id = controller . attrib . get ( 'id' ) controller_term = self . tree . find ( "TERM/[@id='%s']" % controller_id ) affected_id = affected . attrib . get ( 'id' ) affected_event = self . tree . find ( "EVENT/[@id='%s']" % affected_id ) if controller_term is None or affected_event is None : continue controller_term_type = controller_term . find ( 'type' ) if controller_term_type is None or controller_term_type . text not in molecule_types : continue controller_agent = self . _get_agent_by_id ( controller_id , None ) if controller_agent is None : continue affected_event_type = affected_event . find ( 'type' ) if affected_event_type is None : continue ev = self . _get_evidence ( event ) ev . epistemics [ 'direct' ] = False location = self . _get_event_location ( affected_event ) if affected_event_type . text == 'ONT::ACTIVATE' : affected = affected_event . find ( ".//*[@role=':AFFECTED']" ) if affected is None : continue affected_agent = self . _get_agent_by_id ( affected . attrib [ 'id' ] , affected_id ) if affected_agent is None : continue for a1 , a2 in _agent_list_product ( ( controller_agent , affected_agent ) ) : st = Activation ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) _stmt_location_to_agents ( st , location ) self . statements . append ( st ) elif affected_event_type . text == 'ONT::ACTIVITY' : agent_tag = affected_event . find ( ".//*[@role=':AGENT']" ) if agent_tag is None : continue affected_agent = self . _get_agent_by_id ( agent_tag . 
attrib [ 'id' ] , affected_id ) if affected_agent is None : continue for a1 , a2 in _agent_list_product ( ( controller_agent , affected_agent ) ) : st = Activation ( a1 , a2 , evidence = [ deepcopy ( ev ) ] ) _stmt_location_to_agents ( st , location ) self . statements . append ( st ) | Extract Activation INDRA Statements via stimulation . |
17,379 | def get_degradations ( self ) : deg_events = self . tree . findall ( "EVENT/[type='ONT::CONSUME']" ) for event in deg_events : if event . attrib [ 'id' ] in self . _static_events : continue affected = event . find ( ".//*[@role=':AFFECTED']" ) if affected is None : msg = 'Skipping degradation event with no affected term.' logger . debug ( msg ) continue affected_id = affected . attrib . get ( 'id' ) if affected_id is None : logger . debug ( 'Skipping degradation event with missing affected agent' ) continue affected_agent = self . _get_agent_by_id ( affected_id , event . attrib [ 'id' ] ) if affected_agent is None : logger . debug ( 'Skipping degradation event with missing affected agent' ) continue agent = event . find ( ".//*[@role=':AGENT']" ) if agent is None : agent_agent = None else : agent_id = agent . attrib . get ( 'id' ) if agent_id is None : agent_agent = None else : agent_agent = self . _get_agent_by_id ( agent_id , event . attrib [ 'id' ] ) ev = self . _get_evidence ( event ) location = self . _get_event_location ( event ) for subj , obj in _agent_list_product ( ( agent_agent , affected_agent ) ) : st = DecreaseAmount ( subj , obj , evidence = deepcopy ( ev ) ) _stmt_location_to_agents ( st , location ) self . statements . append ( st ) self . _add_extracted ( _get_type ( event ) , event . attrib [ 'id' ] ) | Extract Degradation INDRA Statements . |
17,380 | def get_complexes ( self ) : bind_events = self . tree . findall ( "EVENT/[type='ONT::BIND']" ) bind_events += self . tree . findall ( "EVENT/[type='ONT::INTERACT']" ) for event in bind_events : if event . attrib [ 'id' ] in self . _static_events : continue arg1 = event . find ( "arg1" ) arg2 = event . find ( "arg2" ) if arg1 is None and arg2 is None : args = list ( event . findall ( 'arg' ) ) if len ( args ) < 2 : continue arg1 = args [ 0 ] arg2 = args [ 1 ] if ( arg1 is None or arg1 . attrib . get ( 'id' ) is None ) or ( arg2 is None or arg2 . attrib . get ( 'id' ) is None ) : logger . debug ( 'Skipping complex with less than 2 members' ) continue agent1 = self . _get_agent_by_id ( arg1 . attrib [ 'id' ] , event . attrib [ 'id' ] ) agent2 = self . _get_agent_by_id ( arg2 . attrib [ 'id' ] , event . attrib [ 'id' ] ) if agent1 is None or agent2 is None : logger . debug ( 'Skipping complex with less than 2 members' ) continue ev = self . _get_evidence ( event ) location = self . _get_event_location ( event ) for a1 , a2 in _agent_list_product ( ( agent1 , agent2 ) ) : st = Complex ( [ a1 , a2 ] , evidence = deepcopy ( ev ) ) _stmt_location_to_agents ( st , location ) self . statements . append ( st ) self . _add_extracted ( _get_type ( event ) , event . attrib [ 'id' ] ) | Extract Complex INDRA Statements . |
17,381 | def get_modifications ( self ) : mod_event_types = list ( ont_to_mod_type . keys ( ) ) mod_event_types += [ 'ONT::PTM' ] mod_events = [ ] for mod_event_type in mod_event_types : events = self . tree . findall ( "EVENT/[type='%s']" % mod_event_type ) mod_extracted = self . extracted_events . get ( mod_event_type , [ ] ) for event in events : event_id = event . attrib . get ( 'id' ) if event_id not in mod_extracted : mod_events . append ( event ) for event in mod_events : stmts = self . _get_modification_event ( event ) if stmts : for stmt in stmts : self . statements . append ( stmt ) | Extract all types of Modification INDRA Statements . |
17,382 | def get_modifications_indirect ( self ) : mod_event_types = list ( ont_to_mod_type . keys ( ) ) mod_event_types += [ 'ONT::PTM' ] def get_increase_events ( mod_event_types ) : mod_events = [ ] events = self . tree . findall ( "EVENT/[type='ONT::INCREASE']" ) for event in events : affected = event . find ( ".//*[@role=':AFFECTED']" ) if affected is None : continue affected_id = affected . attrib . get ( 'id' ) if not affected_id : continue pattern = "EVENT/[@id='%s']" % affected_id affected_event = self . tree . find ( pattern ) if affected_event is not None : affected_type = affected_event . find ( 'type' ) if affected_type is not None and affected_type . text in mod_event_types : mod_events . append ( event ) return mod_events def get_cause_events ( mod_event_types ) : mod_events = [ ] ccs = self . tree . findall ( "CC/[type='ONT::CAUSE']" ) for cc in ccs : outcome = cc . find ( ".//*[@role=':OUTCOME']" ) if outcome is None : continue outcome_id = outcome . attrib . get ( 'id' ) if not outcome_id : continue pattern = "EVENT/[@id='%s']" % outcome_id outcome_event = self . tree . find ( pattern ) if outcome_event is not None : outcome_type = outcome_event . find ( 'type' ) if outcome_type is not None and outcome_type . text in mod_event_types : mod_events . append ( cc ) return mod_events mod_events = get_increase_events ( mod_event_types ) mod_events += get_cause_events ( mod_event_types ) for event in mod_events : event_id = event . attrib [ 'id' ] if event_id in self . _static_events : continue event_type = _get_type ( event ) enzyme = event . find ( ".//*[@role=':AGENT']" ) if enzyme is None : enzyme = event . find ( ".//*[@role=':FACTOR']" ) if enzyme is None : return enzyme_id = enzyme . attrib . get ( 'id' ) if enzyme_id is None : continue enzyme_agent = self . _get_agent_by_id ( enzyme_id , event_id ) affected_event_tag = event . find ( ".//*[@role=':AFFECTED']" ) if affected_event_tag is None : affected_event_tag = event . 
find ( ".//*[@role=':OUTCOME']" ) if affected_event_tag is None : return affected_id = affected_event_tag . attrib . get ( 'id' ) if not affected_id : return affected_event = self . tree . find ( "EVENT/[@id='%s']" % affected_id ) if affected_event is None : return for enz_t in _agent_list_product ( ( enzyme_agent , ) ) : enz = enz_t [ 0 ] stmts = self . _get_modification_event ( affected_event ) stmts_to_make = [ ] if stmts : for stmt in stmts : if stmt . enz is None and stmt . sub is not None : stmts_to_make . append ( stmt ) for stmt in stmts_to_make : stmt . enz = enz for ev in stmt . evidence : ev . epistemics [ 'direct' ] = False self . statements . append ( stmt ) self . _add_extracted ( event_type , event . attrib [ 'id' ] ) self . _add_extracted ( affected_event . find ( 'type' ) . text , affected_id ) | Extract indirect Modification INDRA Statements . |
17,383 | def get_agents ( self ) : agents_dict = self . get_term_agents ( ) agents = [ a for a in agents_dict . values ( ) if a is not None ] return agents | Return list of INDRA Agents corresponding to TERMs in the EKB . |
17,384 | def get_term_agents ( self ) : terms = self . tree . findall ( 'TERM' ) agents = { } assoc_links = [ ] for term in terms : term_id = term . attrib . get ( 'id' ) if term_id : agent = self . _get_agent_by_id ( term_id , None ) agents [ term_id ] = agent aw = term . find ( 'assoc-with' ) if aw is not None : aw_id = aw . attrib . get ( 'id' ) if aw_id : assoc_links . append ( ( term_id , aw_id ) ) for source , target in assoc_links : if target in agents and source in agents : agents . pop ( source ) return agents | Return dict of INDRA Agents keyed by corresponding TERMs in the EKB . |
17,385 | def _get_evidence_text ( self , event_tag ) : par_id = event_tag . attrib . get ( 'paragraph' ) uttnum = event_tag . attrib . get ( 'uttnum' ) event_text = event_tag . find ( 'text' ) if self . sentences is not None and uttnum is not None : sentence = self . sentences [ uttnum ] elif event_text is not None : sentence = event_text . text else : sentence = None return sentence | Extract the evidence sentence text for an event .
17,386 | def get_causal_edge ( stmt , activates ) : any_contact = any ( evidence . epistemics . get ( 'direct' , False ) for evidence in stmt . evidence ) if any_contact : return pc . DIRECTLY_INCREASES if activates else pc . DIRECTLY_DECREASES return pc . INCREASES if activates else pc . DECREASES | Returns the causal polar edge with the correct contact . |
17,387 | def to_database ( self , manager = None ) : network = pybel . to_database ( self . model , manager = manager ) return network | Send the model to the PyBEL database |
17,388 | def get_binding_site_name ( agent ) : grounding = agent . get_grounding ( ) if grounding != ( None , None ) : uri = hierarchies [ 'entity' ] . get_uri ( grounding [ 0 ] , grounding [ 1 ] ) parents = hierarchies [ 'entity' ] . get_parents ( uri , 'top' ) if parents : parent_uri = sorted ( parents ) [ 0 ] parent_agent = _agent_from_uri ( parent_uri ) binding_site = _n ( parent_agent . name ) . lower ( ) return binding_site binding_site = _n ( agent . name ) . lower ( ) return binding_site | Return a binding site name from a given agent . |
def get_mod_site_name(mod_condition):
    """Return a site name string for a modification condition.

    Uses the residue letter when available, otherwise the abbreviation
    of the modification type, followed by the position if one is set.
    """
    if mod_condition.residue is not None:
        mod_str = mod_condition.residue
    else:
        mod_str = abbrevs[mod_condition.mod_type]
    pos = '' if mod_condition.position is None else mod_condition.position
    return '%s%s' % (mod_str, pos)
def process_flat_files(id_mappings_file, complexes_file=None, ptm_file=None,
                       ppi_file=None, seq_file=None, motif_window=7):
    """Get INDRA Statements from HPRD flat-file data.

    Loads the HPRD ID-mapping table plus whichever of the complexes,
    PTM and PPI tables are provided, and returns an HprdProcessor built
    from them. A FASTA sequence file (seq_file) is required whenever
    ptm_file is given; motif_window is passed through to the processor.

    Raises ValueError if no data file is given, or if ptm_file is given
    without seq_file.
    """
    def read_tsv(path, cols, **kwargs):
        # All HPRD tables are headerless, tab-separated and string-typed.
        return pd.read_csv(path, delimiter='\t', names=cols, dtype='str',
                           **kwargs)

    id_df = read_tsv(id_mappings_file, _hprd_id_cols).set_index('HPRD_ID')
    if complexes_file is None and ptm_file is None and ppi_file is None:
        raise ValueError('At least one of complexes_file, ptm_file, or '
                         'ppi_file must be given.')
    if ptm_file and not seq_file:
        raise ValueError('If ptm_file is given, seq_file must also be given.')
    cplx_df = (read_tsv(complexes_file, _cplx_cols,
                        na_values=['-', 'None'])
               if complexes_file else None)
    if ptm_file:
        ptm_df = read_tsv(ptm_file, _ptm_cols, na_values='-')
        seq_dict = load_fasta_sequences(seq_file, id_index=2)
    else:
        ptm_df = None
        seq_dict = None
    ppi_df = read_tsv(ppi_file, _ppi_cols) if ppi_file else None
    return HprdProcessor(id_df, cplx_df, ptm_df, ppi_df, seq_dict,
                         motif_window)
def _gather_active_forms(self):
    """Collect the active forms of each Agent from ActiveForm Statements."""
    for stmt in (s for s in self.statements if isinstance(s, ActiveForm)):
        base_agent = self.agent_set.get_create_base_agent(stmt.agent)
        form = stmt.agent
        if form.activity:
            # Strip the activity flag so the form is defined purely by
            # its state, not by an activity annotation.
            form = fast_deepcopy(form)
            form.activity = None
        base_agent.add_activity_form(form, stmt.is_active)
def replace_activities(self):
    """Replace Agent activity flags with explicit Agent states when possible.

    For every Statement whose Agents carry an ``activity`` condition,
    look up the known active/inactive forms of the corresponding base
    Agent (collected by ``_gather_active_forms``) and emit one copy of
    the Statement for each combination of concrete forms substituted
    for the flagged Agents. Statements without activity-carrying Agents
    pass through unchanged. ``self.statements`` is replaced with the
    expanded list.
    """
    logger.debug('Running PySB Preassembler replace activities')
    new_stmts = []

    def has_agent_activity(stmt):
        # True if any Agent in the Statement carries an activity flag.
        for agent in stmt.agent_list():
            if isinstance(agent, Agent) and agent.activity is not None:
                return True
        return False
    # Populate base-agent active/inactive forms before expanding.
    self._gather_active_forms()
    for j, stmt in enumerate(self.statements):
        logger.debug('%d/%d %s' % (j + 1, len(self.statements), stmt))
        # Statements with no activity flags are kept as-is.
        if not has_agent_activity(stmt):
            new_stmts.append(stmt)
            continue
        stmt_agents = stmt.agent_list()
        num_agents = len(stmt_agents)
        # One list of candidate forms per Agent position; the product of
        # these lists yields all substituted Statement variants.
        agent_forms = [[] for a in stmt_agents]
        for i, agent in enumerate(stmt_agents):
            if agent is not None and isinstance(agent, Agent) and agent.activity is not None:
                base_agent = self.agent_set.get_create_base_agent(agent)
                if agent.activity.is_active:
                    active_forms = base_agent.active_forms
                    # No known active forms: fall back to the Agent itself.
                    if not active_forms:
                        active_forms = [agent]
                else:
                    active_forms = base_agent.inactive_forms
                    if not active_forms:
                        active_forms = [agent]
                # Copy the Agent and transfer each form's context onto it.
                for af in active_forms:
                    new_agent = fast_deepcopy(agent)
                    self._set_agent_context(af, new_agent)
                    agent_forms[i].append(new_agent)
            else:
                # No activity flag here: keep the Agent unchanged.
                agent_forms[i].append(agent)
        # Emit one Statement per combination of concrete Agent forms.
        agent_combs = itertools.product(*agent_forms)
        for agent_comb in agent_combs:
            new_stmt = fast_deepcopy(stmt)
            new_stmt.set_agent_list(agent_comb)
            new_stmts.append(new_stmt)
    self.statements = new_stmts
def add_reverse_effects(self):
    """Add Statements for the reverse effects of some Statements.

    For each phosphorylation site that never appears in a
    Dephosphorylation, add a generic phosphatase-driven
    Dephosphorylation; for each entity that is synthesized but never
    degraded, add a generic DecreaseAmount. This keeps the resulting
    model from accumulating modifications/amounts irreversibly.

    Improvements over the original: the try/except-KeyError grouping
    idiom is replaced by defaultdict(list), and the per-synthesis
    membership test uses a set instead of an O(n) list scan.
    """
    from collections import defaultdict
    # (residue, position) sites gaining / losing phosphorylation, per agent
    pos_mod_sites = defaultdict(list)
    neg_mod_sites = defaultdict(list)
    syntheses = []
    degradations = []
    for stmt in self.statements:
        if isinstance(stmt, Phosphorylation):
            pos_mod_sites[stmt.sub.name].append((stmt.residue, stmt.position))
        elif isinstance(stmt, Dephosphorylation):
            neg_mod_sites[stmt.sub.name].append((stmt.residue, stmt.position))
        elif isinstance(stmt, Influence):
            pol = stmt.overall_polarity()
            if pol == 1:
                syntheses.append(stmt.obj.name)
            elif pol == -1:
                degradations.append(stmt.obj.name)
        elif isinstance(stmt, IncreaseAmount):
            syntheses.append(stmt.obj.name)
        elif isinstance(stmt, DecreaseAmount):
            degradations.append(stmt.obj.name)
    new_stmts = []
    # Phosphorylated sites with no corresponding dephosphorylation
    for agent_name, pos_sites in pos_mod_sites.items():
        neg_sites = neg_mod_sites.get(agent_name, [])
        for residue, position in set(pos_sites).difference(set(neg_sites)):
            new_stmts.append(
                Dephosphorylation(Agent('phosphatase'), Agent(agent_name),
                                  residue, position))
    # Synthesized agents with no corresponding degradation
    degraded = set(degradations)
    for agent_name in syntheses:
        if agent_name not in degraded:
            new_stmts.append(DecreaseAmount(None, Agent(agent_name)))
    self.statements += new_stmts
def _get_uniprot_id(agent):
    """Return the UniProt ID for an agent, via HGNC lookup if necessary.

    Returns None when neither a UP grounding nor an HGNC-resolvable ID
    is available.
    """
    up_id = agent.db_refs.get('UP')
    hgnc_id = agent.db_refs.get('HGNC')
    if up_id is None:
        if hgnc_id is None:
            return None
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        if up_id is None:
            return None
    # Some mappings yield a list of IDs; take the first string entry.
    if not isinstance(up_id, basestring) and isinstance(up_id[0], basestring):
        up_id = up_id[0]
    return up_id
def map_sites(self, stmts):
    """Check a set of statements for invalid modification sites.

    Returns a pair (valid_statements, mapped_statements): statements
    whose sites required no mapping, and the mapped versions of the
    rest.
    """
    valid_statements = []
    mapped_statements = []
    for stmt in stmts:
        mapped = self.map_stmt_sites(stmt)
        if mapped is None:
            valid_statements.append(stmt)
        else:
            mapped_statements.append(mapped)
    return valid_statements, mapped_statements
def _map_agent_sites(self, agent):
    """Check an agent for invalid sites; return (mapped_sites, new_agent).

    new_agent is a deep copy of the agent with any mappable invalid
    sites replaced; mapped_sites lists the site mappings that were
    applied or found invalid. An agent with no mods passes through
    unchanged.
    """
    if agent is None or not agent.mods:
        return [], agent
    new_agent = deepcopy(agent)
    mapped_sites = []
    for idx, mod in enumerate(agent.mods):
        site = self._map_agent_mod(agent, mod)
        # Skip sites that could not be checked or are already valid.
        if not site or site.not_invalid():
            continue
        if site.has_mapping():
            new_agent.mods[idx] = ModCondition(
                mod.mod_type, site.mapped_res, site.mapped_pos,
                mod.is_modified)
        mapped_sites.append(site)
    return mapped_sites, new_agent
def _map_agent_mod(self, agent, mod_condition):
    """Map a single modification condition on an agent.

    Returns the mapped site, or None when the agent has no UniProt ID
    or the condition lacks either a residue or a position.
    """
    up_id = _get_uniprot_id(agent)
    if not up_id:
        logger.debug("No uniprot ID for %s" % agent.name)
        return None
    if mod_condition.position is None or mod_condition.residue is None:
        return None
    return self.map_to_human_ref(
        up_id, 'uniprot', mod_condition.residue, mod_condition.position,
        do_methionine_offset=self.do_methionine_offset,
        do_orthology_mapping=self.do_orthology_mapping,
        do_isoform_mapping=self.do_isoform_mapping)
def _get_graph_reductions(graph):
    """Return transitive reductions on a DAG.

    Maps each node to the last node (in topological order) that has the
    same "frontier" — the set of sink nodes reachable from it — so that
    nodes reaching exactly the same sinks collapse onto one
    representative.

    Fixes over the original: the inner frontier helper recursed on the
    outer ``graph`` instead of its own parameter (a latent shadowing
    bug), and the memo-free recursion recomputed each node's frontier
    exponentially often. Frontiers are now computed once per node in a
    single reverse-topological pass.
    """
    nodes_sort = list(networkx.algorithms.dag.topological_sort(graph))
    # Reverse topological order guarantees every successor's frontier is
    # already available when a node is processed.
    frontier_by_node = {}
    for node in reversed(nodes_sort):
        if graph.out_degree(node) == 0:
            frontier_by_node[node] = set([node])
        else:
            front = set()
            for succ in graph.successors(node):
                front |= frontier_by_node[succ]
            frontier_by_node[node] = front
    frontiers = [frontier_by_node[n] for n in nodes_sort]
    reductions = {}
    # For each node, record the last later-or-equal node with an
    # identical frontier (matches the original's overwrite semantics).
    for i, n1 in enumerate(nodes_sort):
        for j, n2 in enumerate(nodes_sort):
            if i > j:
                continue
            if frontiers[i] == frontiers[j]:
                reductions[n1] = n2
    return reductions
def gather_explicit_activities(self):
    """Aggregate all explicit activities and active forms of Agents.

    Scans every Statement, recording each Agent's activity type on its
    base agent, plus object activities from RegulateActivity Statements
    and active/inactive states from ActiveForm Statements.
    """
    for stmt in self.statements:
        for agent in stmt.agent_list():
            if agent is not None and agent.activity is not None:
                self._get_base(agent).add_activity(
                    agent.activity.activity_type)
        if isinstance(stmt, RegulateActivity):
            # Record the activity type imposed on the regulated object.
            if stmt.obj is not None:
                self._get_base(stmt.obj).add_activity(stmt.obj_activity)
        elif isinstance(stmt, ActiveForm):
            base = self._get_base(stmt.agent)
            base.add_activity(stmt.activity)
            if stmt.is_active:
                base.add_active_state(stmt.activity, stmt.agent,
                                      stmt.evidence)
            else:
                base.add_inactive_state(stmt.activity, stmt.agent,
                                        stmt.evidence)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.