idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
17,600
def grounded_monomer_patterns(model, agent, ignore_activities=False):
    """Generate grounded PySB MonomerPatterns for an INDRA Agent.

    Yields MonomerPatterns for every viable combination of modification
    site assignments (and, optionally, active/inactive form patterns)
    consistent with the agent's state.

    Parameters
    ----------
    model : pysb.Model
        The model whose monomers and annotations are searched.
    agent : indra.statements.Agent
        The agent to ground against the model.
    ignore_activities : bool
        If True, activity-form annotations are not folded into the
        yielded patterns.
    """
    # Fallback for non-INDRA agents: look up the monomer by name only and
    # yield it unconditioned.
    # NOTE(review): this branch does not return after yielding, so control
    # falls through to the grounding search below, which reads
    # agent.db_refs — presumably safe only if such agents carry db_refs;
    # confirm against callers.
    if not isinstance(agent, ist.Agent):
        monomer = model.monomers.get(agent.name)
        if not monomer:
            return
        yield monomer()
    # Find the Monomer whose 'is' annotation (an identifiers.org URL)
    # matches one of the agent's db_refs entries.
    monomer = None
    for ann in model.annotations:
        if monomer:
            break
        if not ann.predicate == 'is':
            continue
        if not isinstance(ann.subject, Monomer):
            continue
        (ns, id) = parse_identifiers_url(ann.object)
        if ns is None and id is None:
            continue
        for db_ns, db_id in agent.db_refs.items():
            if db_ns == ns and db_id == id:
                monomer = ann.subject
                break
    if monomer is None:
        logger.info('No monomer found corresponding to agent %s' % agent)
        return
    # For each modification on the agent, collect the monomer sites that
    # can represent it; sc_list holds one list of alternative site
    # patterns per modification.
    sc_list = []
    for mod in agent.mods:
        mod_sites = {}
        res_sites = set([])
        pos_sites = set([])
        for ann in monomer.site_annotations:
            if ann.predicate == 'is_modification' and \
                    ann.object == mod.mod_type:
                site_state = ann.subject
                assert isinstance(site_state, tuple)
                assert len(site_state) == 2
                # (site_name, modified_state_value)
                mod_sites[site_state[0]] = site_state[1]
            elif ann.predicate == 'is_residue' and \
                    ann.object == mod.residue:
                res_sites.add(ann.subject)
            elif ann.predicate == 'is_position' and \
                    ann.object == mod.position:
                pos_sites.add(ann.subject)
        # Narrow candidate sites by residue/position when specified.
        viable_sites = set(mod_sites.keys())
        if mod.residue is not None:
            viable_sites = viable_sites.intersection(res_sites)
        if mod.position is not None:
            viable_sites = viable_sites.intersection(pos_sites)
        # No site can represent this modification: nothing can be yielded.
        if not viable_sites:
            return
        pattern_list = []
        for site_name in viable_sites:
            pattern_list.append({site_name: (mod_sites[site_name], WILD)})
        sc_list.append(pattern_list)
    # Fold in active/inactive form patterns from model annotations.
    if agent.activity and not ignore_activities:
        rel_type = 'has_active_pattern' if agent.activity.is_active \
            else 'has_inactive_pattern'
        active_form_list = []
        for ann in model.annotations:
            if ann.subject == monomer and ann.predicate == rel_type:
                active_form_list.append(ann.object)
        sc_list.append(active_form_list)
    # Yield one pattern per combination of site-pattern choices.
    for pattern_combo in itertools.product(*sc_list):
        mp_sc = {}
        for pattern in pattern_combo:
            mp_sc.update(pattern)
        if mp_sc:
            yield monomer(**mp_sc)
    # With no state constraints at all, yield the bare monomer.
    if not sc_list:
        yield monomer()
Get monomer patterns for the agent accounting for grounding information .
17,601
def get_monomer_pattern(model, agent, extra_fields=None):
    """Construct a PySB MonomerPattern from an Agent.

    Parameters
    ----------
    model : pysb.Model
        The model in which the Agent's Monomer is looked up by name.
    agent : Agent
        The Agent whose state determines the site pattern.
    extra_fields : Optional[dict]
        Extra site/state entries, applied only where the Agent's own
        pattern did not already set the site.

    Returns
    -------
    MonomerPattern or None
        None if the monomer is missing or the site pattern is invalid.
    """
    name = _n(agent.name)
    try:
        monomer = model.monomers[name]
    except KeyError:
        logger.warning('Monomer with name %s not found in model' % name)
        return None
    pattern = get_site_pattern(agent)
    if extra_fields is not None:
        for key, val in extra_fields.items():
            pattern.setdefault(key, val)
    try:
        return monomer(**pattern)
    except Exception:
        logger.info("Invalid site pattern %s for monomer %s"
                    % (pattern, monomer))
        return None
Construct a PySB MonomerPattern from an Agent .
17,602
def get_site_pattern(agent):
    """Construct a dictionary of Monomer site states from an Agent."""
    if not isinstance(agent, ist.Agent):
        return {}
    pattern = {}
    # Binding conditions: ANY for bound partners, None (unbound) otherwise.
    for bc in agent.bound_conditions:
        pattern[get_binding_site_name(bc.agent)] = ANY if bc.is_bound else None
    # Modification states: site name is residue+position (or the
    # modification-type abbreviation when no residue is given).
    for mod in agent.mods:
        site_base = mod.residue if mod.residue is not None \
            else abbrevs[mod.mod_type]
        pos_str = mod.position if mod.position is not None else ''
        site_name = '%s%s' % (site_base, pos_str)
        state_pair = states[mod.mod_type]
        chosen = state_pair[1] if mod.is_modified else state_pair[0]
        pattern[site_name] = (chosen, WILD)
    # Mutations: site named from-residue(+position), state is to-residue.
    for mc in agent.mutations:
        res_from = mc.residue_from if mc.residue_from else 'mut'
        res_to = mc.residue_to if mc.residue_to else 'X'
        site = res_from if mc.position is None else res_from + mc.position
        pattern[site] = res_to
    # Cellular location.
    if agent.location is not None:
        pattern['loc'] = _n(agent.location)
    # Activity state on the activity-type site.
    if agent.activity is not None:
        act_state = 'active' if agent.activity.is_active else 'inactive'
        pattern[agent.activity.activity_type] = act_state
    return pattern
Construct a dictionary of Monomer site states from an Agent .
17,603
def set_base_initial_condition(model, monomer, value):
    """Set an initial condition for a monomer in its default state.

    The default state uses the first declared state for each stateful
    site (preferring 'cytoplasm' for the location site) and unbound
    (None) for binding sites. The amount parameter is named
    '<monomer>_0' and reused if it already exists.
    """
    base_state = {}
    for site in monomer.sites:
        if site not in monomer.site_states:
            base_state[site] = None
        elif site == 'loc' and 'cytoplasm' in monomer.site_states['loc']:
            base_state['loc'] = 'cytoplasm'
        else:
            base_state[site] = monomer.site_states[site][0]
    init_pattern = monomer(**base_state)
    param_name = monomer.name + '_0'
    try:
        param = model.parameters[param_name]
        param.value = value
    except KeyError:
        param = Parameter(param_name, value)
        model.add_component(param)
    model.initial(init_pattern, param)
Set an initial condition for a monomer in its default state .
17,604
def get_annotation(component, db_name, db_ref):
    """Construct an 'is' Annotation tying a component to a grounding URL.

    Returns None when no identifiers.org URL can be constructed for the
    given namespace/identifier pair.
    """
    url = get_identifiers_url(db_name, db_ref)
    if not url:
        return None
    return Annotation(component, url, 'is')
Construct model Annotations for each component .
17,605
def make_model(self, policies=None, initial_conditions=True,
               reverse_effects=False, model_name='indra_model'):
    """Assemble the PySB model from the collected INDRA Statements.

    Parameters
    ----------
    policies : Optional
        Assembly policies, normalized via self.process_policies.
    initial_conditions : bool
        If True, default initial conditions are added after assembly.
    reverse_effects : bool
        If True, reverse effects are added during preassembly.
    model_name : str
        The name given to the assembled PySB Model.

    Returns
    -------
    pysb.Model
        The assembled model (also stored on self.model).
    """
    # Preassembly: normalize activities (and optionally reverse effects)
    # before assembling; statement list is replaced in place.
    ppa = PysbPreassembler(self.statements)
    self.processed_policies = self.process_policies(policies)
    ppa.replace_activities()
    if reverse_effects:
        ppa.add_reverse_effects()
    self.statements = ppa.statements
    self.model = Model()
    self.model.name = model_name
    self.agent_set = BaseAgentSet()
    # Collect base agents from statements, then create one Monomer per
    # agent with its sites/states and grounding annotations.
    self._monomers()
    for agent_name, agent in self.agent_set.items():
        m = Monomer(_n(agent_name), agent.sites, agent.site_states)
        m.site_annotations = agent.site_annotations
        self.model.add_component(m)
        for db_name, db_ref in agent.db_refs.items():
            a = get_annotation(m, db_name, db_ref)
            if a is not None:
                self.model.add_annotation(a)
        # Record active/inactive form patterns as model annotations so
        # they can be recovered during pattern grounding.
        for af in agent.active_forms:
            self.model.add_annotation(Annotation(m, af,
                                                 'has_active_pattern'))
        for iaf in agent.inactive_forms:
            self.model.add_annotation(Annotation(m, iaf,
                                                 'has_inactive_pattern'))
        for at in agent.activity_types:
            act_site_cond = {at: 'active'}
            self.model.add_annotation(Annotation(m, act_site_cond,
                                                 'has_active_pattern'))
            inact_site_cond = {at: 'inactive'}
            self.model.add_annotation(Annotation(m, inact_site_cond,
                                                 'has_inactive_pattern'))
    # Assemble rules from statements, then optionally add defaults.
    self._assemble()
    if initial_conditions:
        self.add_default_initial_conditions()
    return self.model
Assemble the PySB model from the collected INDRA Statements .
17,606
def add_default_initial_conditions(self, value=None):
    """Set default initial conditions in the PySB model.

    Parameters
    ----------
    value : Optional
        Amount to use for every monomer; falls back to
        self.default_initial_amount when not given. Invalid (non-float)
        values are logged and ignored.
    """
    if value is None:
        amount = self.default_initial_amount
    else:
        try:
            amount = float(value)
        except ValueError:
            logger.error('Invalid initial condition value.')
            return
    if self.model is None:
        return
    for monomer in self.model.monomers:
        set_base_initial_condition(self.model, monomer, amount)
Set default initial conditions in the PySB model .
17,607
def set_expression(self, expression_dict):
    """Set protein expression amounts as initial conditions.

    Parameters
    ----------
    expression_dict : dict
        Maps monomer names to expression amounts. Monomers missing from
        the dict (or mapped to None) get the default initial amount.
    """
    if self.model is None:
        return
    found, missing = [], []
    for monomer in self.model.monomers:
        amount = expression_dict.get(monomer.name)
        if amount is None:
            set_base_initial_condition(self.model, monomer,
                                       self.default_initial_amount)
            missing.append(monomer.name)
            continue
        # NaN amounts are treated as zero before rounding.
        if math.isnan(amount):
            amount = 0
        set_base_initial_condition(self.model, monomer, round(amount))
        found.append(monomer.name)
    logger.info('Monomers set to given context')
    logger.info('-----------------------------')
    for name in found:
        logger.info('%s' % name)
    if missing:
        logger.info('')
        logger.info('Monomers not found in given context')
        logger.info('-----------------------------------')
        for name in missing:
            logger.info('%s' % name)
Set protein expression amounts as initial conditions
17,608
def set_context(self, cell_type):
    """Set protein expression amounts from CCLE as initial conditions.

    Falls back to default initial conditions when no expression data is
    available for the given cell type.
    """
    if self.model is None:
        return
    names = [monomer.name for monomer in self.model.monomers]
    res = context_client.get_protein_expression(names, [cell_type])
    amounts = res.get(cell_type)
    if amounts:
        self.set_expression(amounts)
    else:
        logger.warning('Could not get context for %s cell type.'
                       % cell_type)
        self.add_default_initial_conditions()
Set protein expression amounts from CCLE as initial conditions .
17,609
def export_model(self, format, file_name=None):
    """Save the assembled model in a modeling formalism other than PySB.

    Parameters
    ----------
    format : str
        'sbgn', 'kappa_im', 'kappa_cm', or any format supported by
        pysb.export.
    file_name : Optional[str]
        If given, the exported model is also written to this file.

    Returns
    -------
    str or None
        The exported model string, or None for an unknown format. The
        Kappa exporters write the file themselves and return directly.
    """
    if format == 'sbgn':
        exp_str = export_sbgn(self.model)
    elif format == 'kappa_im':
        return export_kappa_im(self.model, file_name)
    elif format == 'kappa_cm':
        return export_kappa_cm(self.model, file_name)
    else:
        try:
            exp_str = pysb.export.export(self.model, format)
        except KeyError:
            # Fixed: use the module-level logger (as everywhere else in
            # this file) instead of calling logging.error directly.
            logger.error('Unknown export format: %s' % format)
            return None
    if file_name:
        with open(file_name, 'wb') as fh:
            fh.write(exp_str.encode('utf-8'))
    return exp_str
Save the assembled model in a modeling formalism other than PySB .
17,610
def save_rst(self, file_name='pysb_model.rst', module_name='pysb_module'):
    """Save the assembled model as an RST file for literate modeling.

    Parameters
    ----------
    file_name : Optional[str]
        Output path for the RST file.
    module_name : Optional[str]
        The RST label used for the module anchor.
    """
    if self.model is None:
        return
    with open(file_name, 'wt') as fh:
        fh.write('.. _%s:\n\n' % module_name)
        fh.write('Module\n======\n\n')
        fh.write('INDRA-assembled model\n---------------------\n\n')
        fh.write('::\n\n')
        flat = pysb.export.export(self.model, 'pysb_flat')
        # Indent the whole model dump one tab to form an RST literal block.
        fh.write('\t' + flat.replace('\n', '\n\t'))
Save the assembled model as an RST file for literate modeling .
17,611
def _monomers(self):
    """Calls the appropriate monomers method based on policies."""
    for statement in self.statements:
        if not _is_whitelisted(statement):
            continue
        self._dispatch(statement, 'monomers', self.agent_set)
Calls the appropriate monomers method based on policies .
17,612
def send_query(text, service_endpoint='drum', query_args=None):
    """Send a query to the TRIPS web service.

    Parameters
    ----------
    text : str
        The text to be processed.
    service_endpoint : Optional[str]
        One of 'drum', 'drum-dev', 'cwms' or 'cwmsreader'.
    query_args : Optional[dict]
        Extra query arguments; 'input' is set to the given text.

    Returns
    -------
    str
        The raw response text, or an empty string on error.
    """
    valid_endpoints = ('drum', 'drum-dev', 'cwms', 'cwmsreader')
    if service_endpoint not in valid_endpoints:
        logger.error('Invalid service endpoint: %s' % service_endpoint)
        return ''
    url = base_url + service_endpoint
    if query_args is None:
        query_args = {}
    query_args.update({'input': text})
    # Long timeout: readings of large texts can take a while.
    res = requests.get(url, query_args, timeout=3600)
    if not res.status_code == 200:
        logger.error('Problem with TRIPS query: status code %s'
                     % res.status_code)
        return ''
    return res.text
Send a query to the TRIPS web service .
17,613
def get_xml ( html , content_tag = 'ekb' , fail_if_empty = False ) : cont = re . findall ( r'<%(tag)s(.*?)>(.*?)</%(tag)s>' % { 'tag' : content_tag } , html , re . MULTILINE | re . DOTALL ) if cont : events_terms = '' . join ( [ l . strip ( ) for l in cont [ 0 ] [ 1 ] . splitlines ( ) ] ) if 'xmlns' in cont [ 0 ] [ 0 ] : meta = ' ' . join ( [ l . strip ( ) for l in cont [ 0 ] [ 0 ] . splitlines ( ) ] ) else : meta = '' else : events_terms = '' meta = '' if fail_if_empty : assert events_terms != '' , "Got empty string for events content from html:\n%s" % html header = ( '<?xml version="1.0" encoding="utf-8" standalone="yes"?><%s%s>' % ( content_tag , meta ) ) footer = '</%s>' % content_tag return header + events_terms . replace ( '\n' , '' ) + footer
Extract the content XML from the HTML output of the TRIPS web service .
17,614
def save_xml(xml_str, file_name, pretty=True):
    """Save the TRIPS EKB XML in a file.

    Parameters
    ----------
    xml_str : str
        The XML string to save.
    file_name : str
        The path of the output file.
    pretty : Optional[bool]
        If True (default), the XML is pretty-printed before saving.
    """
    # Pretty-print before touching the file so a parse error does not
    # leave an open, partially written handle behind.
    if pretty:
        xmld = xml.dom.minidom.parseString(xml_str)
        xml_str = xmld.toprettyxml()
    try:
        # Fixed: use a context manager so the handle is closed even if a
        # write fails; the original opened the file without `with` and
        # never closed it on error paths.
        with open(file_name, 'wt') as fh:
            fh.write(xml_str)
    except IOError:
        logger.error('Could not open %s for writing.' % file_name)
Save the TRIPS EKB XML in a file .
17,615
def process_table(fname):
    """Return processor by processing a given sheet of a spreadsheet file.

    Parameters
    ----------
    fname : str
        Path to the Excel workbook to process.

    Returns
    -------
    SofiaExcelProcessor
        A processor built from the Relations (or Causal), Events and
        Entities sheets.
    """
    book = openpyxl.load_workbook(fname, read_only=True)
    # Older spreadsheets name the relations sheet 'Causal'.
    try:
        rel_sheet = book['Relations']
    except Exception:
        rel_sheet = book['Causal']
    event_sheet = book['Events']
    entities_sheet = book['Entities']
    return SofiaExcelProcessor(rel_sheet.rows, event_sheet.rows,
                               entities_sheet.rows)
Return processor by processing a given sheet of a spreadsheet file .
17,616
def process_text(text, out_file='sofia_output.json', auth=None):
    """Return processor by processing text given as a string.

    Parameters
    ----------
    text : str
        The text to be processed.
    out_file : Optional[str]
        If given, the raw JSON response is dumped to this file.
    auth : Optional[tuple]
        A (username, password) pair; read from the INDRA configuration
        or environment when not provided.

    Returns
    -------
    SofiaJsonProcessor or None
        None when the web service did not complete successfully.
    """
    if auth:
        user, password = auth
    else:
        user, password = _get_sofia_auth()
    if not user or not password:
        raise ValueError('Could not use SOFIA web service since'
                         ' authentication information is missing. Please'
                         ' set SOFIA_USERNAME and SOFIA_PASSWORD in the'
                         ' INDRA configuration file or as environmental'
                         ' variables.')
    json_response, status_code, process_status = _text_processing(
        text_json={'text': text}, user=user, password=password)
    if process_status != 'Done' or status_code != 200:
        return None
    if out_file:
        with open(out_file, 'w') as fh:
            json.dump(json_response, fh, indent=1)
    return process_json(json_response)
Return processor by processing text given as a string .
17,617
def _get_dict_from_list ( dict_key , list_of_dicts ) : the_dict = [ cur_dict for cur_dict in list_of_dicts if cur_dict . get ( dict_key ) ] if not the_dict : raise ValueError ( 'Could not find a dict with key %s' % dict_key ) return the_dict [ 0 ] [ dict_key ]
Retrieve a specific dict from a list of dicts .
17,618
def _initialize_node_agents(self):
    """Initialize internal dicts containing node information.

    Populates self._node_names and self._node_agents from the CX 'nodes'
    aspect, grounding each node via UniProt aliases when available and
    by HGNC symbol lookup otherwise.
    """
    nodes = _get_dict_from_list('nodes', self.cx)
    invalid_genes = []
    for node in nodes:
        id = node['@id']
        cx_db_refs = self.get_aliases(node)
        up_id = cx_db_refs.get('UP')
        if up_id:
            # UniProt grounding present: derive gene name and HGNC id
            # from the UniProt id.
            gene_name = uniprot_client.get_gene_name(up_id)
            hgnc_id = hgnc_client.get_hgnc_id(gene_name)
            db_refs = {'UP': up_id, 'HGNC': hgnc_id, 'TEXT': gene_name}
            agent = Agent(gene_name, db_refs=db_refs)
            self._node_names[id] = gene_name
            self._node_agents[id] = agent
            continue
        else:
            # No UniProt alias: try grounding the node's display name as
            # an HGNC gene symbol.
            node_name = node['n']
            self._node_names[id] = node_name
            hgnc_id = hgnc_client.get_hgnc_id(node_name)
            db_refs = {'TEXT': node_name}
            if not hgnc_id:
                # Ungroundable name: keep the agent only when grounding
                # is not required; record it either way for logging.
                if not self.require_grounding:
                    self._node_agents[id] = \
                        Agent(node_name, db_refs=db_refs)
                invalid_genes.append(node_name)
            else:
                db_refs.update({'HGNC': hgnc_id})
                up_id = hgnc_client.get_uniprot_id(hgnc_id)
                if up_id:
                    db_refs.update({'UP': up_id})
                self._node_agents[id] = Agent(node_name, db_refs=db_refs)
    if invalid_genes:
        verb = 'Skipped' if self.require_grounding else 'Included'
        logger.info('%s invalid gene symbols: %s'
                    % (verb, ', '.join(invalid_genes)))
Initialize internal dicts containing node information .
17,619
def get_pmids(self):
    """Get list of all PMIDs associated with edges in the network."""
    unique_pmids = set()
    for attrs in self._edge_attributes.values():
        for pmid in attrs.get('pmids') or []:
            unique_pmids.add(pmid)
    return list(unique_pmids)
Get list of all PMIDs associated with edges in the network .
17,620
def get_statements(self):
    """Convert network edges into Statements.

    Iterates the CX 'edges' aspect and, for each edge whose interaction
    type maps to an INDRA Statement class, builds a Statement between
    the corresponding node agents with evidence from the edge.

    Returns
    -------
    list
        The accumulated self.statements list.
    """
    edges = _get_dict_from_list('edges', self.cx)
    for edge in edges:
        edge_type = edge.get('i')
        if not edge_type:
            continue
        stmt_type = _stmt_map.get(edge_type)
        if stmt_type:
            id = edge['@id']
            # Either endpoint may be missing from _node_agents (e.g.
            # ungrounded nodes); skip such edges with a log message.
            source_agent = self._node_agents.get(edge['s'])
            target_agent = self._node_agents.get(edge['t'])
            if not source_agent or not target_agent:
                logger.info("Skipping edge %s->%s: %s"
                            % (self._node_names[edge['s']],
                               self._node_names[edge['t']], edge))
                continue
            ev = self._create_evidence(id)
            # Complex takes a member list; other Statement types take
            # subject and object positionally.
            if stmt_type == Complex:
                stmt = stmt_type([source_agent, target_agent], evidence=ev)
            else:
                stmt = stmt_type(source_agent, target_agent, evidence=ev)
            self.statements.append(stmt)
    return self.statements
Convert network edges into Statements .
17,621
def node_has_edge_with_label(self, node_name, edge_label):
    """Looks for an edge from node_name to some other node with the
    specified label. Returns the node to which this edge points if it
    exists, or None if it doesn't."""
    G = self.G
    for edge in G.edges(node_name):
        target = edge[1]
        if G.edges[node_name, target]['relation'] == edge_label:
            return target
    return None
Looks for an edge from node_name to some other node with the specified label. Returns the node to which this edge points if it exists, or None if it doesn't.
17,622
def general_node_label(self, node):
    """Used for debugging - gives a short text description of a graph
    node."""
    attrs = self.G.node[node]
    if attrs['is_event']:
        return 'event type=' + attrs['type']
    return 'entity text=' + attrs['text']
Used for debugging - gives a short text description of a graph node .
17,623
def print_parent_and_children_info(self, node):
    """Used for debugging - prints a short description of a node, its
    children, its parents, and its parents' children."""
    G = self.G
    parents = G.predecessors(node)
    children = G.successors(node)
    # Fixed: the original called general_node_label(G, node) as a free
    # function, which raises a NameError since it is a method of this
    # class; call it through self instead.
    print(self.general_node_label(node))
    tabs = '\t'
    for parent in parents:
        relation = G.edges[parent, node]['relation']
        print(tabs + 'Parent (%s): %s'
              % (relation, self.general_node_label(parent)))
        for cop in G.successors(parent):
            if cop != node:
                relation = G.edges[parent, cop]['relation']
                print(tabs + 'Child of parent (%s): %s'
                      % (relation, self.general_node_label(cop)))
    for child in children:
        relation = G.edges[node, child]['relation']
        print(tabs + 'Child (%s): (%s)'
              % (relation, self.general_node_label(child)))
Used for debugging - prints a short description of a node, its children, its parents, and its parents' children.
17,624
def find_event_with_outgoing_edges(self, event_name, desired_relations):
    """Gets a list of event nodes with the specified event_name whose
    outgoing edges are annotated with each of the specified relations."""
    G = self.G
    wanted = set(desired_relations)
    matches = []
    for node in G.node.keys():
        attrs = G.node[node]
        if not (attrs['is_event'] and attrs['type'] == event_name):
            continue
        present = {G.edges[node, edge[1]]['relation']
                   for edge in G.edges(node)}
        if wanted.issubset(present):
            matches.append(node)
    return matches
Gets a list of event nodes with the specified event_name and outgoing edges annotated with each of the specified relations .
17,625
def get_related_node(self, node, relation):
    """Looks for an edge from node to some other node, such that the edge
    is annotated with the given relation. If there exists such an edge,
    returns the name of the node it points to. Otherwise, returns
    None."""
    G = self.G
    for edge in G.edges(node):
        neighbor = edge[1]
        if G.edges[node, neighbor]['relation'] == relation:
            return neighbor
    return None
Looks for an edge from node to some other node such that the edge is annotated with the given relation . If there exists such an edge returns the name of the node it points to . Otherwise returns None .
17,626
def get_entity_text_for_relation(self, node, relation):
    """Looks for an edge from node to some other node, such that the edge
    is annotated with the given relation. If such an edge exists and the
    node at the other end is an entity, returns that entity's text.
    Otherwise returns None."""
    G = self.G
    related = self.get_related_node(node, relation)
    if related is None:
        return None
    if G.node[related]['is_event']:
        return None
    return G.node[related]['text']
Looks for an edge from node to some other node, such that the edge is annotated with the given relation. If such an edge exists and the node at the other end is an entity, returns that entity's text. Otherwise returns None.
17,627
def process_increase_expression_amount(self):
    """Looks for Positive_regulation events with a specified Cause and a
    Gene_expression theme and processes them into INDRA statements."""
    statements = []
    pairs = self.find_event_parent_with_event_child(
        'Positive_regulation', 'Gene_expression')
    for pos_reg, expression in pairs:
        cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
        target = self.get_entity_text_for_relation(expression, 'Theme')
        if cause is None or target is None:
            continue
        theme_node = self.get_related_node(expression, 'Theme')
        assert theme_node is not None
        evidence = self.node_to_evidence(theme_node, is_direct=False)
        statements.append(IncreaseAmount(s2a(cause), s2a(target),
                                         evidence=evidence))
    return statements
Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme and processes them into INDRA statements .
17,628
def process_phosphorylation_statements(self):
    """Looks for Phosphorylation events in the graph and extracts them
    into INDRA statements.

    Returns
    -------
    list
        Phosphorylation/Dephosphorylation Statements extracted from
        Positive_regulation events with a Phosphorylation child.
    """
    statements = []
    pwcs = self.find_event_parent_with_event_child(
        'Positive_regulation', 'Phosphorylation')
    for pos_reg, phos in pwcs:
        cause = self.get_entity_text_for_relation(pos_reg, 'Cause')
        theme = self.get_entity_text_for_relation(phos, 'Theme')
        trigger_word = self.get_entity_text_for_relation(
            phos, 'Phosphorylation')
        # Fixed: trigger_word can be None, which made the substring test
        # raise TypeError; a missing trigger is treated as a plain
        # phosphorylation. Also removed a leftover debug print.
        deph = trigger_word is not None and 'dephos' in trigger_word
        site = self.get_entity_text_for_relation(phos, 'Site')
        theme_node = self.get_related_node(phos, 'Theme')
        assert theme_node is not None
        evidence = self.node_to_evidence(theme_node, is_direct=False)
        if theme is not None:
            if deph:
                statements.append(Dephosphorylation(
                    s2a(cause), s2a(theme), site, evidence=evidence))
            else:
                statements.append(Phosphorylation(
                    s2a(cause), s2a(theme), site, evidence=evidence))
    return statements
Looks for Phosphorylation events in the graph and extracts them into INDRA statements .
17,629
def process_binding_statements(self):
    """Looks for Binding events in the graph and extracts them into INDRA
    statements.

    Returns
    -------
    list
        Complex Statements, one per Binding event with two themes.
    """
    # Removed an unused local alias of self.G present in the original.
    statements = []
    binding_nodes = self.find_event_with_outgoing_edges(
        'Binding', ['Theme', 'Theme2'])
    for node in binding_nodes:
        theme1 = self.get_entity_text_for_relation(node, 'Theme')
        theme1_node = self.get_related_node(node, 'Theme')
        theme2 = self.get_entity_text_for_relation(node, 'Theme2')
        # Both themes must resolve to entities by construction of
        # find_event_with_outgoing_edges.
        assert theme1 is not None
        assert theme2 is not None
        evidence = self.node_to_evidence(theme1_node, is_direct=True)
        statements.append(Complex([s2a(theme1), s2a(theme2)],
                                  evidence=evidence))
    return statements
Looks for Binding events in the graph and extracts them into INDRA statements .
17,630
def node_to_evidence(self, entity_node, is_direct):
    """Computes an evidence object for a statement.

    The annotations carry the node and edge properties of the connected
    subgraph around the given entity node.
    """
    sentence_text = self.G.node[entity_node]['sentence_text']
    subgraph = self.connected_subgraph(entity_node)
    edge_properties = {edge: subgraph.edges[edge]
                       for edge in subgraph.edges()}
    annotations = {'node_properties': subgraph.node,
                   'edge_properties': edge_properties}
    return Evidence(source_api='tees', pmid=self.pmid, text=sentence_text,
                    epistemics={'direct': is_direct},
                    annotations=annotations)
Computes an evidence object for a statement .
17,631
def connected_subgraph(self, node):
    """Returns the subgraph containing the given node, its ancestors, and
    its descendants."""
    G = self.G
    members = {node}
    members.update(dag.ancestors(G, node))
    members.update(dag.descendants(G, node))
    # Fixed-point iteration: keep expanding with the ancestors and
    # descendants of every member until the set stops growing.
    while True:
        before = len(members)
        for n in list(members):
            members.update(dag.ancestors(G, n))
            members.update(dag.descendants(G, n))
        if len(members) == before:
            break
    return G.subgraph(members)
Returns the subgraph containing the given node its ancestors and its descendants .
17,632
def process_text(text, save_xml_name='trips_output.xml',
                 save_xml_pretty=True, offline=False,
                 service_endpoint='drum'):
    """Return a TripsProcessor by processing text.

    Parameters
    ----------
    text : str
        The text to be processed.
    save_xml_name : Optional[str]
        If given, the raw EKB XML output is saved to this file.
    save_xml_pretty : Optional[bool]
        If True, the saved XML is pretty-printed.
    offline : Optional[bool]
        If True, a local DRUM reader is used instead of the web service.
    service_endpoint : Optional[str]
        The TRIPS service endpoint to query (default: 'drum').

    Returns
    -------
    TripsProcessor or None
        A processor with extracted content, or None on failure.
    """
    if not offline:
        html = client.send_query(text, service_endpoint)
        xml = client.get_xml(html)
    else:
        if not offline_reading:
            logger.error('Offline reading with TRIPS/DRUM not available.')
            logger.error('Error message was: %s' % offline_err)
            # Fixed: removed a leftover `msg = logger.error(msg)` line
            # that referenced an undefined name and raised NameError.
            return None
        try:
            # Fixed: dropped the dead `if dr is None` check; a
            # constructor never returns None.
            dr = DrumReader()
        except BaseException as e:
            logger.error(e)
            logger.error('Make sure drum/bin/trips-drum is running in'
                         ' a separate process')
            return None
        try:
            dr.read_text(text)
            dr.start()
        except SystemExit:
            pass
        xml = dr.extractions[0]
    if save_xml_name:
        client.save_xml(xml, save_xml_name, save_xml_pretty)
    return process_xml(xml)
Return a TripsProcessor by processing text .
17,633
def process_xml_file(file_name):
    """Return a TripsProcessor by processing a TRIPS EKB XML file."""
    # Read raw bytes and decode explicitly as UTF-8.
    with open(file_name, 'rb') as fh:
        ekb_str = fh.read().decode('utf-8')
    return process_xml(ekb_str)
Return a TripsProcessor by processing a TRIPS EKB XML file .
17,634
def process_xml(xml_string):
    """Return a TripsProcessor by processing a TRIPS EKB XML string.

    Parameters
    ----------
    xml_string : str
        The TRIPS EKB XML content.

    Returns
    -------
    TripsProcessor or None
        None when the XML could not be parsed into a tree.
    """
    tp = TripsProcessor(xml_string)
    if tp.tree is None:
        return None
    # NOTE(review): the order of these extraction calls appears
    # intentional (e.g. indirect modifications before modifications,
    # active forms before activations) — preserve it when modifying.
    tp.get_modifications_indirect()
    tp.get_activations_causal()
    tp.get_activations_stimulate()
    tp.get_complexes()
    tp.get_modifications()
    tp.get_active_forms()
    tp.get_active_forms_state()
    tp.get_activations()
    tp.get_translocation()
    tp.get_regulate_amounts()
    tp.get_degradations()
    tp.get_syntheses()
    tp.get_conversions()
    tp.get_simple_increase_decrease()
    return tp
Return a TripsProcessor by processing a TRIPS EKB XML string .
17,635
def load_eidos_curation_table():
    """Return a pandas table of Eidos curation data.

    Returns
    -------
    pandas.DataFrame
        The rule summary table with the trailing summary row removed.
    """
    url = 'https://raw.githubusercontent.com/clulab/eidos/master/' + \
        'src/main/resources/org/clulab/wm/eidos/english/confidence/' + \
        'rule_summary.tsv'
    res = StringIO(requests.get(url).text)
    # Fixed: pandas.read_table is deprecated; read_csv with sep='\t' is
    # the documented equivalent.
    table = pandas.read_csv(res, sep='\t')
    # Drop the last row, which is a totals/summary row rather than a rule.
    table = table.iloc[:-1]
    return table
Return a pandas table of Eidos curation data .
17,636
def get_eidos_bayesian_scorer(prior_counts=None):
    """Return a BayesianScorer based on Eidos curation counts.

    Parameters
    ----------
    prior_counts : Optional[dict]
        Prior correct/incorrect counts; a deep copy of the default
        priors is used when not given.
    """
    table = load_eidos_curation_table()
    subtype_counts = {'eidos': {rule: [n_corr, n_incorr]
                                for rule, n_corr, n_incorr
                                in zip(table['RULE'],
                                       table['Num correct'],
                                       table['Num incorrect'])}}
    if not prior_counts:
        prior_counts = copy.deepcopy(default_priors)
    return BayesianScorer(prior_counts=prior_counts,
                          subtype_counts=subtype_counts)
Return a BayesianScorer based on Eidos curation counts .
17,637
def get_eidos_scorer():
    """Return a SimpleScorer based on Eidos curated precision estimates."""
    table = load_eidos_curation_table()
    # Overall precision is the count-weighted average of rule precisions.
    total_num = table['COUNT of RULE'].sum()
    weighted_sum = table['COUNT of RULE'].dot(table['% correct'])
    precision = weighted_sum / total_num
    syst_error = 0.05
    rand_error = 1 - precision - syst_error
    prior_probs = {'rand': {'eidos': rand_error},
                   'syst': {'eidos': syst_error}}
    # Per-rule random error, with precision capped at 0.95.
    subtype_probs = {'eidos': {rule: 1.0 - min(corr, 0.95) - syst_error
                               for rule, corr in zip(table['RULE'],
                                                     table['% correct'])}}
    return SimpleScorer(prior_probs, subtype_probs)
Return a SimpleScorer based on Eidos curated precision estimates .
17,638
def process_from_web():
    """Return a TrrustProcessor based on the online interaction table."""
    logger.info('Downloading table from %s' % trrust_human_url)
    res = requests.get(trrust_human_url)
    res.raise_for_status()
    df = pandas.read_table(io.StringIO(res.text))
    processor = TrrustProcessor(df)
    processor.extract_statements()
    return processor
Return a TrrustProcessor based on the online interaction table .
17,639
def process_from_webservice(id_val, id_type='pmcid', source='pmc',
                            with_grounding=True):
    """Return an output from RLIMS-P for the given PubMed ID or PMC ID.

    Parameters
    ----------
    id_val : str
        The identifier value.
    id_type : Optional[str]
        The identifier type, e.g. 'pmcid' or 'pmid'.
    source : Optional[str]
        The document source, e.g. 'pmc'.
    with_grounding : Optional[bool]
        If True, the grounded ('.normed') endpoint is used.
    """
    fmt = '%s.normed/%s/%s' if with_grounding else '%s/%s/%s'
    resp = requests.get(RLIMSP_URL + fmt % (source, id_type, id_val))
    if resp.status_code != 200:
        raise RLIMSP_Error("Bad status code: %d - %s"
                           % (resp.status_code, resp.reason))
    rp = RlimspProcessor(resp.json())
    rp.extract_statements()
    return rp
Return an output from RLIMS-P for the given PubMed ID or PMC ID.
17,640
def process_from_json_file(filename, doc_id_type=None):
    """Process RLIMS-P extractions from a bulk-download JSON file.

    The file is expected to contain one JSON object per line (JSONL).
    """
    with open(filename, 'rt') as f:
        json_list = [json.loads(line) for line in f.readlines()]
    rp = RlimspProcessor(json_list, doc_id_type=doc_id_type)
    rp.extract_statements()
    return rp
Process RLIMSP extractions from a bulk - download JSON file .
17,641
def get(self, key):
    """Find the first value within the tree which has the key."""
    if key in self.keys():
        return self[key]
    # Depth-first search through child mappings.
    for child in self.values():
        if not hasattr(child, 'get'):
            continue
        found = child.get(key)
        if found is not None:
            return found
    return None
Find the first value within the tree which has the key .
17,642
def get_path(self, key):
    """Like `get`, but also return the path taken to the value."""
    if key in self.keys():
        return (key,), self[key]
    key_path, found = None, None
    for branch, child in self.items():
        if isinstance(child, self.__class__):
            # Recurse into same-typed subtrees, which report their own
            # partial path.
            key_path, found = child.get_path(key)
        elif hasattr(child, 'get'):
            found = child.get(key)
            key_path = (key,) if found is not None else None
        if found is not None and key_path is not None:
            key_path = (branch,) + key_path
            break
    return key_path, found
Like get but also return the path taken to the value .
17,643
def gets(self, key):
    """Like `get`, but return all matches, not just the first."""
    matches = []
    if key in self.keys():
        matches.append(self[key])
    for child in self.values():
        if isinstance(child, self.__class__):
            # Same-typed subtrees are searched recursively.
            matches.extend(child.gets(key))
        elif isinstance(child, dict):
            # Plain dicts are only checked one level deep.
            if key in child.keys():
                matches.append(child[key])
    return matches
Like get but return all matches not just the first .
17,644
def get_paths(self, key):
    """Like `gets`, but include the paths, like `get_path` for all
    matches."""
    results = []
    if key in self.keys():
        results.append(((key,), self[key]))
    for branch, child in self.items():
        if isinstance(child, self.__class__):
            # Prefix each recursive result with the branch taken.
            for sub_path, found in child.get_paths(key):
                results.append(((branch,) + sub_path, found))
        elif isinstance(child, dict):
            if key in child.keys():
                results.append(((branch, key), child[key]))
    return results
Like gets but include the paths like get_path for all matches .
17,645
def get_leaves(self):
    """Get the deepest entries as a flat set."""
    leaves = set()
    for val in self.values():
        if isinstance(val, self.__class__):
            # Same-typed subtrees contribute their own leaves.
            leaves.update(val.get_leaves())
        elif isinstance(val, dict):
            leaves.update(val.values())
        elif isinstance(val, list):
            leaves.update(val)
        elif isinstance(val, set):
            leaves.update(val)
        else:
            leaves.add(val)
    return leaves
Get the deepest entries as a flat set .
17,646
def determine_reach_subtype(event_name):
    """Returns the category of reach rule from the reach rule instance.

    Among all rule regexps that match the event name, the longest one is
    chosen as the most specific.
    """
    best_match = None
    best_match_length = None
    for pattern in reach_rule_regexps:
        if not re.search(pattern, event_name):
            continue
        if best_match is None or len(pattern) > best_match_length:
            best_match = pattern
            best_match_length = len(pattern)
    return best_match
Returns the category of reach rule from the reach rule instance .
17,647
def print_event_statistics(self):
    """Print the number of events in the REACH output by type."""
    logger.info('All events by type')
    logger.info('-------------------')
    for event_type, frame_ids in self.all_events.items():
        logger.info('%s, %s' % (event_type, len(frame_ids)))
    logger.info('-------------------')
Print the number of events in the REACH output by type .
17,648
def get_all_events(self):
    """Gather all event IDs in the REACH output by type.

    Populates self.all_events, a dict mapping each event type to the
    list of frame IDs of that type. Leaves it empty when the output has
    no event frames.
    """
    self.all_events = {}
    events = self.tree.execute("$.events.frames")
    if events is None:
        return
    for event in events:
        event_type = event.get('type')
        frame_id = event.get('frame_id')
        # setdefault replaces the original try/except KeyError grouping
        # idiom with a single-lookup equivalent.
        self.all_events.setdefault(event_type, []).append(frame_id)
Gather all event IDs in the REACH output by type .
17,649
def get_modifications(self):
    """Extract Modification INDRA Statements.

    Finds 'protein-modification' event frames, then looks up the
    'regulation' frames pointing at each event to recover the controller,
    polarity and evidence sentence for each modification.
    """
    qstr = "$.events.frames[(@.type is 'protein-modification')]"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        modification_type = r.get('subtype')
        epistemics = self._get_epistemics(r)
        # Negated events are skipped entirely.
        if epistemics.get('negated'):
            continue
        annotations, context = self._get_annot_context(r)
        frame_id = r['frame_id']
        args = r['arguments']
        site = None
        theme = None
        for a in args:
            if self._get_arg_type(a) == 'theme':
                theme = a['arg']
            elif self._get_arg_type(a) == 'site':
                site = a['text']
        theme_agent, theme_coords = self._get_agent_from_entity(theme)
        if site is not None:
            mods = self._parse_site_text(site)
        else:
            # No site given: a single modification with unknown
            # residue/position.
            mods = [(None, None)]
        for mod in mods:
            residue, pos = mod
            # Find regulation frames whose controlled argument is this event.
            qstr = "$.events.frames[(@.type is 'regulation') and " + \
                   "(@.arguments[0].arg is '%s')]" % frame_id
            reg_res = self.tree.execute(qstr)
            reg_res = list(reg_res)
            for reg in reg_res:
                controller_agent, controller_coords = None, None
                for a in reg['arguments']:
                    if self._get_arg_type(a) == 'controller':
                        controller = a.get('arg')
                        if controller is not None:
                            controller_agent, controller_coords = \
                                self._get_agent_from_entity(controller)
                            break
                reg_subtype = reg.get('subtype')
                if reg_subtype == 'negative-regulation':
                    # Map e.g. phosphorylation -> dephosphorylation.
                    modification_type = \
                        modtype_to_inverse.get(modification_type)
                    if not modification_type:
                        # NOTE(review): modification_type is None at this
                        # point, so the message logs "None" rather than the
                        # original (unmapped) type — confirm intent.
                        logger.warning('Unhandled modification type: %s' %
                                       modification_type)
                        continue
                sentence = reg['verbose-text']
                annotations['agents']['coords'] = [controller_coords,
                                                   theme_coords]
                ev = Evidence(source_api='reach', text=sentence,
                              annotations=annotations, pmid=self.citation,
                              context=context, epistemics=epistemics)
                args = [controller_agent, theme_agent, residue, pos, ev]
                ModStmt = modtype_to_modclass.get(modification_type)
                if ModStmt is None:
                    logger.warning('Unhandled modification type: %s' %
                                   modification_type)
                else:
                    # Autophosphorylation Statements take no separate enzyme.
                    if modification_type == 'autophosphorylation':
                        args = [theme_agent, residue, pos, ev]
                    self.statements.append(ModStmt(*args))
Extract Modification INDRA Statements .
17,650
def get_regulate_amounts(self):
    """Extract RegulateAmount INDRA Statements.

    Collects 'transcription' and 'amount' event frames, then finds the
    'regulation' frames controlling each one to build IncreaseAmount or
    DecreaseAmount Statements appended to self.statements.
    """
    qstr = "$.events.frames[(@.type is 'transcription')]"
    res = self.tree.execute(qstr)
    all_res = []
    if res is not None:
        all_res += list(res)
    qstr = "$.events.frames[(@.type is 'amount')]"
    res = self.tree.execute(qstr)
    if res is not None:
        all_res += list(res)
    for r in all_res:
        subtype = r.get('subtype')
        epistemics = self._get_epistemics(r)
        # Negated events are skipped entirely.
        if epistemics.get('negated'):
            continue
        annotations, context = self._get_annot_context(r)
        frame_id = r['frame_id']
        args = r['arguments']
        theme = None
        for a in args:
            if self._get_arg_type(a) == 'theme':
                theme = a['arg']
                break
        if theme is None:
            continue
        theme_agent, theme_coords = self._get_agent_from_entity(theme)
        # Find regulation frames whose controlled argument is this event.
        qstr = "$.events.frames[(@.type is 'regulation') and " + \
               "(@.arguments[0].arg is '%s')]" % frame_id
        reg_res = self.tree.execute(qstr)
        # Guard against a None query result (consistent with the event
        # queries above); iterating None would raise a TypeError.
        if reg_res is None:
            continue
        for reg in reg_res:
            controller_agent, controller_coords = None, None
            for a in reg['arguments']:
                if self._get_arg_type(a) == 'controller':
                    controller_agent, controller_coords = \
                        self._get_controller_agent(a)
            sentence = reg['verbose-text']
            annotations['agents']['coords'] = [controller_coords,
                                               theme_coords]
            ev = Evidence(source_api='reach', text=sentence,
                          annotations=annotations, pmid=self.citation,
                          context=context, epistemics=epistemics)
            args = [controller_agent, theme_agent, ev]
            subtype = reg.get('subtype')
            if subtype == 'positive-regulation':
                st = IncreaseAmount(*args)
            else:
                st = DecreaseAmount(*args)
            self.statements.append(st)
Extract RegulateAmount INDRA Statements .
17,651
def get_complexes(self):
    """Extract INDRA Complex Statements."""
    qstr = "$.events.frames[@.type is 'complex-assembly']"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        # Negated events are skipped entirely.
        if epistemics.get('negated'):
            continue
        # Only directly stated complex-assembly events are extracted.
        if epistemics.get('direct') is None:
            continue
        annotations, context = self._get_annot_context(r)
        args = r['arguments']
        sentence = r['verbose-text']
        members = []
        agent_coordinates = []
        # Every argument of the frame becomes a complex member.
        for a in args:
            agent, coords = self._get_agent_from_entity(a['arg'])
            members.append(agent)
            agent_coordinates.append(coords)
        annotations['agents']['coords'] = agent_coordinates
        ev = Evidence(source_api='reach', text=sentence,
                      annotations=annotations, pmid=self.citation,
                      context=context, epistemics=epistemics)
        stmt = Complex(members, ev)
        self.statements.append(stmt)
Extract INDRA Complex Statements .
17,652
def get_activation(self):
    """Extract INDRA Activation Statements."""
    qstr = "$.events.frames[@.type is 'activation']"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        # Negated events are skipped entirely.
        if epistemics.get('negated'):
            continue
        sentence = r['verbose-text']
        annotations, context = self._get_annot_context(r)
        ev = Evidence(source_api='reach', text=sentence,
                      pmid=self.citation, annotations=annotations,
                      context=context, epistemics=epistemics)
        args = r['arguments']
        # NOTE(review): if a frame lacks a controller or controlled
        # argument, the corresponding *_agent/*_coords names below stay
        # unbound and a NameError follows — confirm REACH always emits
        # both arguments for activation frames.
        for a in args:
            if self._get_arg_type(a) == 'controller':
                controller_agent, controller_coords = \
                    self._get_controller_agent(a)
            if self._get_arg_type(a) == 'controlled':
                controlled = a['arg']
                controlled_agent, controlled_coords = \
                    self._get_agent_from_entity(controlled)
        annotations['agents']['coords'] = [controller_coords,
                                           controlled_coords]
        # Polarity of the subtype decides Activation vs Inhibition.
        if r['subtype'] == 'positive-activation':
            st = Activation(controller_agent, controlled_agent,
                            evidence=ev)
        else:
            st = Inhibition(controller_agent, controlled_agent,
                            evidence=ev)
        self.statements.append(st)
Extract INDRA Activation Statements .
17,653
def get_translocation(self):
    """Extract INDRA Translocation Statements."""
    qstr = "$.events.frames[@.type is 'translocation']"
    res = self.tree.execute(qstr)
    if res is None:
        return
    for r in res:
        epistemics = self._get_epistemics(r)
        # Negated events are skipped entirely.
        if epistemics.get('negated'):
            continue
        sentence = r['verbose-text']
        annotations, context = self._get_annot_context(r)
        args = r['arguments']
        from_location = None
        to_location = None
        for a in args:
            if self._get_arg_type(a) == 'theme':
                agent, theme_coords = self._get_agent_from_entity(a['arg'])
                if agent is None:
                    # No Agent could be built for this theme argument.
                    continue
            elif self._get_arg_type(a) == 'source':
                from_location = self._get_location_by_id(a['arg'])
            elif self._get_arg_type(a) == 'destination':
                to_location = self._get_location_by_id(a['arg'])
        # NOTE(review): `agent`/`theme_coords` are only bound when a theme
        # argument is present — a frame without one would raise NameError
        # here; confirm REACH always provides a theme.
        annotations['agents']['coords'] = [theme_coords]
        ev = Evidence(source_api='reach', text=sentence,
                      pmid=self.citation, annotations=annotations,
                      context=context, epistemics=epistemics)
        st = Translocation(agent, from_location, to_location,
                           evidence=ev)
        self.statements.append(st)
Extract INDRA Translocation Statements .
17,654
def _get_mod_conditions(self, mod_term):
    """Return a list of ModConditions given a mod term dict."""
    site_text = mod_term.get('site')
    if site_text is None:
        # No site given: a single entry with unknown residue/position.
        parsed_sites = [Site(None, None)]
    else:
        parsed_sites = self._parse_site_text(site_text)
    conditions = []
    for residue, position in parsed_sites:
        mod_type_str = mod_term['type'].lower()
        mapped = agent_mod_map.get(mod_type_str)
        if mapped is None:
            logger.warning('Unhandled entity modification type: %s'
                           % mod_type_str)
            continue
        conditions.append(ModCondition(mapped[0], residue=residue,
                                       position=position,
                                       is_modified=mapped[1]))
    return conditions
Return a list of ModConditions given a mod term dict .
17,655
def _get_entity_coordinates(self, entity_term):
    """Return sentence coordinates for a given entity.

    Looks up the entity's sentence frame and returns the entity's
    (start, stop) character offsets relative to the start of the
    sentence, or None if any required offset is missing.
    """
    sent_id = entity_term.get('sentence')
    if sent_id is None:
        return None
    qstr = "$.sentences.frames[(@.frame_id is \'%s')]" % sent_id
    res = self.tree.execute(qstr)
    if res is None:
        return None
    try:
        # Take the first (and expected only) matching sentence frame.
        sentence = next(res)
    except StopIteration:
        return None
    sent_start = sentence.get('start-pos')
    if sent_start is None:
        return None
    sent_start = sent_start.get('offset')
    if sent_start is None:
        return None
    entity_start = entity_term.get('start-pos')
    entity_stop = entity_term.get('end-pos')
    if entity_start is None or entity_stop is None:
        return None
    entity_start = entity_start.get('offset')
    entity_stop = entity_stop.get('offset')
    if entity_start is None or entity_stop is None:
        return None
    # Make the document-level offsets sentence-relative.
    return (entity_start - sent_start, entity_stop - sent_start)
Return sentence coordinates for a given entity .
17,656
def _get_section(self, event):
    """Get the section of the paper that the event is from.

    Resolves event -> sentence frame -> passage frame -> section-id and
    normalizes the raw section id to a canonical section name.
    """
    sentence_id = event.get('sentence')
    section = None
    if sentence_id:
        qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % sentence_id
        res = self.tree.execute(qstr)
        if res:
            sentence_frame = list(res)[0]
            passage_id = sentence_frame.get('passage')
            if passage_id:
                qstr = "$.sentences.frames[(@.frame_id is \'%s\')]" % \
                       passage_id
                res = self.tree.execute(qstr)
                if res:
                    passage_frame = list(res)[0]
                    section = passage_frame.get('section-id')
    # Normalize the raw section id to a canonical name.
    # NOTE(review): if no section was resolved, `section` is None and
    # `None.startswith` below raises AttributeError — confirm callers
    # only reach this with resolvable passages.
    if section in self._section_list:
        return section
    elif section.startswith('fig'):
        return 'figure'
    elif section.startswith('supm'):
        return 'supplementary'
    elif section == 'article-title':
        return 'title'
    elif section in ['subjects|methods', 'methods|subjects']:
        return 'methods'
    elif section == 'conclusions':
        return 'conclusion'
    elif section == 'intro':
        return 'introduction'
    else:
        return None
Get the section of the paper that the event is from .
17,657
def _get_controller_agent(self, arg):
    """Return a single or a complex controller agent.

    When the controller argument is a complex, the first member becomes
    the Agent and the remaining members are attached as BoundConditions.
    """
    controller_agent = None
    controller = arg.get('arg')
    # Single-entity controller: build its Agent directly.
    if controller is not None:
        controller_agent, coords = self._get_agent_from_entity(controller)
    elif arg['argument-type'] == 'complex':
        # Complex controller: first member is the subject Agent.
        controllers = list(arg.get('args').values())
        controller_agent, coords = \
            self._get_agent_from_entity(controllers[0])
        bound_agents = [self._get_agent_from_entity(c)[0]
                        for c in controllers[1:]]
        bound_conditions = [BoundCondition(ba, True)
                            for ba in bound_agents]
        controller_agent.bound_conditions = bound_conditions
    # NOTE(review): `coords` stays unbound if neither branch above runs
    # (no 'arg' and argument-type not 'complex') — confirm this cannot
    # occur in REACH output.
    return controller_agent, coords
Return a single or a complex controller agent .
17,658
def _sanitize ( text ) : d = { '-LRB-' : '(' , '-RRB-' : ')' } return re . sub ( '|' . join ( d . keys ( ) ) , lambda m : d [ m . group ( 0 ) ] , text )
Return sanitized Eidos text field for human readability .
17,659
def ref_context_from_geoloc(geoloc):
    """Return a RefContext object given a geoloc entry."""
    # The geoID becomes the GEOID db_ref; the surface text is the name.
    return RefContext(name=geoloc.get('text'),
                      db_refs={'GEOID': geoloc.get('geoID')})
Return a RefContext object given a geoloc entry .
17,660
def time_context_from_timex(timex):
    """Return a TimeContext object given a timex entry."""
    # Only the first interval constraint of the timex is used.
    interval = timex['intervals'][0]
    return TimeContext(text=timex.get('text'),
                       start=_get_time_stamp(interval.get('start')),
                       end=_get_time_stamp(interval.get('end')),
                       duration=interval['duration'])
Return a TimeContext object given a timex entry .
17,661
def find_args(event, arg_type):
    """Return IDs of all arguments of a given type."""
    return [arg['value']['@id']
            for arg in event.get('arguments', {})
            if arg['type'] == arg_type]
Return the IDs of all arguments of a given type.
17,662
def extract_causal_relations(self):
    """Extract causal relations as Statements."""
    for extraction in self.doc.extractions:
        labels = extraction['labels']
        # Only directed, causal relations are converted.
        if 'DirectedRelation' not in labels or 'Causal' not in labels:
            continue
        stmt = self.get_causal_relation(extraction)
        if stmt is not None:
            self.statements.append(stmt)
Extract causal relations as Statements .
17,663
def get_evidence(self, relation):
    """Return the Evidence object for the INDRA Statement.

    Builds the evidence text, world context (time and geolocation),
    provenance annotations and epistemics (hedging/negation) from the
    relation's provenance entry.
    """
    provenance = relation.get('provenance')
    text = None
    context = None
    if provenance:
        sentence_tag = provenance[0].get('sentence')
        if sentence_tag and '@id' in sentence_tag:
            sentence_id = sentence_tag['@id']
            sentence = self.doc.sentences.get(sentence_id)
            if sentence is not None:
                text = _sanitize(sentence['text'])
                # Use the first timex/geoloc attached to the sentence,
                # if any, to build a WorldContext.
                timexes = sentence.get('timexes', [])
                if timexes:
                    timex = timexes[0]
                    tc = time_context_from_timex(timex)
                    context = WorldContext(time=tc)
                geolocs = sentence.get('geolocs', [])
                if geolocs:
                    geoloc = geolocs[0]
                    rc = ref_context_from_geoloc(geoloc)
                    if context:
                        context.geo_location = rc
                    else:
                        context = WorldContext(geo_location=rc)
        # Add the document title to the provenance for reference.
        doc_id = provenance[0].get('document', {}).get('@id')
        if doc_id:
            title = self.doc.documents.get(doc_id, {}).get('title')
            if title:
                provenance[0]['document']['title'] = title
    annotations = {'found_by': relation.get('rule'),
                   'provenance': provenance}
    if self.doc.dct is not None:
        annotations['document_creation_time'] = self.doc.dct.to_json()
    epistemics = {}
    negations = self.get_negation(relation)
    hedgings = self.get_hedging(relation)
    if hedgings:
        epistemics['hedgings'] = hedgings
    if negations:
        epistemics['negated'] = True
        annotations['negated_texts'] = negations
    if text is None:
        # Fall back to the relation's own text. The original code
        # referenced an undefined name `event` here, which raised a
        # NameError whenever no sentence text could be found.
        text = _sanitize(relation.get('text'))
    ev = Evidence(source_api='eidos', text=text, annotations=annotations,
                  context=context, epistemics=epistemics)
    return ev
Return the Evidence object for the INDRA Statement.
17,664
def get_negation(event):
    """Return negation attached to an event."""
    states = event.get('states', [])
    # Collect the surface text of every NEGATION state.
    return [state['text'] for state in states
            if state.get('type') == 'NEGATION']
Return negation attached to an event .
17,665
def get_hedging(event):
    """Return hedging markers attached to an event."""
    states = event.get('states', [])
    # Collect the surface text of every HEDGE state.
    return [state['text'] for state in states
            if state.get('type') == 'HEDGE']
Return hedging markers attached to an event .
17,666
def get_groundings(entity):
    """Return groundings as db_refs for an entity."""
    def _entries_for(grounding):
        # Collect (ontologyConcept, value) pairs, skipping incomplete ones.
        if not grounding:
            return None
        pairs = []
        for entry in grounding.get('values', []):
            concept = entry.get('ontologyConcept')
            score = entry.get('value')
            if concept is not None and score is not None:
                pairs.append((concept, score))
        return pairs

    db_refs = {'TEXT': entity['text']}
    for grounding in entity.get('groundings') or []:
        pairs = _entries_for(grounding)
        if not pairs:
            continue
        namespace = grounding['name'].upper()
        if namespace == 'UN':
            # UN ontology concepts use underscores instead of spaces.
            db_refs[namespace] = [(concept.replace(' ', '_'), score)
                                  for concept, score in pairs]
        else:
            db_refs[namespace] = pairs
    return db_refs
Return groundings as db_refs for an entity .
17,667
def get_concept(entity):
    """Return Concept from an Eidos entity."""
    db_refs = EidosProcessor.get_groundings(entity)
    return Concept(entity['canonicalName'], db_refs=db_refs)
Return Concept from an Eidos entity .
17,668
def time_context_from_ref(self, timex):
    """Return a time context object given a timex reference entry."""
    value = timex.get('value')
    if not value:
        return None
    # Resolve the reference against the document's timex table.
    return self.doc.timexes.get(value['@id'])
Return a time context object given a timex reference entry .
17,669
def geo_context_from_ref(self, ref):
    """Return a ref context object given a location reference entry."""
    value = ref.get('value')
    if not value:
        return None
    # Resolve the reference against the document's geoloc table.
    return self.doc.geolocs.get(value['@id'])
Return a ref context object given a location reference entry .
17,670
def time_context_from_dct(dct):
    """Return a time context object given a DCT entry."""
    return TimeContext(text=dct.get('text'),
                       start=_get_time_stamp(dct.get('start')),
                       end=_get_time_stamp(dct.get('end')),
                       duration=dct.get('duration'))
Return a time context object given a DCT entry .
17,671
def make_hash(s, n_bytes):
    """Make the hash from a matches key.

    Takes the first `n_bytes` hex digits of the md5 of `s` and recenters
    the value around zero so hashes are roughly sign-balanced.
    """
    digest_prefix = md5(s.encode('utf-8')).hexdigest()[:n_bytes]
    midpoint = 16 ** n_bytes // 2
    return midpoint - int(digest_prefix, 16)
Make the hash from a matches key .
17,672
def parse_a1(a1_text):
    """Parses an a1 file, the file TEES outputs that lists the entities in
    the extracted events.

    Parameters
    ----------
    a1_text : str
        Contents of the a1 file.

    Returns
    -------
    dict
        Mapping from entity identifier to TEESEntity.
    """
    entities = {}
    for line in a1_text.split('\n'):
        if len(line) == 0:
            continue
        tokens = line.rstrip().split('\t')
        if len(tokens) != 3:
            # Fixed typo ("seperated") in the message.
            raise Exception('Expected three tab-separated tokens per line ' +
                            'in the a1 file output from TEES.')
        identifier = tokens[0]
        entity_info = tokens[1]
        entity_name = tokens[2]
        info_tokens = entity_info.split()
        if len(info_tokens) != 3:
            # Fixed typo and wrong file name: this function parses the a1
            # file, not the a2 file.
            raise Exception('Expected three space-separated tokens in the ' +
                            'second column of the a1 file output from TEES.')
        entity_type = info_tokens[0]
        first_offset = int(info_tokens[1])
        second_offset = int(info_tokens[2])
        offsets = (first_offset, second_offset)
        entities[identifier] = TEESEntity(identifier, entity_type,
                                          entity_name, offsets)
    return entities
Parses an a1 file, the file TEES outputs that lists the entities in the extracted events.
17,673
def parse_output(a1_text, a2_text, sentence_segmentations):
    """Parses the output of the TEES reader and returns a networkx graph
    with the event information."""
    sentences = TEESSentences(sentence_segmentations)
    entity_map = parse_a1(a1_text)
    return parse_a2(a2_text, entity_map, sentences)
Parses the output of the TEES reader and returns a networkx graph with the event information .
17,674
def tees_parse_networkx_to_dot(G, output_file, subgraph_nodes):
    """Converts TEES extractions stored in a networkx graph into a
    graphviz .dot file.

    Parameters
    ----------
    G : networkx graph
        Graph of TEES extractions.
    output_file : str
        Path of the .dot file to write.
    subgraph_nodes : list
        Only edges starting at these nodes are written.
    """
    with codecs.open(output_file, 'w', encoding='utf-8') as f:
        f.write('digraph teesParse {\n')
        mentioned_nodes = set()
        # One edge line per relation out of the requested nodes.
        for from_node in subgraph_nodes:
            for edge in G.edges(from_node):
                to_node = edge[1]
                mentioned_nodes.add(from_node)
                mentioned_nodes.add(to_node)
                relation = G.edges[from_node, to_node]['relation']
                f.write('%s -> %s [ label = "%s" ];\n' %
                        (from_node, to_node, relation))
        # Label every node that appeared in an edge.
        # NOTE(review): `G.node` was removed in networkx 2.4 in favor of
        # `G.nodes` — confirm the pinned networkx version still has it.
        for node in mentioned_nodes:
            is_event = G.node[node]['is_event']
            if is_event:
                node_type = G.node[node]['type']
                negated = G.node[node]['negated']
                speculation = G.node[node]['speculation']
                # Tag event nodes with negation/speculation markers.
                if negated and speculation:
                    tag = ' {NS}'
                elif negated:
                    tag = ' {N}'
                elif speculation:
                    tag = ' {S}'
                else:
                    tag = ''
                node_label = node_type + tag
            else:
                node_label = G.node[node]['text']
            f.write('%s [label="%s"];\n' % (node, node_label))
        f.write('}\n')
Converts TEES extractions stored in a networkx graph into a graphviz .dot file.
17,675
def _get_event(self, event, find_str):
    """Get a concept referred from the event by the given string.

    Finds the element under `find_str`, resolves its term by id in the
    document tree, and builds an Event with a text-derived Concept and
    optional time/location context.
    """
    element = event.find(find_str)
    if element is None:
        return None
    element_id = element.attrib.get('id')
    element_term = self.tree.find("*[@id='%s']" % element_id)
    if element_term is None:
        return None
    time, location = self._extract_time_loc(element_term)
    assoc_with = self._get_assoc_with(element_term)
    element_text_element = element_term.find('text')
    if element_text_element is None:
        return None
    element_text = element_text_element.text
    element_db_refs = {'TEXT': element_text}
    element_name = sanitize_name(element_text)
    element_type_element = element_term.find('type')
    if element_type_element is not None:
        element_db_refs['CWMS'] = element_type_element.text
        # Append any associated-with grounding to the CWMS entry.
        if assoc_with is not None:
            element_db_refs['CWMS'] += ('|%s' % assoc_with)
    concept = Concept(element_name, db_refs=element_db_refs)
    if time or location:
        context = WorldContext(time=time, geo_location=location)
    else:
        context = None
    event_obj = Event(concept, context=context)
    return event_obj
Get a concept referred from the event by the given string .
17,676
def make_model(self, grounding_ontology='UN', grounding_threshold=None):
    """Return a networkx MultiDiGraph representing a causal analysis graph.

    Parameters
    ----------
    grounding_ontology : Optional[str]
        The db_refs namespace used for node naming (via _node_name).
    grounding_threshold : Optional[float]
        Grounding score threshold above which the grounding entry is used
        as the node name instead of the concept name.
    """
    if grounding_threshold is not None:
        self.grounding_threshold = grounding_threshold
    self.grounding_ontology = grounding_ontology
    # Only Influence statements are represented in the CAG.
    statements = [stmt for stmt in self.statements
                  if isinstance(stmt, Influence)]
    self.CAG = nx.MultiDiGraph()
    for s in statements:
        # An edge is "simulable" only when both deltas carry a polarity.
        has_both_polarity = (s.subj.delta['polarity'] is not None and
                             s.obj.delta['polarity'] is not None)
        for node, delta in zip((s.subj.concept, s.obj.concept),
                               (s.subj.delta, s.obj.delta)):
            self.CAG.add_node(self._node_name(node),
                              simulable=has_both_polarity,
                              mods=delta['adjectives'])
        # Dotted edges mark non-simulable (incomplete polarity) relations.
        linestyle = 'solid' if has_both_polarity else 'dotted'
        if has_both_polarity:
            same_polarity = (s.subj.delta['polarity'] ==
                             s.obj.delta['polarity'])
            if same_polarity:
                target_arrow_shape, linecolor = ('circle', 'green')
            else:
                target_arrow_shape, linecolor = ('tee', 'maroon')
        else:
            target_arrow_shape, linecolor = ('triangle', 'maroon')
        # Attach the first evidence's provenance (with its text) if any.
        provenance = []
        if s.evidence:
            provenance = s.evidence[0].annotations.get('provenance', [])
            if provenance:
                provenance[0]['text'] = s.evidence[0].text
        self.CAG.add_edge(
            self._node_name(s.subj.concept),
            self._node_name(s.obj.concept),
            subj_polarity=s.subj.delta['polarity'],
            subj_adjectives=s.subj.delta['adjectives'],
            obj_polarity=s.obj.delta['polarity'],
            obj_adjectives=s.obj.delta['adjectives'],
            linestyle=linestyle,
            linecolor=linecolor,
            targetArrowShape=target_arrow_shape,
            provenance=provenance,
        )
    return self.CAG
Return a networkx MultiDiGraph representing a causal analysis graph .
17,677
def export_to_cytoscapejs(self):
    """Return CAG in format readable by CytoscapeJS.

    Returns
    -------
    dict
        A dict with 'nodes' and 'edges' lists in CytoscapeJS format.
    """
    def _create_edge_data_dict(e):
        # e is a (u, v, key, data) tuple from edges(data=True, keys=True).
        # The first provenance entry becomes the edge tooltip.
        if e[3].get('provenance'):
            tooltip = e[3]['provenance'][0]
            if tooltip.get('@type'):
                del tooltip['@type']
        else:
            tooltip = None
        edge_data_dict = {
            'id': e[0] + '_' + e[1],
            'source': e[0],
            'target': e[1],
            'linestyle': e[3]["linestyle"],
            'linecolor': e[3]["linecolor"],
            'targetArrowShape': e[3]["targetArrowShape"],
            'subj_adjectives': e[3]["subj_adjectives"],
            'subj_polarity': e[3]["subj_polarity"],
            'obj_adjectives': e[3]["obj_adjectives"],
            'obj_polarity': e[3]["obj_polarity"],
            'tooltip': tooltip,
            # Only edges with both polarities known are simulable.
            'simulable': False if (e[3]['obj_polarity'] is None or
                                   e[3]['subj_polarity'] is None) else True,
        }
        return edge_data_dict
    return {'nodes': [{'data': {
                'id': n[0],
                'simulable': n[1]['simulable'],
                'tooltip': 'Modifiers: ' + json.dumps(n[1]['mods'])}}
                for n in self.CAG.nodes(data=True)],
            'edges': [{'data': _create_edge_data_dict(e)}
                      for e in self.CAG.edges(data=True, keys=True)]}
Return CAG in format readable by CytoscapeJS .
17,678
def generate_jupyter_js(self, cyjs_style=None, cyjs_layout=None):
    """Generate Javascript from a template to run in Jupyter notebooks.

    Parameters
    ----------
    cyjs_style : Optional[dict]
        CytoscapeJS style; defaults to the 'style' in cag_style.json.
    cyjs_layout : Optional[dict]
        CytoscapeJS layout; defaults to the 'layout' in cag_style.json.
    """
    cyjs_elements = self.export_to_cytoscapejs()
    # Load the JS template stored next to this module.
    tempf = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                         'cag_template.js')
    with open(tempf, 'r') as fh:
        template = fh.read()
    # Load default style/layout settings.
    stylef = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'cag_style.json')
    with open(stylef, 'r') as fh:
        style = json.load(fh)
    if cyjs_style is None:
        cyjs_style = style['style']
    if cyjs_layout is None:
        cyjs_layout = style['layout']
    # The template expects three %-formatted JSON arguments.
    formatted_args = tuple(json.dumps(x, indent=2)
                           for x in (cyjs_elements, cyjs_style,
                                     cyjs_layout))
    js_str = template % formatted_args
    return js_str
Generate Javascript from a template to run in Jupyter notebooks .
17,679
def _node_name ( self , concept ) : if ( self . grounding_threshold is not None and concept . db_refs [ self . grounding_ontology ] and ( concept . db_refs [ self . grounding_ontology ] [ 0 ] [ 1 ] > self . grounding_threshold ) ) : entry = concept . db_refs [ self . grounding_ontology ] [ 0 ] [ 0 ] return entry . split ( '/' ) [ - 1 ] . replace ( '_' , ' ' ) . capitalize ( ) else : return concept . name . capitalize ( )
Return a standardized name for a node given a Concept .
17,680
def term_from_uri(uri):
    """Removes prepended URI information from terms."""
    if uri is None:
        return None
    # Normalize rdflib Literals to plain strings first.
    if isinstance(uri, rdflib.Literal):
        uri = str(uri.toPython())
    # Double-slash variants come first so more specific prefixes win.
    patterns = ['http://www.openbel.org/bel/namespace//(.*)',
                'http://www.openbel.org/vocabulary//(.*)',
                'http://www.openbel.org/bel//(.*)',
                'http://www.openbel.org/bel/namespace/(.*)',
                'http://www.openbel.org/vocabulary/(.*)',
                'http://www.openbel.org/bel/(.*)']
    for pattern in patterns:
        match = re.match(pattern, uri)
        if match is not None:
            return unquote(match.groups()[0])
    # No known prefix: return the input unchanged.
    return uri
Removes prepended URI information from terms .
17,681
def get_activating_mods(self):
    """Extract INDRA ActiveForm Statements with a single mod from BEL."""
    # NOTE(review): the BEL/RDF (SPARQL) query string that originally
    # followed `prefixes +` (a triple-quoted block) appears to have been
    # lost in extraction; as written this assignment is not valid Python
    # and the query must be restored from the original source.
    q_mods = prefixes +
    res_mods = self.g.query(q_mods)
    for stmt in res_mods:
        evidence = self._get_evidence(stmt[5])
        species = self._get_agent(stmt[0], stmt[6])
        act_type = term_from_uri(stmt[1]).lower()
        mod = term_from_uri(stmt[2])
        mod_pos = term_from_uri(stmt[3])
        mc = self._get_mod_condition(mod, mod_pos)
        species.mods = [mc]
        rel = term_from_uri(stmt[4])
        # DirectlyDecreases implies an inactivating modification.
        if rel == 'DirectlyDecreases':
            is_active = False
        else:
            is_active = True
        stmt_str = strip_statement(stmt[5])
        self.converted_direct_stmts.append(stmt_str)
        st = ActiveForm(species, act_type, is_active, evidence)
        self.statements.append(st)
Extract INDRA ActiveForm Statements with a single mod from BEL .
17,682
def get_complexes(self):
    """Extract INDRA Complex Statements from BEL."""
    # NOTE(review): SPARQL query string after `prefixes +` lost in
    # extraction (see note in get_activating_mods); restore from source.
    q_cmplx = prefixes +
    res_cmplx = self.g.query(q_cmplx)
    cmplx_dict = collections.defaultdict(list)
    cmplx_ev = {}
    for stmt in res_cmplx:
        stmt_uri = stmt[3]
        ev = self._get_evidence(stmt_uri)
        for e in ev:
            # BEL complex statements are direct by definition.
            e.epistemics['direct'] = True
        cmplx_name = term_from_uri(stmt[0])
        # Make complex IDs unique per statement URI.
        cmplx_id = stmt_uri + '#' + cmplx_name
        child = self._get_agent(stmt[1], stmt[2])
        cmplx_dict[cmplx_id].append(child)
        cmplx_ev[cmplx_id] = ev
    for cmplx_id, cmplx_list in cmplx_dict.items():
        if len(cmplx_list) < 2:
            # NOTE(review): this message reuses `cmplx_name` from the last
            # iteration of the loop above, not the name belonging to this
            # cmplx_id — confirm whether that is intended.
            msg = 'Complex %s has less than 2 members! Skipping.' % \
                  cmplx_name
            logger.warning(msg)
        else:
            self.statements.append(Complex(cmplx_list,
                                           evidence=cmplx_ev[cmplx_id]))
Extract INDRA Complex Statements from BEL .
17,683
def get_activating_subs(self):
    """Extract INDRA ActiveForm Statements based on a mutation from BEL."""
    # NOTE(review): SPARQL query string after `prefixes +` lost in
    # extraction (see note in get_activating_mods); restore from source.
    q_mods = prefixes +
    res_mods = self.g.query(q_mods)
    for stmt in res_mods:
        evidence = self._get_evidence(stmt[4])
        enz = self._get_agent(stmt[0], stmt[5])
        sub_expr = term_from_uri(stmt[1])
        act_type = term_from_uri(stmt[2]).lower()
        # Parse BEL substitution expressions of the form sub(V,600,E).
        match = re.match('sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr)
        if match:
            matches = match.groups()
            wt_residue = matches[0]
            position = matches[1]
            sub_residue = matches[2]
        else:
            logger.warning("Could not parse substitution expression %s" %
                           sub_expr)
            continue
        mc = MutCondition(position, wt_residue, sub_residue)
        enz.mutations = [mc]
        rel = strip_statement(stmt[3])
        # DirectlyDecreases implies an inactivating mutation.
        if rel == 'DirectlyDecreases':
            is_active = False
        else:
            is_active = True
        stmt_str = strip_statement(stmt[4])
        self.converted_direct_stmts.append(stmt_str)
        st = ActiveForm(enz, act_type, is_active, evidence)
        self.statements.append(st)
Extract INDRA ActiveForm Statements based on a mutation from BEL .
17,684
def get_conversions(self):
    """Extract Conversion INDRA Statements from BEL."""
    # NOTE(review): SPARQL query string after `prefixes +` lost in
    # extraction (see note in get_activating_mods); restore from source.
    query = prefixes +
    res = self.g.query(query)
    # Group query rows by statement URI: one Conversion per BEL statement.
    stmt_map = collections.defaultdict(list)
    for stmt in res:
        stmt_map[stmt[-1]].append(stmt)
    for stmts in stmt_map.values():
        stmt = stmts[0]
        subj = self._get_agent(stmt[1], stmt[0])
        evidence = self._get_evidence(stmt[-1])
        stmt_str = strip_statement(stmt[-1])
        # Deduplicate reactants/products by name across the rows.
        obj_from_map = {}
        obj_to_map = {}
        for stmt in stmts:
            reactant_name = stmt[6]
            product_name = stmt[4]
            if reactant_name not in obj_from_map:
                obj_from_map[reactant_name] = \
                    self._get_agent(stmt[6], stmt[5])
            if product_name not in obj_to_map:
                obj_to_map[product_name] = self._get_agent(stmt[4], stmt[3])
        obj_from = list(obj_from_map.values())
        obj_to = list(obj_to_map.values())
        st = Conversion(subj, obj_from, obj_to, evidence=evidence)
        self.statements.append(st)
        self.converted_direct_stmts.append(stmt_str)
Extract Conversion INDRA Statements from BEL .
17,685
def get_degenerate_statements(self):
    """Get all degenerate BEL statements."""
    logger.info("Checking for 'degenerate' statements...\n")
    # NOTE(review): SPARQL query string after `prefixes +` lost in
    # extraction (see note in get_activating_mods); restore from source.
    q_stmts = prefixes +
    res_stmts = self.g.query(q_stmts)
    logger.info("Protein -> Protein/Activity statements:")
    logger.info("---------------------------------------")
    for stmt in res_stmts:
        stmt_str = strip_statement(stmt[0])
        logger.info(stmt_str)
        self.degenerate_stmts.append(stmt_str)
Get all degenerate BEL statements .
17,686
def print_statement_coverage(self):
    """Display how many of the direct statements have been converted.

    Also prints how many are considered 'degenerate' and not converted,
    and lists the unhandled direct and indirect statements.
    """
    # Populate the statement lists lazily if not yet gathered.
    if not self.all_direct_stmts:
        self.get_all_direct_statements()
    if not self.degenerate_stmts:
        self.get_degenerate_statements()
    if not self.all_indirect_stmts:
        self.get_all_indirect_statements()
    logger.info('')
    logger.info("Total indirect statements: %d" %
                len(self.all_indirect_stmts))
    logger.info("Converted indirect statements: %d" %
                len(self.converted_indirect_stmts))
    logger.info(">> Unhandled indirect statements: %d" %
                (len(self.all_indirect_stmts) -
                 len(self.converted_indirect_stmts)))
    logger.info('')
    logger.info("Total direct statements: %d" % len(self.all_direct_stmts))
    logger.info("Converted direct statements: %d" %
                len(self.converted_direct_stmts))
    logger.info("Degenerate direct statements: %d" %
                len(self.degenerate_stmts))
    logger.info(">> Unhandled direct statements: %d" %
                (len(self.all_direct_stmts) -
                 len(self.converted_direct_stmts) -
                 len(self.degenerate_stmts)))
    logger.info('')
    logger.info("--- Unhandled direct statements ---------")
    for stmt in self.all_direct_stmts:
        if not (stmt in self.converted_direct_stmts or
                stmt in self.degenerate_stmts):
            logger.info(stmt)
    logger.info('')
    logger.info("--- Unhandled indirect statements ---------")
    for stmt in self.all_indirect_stmts:
        if not (stmt in self.converted_indirect_stmts or
                stmt in self.degenerate_stmts):
            logger.info(stmt)
Display how many of the direct statements have been converted .
17,687
def print_statements(self):
    """Print all extracted INDRA Statements."""
    sections = [('--- Direct INDRA statements ----------', self.statements),
                ('--- Indirect INDRA statements ----------',
                 self.indirect_stmts)]
    for header, stmts in sections:
        logger.info(header)
        for idx, stmt in enumerate(stmts):
            logger.info("%s: %s" % (idx, stmt))
Print all extracted INDRA Statements .
17,688
def process_directory_statements_sorted_by_pmid(directory_name):
    """Processes a directory filled with CSXML files, first normalizing the
    character encoding to utf-8, and then processing into INDRA statements
    sorted by pmid."""
    stmts_by_pmid = defaultdict(list)
    processor = process_directory(directory_name, lazy=True)
    # Bucket each statement by the pmid of its first evidence.
    for stmt in processor.iter_statements():
        stmts_by_pmid[stmt.evidence[0].pmid].append(stmt)
    return stmts_by_pmid
Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid.
17,689
def process_directory(directory_name, lazy=False):
    """Processes a directory filled with CSXML files, first normalizing the
    character encodings to utf-8, and then processing into a list of INDRA
    statements."""
    processor = MedscanProcessor()
    processor.process_directory(directory_name, lazy)
    return processor
Processes a directory filled with CSXML files, first normalizing the character encodings to utf-8, and then processing into a list of INDRA statements.
17,690
def process_file_sorted_by_pmid(file_name):
    """Processes a file and returns a dictionary mapping pmids to a list of
    statements corresponding to that pmid."""
    stmts_by_pmid = defaultdict(list)
    processor = process_file(file_name, lazy=True)
    # Bucket each statement by the pmid of its first evidence.
    for stmt in processor.iter_statements():
        stmts_by_pmid[stmt.evidence[0].pmid].append(stmt)
    return stmts_by_pmid
Processes a file and returns a dictionary mapping pmids to a list of statements corresponding to that pmid .
17,691
def process_file(filename, interval=None, lazy=False):
    """Process a CSXML file for its relevant information."""
    processor = MedscanProcessor()
    processor.process_csxml_file(filename, interval, lazy)
    return processor
Process a CSXML file for its relevant information .
17,692
def stmts_from_path(path, model, stmts):
    """Return source Statements corresponding to a path in a model.

    Parameters
    ----------
    path : list of (str, int)
        A list of (rule name, sign) tuples describing the path.
    model : pysb.Model
        The model whose rules appear on the path.
    stmts : list of indra.statements.Statement
        Statements from which the model was assembled, used to look up
        the source statement of each rule.

    Returns
    -------
    list of indra.statements.Statement
        The source Statement for each recognized rule on the path.
    """
    # Build the rule-name set once instead of rescanning model.rules for
    # every path entry (O(len(path) * len(rules)) before). This also
    # guarantees stmt_from_rule is called at most once per path entry,
    # whereas the previous inner loop would have produced duplicate
    # statements if a rule name ever appeared twice in model.rules.
    rule_names = {rule.name for rule in model.rules}
    path_stmts = []
    for path_rule, _sign in path:
        if path_rule in rule_names:
            stmt = stmt_from_rule(path_rule, model, stmts)
            assert stmt is not None
            path_stmts.append(stmt)
    return path_stmts
Return source Statements corresponding to a path in a model .
17,693
def extract_context(annotations, annot_manager):
    """Return a BioContext object extracted from the annotations.

    Recognized entries are popped from the annotations dict as they are
    consumed; an empty (falsy) BioContext is collapsed to None.
    """
    def _pop_single(annots, key):
        # Pop the annotation for key and return the single value whose
        # flag is True; warn (but still use the first) if several are set.
        entry = annots.pop(key, None)
        if not entry:
            return None
        true_vals = [value for value, flag in entry.items() if flag]
        if not true_vals:
            return None
        if len(true_vals) > 1:
            logger.warning('More than one "%s" in annotations' % key)
        return true_vals[0]

    bc = BioContext()
    tax_id = _pop_single(annotations, 'Species')
    if tax_id:
        species_name = annot_manager.get_mapping('Species', tax_id)
        bc.species = RefContext(name=species_name,
                                db_refs={'TAXONOMY': tax_id})
    # (BEL annotation key, BioContext attribute, fixed namespace or None).
    # A None namespace means the mapped reference encodes it as "NS_ID".
    context_map = (('CellLine', 'cell_line', None),
                   ('Disease', 'disease', None),
                   ('Anatomy', 'organ', None),
                   ('Cell', 'cell_type', None),
                   ('CellStructure', 'location', 'MESH'))
    for bel_key, attr_name, fixed_ns in context_map:
        value = _pop_single(annotations, bel_key)
        if not value:
            continue
        ref = annot_manager.get_mapping(bel_key, value)
        if ref is None:
            continue
        if fixed_ns:
            db_ns, db_id = fixed_ns, ref
        else:
            db_ns, db_id = ref.split('_', 1)
        setattr(bc, attr_name,
                RefContext(name=value, db_refs={db_ns: db_id}))
    # A BioContext with no attributes set is falsy; return None instead.
    return bc if bc else None
Return a BioContext object extracted from the annotations .
17,694
def format_axis(ax, label_padding=2, tick_padding=0, yticks_position='left'):
    """Set standardized tick and label formatting on a matplotlib axes.

    Parameters
    ----------
    ax : matplotlib.axes.Axes
        The axes to format.
    label_padding : int
        Padding applied to both axis labels.
    tick_padding : int
        Padding applied to the tick labels of both axes.
    yticks_position : str
        Which side the y-axis ticks are drawn on ('left' or 'right').
    """
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position(yticks_position)
    # Both axes get identical tick styling; fontsize is a module-level
    # constant shared across figures.
    tick_kwargs = dict(which='both', direction='out', labelsize=fontsize,
                       pad=tick_padding, length=2, width=0.5)
    for axis in (ax.xaxis, ax.yaxis):
        axis.set_tick_params(**tick_kwargs)
        axis.labelpad = label_padding
        axis.label.set_size(fontsize)
Set standardized axis formatting for figure .
17,695
def make_model(self):
    """Assemble and return the HTML content for the page as a string.

    The result is also stored on self.model. Statements are grouped and
    sorted, each statement's evidence is rendered with highlighted agent
    text, and the whole structure is passed to the page template.
    """
    stmts_formatted = []
    # Group statements into (sort key, verb, statements) rows; evidence
    # totals (if provided) influence the sort order.
    stmt_rows = group_and_sort_statements(
        self.statements, self.ev_totals if self.ev_totals else None)
    for key, verb, stmts in stmt_rows:
        stmt_info_list = []
        for stmt in stmts:
            stmt_hash = stmt.get_hash(shallow=True)
            ev_list = self._format_evidence_text(stmt)
            english = self._format_stmt_text(stmt)
            if self.ev_totals:
                # Show "shown / total" evidence counts when totals are
                # known; '?' marks a hash missing from the totals dict.
                total_evidence = self.ev_totals.get(int(stmt_hash), '?')
                if total_evidence == '?':
                    logger.warning('The hash %s was not found in the '
                                   'evidence totals dict.' % stmt_hash)
                evidence_count_str = '%s / %s' % (len(ev_list),
                                                  total_evidence)
            else:
                evidence_count_str = str(len(ev_list))
            stmt_info_list.append({
                'hash': stmt_hash, 'english': english,
                'evidence': ev_list,
                'evidence_count': evidence_count_str})
        short_name = make_string_from_sort_key(key, verb)
        # A random key gives each statement group a unique DOM id.
        short_name_key = str(uuid.uuid4())
        stmts_formatted.append((short_name, short_name_key, stmt_info_list))
    # Humanize metadata keys: 'some_key' -> 'Some Key'.
    metadata = {k.replace('_', ' ').title(): v
                for k, v in self.metadata.items()}
    if self.db_rest_url and not self.db_rest_url.endswith('statements'):
        db_rest_url = self.db_rest_url + '/statements'
    else:
        # '.' disables DB links in the template when no URL is configured.
        db_rest_url = '.'
    self.model = template.render(stmt_data=stmts_formatted,
                                 metadata=metadata, title=self.title,
                                 db_rest_url=db_rest_url)
    return self.model
Return the assembled HTML content as a string .
17,696
def append_warning(self, msg):
    """Append a warning note after the title to expose issues to the reader.

    Parameters
    ----------
    msg : str
        Description of the problem, spliced into the caution note.

    Returns
    -------
    str
        The updated HTML content (also stored on self.model).

    Raises
    ------
    ValueError
        If make_model has not been run yet.
    """
    # Explicit check instead of `assert`: asserts are stripped under -O,
    # and a missing model is a caller error we always want to surface.
    if self.model is None:
        raise ValueError("You must already have run make_model!")
    addendum = ('\t<span style="color:red;">(CAUTION: %s occurred when '
                'creating this page.)</span>' % msg)
    # Splice the note in right after every occurrence of the title text.
    self.model = self.model.replace(self.title, self.title + addendum)
    return self.model
Append a warning message to the model to expose issues .
17,697
def save_model(self, fname):
    """Save the assembled HTML into a file, assembling it first if needed.

    Parameters
    ----------
    fname : str
        Path of the file to write the UTF-8 encoded HTML to.
    """
    # make_model() both sets self.model and returns the rendered HTML.
    content = self.model if self.model is not None else self.make_model()
    with open(fname, 'wb') as fh:
        fh.write(content.encode('utf-8'))
Save the assembled HTML into a file .
17,698
def _format_evidence_text(stmt):
    """Return a list of evidence dicts with agent text highlighted.

    For each Evidence of stmt, produces a dict with the source API,
    pmid, text_refs, source_hash, and the evidence text with each
    agent's mention wrapped in a role-colored badge span.
    """
    def get_role(ag_ix):
        # Statements without a clear subject/object direction mark all
        # agents as 'other'; binary statements map agent 0 -> subject,
        # agent 1 -> object.
        if isinstance(stmt, Complex) or \
                isinstance(stmt, SelfModification) or \
                isinstance(stmt, ActiveForm) or \
                isinstance(stmt, Conversion) or \
                isinstance(stmt, Translocation):
            return 'other'
        else:
            assert len(stmt.agent_list()) == 2, \
                (len(stmt.agent_list()), type(stmt))
            return 'subject' if ag_ix == 0 else 'object'
    ev_list = []
    for ix, ev in enumerate(stmt.evidence):
        # Biopax evidence carries a sub-database id; include it in the
        # displayed source name as "biopax:<sub_id>".
        if ev.source_api == 'biopax' and \
                'source_sub_id' in ev.annotations and \
                ev.annotations['source_sub_id']:
            source_api = '%s:%s' % (ev.source_api,
                                    ev.annotations['source_sub_id'])
        else:
            source_api = ev.source_api
        if ev.text is None:
            format_text = None
        else:
            indices = []
            # NOTE(review): this inner loop rebinds `ix`, shadowing the
            # outer evidence index; the outer `ix` is not read afterwards
            # so behavior is unaffected, but it is fragile — confirm
            # before relying on the outer index here.
            for ix, ag in enumerate(stmt.agent_list()):
                if ag is None:
                    continue
                # Prefer the raw text recorded for this agent in the
                # evidence annotations; fall back to the grounding TEXT.
                try:
                    ag_text = ev.annotations['agents']['raw_text'][ix]
                    if ag_text is None:
                        # Treat a null raw_text the same as a missing one.
                        raise KeyError
                except KeyError:
                    ag_text = ag.db_refs.get('TEXT')
                if ag_text is None:
                    continue
                role = get_role(ix)
                tag_start = '<span class="badge badge-%s">' % role
                tag_close = '</span>'
                # Record every occurrence of the agent text so tag_text
                # can wrap each one with the badge span.
                indices += [(m.start(), m.start() + len(ag_text),
                             ag_text, tag_start, tag_close)
                            for m in re.finditer(re.escape(ag_text),
                                                 ev.text)]
            format_text = tag_text(ev.text, indices)
        ev_list.append({'source_api': source_api, 'pmid': ev.pmid,
                        'text_refs': ev.text_refs, 'text': format_text,
                        'source_hash': ev.source_hash})
    return ev_list
Returns evidence metadata with highlighted evidence text .
17,699
def process_pmc(pmc_id, offline=False, output_fname=default_output_fname):
    """Return a ReachProcessor by processing a paper with a given PMC id.

    The NXML content is fetched from the PMC web service, written to a
    local file named <pmc_id>.nxml, and processed with the paper's PMID
    (looked up from the PMC id) attached as the citation. Returns None
    if the NXML content could not be retrieved.
    """
    xml_str = pmc_client.get_xml(pmc_id)
    if xml_str is None:
        return None
    nxml_path = pmc_id + '.nxml'
    with open(nxml_path, 'wb') as fh:
        fh.write(xml_str.encode('utf-8'))
    citation_pmid = id_lookup(pmc_id, 'pmcid').get('pmid')
    return process_nxml_file(nxml_path, citation=citation_pmid,
                             offline=offline, output_fname=output_fname)
Return a ReachProcessor by processing a paper with a given PMC id .