idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
def process_pubmed_abstract(pubmed_id, offline=False,
                            output_fname=default_output_fname, **kwargs):
    """Return a ReachProcessor by processing an abstract with a given
    Pubmed id.

    Any extra keyword arguments are forwarded to `process_text`.
    """
    abstract = pubmed_client.get_abstract(pubmed_id)
    if abstract is None:
        return None
    reach_proc = process_text(abstract, citation=pubmed_id, offline=offline,
                              output_fname=output_fname, **kwargs)
    # Mark every extracted evidence as coming from an abstract section.
    if reach_proc and reach_proc.statements:
        for stmt in reach_proc.statements:
            for evid in stmt.evidence:
                evid.epistemics['section_type'] = 'abstract'
    return reach_proc
def process_text(text, citation=None, offline=False,
                 output_fname=default_output_fname, timeout=None):
    """Return a ReachProcessor by processing the given text.

    Parameters
    ----------
    text : str
        Text to process with REACH.
    citation : Optional[str]
        Citation (e.g. PMID) attached to extracted evidence.
    offline : Optional[bool]
        If True, use the local JVM-based REACH reader; otherwise POST to
        the REACH web service.
    output_fname : Optional[str]
        File in which the raw REACH JSON output is saved.
    timeout : Optional[float]
        Timeout for the web-service request (ignored when offline).
    """
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as e:
            logger.error(e)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateText(text, 'fries')
        except JavaException as e:
            logger.error('Could not process text.')
            logger.error(e)
            return None
        # Newer REACH versions use 'resultJson', older ones 'result'.
        json_str = result_map.get('resultJson')
        if not json_str:
            json_str = result_map.get('result')
        # Fix: previously a missing result led to None.encode() raising an
        # AttributeError; guard like process_nxml_str does.
        if json_str is None:
            logger.warning('No results retrieved')
            return None
        if not isinstance(json_str, bytes):
            json_str = json_str.encode('utf-8')
    else:
        data = {'text': text.encode('utf-8')}
        try:
            res = requests.post(reach_text_url, data, timeout=timeout)
        except requests.exceptions.RequestException as e:
            logger.error('Could not connect to REACH service:')
            logger.error(e)
            return None
        json_str = res.content
    if not isinstance(json_str, bytes):
        raise TypeError('{} is {} instead of {}'.format(
            json_str, json_str.__class__, bytes))
    with open(output_fname, 'wb') as fh:
        fh.write(json_str)
    return process_json_str(json_str.decode('utf-8'), citation)
def process_nxml_str(nxml_str, citation=None, offline=False,
                     output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML string."""
    if offline:
        if not try_offline:
            logger.error('Offline reading is not available.')
            return None
        try:
            api_ruler = reach_reader.get_api_ruler()
        except ReachOfflineReadingError as err:
            logger.error(err)
            logger.error('Cannot read offline because the REACH ApiRuler '
                         'could not be instantiated.')
            return None
        try:
            result_map = api_ruler.annotateNxml(nxml_str, 'fries')
        except JavaException as err:
            logger.error('Could not process NXML.')
            logger.error(err)
            return None
        # Newer REACH versions use 'resultJson', older ones 'result'.
        json_str = result_map.get('resultJson')
        if not json_str:
            json_str = result_map.get('result')
        if json_str is None:
            logger.warning('No results retrieved')
            return None
        if isinstance(json_str, bytes):
            json_str = json_str.decode('utf-8')
        return process_json_str(json_str, citation)
    else:
        payload = {'nxml': nxml_str}
        try:
            res = requests.post(reach_nxml_url, payload)
        except requests.exceptions.RequestException as err:
            logger.error('Could not connect to REACH service:')
            logger.error(err)
            return None
        if res.status_code != 200:
            logger.error('Could not process NXML via REACH service.' +
                         'Status code: %d' % res.status_code)
            return None
        json_str = res.text
        with open(output_fname, 'wb') as fh:
            fh.write(json_str.encode('utf-8'))
        return process_json_str(json_str, citation)
def process_nxml_file(file_name, citation=None, offline=False,
                      output_fname=default_output_fname):
    """Return a ReachProcessor by processing the given NXML file.

    Fix: the `offline` argument was previously hard-coded to False when
    delegating to `process_nxml_str`, so requesting offline reading for a
    file was silently ignored.
    """
    with open(file_name, 'rb') as f:
        nxml_str = f.read().decode('utf-8')
    return process_nxml_str(nxml_str, citation, offline, output_fname)
def process_json_file(file_name, citation=None):
    """Return a ReachProcessor by processing the given REACH json file."""
    try:
        with open(file_name, 'rb') as fh:
            contents = fh.read().decode('utf-8')
        return process_json_str(contents, citation)
    except IOError:
        logger.error('Could not read file %s.' % file_name)
def process_json_str(json_str, citation=None):
    """Return a ReachProcessor by processing the given REACH json string."""
    if not isinstance(json_str, basestring):
        raise TypeError('{} is {} instead of {}'.format(
            json_str, json_str.__class__, basestring))
    # REACH emits hyphenated keys; normalize them to underscores so that
    # they can be accessed as attributes downstream.
    for hyphenated in ('frame-id', 'argument-label', 'object-meta', 'doc-id',
                       'is-hypothesis', 'is-negated', 'is-direct', 'found-by'):
        json_str = json_str.replace(hyphenated, hyphenated.replace('-', '_'))
    try:
        json_dict = json.loads(json_str)
    except ValueError:
        logger.error('Could not decode JSON string.')
        return None
    rp = ReachProcessor(json_dict, citation)
    rp.get_modifications()
    rp.get_complexes()
    rp.get_activation()
    rp.get_translocation()
    rp.get_regulate_amounts()
    return rp
def make_parser():
    """Generate the parser for this script."""
    parser = ArgumentParser(
        'wait_for_complete.py',
        usage='%(prog)s [-h] queue_name [options]',
        description=('Wait for a set of batch jobs to complete, and monitor '
                     'them as they run.'),
        epilog=('Jobs can also be monitored, terminated, and otherwise '
                'managed on the AWS website. However this tool will also tag '
                'the instances, and should be run whenever a job is submitted '
                'to AWS.'))
    parser.add_argument(
        dest='queue_name',
        help=('The name of the queue to watch and wait for completion. If no '
              'jobs are specified, this will wait until all jobs in the queue '
              'are completed (either SUCCEEDED or FAILED).'))
    parser.add_argument(
        '--watch', '-w', dest='job_list', metavar='JOB_ID', nargs='+',
        help=('Specify particular jobs using their job ids, as reported by '
              'the submit command. Many ids may be specified.'))
    parser.add_argument(
        '--prefix', '-p', dest='job_name_prefix',
        help='Specify a prefix for the name of the jobs to watch and wait '
             'for.')
    parser.add_argument(
        '--interval', '-i', dest='poll_interval', default=10, type=int,
        help=('The time interval to wait between job status checks, in '
              'seconds (default: %(default)d seconds).'))
    parser.add_argument(
        '--timeout', '-T', metavar='TIMEOUT', type=int,
        help=('If the logs are not updated for %(metavar)s seconds, '
              'print a warning. If `--kill_on_log_timeout` flag is set, then '
              'the offending jobs will be automatically terminated.'))
    parser.add_argument(
        '--kill_on_timeout', '-K', action='store_true',
        help='If a log times out, terminate the offending job.')
    parser.add_argument(
        '--stash_log_method', '-l', choices=['s3', 'local'], metavar='METHOD',
        help=('Select a method from: [%(choices)s] to store the job logs. '
              'If no method is specified, the logs will not be '
              'loaded off of AWS. If \'s3\' is specified, then '
              '`job_name_prefix` must also be given, as this will indicate '
              'where on s3 to store the logs.'))
    return parser
def id_lookup(paper_id, idtype):
    """Take an ID of type PMID, PMCID or DOI and lookup the other IDs."""
    if idtype not in ('pmid', 'pmcid', 'doi'):
        raise ValueError("Invalid idtype %s; must be 'pmid', 'pmcid', "
                         "or 'doi'." % idtype)
    ids = {'doi': None, 'pmid': None, 'pmcid': None}
    # Start from whatever the PMC ID converter knows about this paper.
    pmc_ids = pmc_client.id_lookup(paper_id, idtype)
    ids['pmid'] = pmc_ids.get('pmid')
    ids['pmcid'] = pmc_ids.get('pmcid')
    ids['doi'] = pmc_ids.get('doi')
    ids[idtype] = paper_id
    # If we have a DOI (given or found) we are done.
    if idtype == 'doi':
        return ids
    elif ids.get('doi'):
        return ids
    # A PMCID with neither PMID nor DOI cannot be resolved further.
    if ids.get('pmcid') and ids.get('doi') is None and \
            ids.get('pmid') is None:
        logger.warning('%s: PMCID without PMID or DOI' % ids.get('pmcid'))
        return ids
    # At this point we must have a PMID and no DOI; get the DOI via CrossRef.
    assert ids.get('pmid') is not None
    assert ids.get('doi') is None
    ids['doi'] = crossref_client.doi_query(ids['pmid'])
    return ids
def get_full_text(paper_id, idtype, preferred_content_type='text/xml'):
    """Return the content and the content type of an article.

    Tries, in order: PMC open-access XML, Elsevier full text (for Elsevier
    DOIs), then the Pubmed abstract. Returns ``(None, None)`` if nothing
    could be retrieved.

    Fixes: removed a duplicated abstract-fallback block and an unreachable
    trailing ``assert False``; previously the non-Elsevier path could call
    ``pubmed_client.get_abstract(None)`` when only a DOI was available.
    """
    if preferred_content_type not in \
            ('text/xml', 'text/plain', 'application/pdf'):
        raise ValueError("preferred_content_type must be one of 'text/xml', "
                         "'text/plain', or 'application/pdf'.")
    ids = id_lookup(paper_id, idtype)
    pmcid = ids.get('pmcid')
    pmid = ids.get('pmid')
    doi = ids.get('doi')
    # First try to find the full text in the PMC open-access subset.
    if pmcid:
        nxml = pmc_client.get_xml(pmcid)
        if nxml:
            return nxml, 'pmc_oa_xml'
    if not doi and not pmid:
        return None, None
    # For Elsevier-published articles, try the Elsevier full-text API.
    if doi:
        publisher = crossref_client.get_publisher(doi)
        if publisher == 'Elsevier BV':
            logger.info('Elsevier: %s' % pmid)
            try:
                article_xml = elsevier_client.download_article(doi)
            except Exception as e:
                logger.error("Error downloading Elsevier article: %s" % e)
                article_xml = None
            if article_xml is not None:
                return article_xml, 'elsevier_xml'
    # Fall back on the Pubmed abstract if a PMID is available.
    if pmid:
        abstract = pubmed_client.get_abstract(pmid)
        if abstract is None:
            return None, None
        return abstract, 'abstract'
    return None, None
def get_api_ruler(self):
    """Return the existing reader if it exists or launch a new one."""
    if self.api_ruler is not None:
        return self.api_ruler
    try:
        self.api_ruler = \
            autoclass('org.clulab.reach.export.apis.ApiRuler')
    except JavaException as err:
        raise ReachOfflineReadingError(err)
    return self.api_ruler
def _download_biogrid_data(url):
    """Downloads zipped, tab-separated Biogrid data in .tab2 format.

    Returns a csv.reader over the unzipped rows with the header skipped.

    Fix: the function previously ignored its `url` parameter and always
    fetched the module-level `biogrid_file_url`.
    """
    res = requests.get(url)
    if res.status_code != 200:
        raise Exception('Unable to download Biogrid data: status code %s'
                        % res.status_code)
    zip_file = ZipFile(BytesIO(res.content))
    zip_info_list = zip_file.infolist()
    if len(zip_info_list) != 1:
        # Fixed wording: the message was missing the word 'one'.
        raise Exception('There should be exactly one zipfile in BioGrid zip '
                        'archive: %s' % str(zip_info_list))
    unzipped_bytes = zip_file.read(zip_info_list[0])
    biogrid_str = StringIO(unzipped_bytes.decode('utf8'))
    csv_reader = csv.reader(biogrid_str, delimiter='\t')
    next(csv_reader)  # skip the header row
    return csv_reader
def _make_agent(self, entrez_id, text_id):
    """Make an Agent object, appropriately grounded."""
    hgnc_name, db_refs = self._make_db_refs(entrez_id, text_id)
    # Prefer the official HGNC symbol; fall back on the text string.
    name = hgnc_name if hgnc_name is not None else text_id
    if name is None:
        return None
    return Agent(name, db_refs=db_refs)
def _make_db_refs(self, entrez_id, text_id):
    """Looks up the HGNC ID and name, as well as the Uniprot ID."""
    db_refs = {}
    # '-' is BioGrid's placeholder for a missing text id.
    if text_id != '-' and text_id is not None:
        db_refs['TEXT'] = text_id
    hgnc_id = hgnc_client.get_hgnc_from_entrez(entrez_id)
    hgnc_name = hgnc_client.get_hgnc_name(hgnc_id)
    if hgnc_id is not None:
        db_refs['HGNC'] = hgnc_id
        up_id = hgnc_client.get_uniprot_id(hgnc_id)
        if up_id is not None:
            db_refs['UP'] = up_id
    return hgnc_name, db_refs
def make_model(self, policies=None, initial_conditions=True,
               reverse_effects=False):
    """Assemble the Kami model from the collected INDRA Statements."""
    self.processed_policies = self.process_policies(policies)
    # Preassemble: resolve activities and optionally add reverse effects.
    ppa = PysbPreassembler(self.statements)
    ppa.replace_activities()
    if reverse_effects:
        ppa.add_reverse_effects()
    self.statements = ppa.statements
    # Temporarily override the instance policies with locally given ones.
    if policies is not None:
        global_policies = self.policies
        if isinstance(policies, basestring):
            local_policies = {'other': policies}
        else:
            local_policies = {'other': 'default'}
            local_policies.update(policies)
        self.policies = local_policies
    graphs = []
    self.model = {'graphs': graphs, 'typing': []}
    action_graph = {'id': 'action_graph',
                    'attrs': {'name': 'action_graph'},
                    'graph': {'nodes': [], 'edges': []}}
    graphs.append(action_graph)
    self._assemble()
    # Restore the instance-level policies after assembly.
    if policies is not None:
        self.policies = global_policies
    return self.model
def add_agent(self, agent):
    """Add an INDRA Agent and its conditions to the Nugget."""
    agent_id = self.add_node(agent.name)
    self.add_typing(agent_id, 'agent')
    # Encode each bound condition as a binding site plus a bound/free test.
    for bc in agent.bound_conditions:
        test_type = 'is_bnd' if bc.is_bound else 'is_free'
        bound_name = bc.agent.name
        agent_bs = get_binding_site_name(bc.agent)
        test_name = '%s_bound_to_%s_test' % (agent_id, bound_name)
        agent_bs_id = self.add_node(agent_bs)
        test_id = self.add_node(test_name)
        self.add_edge(agent_bs_id, agent_id)
        self.add_edge(agent_bs_id, test_id)
        self.add_typing(agent_bs_id, 'locus')
        self.add_typing(test_id, test_type)
    # Encode each modification condition as a state node.
    for mod in agent.mods:
        mod_site_str = abbrevs[mod.mod_type]
        if mod.residue is not None:
            mod_site_str = mod.residue
        mod_pos_str = mod.position if mod.position is not None else ''
        mod_site = '%s%s' % (mod_site_str, mod_pos_str)
        site_states = states[mod.mod_type]
        val = site_states[1] if mod.is_modified else site_states[0]
        mod_site_id = self.add_node(mod_site, {'val': val})
        self.add_edge(mod_site_id, agent_id)
        self.add_typing(mod_site_id, 'state')
    return agent_id
def add_node(self, name_base, attrs=None):
    """Add a node with a given base name to the Nugget and return ID.

    Fix: the counter was previously incremented under the *generated*
    node id (e.g. 'x_1') rather than the base name, so the third and
    subsequent nodes with the same base name all received the duplicate
    id 'x_1'. We now count occurrences of the base name itself.
    """
    count = self.counters[name_base]
    node_id = name_base if count == 0 else '%s_%d' % (name_base, count)
    node = {'id': node_id}
    if attrs:
        node['attrs'] = attrs
    self.nodes.append(node)
    self.counters[name_base] += 1
    return node_id
def get_nugget_dict(self):
    """Return the Nugget as a dictionary."""
    return {
        'id': self.id,
        'graph': {
            'nodes': self.nodes,
            'edges': self.edges,
        },
        'attrs': {
            'name': self.name,
            'rate': self.rate,
        },
    }
def process_text(text, pmid=None, python2_path=None):
    """Processes the specified plain text with TEES and converts output to
    supported INDRA statements.

    Checks for the TEES installation in the TEES_PATH environment variable
    and configuration file; if not found, checks candidate paths in
    tees_candidate_paths. Raises an exception if TEES cannot be found in
    any of these places.
    """
    # TEES is Python 2 only: locate a python2.7 binary on the PATH if the
    # caller did not provide one explicitly.
    if python2_path is None:
        for directory in os.environ["PATH"].split(os.pathsep):
            candidate = os.path.join(directory, 'python2.7')
            if os.path.isfile(candidate):
                python2_path = candidate
                print('Found python 2 interpreter at', python2_path)
                break
    if python2_path is None:
        raise Exception('Could not find python2 in the directories ' +
                        'listed in the PATH environment variable. ' +
                        'Need python2 to run TEES.')
    # Run TEES and process the output files into INDRA statements.
    a1_text, a2_text, sentence_segmentations = run_on_text(text, python2_path)
    return TEESProcessor(a1_text, a2_text, sentence_segmentations, pmid)
def run_on_text(text, python2_path):
    """Runs TEES on the given text in a temporary directory and returns a
    temporary directory with TEES output.

    The caller should delete this directory when done with it. This function
    runs TEES and produces TEES output files but does not process TEES
    output into INDRA statements.

    Fix: when no TEES installation could be located, `tees_path` stayed
    None and `os.path.isdir(None)` raised an unhelpful TypeError; we now
    raise the intended exception. The working-directory restore is also
    done in a `finally` so it cannot be skipped.
    """
    tees_path = get_config('TEES_PATH')
    if tees_path is None:
        # Scan the candidate directories for one with the expected layout.
        for cpath in tees_candidate_paths:
            cpath = os.path.expanduser(cpath)
            if not os.path.isdir(cpath):
                continue
            has_expected_files = all(
                os.path.isfile(os.path.join(cpath, f))
                for f in tees_installation_files)
            has_expected_dirs = all(
                os.path.isdir(os.path.join(cpath, d))
                for d in tees_installation_dirs)
            if has_expected_files and has_expected_dirs:
                tees_path = cpath
                print('Found TEES installation at ' + cpath)
                break
    if tees_path is None or not os.path.isdir(tees_path):
        raise Exception('Provided TEES directory does not exist.')
    classify_path = 'classify.py'  # resolved relative to tees_path
    tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor')
    pwd = os.path.abspath(os.getcwd())
    try:
        text_path = os.path.join(tmp_dir, 'text.txt')
        # TEES expects latin-1 input; silently drop unencodable characters.
        with codecs.open(text_path, 'w', encoding='latin-1',
                         errors='ignore') as f:
            f.write(text)
        output_path = os.path.join(tmp_dir, 'output')
        model_path = os.path.join(tees_path, 'tees_data/models/GE11-test/')
        command = [python2_path, classify_path, '-m', model_path,
                   '-i', text_path, '-o', output_path]
        try:
            os.chdir(tees_path)
            p = subprocess.Popen(command, stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE, cwd=tees_path)
            p.wait()
            so, se = p.communicate()
            print(so)
            print(se)
        finally:
            os.chdir(pwd)
    except BaseException:
        # Clean up the temporary directory before propagating the error.
        shutil.rmtree(tmp_dir)
        raise
    output_tuple = extract_output(tmp_dir)
    shutil.rmtree(tmp_dir)
    return output_tuple
def extract_output(output_dir):
    """Extract the text of the a1, a2 and sentence segmentation files from
    the TEES output directory. These files are located within a compressed
    archive.

    Fixes: removed three unreachable ``return None, None, None`` statements
    that followed ``raise``; on error the temporary directory is now kept
    (as the printed message has always claimed) instead of being removed.
    """
    # There should be exactly one gzipped sentence-segmentation file.
    sentences_glob = os.path.join(output_dir, '*-preprocessed.xml.gz')
    sentences_filename_candidates = glob.glob(sentences_glob)
    if len(sentences_filename_candidates) != 1:
        m = 'Looking for exactly one file matching %s but found %d matches'
        raise Exception(m % (sentences_glob,
                             len(sentences_filename_candidates)))
    with gzip.GzipFile(sentences_filename_candidates[0], 'r') as f:
        sentence_segmentations = f.read().decode('utf-8')
    tmp_dir = tempfile.mkdtemp(suffix='indra_tees_processor')
    try:
        # There should be exactly one events tarball.
        tarfile_glob = os.path.join(output_dir, '*-events.tar.gz')
        candidate_tarfiles = glob.glob(tarfile_glob)
        if len(candidate_tarfiles) != 1:
            raise Exception('Expected exactly one match for glob %s' %
                            tarfile_glob)
        tar_file = tarfile.open(candidate_tarfiles[0])
        a1_file = None
        a2_file = None
        extract_these = []
        for member in tar_file.getmembers():
            if re.match('[a-zA-Z0-9].*.a[12]', member.name):
                extract_these.append(member)
                if member.name.endswith('.a1'):
                    a1_file = member.name
                elif member.name.endswith('.a2'):
                    a2_file = member.name
                else:
                    assert False
        if len(extract_these) != 2 or a1_file is None or a2_file is None:
            raise Exception('We thought there would be one .a1 and one .a2' +
                            ' file in the tarball, but we got %d files total'
                            % len(extract_these))
        tar_file.extractall(path=tmp_dir, members=extract_these)
        with codecs.open(os.path.join(tmp_dir, a1_file), 'r',
                         encoding='utf-8') as f:
            a1_text = f.read()
        with codecs.open(os.path.join(tmp_dir, a2_file), 'r',
                         encoding='utf-8') as f:
            a2_text = f.read()
        shutil.rmtree(tmp_dir)
        return a1_text, a2_text, sentence_segmentations
    except BaseException:
        # Keep the directory around so its contents can be inspected.
        print('Not removing temporary directory: ' + tmp_dir)
        raise
def _list_to_seq(lst):
    """Return a scala.collection.Seq from a Python list."""
    mutable_list = autoclass('scala.collection.mutable.MutableList')()
    for item in lst:
        mutable_list.appendElem(item)
    return mutable_list
def process_text(self, text, format='json'):
    """Return a mentions JSON object given text."""
    if self.eidos_reader is None:
        self.initialize_reader()
    # Wrap plain values as scala.Some for the optional reader arguments.
    default_arg = lambda x: autoclass('scala.Some')(x)
    today = datetime.date.today().strftime("%Y-%m-%d")
    fname = 'default_file_name'
    annot_doc = self.eidos_reader.extractFromText(
        text, True, False, default_arg(today), default_arg(fname))
    if format == 'json':
        mentions = annot_doc.odinMentions()
        ser = autoclass(eidos_package +
                        '.serialization.json.WMJSONSerializer')
        mentions_json = ser.toJsonStr(mentions)
    elif format == 'json_ld':
        # The JLDCorpus class handles JSON-LD serialization.
        ml = _list_to_seq([annot_doc])
        jc = autoclass(eidos_package + '.serialization.json.JLDCorpus')
        corpus = jc(ml)
        mentions_json = corpus.toJsonStr()
    json_dict = json.loads(mentions_json)
    return json_dict
def process_text(text, out_format='json_ld', save_json='eidos_output.json',
                 webservice=None):
    """Return an EidosProcessor by processing the given text."""
    if not webservice:
        # Read locally with the JVM-based Eidos reader.
        if eidos_reader is None:
            logger.error('Eidos reader is not available.')
            return None
        json_dict = eidos_reader.process_text(text, out_format)
    else:
        # Delegate reading to the Eidos web service.
        res = requests.post('%s/process_text' % webservice,
                            json={'text': text})
        json_dict = res.json()
    if save_json:
        with open(save_json, 'wt') as fh:
            json.dump(json_dict, fh, indent=2)
    return process_json(json_dict)
def process_json_file(file_name):
    """Return an EidosProcessor by processing the given Eidos JSON-LD file."""
    try:
        with open(file_name, 'rb') as fh:
            contents = fh.read().decode('utf-8')
        return process_json_str(contents)
    except IOError:
        logger.exception('Could not read file %s.' % file_name)
def process_json(json_dict):
    """Return an EidosProcessor by processing a Eidos JSON-LD dict."""
    processor = EidosProcessor(json_dict)
    # Run each extraction pass in turn.
    processor.extract_causal_relations()
    processor.extract_correlations()
    processor.extract_events()
    return processor
def get_drug_inhibition_stmts(drug):
    """Query ChEMBL for kinetics data given drug as Agent get back statements"""
    # Resolve the drug's ChEMBL id from its ChEBI or MESH grounding.
    chebi_id = drug.db_refs.get('CHEBI')
    mesh_id = drug.db_refs.get('MESH')
    if chebi_id:
        drug_chembl_id = chebi_client.get_chembl_id(chebi_id)
    elif mesh_id:
        drug_chembl_id = get_chembl_id(mesh_id)
    else:
        logger.error('Drug missing ChEBI or MESH grounding.')
        return None
    logger.info('Drug: %s' % (drug_chembl_id))
    query_dict = {'query': 'activity',
                  'params': {'molecule_chembl_id': drug_chembl_id,
                             'limit': 10000}}
    res = send_query(query_dict)
    activities = res['activities']
    targ_act_dict = activities_by_target(activities)
    # Restrict to single-protein targets.
    target_chembl_ids = [x for x in targ_act_dict]
    protein_targets = get_protein_targets_only(target_chembl_ids)
    filtered_targ_act_dict = {t: targ_act_dict[t]
                              for t in [x for x in protein_targets]}
    stmts = []
    for target_chembl_id in filtered_targ_act_dict:
        target_activity_ids = filtered_targ_act_dict[target_chembl_id]
        target_activities = [x for x in activities
                             if x['activity_id'] in target_activity_ids]
        components = protein_targets[target_chembl_id]['target_components']
        target_upids = [comp['accession'] for comp in components]
        # Build Evidence from each assay with usable kinetics.
        evidence = [ev for ev in (get_evidence(assay)
                                  for assay in target_activities) if ev]
        if len(evidence) > 0:
            for target_upid in target_upids:
                agent_name = uniprot_client.get_gene_name(target_upid)
                target_agent = Agent(agent_name, db_refs={'UP': target_upid})
                stmts.append(Inhibition(drug, target_agent,
                                        evidence=evidence))
    return stmts
def send_query(query_dict):
    """Query ChEMBL API"""
    endpoint = query_dict['query']
    params = query_dict['params']
    url = 'https://www.ebi.ac.uk/chembl/api/data/' + endpoint + '.json'
    r = requests.get(url, params=params)
    r.raise_for_status()
    return r.json()
def query_target(target_chembl_id):
    """Query ChEMBL API target by id"""
    res = send_query({'query': 'target',
                      'params': {'target_chembl_id': target_chembl_id,
                                 'limit': 1}})
    return res['targets'][0]
def activities_by_target(activities):
    """Get back lists of activities in a dict keyed by ChEMBL target id"""
    targ_act_dict = defaultdict(list)
    for activity in activities:
        targ_act_dict[activity['target_chembl_id']].append(
            activity['activity_id'])
    # Deduplicate the activity ids for each target.
    for target_chembl_id in targ_act_dict:
        targ_act_dict[target_chembl_id] = \
            list(set(targ_act_dict[target_chembl_id]))
    return targ_act_dict
def get_protein_targets_only(target_chembl_ids):
    """Given list of ChEMBL target ids, return dict of SINGLE PROTEIN targets"""
    protein_targets = {}
    for chembl_id in target_chembl_ids:
        target = query_target(chembl_id)
        if 'SINGLE PROTEIN' in target['target_type']:
            protein_targets[chembl_id] = target
    return protein_targets
def get_evidence(assay):
    """Given an activity, return an INDRA Evidence object."""
    kin = get_kinetics(assay)
    source_id = assay.get('assay_chembl_id')
    if not kin:
        # No usable kinetics values: no evidence.
        return None
    chembl_doc_id = str(assay.get('document_chembl_id'))
    pmid = get_pmid(chembl_doc_id)
    return Evidence(source_api='chembl', pmid=pmid, source_id=source_id,
                    annotations={'kinetics': kin})
def get_kinetics(assay):
    """Given an activity, return its kinetics values.

    Returns None (with a warning) for missing/invalid values, unhandled
    units, or unhandled parameter types.

    Fix: ``float()`` raises ValueError for non-numeric strings; previously
    only TypeError (from a None value) was caught, so a malformed string
    value crashed the caller.
    """
    try:
        val = float(assay.get('standard_value'))
    except (TypeError, ValueError):
        logger.warning('Invalid assay value: %s'
                       % assay.get('standard_value'))
        return None
    unit = assay.get('standard_units')
    # Convert the reported unit into a molar concentration symbol.
    if unit == 'nM':
        unit_sym = 1e-9 * units.mol / units.liter
    elif unit == 'uM':
        unit_sym = 1e-6 * units.mol / units.liter
    else:
        logger.warning('Unhandled unit: %s' % unit)
        return None
    param_type = assay.get('standard_type')
    if param_type not in ['IC50', 'EC50', 'INH', 'Potency', 'Kd']:
        logger.warning('Unhandled parameter type: %s' % param_type)
        logger.info(str(assay))
        return None
    return {param_type: val * unit_sym}
def get_pmid(doc_id):
    """Get PMID from document_chembl_id"""
    url_pmid = 'https://www.ebi.ac.uk/chembl/api/data/document.json'
    params = {'document_chembl_id': doc_id}
    js = requests.get(url_pmid, params=params).json()
    return str(js['documents'][0]['pubmed_id'])
def get_target_chemblid(target_upid):
    """Get ChEMBL ID from UniProt upid"""
    url = 'https://www.ebi.ac.uk/chembl/api/data/target.json'
    params = {'target_components__accession': target_upid}
    r = requests.get(url, params=params)
    r.raise_for_status()
    js = r.json()
    return js['targets'][0]['target_chembl_id']
def get_mesh_id(nlm_mesh):
    """Get MESH ID from NLM MESH"""
    url_nlm2mesh = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi'
    params = {'db': 'mesh', 'term': nlm_mesh, 'retmode': 'JSON'}
    res = requests.get(url_nlm2mesh, params=params).json()
    return res['esearchresult']['idlist'][0]
def get_pcid(mesh_id):
    """Get PC ID from MESH ID"""
    url_mesh2pcid = 'http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi'
    params = {'dbfrom': 'mesh', 'id': mesh_id,
              'db': 'pccompound', 'retmode': 'JSON'}
    res = requests.get(url_mesh2pcid, params=params).json()
    return res['linksets'][0]['linksetdbs'][0]['links'][0]
def get_chembl_id(nlm_mesh):
    """Get ChEMBL ID from NLM MESH"""
    # MESH -> PubChem compound id -> synonym list -> ChEMBL id.
    mesh_id = get_mesh_id(nlm_mesh)
    pcid = get_pcid(mesh_id)
    url_mesh2pcid = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/' + \
        'cid/%s/synonyms/JSON' % pcid
    res = requests.get(url_mesh2pcid).json()
    synonyms = res['InformationList']['Information'][0]['Synonym']
    # Exclude SureChEMBL ('SCHEMBL') entries.
    matches = [syn for syn in synonyms
               if 'CHEMBL' in syn and 'SCHEMBL' not in syn]
    return matches[0]
def get_sentences(self, root_element, block_tags):
    """Returns a list of plain-text sentences by iterating through XML tags
    except for those listed in block_tags."""
    sentences = []
    for child in root_element:
        # Skip subtrees whose tag matches one of the blocked tags.
        if self.any_ends_with(block_tags, child.tag):
            continue
        if child.text is not None and not re.match(r'^\s*$', child.text):
            sentences.extend(self.sentence_tokenize(child.text))
        sentences.extend(self.get_sentences(child, block_tags))
    # NOTE(review): debug output rewritten on every (recursive) call;
    # presumably leftover instrumentation - confirm before removing.
    f = open('sentence_debug.txt', 'w')
    for s in sentences:
        f.write(s.lower() + '\n')
    f.close()
    return sentences
def any_ends_with(self, string_list, pattern):
    """Returns true iff one of the strings in string_list ends in pattern."""
    # basestring exists on Python 2 only; fall back on str for Python 3.
    try:
        string_type = basestring
    except NameError:
        string_type = str
    if not isinstance(pattern, string_type):
        # Non-string patterns can never match.
        return False
    return any(pattern.endswith(s) for s in string_list)
def get_tag_names(self):
    """Returns the set of tag names present in the XML."""
    root_element = etree.fromstring(self.xml_full_text.encode('utf-8'))
    return self.get_children_tag_names(root_element)
def get_children_tag_names(self, xml_element):
    """Returns all tag names of xml element and its children."""
    tags = {self.remove_namespace_from_tag(xml_element.tag)}
    for child in xml_element.iter(tag=etree.Element):
        if child != xml_element:
            child_tags = self.get_children_tag_names(child)
            if child_tags is not None:
                tags.update(child_tags)
    return tags
def string_matches_sans_whitespace(self, str1, str2_fuzzy_whitespace):
    """Check if two strings match, modulo their whitespace.

    Every whitespace run in str2 is turned into the regex '\\s*' so that
    str1 matches with any amount of (or no) whitespace at those positions.

    Fix: the replacement string must escape the backslash; the previous
    plain '\\s' replacement raises re.error ('bad escape \\s') on
    Python 3.7+, where unknown escapes in replacement templates are errors.
    """
    str2_fuzzy_whitespace = re.sub(r'\s+', r'\\s*', str2_fuzzy_whitespace)
    return re.search(str2_fuzzy_whitespace, str1) is not None
def sentence_matches(self, sentence_text):
    """Returns true iff the sentence contains this mention's upstream and
    downstream participants, and if one of the stemmed verbs in the
    sentence is the same as the stemmed action type."""
    has_upstream = False
    has_downstream = False
    has_verb = False
    # Compare stemmed first word of the action type against stemmed tokens.
    actiontype_words = word_tokenize(self.mention.actiontype)
    actiontype_verb_stemmed = stem(actiontype_words[0])
    words = word_tokenize(sentence_text)
    sentence_lower = sentence_text.lower()
    if self.string_matches_sans_whitespace(
            sentence_lower, self.mention.upstream.lower()):
        has_upstream = True
    if self.string_matches_sans_whitespace(
            sentence_lower, self.mention.downstream.lower()):
        has_downstream = True
    for word in words:
        if actiontype_verb_stemmed == stem(word):
            has_verb = True
    return has_upstream and has_downstream and has_verb
def get_identifiers_url(db_name, db_id):
    """Return an identifiers.org URL for a given database name and ID."""
    identifiers_url = 'http://identifiers.org/'
    bel_scai_url = 'https://arty.scai.fraunhofer.de/artifactory/bel/namespace/'
    if db_name == 'UP':
        url = identifiers_url + 'uniprot/%s' % db_id
    elif db_name == 'HGNC':
        url = identifiers_url + 'hgnc/HGNC:%s' % db_id
    elif db_name == 'IP':
        url = identifiers_url + 'interpro/%s' % db_id
    elif db_name == 'IPR':
        url = identifiers_url + 'interpro/%s' % db_id
    elif db_name == 'CHEBI':
        url = identifiers_url + 'chebi/%s' % db_id
    elif db_name == 'NCIT':
        url = identifiers_url + 'ncit/%s' % db_id
    elif db_name == 'GO':
        # GO ids may or may not carry the 'GO:' prefix already.
        if db_id.startswith('GO:'):
            url = identifiers_url + 'go/%s' % db_id
        else:
            url = identifiers_url + 'go/GO:%s' % db_id
    elif db_name in ('PUBCHEM', 'PCID'):
        # Strip namespace prefixes before building the URL.
        if db_id.startswith('PUBCHEM:'):
            db_id = db_id[8:]
        elif db_id.startswith('PCID:'):
            db_id = db_id[5:]
        url = identifiers_url + 'pubchem.compound/%s' % db_id
    elif db_name == 'PF':
        url = identifiers_url + 'pfam/%s' % db_id
    elif db_name == 'MIRBASEM':
        url = identifiers_url + 'mirbase.mature/%s' % db_id
    elif db_name == 'MIRBASE':
        url = identifiers_url + 'mirbase/%s' % db_id
    elif db_name == 'MESH':
        url = identifiers_url + 'mesh/%s' % db_id
    elif db_name == 'EGID':
        url = identifiers_url + 'ncbigene/%s' % db_id
    elif db_name == 'HMDB':
        url = identifiers_url + 'hmdb/%s' % db_id
    elif db_name == 'LINCS':
        if db_id.startswith('LSM-'):  # LINCS small molecule
            url = identifiers_url + 'lincs.smallmolecule/%s' % db_id
        elif db_id.startswith('LCL-'):  # LINCS cell line
            url = identifiers_url + 'lincs.cell/%s' % db_id
        else:  # assume LINCS protein
            url = identifiers_url + 'lincs.protein/%s' % db_id
    elif db_name == 'HMS-LINCS':
        url = 'http://lincs.hms.harvard.edu/db/sm/%s-101' % db_id
    elif db_name == 'SCHEM':
        url = bel_scai_url + 'selventa-legacy-chemicals/' + \
            'selventa-legacy-chemicals-20150601.belns'
    elif db_name == 'SCOMP':
        url = bel_scai_url + 'selventa-named-complexes/' + \
            'selventa-named-complexes-20150601.belns'
    elif db_name == 'SFAM':
        url = bel_scai_url + 'selventa-protein-families/' + \
            'selventa-protein-families-20150601.belns'
    elif db_name == 'FPLX':
        url = 'http://identifiers.org/fplx/%s' % db_id
    elif db_name == 'LNCRNADB':
        if db_id.startswith('ENSG'):
            url = 'http://www.lncrnadb.org/search/?q=%s' % db_id
        else:
            url = 'http://www.lncrnadb.org/%s/' % db_id
    elif db_name == 'NXPFA':
        url = 'https://www.nextprot.org/term/FA-%s' % db_id
    elif db_name in ('UN', 'WDI', 'FAO'):
        url = 'https://github.com/clulab/eidos/wiki/JSON-LD#Grounding/%s' % \
            db_id
    elif db_name == 'HUME':
        url = ('https://github.com/BBN-E/Hume/blob/master/resource/'
               'ontologies/hume_ontology/%s' % db_id)
    elif db_name == 'CWMS':
        url = 'http://trips.ihmc.us/%s' % db_id
    elif db_name == 'SIGNOR':
        url = 'https://signor.uniroma2.it/relation_result.php?id=%s' % db_id
    elif db_name == 'SOFIA':
        url = 'http://cs.cmu.edu/sofia/%s' % db_id
    elif db_name == 'CHEMBL':
        if not db_id.startswith('CHEMBL'):
            db_id = 'CHEMBL%s' % db_id
        url = identifiers_url + 'chembl.compound/%s' % db_id
    elif db_name == 'NONCODE':
        url = 'http://www.noncode.org/show_gene.php?id=NONHSAG%s' % db_id
    elif db_name == 'TEXT':
        # Plain text groundings have no reference URL.
        return None
    else:
        logger.warning('Unhandled name space %s' % db_name)
        url = None
    return url
17,744 | def dump_statements ( stmts , fname , protocol = 4 ) : logger . info ( 'Dumping %d statements into %s...' % ( len ( stmts ) , fname ) ) with open ( fname , 'wb' ) as fh : pickle . dump ( stmts , fh , protocol = protocol ) | Dump a list of statements into a pickle file . |
17,745 | def load_statements ( fname , as_dict = False ) : logger . info ( 'Loading %s...' % fname ) with open ( fname , 'rb' ) as fh : if sys . version_info [ 0 ] < 3 : stmts = pickle . load ( fh ) else : stmts = pickle . load ( fh , encoding = 'latin1' ) if isinstance ( stmts , dict ) : if as_dict : return stmts st = [ ] for pmid , st_list in stmts . items ( ) : st += st_list stmts = st logger . info ( 'Loaded %d statements' % len ( stmts ) ) return stmts | Load statements from a pickle file . |
17,746 | def map_grounding ( stmts_in , ** kwargs ) : from indra . preassembler . grounding_mapper import GroundingMapper from indra . preassembler . grounding_mapper import gm as grounding_map from indra . preassembler . grounding_mapper import default_agent_map as agent_map logger . info ( 'Mapping grounding on %d statements...' % len ( stmts_in ) ) do_rename = kwargs . get ( 'do_rename' ) gm = kwargs . get ( 'grounding_map' , grounding_map ) if do_rename is None : do_rename = True gm = GroundingMapper ( gm , agent_map , use_deft = kwargs . get ( 'use_deft' , True ) ) stmts_out = gm . map_agents ( stmts_in , do_rename = do_rename ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Map grounding using the GroundingMapper . |
17,747 | def merge_groundings ( stmts_in ) : def surface_grounding ( stmt ) : for idx , concept in enumerate ( stmt . agent_list ( ) ) : if concept is None : continue aggregate_groundings = { } for ev in stmt . evidence : if 'agents' in ev . annotations : groundings = ev . annotations [ 'agents' ] [ 'raw_grounding' ] [ idx ] for ns , value in groundings . items ( ) : if ns not in aggregate_groundings : aggregate_groundings [ ns ] = [ ] if isinstance ( value , list ) : aggregate_groundings [ ns ] += value else : aggregate_groundings [ ns ] . append ( value ) best_groundings = get_best_groundings ( aggregate_groundings ) concept . db_refs = best_groundings def get_best_groundings ( aggregate_groundings ) : best_groundings = { } for ns , values in aggregate_groundings . items ( ) : if all ( [ isinstance ( v , ( tuple , list ) ) for v in values ] ) : best_groundings [ ns ] = [ ] for unique_value in { v [ 0 ] for v in values } : scores = [ v [ 1 ] for v in values if v [ 0 ] == unique_value ] best_groundings [ ns ] . append ( ( unique_value , max ( scores ) ) ) best_groundings [ ns ] = sorted ( best_groundings [ ns ] , key = lambda x : x [ 1 ] , reverse = True ) elif all ( [ not isinstance ( v , ( tuple , list ) ) for v in values ] ) : best_groundings [ ns ] = max ( set ( values ) , key = values . count ) else : unscored_vals = [ v for v in values if not isinstance ( v , ( tuple , list ) ) ] best_groundings [ ns ] = max ( set ( unscored_vals ) , key = unscored_vals . count ) return best_groundings stmts_out = [ ] for stmt in stmts_in : if not isinstance ( stmt , ( Complex , Conversion ) ) : surface_grounding ( stmt ) stmts_out . append ( stmt ) return stmts_out | Gather and merge original grounding information from evidences . |
17,748 | def merge_deltas ( stmts_in ) : stmts_out = [ ] for stmt in stmts_in : if not isinstance ( stmt , Influence ) : stmts_out . append ( stmt ) continue deltas = { } for role in ( 'subj' , 'obj' ) : for info in ( 'polarity' , 'adjectives' ) : key = ( role , info ) deltas [ key ] = [ ] for ev in stmt . evidence : entry = ev . annotations . get ( '%s_%s' % key ) deltas [ key ] . append ( entry if entry else None ) polarity_pairs = list ( zip ( deltas [ ( 'subj' , 'polarity' ) ] , deltas [ ( 'obj' , 'polarity' ) ] ) ) both_pols = [ pair for pair in polarity_pairs if pair [ 0 ] is not None and pair [ 1 ] is not None ] if both_pols : subj_pol , obj_pol = max ( set ( both_pols ) , key = both_pols . count ) stmt . subj . delta [ 'polarity' ] = subj_pol stmt . obj . delta [ 'polarity' ] = obj_pol else : one_pol = [ pair for pair in polarity_pairs if pair [ 0 ] is not None or pair [ 1 ] is not None ] if one_pol : subj_pol , obj_pol = max ( set ( one_pol ) , key = one_pol . count ) stmt . subj . delta [ 'polarity' ] = subj_pol stmt . obj . delta [ 'polarity' ] = obj_pol for attr , role in ( ( stmt . subj . delta , 'subj' ) , ( stmt . obj . delta , 'obj' ) ) : all_adjectives = [ ] for adj in deltas [ ( role , 'adjectives' ) ] : if isinstance ( adj , list ) : all_adjectives += adj elif adj is not None : all_adjectives . append ( adj ) attr [ 'adjectives' ] = all_adjectives stmts_out . append ( stmt ) return stmts_out | Gather and merge original Influence delta information from evidence . |
17,749 | def map_sequence ( stmts_in , ** kwargs ) : from indra . preassembler . sitemapper import SiteMapper , default_site_map logger . info ( 'Mapping sites on %d statements...' % len ( stmts_in ) ) kwarg_list = [ 'do_methionine_offset' , 'do_orthology_mapping' , 'do_isoform_mapping' ] sm = SiteMapper ( default_site_map , use_cache = kwargs . pop ( 'use_cache' , False ) , ** _filter ( kwargs , kwarg_list ) ) valid , mapped = sm . map_sites ( stmts_in ) correctly_mapped_stmts = [ ] for ms in mapped : correctly_mapped = all ( [ mm . has_mapping ( ) for mm in ms . mapped_mods ] ) if correctly_mapped : correctly_mapped_stmts . append ( ms . mapped_stmt ) stmts_out = valid + correctly_mapped_stmts logger . info ( '%d statements with valid sites' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) del sm return stmts_out | Map sequences using the SiteMapper . |
17,750 | def run_preassembly ( stmts_in , ** kwargs ) : dump_pkl_unique = kwargs . get ( 'save_unique' ) belief_scorer = kwargs . get ( 'belief_scorer' ) use_hierarchies = kwargs [ 'hierarchies' ] if 'hierarchies' in kwargs else hierarchies be = BeliefEngine ( scorer = belief_scorer ) pa = Preassembler ( hierarchies , stmts_in ) run_preassembly_duplicate ( pa , be , save = dump_pkl_unique ) dump_pkl = kwargs . get ( 'save' ) return_toplevel = kwargs . get ( 'return_toplevel' , True ) poolsize = kwargs . get ( 'poolsize' , None ) size_cutoff = kwargs . get ( 'size_cutoff' , 100 ) options = { 'save' : dump_pkl , 'return_toplevel' : return_toplevel , 'poolsize' : poolsize , 'size_cutoff' : size_cutoff , 'flatten_evidence' : kwargs . get ( 'flatten_evidence' , False ) , 'flatten_evidence_collect_from' : kwargs . get ( 'flatten_evidence_collect_from' , 'supported_by' ) } stmts_out = run_preassembly_related ( pa , be , ** options ) return stmts_out | Run preassembly on a list of statements . |
17,751 | def run_preassembly_duplicate ( preassembler , beliefengine , ** kwargs ) : logger . info ( 'Combining duplicates on %d statements...' % len ( preassembler . stmts ) ) dump_pkl = kwargs . get ( 'save' ) stmts_out = preassembler . combine_duplicates ( ) beliefengine . set_prior_probs ( stmts_out ) logger . info ( '%d unique statements' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Run deduplication stage of preassembly on a list of statements . |
17,752 | def run_preassembly_related ( preassembler , beliefengine , ** kwargs ) : logger . info ( 'Combining related on %d statements...' % len ( preassembler . unique_stmts ) ) return_toplevel = kwargs . get ( 'return_toplevel' , True ) poolsize = kwargs . get ( 'poolsize' , None ) size_cutoff = kwargs . get ( 'size_cutoff' , 100 ) stmts_out = preassembler . combine_related ( return_toplevel = False , poolsize = poolsize , size_cutoff = size_cutoff ) beliefengine . set_hierarchy_probs ( stmts_out ) do_flatten_evidence = kwargs . get ( 'flatten_evidence' , False ) if do_flatten_evidence : flatten_evidences_collect_from = kwargs . get ( 'flatten_evidence_collect_from' , 'supported_by' ) stmts_out = flatten_evidence ( stmts_out , flatten_evidences_collect_from ) stmts_top = filter_top_level ( stmts_out ) if return_toplevel : stmts_out = stmts_top logger . info ( '%d top-level statements' % len ( stmts_out ) ) else : logger . info ( '%d statements out of which %d are top-level' % ( len ( stmts_out ) , len ( stmts_top ) ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Run related stage of preassembly on a list of statements . |
17,753 | def filter_by_type ( stmts_in , stmt_type , ** kwargs ) : invert = kwargs . get ( 'invert' , False ) logger . info ( 'Filtering %d statements for type %s%s...' % ( len ( stmts_in ) , 'not ' if invert else '' , stmt_type . __name__ ) ) if not invert : stmts_out = [ st for st in stmts_in if isinstance ( st , stmt_type ) ] else : stmts_out = [ st for st in stmts_in if not isinstance ( st , stmt_type ) ] logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to a given statement type . |
17,754 | def _remove_bound_conditions ( agent , keep_criterion ) : new_bc = [ ] for ind in range ( len ( agent . bound_conditions ) ) : if keep_criterion ( agent . bound_conditions [ ind ] . agent ) : new_bc . append ( agent . bound_conditions [ ind ] ) agent . bound_conditions = new_bc | Removes bound conditions of agent such that keep_criterion is False . |
17,755 | def _any_bound_condition_fails_criterion ( agent , criterion ) : bc_agents = [ bc . agent for bc in agent . bound_conditions ] for b in bc_agents : if not criterion ( b ) : return True return False | Returns True if any bound condition fails to meet the specified criterion . |
17,756 | def filter_grounded_only ( stmts_in , ** kwargs ) : remove_bound = kwargs . get ( 'remove_bound' , False ) logger . info ( 'Filtering %d statements for grounded agents...' % len ( stmts_in ) ) stmts_out = [ ] score_threshold = kwargs . get ( 'score_threshold' ) for st in stmts_in : grounded = True for agent in st . agent_list ( ) : if agent is not None : criterion = lambda x : _agent_is_grounded ( x , score_threshold ) if not criterion ( agent ) : grounded = False break if not isinstance ( agent , Agent ) : continue if remove_bound : _remove_bound_conditions ( agent , criterion ) elif _any_bound_condition_fails_criterion ( agent , criterion ) : grounded = False break if grounded : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements that have grounded agents . |
17,757 | def _agent_is_gene ( agent , specific_only ) : if not specific_only : if not ( agent . db_refs . get ( 'HGNC' ) or agent . db_refs . get ( 'UP' ) or agent . db_refs . get ( 'FPLX' ) ) : return False else : if not ( agent . db_refs . get ( 'HGNC' ) or agent . db_refs . get ( 'UP' ) ) : return False return True | Returns whether an agent is for a gene . |
17,758 | def filter_genes_only ( stmts_in , ** kwargs ) : remove_bound = 'remove_bound' in kwargs and kwargs [ 'remove_bound' ] specific_only = kwargs . get ( 'specific_only' ) logger . info ( 'Filtering %d statements for ones containing genes only...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : genes_only = True for agent in st . agent_list ( ) : if agent is not None : criterion = lambda a : _agent_is_gene ( a , specific_only ) if not criterion ( agent ) : genes_only = False break if remove_bound : _remove_bound_conditions ( agent , criterion ) else : if _any_bound_condition_fails_criterion ( agent , criterion ) : genes_only = False break if genes_only : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements containing genes only . |
17,759 | def filter_belief ( stmts_in , belief_cutoff , ** kwargs ) : dump_pkl = kwargs . get ( 'save' ) logger . info ( 'Filtering %d statements to above %f belief' % ( len ( stmts_in ) , belief_cutoff ) ) stmts_out = [ ] for stmt in stmts_in : if stmt . belief < belief_cutoff : continue stmts_out . append ( stmt ) supp_by = [ ] supp = [ ] for st in stmt . supports : if st . belief >= belief_cutoff : supp . append ( st ) for st in stmt . supported_by : if st . belief >= belief_cutoff : supp_by . append ( st ) stmt . supports = supp stmt . supported_by = supp_by logger . info ( '%d statements after filter...' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements with belief above a given cutoff . |
17,760 | def filter_gene_list ( stmts_in , gene_list , policy , allow_families = False , ** kwargs ) : invert = kwargs . get ( 'invert' , False ) remove_bound = kwargs . get ( 'remove_bound' , False ) if policy not in ( 'one' , 'all' ) : logger . error ( 'Policy %s is invalid, not applying filter.' % policy ) else : genes_str = ', ' . join ( gene_list ) inv_str = 'not ' if invert else '' logger . info ( ( 'Filtering %d statements for ones %scontaining "%s" of: ' '%s...' ) % ( len ( stmts_in ) , inv_str , policy , genes_str ) ) filter_list = copy ( gene_list ) if allow_families : for hgnc_name in gene_list : gene_uri = hierarchies [ 'entity' ] . get_uri ( 'HGNC' , hgnc_name ) parents = hierarchies [ 'entity' ] . get_parents ( gene_uri ) for par_uri in parents : ns , id = hierarchies [ 'entity' ] . ns_id_from_uri ( par_uri ) filter_list . append ( id ) stmts_out = [ ] if remove_bound : if not invert : keep_criterion = lambda a : a . name in filter_list else : keep_criterion = lambda a : a . name not in filter_list for st in stmts_in : for agent in st . agent_list ( ) : _remove_bound_conditions ( agent , keep_criterion ) if policy == 'one' : for st in stmts_in : found_gene = False if not remove_bound : agent_list = st . agent_list_with_bound_condition_agents ( ) else : agent_list = st . agent_list ( ) for agent in agent_list : if agent is not None : if agent . name in filter_list : found_gene = True break if ( found_gene and not invert ) or ( not found_gene and invert ) : stmts_out . append ( st ) elif policy == 'all' : for st in stmts_in : found_genes = True if not remove_bound : agent_list = st . agent_list_with_bound_condition_agents ( ) else : agent_list = st . agent_list ( ) for agent in agent_list : if agent is not None : if agent . name not in filter_list : found_genes = False break if ( found_genes and not invert ) or ( not found_genes and invert ) : stmts_out . append ( st ) else : stmts_out = stmts_in logger . info ( '%d statements after filter...' 
% len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Return statements that contain genes given in a list . |
17,761 | def filter_by_db_refs ( stmts_in , namespace , values , policy , ** kwargs ) : invert = kwargs . get ( 'invert' , False ) match_suffix = kwargs . get ( 'match_suffix' , False ) if policy not in ( 'one' , 'all' ) : logger . error ( 'Policy %s is invalid, not applying filter.' % policy ) return else : name_str = ', ' . join ( values ) rev_mod = 'not ' if invert else '' logger . info ( ( 'Filtering %d statements for those with %s agents %s' 'grounded to: %s in the %s namespace...' ) % ( len ( stmts_in ) , policy , rev_mod , name_str , namespace ) ) def meets_criterion ( agent ) : if namespace not in agent . db_refs : return False entry = agent . db_refs [ namespace ] if isinstance ( entry , list ) : entry = entry [ 0 ] [ 0 ] ret = False if match_suffix : if any ( [ entry . endswith ( e ) for e in values ] ) : ret = True else : if entry in values : ret = True if invert : return not ret else : return ret enough = all if policy == 'all' else any stmts_out = [ s for s in stmts_in if enough ( [ meets_criterion ( ag ) for ag in s . agent_list ( ) if ag is not None ] ) ] logger . info ( '%d Statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to Statements whose agents are grounded to a matching entry . |
17,762 | def filter_human_only ( stmts_in , ** kwargs ) : from indra . databases import uniprot_client if 'remove_bound' in kwargs and kwargs [ 'remove_bound' ] : remove_bound = True else : remove_bound = False dump_pkl = kwargs . get ( 'save' ) logger . info ( 'Filtering %d statements for human genes only...' % len ( stmts_in ) ) stmts_out = [ ] def criterion ( agent ) : upid = agent . db_refs . get ( 'UP' ) if upid and not uniprot_client . is_human ( upid ) : return False else : return True for st in stmts_in : human_genes = True for agent in st . agent_list ( ) : if agent is not None : if not criterion ( agent ) : human_genes = False break if remove_bound : _remove_bound_conditions ( agent , criterion ) elif _any_bound_condition_fails_criterion ( agent , criterion ) : human_genes = False break if human_genes : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter out statements that are grounded but not to a human gene . |
17,763 | def filter_direct ( stmts_in , ** kwargs ) : def get_is_direct ( stmt ) : any_indirect = False for ev in stmt . evidence : if ev . epistemics . get ( 'direct' ) is True : return True elif ev . epistemics . get ( 'direct' ) is False : any_indirect = True if any_indirect : return False return True logger . info ( 'Filtering %d statements to direct ones...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : if get_is_direct ( st ) : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements that are direct interactions |
17,764 | def filter_no_hypothesis ( stmts_in , ** kwargs ) : logger . info ( 'Filtering %d statements to no hypothesis...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : all_hypotheses = True ev = None for ev in st . evidence : if not ev . epistemics . get ( 'hypothesis' , False ) : all_hypotheses = False break if ev is None : all_hypotheses = False if not all_hypotheses : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements that are not marked as hypothesis in epistemics . |
17,765 | def filter_evidence_source ( stmts_in , source_apis , policy = 'one' , ** kwargs ) : logger . info ( 'Filtering %d statements to evidence source "%s" of: %s...' % ( len ( stmts_in ) , policy , ', ' . join ( source_apis ) ) ) stmts_out = [ ] for st in stmts_in : sources = set ( [ ev . source_api for ev in st . evidence ] ) if policy == 'one' : if sources . intersection ( source_apis ) : stmts_out . append ( st ) if policy == 'all' : if sources . intersection ( source_apis ) == set ( source_apis ) : stmts_out . append ( st ) if policy == 'none' : if not sources . intersection ( source_apis ) : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements that have evidence from a given set of sources . |
17,766 | def filter_top_level ( stmts_in , ** kwargs ) : logger . info ( 'Filtering %d statements for top-level...' % len ( stmts_in ) ) stmts_out = [ st for st in stmts_in if not st . supports ] logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to statements that are at the top - level of the hierarchy . |
17,767 | def filter_inconsequential_mods ( stmts_in , whitelist = None , ** kwargs ) : if whitelist is None : whitelist = { } logger . info ( 'Filtering %d statements to remove' % len ( stmts_in ) + ' inconsequential modifications...' ) states_used = whitelist for stmt in stmts_in : for agent in stmt . agent_list ( ) : if agent is not None : if agent . mods : for mc in agent . mods : mod = ( mc . mod_type , mc . residue , mc . position ) try : states_used [ agent . name ] . append ( mod ) except KeyError : states_used [ agent . name ] = [ mod ] for k , v in states_used . items ( ) : states_used [ k ] = list ( set ( v ) ) stmts_out = [ ] for stmt in stmts_in : skip = False if isinstance ( stmt , Modification ) : mod_type = modclass_to_modtype [ stmt . __class__ ] if isinstance ( stmt , RemoveModification ) : mod_type = modtype_to_inverse [ mod_type ] mod = ( mod_type , stmt . residue , stmt . position ) used = states_used . get ( stmt . sub . name , [ ] ) if mod not in used : skip = True if not skip : stmts_out . append ( stmt ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter out Modifications that modify inconsequential sites |
17,768 | def filter_inconsequential_acts ( stmts_in , whitelist = None , ** kwargs ) : if whitelist is None : whitelist = { } logger . info ( 'Filtering %d statements to remove' % len ( stmts_in ) + ' inconsequential activations...' ) states_used = whitelist for stmt in stmts_in : for agent in stmt . agent_list ( ) : if agent is not None : if agent . activity : act = agent . activity . activity_type try : states_used [ agent . name ] . append ( act ) except KeyError : states_used [ agent . name ] = [ act ] for k , v in states_used . items ( ) : states_used [ k ] = list ( set ( v ) ) stmts_out = [ ] for stmt in stmts_in : skip = False if isinstance ( stmt , RegulateActivity ) : used = states_used . get ( stmt . obj . name , [ ] ) if stmt . obj_activity not in used : skip = True if not skip : stmts_out . append ( stmt ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter out Activations that modify inconsequential activities |
17,769 | def filter_enzyme_kinase ( stmts_in , ** kwargs ) : logger . info ( 'Filtering %d statements to remove ' % len ( stmts_in ) + 'phosphorylation by non-kinases...' ) path = os . path . dirname ( os . path . abspath ( __file__ ) ) kinase_table = read_unicode_csv ( path + '/../resources/kinases.tsv' , delimiter = '\t' ) gene_names = [ lin [ 1 ] for lin in list ( kinase_table ) [ 1 : ] ] stmts_out = [ ] for st in stmts_in : if isinstance ( st , Phosphorylation ) : if st . enz is not None : if st . enz . name in gene_names : stmts_out . append ( st ) else : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter Phosphorylations to ones where the enzyme is a known kinase . |
17,770 | def filter_transcription_factor ( stmts_in , ** kwargs ) : logger . info ( 'Filtering %d statements to remove ' % len ( stmts_in ) + 'amount regulations by non-transcription-factors...' ) path = os . path . dirname ( os . path . abspath ( __file__ ) ) tf_table = read_unicode_csv ( path + '/../resources/transcription_factors.csv' ) gene_names = [ lin [ 1 ] for lin in list ( tf_table ) [ 1 : ] ] stmts_out = [ ] for st in stmts_in : if isinstance ( st , RegulateAmount ) : if st . subj is not None : if st . subj . name in gene_names : stmts_out . append ( st ) else : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter out RegulateAmounts where subject is not a transcription factor . |
17,771 | def filter_uuid_list ( stmts_in , uuids , ** kwargs ) : invert = kwargs . get ( 'invert' , False ) logger . info ( 'Filtering %d statements for %d UUID%s...' % ( len ( stmts_in ) , len ( uuids ) , 's' if len ( uuids ) > 1 else '' ) ) stmts_out = [ ] for st in stmts_in : if not invert : if st . uuid in uuids : stmts_out . append ( st ) else : if st . uuid not in uuids : stmts_out . append ( st ) logger . info ( '%d statements after filter...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Filter to Statements corresponding to given UUIDs |
17,772 | def expand_families ( stmts_in , ** kwargs ) : from indra . tools . expand_families import Expander logger . info ( 'Expanding families on %d statements...' % len ( stmts_in ) ) expander = Expander ( hierarchies ) stmts_out = expander . expand_families ( stmts_in ) logger . info ( '%d statements after expanding families...' % len ( stmts_out ) ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Expand FamPlex Agents to individual genes . |
17,773 | def reduce_activities ( stmts_in , ** kwargs ) : logger . info ( 'Reducing activities on %d statements...' % len ( stmts_in ) ) stmts_out = [ deepcopy ( st ) for st in stmts_in ] ml = MechLinker ( stmts_out ) ml . gather_explicit_activities ( ) ml . reduce_activities ( ) stmts_out = ml . statements dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Reduce the activity types in a list of statements |
17,774 | def strip_agent_context ( stmts_in , ** kwargs ) : logger . info ( 'Stripping agent context on %d statements...' % len ( stmts_in ) ) stmts_out = [ ] for st in stmts_in : new_st = deepcopy ( st ) for agent in new_st . agent_list ( ) : if agent is None : continue agent . mods = [ ] agent . mutations = [ ] agent . activity = None agent . location = None agent . bound_conditions = [ ] stmts_out . append ( new_st ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Strip any context on agents within each statement . |
17,775 | def standardize_names_groundings ( stmts ) : print ( 'Standardize names to groundings' ) for stmt in stmts : for concept in stmt . agent_list ( ) : db_ns , db_id = concept . get_grounding ( ) if db_id is not None : if isinstance ( db_id , list ) : db_id = db_id [ 0 ] [ 0 ] . split ( '/' ) [ - 1 ] else : db_id = db_id . split ( '/' ) [ - 1 ] db_id = db_id . replace ( '|' , ' ' ) db_id = db_id . replace ( '_' , ' ' ) db_id = db_id . replace ( 'ONT::' , '' ) db_id = db_id . capitalize ( ) concept . name = db_id return stmts | Standardize the names of Concepts with respect to an ontology . |
17,776 | def dump_stmt_strings ( stmts , fname ) : with open ( fname , 'wb' ) as fh : for st in stmts : fh . write ( ( '%s\n' % st ) . encode ( 'utf-8' ) ) | Save printed statements in a file . |
17,777 | def rename_db_ref ( stmts_in , ns_from , ns_to , ** kwargs ) : logger . info ( 'Remapping "%s" to "%s" in db_refs on %d statements...' % ( ns_from , ns_to , len ( stmts_in ) ) ) stmts_out = [ deepcopy ( st ) for st in stmts_in ] for stmt in stmts_out : for agent in stmt . agent_list ( ) : if agent is not None and ns_from in agent . db_refs : agent . db_refs [ ns_to ] = agent . db_refs . pop ( ns_from ) dump_pkl = kwargs . get ( 'save' ) if dump_pkl : dump_statements ( stmts_out , dump_pkl ) return stmts_out | Rename an entry in the db_refs of each Agent . |
17,778 | def align_statements ( stmts1 , stmts2 , keyfun = None ) : def name_keyfun ( stmt ) : return tuple ( a . name if a is not None else None for a in stmt . agent_list ( ) ) if not keyfun : keyfun = name_keyfun matches = [ ] keys1 = [ keyfun ( s ) for s in stmts1 ] keys2 = [ keyfun ( s ) for s in stmts2 ] for stmt , key in zip ( stmts1 , keys1 ) : try : match_idx = keys2 . index ( key ) match_stmt = stmts2 [ match_idx ] matches . append ( ( stmt , match_stmt ) ) except ValueError : matches . append ( ( stmt , None ) ) for stmt , key in zip ( stmts2 , keys2 ) : try : match_idx = keys1 . index ( key ) except ValueError : matches . append ( ( None , stmt ) ) return matches | Return alignment of two lists of statements by key . |
17,779 | def submit_query_request ( end_point , * args , ** kwargs ) : ev_limit = kwargs . pop ( 'ev_limit' , 10 ) best_first = kwargs . pop ( 'best_first' , True ) tries = kwargs . pop ( 'tries' , 2 ) query_str = '?' + '&' . join ( [ '%s=%s' % ( k , v ) for k , v in kwargs . items ( ) if v is not None ] + list ( args ) ) return submit_statement_request ( 'get' , end_point , query_str , ev_limit = ev_limit , best_first = best_first , tries = tries ) | Low level function to format the query string . |
17,780 | def submit_statement_request ( meth , end_point , query_str = '' , data = None , tries = 2 , ** params ) : full_end_point = 'statements/' + end_point . lstrip ( '/' ) return make_db_rest_request ( meth , full_end_point , query_str , data , params , tries ) | Even lower level function to make the request . |
17,781 | def render_stmt_graph ( statements , reduce = True , english = False , rankdir = None , agent_style = None ) : from indra . assemblers . english import EnglishAssembler if agent_style is None : agent_style = { 'color' : 'lightgray' , 'style' : 'filled' , 'fontname' : 'arial' } nodes = set ( [ ] ) edges = set ( [ ] ) stmt_dict = { } def process_stmt ( stmt ) : nodes . add ( str ( stmt . matches_key ( ) ) ) stmt_dict [ str ( stmt . matches_key ( ) ) ] = stmt for sby_ix , sby_stmt in enumerate ( stmt . supported_by ) : edges . add ( ( str ( stmt . matches_key ( ) ) , str ( sby_stmt . matches_key ( ) ) ) ) process_stmt ( sby_stmt ) for stmt in statements : process_stmt ( stmt ) nx_graph = nx . DiGraph ( ) nx_graph . add_edges_from ( edges ) if reduce : nx_graph = nx . algorithms . dag . transitive_reduction ( nx_graph ) try : pgv_graph = pgv . AGraph ( name = 'statements' , directed = True , rankdir = rankdir ) except NameError : logger . error ( 'Cannot generate graph because ' 'pygraphviz could not be imported.' ) return None for node in nx_graph . nodes ( ) : stmt = stmt_dict [ node ] if english : ea = EnglishAssembler ( [ stmt ] ) stmt_str = ea . make_model ( ) else : stmt_str = str ( stmt ) pgv_graph . add_node ( node , label = '%s (%d)' % ( stmt_str , len ( stmt . evidence ) ) , ** agent_style ) pgv_graph . add_edges_from ( nx_graph . edges ( ) ) return pgv_graph | Render the statement hierarchy as a pygraphviz graph . |
def flatten_stmts(stmts):
    """Return the full set of unique stms in a pre-assembled stmt graph.

    Recursively walks the ``supported_by`` links of each statement and
    returns every reachable statement exactly once, as a list.
    """
    collected = set(stmts)
    for stmt in stmts:
        if stmt.supported_by:
            collected |= set(flatten_stmts(stmt.supported_by))
    return list(collected)
def combine_duplicates(self):
    """Combine duplicates among stmts and save result in unique_stmts.

    The deduplicated list is computed lazily and cached on the instance:
    subsequent calls return the cached ``unique_stmts``.
    """
    if self.unique_stmts is not None:
        return self.unique_stmts
    self.unique_stmts = self.combine_duplicate_stmts(self.stmts)
    return self.unique_stmts
def _get_stmt_matching_groups(stmts):
    """Use the matches_key method to get sets of matching statements.

    Object-identical duplicates are removed first, then the statements
    are sorted and grouped by their ``matches_key()`` value.

    Returns
    -------
    itertools.groupby
        An iterator of (key, group-iterator) pairs.
    """
    def match_func(x):
        return x.matches_key()

    logger.debug('%d statements before removing object duplicates.'
                 % len(stmts))
    st = list(set(stmts))
    # Fix: log the size of the deduplicated list; the original logged
    # len(stmts) again, making the "after" message always equal the
    # "before" one.
    logger.debug('%d statements after removing object duplicates.'
                 % len(st))
    # groupby requires the input to be sorted by the same key.
    st.sort(key=match_func)
    return itertools.groupby(st, key=match_func)
def combine_duplicate_stmts(stmts):
    """Combine evidence from duplicate Statements.

    Statements that share a ``matches_key()`` are collapsed into a single
    generic copy; the evidence of each duplicate is attached to the copy,
    skipping evidences whose (matches_key, raw_text, raw_grounding)
    combination was already seen. Each evidence is annotated with the raw
    agent text/grounding and the uuid of the statement it came from.

    Returns
    -------
    list
        A list with one combined Statement per matching group.
    """
    def _ev_keys(sts):
        # Collect the evidence matches-keys of a list of statements.
        ev_keys = []
        for stmt in sts:
            for ev in stmt.evidence:
                ev_keys.append(ev.matches_key())
        return ev_keys

    unique_stmts = []
    for _, duplicates in Preassembler._get_stmt_matching_groups(stmts):
        ev_keys = set()
        # Materialize the groupby group so it can be sized and re-read.
        duplicates = list(duplicates)
        start_ev_keys = _ev_keys(duplicates)
        for stmt_ix, stmt in enumerate(duplicates):
            # Fix: was `if stmt_ix is 0:` — identity comparison on an int
            # is implementation-dependent; use equality.
            if stmt_ix == 0:
                new_stmt = stmt.make_generic_copy()
                if len(duplicates) == 1:
                    # A singleton group keeps its original uuid.
                    new_stmt.uuid = stmt.uuid
            raw_text = [None if ag is None else ag.db_refs.get('TEXT')
                        for ag in stmt.agent_list(deep_sorted=True)]
            raw_grounding = [None if ag is None else ag.db_refs
                             for ag in stmt.agent_list(deep_sorted=True)]
            for ev in stmt.evidence:
                ev_key = ev.matches_key() + str(raw_text) + \
                    str(raw_grounding)
                if ev_key not in ev_keys:
                    # Record the raw agent info on the evidence so it is
                    # not lost when the agents are canonicalized.
                    if 'agents' in ev.annotations:
                        ev.annotations['agents']['raw_text'] = raw_text
                        ev.annotations['agents']['raw_grounding'] = \
                            raw_grounding
                    else:
                        ev.annotations['agents'] = \
                            {'raw_text': raw_text,
                             'raw_grounding': raw_grounding}
                    if 'prior_uuids' not in ev.annotations:
                        ev.annotations['prior_uuids'] = []
                    ev.annotations['prior_uuids'].append(stmt.uuid)
                    new_stmt.evidence.append(ev)
                    ev_keys.add(ev_key)
        end_ev_keys = _ev_keys([new_stmt])
        if len(end_ev_keys) != len(start_ev_keys):
            logger.debug('%d redundant evidences eliminated.' %
                         (len(start_ev_keys) - len(end_ev_keys)))
        # This should never be None: every group has at least one member,
        # so new_stmt is always assigned at stmt_ix == 0.
        assert isinstance(new_stmt, Statement)
        unique_stmts.append(new_stmt)
    return unique_stmts
def _get_stmt_by_group(self, stmt_type, stmts_this_type, eh):
    """Group Statements of stmt_type by their hierarchical relations.

    Parameters
    ----------
    stmt_type : type
        The Statement subclass of all entries in stmts_this_type.
    stmts_this_type : list of (index, Statement) tuples
        Statements to group, each paired with its original index.
    eh : HierarchyManager
        Entity hierarchy passed through to ``self._get_entities``.

    Returns
    -------
    collections.defaultdict
        Maps an entity-key tuple to the list of statement tuples that
        belong to that group.
    """
    # Index structures for two-argument statements:
    #   stmt_by_first/second: arg -> list of full keys containing it
    #   none_first/second: statements whose first/second arg is None,
    #   keyed by the arg that IS present.
    stmt_by_first = collections.defaultdict(lambda: [])
    stmt_by_second = collections.defaultdict(lambda: [])
    none_first = collections.defaultdict(lambda: [])
    none_second = collections.defaultdict(lambda: [])
    stmt_by_group = collections.defaultdict(lambda: [])
    for stmt_tuple in stmts_this_type:
        _, stmt = stmt_tuple
        entities = self._get_entities(stmt, stmt_type, eh)
        if stmt_type == Complex:
            # Complexes group by the sorted tuple of all members.
            assert None not in entities
            assert len(entities) > 0
            entities.sort()
            key = tuple(entities)
            if stmt_tuple not in stmt_by_group[key]:
                stmt_by_group[key].append(stmt_tuple)
        elif stmt_type == Conversion:
            # Conversions group by (subject, sorted obj_from, sorted
            # obj_to) slices of the entity list.
            assert len(entities) > 0
            key = (entities[0],
                   tuple(sorted(entities[1:len(stmt.obj_from) + 1])),
                   tuple(sorted(entities[-len(stmt.obj_to):])))
            if stmt_tuple not in stmt_by_group[key]:
                stmt_by_group[key].append(stmt_tuple)
        elif len(entities) == 1:
            # Single-entity statements group directly by that entity.
            assert None not in entities
            key = tuple(entities)
            if stmt_tuple not in stmt_by_group[key]:
                stmt_by_group[key].append(stmt_tuple)
        else:
            # Two-argument statements; either argument may be None but
            # not both.
            key = tuple(entities)
            assert len(key) == 2
            assert key != (None, None)
            if key[0] is None and stmt_tuple not in none_first[key[1]]:
                none_first[key[1]].append(stmt_tuple)
            elif key[1] is None and stmt_tuple not in none_second[key[0]]:
                none_second[key[0]].append(stmt_tuple)
            elif None not in key:
                if stmt_tuple not in stmt_by_group[key]:
                    stmt_by_group[key].append(stmt_tuple)
                if key not in stmt_by_first[key[0]]:
                    stmt_by_first[key[0]].append(key)
                if key not in stmt_by_second[key[1]]:
                    stmt_by_second[key[1]].append(key)
    # Statements with a None argument are added to every group whose
    # fully-specified key shares the present argument; if no such group
    # exists they form their own (None, arg) / (arg, None) group.
    if none_first:
        for second_arg, stmts in none_first.items():
            second_arg_keys = stmt_by_second[second_arg]
            if not second_arg_keys:
                stmt_by_group[(None, second_arg)] = stmts
            for second_arg_key in second_arg_keys:
                stmt_by_group[second_arg_key] += stmts
    if none_second:
        for first_arg, stmts in none_second.items():
            first_arg_keys = stmt_by_first[first_arg]
            if not first_arg_keys:
                stmt_by_group[(first_arg, None)] = stmts
            for first_arg_key in first_arg_keys:
                stmt_by_group[first_arg_key] += stmts
    return stmt_by_group
def combine_related(self, return_toplevel=True, poolsize=None,
                    size_cutoff=100):
    """Connect related statements based on their refinement relationships.

    The result is cached in ``self.related_stmts``; deduplication is
    delegated to ``combine_duplicates`` and the refinement index pairs
    come from ``self._generate_id_maps``.

    Parameters
    ----------
    return_toplevel : bool
        If True return only statements with no ``supports`` links,
        otherwise return the full deduplicated list.
    poolsize : int or None
        Forwarded to ``_generate_id_maps``.
    size_cutoff : int
        Forwarded to ``_generate_id_maps``.
    """
    # Fast path: linking already ran, serve from cache.
    if self.related_stmts is not None:
        if not return_toplevel:
            assert self.unique_stmts is not None
            return self.unique_stmts
        return self.related_stmts
    deduped = self.combine_duplicates()
    # For each (ix1, ix2) pair, statement ix1 records ix2 as a supporter
    # and ix2 records ix1 as supported.
    for ix1, ix2 in self._generate_id_maps(deduped, poolsize, size_cutoff):
        deduped[ix1].supported_by.append(deduped[ix2])
        deduped[ix2].supports.append(deduped[ix1])
    # Top-level statements are those that support nothing.
    self.related_stmts = [st for st in deduped if not st.supports]
    logger.debug('%d top level' % len(self.related_stmts))
    return self.related_stmts if return_toplevel else deduped
def find_contradicts(self):
    """Return pairs of contradicting Statements.

    Returns
    -------
    list of (Statement, Statement) tuples
        Pairs for which ``st1.contradicts(st2, self.hierarchies)`` is
        True.
    """
    eh = self.hierarchies['entity']
    # Bucket statements by their class for pairwise comparison.
    stmts_by_type = collections.defaultdict(lambda: [])
    for idx, stmt in enumerate(self.stmts):
        stmts_by_type[indra_stmt_type(stmt)].append((idx, stmt))
    # Build parallel lists of opposing statement classes: each
    # modification class vs. its inverse, plus Activation/Inhibition and
    # IncreaseAmount/DecreaseAmount.
    pos_stmts = AddModification.__subclasses__()
    neg_stmts = [modclass_to_inverse[c] for c in pos_stmts]
    pos_stmts += [Activation, IncreaseAmount]
    neg_stmts += [Inhibition, DecreaseAmount]
    contradicts = []
    for pst, nst in zip(pos_stmts, neg_stmts):
        poss = stmts_by_type.get(pst, [])
        negs = stmts_by_type.get(nst, [])
        pos_stmt_by_group = self._get_stmt_by_group(pst, poss, eh)
        neg_stmt_by_group = self._get_stmt_by_group(nst, negs, eh)
        # Only statements in the same entity group can contradict.
        for key, pg in pos_stmt_by_group.items():
            ng = neg_stmt_by_group.get(key, [])
            for (_, st1), (_, st2) in itertools.product(pg, ng):
                if st1.contradicts(st2, self.hierarchies):
                    contradicts.append((st1, st2))
    # "Neutral" types can contradict other instances of the same class,
    # so compare all pairs within each class.
    neu_stmts = [Influence, ActiveForm]
    for stt in neu_stmts:
        stmts = stmts_by_type.get(stt, [])
        for (_, st1), (_, st2) in itertools.combinations(stmts, 2):
            if st1.contradicts(st2, self.hierarchies):
                contradicts.append((st1, st2))
    return contradicts
def get_text_content_for_pmids(pmids):
    """Get text content for articles given a list of their pmids

    Full texts are fetched from PMC where available; for the remaining
    pmids the PubMed abstract is retrieved. Requests are throttled with
    a 0.5 s sleep between calls.

    Parameters
    ----------
    pmids : list of str

    Returns
    -------
    list of str
        PMC XML strings followed by abstracts, with None results dropped.
    """
    pmc_pmids = set(pmc_client.filter_pmids(pmids, source_type='fulltext'))
    pmc_ids = []
    # Fix: iterate over a copy — the original discarded from pmc_pmids
    # while iterating it, which raises RuntimeError ("Set changed size
    # during iteration") as soon as a pmid has no PMCID.
    for pmid in list(pmc_pmids):
        pmc_id = pmc_client.id_lookup(pmid, idtype='pmid')['pmcid']
        if pmc_id:
            pmc_ids.append(pmc_id)
        else:
            pmc_pmids.discard(pmid)
    pmc_xmls = []
    # Fix: the original guarded `if pmc_id is not None` with an else
    # branch doing `failed.append(pmid)` — pmc_ids never contains None
    # (only truthy ids are appended above), `failed` was a set (no
    # .append) and `pmid` was a stale loop variable; the dead, broken
    # branch is removed.
    for pmc_id in pmc_ids:
        pmc_xmls.append(pmc_client.get_xml(pmc_id))
        time.sleep(0.5)
    remaining_pmids = set(pmids) - pmc_pmids
    abstracts = []
    for pmid in remaining_pmids:
        abstract = pubmed_client.get_abstract(pmid)
        abstracts.append(abstract)
        time.sleep(0.5)
    return [text_content
            for source in (pmc_xmls, abstracts)
            for text_content in source
            if text_content is not None]
def universal_extract_paragraphs(xml):
    """Extract paragraphs from xml that could be from different sources

    Tries the Elsevier extractor first; if it raises or returns None,
    falls back to the PMC extractor, and finally to wrapping the raw
    input in a single-element list.
    """
    paragraphs = None
    try:
        paragraphs = elsevier_client.extract_paragraphs(xml)
    except Exception:
        # Not Elsevier content (or extraction failed); try PMC next.
        pass
    if paragraphs is not None:
        return paragraphs
    try:
        return pmc_client.extract_paragraphs(xml)
    except Exception:
        # Last resort: treat the whole input as one paragraph.
        return [xml]
def filter_paragraphs(paragraphs, contains=None):
    """Filter paragraphs to only those containing one of a list of strings

    Parameters
    ----------
    paragraphs : list of str
    contains : str or list of str or None
        If None, all paragraphs are kept. Otherwise a paragraph is kept
        when it contains any of the given shortforms delimited by
        non-word characters on both sides (so shortforms at the very
        start or end of a paragraph do not match).

    Returns
    -------
    str
        The kept paragraphs joined with newlines, with a trailing
        newline.
    """
    if contains is None:
        # An empty pattern matches every paragraph.
        pattern = ''
    else:
        shortforms = [contains] if isinstance(contains, str) else contains
        pattern = '|'.join(r'[^\w]%s[^\w]' % sf for sf in shortforms)
    kept = [par for par in paragraphs if re.search(pattern, par)]
    return '\n'.join(kept) + '\n'
def get_valid_residue(residue):
    """Check if the given string represents a valid amino acid residue.

    Returns the input unchanged when it is None or already a key of the
    ``amino_acids`` table; otherwise tries to map it to the canonical
    form via ``amino_acids_reverse`` (lowercased lookup).

    Raises
    ------
    InvalidResidueError
        If the residue cannot be resolved either way.
    """
    if residue is None or amino_acids.get(residue) is not None:
        return residue
    canonical = amino_acids_reverse.get(residue.lower())
    if canonical is None:
        raise InvalidResidueError(residue)
    return canonical
def get_valid_location(location):
    """Check if the given location represents a valid cellular component.

    Returns the input unchanged when it is None or already a key of the
    ``cellular_components`` table; otherwise tries to map it to the
    canonical form via ``cellular_components_reverse``.

    Raises
    ------
    InvalidLocationError
        If the location cannot be resolved either way.
    """
    if location is None or cellular_components.get(location) is not None:
        return location
    canonical = cellular_components_reverse.get(location)
    if canonical is None:
        raise InvalidLocationError(location)
    return canonical
def _read_activity_types():
    """Read types of valid activities from a resource file.

    Returns
    -------
    list of str
        Sorted activity type names, taken from the last path component
        of each subject and object URI in the RDF hierarchy.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    ac_file = os.path.join(this_dir, os.pardir, 'resources',
                           'activity_hierarchy.rdf')
    g = rdflib.Graph()
    # Fix: the original wrapped this call in `with open(ac_file, 'r'):`
    # without binding the handle; g.parse reads from the path itself, so
    # the extra open was a pointless second open of the same file.
    g.parse(ac_file, format='nt')
    act_types = set()
    for s, _, o in g:
        # Keep only the final URI segment as the activity name.
        act_types.add(s.rpartition('/')[-1])
        act_types.add(o.rpartition('/')[-1])
    return sorted(act_types)
def _read_cellular_components():
    """Read cellular components from a resource file.

    Reads the main TSV and a patch TSV, skipping only the first line of
    the combined list (the main file's header).

    Returns
    -------
    (dict, dict)
        Forward map (column 1 -> column 0) and reverse map
        (column 0 -> column 1, first occurrence wins).
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    cc_file = os.path.join(this_dir, os.pardir, 'resources',
                           'cellular_components.tsv')
    cc_patch_file = os.path.join(this_dir, os.pardir, 'resources',
                                 'cellular_components_patch.tsv')
    lines = []
    with open(cc_file, 'rt') as fh:
        lines.extend(fh.readlines())
    with open(cc_patch_file, 'rt') as fh:
        lines.extend(fh.readlines())
    cellular_components = {}
    cellular_components_reverse = {}
    for line in lines[1:]:
        terms = line.strip().split('\t')
        cellular_components[terms[1]] = terms[0]
        # Keep the first mapping seen for the reverse direction.
        cellular_components_reverse.setdefault(terms[0], terms[1])
    return cellular_components, cellular_components_reverse
def _read_amino_acids():
    """Read the amino acid information from a resource file.

    Returns
    -------
    (dict, dict)
        Forward map keyed by column 2, with full/short/indra names as
        values, and a reverse map from each of those names back to the
        key.
    """
    this_dir = os.path.dirname(os.path.abspath(__file__))
    aa_file = os.path.join(this_dir, os.pardir, 'resources',
                           'amino_acids.tsv')
    amino_acids = {}
    amino_acids_reverse = {}
    with open(aa_file, 'rt') as fh:
        lines = fh.readlines()
    # Skip the header row.
    for line in lines[1:]:
        terms = line.strip().split('\t')
        key = terms[2]
        val = {'full_name': terms[0],
               'short_name': terms[1],
               'indra_name': terms[3]}
        amino_acids[key] = val
        for name in val.values():
            amino_acids_reverse[name] = key
    return amino_acids, amino_acids_reverse
def export_sbgn(model):
    """Return an SBGN model string corresponding to the PySB model.

    The reaction network is first generated with BNG (which can be
    slow), then each species and reaction is rendered into SBGN glyphs
    and arcs via the SBGNAssembler's internal helpers.

    Parameters
    ----------
    model : pysb.Model

    Returns
    -------
    str
        The SBGN model serialized as a UTF-8 string.
    """
    import lxml.etree
    import lxml.builder
    from pysb.bng import generate_equations
    from indra.assemblers.sbgn import SBGNAssembler
    logger.info('Generating reaction network with BNG for SBGN export. ' +
                'This could take a long time.')
    generate_equations(model)
    sa = SBGNAssembler()
    glyphs = {}
    # One glyph per species; species that yield no glyph are skipped and
    # later represented by "none" glyphs in reactions.
    for idx, species in enumerate(model.species):
        glyph = sa._glyph_for_complex_pattern(species)
        if glyph is None:
            continue
        sa._map.append(glyph)
        glyphs[idx] = glyph
    for reaction in model.reactions:
        # Species on both sides are treated as controllers (catalysts);
        # pure reactants/products are the set differences.
        reactants = set(reaction['reactants']) - set(reaction['products'])
        products = set(reaction['products']) - set(reaction['reactants'])
        controllers = set(reaction['reactants']) & \
            set(reaction['products'])
        process_glyph = sa._process_glyph('process')
        # A reaction with no net reactants consumes from a "none" glyph.
        if not reactants:
            glyph_id = sa._none_glyph()
            sa._arc('consumption', glyph_id, process_glyph)
        else:
            for r in reactants:
                glyph = glyphs.get(r)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('consumption', glyph_id, process_glyph)
        # A reaction with no net products produces into a "none" glyph.
        if not products:
            glyph_id = sa._none_glyph()
            sa._arc('production', process_glyph, glyph_id)
        else:
            for p in products:
                glyph = glyphs.get(p)
                if glyph is None:
                    glyph_id = sa._none_glyph()
                else:
                    glyph_id = glyph.attrib['id']
                sa._arc('production', process_glyph, glyph_id)
        for c in controllers:
            glyph = glyphs[c]
            sa._arc('catalysis', glyph.attrib['id'], process_glyph)
    sbgn_str = sa.print_model().decode('utf-8')
    return sbgn_str
def export_kappa_im(model, fname=None):
    """Return a networkx graph representing the model's Kappa influence map.

    Parameters
    ----------
    model : pysb.Model
        Model to export via the Kappa client.
    fname : str or None
        If given, the influence map is also drawn to this file with
        graphviz ('dot').

    Returns
    -------
    networkx graph
        The influence map with model parameter nodes removed.
    """
    from .kappa_util import im_json_to_graph
    kappa = _prepare_kappa(model)
    imap = kappa.analyses_influence_map()
    im = im_json_to_graph(imap)
    for param in model.parameters:
        # Parameter nodes may not appear in the influence map, so removal
        # is best-effort. Fix: narrowed from a bare `except:` (which also
        # swallowed KeyboardInterrupt/SystemExit) to `except Exception`.
        try:
            im.remove_node(param.name)
        except Exception:
            pass
    if fname:
        agraph = networkx.nx_agraph.to_agraph(im)
        agraph.draw(fname, prog='dot')
    return im
def export_kappa_cm(model, fname=None):
    """Return a networkx graph representing the model's Kappa contact map.

    Parameters
    ----------
    model : pysb.Model
        Model to export via the Kappa client.
    fname : str or None
        If given, the contact map is also drawn to this file with
        graphviz ('dot').
    """
    from .kappa_util import cm_json_to_graph
    kappa = _prepare_kappa(model)
    cm = cm_json_to_graph(kappa.analyses_contact_map())
    if fname:
        cm.draw(fname, prog='dot')
    return cm
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.