idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
17,800
def _prepare_kappa(model):
    """Return a kappy KappaStd instance with the given model loaded."""
    import kappy
    kappa_std = kappy.KappaStd()
    # Export the model to Kappa syntax and hand it to the runtime.
    kappa_std.add_model_string(export(model, 'kappa'))
    kappa_std.project_parse()
    return kappa_std
Return a Kappa STD with the model loaded .
17,801
def send_request(**kwargs):
    """Return a data frame from a web service request to the cBio portal.

    A 'skiprows' keyword of -1 means: skip however many leading comment
    lines (starting with '#') appear in the returned TSV payload. Any
    other keyword arguments are passed through as URL parameters.

    Returns None (and logs an error) on a non-200 response; the original
    fell through and returned None implicitly, which is now explicit.
    """
    skiprows = kwargs.pop('skiprows', None)
    res = requests.get(cbio_url, params=kwargs)
    if res.status_code != 200:
        logger.error('Request returned with code %d' % res.status_code)
        return None
    if skiprows == -1:
        # Count leading '#' comment lines so pandas can skip them.
        skiprows = 0
        for line in res.text.split('\n'):
            if line.startswith('#'):
                skiprows += 1
            else:
                break
    csv_StringIO = StringIO(res.text)
    return pandas.read_csv(csv_StringIO, sep='\t', skiprows=skiprows)
Return a data frame from a web service request to cBio portal .
17,802
def get_mutations(study_id, gene_list, mutation_type=None, case_id=None):
    """Return mutations as a list of genes and list of amino acid changes."""
    genetic_profile = get_genetic_profiles(study_id, 'mutation')[0]
    data = {'cmd': 'getMutationData',
            'case_set_id': study_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': ','.join(gene_list),
            'skiprows': -1}
    df = send_request(**data)
    if case_id:
        # Restrict to the single requested case.
        df = df[df['case_id'] == case_id]
    res = _filter_data_frame(df, ['gene_symbol', 'amino_acid_change'],
                             'mutation_type', mutation_type)
    return {'gene_symbol': list(res['gene_symbol'].values()),
            'amino_acid_change': list(res['amino_acid_change'].values())}
Return mutations as a list of genes and list of amino acid changes .
17,803
def get_case_lists(study_id):
    """Return a list of the case set ids for a particular study."""
    df = send_request(cmd='getCaseLists', cancer_study_id=study_id)
    return df['case_list_id'].tolist()
Return a list of the case set ids for a particular study .
17,804
def get_profile_data(study_id, gene_list, profile_filter,
                     case_set_filter=None):
    """Return dict of cases and genes and their respective values.

    Picks the first genetic profile matching profile_filter; returns an
    empty dict if none exists. The case set is either the first one whose
    id contains case_set_filter, or '<study_id>_all' by default.
    """
    genetic_profiles = get_genetic_profiles(study_id, profile_filter)
    if genetic_profiles:
        genetic_profile = genetic_profiles[0]
    else:
        return {}
    gene_list_str = ','.join(gene_list)
    case_set_ids = get_case_lists(study_id)
    if case_set_filter:
        # NOTE(review): raises IndexError if no case set id contains the
        # filter substring — confirm callers always pass a matching filter.
        case_set_id = [x for x in case_set_ids if case_set_filter in x][0]
    else:
        case_set_id = study_id + '_all'
    data = {'cmd': 'getProfileData',
            'case_set_id': case_set_id,
            'genetic_profile_id': genetic_profile,
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    # All remaining columns are case ids; GENE_ID/COMMON are metadata.
    case_list_df = [x for x in df.columns.tolist()
                    if x not in ['GENE_ID', 'COMMON']]
    # Pre-fill with None so genes without values are still present.
    profile_data = {case: {g: None for g in gene_list}
                    for case in case_list_df}
    for case in case_list_df:
        profile_values = df[case].tolist()
        df_gene_list = df['COMMON'].tolist()
        for g, cv in zip(df_gene_list, profile_values):
            if not pandas.isnull(cv):
                profile_data[case][g] = cv
    return profile_data
Return dict of cases and genes and their respective values .
17,805
def get_num_sequenced(study_id):
    """Return number of sequenced tumors for given study.

    Returns 0 when the study has no case lists, or no case list whose id
    contains 'sequenced' (the original raised IndexError in that case).
    """
    df = send_request(cmd='getCaseLists', cancer_study_id=study_id)
    if df.empty:
        return 0
    row_filter = df['case_list_id'].str.contains('sequenced', case=False)
    matched = df[row_filter]['case_ids'].tolist()
    if not matched:
        # No 'sequenced' case list available for this study.
        return 0
    # case_ids is a space-separated list of case identifiers.
    return len(matched[0].split(' '))
Return number of sequenced tumors for given study .
17,806
def get_cancer_studies(study_filter=None):
    """Return a list of cancer study identifiers optionally filtered."""
    df = send_request(cmd='getCancerStudies')
    res = _filter_data_frame(df, ['cancer_study_id'], 'cancer_study_id',
                             study_filter)
    return list(res['cancer_study_id'].values())
Return a list of cancer study identifiers optionally filtered .
17,807
def get_cancer_types(cancer_filter=None):
    """Return a list of cancer types optionally filtered."""
    df = send_request(cmd='getTypesOfCancer')
    res = _filter_data_frame(df, ['type_of_cancer_id'], 'name',
                             cancer_filter)
    return list(res['type_of_cancer_id'].values())
Return a list of cancer types optionally filtered .
17,808
def get_ccle_mutations(gene_list, cell_lines, mutation_type=None):
    """Return a dict of mutations in given genes and cell lines from CCLE."""
    mutations = {cl: {g: [] for g in gene_list} for cl in cell_lines}
    for cl in cell_lines:
        cl_muts = get_mutations(ccle_study, gene_list,
                                mutation_type=mutation_type, case_id=cl)
        pairs = zip(cl_muts['gene_symbol'], cl_muts['amino_acid_change'])
        for gene, aa_change in pairs:
            # Stringify, e.g. NaN values become 'nan'.
            mutations[cl][gene].append(str(aa_change))
    return mutations
Return a dict of mutations in given genes and cell lines from CCLE .
17,809
def get_ccle_lines_for_mutation(gene, amino_acid_change):
    """Return cell lines with a given point mutation in a given gene."""
    data = {'cmd': 'getMutationData',
            'case_set_id': ccle_study,
            'genetic_profile_id': ccle_study + '_mutations',
            'gene_list': gene,
            'skiprows': 1}
    df = send_request(**data)
    matching = df[df['amino_acid_change'] == amino_acid_change]
    return matching['case_id'].unique().tolist()
Return cell lines with a given point mutation in a given gene .
17,810
def get_ccle_cna(gene_list, cell_lines):
    """Return a dict of CNAs in given genes and cell lines from CCLE."""
    all_data = get_profile_data(ccle_study, gene_list,
                                'COPY_NUMBER_ALTERATION', 'all')
    # Keep only the requested cell lines.
    return {cl: vals for cl, vals in all_data.items() if cl in cell_lines}
Return a dict of CNAs in given genes and cell lines from CCLE .
17,811
def get_ccle_mrna(gene_list, cell_lines):
    """Return a dict of mRNA amounts in given genes and cell lines from CCLE.

    The result maps each cell line to a gene->value dict; a cell line
    missing from the returned profile maps to None, and a gene missing
    or NaN for a present cell line maps to None.
    """
    gene_list_str = ','.join(gene_list)
    data = {'cmd': 'getProfileData',
            'case_set_id': ccle_study + '_mrna',
            'genetic_profile_id': ccle_study + '_mrna',
            'gene_list': gene_list_str,
            'skiprows': -1}
    df = send_request(**data)
    mrna_amounts = {cl: {g: [] for g in gene_list} for cl in cell_lines}
    for cell_line in cell_lines:
        if cell_line in df.columns:
            for gene in gene_list:
                # Row(s) of this cell line's column where COMMON == gene.
                value_cell = df[cell_line][df['COMMON'] == gene]
                if value_cell.empty:
                    mrna_amounts[cell_line][gene] = None
                elif pandas.isnull(value_cell.values[0]):
                    mrna_amounts[cell_line][gene] = None
                else:
                    value = value_cell.values[0]
                    mrna_amounts[cell_line][gene] = value
        else:
            # Cell line absent from the profile entirely.
            mrna_amounts[cell_line] = None
    return mrna_amounts
Return a dict of mRNA amounts in given genes and cell lines from CCLE .
17,812
def _filter_data_frame ( df , data_col , filter_col , filter_str = None ) : if filter_str is not None : relevant_cols = data_col + [ filter_col ] df . dropna ( inplace = True , subset = relevant_cols ) row_filter = df [ filter_col ] . str . contains ( filter_str , case = False ) data_list = df [ row_filter ] [ data_col ] . to_dict ( ) else : data_list = df [ data_col ] . to_dict ( ) return data_list
Return a filtered data frame as a dictionary .
17,813
def allow_cors(func):
    """This is a decorator which enables CORS for the specified endpoint.

    Sets the Access-Control-* response headers before delegating to the
    wrapped handler. functools.wraps preserves the wrapped function's
    name/docstring (the original decorator lost them).
    """
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        response.headers['Access-Control-Allow-Origin'] = '*'
        response.headers['Access-Control-Allow-Methods'] = \
            'PUT, GET, POST, DELETE, OPTIONS'
        response.headers['Access-Control-Allow-Headers'] = \
            'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
        return func(*args, **kwargs)
    return wrapper
This is a decorator which enables CORS for the specified endpoint .
17,814
def trips_process_text():
    """Process text with TRIPS and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    tp = trips.process_text(body.get('text'))
    return _stmts_from_proc(tp)
Process text with TRIPS and return INDRA Statements .
17,815
def trips_process_xml():
    """Process TRIPS EKB XML and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    tp = trips.process_xml(body.get('xml_str'))
    return _stmts_from_proc(tp)
Process TRIPS EKB XML and return INDRA Statements .
17,816
def reach_process_text():
    """Process text with REACH and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    # Any truthy 'offline' value selects the offline reader.
    offline = bool(body.get('offline'))
    rp = reach.process_text(body.get('text'), offline=offline)
    return _stmts_from_proc(rp)
Process text with REACH and return INDRA Statements .
17,817
def reach_process_json():
    """Process REACH json and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    rp = reach.process_json_str(body.get('json'))
    return _stmts_from_proc(rp)
Process REACH json and return INDRA Statements .
17,818
def reach_process_pmc():
    """Process PubMedCentral article and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    rp = reach.process_pmc(body.get('pmcid'))
    return _stmts_from_proc(rp)
Process PubMedCentral article and return INDRA Statements .
17,819
def bel_process_pybel_neighborhood():
    """Process BEL Large Corpus neighborhood and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    bp = bel.process_pybel_neighborhood(body.get('genes'))
    return _stmts_from_proc(bp)
Process BEL Large Corpus neighborhood and return INDRA Statements .
17,820
def bel_process_belrdf():
    """Process BEL RDF and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    bp = bel.process_belrdf(body.get('belrdf'))
    return _stmts_from_proc(bp)
Process BEL RDF and return INDRA Statements .
17,821
def biopax_process_pc_pathsbetween():
    """Process PathwayCommons paths between genes, return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    bp = biopax.process_pc_pathsbetween(body.get('genes'))
    return _stmts_from_proc(bp)
Process PathwayCommons paths between genes return INDRA Statements .
17,822
def biopax_process_pc_pathsfromto():
    """Process PathwayCommons paths from-to genes, return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    bp = biopax.process_pc_pathsfromto(body.get('source'),
                                       body.get('target'))
    return _stmts_from_proc(bp)
Process PathwayCommons paths from - to genes return INDRA Statements .
17,823
def biopax_process_pc_neighborhood():
    """Process PathwayCommons neighborhood, return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    bp = biopax.process_pc_neighborhood(body.get('genes'))
    return _stmts_from_proc(bp)
Process PathwayCommons neighborhood return INDRA Statements .
17,824
def eidos_process_text():
    """Process text with EIDOS and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    webservice = body.get('webservice')
    if not webservice:
        # A reader webservice address is mandatory for Eidos.
        response.status = 400
        response.content_type = 'application/json'
        return json.dumps({'error': 'No web service address provided.'})
    ep = eidos.process_text(body.get('text'), webservice=webservice)
    return _stmts_from_proc(ep)
Process text with EIDOS and return INDRA Statements .
17,825
def eidos_process_jsonld():
    """Process an EIDOS JSON-LD and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    ep = eidos.process_json_str(body.get('jsonld'))
    return _stmts_from_proc(ep)
Process an EIDOS JSON - LD and return INDRA Statements .
17,826
def cwms_process_text():
    """Process text with CWMS and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    cp = cwms.process_text(body.get('text'))
    return _stmts_from_proc(cp)
Process text with CWMS and return INDRA Statements .
17,827
def hume_process_jsonld():
    """Process Hume JSON-LD and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    # The payload carries the JSON-LD as a string; parse it first.
    jsonld = json.loads(body.get('jsonld'))
    hp = hume.process_jsonld(jsonld)
    return _stmts_from_proc(hp)
Process Hume JSON - LD and return INDRA Statements .
17,828
def sofia_process_text():
    """Process text with Sofia and return INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    sp = sofia.process_text(body.get('text'), auth=body.get('auth'))
    return _stmts_from_proc(sp)
Process text with Sofia and return INDRA Statements .
17,829
def assemble_pysb():
    """Assemble INDRA Statements and return PySB model string.

    Depending on 'export_format': None returns the model source, the
    Kappa map formats return a base64-encoded PNG, anything else is
    passed to the assembler's exporter.
    """
    if request.method == 'OPTIONS':
        return {}
    response = request.body.read().decode('utf-8')
    body = json.loads(response)
    stmts_json = body.get('statements')
    export_format = body.get('export_format')
    stmts = stmts_from_json(stmts_json)
    pa = PysbAssembler()
    pa.add_statements(stmts)
    pa.make_model()
    try:
        # Give each monomer an explicit zero initial condition;
        # best-effort, failures are only logged.
        for m in pa.model.monomers:
            pysb_assembler.set_extended_initial_condition(pa.model, m, 0)
    except Exception as e:
        logger.exception(e)
    if not export_format:
        model_str = pa.print_model()
    elif export_format in ('kappa_im', 'kappa_cm'):
        # Kappa influence/contact maps are rendered to a PNG file and
        # returned inline as a base64 data URI.
        fname = 'model_%s.png' % export_format
        root = os.path.dirname(os.path.abspath(fname))
        graph = pa.export_model(format=export_format, file_name=fname)
        # NOTE(review): `root` and `graph` appear unused — confirm they
        # can be removed.
        with open(fname, 'rb') as fh:
            data = 'data:image/png;base64,%s' % \
                base64.b64encode(fh.read()).decode()
            return {'image': data}
    else:
        try:
            model_str = pa.export_model(format=export_format)
        except Exception as e:
            logger.exception(e)
            model_str = ''
    res = {'model': model_str}
    return res
Assemble INDRA Statements and return PySB model string .
17,830
def assemble_cx():
    """Assemble INDRA Statements and return CX network json."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    assembler = CxAssembler(stmts)
    return {'model': assembler.make_model()}
Assemble INDRA Statements and return CX network json .
17,831
def share_model_ndex():
    """Upload the model to NDEX."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts_json = json.loads(body.get('stmts'))
    ca = CxAssembler(stmts_from_json(stmts_json["statements"]))
    # Persist every request field as a string network attribute.
    for name, value in body.items():
        ca.cx['networkAttributes'].append(
            {'n': name, 'v': value, 'd': 'string'})
    ca.make_model()
    return {'network_id': ca.upload_model(private=False)}
Upload the model to NDEX
17,832
def fetch_model_ndex():
    """Download model and associated pieces from NDEX."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    cx = process_ndex_network(body.get('network_id'))
    # Take the first CX entry that carries network attributes.
    attr_entries = [x for x in cx.cx if x.get('networkAttributes')]
    network_attr = attr_entries[0]['networkAttributes']
    keep_keys = ['txt_input', 'parser', 'model_elements', 'preset_pos',
                 'stmts', 'sentences', 'evidence', 'cell_line', 'mrna',
                 'mutations']
    return {d['n']: d['v'] for d in network_attr if d['n'] in keep_keys}
Download model and associated pieces from NDEX
17,833
def assemble_graph():
    """Assemble INDRA Statements and return Graphviz graph dot string."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    assembler = GraphAssembler(stmts)
    return {'model': assembler.make_model()}
Assemble INDRA Statements and return Graphviz graph dot string .
17,834
def assemble_cyjs():
    """Assemble INDRA Statements and return Cytoscape JS network."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    assembler = CyJSAssembler()
    assembler.add_statements(stmts)
    assembler.make_model(grouping=True)
    return assembler.print_cyjs_graph()
Assemble INDRA Statements and return Cytoscape JS network .
17,835
def assemble_english():
    """Assemble each statement into an English sentence keyed by uuid."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    sentences = {}
    for stmt in stmts:
        # One assembler per statement gives one sentence per statement.
        assembler = EnglishAssembler()
        assembler.add_statements([stmt])
        sentences[stmt.uuid] = assembler.make_model()
    return {'sentences': sentences}
Assemble each statement into an English sentence .
17,836
def assemble_loopy():
    """Assemble INDRA Statements into a Loopy model using SIF Assembler."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    assembler = SifAssembler(stmts)
    assembler.make_model(use_name_as_key=True)
    return {'loopy_url': assembler.print_loopy(as_url=True)}
Assemble INDRA Statements into a Loopy model using SIF Assembler .
17,837
def get_ccle_mrna_levels():
    """Get CCLE mRNA amounts using the cBio client."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    mrna_amounts = cbio_client.get_ccle_mrna(body.get('gene_list'),
                                             body.get('cell_lines'))
    return {'mrna_amounts': mrna_amounts}
Get CCLE mRNA amounts using cBioClient
17,838
def get_ccle_mutations():
    """Get CCLE amino acid changes for a list of genes and cell lines."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    mutations = cbio_client.get_ccle_mutations(body.get('gene_list'),
                                               body.get('cell_lines'))
    return {'mutations': mutations}
Get CCLE mutations returns the amino acid changes for a given list of genes and cell lines
17,839
def map_grounding():
    """Map grounding on a list of INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    return _return_stmts(ac.map_grounding(stmts))
Map grounding on a list of INDRA Statements .
17,840
def run_preassembly():
    """Run preassembly on a list of INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    # 'wm' selects the Eidos/world-modelers belief scorer.
    belief_scorer = get_eidos_scorer() if body.get('scorer') == 'wm' else None
    stmts_out = ac.run_preassembly(stmts, belief_scorer=belief_scorer,
                                   return_toplevel=body.get('return_toplevel'))
    return _return_stmts(stmts_out)
Run preassembly on a list of INDRA Statements .
17,841
def map_ontologies():
    """Run ontology mapping on a list of INDRA Statements."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    stmts = stmts_from_json(body.get('statements'))
    # Mapping mutates the statements in place; return the same list.
    mapper = OntologyMapper(stmts, wm_ontomap, scored=True, symmetric=False)
    mapper.map_statements()
    return _return_stmts(stmts)
Run ontology mapping on a list of INDRA Statements .
17,842
def filter_by_type():
    """Filter to a given INDRA Statement type."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    # Resolve the capitalized type name to a Statement class in this
    # module's namespace.
    stmt_type = getattr(sys.modules[__name__],
                        body.get('type').capitalize())
    stmts = stmts_from_json(body.get('statements'))
    return _return_stmts(ac.filter_by_type(stmts, stmt_type))
Filter to a given INDRA Statement type .
17,843
def filter_grounded_only():
    """Filter to grounded Statements only."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    score_threshold = body.get('score_threshold')
    if score_threshold is not None:
        score_threshold = float(score_threshold)
    stmts = stmts_from_json(body.get('statements'))
    stmts_out = ac.filter_grounded_only(stmts,
                                        score_threshold=score_threshold)
    return _return_stmts(stmts_out)
Filter to grounded Statements only .
17,844
def filter_belief():
    """Filter to beliefs above a given threshold."""
    if request.method == 'OPTIONS':
        return {}
    body = json.loads(request.body.read().decode('utf-8'))
    belief_cutoff = body.get('belief_cutoff')
    if belief_cutoff is not None:
        belief_cutoff = float(belief_cutoff)
    stmts = stmts_from_json(body.get('statements'))
    return _return_stmts(ac.filter_belief(stmts, belief_cutoff))
Filter to beliefs above a given threshold .
17,845
def get_git_info():
    """Get a dict with useful git info (hash, author, date, branch)."""
    start_dir = abspath(curdir)
    try:
        # Run git from this file's directory regardless of the cwd.
        chdir(dirname(abspath(__file__)))
        patt = (r'commit\s+(?P<commit_hash>\w+).*?Author:\s+'
                r'(?P<author_name>.*?)\s+<(?P<author_email>.*?)>\s+Date:\s+'
                r'(?P<date>.*?)\n\s+(?P<commit_msg>.*?)(?:\ndiff.*?)?$')
        show_out = check_output(['git', 'show']).decode('ascii')
        branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
        branch = branch.decode('ascii').strip()
        m = re.search(patt, show_out, re.DOTALL)
        assert m is not None, \
            "Regex pattern:\n\n\"%s\"\n\n failed to match string:\n\n\"%s\"" \
            % (patt, show_out)
        ret_dict = m.groupdict()
        ret_dict['branch_name'] = branch
    finally:
        # Always restore the caller's working directory.
        chdir(start_dir)
    return ret_dict
Get a dict with useful git info .
17,846
def get_version(with_git_hash=True, refresh_hash=False):
    """Get an indra version string including a git hash.

    The hash is cached in the module-level INDRA_GITHASH; pass
    refresh_hash=True to re-query git.
    """
    version = __version__
    if with_git_hash:
        global INDRA_GITHASH
        if INDRA_GITHASH is None or refresh_hash:
            with open(devnull, 'w') as nul:
                try:
                    ret = check_output(['git', 'rev-parse', 'HEAD'],
                                       cwd=dirname(__file__), stderr=nul)
                except CalledProcessError:
                    # Must be bytes: the original str 'UNHASHED' has no
                    # .decode() and crashed on the fallback path in Py3.
                    ret = b'UNHASHED'
            INDRA_GITHASH = ret.strip().decode('utf-8')
        version = '%s-%s' % (version, INDRA_GITHASH)
    return version
Get an indra version string including a git hash .
17,847
def _fix_evidence_text ( txt ) : txt = re . sub ( '[ ]?\( xref \)' , '' , txt ) txt = re . sub ( '\[ xref \]' , '[]' , txt ) txt = re . sub ( '[\(]?XREF_BIBR[\)]?[,]?' , '' , txt ) txt = re . sub ( '[\(]?XREF_FIG[\)]?[,]?' , '' , txt ) txt = re . sub ( '[\(]?XREF_SUPPLEMENT[\)]?[,]?' , '' , txt ) txt = txt . strip ( ) return txt
Eliminate some symbols to have cleaner supporting text .
17,848
def make_model(self, add_indra_json=True):
    """Assemble the CX network from the collected INDRA Statements.

    Parameters
    ----------
    add_indra_json : Optional[bool]
        Stored on the instance as self.add_indra_json; presumably read
        by the _add_* helpers — confirm.

    Returns
    -------
    str
        The assembled CX network as a json string.
    """
    self.add_indra_json = add_indra_json
    for stmt in self.statements:
        # NOTE(review): the first two checks are separate `if`s rather
        # than part of the elif chain below, so a Modification statement
        # is re-tested against the chain — confirm this is intentional.
        if isinstance(stmt, Modification):
            self._add_modification(stmt)
        if isinstance(stmt, SelfModification):
            self._add_self_modification(stmt)
        elif isinstance(stmt, RegulateActivity) or \
                isinstance(stmt, RegulateAmount):
            self._add_regulation(stmt)
        elif isinstance(stmt, Complex):
            self._add_complex(stmt)
        elif isinstance(stmt, Gef):
            self._add_gef(stmt)
        elif isinstance(stmt, Gap):
            self._add_gap(stmt)
        elif isinstance(stmt, Influence):
            self._add_influence(stmt)
    network_description = ''
    self.cx['networkAttributes'].append({'n': 'name',
                                         'v': self.network_name})
    self.cx['networkAttributes'].append({'n': 'description',
                                         'v': network_description})
    cx_str = self.print_cx()
    return cx_str
Assemble the CX network from the collected INDRA Statements .
17,849
def print_cx(self, pretty=True):
    """Return the assembled CX network as a json string."""
    def _aspect_meta(aspect):
        # Metadata entry for one aspect, or None if the aspect is empty.
        elements = self.cx.get(aspect)
        count = len(elements) if elements else 0
        if not count:
            return None
        return {'name': aspect,
                'idCounter': self._id_counter,
                'consistencyGroup': 1,
                'elementCount': count}

    full_cx = OrderedDict()
    full_cx['numberVerification'] = [{'longNumber': 281474976710655}]
    aspects = ['nodes', 'edges', 'supports', 'citations', 'edgeAttributes',
               'edgeCitations', 'edgeSupports', 'networkAttributes',
               'nodeAttributes', 'cartesianLayout']
    full_cx['metaData'] = [meta for meta in
                           (_aspect_meta(a) for a in aspects) if meta]
    for key, val in self.cx.items():
        full_cx[key] = val
    full_cx['status'] = [{'error': '', 'success': True}]
    # CX is serialized as a list of single-key objects.
    payload = [{k: v} for k, v in full_cx.items()]
    return json.dumps(payload, indent=2) if pretty else json.dumps(payload)
Return the assembled CX network as a json string .
17,850
def save_model(self, file_name='model.cx'):
    """Save the assembled CX network json to the given file."""
    with open(file_name, 'wt') as fh:
        fh.write(self.print_cx())
Save the assembled CX network in a file .
17,851
def set_context(self, cell_type):
    """Set protein expression data and mutational status as node attributes.

    Looks up expression amounts and mutation status for each node name in
    the given cell type and attaches them as 'expression_amount' and
    'is_mutated' node attributes.
    """
    node_names = [node['n'] for node in self.cx['nodes']]
    res_expr = context_client.get_protein_expression(node_names,
                                                     [cell_type])
    res_mut = context_client.get_mutations(node_names, [cell_type])
    res_expr = res_expr.get(cell_type)
    res_mut = res_mut.get(cell_type)
    if not res_expr:
        msg = 'Could not get protein expression for %s cell type.' \
            % cell_type
        logger.warning(msg)
        # Fall back to an empty dict so the .get() lookups below do not
        # raise AttributeError when only one data source is available
        # (the original crashed if exactly one of the two was None).
        res_expr = {}
    if not res_mut:
        msg = 'Could not get mutational status for %s cell type.' \
            % cell_type
        logger.warning(msg)
        res_mut = {}
    if not res_expr and not res_mut:
        return
    self.cx['networkAttributes'].append({'n': 'cellular_context',
                                         'v': cell_type})
    counter = 0
    for node in self.cx['nodes']:
        amount = res_expr.get(node['n'])
        mut = res_mut.get(node['n'])
        if amount is not None:
            node_attribute = {'po': node['@id'],
                              'n': 'expression_amount',
                              'v': int(amount)}
            self.cx['nodeAttributes'].append(node_attribute)
        if mut is not None:
            is_mutated = 1 if mut else 0
            node_attribute = {'po': node['@id'],
                              'n': 'is_mutated',
                              'v': is_mutated}
            self.cx['nodeAttributes'].append(node_attribute)
        if mut is not None or amount is not None:
            counter += 1
    logger.info('Set context for %d nodes.' % counter)
Set protein expression data and mutational status as node attribute
17,852
def get_publications(gene_names, save_json_name=None):
    """Return evidence publications for interaction between the given genes."""
    if len(gene_names) != 2:
        # The underlying service answers pairwise queries only.
        logger.warning('Other than 2 gene names given.')
        return []
    res_dict = _send_request(gene_names)
    if not res_dict:
        return []
    if save_json_name is not None:
        with open(save_json_name, 'wt') as fh:
            json.dump(res_dict, fh, indent=1)
    return _extract_publications(res_dict, gene_names)
Return evidence publications for interaction between the given genes .
17,853
def _n ( name ) : n = name . encode ( 'ascii' , errors = 'ignore' ) . decode ( 'ascii' ) n = re . sub ( '[^A-Za-z0-9_]' , '_' , n ) n = re . sub ( r'(^[0-9].*)' , r'p\1' , n ) return n
Return valid PySB name .
17,854
def get_hash_statements_dict(self):
    """Return a dict of Statements keyed by hashes."""
    return {stmt_hash: stmts_from_json([stmt_json])[0]
            for stmt_hash, stmt_json in self.__statement_jsons.items()}
Return a dict of Statements keyed by hashes .
17,855
def merge_results(self, other_processor):
    """Merge the results of this processor with those of another.

    Extends this processor's statements (and sample, when present) with
    the other's, then merges the underlying statement jsons and evidence
    counts. Raises ValueError if the other object is of a different
    class.
    """
    if not isinstance(other_processor, self.__class__):
        raise ValueError("Can only extend with another %s instance."
                         % self.__class__.__name__)
    self.statements.extend(other_processor.statements)
    if other_processor.statements_sample is not None:
        if self.statements_sample is None:
            self.statements_sample = other_processor.statements_sample
        else:
            self.statements_sample.extend(
                other_processor.statements_sample)
    # Double-underscore attributes are name-mangled; this works because
    # both objects are of the same class.
    self._merge_json(other_processor.__statement_jsons,
                     other_processor.__evidence_counts)
    return
Merge the results of this processor with those of another .
17,856
def wait_until_done(self, timeout=None):
    """Wait for the background load to complete.

    Returns True if the thread finished within the timeout, else False.
    """
    start = datetime.now()
    if not self.__th:
        raise IndraDBRestResponseError("There is no thread waiting to "
                                       "complete.")
    self.__th.join(timeout)
    dt = datetime.now() - start
    if self.__th.is_alive():
        logger.warning("Timed out after %0.3f seconds waiting for "
                       "statement load to complete." % dt.total_seconds())
        return False
    logger.info("Waited %0.3f seconds for statements to finish loading."
                % dt.total_seconds())
    return True
Wait for the background load to complete .
17,857
def _merge_json(self, stmt_json, ev_counts):
    """Merge these statement jsons with new jsons.

    Evidence of statements already present (same hash key) is appended
    to the existing entry; new keys are added wholesale.
    """
    self.__evidence_counts.update(ev_counts)
    for k, sj in stmt_json.items():
        if k not in self.__statement_jsons:
            self.__statement_jsons[k] = sj
        else:
            for evj in sj['evidence']:
                self.__statement_jsons[k]['evidence'].append(evj)
    if not self.__started:
        # Capture a sample of Statements from the very first batch.
        self.statements_sample = stmts_from_json(
            self.__statement_jsons.values())
        self.__started = True
    return
Merge these statement jsons with new jsons .
17,858
def _run_queries(self, agent_strs, stmt_types, params, persist):
    """Use paging to get all statements requested.

    With persist=False a single round of queries is compiled and
    returned; otherwise querying repeats until every statement type
    reports done.
    """
    self._query_over_statement_types(agent_strs, stmt_types, params)
    assert len(self.__done_dict) == len(stmt_types) \
        or None in self.__done_dict.keys(), \
        "Done dict was not initiated for all stmt_type's."
    if not persist:
        self._compile_statements()
        return
    # Persistent mode: keep paging until all types are exhausted.
    while not self._all_done():
        self._query_over_statement_types(agent_strs, stmt_types, params)
    self._compile_statements()
    return
Use paging to get all statements requested .
17,859
def get_ids(search_term, **kwargs):
    """Search Pubmed for paper IDs given a search term.

    By default the search term is tagged with [tw] (text word) to avoid
    spurious hits; pass use_text_word=False to disable this. Remaining
    keyword arguments are forwarded as ESearch URL parameters.
    """
    use_text_word = kwargs.pop('use_text_word', True)
    if use_text_word:
        search_term += '[tw]'
    params = {'term': search_term,
              'retmax': 100000,
              'retstart': 0,
              'db': 'pubmed',
              'sort': 'pub+date'}
    params.update(kwargs)
    tree = send_request(pubmed_search, params)
    if tree is None:
        return []
    if tree.find('ERROR') is not None:
        logger.error(tree.find('ERROR').text)
        return []
    if tree.find('ErrorList') is not None:
        # Iterate the element directly: Element.getchildren() was
        # removed in Python 3.9.
        for err in tree.find('ErrorList'):
            logger.error('Error - %s: %s' % (err.tag, err.text))
        return []
    count = int(tree.find('Count').text)
    id_terms = tree.findall('IdList/Id')
    if id_terms is None:
        return []
    ids = [idt.text for idt in id_terms]
    if count != len(ids):
        # More hits exist than were returned under retmax.
        logger.warning('Not all ids were retrieved for search %s;\n'
                       'limited at %d.' % (search_term, params['retmax']))
    return ids
Search Pubmed for paper IDs given a search term .
17,860
def get_id_count(search_term):
    """Get the number of citations in Pubmed for a search query.

    Returns None if the request failed.
    """
    params = {'term': search_term, 'rettype': 'count', 'db': 'pubmed'}
    tree = send_request(pubmed_search, params)
    if tree is None:
        return None
    # Element.getchildren() was removed in Python 3.9; index the element's
    # children directly instead.
    count = list(tree)[0].text
    return int(count)
Get the number of citations in Pubmed for a search query .
17,861
def get_ids_for_gene(hgnc_name, **kwargs):
    """Get the curated set of articles for a gene in the Entrez database.

    Parameters
    ----------
    hgnc_name : str
        The HGNC symbol of the gene.
    **kwargs
        Additional parameters forwarded to the EFetch request.

    Returns
    -------
    list of str
        Unique PubMed IDs curated for the gene; empty on error.

    Raises
    ------
    ValueError
        If the HGNC name is invalid or has no Entrez ID mapping.
    """
    hgnc_id = hgnc_client.get_hgnc_id(hgnc_name)
    if hgnc_id is None:
        raise ValueError('Invalid HGNC name.')
    entrez_id = hgnc_client.get_entrez_id(hgnc_id)
    if entrez_id is None:
        raise ValueError('Entrez ID not found in HGNC table.')
    params = {'db': 'gene', 'retmode': 'xml', 'id': entrez_id}
    params.update(kwargs)
    tree = send_request(pubmed_fetch, params)
    if tree is None:
        return []
    error = tree.find('ERROR')
    if error is not None:
        logger.error(error.text)
        return []
    pmid_elements = tree.findall('.//PubMedId')
    if pmid_elements is None:
        return []
    # Deduplicate while converting elements to their text content.
    return list({el.text for el in pmid_elements})
Get the curated set of articles for a gene in the Entrez database .
17,862
def get_article_xml(pubmed_id):
    """Get the XML metadata for a single article from the Pubmed database.

    A leading 'PMID' prefix (any case) on the identifier is stripped.
    Returns the Article XML element, or None if the request failed.
    """
    if pubmed_id.upper().startswith('PMID'):
        pubmed_id = pubmed_id[4:]
    tree = send_request(pubmed_fetch,
                        {'db': 'pubmed', 'retmode': 'xml', 'id': pubmed_id})
    if tree is None:
        return None
    return tree.find('PubmedArticle/MedlineCitation/Article')
Get the XML metadata for a single article from the Pubmed database .
17,863
def get_abstract(pubmed_id, prepend_title=True):
    """Get the abstract of an article in the Pubmed database.

    If prepend_title is True the article title is included at the start of
    the returned text. Returns None when the article cannot be fetched.
    """
    article = get_article_xml(pubmed_id)
    return None if article is None else \
        _abstract_from_article_element(article, prepend_title)
Get the abstract of an article in the Pubmed database .
17,864
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False,
                               get_abstracts=False, prepend_title=False,
                               mesh_annotations=False):
    """Get metadata for an XML tree containing PubmedArticle elements.

    For each article the article, journal and annotation metadata are
    merged into a single dict; abstracts are added when requested.

    Returns
    -------
    dict
        Mapping of PMID to the metadata dict for that article.
    """
    results = {}
    for pm_article in tree.findall('./PubmedArticle'):
        medline_citation = pm_article.find('./MedlineCitation')
        article_info = _get_article_info(medline_citation,
                                         pm_article.find('PubmedData'))
        entry = {}
        entry.update(article_info)
        entry.update(_get_journal_info(medline_citation, get_issns_from_nlm))
        entry.update(_get_annotations(medline_citation))
        if get_abstracts:
            entry['abstract'] = _abstract_from_article_element(
                medline_citation.find('Article'),
                prepend_title=prepend_title)
        # Key by the PMID taken from the article info specifically.
        results[article_info['pmid']] = entry
    return results
Get metadata for an XML tree containing PubmedArticle elements .
17,865
def get_metadata_for_ids(pmid_list, get_issns_from_nlm=False,
                         get_abstracts=False, prepend_title=False):
    """Get article metadata for up to 200 PMIDs from the Pubmed database.

    Raises
    ------
    ValueError
        If more than 200 PMIDs are requested at once.
    """
    if len(pmid_list) > 200:
        raise ValueError("Metadata query is limited to 200 PMIDs at a time.")
    tree = send_request(pubmed_fetch,
                        {'db': 'pubmed', 'retmode': 'xml', 'id': pmid_list})
    if tree is None:
        return None
    return get_metadata_from_xml_tree(tree, get_issns_from_nlm,
                                      get_abstracts, prepend_title)
Get article metadata for up to 200 PMIDs from the Pubmed database .
17,866
def get_issns_for_journal(nlm_id):
    """Get a list of the ISSN numbers for a journal given its NLM ID.

    Both ISSN and ISSNLinking values are collected; returns None when the
    request fails or no ISSNs are present.
    """
    tree = send_request(pubmed_fetch,
                        {'db': 'nlmcatalog', 'retmode': 'xml', 'id': nlm_id})
    if tree is None:
        return None
    issns = tree.findall('.//ISSN') + tree.findall('.//ISSNLinking')
    return [issn.text for issn in issns] if issns else None
Get a list of the ISSN numbers for a journal given its NLM ID .
17,867
def remove_im_params(model, im):
    """Remove parameter nodes from the influence map, in place.

    Parameters
    ----------
    model : pysb.core.Model
        Model whose parameter names are stripped from the map.
    im : networkx graph
        The influence map to mutate.
    """
    for param in model.parameters:
        # Fix: replaced a bare `except: pass` (which silently swallowed
        # every exception, not just "node not present") with an explicit
        # membership check before removal.
        if param.name in im:
            im.remove_node(param.name)
Remove parameter nodes from the influence map .
17,868
def _get_signed_predecessors ( im , node , polarity ) : signed_pred_list = [ ] for pred in im . predecessors ( node ) : pred_edge = ( pred , node ) yield ( pred , _get_edge_sign ( im , pred_edge ) * polarity )
Get upstream nodes in the influence map .
17,869
def _get_edge_sign ( im , edge ) : edge_data = im [ edge [ 0 ] ] [ edge [ 1 ] ] signs = list ( set ( [ v [ 'sign' ] for v in edge_data . values ( ) if v . get ( 'sign' ) ] ) ) if len ( signs ) > 1 : logger . warning ( "Edge %s has conflicting polarities; choosing " "positive polarity by default" % str ( edge ) ) sign = 1 else : sign = signs [ 0 ] if sign is None : raise Exception ( 'No sign attribute for edge.' ) elif abs ( sign ) == 1 : return sign else : raise Exception ( 'Unexpected edge sign: %s' % edge . attr [ 'sign' ] )
Get the polarity of the influence by examining the edge sign .
17,870
def _add_modification_to_agent(agent, mod_type, residue, position):
    """Return an Agent carrying the given modification condition.

    If an equal ModCondition is already present, the original agent is
    returned unchanged; otherwise a deep copy with the condition appended
    is returned.
    """
    new_mod = ModCondition(mod_type, residue, position)
    if any(existing.equals(new_mod) for existing in agent.mods):
        return agent
    modified = deepcopy(agent)
    modified.mods.append(new_mod)
    return modified
Add a modification condition to an Agent .
17,871
def _match_lhs ( cp , rules ) : rule_matches = [ ] for rule in rules : reactant_pattern = rule . rule_expression . reactant_pattern for rule_cp in reactant_pattern . complex_patterns : if _cp_embeds_into ( rule_cp , cp ) : rule_matches . append ( rule ) break return rule_matches
Get rules with a left - hand side matching the given ComplexPattern .
17,872
def _cp_embeds_into ( cp1 , cp2 ) : if cp1 is None or cp2 is None : return False cp1 = as_complex_pattern ( cp1 ) cp2 = as_complex_pattern ( cp2 ) if len ( cp2 . monomer_patterns ) == 1 : mp2 = cp2 . monomer_patterns [ 0 ] for mp1 in cp1 . monomer_patterns : if _mp_embeds_into ( mp1 , mp2 ) : return True return False
Check that any state in ComplexPattern2 is matched in ComplexPattern1 .
17,873
def _mp_embeds_into ( mp1 , mp2 ) : sc_matches = [ ] if mp1 . monomer . name != mp2 . monomer . name : return False for site_name , site_state in mp2 . site_conditions . items ( ) : if site_name not in mp1 . site_conditions or site_state != mp1 . site_conditions [ site_name ] : return False return True
Check that conditions in MonomerPattern2 are met in MonomerPattern1 .
17,874
def _monomer_pattern_label ( mp ) : site_strs = [ ] for site , cond in mp . site_conditions . items ( ) : if isinstance ( cond , tuple ) or isinstance ( cond , list ) : assert len ( cond ) == 2 if cond [ 1 ] == WILD : site_str = '%s_%s' % ( site , cond [ 0 ] ) else : site_str = '%s_%s%s' % ( site , cond [ 0 ] , cond [ 1 ] ) elif isinstance ( cond , numbers . Real ) : continue else : site_str = '%s_%s' % ( site , cond ) site_strs . append ( site_str ) return '%s_%s' % ( mp . monomer . name , '_' . join ( site_strs ) )
Return a string label for a MonomerPattern .
17,875
def _stmt_from_rule ( model , rule_name , stmts ) : stmt_uuid = None for ann in model . annotations : if ann . predicate == 'from_indra_statement' : if ann . subject == rule_name : stmt_uuid = ann . object break if stmt_uuid : for stmt in stmts : if stmt . uuid == stmt_uuid : return stmt
Return the INDRA Statement corresponding to a given rule by name .
17,876
def generate_im(self, model):
    """Return a graph representing the influence map generated by Kappa.

    The model is exported to Kappa syntax, parsed by a KappaStd instance,
    and the medium-accuracy influence map is converted to a graph.
    """
    kappa = kappy.KappaStd()
    kappa.add_model_string(export.export(model, 'kappa'))
    kappa.project_parse()
    imap = kappa.analyses_influence_map(accuracy='medium')
    return im_json_to_graph(imap)
Return a graph representing the influence map generated by Kappa
17,877
def draw_im(self, fname):
    """Render the influence map to the given file using graphviz 'dot'."""
    agraph = nx.nx_agraph.to_agraph(self.get_im())
    agraph.draw(fname, prog='dot')
Draw and save the influence map in a file .
17,878
# Build (or return the cached) influence map for the model. For every
# statement an appropriately modified/activated/regulated observable is
# added to the model (via the local add_obs_for_agent closure, which
# converts grounded monomer patterns to Observables, tolerating duplicate
# names), self.stmt_to_obs and self.agent_to_obs are populated, the map is
# generated via self.generate_im, and finally rule_obs_dict records, for
# each rule, its directly affected observables with edge signs.
# NOTE(review): mutates self.model.observables (resets to empty first) —
# order of operations matters; left byte-identical for that reason.
def get_im ( self , force_update = False ) : if self . _im and not force_update : return self . _im if not self . model : raise Exception ( "Cannot get influence map if there is no model." ) def add_obs_for_agent ( agent ) : obj_mps = list ( pa . grounded_monomer_patterns ( self . model , agent ) ) if not obj_mps : logger . debug ( 'No monomer patterns found in model for agent %s, ' 'skipping' % agent ) return obs_list = [ ] for obj_mp in obj_mps : obs_name = _monomer_pattern_label ( obj_mp ) + '_obs' obj_obs = Observable ( obs_name , obj_mp , _export = False ) obs_list . append ( obs_name ) try : self . model . add_component ( obj_obs ) except ComponentDuplicateNameError as e : pass return obs_list self . model . observables = ComponentSet ( [ ] ) for stmt in self . statements : if isinstance ( stmt , Modification ) : mod_condition_name = modclass_to_modtype [ stmt . __class__ ] if isinstance ( stmt , RemoveModification ) : mod_condition_name = modtype_to_inverse [ mod_condition_name ] modified_sub = _add_modification_to_agent ( stmt . sub , mod_condition_name , stmt . residue , stmt . position ) obs_list = add_obs_for_agent ( modified_sub ) self . stmt_to_obs [ stmt ] = obs_list elif isinstance ( stmt , RegulateActivity ) : regulated_obj , polarity = _add_activity_to_agent ( stmt . obj , stmt . obj_activity , stmt . is_activation ) obs_list = add_obs_for_agent ( regulated_obj ) self . stmt_to_obs [ stmt ] = obs_list elif isinstance ( stmt , RegulateAmount ) : obs_list = add_obs_for_agent ( stmt . obj ) self . stmt_to_obs [ stmt ] = obs_list elif isinstance ( stmt , Influence ) : obs_list = add_obs_for_agent ( stmt . obj . concept ) self . stmt_to_obs [ stmt ] = obs_list for ag in self . agent_obs : obs_list = add_obs_for_agent ( ag ) self . agent_to_obs [ ag ] = obs_list logger . info ( "Generating influence map" ) self . _im = self . generate_im ( self . model ) node_attributes = nx . get_node_attributes ( self . _im , 'node_type' ) for rule in self . model . 
rules : obs_list = [ ] for neighb in self . _im . neighbors ( rule . name ) : if node_attributes [ neighb ] != 'variable' : continue edge_sign = _get_edge_sign ( self . _im , ( rule . name , neighb ) ) obs_list . append ( ( neighb , edge_sign ) ) self . rule_obs_dict [ rule . name ] = obs_list return self . _im
Get the influence map for the model , generating it if necessary .
17,879
def check_model(self, max_paths=1, max_path_length=5):
    """Check all the statements added to the ModelChecker.

    Returns
    -------
    list of tuple
        One (statement, PathResult) pair per statement, in order.
    """
    return [(stmt, self.check_statement(stmt, max_paths, max_path_length))
            for stmt in self.statements]
Check all the statements added to the ModelChecker .
17,880
# Check a single Statement against the model: derive the target polarity
# from the statement type (negative for RemoveModification / inhibition /
# DecreaseAmount / negative Influence), ground the subject to monomer
# patterns, look up the observables prepared by get_im, and search the
# influence map for a path from each subject pattern to each observable.
# Returns the first successful PathResult, or a failure PathResult with a
# diagnostic code (STATEMENT_TYPE_NOT_HANDLED, SUBJECT_MONOMERS_NOT_FOUND,
# OBSERVABLES_NOT_FOUND, NO_PATHS_FOUND).
def check_statement ( self , stmt , max_paths = 1 , max_path_length = 5 ) : self . get_im ( ) if not isinstance ( stmt , ( Modification , RegulateAmount , RegulateActivity , Influence ) ) : return PathResult ( False , 'STATEMENT_TYPE_NOT_HANDLED' , max_paths , max_path_length ) if isinstance ( stmt , Modification ) : target_polarity = - 1 if isinstance ( stmt , RemoveModification ) else 1 elif isinstance ( stmt , RegulateActivity ) : target_polarity = 1 if stmt . is_activation else - 1 elif isinstance ( stmt , RegulateAmount ) : target_polarity = - 1 if isinstance ( stmt , DecreaseAmount ) else 1 elif isinstance ( stmt , Influence ) : target_polarity = - 1 if stmt . overall_polarity ( ) == - 1 else 1 subj , obj = stmt . agent_list ( ) if subj is not None : subj_mps = list ( pa . grounded_monomer_patterns ( self . model , subj , ignore_activities = True ) ) if not subj_mps : logger . debug ( 'No monomers found corresponding to agent %s' % subj ) return PathResult ( False , 'SUBJECT_MONOMERS_NOT_FOUND' , max_paths , max_path_length ) else : subj_mps = [ None ] obs_names = self . stmt_to_obs [ stmt ] if not obs_names : logger . debug ( "No observables for stmt %s, returning False" % stmt ) return PathResult ( False , 'OBSERVABLES_NOT_FOUND' , max_paths , max_path_length ) for subj_mp , obs_name in itertools . product ( subj_mps , obs_names ) : result = self . _find_im_paths ( subj_mp , obs_name , target_polarity , max_paths , max_path_length ) if result . path_found : return result return PathResult ( False , 'NO_PATHS_FOUND' , max_paths , max_path_length )
Check a single Statement against the model .
17,881
# Score candidate paths against measured agent values. Each observed agent
# value is mapped to its observables; for every node on a path, the
# predicted sign of each downstream observable (path sign x rule-obs edge
# sign, flipped under loss_of_function) is compared to the measurement
# under a Gaussian observation model (sigma), accumulating log
# probabilities; unmeasured nodes contribute a neutral logcdf(0) term.
# Returns (path, score) pairs sorted by score (desc), ties broken by
# shorter path. NOTE(review): heavy logging per path — intended for
# interactive use.
def score_paths ( self , paths , agents_values , loss_of_function = False , sigma = 0.15 , include_final_node = False ) : obs_model = lambda x : scipy . stats . norm ( x , sigma ) obs_dict = { } for ag , val in agents_values . items ( ) : obs_list = self . agent_to_obs [ ag ] if obs_list is not None : for obs in obs_list : obs_dict [ obs ] = val path_scores = [ ] for path in paths : logger . info ( '------' ) logger . info ( "Scoring path:" ) logger . info ( path ) path_score = 0 last_path_node_index = - 1 if include_final_node else - 2 for node , sign in path [ : last_path_node_index ] : for affected_obs , rule_obs_sign in self . rule_obs_dict [ node ] : flip_polarity = - 1 if loss_of_function else 1 pred_sign = sign * rule_obs_sign * flip_polarity logger . info ( '%s %s: effect %s %s' % ( node , sign , affected_obs , pred_sign ) ) measured_val = obs_dict . get ( affected_obs ) if measured_val : if pred_sign <= 0 : prob_correct = obs_model ( measured_val ) . logcdf ( 0 ) else : prob_correct = obs_model ( measured_val ) . logsf ( 0 ) logger . info ( 'Actual: %s, Log Probability: %s' % ( measured_val , prob_correct ) ) path_score += prob_correct if not self . rule_obs_dict [ node ] : logger . info ( '%s %s' % ( node , sign ) ) prob_correct = obs_model ( 0 ) . logcdf ( 0 ) logger . info ( 'Unmeasured node, Log Probability: %s' % ( prob_correct ) ) path_score += prob_correct logger . info ( "Path score: %s" % path_score ) path_scores . append ( path_score ) path_tuples = list ( zip ( paths , path_scores ) ) sorted_by_length = sorted ( path_tuples , key = lambda x : len ( x [ 0 ] ) ) scored_paths = sorted ( sorted_by_length , key = lambda x : x [ 1 ] , reverse = True ) return scored_paths
Return scores associated with a given set of paths .
17,882
def prune_influence_map(self):
    """Remove edges between rules causing problematic non-transitivity.

    First all self-loops are removed, then parameter nodes are stripped.
    Finally, mutual edges between two rules are removed when each rule's
    successors differ from the other's only by the rules themselves (i.e.
    they point at each other and otherwise share all successors).
    """
    im = self.get_im()
    logger.info('Removing self loops')
    self_loops = []
    for u, v in im.edges():
        if u == v:
            logger.info('Removing self loop: %s', (u, v))
            self_loops.append((u, v))
    im.remove_edges_from(self_loops)
    # Parameter nodes are not rules/observables; drop them from the map.
    remove_im_params(self.model, im)
    # Fix: log message typo ("successorts" -> "successors").
    logger.info('Get successors of each node')
    succ_dict = {node: set(im.successors(node)) for node in im.nodes()}
    logger.info('Compare combinations of successors')
    group_key_fun = lambda x: len(succ_dict[x])
    # Only nodes with equal successor counts can be mutually redundant,
    # so group by successor count before comparing pairs.
    nodes_sorted = sorted(im.nodes(), key=group_key_fun)
    edges_to_remove = []
    for _, group in itertools.groupby(nodes_sorted, key=group_key_fun):
        for p1, p2 in itertools.combinations(group, 2):
            if succ_dict[p1].difference(succ_dict[p2]) == set([p2]) and \
                    succ_dict[p2].difference(succ_dict[p1]) == set([p1]):
                for u, v in ((p1, p2), (p2, p1)):
                    edges_to_remove.append((u, v))
                    logger.debug('Will remove edge (%s, %s)', u, v)
    logger.info('Removing %d edges from influence map'
                % len(edges_to_remove))
    im.remove_edges_from(edges_to_remove)
Remove edges between rules causing problematic non - transitivity .
17,883
def prune_influence_map_subj_obj(self):
    """Prune influence map edges where the upstream rule's object does not
    match the downstream rule's subject, based on model annotations."""
    def get_rule_info(r):
        # Collect the rule's annotated subject/object, if any.
        result = {}
        for ann in self.model.annotations:
            if ann.subject == r:
                if ann.predicate == 'rule_has_subject':
                    result['subject'] = ann.object
                elif ann.predicate == 'rule_has_object':
                    result['object'] = ann.object
        return result

    im = self.get_im()
    rules = im.nodes()
    edges_to_prune = []
    for r1, r2 in itertools.permutations(rules, 2):
        if (r1, r2) not in im.edges():
            continue
        r1_info = get_rule_info(r1)
        r2_info = get_rule_info(r2)
        # Rules lacking annotations cannot be judged; keep their edges.
        if 'object' not in r1_info or 'subject' not in r2_info:
            continue
        if r1_info['object'] != r2_info['subject']:
            # Fix: the original contained a malformed string literal
            # ('"Removing edge %s % ( r1 , r2 ) )') which was a syntax
            # error; reconstructed as a proper formatted message.
            logger.info('Removing edge (%s, %s)' % (r1, r2))
            edges_to_prune.append((r1, r2))
    im.remove_edges_from(edges_to_prune)
Prune influence map to include only edges where the object of the upstream rule matches the subject of the downstream rule .
17,884
def add_section(self, section_name):
    """Create a section of the report to be headed by section_name.

    Raises
    ------
    ValueError
        If a section with this name already exists. (Fix: the duplicate
        check now happens *before* the heading is appended, so a failed
        call no longer leaves a duplicate entry in section_headings.)
    """
    if section_name in self.sections:
        raise ValueError("Section %s already exists." % section_name)
    self.section_headings.append(section_name)
    self.sections[section_name] = []
    return
Create a section of the report to be headed by section_name
17,885
def set_section_order(self, section_name_list):
    """Set the order of the sections, which are unordered by default.

    Sections not mentioned in the list are appended at the end in their
    original (insertion) order.
    """
    ordered = list(section_name_list)
    ordered.extend(name for name in self.sections
                   if name not in section_name_list)
    self.section_headings = ordered
    return
Set the order of the sections , which are unordered by default .
17,886
def add_text(self, text, *args, **kwargs):
    """Add a paragraph of text to the document.

    A 'section' keyword routes the text into that section; all other
    positional and keyword arguments are forwarded to _preformat_text.
    """
    section_name = kwargs.pop('section', None)
    para, spacer = self._preformat_text(text, *args, **kwargs)
    target = self.story if section_name is None \
        else self.sections[section_name]
    target.extend([para, spacer])
    return
Add text to the document .
17,887
def add_image(self, image_path, width=None, height=None, section=None):
    """Add an image to the document.

    Width and height are given in inches; when a section name is supplied
    the image goes into that section, otherwise into the main story.
    """
    dims = [d * inch if d is not None else None for d in (width, height)]
    im = Image(image_path, *dims)
    target = self.story if section is None else self.sections[section]
    target.append(im)
    return
Add an image to the document .
17,888
def make_report(self, sections_first=True, section_header_params=None):
    """Create the PDF document named self.name + '.pdf'.

    Parameters
    ----------
    sections_first : bool
        If True (default) the sections precede the loose story content;
        otherwise they follow it.
    section_header_params : dict or None
        Formatting overrides for section headings; defaults to a centered
        14pt 'h1' style.

    Returns
    -------
    str
        The name of the file written.
    """
    full_story = list(self._preformat_text(self.title, style='Title',
                                           fontsize=18,
                                           alignment='center'))
    if section_header_params is None:
        section_header_params = {'style': 'h1', 'fontsize': 14,
                                 'alignment': 'center'}
    section_story = self._make_sections(**section_header_params)
    if sections_first:
        full_story.extend(section_story)
        full_story.extend(self.story)
    else:
        full_story.extend(self.story)
        full_story.extend(section_story)
    fname = self.name + '.pdf'
    doc = SimpleDocTemplate(fname, pagesize=letter,
                            rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    doc.build(full_story)
    return fname
Create the pdf document with name self . name + . pdf .
17,889
def _make_sections ( self , ** section_hdr_params ) : sect_story = [ ] if not self . section_headings and len ( self . sections ) : self . section_headings = self . sections . keys ( ) for section_name in self . section_headings : section_story = self . sections [ section_name ] line = '-' * 20 section_head_text = '%s %s %s' % ( line , section_name , line ) title , title_sp = self . _preformat_text ( section_head_text , ** section_hdr_params ) sect_story += [ title , title_sp ] + section_story return sect_story
Flatten the sections into a single story list .
17,890
def _preformat_text(self, text, style='Normal', space=None, fontsize=12,
                    alignment='left'):
    """Wrap text into a (Paragraph, Spacer) pair for a story list."""
    if space is None:
        space = (1, 12)
    ptext = ('<para alignment=\"%s\"><font size=%d>%s</font></para>'
             % (alignment, fontsize, text))
    return Paragraph(ptext, self.styles[style]), Spacer(*space)
Format the text for addition to a story list .
17,891
def get_mesh_name_from_web(mesh_id):
    """Get the MESH label for the given MESH ID using the NLM REST API.

    Returns None if the request fails or the response lacks a label.
    """
    resp = requests.get(MESH_URL + mesh_id + '.json')
    if resp.status_code != 200:
        return None
    mesh_json = resp.json()
    try:
        return mesh_json['@graph'][0]['label']['@value']
    except (KeyError, IndexError):
        return None
Get the MESH label for the given MESH ID using the NLM REST API .
17,892
def get_mesh_name(mesh_id, offline=False):
    """Get the MESH label for the given MESH ID.

    The local INDRA mapping is consulted first; unless offline is set, a
    miss falls through to a web lookup.
    """
    name = mesh_id_to_name.get(mesh_id)
    if name is None and not offline:
        return get_mesh_name_from_web(mesh_id)
    return name
Get the MESH label for the given MESH ID .
17,893
def get_mesh_id_name(mesh_term, offline=False):
    """Get the MESH ID and name for the given MESH term.

    Returns a (mesh_id, mesh_name) tuple; (None, None) if not found while
    offline.
    """
    mesh_id = mesh_name_to_id.get(mesh_term)
    if mesh_id is not None:
        return mesh_id, mesh_term
    # The term may be a synonym mapping to a different primary name.
    mesh_id, primary_name = mesh_name_to_id_name.get(mesh_term,
                                                     (None, None))
    if mesh_id is not None:
        return mesh_id, primary_name
    if offline:
        return None, None
    return get_mesh_id_name_from_web(mesh_term)
Get the MESH ID and name for the given MESH term .
17,894
# Make a RAS Machine directory: refuse (and report why) if the path
# already exists, then create the directory plus a 'jsons' subdirectory
# and install the default config.yaml. NOTE(review): in this flattened
# source the indentation level of sys.exit() is ambiguous (inside the
# else-branch vs. after the inner if/else) — left byte-identical rather
# than risk changing the exit behavior; confirm against upstream.
def make ( directory ) : if os . path . exists ( directory ) : if os . path . isdir ( directory ) : click . echo ( 'Directory already exists' ) else : click . echo ( 'Path exists and is not a directory' ) sys . exit ( ) os . makedirs ( directory ) os . mkdir ( os . path . join ( directory , 'jsons' ) ) copy_default_config ( os . path . join ( directory , 'config.yaml' ) )
Makes a RAS Machine directory
17,895
def run_with_search(model_path, config, num_days):
    """Run the machine with a PubMed search for new papers.

    Thin CLI wrapper; the import is deferred so the heavy machine module
    only loads when this command runs.
    """
    from indra.tools.machine.machine import run_with_search_helper
    run_with_search_helper(model_path, config, num_days=num_days)
Run with PubMed search for new papers .
17,896
def run_with_pmids(model_path, pmids):
    """Run the machine on the given list of PMIDs.

    Thin CLI wrapper; the import is deferred so the heavy machine module
    only loads when this command runs.
    """
    from indra.tools.machine.machine import run_with_pmids_helper
    run_with_pmids_helper(model_path, pmids)
Run with given list of PMIDs .
17,897
# Look up all identifiers (pmid/pmcid/doi) for a paper via the Pubmed ID
# mapping service. 'PMC' prefixes force idtype='pmcid'; 'PMID'/'DOI'
# prefixes are stripped from the identifier. Returns a dict with keys
# 'doi', 'pmid', 'pmcid' (values may be None), or {} on any failure.
# NOTE(review): in this flattened source the nesting of the PMID/DOI
# prefix checks relative to the PMC branch is ambiguous — left
# byte-identical; confirm against upstream before reformatting.
def id_lookup ( paper_id , idtype = None ) : if idtype is not None and idtype not in ( 'pmid' , 'pmcid' , 'doi' ) : raise ValueError ( "Invalid idtype %s; must be 'pmid', 'pmcid', " "or 'doi'." % idtype ) if paper_id . upper ( ) . startswith ( 'PMC' ) : idtype = 'pmcid' if paper_id . upper ( ) . startswith ( 'PMID' ) : paper_id = paper_id [ 4 : ] elif paper_id . upper ( ) . startswith ( 'DOI' ) : paper_id = paper_id [ 3 : ] data = { 'ids' : paper_id } if idtype is not None : data [ 'idtype' ] = idtype try : tree = pubmed_client . send_request ( pmid_convert_url , data ) except Exception as e : logger . error ( 'Error looking up PMID in PMC: %s' % e ) return { } if tree is None : return { } record = tree . find ( 'record' ) if record is None : return { } doi = record . attrib . get ( 'doi' ) pmid = record . attrib . get ( 'pmid' ) pmcid = record . attrib . get ( 'pmcid' ) ids = { 'doi' : doi , 'pmid' : pmid , 'pmcid' : pmcid } return ids
This function takes a Pubmed ID , Pubmed Central ID or DOI and uses the Pubmed ID mapping service to look up all other IDs . The IDs are returned in a dictionary .
17,898
def get_xml(pmc_id):
    """Return the XML string for the article with the given PMC ID.

    A 'PMC' prefix on the identifier is stripped. Returns None (with a
    warning logged) when the download fails or the OAI service reports an
    error.
    """
    if pmc_id.upper().startswith('PMC'):
        pmc_id = pmc_id[3:]
    params = {'verb': 'GetRecord',
              'identifier': 'oai:pubmedcentral.nih.gov:%s' % pmc_id,
              'metadataPrefix': 'pmc'}
    res = requests.get(pmc_url, params)
    if not res.status_code == 200:
        logger.warning("Couldn't download %s" % pmc_id)
        return None
    xml_bytes = res.content
    tree = ET.XML(xml_bytes, parser=UTB())
    xmlns = "http://www.openarchives.org/OAI/2.0/"
    err_tag = tree.find('{%s}error' % xmlns)
    if err_tag is None:
        return xml_bytes.decode('utf-8')
    logger.warning('PMC client returned with error %s: %s'
                   % (err_tag.attrib['code'], err_tag.text))
    return None
Returns XML for the article corresponding to a PMC ID .
17,899
def extract_paragraphs(xml_string):
    """Return a list of paragraph and title strings from NLM XML.

    Parameters
    ----------
    xml_string : str
        A string containing valid NLM XML.

    Returns
    -------
    list of str
        The joined text content of every non-empty <p> and <title>
        element (namespace prefixes allowed), in document order.
    """
    tree = etree.fromstring(xml_string.encode('utf-8'))
    # Fix: the original pattern '(^|})[p|title]$' used a *character class*,
    # matching single trailing characters 'p','t','i','l','e','|' — so
    # <title> elements were never captured. Use a proper alternation.
    tag_pat = re.compile('(^|})(p|title)$')
    paragraphs = []
    for element in tree.iter():
        # Comments/processing instructions have non-string tags; skip.
        # ('str' replaces the Python-2-only name 'basestring'.)
        if isinstance(element.tag, str) and tag_pat.search(element.tag) \
                and element.text:
            paragraphs.append(' '.join(element.itertext()))
    return paragraphs
Returns list of paragraphs in an NLM XML .