idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
17,400
def gather_implicit_activities(self):
    """Aggregate all implicit activities and active forms of Agents.

    Enzymes of (de)phosphorylation and other modifications, Gef/Gap
    regulators and RegulateActivity subjects are assumed to carry the
    corresponding activity, which is recorded on their BaseAgent.
    """
    for stmt in self.statements:
        # Phosphorylation-family enzymes imply kinase activity.
        if isinstance(stmt, Phosphorylation) or \
                isinstance(stmt, Transphosphorylation) or \
                isinstance(stmt, Autophosphorylation):
            if stmt.enz is not None:
                enz_base = self._get_base(stmt.enz)
                enz_base.add_activity('kinase')
                enz_base.add_active_state('kinase', stmt.enz.mods)
        elif isinstance(stmt, Dephosphorylation):
            if stmt.enz is not None:
                enz_base = self._get_base(stmt.enz)
                enz_base.add_activity('phosphatase')
                enz_base.add_active_state('phosphatase', stmt.enz.mods)
        # Other modifications imply generic catalytic activity.
        elif isinstance(stmt, Modification):
            if stmt.enz is not None:
                enz_base = self._get_base(stmt.enz)
                enz_base.add_activity('catalytic')
                enz_base.add_active_state('catalytic', stmt.enz.mods)
        elif isinstance(stmt, SelfModification):
            if stmt.enz is not None:
                enz_base = self._get_base(stmt.enz)
                enz_base.add_activity('catalytic')
                enz_base.add_active_state('catalytic', stmt.enz.mods)
        elif isinstance(stmt, Gef):
            if stmt.gef is not None:
                gef_base = self._get_base(stmt.gef)
                gef_base.add_activity('gef')
                if stmt.gef.activity is not None:
                    act = stmt.gef.activity.activity_type
                else:
                    act = 'activity'
                gef_base.add_active_state(act, stmt.gef.mods)
        elif isinstance(stmt, Gap):
            if stmt.gap is not None:
                gap_base = self._get_base(stmt.gap)
                gap_base.add_activity('gap')
                if stmt.gap.activity is not None:
                    act = stmt.gap.activity.activity_type
                else:
                    act = 'activity'
                # NOTE(review): the string literal 'act' looks like a bug;
                # the parallel Gef branch above passes the variable `act`
                # here -- confirm against the original implementation.
                gap_base.add_active_state('act', stmt.gap.mods)
        elif isinstance(stmt, RegulateActivity):
            if stmt.subj is not None:
                subj_base = self._get_base(stmt.subj)
                # NOTE(review): `stmt.j` is not a known Statement attribute
                # and looks like truncated/corrupted source -- verify.
                subj_base.add_activity(stmt.j)
Aggregate all implicit activities and active forms of Agents .
17,401
def require_active_forms(self):
    """Rewrite Statements so agents in active positions carry active forms.

    For each Modification the enzyme, and for each RegulateAmount or
    RegulateActivity the subject, is expanded into one new Statement per
    known active form of that agent. The rewritten list replaces
    ``self.statements`` and is also returned.

    The previously duplicated enzyme/subject branches are unified: only
    the attribute holding the agent in the "active position" differs.
    """
    logger.info('Setting required active forms on %d statements...' %
                len(self.statements))
    new_stmts = []
    for stmt in self.statements:
        # Determine which attribute is in the active position.
        if isinstance(stmt, Modification):
            attr = 'enz'
        elif isinstance(stmt, (RegulateAmount, RegulateActivity)):
            attr = 'subj'
        else:
            new_stmts.append(stmt)
            continue
        agent = getattr(stmt, attr)
        if agent is None:
            new_stmts.append(stmt)
            continue
        active_forms = self._get_base(agent).get_active_forms()
        if not active_forms:
            new_stmts.append(stmt)
        else:
            # One new statement per active form, each with a fresh uuid.
            for af in active_forms:
                new_stmt = fast_deepcopy(stmt)
                new_stmt.uuid = str(uuid.uuid4())
                evs = af.apply_to(getattr(new_stmt, attr))
                new_stmt.partial_evidence = evs
                new_stmts.append(new_stmt)
    self.statements = new_stmts
    return new_stmts
Rewrites Statements with Agents active forms in active positions .
17,402
def reduce_activities(self):
    """Rewrite the activity types referenced in Statements for consistency."""
    for stmt in self.statements:
        # Reduce the activity condition on every agent of the statement.
        for agent in stmt.agent_list():
            if agent is None or agent.activity is None:
                continue
            base = self._get_base(agent)
            reduced = base.get_activity_reduction(agent.activity.activity_type)
            if reduced is not None:
                agent.activity.activity_type = reduced
        # Statement-level activity attributes are reduced separately.
        if isinstance(stmt, RegulateActivity):
            if stmt.obj is not None:
                base = self._get_base(stmt.obj)
                reduced = base.get_activity_reduction(stmt.obj_activity)
                if reduced is not None:
                    stmt.obj_activity = reduced
        elif isinstance(stmt, ActiveForm):
            base = self._get_base(stmt.agent)
            reduced = base.get_activity_reduction(stmt.activity)
            if reduced is not None:
                stmt.activity = reduced
Rewrite the activity types referenced in Statements for consistency .
17,403
def infer_complexes(stmts):
    """Return Complex Statements inferred from Modification Statements.

    A modification implies that the enzyme and the substrate physically
    interact, hence a Complex between the two.
    """
    linked_stmts = []
    for mod_stmt in _get_statements_by_type(stmts, Modification):
        if mod_stmt.enz is None:
            continue
        linked_stmts.append(Complex([mod_stmt.enz, mod_stmt.sub],
                                    evidence=mod_stmt.evidence))
    return linked_stmts
Return inferred Complex from Statements implying physical interaction .
17,404
def infer_activations(stmts):
    """Return inferred RegulateActivity from Modification + ActiveForm.

    For every (ActiveForm, Modification) pair where the ActiveForm
    describes the modified substrate and one of its mod conditions
    matches the modification's type and site, an Activation or
    Inhibition is inferred depending on the ActiveForm polarity.
    """
    linked_stmts = []
    af_stmts = _get_statements_by_type(stmts, ActiveForm)
    mod_stmts = _get_statements_by_type(stmts, Modification)
    for af_stmt, mod_stmt in itertools.product(af_stmts, mod_stmts):
        if mod_stmt.enz is None or \
                not af_stmt.agent.entity_matches(mod_stmt.sub):
            continue
        if not af_stmt.agent.mods:
            continue
        # Does any mod condition match the modification's type and site?
        site_match = any(
            mc.mod_type == modclass_to_modtype[mod_stmt.__class__] and
            mc.residue == mod_stmt.residue and
            mc.position == mod_stmt.position
            for mc in af_stmt.agent.mods)
        if not site_match:
            continue
        ev = mod_stmt.evidence
        stmt_cls = Activation if af_stmt.is_active else Inhibition
        st = stmt_cls(mod_stmt.enz, mod_stmt.sub, af_stmt.activity,
                      evidence=ev)
        linked_stmts.append(LinkedStatement([af_stmt, mod_stmt], st))
    return linked_stmts
Return inferred RegulateActivity from Modification + ActiveForm .
17,405
def infer_active_forms(stmts):
    """Return inferred ActiveForm from RegulateActivity + Modification.

    For a RegulateActivity with an active kinase subject, modifications
    by that subject on the regulated object are turned into an
    ActiveForm describing the object's modified state.

    Fix: the evidence list of the RegulateActivity statement is copied
    before being extended; previously the input statement's own
    `evidence` list was mutated in place.
    """
    linked_stmts = []
    for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
        # Only consider regulations with an active kinase subject.
        if not (act_stmt.subj.activity is not None and
                act_stmt.subj.activity.activity_type == 'kinase' and
                act_stmt.subj.activity.is_active):
            continue
        matching = []
        # Copy so that extending below does not mutate act_stmt.evidence.
        ev = act_stmt.evidence[:]
        for mod_stmt in _get_statements_by_type(stmts, Modification):
            if mod_stmt.enz is not None:
                if mod_stmt.enz.entity_matches(act_stmt.subj) and \
                        mod_stmt.sub.entity_matches(act_stmt.obj):
                    matching.append(mod_stmt)
                    ev.extend(mod_stmt.evidence)
        if not matching:
            continue
        mods = []
        for mod_stmt in matching:
            mod_type_name = mod_stmt.__class__.__name__.lower()
            if isinstance(mod_stmt, AddModification):
                is_modified = True
            else:
                is_modified = False
                # Strip the 'de' prefix of removal statements, e.g.
                # 'dephosphorylation' -> 'phosphorylation'.
                mod_type_name = mod_type_name[2:]
            mc = ModCondition(mod_type_name, mod_stmt.residue,
                              mod_stmt.position, is_modified)
            mods.append(mc)
        source_stmts = [act_stmt] + matching
        st = ActiveForm(Agent(act_stmt.obj.name, mods=mods,
                              db_refs=act_stmt.obj.db_refs),
                        act_stmt.obj_activity, act_stmt.is_activation,
                        evidence=ev)
        linked_stmts.append(LinkedStatement(source_stmts, st))
        logger.info('inferred: %s' % st)
    return linked_stmts
Return inferred ActiveForm from RegulateActivity + Modification .
17,406
def infer_modifications(stmts):
    """Return inferred Modification from RegulateActivity + ActiveForm.

    When a regulated agent has an ActiveForm carrying only modification
    conditions, each condition is turned back into a Modification
    statement by the regulating subject.
    """
    linked_stmts = []
    for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
        for af_stmt in _get_statements_by_type(stmts, ActiveForm):
            if not af_stmt.agent.entity_matches(act_stmt.obj):
                continue
            mods = af_stmt.agent.mods
            # Skip ActiveForms carrying state other than modifications.
            if af_stmt.agent.mutations or \
                    af_stmt.agent.bound_conditions or \
                    af_stmt.agent.location:
                continue
            if not af_stmt.agent.mods:
                continue
            for mod in af_stmt.agent.mods:
                evs = act_stmt.evidence + af_stmt.evidence
                # The inferred statement is necessarily indirect.
                for ev in evs:
                    ev.epistemics['direct'] = False
                mod_type_name = mod.mod_type if mod.is_modified \
                    else modtype_to_inverse[mod.mod_type]
                mod_class = modtype_to_modclass[mod_type_name]
                if not mod_class:
                    continue
                st = mod_class(act_stmt.subj, act_stmt.obj,
                               mod.residue, mod.position,
                               evidence=evs)
                linked_stmts.append(LinkedStatement([act_stmt, af_stmt], st))
                logger.info('inferred: %s' % st)
    return linked_stmts
Return inferred Modification from RegulateActivity + ActiveForm .
17,407
def replace_complexes(self, linked_stmts=None):
    """Remove Complex Statements that can be inferred out."""
    if linked_stmts is None:
        linked_stmts = self.infer_complexes(self.statements)
    new_stmts = []
    for stmt in self.statements:
        if not isinstance(stmt, Complex):
            new_stmts.append(stmt)
            continue
        # Drop the Complex if any inferred statement refines it.
        if any(ls.refinement_of(stmt, hierarchies) for ls in linked_stmts):
            logger.info('Removing complex: %s' % stmt)
        else:
            new_stmts.append(stmt)
    self.statements = new_stmts
Remove Complex Statements that can be inferred out .
17,408
def replace_activations(self, linked_stmts=None):
    """Remove RegulateActivity Statements that can be inferred out."""
    if linked_stmts is None:
        linked_stmts = self.infer_activations(self.statements)
    new_stmts = []
    for stmt in self.statements:
        if not isinstance(stmt, RegulateActivity):
            new_stmts.append(stmt)
            continue
        found = False
        for linked_stmt in linked_stmts:
            inferred = linked_stmt.inferred_stmt
            # Match polarity, subject and object against the inference.
            if stmt.is_activation == inferred.is_activation and \
                    stmt.subj.entity_matches(inferred.subj) and \
                    stmt.obj.entity_matches(inferred.obj):
                found = True
        if found:
            logger.info('Removing regulate activity: %s' % stmt)
        else:
            new_stmts.append(stmt)
    self.statements = new_stmts
Remove RegulateActivity Statements that can be inferred out .
17,409
def get_create_base_agent(self, agent):
    """Return the BaseAgent for an Agent, creating it if needed."""
    if agent.name not in self.agents:
        self.agents[agent.name] = BaseAgent(agent.name)
    return self.agents[agent.name]
Return BaseAgent from an Agent creating it if needed .
17,410
def apply_to(self, agent):
    """Apply this object's state to an Agent and return the evidence.

    Note: the state attributes are assigned by reference, not copied,
    so the agent shares them with this object afterwards.
    """
    agent.bound_conditions = self.bound_conditions
    agent.mods = self.mods
    agent.mutations = self.mutations
    agent.location = self.location
    return self.evidence
Apply this object's state to an Agent.
17,411
def submit_curation():
    """Submit curations for a given corpus (HTTP endpoint)."""
    if request.json is None:
        abort(Response('Missing application/json header.', 415))
    corpus_id = request.json.get('corpus_id')
    curations = request.json.get('curations', {})
    try:
        curator.submit_curation(corpus_id, curations)
    except InvalidCorpusError:
        # Report an unknown corpus ID as a client error.
        abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
        return
    return jsonify({})
Submit curations for a given corpus .
17,412
def update_beliefs():
    """Return updated beliefs for a corpus (HTTP endpoint)."""
    if request.json is None:
        abort(Response('Missing application/json header.', 415))
    corpus_id = request.json.get('corpus_id')
    try:
        belief_dict = curator.update_beliefs(corpus_id)
    except InvalidCorpusError:
        # Report an unknown corpus ID as a client error.
        abort(Response('The corpus_id "%s" is unknown.' % corpus_id, 400))
        return
    return jsonify(belief_dict)
Return updated beliefs based on current probability model .
17,413
def reset_scorer(self):
    """Reset the belief scorer and discard all recorded curations."""
    self.scorer = get_eidos_bayesian_scorer()
    for corpus in self.corpora.values():
        corpus.curations = {}
Reset the scorer used for curation.
17,414
def get_corpus(self, corpus_id):
    """Return the corpus for an ID; raise InvalidCorpusError if unknown."""
    try:
        return self.corpora[corpus_id]
    except KeyError:
        raise InvalidCorpusError
Return a corpus given an ID .
17,415
def update_beliefs(self, corpus_id):
    """Return updated belief scores for a given corpus.

    Priors are computed with the current scorer, then curations override
    the belief of the curated statements directly.
    """
    corpus = self.get_corpus(corpus_id)
    be = BeliefEngine(self.scorer)
    stmts = list(corpus.statements.values())
    be.set_prior_probs(stmts)
    for stmt_uuid, correct in corpus.curations.items():
        stmt = corpus.statements.get(stmt_uuid)
        if stmt is None:
            logger.warning('%s is not in the corpus.' % stmt_uuid)
            continue
        stmt.belief = correct
    return {st.uuid: st.belief for st in stmts}
Return updated belief scores for a given corpus .
17,416
def get_python_list(scala_list):
    """Return a Python list of the elements of a scala immutable List."""
    return [scala_list.apply(i) for i in range(scala_list.length())]
Return list from elements of scala . collection . immutable . List
17,417
def get_python_dict(scala_map):
    """Return a Python dict of the entries of a scala immutable Map."""
    python_dict = {}
    # Walk the scala key list directly instead of materializing it first.
    key_list = scala_map.keys().toList()
    for idx in range(key_list.length()):
        key = key_list.apply(idx)
        python_dict[key] = scala_map.apply(key)
    return python_dict
Return a dict from entries in a scala . collection . immutable . Map
17,418
def get_python_json(scala_json):
    """Return a Python JSON-like structure from an org.json4s AST.

    Conversion is driven by the Java/Scala class name of each node since
    the nodes are JVM proxy objects rather than Python natives.
    """
    def convert_node(node):
        # JSON objects: convert the underlying key/value map recursively.
        if node.__class__.__name__ in ('org.json4s.JsonAST$JValue',
                                       'org.json4s.JsonAST$JObject'):
            values_raw = get_python_dict(node.values())
            values = {}
            for k, v in values_raw.items():
                values[k] = convert_node(v)
            return values
        # Bare scala maps are converted the same way as JSON objects.
        elif node.__class__.__name__.startswith(
                'scala.collection.immutable.Map') or \
                node.__class__.__name__ == \
                'scala.collection.immutable.HashMap$HashTrieMap':
            values_raw = get_python_dict(node)
            values = {}
            for k, v in values_raw.items():
                values[k] = convert_node(v)
            return values
        # JSON arrays and scala cons lists become Python lists.
        elif node.__class__.__name__ == 'org.json4s.JsonAST$JArray':
            entries_raw = get_python_list(node.values())
            entries = []
            for entry in entries_raw:
                entries.append(convert_node(entry))
            return entries
        elif node.__class__.__name__ == \
                'scala.collection.immutable.$colon$colon':
            entries_raw = get_python_list(node)
            entries = []
            for entry in entries_raw:
                entries.append(convert_node(entry))
            return entries
        elif node.__class__.__name__ == 'scala.math.BigInt':
            return node.intValue()
        elif node.__class__.__name__ == 'scala.None$':
            return None
        elif node.__class__.__name__ == 'scala.collection.immutable.Nil$':
            return []
        elif isinstance(node, (str, int, float)):
            return node
        else:
            # Unknown node types are logged and replaced by their class name.
            logger.error('Cannot convert %s into Python' %
                         node.__class__.__name__)
            return node.__class__.__name__
    python_json = convert_node(scala_json)
    return python_json
Return a JSON dict from a org . json4s . JsonAST
17,419
def get_heat_kernel(network_id):
    """Return the ID of a heat kernel calculated for a given network."""
    url = ndex_relevance + '/%s/generate_ndex_heat_kernel' % network_id
    res = ndex_client.send_request(url, {}, is_json=True, use_get=True)
    # Both a failed request and a missing kernel_id are reported the same way.
    kernel_id = res.get('kernel_id') if res is not None else None
    if kernel_id is None:
        logger.error('Could not get heat kernel for network %s.' % network_id)
        return None
    return kernel_id
Return the identifier of a heat kernel calculated for a given network .
17,420
def get_relevant_nodes(network_id, query_nodes):
    """Return a set of network nodes relevant to a given query set.

    Parameters
    ----------
    network_id : str
        The NDEx network in which to rank entities.
    query_nodes : str or list of str
        A node name or list of node names to use as the query.

    Returns
    -------
    list or None
        The ranked entities returned by the service, or None on failure.
    """
    url = ndex_relevance + '/rank_entities'
    kernel_id = get_heat_kernel(network_id)
    if kernel_id is None:
        return None
    # `basestring` only exists on Python 2; `str` is the correct check on
    # Python 3 and keeps single node names wrapped in a list.
    if isinstance(query_nodes, str):
        query_nodes = [query_nodes]
    params = {'identifier_set': query_nodes,
              'kernel_id': kernel_id}
    res = ndex_client.send_request(url, params, is_json=True)
    if res is None:
        logger.error("ndex_client.send_request returned None.")
        return None
    ranked_entities = res.get('ranked_entities')
    if ranked_entities is None:
        logger.error('Could not get ranked entities.')
        return None
    return ranked_entities
Return a set of network nodes relevant to a given query set .
17,421
def _get_belief_package(stmt):
    """Return the belief packages of a statement and its supports,
    recursively, ending with the statement's own package."""
    belief_packages = []
    for supporting in stmt.supports:
        parent_packages = _get_belief_package(supporting)
        # De-duplicate against packages collected so far by statement key.
        existing_keys = [pkg.statement_key for pkg in belief_packages]
        for package in parent_packages:
            if package.statement_key not in existing_keys:
                belief_packages.append(package)
    belief_packages.append(BeliefPackage(stmt.matches_key(), stmt.evidence))
    return belief_packages
Return the belief packages of a given statement recursively .
17,422
def sample_statements(stmts, seed=None):
    """Return statements sampled according to belief.

    Each statement is kept with probability equal to its belief score.

    Parameters
    ----------
    stmts : list
        Statements, each with a `belief` attribute.
    seed : int, optional
        Seed for numpy's random number generator for reproducibility.

    Fix: compare against None explicitly -- `if seed:` silently ignored
    a seed of 0, making seed=0 non-reproducible.
    """
    if seed is not None:
        numpy.random.seed(seed)
    draws = numpy.random.rand(len(stmts))
    return [stmt for i, stmt in enumerate(stmts) if draws[i] < stmt.belief]
Return statements sampled according to belief .
17,423
def evidence_random_noise_prior(evidence, type_probs, subtype_probs):
    """Return the random-noise prior probability for an evidence.

    A subtype-specific probability takes precedence over the source-level
    one when available.
    """
    stype, subtype = tag_evidence_subtype(evidence)
    if subtype_probs is not None:
        subtype_entry = subtype_probs.get(stype, {})
        if subtype in subtype_entry:
            return subtype_entry[subtype]
    return type_probs[stype]
Determines the random - noise prior probability for this evidence .
17,424
def tag_evidence_subtype(evidence):
    """Return (source_api, subtype) describing an evidence's origin.

    The subtype is typically the extraction rule or database sub-source;
    it is None when no finer classification is available.
    """
    source_api = evidence.source_api
    annotations = evidence.annotations
    subtype = None
    if source_api == 'biopax':
        subtype = annotations.get('source_sub_id')
    elif source_api in ('reach', 'eidos'):
        if 'found_by' in annotations:
            # Imported lazily to avoid a hard dependency on the reach
            # processor for other sources.
            from indra.sources.reach.processor import determine_reach_subtype
            if source_api == 'reach':
                subtype = determine_reach_subtype(annotations['found_by'])
            elif source_api == 'eidos':
                subtype = annotations['found_by']
        else:
            logger.debug('Could not find found_by attribute in reach '
                         'statement annoations')
    elif source_api == 'geneways':
        subtype = annotations['actiontype']
    return (source_api, subtype)
Return the type and subtype of an evidence object as strings — typically the extraction rule or database from which the statement was generated.
17,425
def score_evidence_list(self, evidences):
    """Return a belief score given a list of supporting evidences.

    Negated evidences count against the score: the result is the
    probability from positive evidence times one minus the probability
    from negated evidence.
    """
    def _score(evidences):
        if not evidences:
            return 0
        # Systematic error counts once per source; random errors multiply
        # over all evidences of a source.
        sources = [ev.source_api for ev in evidences]
        uniq_sources = numpy.unique(sources)
        syst_factors = {s: self.prior_probs['syst'][s] for s in uniq_sources}
        rand_factors = {k: [] for k in uniq_sources}
        for ev in evidences:
            rand_factors[ev.source_api].append(
                evidence_random_noise_prior(ev,
                                            self.prior_probs['rand'],
                                            self.subtype_probs))
        neg_prob_prior = 1
        for s in uniq_sources:
            neg_prob_prior *= (syst_factors[s] +
                               numpy.prod(rand_factors[s]))
        return 1 - neg_prob_prior
    pos_evidence = [ev for ev in evidences
                    if not ev.epistemics.get('negated')]
    neg_evidence = [ev for ev in evidences
                    if ev.epistemics.get('negated')]
    pp = _score(pos_evidence)
    np = _score(neg_evidence)
    return pp * (1 - np)
Return belief score given a list of supporting evidences .
17,426
def score_statement(self, st, extra_evidence=None):
    """Return the prior belief probability for an INDRA Statement.

    `extra_evidence`, when given, is scored together with the
    statement's own evidence.
    """
    extra = [] if extra_evidence is None else extra_evidence
    return self.score_evidence_list(st.evidence + extra)
Computes the prior belief probability for an INDRA Statement .
17,427
def check_prior_probs(self, statements):
    """Raise an Exception if a probability parameter needed to score the
    given statements is missing from the prior probabilities."""
    sources = set()
    for stmt in statements:
        sources.update(ev.source_api for ev in stmt.evidence)
    # Every source must have both a random and a systematic error entry.
    for err_type in ('rand', 'syst'):
        for source in sources:
            if source not in self.prior_probs[err_type]:
                raise Exception('BeliefEngine missing probability parameter'
                                ' for source: %s' % source)
Throw Exception if BeliefEngine parameter is missing .
17,428
def update_probs(self):
    """Update the internal probability values given the curation counts.

    Random-error probabilities are recomputed from positive/negative
    counts while the systematic error is held at a fixed value.
    """
    # Fixed systematic error rate assumed for every source.
    syst_error = 0.05
    prior_probs = {'syst': {}, 'rand': {}}
    for source, (p, n) in self.prior_counts.items():
        if n + p == 0:
            continue
        prior_probs['syst'][source] = syst_error
        # Random error = 1 - precision - systematic error, with precision
        # capped at (1 - syst_error) so the result never goes negative.
        prior_probs['rand'][source] = \
            1 - min((float(p) / (n + p), 1 - syst_error)) - syst_error
    subtype_probs = {}
    for source, entry in self.subtype_counts.items():
        for rule, (p, n) in entry.items():
            if n + p == 0:
                continue
            if source not in subtype_probs:
                subtype_probs[source] = {}
            subtype_probs[source][rule] = \
                1 - min((float(p) / (n + p), 1 - syst_error)) - syst_error
    # Delegate the actual assignment to the parent scorer class.
    super(BayesianScorer, self).update_probs(prior_probs, subtype_probs)
Update the internal probability values given the counts .
17,429
def update_counts(self, prior_counts, subtype_counts):
    """Add new positive/negative curation counts to the internal counts
    and refresh the derived probabilities."""
    for source, (pos, neg) in prior_counts.items():
        counts = self.prior_counts.setdefault(source, [0, 0])
        counts[0] += pos
        counts[1] += neg
    for source, subtype_dict in subtype_counts.items():
        source_entry = self.subtype_counts.setdefault(source, {})
        for subtype, (pos, neg) in subtype_dict.items():
            counts = source_entry.setdefault(subtype, [0, 0])
            counts[0] += pos
            counts[1] += neg
    # Re-derive the probabilities from the updated counts right away.
    self.update_probs()
Update the internal counts based on given new counts .
17,430
def set_prior_probs(self, statements):
    """Set the prior belief probability on each of the given Statements."""
    # Fail early if the scorer lacks parameters for any evidence source.
    self.scorer.check_prior_probs(statements)
    for stmt in statements:
        stmt.belief = self.scorer.score_statement(stmt)
Sets the prior belief probabilities for a list of INDRA Statements .
17,431
def set_hierarchy_probs(self, statements):
    """Set hierarchical belief probabilities for INDRA Statements.

    Statements are ordered by the support hierarchy and each is scored
    together with the non-negated evidence of everything it subsumes.
    """
    def build_hierarchy_graph(stmts):
        # Edge direction: supported (more specific) -> supporting
        # (more general) statement.
        g = networkx.DiGraph()
        for st1 in stmts:
            g.add_node(st1.matches_key(), stmt=st1)
            for st2 in st1.supported_by:
                g.add_node(st2.matches_key(), stmt=st2)
                g.add_edge(st2.matches_key(), st1.matches_key())
        return g
    def get_ranked_stmts(g):
        # Reverse topological order so supports come after the statements
        # they support.
        node_ranks = networkx.algorithms.dag.topological_sort(g)
        node_ranks = reversed(list(node_ranks))
        # NOTE(review): `g.node` is the networkx 1.x API; networkx >= 2
        # renamed it to `g.nodes` -- confirm the pinned networkx version.
        stmts = [g.node[n]['stmt'] for n in node_ranks]
        return stmts
    def assert_no_cycle(g):
        # A cycle would make the topological sort invalid.
        try:
            cyc = networkx.algorithms.cycles.find_cycle(g)
        except networkx.exception.NetworkXNoCycle:
            return
        msg = 'Cycle found in hierarchy graph: %s' % cyc
        assert False, msg
    g = build_hierarchy_graph(statements)
    assert_no_cycle(g)
    ranked_stmts = get_ranked_stmts(g)
    for st in ranked_stmts:
        bps = _get_belief_package(st)
        # All packages except the last belong to subsumed statements;
        # collect their non-negated evidences as extra support.
        supporting_evidences = []
        for bp in bps[:-1]:
            for ev in bp.evidences:
                if not ev.epistemics.get('negated'):
                    supporting_evidences.append(ev)
        belief = self.scorer.score_statement(st, supporting_evidences)
        st.belief = belief
Sets hierarchical belief probabilities for INDRA Statements .
17,432
def set_linked_probs(self, linked_statements):
    """Set the belief of each inferred Statement to the product of the
    beliefs of its source Statements."""
    for linked in linked_statements:
        beliefs = [source.belief for source in linked.source_stmts]
        linked.inferred_stmt.belief = numpy.prod(beliefs)
Sets the belief probabilities for a list of linked INDRA Statements .
17,433
def extract_statements(self):
    """Extract INDRA Statements from the loaded JSON paragraphs."""
    for paragraph_info in self._json:
        paragraph = RlimspParagraph(paragraph_info, self.doc_id_type)
        self.statements.extend(paragraph.get_statements())
    return
Extract the statements from the json .
17,434
def _get_agent ( self , entity_id ) : if entity_id is None : return None entity_info = self . _entity_dict . get ( entity_id ) if entity_info is None : logger . warning ( "Entity key did not resolve to entity." ) return None return get_agent_from_entity_info ( entity_info )
Convert the entity dictionary into an INDRA Agent .
17,435
def _get_evidence(self, trigger_id, args, agent_coords, site_coords):
    """Return an Evidence object built from the trigger entity info.

    Coordinates are shifted to be relative to the start of the first
    sentence spanned by the event's arguments.

    Fix: site coordinates are only annotated when a sentence start is
    available; previously `s_start` was referenced without ever being
    assigned when no argument carried a sentence index, raising a
    NameError.
    """
    trigger_info = self._entity_dict[trigger_id]
    # Sentence indices of all arguments that carry one.
    s_idx_set = {self._entity_dict[eid]['sentenceIndex']
                 for eid in args.values()
                 if 'sentenceIndex' in self._entity_dict[eid]}
    s_start = None
    if s_idx_set:
        i_min = min(s_idx_set)
        i_max = max(s_idx_set)
        text = '. '.join(self._sentences[i_min:(i_max + 1)]) + '.'
        s_start = self._sentence_starts[i_min]
        annotations = {
            'agents': {'coords': [_fix_coords(coords, s_start)
                                  for coords in agent_coords]},
            'trigger': {'coords': _fix_coords([trigger_info['charStart'],
                                               trigger_info['charEnd']],
                                              s_start)}
            }
    else:
        logger.info('Unable to get sentence index')
        annotations = {}
        text = None
    if site_coords and s_start is not None:
        annotations['site'] = {'coords': _fix_coords(site_coords, s_start)}
    return Evidence(text_refs=self._text_refs.copy(), text=text,
                    source_api='rlimsp',
                    pmid=self._text_refs.get('PMID'),
                    annotations=annotations)
Get the evidence using the info in the trigger entity .
17,436
def get_reader_classes(parent=Reader):
    """Return all childless descendant classes of `parent`, recursively.

    A class that has subclasses of its own is replaced in the result by
    its leaf descendants.
    """
    children = parent.__subclasses__()
    descendants = children[:]
    for child in children:
        grandchildren = get_reader_classes(child)
        if grandchildren:
            # Non-leaf child: swap it out for its own leaf descendants.
            descendants.remove(child)
            descendants.extend(grandchildren)
    return descendants
Get all childless descendants of a parent class, recursively.
17,437
def get_reader_class(reader_name):
    """Return the reader class matching `reader_name` (case-insensitive),
    or None if there is no such reader."""
    target = reader_name.lower()
    for reader_class in get_reader_classes():
        if reader_class.name.lower() == target:
            return reader_class
    logger.error("No such reader: %s" % reader_name)
    return None
Get a particular reader class by name .
17,438
def from_file(cls, file_path, compressed=False, encoded=False):
    """Create a content object from a file path.

    The file's base name (without extension) becomes the content ID and
    its extension the format; the file itself is not read here.
    """
    base_name = path.basename(file_path)
    file_id = '.'.join(base_name.split('.')[:-1])
    file_format = file_path.split('.')[-1]
    content = cls(file_id, file_format, compressed, encoded)
    content.file_exists = True
    content._location = path.dirname(file_path)
    return content
Create a content object from a file path .
17,439
def change_id(self, new_id):
    """Change the ID of this content, refreshing the cached file name
    and path."""
    # Load the raw content first so it is not lost when the name changes.
    self._load_raw_content()
    self._id = new_id
    self.get_filename(renew=True)
    self.get_filepath(renew=True)
    return
Change the id of this content .
17,440
def change_format(self, new_format):
    """Change the format label of this content, refreshing the cached
    file name and path."""
    # Load the raw content first so it is not lost when the name changes.
    self._load_raw_content()
    self._format = new_format
    self.get_filename(renew=True)
    self.get_filepath(renew=True)
    return
Change the format label of this content .
17,441
def get_text(self):
    """Return the decompressed, decoded text of this content, caching
    the result."""
    self._load_raw_content()
    if self._text is None:
        assert self._raw_content is not None
        ret_cont = self._raw_content
        if self.compressed:
            # MAX_WBITS + 16 tells zlib to expect a gzip container.
            ret_cont = zlib.decompress(ret_cont, zlib.MAX_WBITS + 16)
        if self.encoded:
            ret_cont = ret_cont.decode('utf-8')
        self._text = ret_cont
    assert self._text is not None
    return self._text
Get the loaded decompressed and decoded text of this content .
17,442
def get_filename(self, renew=False):
    """Return the filename of this content, rebuilding it on demand."""
    if renew or self._fname is None:
        self._fname = '%s.%s' % (self._id, self._format)
    return self._fname
Get the filename of this content .
17,443
def get_filepath(self, renew=False):
    """Return the full path for this file, defaulting the location to
    the current directory."""
    if renew or self._location is None:
        self._location = '.'
    return path.join(self._location, self.get_filename())
Get the file path joining the name and location for this file .
17,444
def get_statements(self, reprocess=False):
    """Return INDRA Statements processed from this reading's content.

    Results are cached on the instance; pass reprocess=True to force the
    content through the appropriate processor again.

    Raises
    ------
    ReadingError
        If the reader or format combination is not recognized.
    """
    if self._statements is None or reprocess:
        if self.content is None:
            self._statements = []
            return []
        # Dispatch to the processor matching the reader that produced
        # this content.
        if self.reader == ReachReader.name:
            if self.format == formats.JSON:
                # Process the REACH output with INDRA's API.
                json_str = json.dumps(self.content)
                processor = reach.process_json_str(json_str)
            else:
                raise ReadingError("Incorrect format for Reach output: %s."
                                   % self.format)
        elif self.reader == SparserReader.name:
            if self.format == formats.JSON:
                processor = sparser.process_json_dict(self.content)
                if processor is not None:
                    processor.set_statements_pmid(None)
            else:
                raise ReadingError("Sparser should only ever be JSON, not "
                                   "%s." % self.format)
        elif self.reader == TripsReader.name:
            processor = trips.process_xml(self.content)
        else:
            raise ReadingError("Unknown reader: %s." % self.reader)
        if processor is None:
            logger.error("Production of statements from %s failed for %s."
                         % (self.reader, self.content_id))
            stmts = []
        else:
            stmts = processor.statements
        # Cache a copy so callers cannot mutate the cached list.
        self._statements = stmts[:]
    else:
        stmts = self._statements[:]
    return stmts
General method to create statements .
17,445
def add_result(self, content_id, content, **kwargs):
    """Wrap a reading result in this reader's result class and record it."""
    self.results.append(
        self.ResultClass(content_id, self.name, self.version,
                         formats.JSON, content, **kwargs))
    return
Add a result to the list of results .
17,446
def _check_content ( self , content_str ) : if self . do_content_check : space_ratio = float ( content_str . count ( ' ' ) ) / len ( content_str ) if space_ratio > self . max_space_ratio : return "space-ratio: %f > %f" % ( space_ratio , self . max_space_ratio ) if len ( content_str ) > self . input_character_limit : return "too long: %d > %d" % ( len ( content_str ) , self . input_character_limit ) return None
Check if the content is likely to be successfully read .
17,447
def _check_reach_env():
    """Return the REACH jar path and version from the environment.

    Raises
    ------
    ReachError
        If the REACH path is unset or does not exist on disk.
    """
    # Get the path to the REACH jar from the config or the environment.
    path_to_reach = get_config('REACHPATH')
    if path_to_reach is None:
        path_to_reach = environ.get('REACHPATH', None)
    if path_to_reach is None or not path.exists(path_to_reach):
        raise ReachError(
            'Reach path unset or invalid. Check REACHPATH environment var '
            'and/or config file.')
    logger.debug('Using REACH jar at: %s' % path_to_reach)
    # Determine the REACH version, falling back to parsing the jar name.
    reach_version = get_config('REACH_VERSION')
    if reach_version is None:
        reach_version = environ.get('REACH_VERSION', None)
    if reach_version is None:
        logger.debug('REACH version not set in REACH_VERSION')
        # Raw strings: '\.' in a plain string literal is an invalid escape
        # sequence, which warns on modern Python.
        m = re.match(r'reach-(.*?)\.jar', path.basename(path_to_reach))
        reach_version = re.sub(r'-SNAP.*?$', '', m.groups()[0])
    logger.debug('Using REACH version: %s' % reach_version)
    return path_to_reach, reach_version
Check that the environment supports running REACH.
17,448
def prep_input(self, read_list):
    """Copy the content objects into the REACH input directory.

    Content whose ID is not a numeric-style string is renamed to a
    generated FILE%06d ID (recorded in self.id_maps), because REACH
    derives output IDs from file names.
    """
    logger.info("Prepping input.")
    i = 0
    for content in read_list:
        # Skip content that is unlikely to read successfully.
        quality_issue = self._check_content(content.get_text())
        if quality_issue is not None:
            logger.warning("Skipping %d due to: %s"
                           % (content.get_id(), quality_issue))
            continue
        cid = content.get_id()
        if isinstance(cid, str) and re.match('^\w*?\d+$', cid) is None:
            # Non-numeric ID: substitute a generated one and remember the
            # mapping so results can be mapped back later.
            new_id = 'FILE%06d' % i
            i += 1
            self.id_maps[new_id] = cid
            content.change_id(new_id)
            new_fpath = content.copy_to(self.input_dir)
        else:
            new_fpath = content.copy_to(self.input_dir)
        self.num_input += 1
        logger.debug('%s saved for reading by reach.' % new_fpath)
    return
Prepare the content objects as input files for the reader.
17,449
def get_output(self):
    """Collect the REACH output json files and return the results."""
    logger.info("Getting outputs.")
    # REACH writes three json files (entities, events, sentences) per
    # input, so group the files by their shared prefix.
    json_files = glob.glob(path.join(self.output_dir, '*.json'))
    json_prefixes = set()
    for json_file in json_files:
        prefix = '.'.join(path.basename(json_file).split('.')[:-3])
        json_prefixes.add(path.join(self.output_dir, prefix))
    for prefix in json_prefixes:
        base_prefix = path.basename(prefix)
        if base_prefix.isdecimal():
            base_prefix = int(base_prefix)
        elif base_prefix in self.id_maps:
            # Map generated FILE ids back to the original content id.
            base_prefix = self.id_maps[base_prefix]
        try:
            content = self._join_json_files(prefix, clear=True)
        except Exception as e:
            logger.exception(e)
            logger.error("Could not load result for prefix %s." % prefix)
            content = None
        self.add_result(base_prefix, content)
        logger.debug('Joined files for prefix %s.' % base_prefix)
    return self.results
Get the output of a reading job as a list of filenames .
17,450
def read ( self , read_list , verbose = False , log = False ) : ret = [ ] mem_tot = _get_mem_total ( ) if mem_tot is not None and mem_tot <= self . REACH_MEM + self . MEM_BUFFER : logger . error ( "Too little memory to run reach. At least %s required." % ( self . REACH_MEM + self . MEM_BUFFER ) ) logger . info ( "REACH not run." ) return ret self . prep_input ( read_list ) if self . num_input > 0 : logger . info ( "Beginning reach." ) args = [ 'java' , '-Dconfig.file=%s' % self . conf_file_path , '-jar' , self . exec_path ] p = subprocess . Popen ( args , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) log_file_str = '' for line in iter ( p . stdout . readline , b'' ) : log_line = 'REACH: ' + line . strip ( ) . decode ( 'utf8' ) if verbose : logger . info ( log_line ) if log : log_file_str += log_line + '\n' if log : with open ( 'reach_run.log' , 'ab' ) as f : f . write ( log_file_str . encode ( 'utf8' ) ) p_out , p_err = p . communicate ( ) if p . returncode : logger . error ( 'Problem running REACH:' ) logger . error ( 'Stdout: %s' % p_out . decode ( 'utf-8' ) ) logger . error ( 'Stderr: %s' % p_err . decode ( 'utf-8' ) ) raise ReachError ( "Problem running REACH" ) logger . info ( "Reach finished." ) ret = self . get_output ( ) self . clear_input ( ) return ret
Read the content returning a list of ReadingData objects .
17,451
def prep_input ( self , read_list ) : "Prepare the list of files or text content objects to be read." logger . info ( 'Prepping input for sparser.' ) self . file_list = [ ] for content in read_list : quality_issue = self . _check_content ( content . get_text ( ) ) if quality_issue is not None : logger . warning ( "Skipping %d due to: %s" % ( content . get_id ( ) , quality_issue ) ) continue if content . is_format ( 'nxml' ) : if not content . get_filename ( ) . startswith ( 'PMC' ) : content . change_id ( 'PMC' + str ( content . get_id ( ) ) ) fpath = content . copy_to ( self . tmp_dir ) self . file_list . append ( fpath ) elif content . is_format ( 'txt' , 'text' ) : nxml_str = sparser . make_nxml_from_text ( content . get_text ( ) ) new_content = Content . from_string ( 'PMC' + str ( content . get_id ( ) ) , 'nxml' , nxml_str ) fpath = new_content . copy_to ( self . tmp_dir ) self . file_list . append ( fpath ) else : raise SparserError ( "Unrecognized format %s." % content . format ) return
Prepare the list of files or text content objects to be read .
17,452
def get_output ( self , output_files , clear = True ) : "Get the output files as an id indexed dict." patt = re . compile ( r'(.*?)-semantics.*?' ) for outpath in output_files : if outpath is None : logger . warning ( "Found outpath with value None. Skipping." ) continue re_out = patt . match ( path . basename ( outpath ) ) if re_out is None : raise SparserError ( "Could not get prefix from output path %s." % outpath ) prefix = re_out . groups ( ) [ 0 ] if prefix . startswith ( 'PMC' ) : prefix = prefix [ 3 : ] if prefix . isdecimal ( ) : prefix = int ( prefix ) try : with open ( outpath , 'rt' ) as f : content = json . load ( f ) except Exception as e : logger . exception ( e ) logger . error ( "Could not load reading content from %s." % outpath ) content = None self . add_result ( prefix , content ) if clear : input_path = outpath . replace ( '-semantics.json' , '.nxml' ) try : remove ( outpath ) remove ( input_path ) except Exception as e : logger . exception ( e ) logger . error ( "Could not remove sparser files %s and %s." % ( outpath , input_path ) ) return self . results
Get the output files as an id indexed dict .
17,453
def read_some ( self , fpath_list , outbuf = None , verbose = False ) : "Perform a few readings." outpath_list = [ ] for fpath in fpath_list : output , outbuf = self . read_one ( fpath , outbuf , verbose ) if output is not None : outpath_list . append ( output ) return outpath_list , outbuf
Perform a few readings .
17,454
def read ( self , read_list , verbose = False , log = False , n_per_proc = None ) : "Perform the actual reading." ret = [ ] self . prep_input ( read_list ) L = len ( self . file_list ) if L == 0 : return ret logger . info ( "Beginning to run sparser." ) output_file_list = [ ] if log : log_name = 'sparser_run_%s.log' % _time_stamp ( ) outbuf = open ( log_name , 'wb' ) else : outbuf = None try : if self . n_proc == 1 : for fpath in self . file_list : outpath , _ = self . read_one ( fpath , outbuf , verbose ) if outpath is not None : output_file_list . append ( outpath ) else : if n_per_proc is None : n_per_proc = max ( 1 , min ( 1000 , L // self . n_proc // 2 ) ) pool = None try : pool = Pool ( self . n_proc ) if n_per_proc is not 1 : batches = [ self . file_list [ n * n_per_proc : ( n + 1 ) * n_per_proc ] for n in range ( L // n_per_proc + 1 ) ] out_lists_and_buffs = pool . map ( self . read_some , batches ) else : out_files_and_buffs = pool . map ( self . read_one , self . file_list ) out_lists_and_buffs = [ ( [ out_files ] , buffs ) for out_files , buffs in out_files_and_buffs ] finally : if pool is not None : pool . close ( ) pool . join ( ) for i , ( out_list , buff ) in enumerate ( out_lists_and_buffs ) : if out_list is not None : output_file_list += out_list if log : outbuf . write ( b'Log for producing output %d/%d.\n' % ( i , len ( out_lists_and_buffs ) ) ) if buff is not None : buff . seek ( 0 ) outbuf . write ( buff . read ( ) + b'\n' ) else : outbuf . write ( b'ERROR: no buffer was None. ' b'No logs available.\n' ) outbuf . flush ( ) finally : if log : outbuf . close ( ) if verbose : logger . info ( "Sparser logs may be found at %s." % log_name ) ret = self . get_output ( output_file_list ) return ret
Perform the actual reading .
17,455
def process_text ( text , pmid = None , cleanup = True , add_grounding = True ) : pp_dir = tempfile . mkdtemp ( 'indra_isi_pp_output' ) pp = IsiPreprocessor ( pp_dir ) extra_annotations = { } pp . preprocess_plain_text_string ( text , pmid , extra_annotations ) ip = process_preprocessed ( pp ) if add_grounding : ip . add_grounding ( ) if cleanup : shutil . rmtree ( pp_dir ) else : logger . info ( 'Not cleaning up %s' % pp_dir ) return ip
Process a string using the ISI reader and extract INDRA statements .
17,456
def process_nxml ( nxml_filename , pmid = None , extra_annotations = None , cleanup = True , add_grounding = True ) : if extra_annotations is None : extra_annotations = { } pp_dir = tempfile . mkdtemp ( 'indra_isi_pp_output' ) pp = IsiPreprocessor ( pp_dir ) extra_annotations = { } pp . preprocess_nxml_file ( nxml_filename , pmid , extra_annotations ) ip = process_preprocessed ( pp ) if add_grounding : ip . add_grounding ( ) if cleanup : shutil . rmtree ( pp_dir ) else : logger . info ( 'Not cleaning up %s' % pp_dir ) return ip
Process an NXML file using the ISI reader
17,457
def process_output_folder ( folder_path , pmids = None , extra_annotations = None , add_grounding = True ) : pmids = pmids if pmids is not None else { } extra_annotations = extra_annotations if extra_annotations is not None else { } ips = [ ] for entry in glob . glob ( os . path . join ( folder_path , '*.json' ) ) : entry_key = os . path . splitext ( os . path . basename ( entry ) ) [ 0 ] pmid = pmids . get ( entry_key ) extra_annotation = extra_annotations . get ( entry_key ) ip = process_json_file ( entry , pmid , extra_annotation , False ) ips . append ( ip ) if len ( ips ) > 1 : for ip in ips [ 1 : ] : ips [ 0 ] . statements += ip . statements if ips : if add_grounding : ips [ 0 ] . add_grounding ( ) return ips [ 0 ] else : return None
Recursively extracts statements from all ISI output files in the given directory and subdirectories .
17,458
def process_json_file ( file_path , pmid = None , extra_annotations = None , add_grounding = True ) : logger . info ( 'Extracting from %s' % file_path ) with open ( file_path , 'rb' ) as fh : jd = json . load ( fh ) ip = IsiProcessor ( jd , pmid , extra_annotations ) ip . get_statements ( ) if add_grounding : ip . add_grounding ( ) return ip
Extracts statements from the given ISI output file .
17,459
def process_text ( text , save_xml = 'cwms_output.xml' ) : xml = client . send_query ( text , 'cwmsreader' ) first_end = xml . find ( '</ekb>' ) second_start = xml . find ( '<ekb' , first_end ) second_end = xml . find ( '</ekb>' , second_start ) second_ekb = xml [ second_start : second_end + len ( '</ekb>' ) ] if save_xml : with open ( save_xml , 'wb' ) as fh : fh . write ( second_ekb . encode ( 'utf-8' ) ) return process_ekb ( second_ekb )
Processes text using the CWMS web service .
17,460
def process_ekb_file ( fname ) : with open ( fname , 'rb' ) as fh : ekb_str = fh . read ( ) . decode ( 'utf-8' ) return process_ekb ( ekb_str )
Processes an EKB file produced by CWMS .
17,461
def im_json_to_graph ( im_json ) : imap_data = im_json [ 'influence map' ] [ 'map' ] graph = MultiDiGraph ( ) id_node_dict = { } for node_dict in imap_data [ 'nodes' ] : node_type , node = list ( node_dict . items ( ) ) [ 0 ] attrs = { 'fillcolor' : '#b7d2ff' if node_type == 'rule' else '#cdffc9' , 'shape' : 'box' if node_type == 'rule' else 'oval' , 'style' : 'filled' } graph . add_node ( node [ 'label' ] , node_type = node_type , ** attrs ) new_key = '%s%s' % ( node_type , node [ 'id' ] ) id_node_dict [ new_key ] = node [ 'label' ] def add_edges ( link_list , edge_sign ) : attrs = { 'sign' : edge_sign , 'color' : 'green' if edge_sign == 1 else 'red' , 'arrowhead' : 'normal' if edge_sign == 1 else 'tee' } for link_dict in link_list : source = link_dict [ 'source' ] for target_dict in link_dict [ 'target map' ] : target = target_dict [ 'target' ] src_id = '%s%s' % list ( source . items ( ) ) [ 0 ] tgt_id = '%s%s' % list ( target . items ( ) ) [ 0 ] graph . add_edge ( id_node_dict [ src_id ] , id_node_dict [ tgt_id ] , ** attrs ) add_edges ( imap_data [ 'wake-up map' ] , 1 ) add_edges ( imap_data [ 'inhibition map' ] , - 1 ) return graph
Return networkx graph from Kappy s influence map JSON .
17,462
def cm_json_to_graph ( im_json ) : cmap_data = im_json [ 'contact map' ] [ 'map' ] graph = AGraph ( ) edges = [ ] for node_idx , node in enumerate ( cmap_data ) : sites_in_node = [ ] for site_idx , site in enumerate ( node [ 'node_sites' ] ) : site_key = ( node_idx , site_idx ) sites_in_node . append ( site_key ) graph . add_node ( site_key , label = site [ 'site_name' ] , style = 'filled' , shape = 'ellipse' ) if not site [ 'site_type' ] or not site [ 'site_type' ] [ 0 ] == 'port' : continue for port_link in site [ 'site_type' ] [ 1 ] [ 'port_links' ] : edge = ( site_key , tuple ( port_link ) ) edges . append ( edge ) graph . add_subgraph ( sites_in_node , name = 'cluster_%s' % node [ 'node_type' ] , label = node [ 'node_type' ] ) for source , target in edges : graph . add_edge ( source , target ) return graph
Return pygraphviz Agraph from Kappy s contact map JSON .
17,463
def fetch_email ( M , msg_id ) : res , data = M . fetch ( msg_id , '(RFC822)' ) if res == 'OK' : raw_msg_txt = data [ 0 ] [ 1 ] try : msg = email . message_from_bytes ( raw_msg_txt ) except AttributeError : msg = email . message_from_string ( raw_msg_txt ) return msg else : return None
Returns the given email message as a unicode string .
17,464
def get_headers ( msg ) : headers = { } for k in msg . keys ( ) : ( header_txt , charset ) = email . header . decode_header ( msg [ k ] ) [ 0 ] if charset is not None : header_txt = header_txt . decode ( charset ) headers [ k ] = header_txt return headers
Takes email . message . Message object initialized from unicode string returns dict with header fields .
17,465
def populate_config_dict ( config_path ) : try : config_dict = { } parser = RawConfigParser ( ) parser . optionxform = lambda x : x parser . read ( config_path ) sections = parser . sections ( ) for section in sections : options = parser . options ( section ) for option in options : config_dict [ option ] = str ( parser . get ( section , option ) ) except Exception as e : logger . warning ( "Could not load configuration file due to exception. " "Only environment variable equivalents will be used." ) return None for key in config_dict . keys ( ) : if config_dict [ key ] == '' : config_dict [ key ] = None elif isinstance ( config_dict [ key ] , str ) : config_dict [ key ] = os . path . expanduser ( config_dict [ key ] ) return config_dict
Load the configuration file into the config_file dictionary
17,466
def get_config ( key , failure_ok = True ) : err_msg = "Key %s not in environment or config file." % key if key in os . environ : return os . environ [ key ] elif key in CONFIG_DICT : val = CONFIG_DICT [ key ] if val is None and not failure_ok : msg = 'Key %s is set to an empty value in config file.' % key raise IndraConfigError ( msg ) else : return val elif not failure_ok : raise IndraConfigError ( err_msg ) else : logger . warning ( err_msg ) return None
Get value by key from config file or environment .
17,467
def read_unicode_csv_fileobj ( fileobj , delimiter = ',' , quotechar = '"' , quoting = csv . QUOTE_MINIMAL , lineterminator = '\n' , encoding = 'utf-8' , skiprows = 0 ) : if sys . version_info [ 0 ] >= 3 : csv_reader = csv . reader ( fileobj , delimiter = delimiter , quotechar = quotechar , quoting = quoting , lineterminator = lineterminator ) for skip_ix in range ( skiprows ) : next ( csv_reader ) for row in csv_reader : yield row else : csv_reader = csv . reader ( fileobj , delimiter = delimiter . encode ( encoding ) , quotechar = quotechar . encode ( encoding ) , quoting = quoting , lineterminator = lineterminator ) for skip_ix in range ( skiprows ) : next ( csv_reader ) for row in csv_reader : yield [ cell . decode ( encoding ) for cell in row ]
fileobj can be a StringIO in Py3 but should be a BytesIO in Py2 .
17,468
def fast_deepcopy ( obj ) : with BytesIO ( ) as buf : pickle . dump ( obj , buf ) buf . seek ( 0 ) obj_new = pickle . load ( buf ) return obj_new
This is a faster implementation of deepcopy via pickle .
17,469
def batch_iter ( iterator , batch_size , return_func = None , padding = None ) : for batch in zip_longest ( * [ iter ( iterator ) ] * batch_size , fillvalue = padding ) : gen = ( thing for thing in batch if thing is not padding ) if return_func is None : yield gen else : yield return_func ( gen )
Break an iterable into batches of size batch_size
17,470
def read_pmid_sentences ( pmid_sentences , ** drum_args ) : def _set_pmid ( statements , pmid ) : for stmt in statements : for evidence in stmt . evidence : evidence . pmid = pmid run_drum = drum_args . get ( 'run_drum' , False ) drum_process = None all_statements = { } for pmid , sentences in pmid_sentences . items ( ) : logger . info ( '================================' ) logger . info ( 'Processing %d sentences for %s' % ( len ( sentences ) , pmid ) ) ts = time . time ( ) drum_args [ 'name' ] = 'DrumReader%s' % pmid dr = DrumReader ( ** drum_args ) time . sleep ( 3 ) if run_drum and drum_process is None : drum_args . pop ( 'run_drum' , None ) drum_process = dr . drum_system drum_args [ 'drum_system' ] = drum_process for sentence in sentences : dr . read_text ( sentence ) try : dr . start ( ) except SystemExit : pass statements = [ ] for extraction in dr . extractions : if not extraction : continue tp = process_xml ( extraction ) statements += tp . statements _set_pmid ( statements , pmid ) te = time . time ( ) logger . info ( 'Reading took %d seconds and produced %d Statements.' % ( te - ts , len ( statements ) ) ) all_statements [ pmid ] = statements if drum_process and dr . drum_system : dr . _kill_drum ( ) return all_statements
Read sentences from a PMID - keyed dictonary and return all Statements
17,471
def graph_query ( kind , source , target = None , neighbor_limit = 1 , database_filter = None ) : default_databases = [ 'wp' , 'smpdb' , 'reconx' , 'reactome' , 'psp' , 'pid' , 'panther' , 'netpath' , 'msigdb' , 'mirtarbase' , 'kegg' , 'intact' , 'inoh' , 'humancyc' , 'hprd' , 'drugbank' , 'dip' , 'corum' ] if not database_filter : query_databases = default_databases else : query_databases = database_filter params = { } params [ 'format' ] = 'BIOPAX' params [ 'organism' ] = '9606' params [ 'datasource' ] = query_databases kind_str = kind . lower ( ) if kind not in [ 'neighborhood' , 'pathsbetween' , 'pathsfromto' ] : logger . warn ( 'Invalid query type %s' % kind_str ) return None params [ 'kind' ] = kind_str if isinstance ( source , basestring ) : source_str = source else : source_str = ',' . join ( source ) params [ 'source' ] = source_str try : neighbor_limit = int ( neighbor_limit ) params [ 'limit' ] = neighbor_limit except ( TypeError , ValueError ) : logger . warn ( 'Invalid neighborhood limit %s' % neighbor_limit ) return None if target is not None : if isinstance ( target , basestring ) : target_str = target else : target_str = ',' . join ( target ) params [ 'target' ] = target_str logger . info ( 'Sending Pathway Commons query with parameters: ' ) for k , v in params . items ( ) : logger . info ( ' %s: %s' % ( k , v ) ) logger . info ( 'Sending Pathway Commons query...' ) res = requests . get ( pc2_url + 'graph' , params = params ) if not res . status_code == 200 : logger . error ( 'Response is HTTP code %d.' % res . status_code ) if res . status_code == 500 : logger . error ( 'Note: HTTP code 500 can mean empty ' 'results for a valid query.' ) return None model = owl_str_to_model ( res . content ) if model is not None : logger . info ( 'Pathway Commons query returned a model...' ) return model
Perform a graph query on PathwayCommons .
17,472
def owl_str_to_model ( owl_str ) : io_class = autoclass ( 'org.biopax.paxtools.io.SimpleIOHandler' ) io = io_class ( autoclass ( 'org.biopax.paxtools.model.BioPAXLevel' ) . L3 ) bais = autoclass ( 'java.io.ByteArrayInputStream' ) scs = autoclass ( 'java.nio.charset.StandardCharsets' ) jstr = autoclass ( 'java.lang.String' ) istream = bais ( owl_str ) biopax_model = io . convertFromOWL ( istream ) return biopax_model
Return a BioPAX model object from an OWL string .
17,473
def owl_to_model ( fname ) : io_class = autoclass ( 'org.biopax.paxtools.io.SimpleIOHandler' ) io = io_class ( autoclass ( 'org.biopax.paxtools.model.BioPAXLevel' ) . L3 ) try : file_is = autoclass ( 'java.io.FileInputStream' ) ( fname ) except JavaException : logger . error ( 'Could not open data file %s' % fname ) return try : biopax_model = io . convertFromOWL ( file_is ) except JavaException as e : logger . error ( 'Could not convert data file %s to BioPax model' % fname ) logger . error ( e ) return file_is . close ( ) return biopax_model
Return a BioPAX model object from an OWL file .
17,474
def model_to_owl ( model , fname ) : io_class = autoclass ( 'org.biopax.paxtools.io.SimpleIOHandler' ) io = io_class ( autoclass ( 'org.biopax.paxtools.model.BioPAXLevel' ) . L3 ) try : fileOS = autoclass ( 'java.io.FileOutputStream' ) ( fname ) except JavaException : logger . error ( 'Could not open data file %s' % fname ) return l3_factory = autoclass ( 'org.biopax.paxtools.model.BioPAXLevel' ) . L3 . getDefaultFactory ( ) model_out = l3_factory . createModel ( ) for r in model . getObjects ( ) . toArray ( ) : model_out . add ( r ) io . convertToOWL ( model_out , fileOS ) fileOS . close ( )
Save a BioPAX model object as an OWL file .
17,475
def make_model ( self , * args , ** kwargs ) : for stmt in self . statements : if isinstance ( stmt , RegulateActivity ) : self . _add_regulate_activity ( stmt ) elif isinstance ( stmt , RegulateAmount ) : self . _add_regulate_amount ( stmt ) elif isinstance ( stmt , Modification ) : self . _add_modification ( stmt ) elif isinstance ( stmt , SelfModification ) : self . _add_selfmodification ( stmt ) elif isinstance ( stmt , Gef ) : self . _add_gef ( stmt ) elif isinstance ( stmt , Gap ) : self . _add_gap ( stmt ) elif isinstance ( stmt , Complex ) : self . _add_complex ( stmt ) else : logger . warning ( 'Unhandled statement type: %s' % stmt . __class__ . __name__ ) if kwargs . get ( 'grouping' ) : self . _group_nodes ( ) self . _group_edges ( ) return self . print_cyjs_graph ( )
Assemble a Cytoscape JS network from INDRA Statements .
17,476
def get_gene_names ( self ) : gene_names = [ ] for node in self . _nodes : members = node [ 'data' ] . get ( 'members' ) if members : gene_names += list ( members . keys ( ) ) else : if node [ 'data' ] [ 'name' ] . startswith ( 'Group' ) : continue gene_names . append ( node [ 'data' ] [ 'name' ] ) self . _gene_names = gene_names
Gather gene names of all nodes and node members
17,477
def set_CCLE_context ( self , cell_types ) : self . get_gene_names ( ) exp_values = context_client . get_protein_expression ( self . _gene_names , cell_types ) mut_values = context_client . get_mutations ( self . _gene_names , cell_types ) muts = { cell_line : { } for cell_line in cell_types } for cell_line , entries in mut_values . items ( ) : if entries is not None : for gene , mutations in entries . items ( ) : if mutations : muts [ cell_line ] [ gene ] = 1 else : muts [ cell_line ] [ gene ] = 0 def bin_exp ( expression_dict ) : d = expression_dict exp_values = [ ] for line in d : for gene in d [ line ] : val = d [ line ] [ gene ] if val is not None : exp_values . append ( val ) thr_dict = { } for n_bins in range ( 3 , 10 ) : bin_thr = np . histogram ( np . log10 ( exp_values ) , n_bins ) [ 1 ] [ 1 : ] thr_dict [ n_bins ] = bin_thr binned_dict = { x : deepcopy ( expression_dict ) for x in range ( 3 , 10 ) } for n_bins in binned_dict : for line in binned_dict [ n_bins ] : for gene in binned_dict [ n_bins ] [ line ] : if binned_dict [ n_bins ] [ line ] [ gene ] is None : binned_dict [ n_bins ] [ line ] [ gene ] = n_bins else : val = np . log10 ( binned_dict [ n_bins ] [ line ] [ gene ] ) for thr_idx , thr in enumerate ( thr_dict [ n_bins ] ) : if val <= thr : binned_dict [ n_bins ] [ line ] [ gene ] = thr_idx break return binned_dict binned_exp = bin_exp ( exp_values ) context = { 'bin_expression' : binned_exp , 'mutation' : muts } self . _context [ 'CCLE' ] = context
Set context of all nodes and node members from CCLE .
17,478
def print_cyjs_graph ( self ) : cyjs_dict = { 'edges' : self . _edges , 'nodes' : self . _nodes } cyjs_str = json . dumps ( cyjs_dict , indent = 1 , sort_keys = True ) return cyjs_str
Return the assembled Cytoscape JS network as a json string .
17,479
def print_cyjs_context ( self ) : context = self . _context context_str = json . dumps ( context , indent = 1 , sort_keys = True ) return context_str
Return a list of node names and their respective context .
17,480
def save_json ( self , fname_prefix = 'model' ) : cyjs_str = self . print_cyjs_graph ( ) with open ( fname_prefix + '.json' , 'wb' ) as fh : fh . write ( cyjs_str . encode ( 'utf-8' ) ) context_str = self . print_cyjs_context ( ) with open ( fname_prefix + '_context.json' , 'wb' ) as fh : fh . write ( context_str . encode ( 'utf-8' ) )
Save the assembled Cytoscape JS network in a json file .
17,481
def save_model ( self , fname = 'model.js' ) : exp_colorscale_str = json . dumps ( self . _exp_colorscale ) mut_colorscale_str = json . dumps ( self . _mut_colorscale ) cyjs_dict = { 'edges' : self . _edges , 'nodes' : self . _nodes } model_str = json . dumps ( cyjs_dict , indent = 1 , sort_keys = True ) model_dict = { 'exp_colorscale_str' : exp_colorscale_str , 'mut_colorscale_str' : mut_colorscale_str , 'model_elements_str' : model_str } s = '' s += 'var exp_colorscale = %s;\n' % model_dict [ 'exp_colorscale_str' ] s += 'var mut_colorscale = %s;\n' % model_dict [ 'mut_colorscale_str' ] s += 'var model_elements = %s;\n' % model_dict [ 'model_elements_str' ] with open ( fname , 'wb' ) as fh : fh . write ( s . encode ( 'utf-8' ) )
Save the assembled Cytoscape JS network in a js file .
17,482
def _get_edge_dict ( self ) : edge_dict = collections . defaultdict ( lambda : [ ] ) if len ( self . _edges ) > 0 : for e in self . _edges : data = e [ 'data' ] key = tuple ( [ data [ 'i' ] , data [ 'source' ] , data [ 'target' ] , data [ 'polarity' ] ] ) edge_dict [ key ] = data [ 'id' ] return edge_dict
Return a dict of edges .
17,483
def _get_node_key ( self , node_dict_item ) : s = tuple ( sorted ( node_dict_item [ 'sources' ] ) ) t = tuple ( sorted ( node_dict_item [ 'targets' ] ) ) return ( s , t )
Return a tuple of sorted sources and targets given a node dict .
17,484
def _get_node_groups ( self ) : node_dict = { node [ 'data' ] [ 'id' ] : { 'sources' : [ ] , 'targets' : [ ] } for node in self . _nodes } for edge in self . _edges : edge_data = ( edge [ 'data' ] [ 'i' ] , edge [ 'data' ] [ 'polarity' ] , edge [ 'data' ] [ 'source' ] ) node_dict [ edge [ 'data' ] [ 'target' ] ] [ 'sources' ] . append ( edge_data ) edge_data = ( edge [ 'data' ] [ 'i' ] , edge [ 'data' ] [ 'polarity' ] , edge [ 'data' ] [ 'target' ] ) node_dict [ edge [ 'data' ] [ 'source' ] ] [ 'targets' ] . append ( edge_data ) node_key_dict = collections . defaultdict ( lambda : [ ] ) for node_id , node_d in node_dict . items ( ) : key = self . _get_node_key ( node_d ) node_key_dict [ key ] . append ( node_id ) node_groups = [ g for g in node_key_dict . values ( ) if ( len ( g ) > 1 ) ] return node_groups
Return a list of node id lists that are topologically identical .
17,485
def _group_edges ( self ) : edges_to_add = [ [ ] , [ ] ] for e in self . _edges : new_edge = deepcopy ( e ) new_edge [ 'data' ] . pop ( 'id' , None ) uuid_list = new_edge [ 'data' ] . pop ( 'uuid_list' , [ ] ) source = e [ 'data' ] [ 'source' ] target = e [ 'data' ] [ 'target' ] source_node = [ x for x in self . _nodes if x [ 'data' ] [ 'id' ] == source ] [ 0 ] target_node = [ x for x in self . _nodes if x [ 'data' ] [ 'id' ] == target ] [ 0 ] if source_node [ 'data' ] [ 'parent' ] != '' : new_edge [ 'data' ] [ 'source' ] = source_node [ 'data' ] [ 'parent' ] e [ 'data' ] [ 'i' ] = 'Virtual' if target_node [ 'data' ] [ 'parent' ] != '' : new_edge [ 'data' ] [ 'target' ] = target_node [ 'data' ] [ 'parent' ] e [ 'data' ] [ 'i' ] = 'Virtual' if e [ 'data' ] [ 'i' ] == 'Virtual' : if new_edge not in edges_to_add [ 0 ] : edges_to_add [ 0 ] . append ( new_edge ) edges_to_add [ 1 ] . append ( uuid_list ) else : idx = edges_to_add [ 0 ] . index ( new_edge ) edges_to_add [ 1 ] [ idx ] += uuid_list edges_to_add [ 1 ] [ idx ] = list ( set ( edges_to_add [ 1 ] [ idx ] ) ) for ze in zip ( * edges_to_add ) : edge = ze [ 0 ] edge [ 'data' ] [ 'id' ] = self . _get_new_id ( ) edge [ 'data' ] [ 'uuid_list' ] = ze [ 1 ] self . _edges . append ( edge )
Group all edges that are topologically identical .
17,486
def make_stmt ( stmt_cls , tf_agent , target_agent , pmid ) : ev = Evidence ( source_api = 'trrust' , pmid = pmid ) return stmt_cls ( deepcopy ( tf_agent ) , deepcopy ( target_agent ) , evidence = [ ev ] )
Return a Statement based on its type agents and PMID .
17,487
def get_grounded_agent ( gene_name ) : db_refs = { 'TEXT' : gene_name } if gene_name in hgnc_map : gene_name = hgnc_map [ gene_name ] hgnc_id = hgnc_client . get_hgnc_id ( gene_name ) if hgnc_id : db_refs [ 'HGNC' ] = hgnc_id up_id = hgnc_client . get_uniprot_id ( hgnc_id ) if up_id : db_refs [ 'UP' ] = up_id agent = Agent ( gene_name , db_refs = db_refs ) return agent
Return a grounded Agent based on an HGNC symbol .
17,488
def extract_statements ( self ) : for _ , ( tf , target , effect , refs ) in self . df . iterrows ( ) : tf_agent = get_grounded_agent ( tf ) target_agent = get_grounded_agent ( target ) if effect == 'Activation' : stmt_cls = IncreaseAmount elif effect == 'Repression' : stmt_cls = DecreaseAmount else : continue pmids = refs . split ( ';' ) for pmid in pmids : stmt = make_stmt ( stmt_cls , tf_agent , target_agent , pmid ) self . statements . append ( stmt )
Process the table to extract Statements .
17,489
def process_paper ( model_name , pmid ) : json_directory = os . path . join ( model_name , 'jsons' ) json_path = os . path . join ( json_directory , 'PMID%s.json' % pmid ) if pmid . startswith ( 'api' ) or pmid . startswith ( 'PMID' ) : logger . warning ( 'Invalid PMID: %s' % pmid ) if os . path . exists ( json_path ) : rp = reach . process_json_file ( json_path , citation = pmid ) txt_format = 'existing_json' else : try : txt , txt_format = get_full_text ( pmid , 'pmid' ) except Exception : return None , None if txt_format == 'pmc_oa_xml' : rp = reach . process_nxml_str ( txt , citation = pmid , offline = True , output_fname = json_path ) elif txt_format == 'elsevier_xml' : txt = elsevier_client . extract_text ( txt ) rp = reach . process_text ( txt , citation = pmid , offline = True , output_fname = json_path ) elif txt_format == 'abstract' : rp = reach . process_text ( txt , citation = pmid , offline = True , output_fname = json_path ) else : rp = None if rp is not None : check_pmids ( rp . statements ) return rp , txt_format
Process a paper with the given pubmed identifier
17,490
def process_paper_helper ( model_name , pmid , start_time_local ) : try : if not aws_available : rp , txt_format = process_paper ( model_name , pmid ) else : rp , txt_format = process_paper_aws ( pmid , start_time_local ) except : logger . exception ( 'uncaught exception while processing %s' , pmid ) return None , None return rp , txt_format
Wraps processing a paper by either a local or remote service and caches any uncaught exceptions
17,491
def _load_data ( ) : csv_path = path . join ( HERE , path . pardir , path . pardir , 'resources' , DATAFILE_NAME ) data_iter = list ( read_unicode_csv ( csv_path ) ) headers = data_iter [ 0 ] headers [ headers . index ( 'Approved.Symbol' ) ] = 'approved_symbol' return [ { header : val for header , val in zip ( headers , line ) } for line in data_iter [ 1 : ] ]
Load the data from the csv in data .
17,492
def run_eidos ( endpoint , * args ) : call_class = '%s.%s' % ( eidos_package , endpoint ) cmd = [ 'java' , '-Xmx12G' , '-cp' , eip , call_class ] + list ( args ) logger . info ( 'Running Eidos with command "%s"' % ( ' ' . join ( cmd ) ) ) subprocess . call ( cmd )
Run a given enpoint of Eidos through the command line .
17,493
def extract_from_directory ( path_in , path_out ) : path_in = os . path . realpath ( os . path . expanduser ( path_in ) ) path_out = os . path . realpath ( os . path . expanduser ( path_out ) ) logger . info ( 'Running Eidos on input folder %s' % path_in ) run_eidos ( 'apps.ExtractFromDirectory' , path_in , path_out )
Run Eidos on a set of text files in a folder .
17,494
def extract_and_process ( path_in , path_out ) : path_in = os . path . realpath ( os . path . expanduser ( path_in ) ) path_out = os . path . realpath ( os . path . expanduser ( path_out ) ) extract_from_directory ( path_in , path_out ) jsons = glob . glob ( os . path . join ( path_out , '*.jsonld' ) ) logger . info ( 'Found %d JSON-LD files to process in %s' % ( len ( jsons ) , path_out ) ) stmts = [ ] for json in jsons : ep = process_json_file ( json ) if ep : stmts += ep . statements return stmts
Run Eidos on a set of text files and process output with INDRA .
17,495
def get_statements ( subject = None , object = None , agents = None , stmt_type = None , use_exact_type = False , persist = True , timeout = None , simple_response = False , ev_limit = 10 , best_first = True , tries = 2 , max_stmts = None ) : processor = IndraDBRestProcessor ( subject , object , agents , stmt_type , use_exact_type , persist , timeout , ev_limit , best_first , tries , max_stmts ) if simple_response : ret = processor . statements else : ret = processor return ret
Get a processor for the INDRA DB web API matching given agents and type .
17,496
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2):
    """Get fully formed statements from a list of hashes.

    Parameters
    ----------
    hash_list : list[int] or list[str]
        Statement hashes; strings are converted to ints before submission.
    ev_limit : int
        Maximum number of evidences returned per statement.
    best_first : bool
        Whether the results are sorted with the best (most evidence) first.
    tries : int
        Number of times the request is retried on failure.

    Returns
    -------
    list
        The INDRA Statements corresponding to the given hashes.

    Raises
    ------
    ValueError
        If `hash_list` is not a list, or its entries cannot be treated
        as ints.
    """
    if not isinstance(hash_list, list):
        # Fixed message: the original said "is a list" which stated the
        # opposite of the failure condition.
        raise ValueError("The `hash_list` input must be a list, not %s."
                         % type(hash_list))
    if not hash_list:
        # Nothing to look up; avoid a pointless network round trip.
        return []
    if isinstance(hash_list[0], str):
        hash_list = [int(h) for h in hash_list]
    if not all(isinstance(h, int) for h in hash_list):
        raise ValueError("Hashes must be ints or strings that can be "
                         "converted into ints.")
    resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit,
                                    data={'hashes': hash_list},
                                    best_first=best_first, tries=tries)
    return stmts_from_json(resp.json()['statements'].values())
Get fully formed statements from a list of hashes .
17,497
def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2,
                             max_stmts=None):
    """Get the set of raw Statements extracted from a paper given by the id.

    Parameters
    ----------
    ids : iterable of (str, str) tuples
        Pairs of (id_type, id_value) identifying the paper(s).

    Returns
    -------
    list
        The INDRA Statements extracted from the given paper(s).
    """
    # Repackage the (type, value) pairs into the dict shape the API expects.
    id_dicts = [{'id': id_val, 'type': id_type} for id_type, id_val in ids]
    resp = submit_statement_request('post', 'from_papers',
                                    data={'ids': id_dicts},
                                    ev_limit=ev_limit, best_first=best_first,
                                    tries=tries, max_stmts=max_stmts)
    return stmts_from_json(resp.json()['statements'].values())
Get the set of raw Statements extracted from a paper given by the id .
17,498
def submit_curation(hash_val, tag, curator, text=None,
                    source='indra_rest_client', ev_hash=None, is_test=False):
    """Submit a curation for the given statement at the relevant level.

    Parameters
    ----------
    hash_val : int or str
        Hash of the statement being curated.
    tag : str
        The curation tag to apply.
    curator : str
        Identifier of the person submitting the curation.
    text : str, optional
        Free-text comment accompanying the curation.
    source : str
        Label identifying the submitting client.
    ev_hash : optional
        Hash of the specific evidence being curated, if any.
    is_test : bool
        If True, submit in test mode (no permanent record).
    """
    payload = {'tag': tag, 'text': text, 'curator': curator,
               'source': source, 'ev_hash': ev_hash}
    # Test submissions are flagged through the query string.
    qstr = '?test' if is_test else ''
    return make_db_rest_request('post', 'curation/submit/%s' % hash_val,
                                qstr, data=payload)
Submit a curation for the given statement at the relevant level .
17,499
def get_statement_queries(stmts, **params):
    """Get queries used to search based on a statement.

    Parameters
    ----------
    stmts : list
        INDRA Statements to turn into search query URLs.
    **params
        Extra key/value pairs appended to each query string.

    Returns
    -------
    list of str
        One query URL per input statement.
    """

    def pick_ns(ag):
        # Prefer the first grounding namespace in priority order; fall
        # back to the agent's name as raw text.
        for ns in ['HGNC', 'FPLX', 'CHEMBL', 'CHEBI', 'GO', 'MESH']:
            if ns in ag.db_refs.keys():
                return '%s@%s' % (ag.db_refs[ns], ns)
        return '%s@%s' % (ag.name, 'TEXT')

    url_base = get_url_base('statements/from_agents')
    # Statement classes whose agents do not fit a subject/object pattern.
    non_binary_statements = (Complex, SelfModification, ActiveForm)
    queries = []
    for stmt in stmts:
        kwargs = {}
        agent_list = stmt.agent_list()
        if type(stmt) not in non_binary_statements:
            for pos, ag in zip(['subject', 'object'], agent_list):
                if ag is not None:
                    kwargs[pos] = pick_ns(ag)
        else:
            for i, ag in enumerate(agent_list):
                if ag is not None:
                    kwargs['agent%d' % i] = pick_ns(ag)
        kwargs['type'] = stmt.__class__.__name__
        kwargs.update(params)
        parts = ['%s=%s' % (k, v) for k, v in kwargs.items()
                 if v is not None]
        queries.append(url_base + '?' + '&'.join(parts))
    return queries
Get queries used to search based on a statement .