idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
17,500 | def save ( self , model_fname = 'model.pkl' ) : with open ( model_fname , 'wb' ) as fh : pickle . dump ( self . stmts , fh , protocol = 4 ) | Save the state of the IncrementalModel in a pickle file . |
17,501 | def add_statements ( self , pmid , stmts ) : if pmid not in self . stmts : self . stmts [ pmid ] = stmts else : self . stmts [ pmid ] += stmts | Add INDRA Statements to the incremental model indexed by PMID . |
17,502 | def preassemble ( self , filters = None , grounding_map = None ) : stmts = self . get_statements ( ) stmts = ac . filter_no_hypothesis ( stmts ) if grounding_map is not None : stmts = ac . map_grounding ( stmts , grounding_map = grounding_map ) else : stmts = ac . map_grounding ( stmts ) if filters and ( 'grounding' in filters ) : stmts = ac . filter_grounded_only ( stmts ) stmts = ac . map_sequence ( stmts ) if filters and 'human_only' in filters : stmts = ac . filter_human_only ( stmts ) stmts = ac . run_preassembly ( stmts , return_toplevel = False ) stmts = self . _relevance_filter ( stmts , filters ) self . assembled_stmts = stmts | Preassemble the Statements collected in the model . |
17,503 | def get_model_agents ( self ) : model_stmts = self . get_statements ( ) agents = [ ] for stmt in model_stmts : for a in stmt . agent_list ( ) : if a is not None : agents . append ( a ) return agents | Return a list of all Agents from all Statements . |
17,504 | def get_statements ( self ) : stmt_lists = [ v for k , v in self . stmts . items ( ) ] stmts = [ ] for s in stmt_lists : stmts += s return stmts | Return a list of all Statements in a single list . |
17,505 | def get_statements_noprior ( self ) : stmt_lists = [ v for k , v in self . stmts . items ( ) if k != 'prior' ] stmts = [ ] for s in stmt_lists : stmts += s return stmts | Return a list of all non - prior Statements in a single list . |
17,506 | def process_ndex_neighborhood ( gene_names , network_id = None , rdf_out = 'bel_output.rdf' , print_output = True ) : logger . warning ( 'This method is deprecated and the results are not ' 'guaranteed to be correct. Please use ' 'process_pybel_neighborhood instead.' ) if network_id is None : network_id = '9ea3c170-01ad-11e5-ac0f-000c29cb28fb' url = ndex_bel2rdf + '/network/%s/asBELRDF/query' % network_id params = { 'searchString' : ' ' . join ( gene_names ) } res_json = ndex_client . send_request ( url , params , is_json = True ) if not res_json : logger . error ( 'No response for NDEx neighborhood query.' ) return None if res_json . get ( 'error' ) : error_msg = res_json . get ( 'message' ) logger . error ( 'BEL/RDF response contains error: %s' % error_msg ) return None rdf = res_json . get ( 'content' ) if not rdf : logger . error ( 'BEL/RDF response is empty.' ) return None with open ( rdf_out , 'wb' ) as fh : fh . write ( rdf . encode ( 'utf-8' ) ) bp = process_belrdf ( rdf , print_output = print_output ) return bp | Return a BelRdfProcessor for an NDEx network neighborhood . |
17,507 | def process_pybel_neighborhood ( gene_names , network_file = None , network_type = 'belscript' , ** kwargs ) : if network_file is None : network_file = os . path . join ( os . path . dirname ( os . path . abspath ( __file__ ) ) , os . path . pardir , os . path . pardir , os . path . pardir , 'data' , 'large_corpus.bel' ) if network_type == 'belscript' : bp = process_belscript ( network_file , ** kwargs ) elif network_type == 'json' : bp = process_json_file ( network_file ) filtered_stmts = [ ] for stmt in bp . statements : found = False for agent in stmt . agent_list ( ) : if agent is not None : if agent . name in gene_names : found = True if found : filtered_stmts . append ( stmt ) bp . statements = filtered_stmts return bp | Return PybelProcessor around neighborhood of given genes in a network . |
17,508 | def process_pybel_graph ( graph ) : bp = PybelProcessor ( graph ) bp . get_statements ( ) if bp . annot_manager . failures : logger . warning ( 'missing %d annotation pairs' , sum ( len ( v ) for v in bp . annot_manager . failures . values ( ) ) ) return bp | Return a PybelProcessor by processing a PyBEL graph . |
17,509 | def process_belscript ( file_name , ** kwargs ) : if 'citation_clearing' not in kwargs : kwargs [ 'citation_clearing' ] = False if 'no_identifier_validation' not in kwargs : kwargs [ 'no_identifier_validation' ] = True pybel_graph = pybel . from_path ( file_name , ** kwargs ) return process_pybel_graph ( pybel_graph ) | Return a PybelProcessor by processing a BEL script file . |
17,510 | def process_json_file ( file_name ) : with open ( file_name , 'rt' ) as fh : pybel_graph = pybel . from_json_file ( fh , False ) return process_pybel_graph ( pybel_graph ) | Return a PybelProcessor by processing a Node - Link JSON file . |
17,511 | def process_cbn_jgif_file ( file_name ) : with open ( file_name , 'r' ) as jgf : return process_pybel_graph ( pybel . from_cbn_jgif ( json . load ( jgf ) ) ) | Return a PybelProcessor by processing a CBN JGIF JSON file . |
17,512 | def update_famplex ( ) : famplex_url_pattern = 'https://raw.githubusercontent.com/sorgerlab/famplex/master/%s.csv' csv_names = [ 'entities' , 'equivalences' , 'gene_prefixes' , 'grounding_map' , 'relations' ] for csv_name in csv_names : url = famplex_url_pattern % csv_name save_from_http ( url , os . path . join ( path , 'famplex/%s.csv' % csv_name ) ) | Update all the CSV files that form the FamPlex resource . |
17,513 | def update_lincs_small_molecules ( ) : url = 'http://lincs.hms.harvard.edu/db/sm/' sm_data = load_lincs_csv ( url ) sm_dict = { d [ 'HMS LINCS ID' ] : d . copy ( ) for d in sm_data } assert len ( sm_dict ) == len ( sm_data ) , "We lost data." fname = os . path . join ( path , 'lincs_small_molecules.json' ) with open ( fname , 'w' ) as fh : json . dump ( sm_dict , fh , indent = 1 ) | Load the csv of LINCS small molecule metadata into a dict . |
17,514 | def update_lincs_proteins ( ) : url = 'http://lincs.hms.harvard.edu/db/proteins/' prot_data = load_lincs_csv ( url ) prot_dict = { d [ 'HMS LINCS ID' ] : d . copy ( ) for d in prot_data } assert len ( prot_dict ) == len ( prot_data ) , "We lost data." fname = os . path . join ( path , 'lincs_proteins.json' ) with open ( fname , 'w' ) as fh : json . dump ( prot_dict , fh , indent = 1 ) | Load the csv of LINCS protein metadata into a dict . |
17,515 | def _get_is_direct ( stmt ) : any_indirect = False for ev in stmt . evidence : if ev . epistemics . get ( 'direct' ) is True : return True elif ev . epistemics . get ( 'direct' ) is False : any_indirect = True if any_indirect : return False return True | Returns true if there is evidence that the statement is a direct interaction . If any of the evidences associated with the statement indicates a direct interaction then we assume the interaction is direct . If there is no evidence for the interaction being indirect then we default to direct . |
17,516 | def make_model ( self ) : for stmt in self . statements : if isinstance ( stmt , Modification ) : card = assemble_modification ( stmt ) elif isinstance ( stmt , SelfModification ) : card = assemble_selfmodification ( stmt ) elif isinstance ( stmt , Complex ) : card = assemble_complex ( stmt ) elif isinstance ( stmt , Translocation ) : card = assemble_translocation ( stmt ) elif isinstance ( stmt , RegulateActivity ) : card = assemble_regulate_activity ( stmt ) elif isinstance ( stmt , RegulateAmount ) : card = assemble_regulate_amount ( stmt ) else : continue if card is not None : card . card [ 'meta' ] = { 'id' : stmt . uuid , 'belief' : stmt . belief } if self . pmc_override is not None : card . card [ 'pmc_id' ] = self . pmc_override else : card . card [ 'pmc_id' ] = get_pmc_id ( stmt ) self . cards . append ( card ) | Assemble statements into index cards . |
17,517 | def print_model ( self ) : cards = [ c . card for c in self . cards ] if len ( cards ) == 1 : cards = cards [ 0 ] cards_json = json . dumps ( cards , indent = 1 ) return cards_json | Return the assembled cards as a JSON string . |
17,518 | def geneways_action_to_indra_statement_type ( actiontype , plo ) : actiontype = actiontype . lower ( ) statement_generator = None is_direct = ( plo == 'P' ) if actiontype == 'bind' : statement_generator = lambda substance1 , substance2 , evidence : Complex ( [ substance1 , substance2 ] , evidence = evidence ) is_direct = True elif actiontype == 'phosphorylate' : statement_generator = lambda substance1 , substance2 , evidence : Phosphorylation ( substance1 , substance2 , evidence = evidence ) is_direct = True return ( statement_generator , is_direct ) | Return INDRA Statement corresponding to Geneways action type . |
17,519 | def make_statement ( self , action , mention ) : ( statement_generator , is_direct ) = geneways_action_to_indra_statement_type ( mention . actiontype , action . plo ) if statement_generator is None : return None text = None if self . get_ft_mention : try : content , content_type = get_full_text ( mention . pmid , 'pmid' ) if content is not None : ftm = FullTextMention ( mention , content ) sentences = ftm . find_matching_sentences ( ) if len ( sentences ) == 1 : text = sentences [ 0 ] except Exception : logger . warning ( 'Could not fetch full text for PMID ' + mention . pmid ) epistemics = dict ( ) epistemics [ 'direct' ] = is_direct annotations = mention . make_annotation ( ) annotations [ 'plo' ] = action . plo evidence = Evidence ( source_api = 'geneways' , source_id = mention . actionmentionid , pmid = mention . pmid , text = text , epistemics = epistemics , annotations = annotations ) upstream_agent = get_agent ( mention . upstream , action . up ) downstream_agent = get_agent ( mention . downstream , action . dn ) return statement_generator ( upstream_agent , downstream_agent , evidence ) | Makes an INDRA statement from a Geneways action and action mention . |
17,520 | def load_from_rdf_file ( self , rdf_file ) : self . graph = rdflib . Graph ( ) self . graph . parse ( os . path . abspath ( rdf_file ) , format = 'nt' ) self . initialize ( ) | Initialize given an RDF input file representing the hierarchy . |
17,521 | def load_from_rdf_string ( self , rdf_str ) : self . graph = rdflib . Graph ( ) self . graph . parse ( data = rdf_str , format = 'nt' ) self . initialize ( ) | Initialize given an RDF string representing the hierarchy . |
17,522 | def extend_with ( self , rdf_file ) : self . graph . parse ( os . path . abspath ( rdf_file ) , format = 'nt' ) self . initialize ( ) | Extend the RDF graph of this HierarchyManager with another RDF file . |
17,523 | def build_transitive_closures ( self ) : self . component_counter = 0 for rel , tc_dict in ( ( self . isa_objects , self . isa_closure ) , ( self . partof_objects , self . partof_closure ) , ( self . isa_or_partof_objects , self . isa_or_partof_closure ) ) : self . build_transitive_closure ( rel , tc_dict ) | Build the transitive closures of the hierarchy . |
17,524 | def build_transitive_closure ( self , rel , tc_dict ) : rel_fun = lambda node , graph : rel ( node ) for x in self . graph . all_nodes ( ) : rel_closure = self . graph . transitiveClosure ( rel_fun , x ) xs = x . toPython ( ) for y in rel_closure : ys = y . toPython ( ) if xs == ys : continue try : tc_dict [ xs ] . append ( ys ) except KeyError : tc_dict [ xs ] = [ ys ] if rel == self . isa_or_partof_objects : self . _add_component ( xs , ys ) | Build a transitive closure for a given relation in a given dict . |
17,525 | def directly_or_indirectly_related ( self , ns1 , id1 , ns2 , id2 , closure_dict , relation_func ) : if id2 is None or ( id2 is None and id1 is None ) : return True elif id1 is None : return False if closure_dict : term1 = self . get_uri ( ns1 , id1 ) term2 = self . get_uri ( ns2 , id2 ) ec = closure_dict . get ( term1 ) if ec is not None and term2 in ec : return True else : return False else : if not self . uri_as_name : e1 = self . find_entity ( id1 ) e2 = self . find_entity ( id2 ) if e1 is None or e2 is None : return False t1 = rdflib . term . URIRef ( e1 ) t2 = rdflib . term . URIRef ( e2 ) else : u1 = self . get_uri ( ns1 , id1 ) u2 = self . get_uri ( ns2 , id2 ) t1 = rdflib . term . URIRef ( u1 ) t2 = rdflib . term . URIRef ( u2 ) to = self . graph . transitiveClosure ( relation_func , t1 ) if t2 in to : return True else : return False | Return True if two entities have the specified relationship . |
17,526 | def isa ( self , ns1 , id1 , ns2 , id2 ) : rel_fun = lambda node , graph : self . isa_objects ( node ) return self . directly_or_indirectly_related ( ns1 , id1 , ns2 , id2 , self . isa_closure , rel_fun ) | Return True if one entity has an isa relationship to another . |
17,527 | def partof ( self , ns1 , id1 , ns2 , id2 ) : rel_fun = lambda node , graph : self . partof_objects ( node ) return self . directly_or_indirectly_related ( ns1 , id1 , ns2 , id2 , self . partof_closure , rel_fun ) | Return True if one entity is partof another . |
17,528 | def isa_or_partof ( self , ns1 , id1 , ns2 , id2 ) : rel_fun = lambda node , graph : self . isa_or_partof_objects ( node ) return self . directly_or_indirectly_related ( ns1 , id1 , ns2 , id2 , self . isa_or_partof_closure , rel_fun ) | Return True if two entities are in an isa or partof relationship |
17,529 | def is_opposite ( self , ns1 , id1 , ns2 , id2 ) : u1 = self . get_uri ( ns1 , id1 ) u2 = self . get_uri ( ns2 , id2 ) t1 = rdflib . term . URIRef ( u1 ) t2 = rdflib . term . URIRef ( u2 ) rel = rdflib . term . URIRef ( self . relations_prefix + 'is_opposite' ) to = self . graph . objects ( t1 , rel ) if t2 in to : return True return False | Return True if two entities are in an is_opposite relationship |
17,530 | def get_parents ( self , uri , type = 'all' ) : all_parents = set ( self . isa_or_partof_closure . get ( uri , [ ] ) ) if not all_parents or type == 'all' : return all_parents if type == 'immediate' : node = rdflib . term . URIRef ( uri ) immediate_parents = list ( set ( self . isa_or_partof_objects ( node ) ) ) return [ p . toPython ( ) for p in immediate_parents ] elif type == 'top' : top_parents = [ p for p in all_parents if not self . isa_or_partof_closure . get ( p ) ] return top_parents | Return parents of a given entry . |
17,531 | def _get_perf ( text , msg_id ) : msg = KQMLPerformative ( 'REQUEST' ) msg . set ( 'receiver' , 'READER' ) content = KQMLList ( 'run-text' ) content . sets ( 'text' , text ) msg . set ( 'content' , content ) msg . set ( 'reply-with' , msg_id ) return msg | Return a request message for a given text . |
17,532 | def read_pmc ( self , pmcid ) : msg = KQMLPerformative ( 'REQUEST' ) msg . set ( 'receiver' , 'READER' ) content = KQMLList ( 'run-pmcid' ) content . sets ( 'pmcid' , pmcid ) content . set ( 'reply-when-done' , 'true' ) msg . set ( 'content' , content ) msg . set ( 'reply-with' , 'P-%s' % pmcid ) self . reply_counter += 1 self . send ( msg ) | Read a given PMC article . |
17,533 | def read_text ( self , text ) : logger . info ( 'Reading: "%s"' % text ) msg_id = 'RT000%s' % self . msg_counter kqml_perf = _get_perf ( text , msg_id ) self . reply_counter += 1 self . msg_counter += 1 self . send ( kqml_perf ) | Read a given text phrase . |
17,534 | def receive_reply ( self , msg , content ) : reply_head = content . head ( ) if reply_head == 'error' : comment = content . gets ( 'comment' ) logger . error ( 'Got error reply: "%s"' % comment ) else : extractions = content . gets ( 'ekb' ) self . extractions . append ( extractions ) self . reply_counter -= 1 if self . reply_counter == 0 : self . exit ( 0 ) | Handle replies with reading results . |
17,535 | def split_long_sentence ( sentence , words_per_line ) : words = sentence . split ( ' ' ) split_sentence = '' for i in range ( len ( words ) ) : split_sentence = split_sentence + words [ i ] if ( i + 1 ) % words_per_line == 0 : split_sentence = split_sentence + '\n' elif i != len ( words ) - 1 : split_sentence = split_sentence + " " return split_sentence | Takes a sentence and adds a newline every words_per_line words . |
17,536 | def shorter_name ( key ) : key_short = key for sep in [ '#' , '/' ] : ind = key_short . rfind ( sep ) if ind is not None : key_short = key_short [ ind + 1 : ] else : key_short = key_short return key_short . replace ( '-' , '_' ) . replace ( '.' , '_' ) | Return a shorter name for an id . |
17,537 | def add_event_property_edges ( event_entity , entries ) : do_not_log = [ '@type' , '@id' , 'http://worldmodelers.com/DataProvenance#sourced_from' ] for prop in event_entity : if prop not in do_not_log : value = event_entity [ prop ] value_entry = None value_str = None if '@id' in value [ 0 ] : value = value [ 0 ] [ '@id' ] if value in entries : value_str = get_entry_compact_text_repr ( entries [ value ] , entries ) if value_str is not None : edges . append ( [ shorter_name ( event_entity [ '@id' ] ) , shorter_name ( value ) , shorter_name ( prop ) ] ) node_labels [ shorter_name ( value ) ] = value_str | Adds edges to the graph for event properties . |
17,538 | def get_sourced_from ( entry ) : sourced_from = 'http://worldmodelers.com/DataProvenance#sourced_from' if sourced_from in entry : values = entry [ sourced_from ] values = [ i [ '@id' ] for i in values ] return values | Get a list of values from the sourced_from attribute . |
17,539 | def get_entry_compact_text_repr ( entry , entries ) : text = get_shortest_text_value ( entry ) if text is not None : return text else : sources = get_sourced_from ( entry ) if sources is not None : texts = [ ] for source in sources : source_entry = entries [ source ] texts . append ( get_shortest_text_value ( source_entry ) ) return get_shortest_string ( texts ) | If the entry has a text value return that . If the entry has a sourced_from value return the text value of the source . Otherwise return None . |
17,540 | def process_text ( text , output_fmt = 'json' , outbuf = None , cleanup = True , key = '' , ** kwargs ) : nxml_str = make_nxml_from_text ( text ) return process_nxml_str ( nxml_str , output_fmt , outbuf , cleanup , key , ** kwargs ) | Return processor with Statements extracted by reading text with Sparser . |
17,541 | def process_nxml_str ( nxml_str , output_fmt = 'json' , outbuf = None , cleanup = True , key = '' , ** kwargs ) : tmp_fname = 'PMC%s_%d.nxml' % ( key , mp . current_process ( ) . pid ) with open ( tmp_fname , 'wb' ) as fh : fh . write ( nxml_str . encode ( 'utf-8' ) ) try : sp = process_nxml_file ( tmp_fname , output_fmt , outbuf , cleanup , ** kwargs ) finally : if cleanup and os . path . exists ( tmp_fname ) : os . remove ( tmp_fname ) return sp | Return processor with Statements extracted by reading an NXML string . |
17,542 | def process_nxml_file ( fname , output_fmt = 'json' , outbuf = None , cleanup = True , ** kwargs ) : sp = None out_fname = None try : out_fname = run_sparser ( fname , output_fmt , outbuf , ** kwargs ) sp = process_sparser_output ( out_fname , output_fmt ) except Exception as e : logger . error ( "Sparser failed to run on %s." % fname ) logger . exception ( e ) finally : if out_fname is not None and os . path . exists ( out_fname ) and cleanup : os . remove ( out_fname ) return sp | Return processor with Statements extracted by reading an NXML file . |
17,543 | def process_sparser_output ( output_fname , output_fmt = 'json' ) : if output_fmt not in [ 'json' , 'xml' ] : logger . error ( "Unrecognized output format '%s'." % output_fmt ) return None sp = None with open ( output_fname , 'rt' ) as fh : if output_fmt == 'json' : json_dict = json . load ( fh ) sp = process_json_dict ( json_dict ) else : xml_str = fh . read ( ) sp = process_xml ( xml_str ) return sp | Return a processor with Statements extracted from Sparser XML or JSON |
17,544 | def process_xml ( xml_str ) : try : tree = ET . XML ( xml_str , parser = UTB ( ) ) except ET . ParseError as e : logger . error ( 'Could not parse XML string' ) logger . error ( e ) return None sp = _process_elementtree ( tree ) return sp | Return processor with Statements extracted from a Sparser XML . |
17,545 | def run_sparser ( fname , output_fmt , outbuf = None , timeout = 600 ) : if not sparser_path or not os . path . exists ( sparser_path ) : logger . error ( 'Sparser executable not set in %s' % sparser_path_var ) return None if output_fmt == 'xml' : format_flag = '-x' suffix = '.xml' elif output_fmt == 'json' : format_flag = '-j' suffix = '.json' else : logger . error ( 'Unknown output format: %s' % output_fmt ) return None sparser_exec_path = os . path . join ( sparser_path , 'save-semantics.sh' ) output_path = fname . split ( '.' ) [ 0 ] + '-semantics' + suffix for fpath in [ sparser_exec_path , fname ] : if not os . path . exists ( fpath ) : raise Exception ( "'%s' is not a valid path." % fpath ) cmd_list = [ sparser_exec_path , format_flag , fname ] with sp . Popen ( cmd_list , stdout = sp . PIPE ) as proc : try : stdout , stderr = proc . communicate ( timeout = timeout ) except sp . TimeoutExpired : sp . check_call ( [ 'pkill' , '-f' , 'r3.core.*%s' % fname ] ) stdout , stderr = proc . communicate ( ) raise sp . TimeoutExpired ( proc . args , timeout , output = stdout , stderr = stderr ) except BaseException : sp . check_call ( [ 'pkill' , '-f' , fname ] ) proc . wait ( ) raise retcode = proc . poll ( ) if retcode : raise sp . CalledProcessError ( retcode , proc . args , output = stdout , stderr = stderr ) if outbuf is not None : outbuf . write ( stdout ) outbuf . flush ( ) assert os . path . exists ( output_path ) , 'No output file \"%s\" created by sparser.' % output_path return output_path | Return the path to reading output after running Sparser reading . |
17,546 | def get_version ( ) : assert sparser_path is not None , "Sparser path is not defined." with open ( os . path . join ( sparser_path , 'version.txt' ) , 'r' ) as f : version = f . read ( ) . strip ( ) return version | Return the version of the Sparser executable on the path . |
17,547 | def make_nxml_from_text ( text ) : text = _escape_xml ( text ) header = '<?xml version="1.0" encoding="UTF-8" ?>' + '<OAI-PMH><article><body><sec id="s1"><p>' footer = '</p></sec></body></article></OAI-PMH>' nxml_str = header + text + footer return nxml_str | Return raw text wrapped in NXML structure . |
17,548 | def get_hgnc_name ( hgnc_id ) : try : hgnc_name = hgnc_names [ hgnc_id ] except KeyError : xml_tree = get_hgnc_entry ( hgnc_id ) if xml_tree is None : return None hgnc_name_tag = xml_tree . find ( "result/doc/str[@name='symbol']" ) if hgnc_name_tag is None : return None hgnc_name = hgnc_name_tag . text . strip ( ) return hgnc_name | Return the HGNC symbol corresponding to the given HGNC ID . |
17,549 | def get_hgnc_entry ( hgnc_id ) : url = hgnc_url + 'hgnc_id/%s' % hgnc_id headers = { 'Accept' : '*/*' } res = requests . get ( url , headers = headers ) if not res . status_code == 200 : return None xml_tree = ET . XML ( res . content , parser = UTB ( ) ) return xml_tree | Return the HGNC entry for the given HGNC ID from the web service . |
17,550 | def analyze_reach_log ( log_fname = None , log_str = None ) : assert bool ( log_fname ) ^ bool ( log_str ) , 'Must specify log_fname OR log_str' started_patt = re . compile ( 'Starting ([\d]+)' ) finished_patt = re . compile ( 'Finished ([\d]+)' ) def get_content_nums ( txt ) : pat = 'Retrieved content for ([\d]+) / ([\d]+) papers to be read' res = re . match ( pat , txt ) has_content , total = res . groups ( ) if res else None , None return has_content , total if log_fname : with open ( log_fname , 'r' ) as fh : log_str = fh . read ( ) pmids = { } pmids [ 'started' ] = started_patt . findall ( log_str ) pmids [ 'finished' ] = finished_patt . findall ( log_str ) pmids [ 'not_done' ] = set ( pmids [ 'started' ] ) - set ( pmids [ 'finished' ] ) return pmids | Return unfinished PMIDs given a log file name . |
17,551 | def get_logs_from_db_reading ( job_prefix , reading_queue = 'run_db_reading_queue' ) : s3 = boto3 . client ( 's3' ) gen_prefix = 'reading_results/%s/logs/%s' % ( job_prefix , reading_queue ) job_log_data = s3 . list_objects_v2 ( Bucket = 'bigmech' , Prefix = join ( gen_prefix , job_prefix ) ) log_strs = [ ] for fdict in job_log_data [ 'Contents' ] : resp = s3 . get_object ( Bucket = 'bigmech' , Key = fdict [ 'Key' ] ) log_strs . append ( resp [ 'Body' ] . read ( ) . decode ( 'utf-8' ) ) return log_strs | Get the logs stashed on s3 for a particular reading . |
17,552 | def separate_reach_logs ( log_str ) : log_lines = log_str . splitlines ( ) reach_logs = [ ] reach_lines = [ ] adding_reach_lines = False for l in log_lines [ : ] : if not adding_reach_lines and 'Beginning reach' in l : adding_reach_lines = True elif adding_reach_lines and 'Reach finished' in l : adding_reach_lines = False reach_logs . append ( ( 'SUCCEEDED' , '\n' . join ( reach_lines ) ) ) reach_lines = [ ] elif adding_reach_lines : reach_lines . append ( l . split ( 'readers - ' ) [ 1 ] ) log_lines . remove ( l ) if adding_reach_lines : reach_logs . append ( ( 'FAILURE' , '\n' . join ( reach_lines ) ) ) return '\n' . join ( log_lines ) , reach_logs | Get the list of reach logs from the overall logs . |
17,553 | def get_unyielding_tcids ( log_str ) : tcid_strs = re . findall ( 'INFO: \[.*?\].*? - Got no statements for (\d+).*' , log_str ) return { int ( tcid_str ) for tcid_str in tcid_strs } | Extract the set of tcids for which no statements were created . |
17,554 | def analyze_db_reading ( job_prefix , reading_queue = 'run_db_reading_queue' ) : log_strs = get_logs_from_db_reading ( job_prefix , reading_queue ) indra_log_strs = [ ] all_reach_logs = [ ] log_stats = [ ] for log_str in log_strs : log_str , reach_logs = separate_reach_logs ( log_str ) all_reach_logs . extend ( reach_logs ) indra_log_strs . append ( log_str ) log_stats . append ( get_reading_stats ( log_str ) ) failed_reach_logs = [ reach_log_str for result , reach_log_str in all_reach_logs if result == 'FAILURE' ] failed_id_dicts = [ analyze_reach_log ( log_str = reach_log ) for reach_log in failed_reach_logs if bool ( reach_log ) ] tcids_unfinished = { id_dict [ 'not_done' ] for id_dict in failed_id_dicts } print ( "Found %d unfinished tcids." % len ( tcids_unfinished ) ) if log_stats : sum_dict = dict . fromkeys ( log_stats [ 0 ] . keys ( ) ) for log_stat in log_stats : for k in log_stat . keys ( ) : if isinstance ( log_stat [ k ] , list ) : if k not in sum_dict . keys ( ) : sum_dict [ k ] = [ 0 ] * len ( log_stat [ k ] ) sum_dict [ k ] = [ sum_dict [ k ] [ i ] + log_stat [ k ] [ i ] for i in range ( len ( log_stat [ k ] ) ) ] else : if k not in sum_dict . keys ( ) : sum_dict [ k ] = 0 sum_dict [ k ] += log_stat [ k ] else : sum_dict = { } return tcids_unfinished , sum_dict , log_stats | Run various analysis on a particular reading job . |
17,555 | def process_pc_neighborhood ( gene_names , neighbor_limit = 1 , database_filter = None ) : model = pcc . graph_query ( 'neighborhood' , gene_names , neighbor_limit = neighbor_limit , database_filter = database_filter ) if model is not None : return process_model ( model ) | Returns a BiopaxProcessor for a PathwayCommons neighborhood query . |
17,556 | def process_pc_pathsbetween ( gene_names , neighbor_limit = 1 , database_filter = None , block_size = None ) : if not block_size : model = pcc . graph_query ( 'pathsbetween' , gene_names , neighbor_limit = neighbor_limit , database_filter = database_filter ) if model is not None : return process_model ( model ) else : gene_blocks = [ gene_names [ i : i + block_size ] for i in range ( 0 , len ( gene_names ) , block_size ) ] stmts = [ ] for genes1 , genes2 in itertools . product ( gene_blocks , repeat = 2 ) : if genes1 == genes2 : bp = process_pc_pathsbetween ( genes1 , database_filter = database_filter , block_size = None ) else : bp = process_pc_pathsfromto ( genes1 , genes2 , database_filter = database_filter ) stmts += bp . statements | Returns a BiopaxProcessor for a PathwayCommons paths - between query . |
17,557 | def process_pc_pathsfromto ( source_genes , target_genes , neighbor_limit = 1 , database_filter = None ) : model = pcc . graph_query ( 'pathsfromto' , source_genes , target_genes , neighbor_limit = neighbor_limit , database_filter = database_filter ) if model is not None : return process_model ( model ) | Returns a BiopaxProcessor for a PathwayCommons paths - from - to query . |
17,558 | def process_model ( model ) : bp = BiopaxProcessor ( model ) bp . get_modifications ( ) bp . get_regulate_activities ( ) bp . get_regulate_amounts ( ) bp . get_activity_modification ( ) bp . get_gef ( ) bp . get_gap ( ) bp . get_conversions ( ) bp . eliminate_exact_duplicates ( ) return bp | Returns a BiopaxProcessor for a BioPAX model object . |
17,559 | def is_background_knowledge ( stmt ) : any_background = False for ev in stmt . evidence : epi = ev . epistemics if epi is not None : sec = epi . get ( 'section_type' ) if sec is not None and sec not in background_secs : return False elif sec in background_secs : any_background = True return any_background | Return True if Statement is only supported by background knowledge . |
17,560 | def multiple_sources ( stmt ) : sources = list ( set ( [ e . source_api for e in stmt . evidence ] ) ) if len ( sources ) > 1 : return True return False | Return True if statement is supported by multiple sources . |
17,561 | def id_to_symbol ( self , entrez_id ) : entrez_id = str ( entrez_id ) if entrez_id not in self . ids_to_symbols : m = 'Could not look up symbol for Entrez ID ' + entrez_id raise Exception ( m ) return self . ids_to_symbols [ entrez_id ] | Gives the symbol for a given entrez id . |
17,562 | def make_model ( self , output_file , add_curation_cols = False , up_only = False ) : stmt_header = [ 'INDEX' , 'UUID' , 'TYPE' , 'STR' , 'AG_A_TEXT' , 'AG_A_LINKS' , 'AG_A_STR' , 'AG_B_TEXT' , 'AG_B_LINKS' , 'AG_B_STR' , 'PMID' , 'TEXT' , 'IS_HYP' , 'IS_DIRECT' ] if add_curation_cols : stmt_header = stmt_header + [ 'AG_A_IDS_CORRECT' , 'AG_A_STATE_CORRECT' , 'AG_B_IDS_CORRECT' , 'AG_B_STATE_CORRECT' , 'EVENT_CORRECT' , 'RES_CORRECT' , 'POS_CORRECT' , 'SUBJ_ACT_CORRECT' , 'OBJ_ACT_CORRECT' , 'HYP_CORRECT' , 'DIRECT_CORRECT' ] rows = [ stmt_header ] for ix , stmt in enumerate ( self . statements ) : if len ( stmt . agent_list ( ) ) > 2 : logger . info ( "Skipping statement with more than two members: %s" % stmt ) continue elif len ( stmt . agent_list ( ) ) == 1 : ag_a = stmt . agent_list ( ) [ 0 ] ag_b = None else : ( ag_a , ag_b ) = stmt . agent_list ( ) row = [ ix + 1 , stmt . uuid , stmt . __class__ . __name__ , str ( stmt ) ] + _format_agent_entries ( ag_a , up_only ) + _format_agent_entries ( ag_b , up_only ) + [ stmt . evidence [ 0 ] . pmid , stmt . evidence [ 0 ] . text , stmt . evidence [ 0 ] . epistemics . get ( 'hypothesis' , '' ) , stmt . evidence [ 0 ] . epistemics . get ( 'direct' , '' ) ] if add_curation_cols : row = row + ( [ '' ] * 11 ) rows . append ( row ) write_unicode_csv ( output_file , rows , delimiter = '\t' ) | Export the statements into a tab - separated text file . |
17,563 | def get_create_base_agent ( self , agent ) : try : base_agent = self . agents [ _n ( agent . name ) ] except KeyError : base_agent = BaseAgent ( _n ( agent . name ) ) self . agents [ _n ( agent . name ) ] = base_agent if isinstance ( agent , Agent ) : for bc in agent . bound_conditions : bound_base_agent = self . get_create_base_agent ( bc . agent ) bound_base_agent . create_site ( get_binding_site_name ( agent ) ) base_agent . create_site ( get_binding_site_name ( bc . agent ) ) for mc in agent . mods : base_agent . create_mod_site ( mc ) for mc in agent . mutations : res_from = mc . residue_from if mc . residue_from else 'mut' res_to = mc . residue_to if mc . residue_to else 'X' if mc . position is None : mut_site_name = res_from else : mut_site_name = res_from + mc . position base_agent . create_site ( mut_site_name , states = [ 'WT' , res_to ] ) if agent . location is not None : base_agent . create_site ( 'loc' , [ _n ( agent . location ) ] ) if agent . activity is not None : site_name = agent . activity . activity_type base_agent . create_site ( site_name , [ 'inactive' , 'active' ] ) for db_name , db_ref in agent . db_refs . items ( ) : base_agent . db_refs [ db_name ] = db_ref return base_agent | Return base agent with given name creating it if needed . |
17,564 | def create_site ( self , site , states = None ) : if site not in self . sites : self . sites . append ( site ) if states is not None : self . site_states . setdefault ( site , [ ] ) try : states = list ( states ) except TypeError : return self . add_site_states ( site , states ) | Create a new site on an agent if it doesn t already exist . |
17,565 | def create_mod_site ( self , mc ) : site_name = get_mod_site_name ( mc ) ( unmod_site_state , mod_site_state ) = states [ mc . mod_type ] self . create_site ( site_name , ( unmod_site_state , mod_site_state ) ) site_anns = [ Annotation ( ( site_name , mod_site_state ) , mc . mod_type , 'is_modification' ) ] if mc . residue : site_anns . append ( Annotation ( site_name , mc . residue , 'is_residue' ) ) if mc . position : site_anns . append ( Annotation ( site_name , mc . position , 'is_position' ) ) self . site_annotations += site_anns | Create modification site for the BaseAgent from a ModCondition . |
17,566 | def add_site_states ( self , site , states ) : for state in states : if state not in self . site_states [ site ] : self . site_states [ site ] . append ( state ) | Create new states on an agent site if the state doesn t exist . |
17,567 | def add_activity_form ( self , activity_pattern , is_active ) : if is_active : if activity_pattern not in self . active_forms : self . active_forms . append ( activity_pattern ) else : if activity_pattern not in self . inactive_forms : self . inactive_forms . append ( activity_pattern ) | Adds the pattern as an active or inactive form to an Agent . |
17,568 | def add_activity_type ( self , activity_type ) : if activity_type not in self . activity_types : self . activity_types . append ( activity_type ) | Adds an activity type to an Agent . |
17,569 | def make_annotation ( self ) : annotation = dict ( ) for item in dir ( self ) : if len ( item ) > 0 and item [ 0 ] != '_' and not inspect . ismethod ( getattr ( self , item ) ) : annotation [ item ] = getattr ( self , item ) annotation [ 'action_mentions' ] = list ( ) for action_mention in self . action_mentions : annotation_mention = action_mention . make_annotation ( ) annotation [ 'action_mentions' ] . append ( annotation_mention ) return annotation | Returns a dictionary with all properties of the action and each of its action mentions . |
17,570 | def _search_path ( self , directory_name , filename ) : full_path = path . join ( directory_name , filename ) if path . exists ( full_path ) : return full_path return None | Searches for a given file in the specified directory . |
17,571 | def _init_action_list ( self , action_filename ) : self . actions = list ( ) self . hiid_to_action_index = dict ( ) f = codecs . open ( action_filename , 'r' , encoding = 'latin-1' ) first_line = True for line in f : line = line . rstrip ( ) if first_line : first_line = False else : self . actions . append ( GenewaysAction ( line ) ) latestInd = len ( self . actions ) - 1 hiid = self . actions [ latestInd ] . hiid if hiid in self . hiid_to_action_index : raise Exception ( 'action hiid not unique: %d' % hiid ) self . hiid_to_action_index [ hiid ] = latestInd | Parses the file and populates the data . |
17,572 | def _link_to_action_mentions ( self , actionmention_filename ) : parser = GenewaysActionMentionParser ( actionmention_filename ) self . action_mentions = parser . action_mentions for action_mention in self . action_mentions : hiid = action_mention . hiid if hiid not in self . hiid_to_action_index : m1 = 'Parsed action mention has hiid %d, which does not exist' m2 = ' in table of action hiids' raise Exception ( ( m1 + m2 ) % hiid ) else : idx = self . hiid_to_action_index [ hiid ] self . actions [ idx ] . action_mentions . append ( action_mention ) | Add action mentions |
17,573 | def _lookup_symbols ( self , symbols_filename ) : symbol_lookup = GenewaysSymbols ( symbols_filename ) for action in self . actions : action . up_symbol = symbol_lookup . id_to_symbol ( action . up ) action . dn_symbol = symbol_lookup . id_to_symbol ( action . dn ) | Look up symbols for actions and action mentions |
17,574 | def get_top_n_action_types ( self , top_n ) : action_type_to_counts = dict ( ) for action in self . actions : actiontype = action . actiontype if actiontype not in action_type_to_counts : action_type_to_counts [ actiontype ] = 1 else : action_type_to_counts [ actiontype ] = action_type_to_counts [ actiontype ] + 1 action_types = list ( ) counts = list ( ) for actiontype in action_type_to_counts . keys ( ) : action_types . append ( actiontype ) counts . append ( action_type_to_counts [ actiontype ] ) num_actions = len ( self . actions ) num_actions2 = 0 for count in counts : num_actions2 = num_actions2 + count if num_actions != num_actions2 : raise ( Exception ( 'Problem counting everything up!' ) ) sorted_inds = np . argsort ( counts ) last_ind = len ( sorted_inds ) - 1 top_actions = list ( ) if top_n > len ( sorted_inds ) : raise Exception ( 'Asked for top %d action types, ' + 'but there are only %d action types' % ( top_n , len ( sorted_inds ) ) ) for i in range ( top_n ) : top_actions . append ( action_types [ sorted_inds [ last_ind - i ] ] ) return top_actions | Returns the top N actions by count . |
17,575 | def get_string ( self ) : graph_string = self . graph . to_string ( ) graph_string = graph_string . replace ( '\\N' , '\\n' ) return graph_string | Return the assembled graph as a string . |
17,576 | def save_dot ( self , file_name = 'graph.dot' ) : s = self . get_string ( ) with open ( file_name , 'wt' ) as fh : fh . write ( s ) | Save the graph in a graphviz dot file . |
17,577 | def save_pdf ( self , file_name = 'graph.pdf' , prog = 'dot' ) : self . graph . draw ( file_name , prog = prog ) | Draw the graph and save as an image or pdf file . |
17,578 | def _add_edge ( self , source , target , ** kwargs ) : edge_properties = self . edge_properties for k , v in kwargs . items ( ) : edge_properties [ k ] = v self . graph . add_edge ( source , target , ** edge_properties ) | Add an edge to the graph . |
17,579 | def _add_node ( self , agent ) : if agent is None : return node_label = _get_node_label ( agent ) if isinstance ( agent , Agent ) and agent . bound_conditions : bound_agents = [ bc . agent for bc in agent . bound_conditions if bc . is_bound ] if bound_agents : bound_names = [ _get_node_label ( a ) for a in bound_agents ] node_label = _get_node_label ( agent ) + '/' + '/' . join ( bound_names ) self . _complex_nodes . append ( [ agent ] + bound_agents ) else : node_label = _get_node_label ( agent ) node_key = _get_node_key ( agent ) if node_key in self . existing_nodes : return self . existing_nodes . append ( node_key ) self . graph . add_node ( node_key , label = node_label , ** self . node_properties ) | Add an Agent as a node to the graph . |
17,580 | def _add_stmt_edge ( self , stmt ) : source = _get_node_key ( stmt . agent_list ( ) [ 0 ] ) target = _get_node_key ( stmt . agent_list ( ) [ 1 ] ) edge_key = ( source , target , stmt . __class__ . __name__ ) if edge_key in self . existing_edges : return self . existing_edges . append ( edge_key ) if isinstance ( stmt , RemoveModification ) or isinstance ( stmt , Inhibition ) or isinstance ( stmt , DecreaseAmount ) or isinstance ( stmt , Gap ) or ( isinstance ( stmt , Influence ) and stmt . overall_polarity ( ) == - 1 ) : color = '#ff0000' else : color = '#000000' params = { 'color' : color , 'arrowhead' : 'normal' , 'dir' : 'forward' } self . _add_edge ( source , target , ** params ) | Assemble a Modification statement . |
17,581 | def _add_complex ( self , members , is_association = False ) : params = { 'color' : '#0000ff' , 'arrowhead' : 'dot' , 'arrowtail' : 'dot' , 'dir' : 'both' } for m1 , m2 in itertools . combinations ( members , 2 ) : if self . _has_complex_node ( m1 , m2 ) : continue if is_association : m1_key = _get_node_key ( m1 . concept ) m2_key = _get_node_key ( m2 . concept ) else : m1_key = _get_node_key ( m1 ) m2_key = _get_node_key ( m2 ) edge_key = ( set ( [ m1_key , m2_key ] ) , 'complex' ) if edge_key in self . existing_edges : return self . existing_edges . append ( edge_key ) self . _add_edge ( m1_key , m2_key , ** params ) | Assemble a Complex statement . |
17,582 | def process_from_file ( signor_data_file , signor_complexes_file = None ) : data_iter = read_unicode_csv ( signor_data_file , delimiter = ';' , skiprows = 1 ) complexes_iter = None if signor_complexes_file : complexes_iter = read_unicode_csv ( signor_complexes_file , delimiter = ';' , skiprows = 1 ) else : logger . warning ( 'Signor complex mapping file not provided, Statements ' 'involving complexes will not be expanded to members.' ) return _processor_from_data ( data_iter , complexes_iter ) | Process Signor interaction data from CSV files . |
17,583 | def _handle_response ( res , delimiter ) : if res . status_code == 200 : if sys . version_info [ 0 ] < 3 : csv_io = BytesIO ( res . content ) else : csv_io = StringIO ( res . text ) data_iter = read_unicode_csv_fileobj ( csv_io , delimiter = delimiter , skiprows = 1 ) else : raise Exception ( 'Could not download Signor data.' ) return data_iter | Get an iterator over the CSV data from the response . |
17,584 | def get_protein_expression ( gene_names , cell_types ) : A = 0.2438361 B = 3.0957627 mrna_amounts = cbio_client . get_ccle_mrna ( gene_names , cell_types ) protein_amounts = copy ( mrna_amounts ) for cell_type in cell_types : amounts = mrna_amounts . get ( cell_type ) if amounts is None : continue for gene_name , amount in amounts . items ( ) : if amount is not None : protein_amount = 10 ** ( A * amount + B ) protein_amounts [ cell_type ] [ gene_name ] = protein_amount return protein_amounts | Return the protein expression levels of genes in cell types . |
17,585 | def get_aspect ( cx , aspect_name ) : if isinstance ( cx , dict ) : return cx . get ( aspect_name ) for entry in cx : if list ( entry . keys ( ) ) [ 0 ] == aspect_name : return entry [ aspect_name ] | Return an aspect given the name of the aspect |
17,586 | def classify_nodes ( graph , hub ) : node_stats = defaultdict ( lambda : defaultdict ( list ) ) for u , v , data in graph . edges ( data = True ) : if hub == u : h , o = u , v if data [ 'i' ] != 'Complex' : node_stats [ o ] [ 'up' ] . append ( - 1 ) else : node_stats [ o ] [ 'up' ] . append ( 0 ) elif hub == v : h , o = v , u if data [ 'i' ] != 'Complex' : node_stats [ o ] [ 'up' ] . append ( 1 ) else : node_stats [ o ] [ 'up' ] . append ( 0 ) else : continue node_stats [ o ] [ 'interaction' ] . append ( edge_type_to_class ( data [ 'i' ] ) ) node_classes = { } for node_id , stats in node_stats . items ( ) : up = max ( set ( stats [ 'up' ] ) , key = stats [ 'up' ] . count ) interactions = [ i for i in stats [ 'interaction' ] if not ( up != 0 and i == 'complex' ) ] edge_type = max ( set ( interactions ) , key = interactions . count ) node_type = graph . nodes [ node_id ] [ 'type' ] node_classes [ node_id ] = ( up , edge_type , node_type ) return node_classes | Classify each node based on its type and relationship to the hub . |
17,587 | def get_attributes ( aspect , id ) : attributes = { } for entry in aspect : if entry [ 'po' ] == id : attributes [ entry [ 'n' ] ] = entry [ 'v' ] return attributes | Return the attributes pointing to a given ID in a given aspect . |
17,588 | def cx_to_networkx ( cx ) : graph = networkx . MultiDiGraph ( ) for node_entry in get_aspect ( cx , 'nodes' ) : id = node_entry [ '@id' ] attrs = get_attributes ( get_aspect ( cx , 'nodeAttributes' ) , id ) attrs [ 'n' ] = node_entry [ 'n' ] graph . add_node ( id , ** attrs ) for edge_entry in get_aspect ( cx , 'edges' ) : id = edge_entry [ '@id' ] attrs = get_attributes ( get_aspect ( cx , 'edgeAttributes' ) , id ) attrs [ 'i' ] = edge_entry [ 'i' ] graph . add_edge ( edge_entry [ 's' ] , edge_entry [ 't' ] , key = id , ** attrs ) return graph | Return a MultiDiGraph representation of a CX network . |
17,589 | def get_quadrant_from_class ( node_class ) : up , edge_type , _ = node_class if up == 0 : return 0 if random . random ( ) < 0.5 else 7 mappings = { ( - 1 , 'modification' ) : 1 , ( - 1 , 'amount' ) : 2 , ( - 1 , 'activity' ) : 3 , ( 1 , 'activity' ) : 4 , ( 1 , 'amount' ) : 5 , ( 1 , 'modification' ) : 6 } return mappings [ ( up , edge_type ) ] | Return the ID of the segment of the plane corresponding to a class . |
17,590 | def get_coordinates ( node_class ) : quadrant_size = ( 2 * math . pi / 8.0 ) quadrant = get_quadrant_from_class ( node_class ) begin_angle = quadrant_size * quadrant r = 200 + 800 * random . random ( ) alpha = begin_angle + random . random ( ) * quadrant_size x = r * math . cos ( alpha ) y = r * math . sin ( alpha ) return x , y | Generate coordinates for a node in a given class . |
17,591 | def get_layout_aspect ( hub , node_classes ) : aspect = [ { 'node' : hub , 'x' : 0.0 , 'y' : 0.0 } ] for node , node_class in node_classes . items ( ) : if node == hub : continue x , y = get_coordinates ( node_class ) aspect . append ( { 'node' : node , 'x' : x , 'y' : y } ) return aspect | Get the full layout aspect with coordinates for each node . |
17,592 | def get_node_by_name ( graph , name ) : for id , attrs in graph . nodes ( data = True ) : if attrs [ 'n' ] == name : return id | Return a node ID given its name . |
17,593 | def add_semantic_hub_layout ( cx , hub ) : graph = cx_to_networkx ( cx ) hub_node = get_node_by_name ( graph , hub ) node_classes = classify_nodes ( graph , hub_node ) layout_aspect = get_layout_aspect ( hub_node , node_classes ) cx [ 'cartesianLayout' ] = layout_aspect | Attach a layout aspect to a CX network given a hub node . |
17,594 | def get_metadata ( doi ) : url = crossref_url + 'works/' + doi res = requests . get ( url ) if res . status_code != 200 : logger . info ( 'Could not get CrossRef metadata for DOI %s, code %d' % ( doi , res . status_code ) ) return None raw_message = res . json ( ) metadata = raw_message . get ( 'message' ) return metadata | Returns the metadata of an article given its DOI from CrossRef as a JSON dict |
17,595 | def doi_query ( pmid , search_limit = 10 ) : pubmed_meta_dict = pubmed_client . get_metadata_for_ids ( [ pmid ] , get_issns_from_nlm = True ) if pubmed_meta_dict is None or pubmed_meta_dict . get ( pmid ) is None : logger . warning ( 'No metadata found in Pubmed for PMID%s' % pmid ) return None pubmed_meta = pubmed_meta_dict [ pmid ] if pubmed_meta . get ( 'doi' ) : return pubmed_meta . get ( 'doi' ) pm_article_title = pubmed_meta . get ( 'title' ) if pm_article_title is None : logger . warning ( 'No article title found in Pubmed for PMID%s' % pmid ) return None pm_issn_list = pubmed_meta . get ( 'issn_list' ) if not pm_issn_list : logger . warning ( 'No ISSNs found in Pubmed for PMID%s' % pmid ) return None pm_page = pubmed_meta . get ( 'page' ) if not pm_page : logger . debug ( 'No page number found in Pubmed for PMID%s' % pmid ) return None url = crossref_search_url params = { 'q' : pm_article_title , 'sort' : 'score' } try : res = requests . get ( crossref_search_url , params ) except requests . exceptions . ConnectionError as e : logger . error ( 'CrossRef service could not be reached.' ) logger . error ( e ) return None except Exception as e : logger . error ( 'Error accessing CrossRef service: %s' % str ( e ) ) return None if res . status_code != 200 : logger . info ( 'PMID%s: no search results from CrossRef, code %d' % ( pmid , res . status_code ) ) return None raw_message = res . json ( ) mapped_doi = None for result_ix , result in enumerate ( raw_message ) : if result_ix > search_limit : logger . info ( 'PMID%s: No match found within first %s results, ' 'giving up!' % ( pmid , search_limit ) ) break xref_doi_url = result [ 'doi' ] m = re . match ( '^http://dx.doi.org/(.*)$' , xref_doi_url ) xref_doi = m . groups ( ) [ 0 ] xref_meta = get_metadata ( xref_doi ) if xref_meta is None : continue xref_issn_list = xref_meta . get ( 'ISSN' ) xref_page = xref_meta . get ( 'page' ) if not xref_issn_list : logger . 
debug ( 'No ISSN found for DOI %s, skipping' % xref_doi_url ) continue if not xref_page : logger . debug ( 'No page number found for DOI %s, skipping' % xref_doi_url ) continue matching_issns = set ( pm_issn_list ) . intersection ( set ( xref_issn_list ) ) pm_start_page = pm_page . split ( '-' ) [ 0 ] . upper ( ) xr_start_page = xref_page . split ( '-' ) [ 0 ] . upper ( ) if xr_start_page . endswith ( 'E' ) : xr_start_page = 'E' + xr_start_page [ : - 1 ] if matching_issns and pm_start_page == xr_start_page : mapped_doi = xref_doi break return mapped_doi | Get the DOI for a PMID by matching CrossRef and Pubmed metadata . |
17,596 | def get_agent_rule_str ( agent ) : rule_str_list = [ _n ( agent . name ) ] if isinstance ( agent , ist . Agent ) : for mod in agent . mods : mstr = abbrevs [ mod . mod_type ] if mod . residue is not None : mstr += mod . residue if mod . position is not None : mstr += mod . position rule_str_list . append ( '%s' % mstr ) for mut in agent . mutations : res_from = mut . residue_from if mut . residue_from else 'mut' res_to = mut . residue_to if mut . residue_to else 'X' if mut . position is None : mut_site_name = res_from else : mut_site_name = res_from + mut . position mstr = mut_site_name + res_to rule_str_list . append ( mstr ) if agent . bound_conditions : for b in agent . bound_conditions : if b . is_bound : rule_str_list . append ( _n ( b . agent . name ) ) else : rule_str_list . append ( 'n' + _n ( b . agent . name ) ) if agent . location is not None : rule_str_list . append ( _n ( agent . location ) ) if agent . activity is not None : if agent . activity . is_active : rule_str_list . append ( agent . activity . activity_type [ : 3 ] ) else : rule_str_list . append ( agent . activity . activity_type [ : 3 ] + '_inact' ) rule_str = '_' . join ( rule_str_list ) return rule_str | Construct a string from an Agent as part of a PySB rule name . |
17,597 | def add_rule_to_model ( model , rule , annotations = None ) : try : model . add_component ( rule ) if annotations : model . annotations += annotations except ComponentDuplicateNameError : msg = "Rule %s already in model! Skipping." % rule . name logger . debug ( msg ) | Add a Rule to a PySB model and handle duplicate component errors . |
17,598 | def get_create_parameter ( model , param ) : norm_name = _n ( param . name ) parameter = model . parameters . get ( norm_name ) if not param . unique and parameter is not None : return parameter if param . unique : pnum = 1 while True : pname = norm_name + '_%d' % pnum if model . parameters . get ( pname ) is None : break pnum += 1 else : pname = norm_name parameter = Parameter ( pname , param . value ) model . add_component ( parameter ) return parameter | Return parameter with given name creating it if needed . |
17,599 | def get_uncond_agent ( agent ) : agent_uncond = ist . Agent ( _n ( agent . name ) , mutations = agent . mutations ) return agent_uncond | Construct the unconditional state of an Agent . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.