idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
50,900
def load_orthologs(fo: IO, metadata: dict):
    """Load orthologs into ArangoDB.

    Args:
        fo: file object for the gzipped orthologs resource file
        metadata: metadata dict for the resource (must contain
            metadata.version and metadata.source)
    """
    version = metadata["metadata"]["version"]
    source = metadata["metadata"]["source"]

    with timy.Timer("Load Orthologs") as timer:
        arango_client = arangodb.get_client()
        belns_db = arangodb.get_belns_handle(arango_client)
        arangodb.batch_load_docs(
            belns_db, orthologs_iterator(fo, version), on_duplicate="update"
        )
        log.info("Load orthologs", elapsed=timer.elapsed, source=source)

        # NOTE(review): the original source had truncated f-strings here
        # (`remove_old_ortholog_edges = f`).  Reconstructed AQL below removes
        # docs for this source from prior versions — TODO confirm against VCS.
        remove_old_ortholog_edges = f"""
            FOR edge IN {arangodb.ortholog_edges_name}
                FILTER edge.source == "{source}"
                FILTER edge.version != "{version}"
                REMOVE edge IN {arangodb.ortholog_edges_name}
        """
        remove_old_ortholog_nodes = f"""
            FOR node IN {arangodb.ortholog_nodes_name}
                FILTER node.source == "{source}"
                FILTER node.version != "{version}"
                REMOVE node IN {arangodb.ortholog_nodes_name}
        """
        arangodb.aql_query(belns_db, remove_old_ortholog_edges)
        arangodb.aql_query(belns_db, remove_old_ortholog_nodes)

    # Record resource metadata; replace if it already exists
    metadata["_key"] = f"Orthologs_{metadata['metadata']['source']}"
    try:
        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
    except ArangoError:
        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
Load orthologs into ArangoDB
50,901
def orthologs_iterator(fo, version):
    """Yield ortholog node and edge documents for ArangoDB batch loading.

    Args:
        fo: file object for the gzipped orthologs file (one JSON doc per line)
        version: resource version string stamped onto every node/edge

    Yields:
        (collection_name, doc) tuples — two node docs then one edge doc
        per ortholog record.
    """
    # Optional species filter from configuration (empty list = no filtering)
    species_list = config["bel_resources"].get("species_list", [])
    fo.seek(0)
    with gzip.open(fo, "rt") as f:
        for line in f:
            edge = json.loads(line)
            if "metadata" in edge:
                # First record carries file metadata — remember the source name
                source = edge["metadata"]["source"]
                continue
            if "ortholog" in edge:
                edge = edge["ortholog"]
                subj_tax_id = edge["subject"]["tax_id"]
                obj_tax_id = edge["object"]["tax_id"]
                # Skip records whose species are not in the configured list
                if species_list and subj_tax_id and subj_tax_id not in species_list:
                    continue
                if species_list and obj_tax_id and obj_tax_id not in species_list:
                    continue
                subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
                subj_id = edge["subject"]["id"]
                obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
                obj_id = edge["object"]["id"]
                # Subject node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": subj_key,
                        "name": subj_id,
                        "tax_id": edge["subject"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )
                # Object node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": obj_key,
                        "name": obj_id,
                        "tax_id": edge["object"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )
                # Edge connecting the two nodes; _key is a hash of the id pair
                arango_edge = {
                    "_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
                    "_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
                    "_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
                    "type": "ortholog_to",
                    "source": source,
                    "version": version,
                }
                yield (arangodb.ortholog_edges_name, arango_edge)
Ortholog node and edge iterator
50,902
def migrate(belstr: str) -> str:
    """Migrate a BEL 1 statement string to BEL 2.0.0.

    Args:
        belstr: BEL 1 statement string

    Returns:
        migrated BEL 2.0.0 statement string
    """
    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
    migrated_ast = migrate_ast(bo.ast)
    return migrated_ast.to_string()
Migrate BEL 1 to 2.0.0
50,903
def migrate_into_triple(belstr: str) -> str:
    """Migrate a BEL1 assertion into a BEL 2.0.0 subject-relation-object triple.

    Args:
        belstr: BEL 1 statement string

    Returns:
        migrated statement as an SRO triple string
    """
    bo.ast = bel.lang.partialparse.get_ast_obj(belstr, "2.0.0")
    migrated_ast = migrate_ast(bo.ast)
    return migrated_ast.to_triple()
Migrate BEL1 assertion into BEL 2.0.0 SRO triple
50,904
def convert(ast):
    """Convert a BEL1 AST Function to the equivalent BEL2 AST Function.

    Recursively rewrites BEL1-only functions (sub, trunc, pmod, fus, tloc,
    legacy activity functions) into their BEL2 forms.

    Args:
        ast: AST node (may be None or a non-Function node — returned unchanged)

    Returns:
        the converted AST node
    """
    if ast and ast.type == "Function":
        # Legacy activity functions (kin(), cat(), ...) become act(..., ma(...))
        if ast.name != "molecularActivity" and ast.name in spec["namespaces"]["Activity"]["list"]:
            # BUG FIX: removed stray debug print() left in the original
            ast = convert_activity(ast)
            return ast  # convert_activity handles its own children
        elif ast.name in ["tloc", "translocation"]:
            ast = convert_tloc(ast)

        fus_flag = False
        for idx, arg in enumerate(ast.args):
            if arg.__class__.__name__ == "Function":
                if arg.name in ["sub", "substitution"]:
                    ast.args[idx] = convert_sub(arg)
                elif arg.name in ["trunc", "truncation"]:
                    ast.args[idx] = convert_trunc(arg)
                elif arg.name in ["pmod", "proteinModification"]:
                    ast.args[idx] = convert_pmod(arg)
                elif arg.name in ["fus", "fusion"]:
                    fus_flag = True
            # Recurse into every argument
            ast.args[idx] = convert(ast.args[idx])
        if fus_flag:
            ast = convert_fus(ast)
    return ast
Convert BEL1 AST Function to BEL2 AST Function
50,905
def get_client(host=None, port=None, username=None, password=None, enable_logging=True):
    """Get an ArangoDB client.

    Args:
        host/port/username/password: overrides for the configured connection
            settings; falls back to config values, then to defaults
        enable_logging: unused here — presumably kept for interface
            compatibility; TODO confirm

    Returns:
        arango.client.ArangoClient
    """
    host = utils.first_true([host, config["bel_api"]["servers"]["arangodb_host"], "localhost"])
    port = utils.first_true([port, config["bel_api"]["servers"]["arangodb_port"], 8529])
    username = utils.first_true([username, config["bel_api"]["servers"]["arangodb_username"], ""])
    # BUG FIX: previous code did config.get("secrets", <nested lookup>) which
    # returned the entire secrets dict as the password whenever the "secrets"
    # key existed; now drill down safely to the configured password.
    password = utils.first_true(
        [
            password,
            config.get("secrets", {}).get("bel_api", {}).get("servers", {}).get("arangodb_password"),
            "",
        ]
    )
    client = arango.client.ArangoClient(
        protocol=config["bel_api"]["servers"]["arangodb_protocol"], host=host, port=port
    )
    return client
Get arango client and edgestore db handle
50,906
def get_edgestore_handle(
    client: arango.client.ArangoClient,
    username=None,
    password=None,
    edgestore_db_name: str = edgestore_db_name,
    edgestore_edges_name: str = edgestore_edges_name,
    edgestore_nodes_name: str = edgestore_nodes_name,
    edgestore_pipeline_name: str = edgestore_pipeline_name,
    edgestore_pipeline_stats_name: str = edgestore_pipeline_stats_name,
    edgestore_pipeline_errors_name: str = edgestore_pipeline_errors_name,
) -> arango.database.StandardDatabase:
    """Get the Edgestore ArangoDB database handle, creating the database,
    collections, and indexes if they do not exist.

    Args:
        client: connected ArangoDB client
        username/password: credentials; defaults resolved via get_user_creds
        *_name: database/collection names (default to module-level names)

    Returns:
        handle to the edgestore database
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    # Create the database; if it already exists, just open it
    try:
        if username and password:
            edgestore_db = sys_db.create_database(
                name=edgestore_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            edgestore_db = sys_db.create_database(name=edgestore_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            edgestore_db = client.db(edgestore_db_name, username=username, password=password)
        else:
            edgestore_db = client.db(edgestore_db_name)
    # Best-effort collection/index creation — exceptions are treated as
    # "already exists" and swallowed deliberately
    try:
        nodes = edgestore_db.create_collection(edgestore_nodes_name, index_bucket_count=64)
        nodes.add_hash_index(fields=["name"], unique=False)
        nodes.add_hash_index(fields=["components"], unique=False)
    except Exception:
        pass
    try:
        edges = edgestore_db.create_collection(
            edgestore_edges_name, edge=True, index_bucket_count=64
        )
        edges.add_hash_index(fields=["relation"], unique=False)
        edges.add_hash_index(fields=["edge_types"], unique=False)
        edges.add_hash_index(fields=["nanopub_id"], unique=False)
        edges.add_hash_index(fields=["metadata.project"], unique=False)
        edges.add_hash_index(fields=["annotations[*].id"], unique=False)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_name)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_errors_name)
    except Exception:
        pass
    try:
        edgestore_db.create_collection(edgestore_pipeline_stats_name)
    except arango.exceptions.CollectionCreateError as e:
        pass
    return edgestore_db
Get Edgestore arangodb database handle
50,907
def get_belns_handle(client, username=None, password=None):
    """Get the BEL namespace (belns) ArangoDB database handle, creating the
    database, collections, and indexes if they do not exist.

    Args:
        client: connected ArangoDB client
        username/password: credentials; defaults resolved via get_user_creds

    Returns:
        handle to the belns database
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    # Create the database; if it already exists, just open it
    try:
        if username and password:
            belns_db = sys_db.create_database(
                name=belns_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belns_db = sys_db.create_database(name=belns_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belns_db = client.db(belns_db_name, username=username, password=password)
        else:
            belns_db = client.db(belns_db_name)
    # Best-effort collection/index creation — exceptions are treated as
    # "already exists" and swallowed deliberately
    try:
        belns_db.create_collection(belns_metadata_name)
    except Exception:
        pass
    try:
        equiv_nodes = belns_db.create_collection(equiv_nodes_name, index_bucket_count=64)
        equiv_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass
    try:
        belns_db.create_collection(equiv_edges_name, edge=True, index_bucket_count=64)
    except Exception:
        pass
    try:
        ortholog_nodes = belns_db.create_collection(ortholog_nodes_name, index_bucket_count=64)
        ortholog_nodes.add_hash_index(fields=["name"], unique=True)
    except Exception:
        pass
    try:
        belns_db.create_collection(ortholog_edges_name, edge=True, index_bucket_count=64)
    except Exception:
        pass
    return belns_db
Get BEL namespace arango db handle
50,908
def get_belapi_handle(client, username=None, password=None):
    """Get the BEL API ArangoDB database handle, creating the database and
    collections if they do not exist.

    Args:
        client: connected ArangoDB client
        username/password: credentials; defaults resolved via get_user_creds

    Returns:
        handle to the belapi database
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    # Create the database; if it already exists, just open it
    try:
        if username and password:
            belapi_db = sys_db.create_database(
                name=belapi_db_name,
                users=[{"username": username, "password": password, "active": True}],
            )
        else:
            belapi_db = sys_db.create_database(name=belapi_db_name)
    except arango.exceptions.DatabaseCreateError:
        if username and password:
            belapi_db = client.db(belapi_db_name, username=username, password=password)
        else:
            belapi_db = client.db(belapi_db_name)
    # Best-effort collection creation — "already exists" errors swallowed
    try:
        belapi_db.create_collection(belapi_settings_name)
    except Exception:
        pass
    try:
        belapi_db.create_collection(belapi_statemgmt_name)
    except Exception:
        pass
    return belapi_db
Get BEL API arango db handle
50,909
def delete_database(client, db_name, username=None, password=None):
    """Delete an ArangoDB database.

    Args:
        client: connected ArangoDB client
        db_name: name of the database to delete
        username/password: credentials; defaults resolved via get_user_creds

    Returns:
        the delete result, or None if the database did not exist
    """
    (username, password) = get_user_creds(username, password)
    sys_db = client.db("_system", username=username, password=password)
    try:
        return sys_db.delete_database(db_name)
    except Exception:
        # BUG FIX: message was missing the f-prefix and logged the literal
        # text "{db_name}" instead of the database name
        log.warn(f"No arango database {db_name} to delete, does not exist")
Delete Arangodb database
50,910
def batch_load_docs(db, doc_iterator, on_duplicate="replace"):
    """Batch load documents into ArangoDB collections.

    Args:
        db: ArangoDB database handle
        doc_iterator: iterator yielding (collection_name, doc) tuples
        on_duplicate: one of error/update/replace/ignore (import_bulk mode)
    """
    batch_size = 100
    if on_duplicate not in ["error", "update", "replace", "ignore"]:
        log.error(f"Bad parameter for on_duplicate: {on_duplicate}")
        return

    collections = {}
    pending = {}
    counter = 0

    def _flush():
        # Push accumulated docs for every collection and reset the buffers
        for cname in pending:
            collections[cname].import_bulk(
                pending[cname], on_duplicate=on_duplicate, halt_on_error=False
            )
            pending[cname] = []

    for collection_name, doc in doc_iterator:
        if collection_name not in collections:
            collections[collection_name] = db.collection(collection_name)
            pending[collection_name] = []
        counter += 1
        pending[collection_name].append(doc)
        if counter % batch_size == 0:
            log.info(f"Bulk import arangodb: {counter}")
            _flush()

    # Final flush for any remaining docs
    log.info(f"Bulk import arangodb: {counter}")
    _flush()
Batch load documents
50,911
def load_resource(resource_url: str, forceupdate: bool = False):
    """Load a BEL Resource file (namespace or ortholog) from a URL.

    Args:
        resource_url: URL of the gzipped resource file
        forceupdate: force update of namespace terms even if version unchanged

    Returns:
        an error message string on failure, None otherwise
    """
    log.info(f"Loading resource {resource_url}")
    fo = None  # BUG FIX: previously NameError in finally if download raised
    try:
        fo = bel.utils.download_file(resource_url)
        if not fo:
            log.error(f"Could not download and open file {resource_url}")
            return "Failed to download resource_url"
        fo.seek(0)
        with gzip.open(fo, "rt") as f:
            # First line of the file is the metadata object
            metadata = json.loads(f.__next__())
            if "metadata" not in metadata:
                log.error(f"Missing metadata entry for {resource_url}")
                return "Cannot load resource file - missing metadata object in first line of file"
            # Dispatch on resource type
            if metadata["metadata"]["type"] == "namespace":
                bel.resources.namespace.load_terms(fo, metadata, forceupdate)
            elif metadata["metadata"]["type"] == "ortholog":
                bel.resources.ortholog.load_orthologs(fo, metadata)
    finally:
        if fo:
            fo.close()
Load BEL Resource file
50,912
def get_normalized_term(term_id: str, equivalents: list, namespace_targets: dict) -> str:
    """Return the preferred equivalent term id for term_id.

    Args:
        term_id: term id, e.g. "HGNC:AKT1"
        equivalents: list of equivalence dicts with namespace/primary/term_id keys
        namespace_targets: maps source-namespace prefix regexes to lists of
            preferred target namespaces

    Returns:
        the primary equivalent term id in a target namespace, or the
        original term_id if none is found
    """
    if not equivalents:
        return term_id
    for start_ns, target_namespaces in namespace_targets.items():
        if not re.match(start_ns, term_id):
            continue
        for target_ns in target_namespaces:
            for equiv in equivalents:
                # First primary equivalent in the target namespace wins
                if equiv and target_ns in equiv["namespace"] and equiv["primary"]:
                    return equiv["term_id"]
    return term_id
Get normalized term
50,913
def get_labels(term_ids: list) -> dict:
    """Get term labels for the given term ids.

    Args:
        term_ids: list of term id strings

    Returns:
        dict mapping each term id to its label ("" if no term or label found)
    """
    term_labels = {}
    for term_id in term_ids:
        terms = get_terms(term_id)
        # BUG FIX: guard against get_terms() returning no matches
        # (previously raised IndexError on terms[0])
        term_labels[term_id] = terms[0].get("label", "") if terms else ""
    return term_labels
Get term labels given term ids
50,914
def _get_params(self):
    """Generate SOAP parameters.

    Builds an OrderedDict of request parameters: known fields first in
    field_order, unknown keys after, with the computed hash (if set) last.

    NOTE(review): Python 2 code (iteritems, str.decode, u'') — will not run
    under Python 3.
    """
    params = {'accountNumber': self._service.accountNumber}
    # Collect instance attributes that are declared request fields
    for key, val in self.__dict__.iteritems():
        if key in self.field_order:
            if isinstance(val, str,):
                val = val.decode('utf8')
            params[key] = val
    # Fill in empty values for declared fields that were not set
    for key in self.field_order:
        if key not in params:
            params[key] = u''
    def order_keys(k):
        # Known fields sort by their position in field_order; unknown keys last
        if k[0] in self.field_order:
            return self.field_order.index(k[0])
        return len(self.field_order) + 1
    params = OrderedDict(sorted(params.items(), key=order_keys))
    # The hash must be the last parameter in the request
    if hasattr(self, 'hash') and self.hash is not None:
        params['hash'] = self.hash
    return params
Generate SOAP parameters .
50,915
def _generate_hash(self):
    """Generate an MD5 hash over the ordered request parameters.

    Concatenates all parameter values plus the service encryption key and
    stores the hex digest on self.hash.

    NOTE(review): Python 2 code (iteritems; md5 over a str) — will not run
    under Python 3 without encoding.
    """
    # Clear first so _get_params() does not include a stale hash
    self.hash = None
    str_hash = ''
    for key, val in self._get_params().iteritems():
        str_hash += smart_str(val)
    str_hash += self._service.encryption_key
    self.hash = hashlib.md5(str_hash).hexdigest()
Generates a hash based on the specific fields for the method .
50,916
def _send_request(self):
    """Make the SOAP request and convert the result to a dictionary.

    Returns:
        the response dict, or None if the SOAP call failed.

    NOTE(review): Python 2 except syntax ("except WebFault, e") — will not
    parse under Python 3.
    """
    self._generate_hash()
    params = self._get_params()
    try:
        resp = self._endpoint(**params)
        logger.debug(resp)
    except WebFault, e:
        logger.exception('An error occurred while making the SOAP request.')
        return None
    # Convert the XML response into a (normalized) dictionary
    self.response = XmlDictConfig(ElementTree.XML(smart_str(resp)))
    self.response = normalize_dictionary_values(self.response)
    # Log non-OK responses but still return them to the caller
    if self.response['status']['errorCode'] != 'OK':
        logger.error(resp)
    return self.response
Make the SOAP request and convert the result to a dictionary .
50,917
def client_factory(self):
    """Build the SOAP client, honoring proxy settings from the environment.

    Uses the production or testing endpoint depending on service config and
    reads PAYEX_HTTPS_PROXY/https_proxy and PAYEX_HTTP_PROXY/http_proxy.
    """
    url = self.production_url if self._service.production else self.testing_url

    proxy_options = {}
    for scheme, env_names in (
        ('https', ('PAYEX_HTTPS_PROXY', 'https_proxy')),
        ('http', ('PAYEX_HTTP_PROXY', 'http_proxy')),
    ):
        setting = os.environ.get(env_names[0]) or os.environ.get(env_names[1])
        if setting:
            proxy_options[scheme] = setting

    return client.Client(url, proxy=proxy_options)
Custom client factory to set proxy options .
50,918
def get_combination_action(combination):
    """Extract the action for a keyboard combination.

    Only 'link' and 'js' actions are accepted; any other keys the user
    declared are filtered out.

    Args:
        combination: dict describing a key combination

    Returns:
        {action: value} for the first accepted action found, else {}
    """
    for accepted in ('link', 'js'):
        if accepted in combination:
            return {accepted: combination[accepted]}
    return {}
Prepares the action for a keyboard combination also filters another strange actions declared by the user .
50,919
def get_processed_hotkeys(hotkeys=None):
    """Process a list of key-combination dicts into a nested lookup structure.

    Args:
        hotkeys: combinations to process; falls back to settings HOTKEYS

    Returns:
        AutoVivification dict keyed by up to three key codes, whose leaf
        values are the combination actions
    """
    hotkeys = hotkeys or ks_settings.HOTKEYS
    processed_hotkeys = AutoVivification()
    if not hotkeys:
        return processed_hotkeys

    for combination in hotkeys:
        key_codes = get_key_codes(combination['keys'])
        depth = len(key_codes)
        # Nest by key code, up to three keys deep; other lengths are ignored
        if depth == 1:
            processed_hotkeys[key_codes[0]] = get_combination_action(combination)
        elif depth == 2:
            processed_hotkeys[key_codes[0]][key_codes[1]] = get_combination_action(combination)
        elif depth == 3:
            processed_hotkeys[key_codes[0]][key_codes[1]][key_codes[2]] = get_combination_action(combination)
    return processed_hotkeys
Process passed dict with key combinations or the HOTKEYS dict from settings .
50,920
def parse(
    self,
    assertion: Union[str, Mapping[str, str]],
    strict: bool = False,
    parseinfo: bool = False,
    rule_name: str = "start",
    error_level: str = "WARNING",
) -> "BEL":
    """Parse a BEL statement into an AST.

    Args:
        assertion: BEL statement string, or a dict with subject/relation/object
        strict: unused here — presumably reserved for stricter parsing; TODO confirm
        parseinfo: pass Tatsu parseinfo through to the parser
        rule_name: grammar rule to start parsing from
        error_level: unused here — presumably for downstream validation; TODO confirm

    Returns:
        self, with .ast, .parse_valid and .validation_messages populated
    """
    # Reset parse state
    self.ast = None
    self.parse_valid = False
    self.parse_visualize_error = ""
    self.validation_messages = []
    # Accept a dict-shaped assertion and flatten it into a statement string
    if isinstance(assertion, dict):
        if assertion.get("relation", False) and assertion.get("object", False):
            statement = f"{assertion['subject']} {assertion['relation']} {assertion['object']}"
        elif assertion.get("subject"):
            statement = f"{assertion['subject']}"
        else:
            statement = ""
    else:
        statement = assertion
    self.original_bel_stmt = statement
    self.bel_stmt = bel_utils.preprocess_bel_stmt(statement)
    if len(self.bel_stmt) == 0:
        self.validation_messages.append(
            ("ERROR", "Please include a valid BEL statement - found empty string.")
        )
        return self
    try:
        ast_dict = self.parser.parse(
            self.bel_stmt, rule_name=rule_name, trace=False, parseinfo=parseinfo
        )
        self.ast = lang_ast.ast_dict_to_objects(ast_dict, self)
        self.parse_valid = True
    except FailedParse as e:
        # Syntax error — build a visual pointer to the failure location if possible
        error, visualize_error = bel_utils.handle_parser_syntax_error(e)
        self.parse_visualize_error = visualize_error
        if visualize_error:
            self.validation_messages.append(("ERROR", f"{error}\n{visualize_error}"))
        else:
            self.validation_messages.append(("ERROR", f"{error}\nBEL: {self.bel_stmt}"))
        self.ast = None
    except Exception as e:
        # Unexpected failure — record it but still return self
        log.error("Error {}, error type: {}".format(e, type(e)))
        self.validation_messages.append(
            ("ERROR", "Error {}, error type: {}".format(e, type(e)))
        )
    return self
Parse and semantically validate BEL statement
50,921
def canonicalize(self, namespace_targets: Mapping[str, List[str]] = None) -> "BEL":
    """Canonicalize the parsed AST in place.

    Args:
        namespace_targets: unused here — presumably an override for the
            configured canonicalization targets; TODO confirm

    Returns:
        self (no-op when there is no parsed AST)
    """
    ast = self.ast
    if not ast:
        return self
    # NSArg normalizations must be collected before canonicalizing
    if not ast.collected_nsarg_norms:
        self = self.collect_nsarg_norms()
    self.ast.canonicalize()
    return self
Takes an AST and returns a canonicalized BEL statement string .
50,922
def collect_nsarg_norms(self):
    """Add canonical and decanonical values to NSArgs in the AST.

    Marks the AST (and any nested BELAst object) as having collected
    NSArg normalizations, and logs timing for the operation.

    Returns:
        self
    """
    start_time = datetime.datetime.now()
    self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast)
    self.ast.collected_nsarg_norms = True
    # Propagate the flag to a nested BELAst (e.g. a nested statement object)
    if (
        hasattr(self.ast, "bel_object")
        and self.ast.bel_object
        and self.ast.bel_object.type == "BELAst"
    ):
        self.ast.bel_object.collected_nsarg_norms = True
    end_time = datetime.datetime.now()
    delta_ms = f"{(end_time - start_time).total_seconds() * 1000:.1f}"
    log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)
    return self
Adds canonical and decanonical values to NSArgs in AST
50,923
def orthologize(self, species_id: str) -> "BEL":
    """Orthologize the BEL AST to the given species.

    Args:
        species_id: target species id (e.g. "TAX:10090")

    Returns:
        self (no-op when there is no parsed AST)
    """
    if not self.ast:
        return self
    # Ortholog data must be collected before orthologizing
    if not self.ast.collected_orthologs:
        self = self.collect_orthologs([species_id])
    self.ast.species = set()
    self.ast = bel_utils.orthologize(self.ast, self, species_id)
    return self
Orthologize BEL AST to given species_id
50,924
def compute_edges(
    self, rules: List[str] = None, ast_result=False, fmt="medium"
) -> List[Mapping[str, Any]]:
    """Compute edges from the primary BEL statement.

    Args:
        rules: unused here — presumably a rule filter; TODO confirm
        ast_result: when True, return BELAst objects instead of dicts
        fmt: unused here — presumably an output-format selector; TODO confirm

    Returns:
        list of edge dicts with subject/relation/object keys (or ASTs).
        NOTE(review): returns self when there is no AST, which does not match
        the annotated return type — confirm callers handle this.
    """
    if not self.ast:
        return self
    edges_asts = bel.edge.computed.compute_edges(self.ast, self.spec)
    if ast_result:
        return edges_asts
    edges = []
    for ast in edges_asts:
        edges.append(
            {
                "subject": ast.bel_subject.to_string(),
                "relation": ast.bel_relation,
                "object": ast.bel_object.to_string(),
            }
        )
    return edges
Computed edges from primary BEL statement
50,925
def ast_dict_to_objects(ast_dict: Mapping[str, Any], bel_obj) -> BELAst:
    """Convert a Tatsu AST dictionary into a BELAst object.

    Args:
        ast_dict: parser output with optional subject/relation/object keys
        bel_obj: BEL object supplying the language spec

    Returns:
        BELAst built from the converted subject/object function trees
    """
    subject_dict = ast_dict.get("subject", None)
    object_dict = ast_dict.get("object", None)
    relation = ast_dict.get("relation")

    bel_subject = function_ast_to_objects(subject_dict, bel_obj) if subject_dict else None
    bel_object = function_ast_to_objects(object_dict, bel_obj) if object_dict else None

    return BELAst(bel_subject, relation, bel_object, bel_obj.spec)
Convert Tatsu AST dictionary to BEL AST object
50,926
def subcomponents(self, subcomponents):
    """Append string forms of this node's arguments to the accumulator.

    Recurses into primary Function arguments so nested components are
    included as well.

    Args:
        subcomponents: list to append component strings to

    Returns:
        the (mutated) subcomponents list
    """
    for arg in self.args:
        is_function = arg.__class__.__name__ == "Function"
        subcomponents.append(arg.to_string())
        # Only primary functions contribute their own subcomponents
        if is_function and arg.function_type == "primary":
            arg.subcomponents(subcomponents)
    return subcomponents
Generate subcomponents of the BEL subject or object
50,927
def update_nsval(self, *, nsval: str = None, ns: str = None, val: str = None) -> None:
    """Update the namespace and value of this NSArg.

    Args:
        nsval: combined "NS:value" string (split on the first colon)
        ns: namespace (used together with val)
        val: value (used together with ns)
    """
    if not (ns and val) and nsval:
        (ns, val) = nsval.split(":", 1)
    elif not (ns and val) and not nsval:
        log.error("Did not update NSArg - no ns:val or nsval provided")
        # BUG FIX: previously fell through and clobbered namespace/value
        # with None despite logging that no update was made
        return
    self.namespace = ns
    self.value = val
Update Namespace and value of the NSArg.
50,928
def orthologize(self, ortho_species_id, belast):
    """Orthologize this NSArg to the given species (decanonical name used).

    Args:
        ortho_species_id: target species id
        belast: the enclosing BEL AST — flagged partially_orthologized when
            this NSArg has no ortholog for the target species

    Returns:
        self
    """
    # Only swap when an ortholog exists and it's actually a different species
    if (
        self.orthologs
        and ortho_species_id in self.orthologs
        and ortho_species_id != self.species_id
    ):
        self.orthology_species = ortho_species_id
        self.canonical = self.orthologs[ortho_species_id]["canonical"]
        self.decanonical = self.orthologs[ortho_species_id]["decanonical"]
        # The decanonical (human-readable) ortholog name replaces ns/value
        self.update_nsval(nsval=self.decanonical)
        self.orthologized = True
    elif self.species_id and ortho_species_id not in self.orthologs:
        # No ortholog available — mark the whole AST as partially orthologized
        self.orthologized = False
        belast.partially_orthologized = True
    return self
Decanonical ortholog name used
50,929
def convert_csv_str_to_list(csv_str: str) -> list:
    """Convert a BEL Script set string (e.g. '{"a", "b"}') to a list of values.

    Strips the surrounding curly braces, parses the remainder as CSV, and
    trims whitespace/quotes from each value.

    Args:
        csv_str: brace-wrapped, comma-separated value string

    Returns:
        list of cleaned value strings
    """
    # FIX: regex patterns are now raw strings (previously invalid escape
    # sequences like "\s" emitted DeprecationWarnings)
    csv_str = re.sub(r"^\s*{", "", csv_str)
    csv_str = re.sub(r"}\s*$", "", csv_str)
    r = csv.reader([csv_str])
    row = list(r)[0]
    new = []
    for col in row:
        col = re.sub(r'^\s*"?\s*', "", col)
        col = re.sub(r'\s*"?\s*$', "", col)
        new.append(col)
    return new
Convert CSV str to list
50,930
def split_bel_stmt(stmt: str, line_num) -> tuple:
    """Split a BEL statement into a (subject, relation, object) tuple.

    Args:
        stmt: BEL assertion string
        line_num: line number, used for the failure log message

    Returns:
        (subject, relation, object), or (stmt, None, None) when the
        statement cannot be split
    """
    # FIX: the pattern was an f-string with no placeholders (and invalid
    # escape sequences); use a raw regex string instead
    m = re.match(r"^(.*?\))\s+([a-zA-Z=\->\|:]+)\s+([\w(]+.*?)$", stmt, flags=0)
    if m:
        return (m.group(1), m.group(2), m.group(3))
    else:
        log.info(
            f"Could not parse bel statement into components at line number: {line_num} assertion: {stmt}"
        )
        return (stmt, None, None)
Split bel statement into subject relation object tuple
50,931
def yield_nanopub(assertions, annotations, line_num):
    """Build a nanopub dict from the collected assertions and annotations.

    Args:
        assertions: list of BEL assertion strings
        annotations: annotation dict (evidence/statement_group/citation plus
            free-form annotation types)
        line_num: source line number (for split failure messages)

    Returns:
        {"nanopub": {...}} or {} when there are no assertions
    """
    if not assertions:
        return {}

    anno = copy.deepcopy(annotations)
    # These three keys are nanopub top-level fields, not annotations
    evidence = anno.pop("evidence", None)
    stmt_group = anno.pop("statement_group", None)
    citation = anno.pop("citation", None)

    anno_list = []
    for anno_type, anno_val in anno.items():
        if isinstance(anno_val, (list, tuple)):
            anno_list.extend({"type": anno_type, "label": val} for val in anno_val)
        else:
            anno_list.append({"type": anno_type, "label": anno_val})

    assertions_list = []
    for assertion in assertions:
        (subj, rel, obj) = split_bel_stmt(assertion, line_num)
        assertions_list.append({"subject": subj, "relation": rel, "object": obj})

    nanopub = {
        "schema_uri": "https://raw.githubusercontent.com/belbio/schemas/master/schemas/nanopub_bel-1.0.0.yaml",
        "type": copy.deepcopy(nanopub_type),
        "annotations": copy.deepcopy(anno_list),
        "citation": copy.deepcopy(citation),
        "assertions": copy.deepcopy(assertions_list),
        "evidence": evidence,
        "metadata": {"statement_group": stmt_group},
    }
    return {"nanopub": copy.deepcopy(nanopub)}
Yield nanopub object
50,932
def process_documentline(line, nanopubs_metadata):
    """Process a 'SET DOCUMENT <key> = <value>' line from a BEL script.

    Args:
        line: the SET DOCUMENT line
        nanopubs_metadata: metadata dict to update

    Returns:
        the (mutated) nanopubs_metadata dict
    """
    matches = re.match('SET DOCUMENT\s+(\w+)\s+=\s+"?(.*?)"?$', line)
    nanopubs_metadata[matches.group(1)] = matches.group(2)
    return nanopubs_metadata
Process SET DOCUMENT line in BEL script
50,933
def process_definition(line, nanopubs_metadata):
    """Process a DEFINE NAMESPACE/ANNOTATION line in a BEL script.

    Handles both 'DEFINE <type> <key> AS URL "<url>"' and
    'DEFINE <type> <key> AS LIST {...}' forms.

    Args:
        line: the DEFINE line
        nanopubs_metadata: metadata dict to update

    Returns:
        the (mutated) nanopubs_metadata dict
    """

    def _plural(def_type: str) -> str:
        # BUG FIX: the original used '==' instead of '=' for the
        # annotation case, so 'annotation' was never pluralized
        if def_type == "namespace":
            return "namespaces"
        if def_type == "annotation":
            return "annotations"
        return def_type

    matches = re.match('DEFINE\s+(\w+)\s+(\w+)\s+AS\s+URL\s+"(.*?)"\s*$', line)
    if matches:
        def_type = _plural(matches.group(1).lower())
        key = matches.group(2)
        val = matches.group(3)
        nanopubs_metadata.setdefault(def_type, {})[key] = val

    matches = re.match("DEFINE\s+(\w+)\s+(\w+)\s+AS\s+LIST\s+{(.*?)}\s*$", line)
    if matches:
        def_type = _plural(matches.group(1).lower())
        key = matches.group(2)
        vals = convert_csv_str_to_list(matches.group(3))
        nanopubs_metadata.setdefault(def_type, {})[key] = vals

    return nanopubs_metadata
Process DEFINE line in BEL script
50,934
def process_unset(line, annotations):
    """Process an UNSET line in a BEL Script.

    'UNSET ALL' / 'UNSET STATEMENT_GROUP' clears all annotations; a
    brace-wrapped list removes each named annotation; otherwise the single
    named annotation is removed.

    Args:
        line: the UNSET line
        annotations: current annotations dict

    Returns:
        the updated annotations dict
    """
    matches = re.match('UNSET\s+"?(.*?)"?\s*$', line)
    if not matches:
        log.warn(f"Problem with UNSET line: {line}")
        return annotations

    val = matches.group(1)
    if val in ("ALL", "STATEMENT_GROUP"):
        return {}
    if re.match("{", val):
        for name in convert_csv_str_to_list(val):
            annotations.pop(name, None)
    else:
        annotations.pop(val, None)
    return annotations
Process UNSET lines in BEL Script
50,935
def process_set(line, annotations):
    """Convert a SET line into nanopub_bel annotation entries.

    STATEMENT_GROUP, Citation and Support/Evidence get dedicated keys;
    brace-wrapped values are parsed as lists; everything else is stored
    verbatim under its annotation type.

    Args:
        line: the SET line
        annotations: current annotations dict

    Returns:
        the (mutated) annotations dict
    """
    matches = re.match('SET\s+(\w+)\s*=\s*"?(.*?)"?\s*$', line)
    if not matches:
        return annotations

    key, val = matches.group(1), matches.group(2)
    if key == "STATEMENT_GROUP":
        annotations["statement_group"] = val
    elif key == "Citation":
        annotations["citation"] = process_citation(val)
    elif key.lower() in ("support", "evidence"):
        annotations["evidence"] = val
    elif re.match("\s*{.*?}", val):
        annotations[key] = convert_csv_str_to_list(val)
    else:
        annotations[key] = val
    return annotations
Convert annotations into nanopub_bel annotations format
50,936
def preprocess_belscript ( lines ) : set_flag = False for line in lines : if set_flag is False and re . match ( "SET" , line ) : set_flag = True set_line = [ line . rstrip ( ) ] elif set_flag and re . match ( "SET" , line ) : yield f"{' '.join(set_line)}\n" set_line = [ line . rstrip ( ) ] elif set_flag and re . match ( "\s+$" , line ) : yield f"{' '.join(set_line)}\n" yield line set_flag = False elif set_flag : set_line . append ( line . rstrip ( ) ) else : yield line
Convert any multi - line SET statements into single line SET statements
50,937
def parse_belscript(lines):
    """Parse BEL Script lines into metadata and nanopub objects.

    Args:
        lines: iterator or list of BEL Script lines

    Yields:
        metadata objects (via yield_metadata) and nanopub objects
        (via yield_nanopub)
    """
    nanopubs_metadata = {}
    annotations = {}
    assertions = []
    line_num = 0

    for line in set_single_line(lines):
        line_num += 1
        # Strip // comments and trailing whitespace
        line = re.sub("\/\/.*?$", "", line)
        line = line.rstrip()
        # Join continuation lines (trailing backslash)
        while re.search("\\\s*$", line):
            line = line.replace("\\", "") + next(lines)

        if re.match("\s*#", line) or re.match("\s*$", line):
            # Comment or blank line
            continue
        elif re.match("SET DOCUMENT", line):
            nanopubs_metadata = process_documentline(line, nanopubs_metadata)
        elif re.match("DEFINE", line):
            nanopubs_metadata = process_definition(line, nanopubs_metadata)
        elif re.match("UNSET", line):
            # Finish any pending nanopub before changing annotations
            if assertions:
                yield yield_nanopub(assertions, annotations, line_num)
                assertions = []
            annotations = process_unset(line, annotations)
        elif re.match("SET", line):
            # First SET after the document header: emit metadata
            if nanopubs_metadata:
                yield yield_metadata(nanopubs_metadata)
                nanopubs_metadata = {}
            if assertions:
                yield yield_nanopub(assertions, annotations, line_num)
                assertions = []
            annotations = process_set(line, annotations)
        else:
            assertions.append(line)

    # BUG FIX: the final nanopub was built but never yielded
    if assertions:
        yield yield_nanopub(assertions, annotations, line_num)
Lines from the BELScript - can be an iterator or list
50,938
def __stringify_body(self, request_or_response):
    """Convert the tracked request/response body to a displayable string.

    Args:
        request_or_response: "request" or "response" — selects which
            __track_info keys are read/written.

    NOTE: this method is referenced from httprunner.
    """
    headers = self.__track_info['{}_headers'.format(request_or_response)]
    body = self.__track_info.get('{}_body'.format(request_or_response))
    if isinstance(body, CaseInsensitiveDict):
        body = json.dumps(dict(body), ensure_ascii=False)
    elif isinstance(body, (dict, list)):
        body = json.dumps(body, indent=2, ensure_ascii=False)
    elif isinstance(body, bytes):
        resp_content_type = headers.get("Content-Type", "")
        try:
            if "image" in resp_content_type:
                # Embed images as a base64 data URI for display
                self.__track_info["response_data_type"] = "image"
                body = "data:{};base64,{}".format(
                    resp_content_type, b64encode(body).decode('utf-8')
                )
            else:
                body = escape(body.decode("utf-8"))
        except UnicodeDecodeError:
            # Leave undecodable byte bodies untouched
            pass
    elif not isinstance(body, (basestring, numeric_types, Iterable)):
        # NOTE(review): `basestring` is Python 2 only — confirm target runtime
        body = repr(body)
    self.__track_info['{}_body'.format(request_or_response)] = body
This method is referenced from HttpRunner.
50,939
def read_nanopubs(fn: str) -> Iterable[Mapping[str, Any]]:
    """Read a nanopub file and yield nanopub dicts.

    Format is inferred from the filename: jsonl ("-" for stdin is jsonl),
    json, or yaml; gzipped files ("*.gz") are handled transparently.

    Args:
        fn: filename or "-" for stdin

    Yields:
        nanopub dicts
    """
    jsonl_flag, json_flag, yaml_flag = False, False, False
    if fn == "-" or "jsonl" in fn:
        jsonl_flag = True
    elif "json" in fn:
        json_flag = True
    elif re.search("ya?ml", fn):
        yaml_flag = True
    else:
        log.error("Do not recognize nanopub file format - neither json nor jsonl format.")
        # NOTE(review): return value of a generator is discarded — this just
        # ends the iteration with no items
        return {}
    try:
        if re.search("gz$", fn):
            f = gzip.open(fn, "rt")
        else:
            try:
                f = click.open_file(fn, mode="rt")
            except Exception as e:
                log.info(f"Can not open file {fn} Error: {e}")
                # NOTE(review): quit() terminates the interpreter — confirm
                # this hard-exit is intended rather than raising
                quit()
        if jsonl_flag:
            for line in f:
                yield json.loads(line)
        elif json_flag:
            nanopubs = json.load(f)
            for nanopub in nanopubs:
                yield nanopub
        elif yaml_flag:
            nanopubs = yaml.load(f, Loader=yaml.SafeLoader)
            for nanopub in nanopubs:
                yield nanopub
    except Exception as e:
        # Best-effort: log and end iteration on any read/parse failure
        log.error(f"Could not open file: {fn}")
Read file and generate nanopubs
50,940
def create_nanopubs_fh(output_fn: str):
    """Create the nanopubs output filehandle and format flags.

    Args:
        output_fn: output filename; falsy means stdout. "*.gz" is opened
            gzipped; format flags are inferred from the name.

    Returns:
        (out_fh, yaml_flag, jsonl_flag, json_flag)
    """
    yaml_flag = jsonl_flag = json_flag = False

    if not output_fn:
        return (sys.stdout, yaml_flag, jsonl_flag, json_flag)

    if re.search("gz$", output_fn):
        out_fh = gzip.open(output_fn, "wt")
    else:
        out_fh = click.open_file(output_fn, mode="wt")

    if re.search("ya?ml", output_fn):
        yaml_flag = True
    elif "jsonl" in output_fn or "-" == output_fn:
        jsonl_flag = True
    elif "json" in output_fn:
        json_flag = True

    return (out_fh, yaml_flag, jsonl_flag, json_flag)
Create Nanopubs output filehandle
50,941
def write_edges(
    edges: Mapping[str, Any],
    filename: str,
    jsonlines: bool = False,
    gzipflag: bool = False,
    yaml: bool = False,
):
    """Write edges to file.

    NOTE(review): unimplemented stub — the parameters suggest jsonlines,
    gzip, and yaml output options are planned but nothing is written yet.
    TODO: implement or remove.
    """
    pass
Write edges to file
50,942
def add_index_alias(es, index_name, alias_name):
    """Point an Elasticsearch alias at the given index.

    Args:
        es: Elasticsearch client
        index_name: index the alias should point to
        alias_name: name of the alias to create/update
    """
    # BUG FIX: previously passed the module-level `terms_alias` instead of
    # the `alias_name` parameter, ignoring the caller's argument
    es.indices.put_alias(index=index_name, name=alias_name)
Add index alias to index_name
50,943
def delete_index(es, index_name: str):
    """Delete an Elasticsearch index by name.

    Args:
        es: Elasticsearch client
        index_name: name of the index to delete

    Returns:
        the delete result, or None when no index name was given
    """
    if not index_name:
        log.warn("No index name given to delete")
        return None
    return es.indices.delete(index=index_name)
Delete the terms index
50,944
def create_terms_index(es, index_name: str):
    """Create a terms index using the terms mapping file.

    Args:
        es: Elasticsearch client
        index_name: name of the index to create
    """
    # Load the index mappings from the configured YAML file
    with open(mappings_terms_fn, "r") as f:
        mappings_terms = yaml.load(f, Loader=yaml.SafeLoader)
        try:
            es.indices.create(index=index_name, body=mappings_terms)
        except Exception as e:
            log.error(f"Could not create elasticsearch terms index: {e}")
Create terms index
50,945
def delete_terms_indexes(es, index_name: str = "terms_*"):
    """Delete all terms indexes matching the given pattern.

    Args:
        es: Elasticsearch client
        index_name: index name or wildcard pattern (defaults to all terms
            indexes)
    """
    try:
        es.indices.delete(index=index_name)
    except Exception as e:
        # Best-effort: log and continue if the delete fails
        log.error(f"Could not delete all terms indices: {e}")
Delete all terms indexes
50,946
def bulk_load_docs(es, docs):
    """Bulk load documents into Elasticsearch.

    Args:
        es: Elasticsearch client
        docs: iterable of document dicts (elasticsearch.helpers.bulk actions)
    """
    chunk_size = 200
    try:
        results = elasticsearch.helpers.bulk(es, docs, chunk_size=chunk_size)
        # results = (number_loaded, [per-document errors])
        log.debug(f"Elasticsearch documents loaded: {results[0]}")
        if len(results[1]) > 0:
            log.error("Bulk load errors {}".format(results))
    except elasticsearch.ElasticsearchException as e:
        log.error("Indexing error: {}\n".format(e))
Bulk load docs
50,947
def validate(bo, error_level: str = "WARNING"):
    """Semantically validate a BEL AST.

    Runs function-signature validation and (at WARNING level) argument-value
    validation; marks the BEL object invalid when any ERROR message exists.

    Args:
        bo: BEL object with .ast, .validation_messages, .parse_valid
        error_level: "WARNING" also runs argument-value validation

    Returns:
        the BEL object.  FIX: the annotation previously claimed
        Tuple[bool, List[Tuple[str, str]]] but the function returns bo.
    """
    if bo.ast:
        bo = validate_functions(bo.ast, bo)
        if error_level == "WARNING":
            bo = validate_arg_values(bo.ast, bo)
    else:
        bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse"))

    # Any ERROR-level message makes the whole statement invalid
    if any(msg[0] == "ERROR" for msg in bo.validation_messages):
        bo.parse_valid = False
    return bo
Semantically validate BEL AST
50,948
def validate_functions(ast: BELAst, bo):
    """Recursively validate BEL function signatures throughout the AST."""
    if isinstance(ast, Function):
        log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}")
        signatures = bo.spec["functions"]["signatures"][ast.name]["signatures"]
        valid_function, messages = check_function_args(ast.args, signatures, ast.name)
        if not valid_function:
            bo.validation_messages.append(
                (
                    "ERROR",
                    "Invalid BEL Statement function {} - problem with function signatures: {}".format(
                        ast.to_string(), ", ".join(messages)
                    ),
                )
            )
            bo.parse_valid = False

    # recurse into any child arguments
    if hasattr(ast, "args"):
        for child in ast.args:
            validate_functions(child, bo)
    return bo
Recursively validate function signatures
50,949
def get_belbio_conf_files():
    """Locate the belbio configuration and secrets files.

    Search order: current working directory, $BELBIO_CONF directory,
    then ~/.belbio. Returns (conf_path, secrets_path); either may be "".
    """
    home = os.path.expanduser("~")
    cwd = os.getcwd()
    env_conf_dir = os.getenv("BELBIO_CONF", "").rstrip("/")

    def _first_existing(paths):
        # first path that exists on disk, else empty string
        for path in paths:
            if os.path.exists(path):
                return path
        return ""

    belbio_conf_fp = _first_existing(
        [
            f"{cwd}/belbio_conf.yaml",
            f"{cwd}/belbio_conf.yml",
            f"{env_conf_dir}/belbio_conf.yaml",
            f"{env_conf_dir}/belbio_conf.yml",
            f"{home}/.belbio/conf",
        ]
    )
    if not belbio_conf_fp:
        log.error(
            "No BELBio configuration file found - please add one (see http://bel.readthedocs.io/en/latest/configuration.html)"
        )

    belbio_secrets_fp = _first_existing(
        [
            f"{cwd}/belbio_secrets.yaml",
            f"{cwd}/belbio_secrets.yml",
            f"{env_conf_dir}/belbio_secrets.yaml",
            f"{env_conf_dir}/belbio_secrets.yml",
            f"{home}/.belbio/secrets",
        ]
    )
    return (belbio_conf_fp, belbio_secrets_fp)
Get belbio configuration from files
50,950
def load_configuration():
    """Load the belbio configuration (and secrets) from the located files."""
    (belbio_conf_fp, belbio_secrets_fp) = get_belbio_conf_files()
    log.info(f"Using conf: {belbio_conf_fp} and secrets files: {belbio_secrets_fp} ")

    config = {}
    if belbio_conf_fp:
        with open(belbio_conf_fp, "r") as fh:
            config = yaml.load(fh, Loader=yaml.SafeLoader)
        # record where the config came from
        config["source_files"] = {"conf": belbio_conf_fp}

    if belbio_secrets_fp:
        with open(belbio_secrets_fp, "r") as fh:
            secrets = yaml.load(fh, Loader=yaml.SafeLoader)
        config["secrets"] = copy.deepcopy(secrets)
        if "source_files" in config:
            config["source_files"]["secrets"] = belbio_secrets_fp

    get_versions(config)
    return config
Load the configuration
50,951
def get_versions(config) -> dict:
    """Record versions of the bel package, bel_resources tools and BELBIO API
    into `config` (mutated in place).

    Each import is best-effort: a missing module is silently skipped, and a
    missing config section is created on KeyError.

    BUG FIX: annotated `-> dict` but previously returned None; now returns
    the (mutated) config so the annotation holds. Callers that ignored the
    return value are unaffected.
    """
    try:
        import bel.__version__

        config["bel"]["version"] = bel.__version__.__version__
    except KeyError:
        config["bel"] = {"version": bel.__version__.__version__}
    except ModuleNotFoundError:
        pass

    try:
        import tools.__version__

        config["bel_resources"]["version"] = tools.__version__.__version__
    except KeyError:
        config["bel_resources"] = {"version": tools.__version__.__version__}
    except ModuleNotFoundError:
        pass

    try:
        import __version__

        if __version__.__name__ == "BELBIO API":
            config["bel_api"]["version"] = __version__.__version__
    except KeyError:
        if __version__.__name__ == "BELBIO API":
            config["bel_api"] = {"version": __version__.__version__}
    except ModuleNotFoundError:
        pass

    return config
Get versions of bel modules and tools
50,952
def add_environment_vars(config: MutableMapping[str, Any]):
    """Override config values with BELBIO_* environment variables.

    An env var BELBIO_KEY1__KEY2 sets config["key1"]["key2"]; a single-level
    BELBIO_KEY sets config["key"]. Keys are lowercased; double underscores
    denote nesting.

    BUG FIXES vs original:
    - `e.replace("BELBIO_", "")` discarded its result, so the prefix leaked
      into the config key; the prefix is now actually stripped.
    - nested assignment used `eval()` on an assignment statement, which is a
      SyntaxError at runtime (eval only accepts expressions); replaced with a
      plain nested-dict walk, which also removes the eval security hazard.
    - the warn message lacked the f-prefix, logging the literal "{e}".
    """
    for name in os.environ:
        if not re.match("BELBIO_", name):
            continue
        val = os.environ.get(name)
        if not val:
            continue
        keys = name.replace("BELBIO_", "", 1).lower().split("__")
        try:
            target = config
            for key in keys[:-1]:
                target = target.setdefault(key, {})
            target[keys[-1]] = val
        except Exception:
            log.warn(f"Cannot process {name} into config")
Override config with environment variables
50,953
def merge_config ( config : Mapping [ str , Any ] , override_config : Mapping [ str , Any ] = None , override_config_fn : str = None , ) -> Mapping [ str , Any ] : if override_config_fn : with open ( override_config_fn , "r" ) as f : override_config = yaml . load ( f , Loader = yaml . SafeLoader ) if not override_config : log . info ( "Missing override_config" ) return functools . reduce ( rec_merge , ( config , override_config ) )
Override config with additional configuration in override_config or override_config_fn
50,954
def rec_merge(d1, d2):
    """Recursively merge two dictionaries; d2's values win on conflicts,
    except nested mappings, which are merged recursively.

    Returns a new top-level dict (nested dicts in d2 are mutated in place,
    as in the original).

    BUG FIX: `collections.MutableMapping` was removed in Python 3.10; the
    correct location is `collections.abc.MutableMapping`.
    """
    for k, v in d1.items():
        if k in d2 and all(
            isinstance(e, collections.abc.MutableMapping) for e in (v, d2[k])
        ):
            d2[k] = rec_merge(v, d2[k])
    merged = d1.copy()
    merged.update(d2)
    return merged
Recursively merge two dictionaries
50,955
def load_terms(fo: IO, metadata: dict, forceupdate: bool):
    """Load terms into Elasticsearch and term equivalences into ArangoDB.

    Args:
        fo: gzipped JSON-lines file object of term records
        metadata: resource metadata; metadata["metadata"] must contain
            "version" and "namespace"
        forceupdate: rebuild the index (under an "_alt" name) even if an
            index for this version already exists
    """
    version = metadata["metadata"]["version"]

    # ----- Elasticsearch: terms index -----
    with timy.Timer("Load Terms") as timer:
        es = bel.db.elasticsearch.get_client()
        # version timestamp stripped to digits for use in the index name
        es_version = version.replace("T", "").replace("-", "").replace(":", "")
        index_prefix = f"terms_{metadata['metadata']['namespace'].lower()}"
        index_name = f"{index_prefix}_{es_version}"
        if not elasticsearch.index_exists(es, index_name):
            elasticsearch.create_terms_index(es, index_name)
        elif forceupdate:
            # index for this version exists - rebuild under an alternate name
            index_name += "_alt"
            elasticsearch.create_terms_index(es, index_name)
        else:
            # already loaded and no force - nothing to do
            return
        terms_iterator = terms_iterator_for_elasticsearch(fo, index_name)
        elasticsearch.bulk_load_docs(es, terms_iterator)
        # remove any older indexes for this namespace prefix
        index_names = elasticsearch.get_all_index_names(es)
        for name in index_names:
            if name != index_name and index_prefix in name:
                elasticsearch.delete_index(es, name)
        elasticsearch.add_index_alias(es, index_name, terms_alias)
        log.info(
            "Load namespace terms",
            elapsed=timer.elapsed,
            namespace=metadata["metadata"]["namespace"],
        )

    # ----- ArangoDB: term equivalences -----
    with timy.Timer("Load Term Equivalences") as timer:
        arango_client = arangodb.get_client()
        belns_db = arangodb.get_belns_handle(arango_client)
        arangodb.batch_load_docs(
            belns_db, terms_iterator_for_arangodb(fo, version), on_duplicate="update"
        )
        log.info(
            "Loaded namespace equivalences",
            elapsed=timer.elapsed,
            namespace=metadata["metadata"]["namespace"],
        )
        # NOTE(review): these two AQL query f-strings appear truncated in this
        # copy of the source ("= f" with no template) - the original presumably
        # removed stale-version equivalence edges/nodes; restore before use.
        remove_old_equivalence_edges = f
        remove_old_equivalence_nodes = f
        arangodb.aql_query(belns_db, remove_old_equivalence_edges)
        arangodb.aql_query(belns_db, remove_old_equivalence_nodes)

    # upsert resource metadata document
    metadata["_key"] = f"Namespace_{metadata['metadata']['namespace']}"
    try:
        belns_db.collection(arangodb.belns_metadata_name).insert(metadata)
    except ArangoError as ae:
        belns_db.collection(arangodb.belns_metadata_name).replace(metadata)
Load terms into Elasticsearch and ArangoDB
50,956
def terms_iterator_for_elasticsearch(fo: IO, index_name: str):
    """Yield Elasticsearch bulk-index actions for each term in fo.

    Skips terms whose species is not in the configured species_list (when
    one is configured) and augments alt_ids with lowercase variants.
    """
    species_list = config["bel_resources"].get("species_list", [])

    fo.seek(0)
    with gzip.open(fo, "rt") as fh:
        for line in fh:
            record = json.loads(line)
            if "term" not in record:
                continue
            term = record["term"]

            species_id = term.get("species_id", None)
            if species_list and species_id and species_id not in species_list:
                continue

            # collect id + alt_ids, plus lowercase forms of each
            id_set = set()
            for term_id in [term["id"]] + term.get("alt_ids", []):
                id_set.add(term_id)
                id_set.add(lowercase_term_id(term_id))
            term["alt_ids"] = copy.copy(list(id_set))

            yield {
                "_op_type": "index",
                "_index": index_name,
                "_type": "term",
                "_id": term["id"],
                "_source": copy.deepcopy(term),
            }
Add index_name to term documents for bulk load
50,957
def get_pubtator(pmid):
    """Get Pubtator Bioconcepts for a Pubmed abstract.

    Fetches Pubtator denotations for the given PMID, converts their
    namespaces via the pubtator_*_convert maps, then groups them by
    converted id into pubtator["annotations"].

    Returns the enriched pubtator dict, or None if the service call fails.
    """
    r = get_url(PUBTATOR_TMPL.replace("PMID", pmid), timeout=10)
    if r and r.status_code == 200:
        pubtator = r.json()[0]
    else:
        log.error(
            f"Cannot access Pubtator, status: {r.status_code} url: {PUBTATOR_TMPL.replace('PMID', pmid)}"
        )
        return None

    known_types = ["CHEBI", "Chemical", "Disease", "Gene", "Species"]

    # normalize each denotation's "obj" (2- or 3-part colon-separated id)
    for idx, anno in enumerate(pubtator["denotations"]):
        s_match = re.match(r"(\w+):(\w+)", anno["obj"])
        c_match = re.match(r"(\w+):(\w+):(\w+)", anno["obj"])
        if c_match:
            # three-part form: type:namespace:id
            (ctype, namespace, cid) = (
                c_match.group(1),
                c_match.group(2),
                c_match.group(3),
            )
            if ctype not in known_types:
                log.info(f"{ctype} not in known_types for Pubtator")
            if namespace not in known_types:
                log.info(f"{namespace} not in known_types for Pubtator")
            pubtator["denotations"][idx]["obj"] = f'{pubtator_ns_convert.get(namespace, "UNKNOWN")}:{cid}'
            pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(ctype, None)
            pubtator["denotations"][idx]["annotation_type"] = pubtator_annotation_convert.get(ctype, None)
        elif s_match:
            # two-part form: type:id
            (ctype, cid) = (s_match.group(1), s_match.group(2))
            if ctype not in known_types:
                log.info(f"{ctype} not in known_types for Pubtator")
            pubtator["denotations"][idx]["obj"] = f'{pubtator_ns_convert.get(ctype, "UNKNOWN")}:{cid}'
            pubtator["denotations"][idx]["entity_type"] = pubtator_entity_convert.get(ctype, None)
            pubtator["denotations"][idx]["annotation_type"] = pubtator_annotation_convert.get(ctype, None)

    # group denotations by converted id
    annotations = {}
    for anno in pubtator["denotations"]:
        log.info(anno)
        if anno["obj"] not in annotations:
            annotations[anno["obj"]] = {"spans": [anno["span"]]}
            annotations[anno["obj"]]["entity_types"] = [anno.get("entity_type", [])]
            annotations[anno["obj"]]["annotation_types"] = [anno.get("annotation_type", [])]
        else:
            # NOTE(review): only spans accumulate for duplicates; entity/annotation
            # types from later occurrences are dropped - confirm intended
            annotations[anno["obj"]]["spans"].append(anno["span"])

    del pubtator["denotations"]
    pubtator["annotations"] = copy.deepcopy(annotations)
    return pubtator
Get Pubtator Bioconcepts from Pubmed Abstract
50,958
def process_pub_date(year, mon, day):
    """Build a YYYY-MM-DD publication date from Pubmed Journal PubDate fields.

    Month names (e.g. "Jan") are parsed; numeric months pass through as-is.
    Returns None when no year is available.
    """
    if not year:
        return None
    if re.match("[a-zA-Z]+", mon):
        parsed = datetime.datetime.strptime(f"{year}-{mon}-{day}", "%Y-%b-%d")
        return parsed.strftime("%Y-%m-%d")
    return f"{year}-{mon}-{day}"
Create pub_date from what Pubmed provides in Journal PubDate entry
50,959
def enhance_pubmed_annotations(pubmed: Mapping[str, Any]) -> Mapping[str, Any]:
    """Enhance pubmed annotation namespace IDs via the BEL API terms endpoint.

    For each annotation nsarg, fetches the term, merges entity/annotation
    types, re-keys the annotation under the decanonicalized id, and attaches
    the matched text for each span. Mutates and returns `pubmed`.
    """
    text = pubmed["title"] + pubmed["abstract"]

    annotations = {}

    for nsarg in pubmed["annotations"]:
        url = f'{config["bel_api"]["servers"]["api_url"]}/terms/{url_path_param_quoting(nsarg)}'
        log.info(f"URL: {url}")
        r = get_url(url)
        log.info(f"Result: {r}")
        new_nsarg = ""
        if r and r.status_code == 200:
            term = r.json()
            # convert to the non-canonical (display) namespace form
            new_nsarg = bel_utils.convert_nsarg(term["id"], decanonicalize=True)

            pubmed["annotations"][nsarg]["name"] = term["name"]
            pubmed["annotations"][nsarg]["label"] = term["label"]
            pubmed["annotations"][nsarg]["entity_types"] = list(
                set(pubmed["annotations"][nsarg]["entity_types"] + term.get("entity_types", []))
            )
            pubmed["annotations"][nsarg]["annotation_types"] = list(
                set(pubmed["annotations"][nsarg]["annotation_types"] + term.get("annotation_types", []))
            )

        # re-key under the decanonicalized id when it differs
        if new_nsarg != nsarg:
            annotations[new_nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])
        else:
            annotations[nsarg] = copy.deepcopy(pubmed["annotations"][nsarg])

    for nsarg in annotations:
        for idx, span in enumerate(annotations[nsarg]["spans"]):
            # NOTE(review): spans appear to be 1-based inclusive offsets
            # ([begin-1 : end-1] slice) - confirm against the annotation source
            string = text[span["begin"] - 1 : span["end"] - 1]
            annotations[nsarg]["spans"][idx]["text"] = string

    pubmed["annotations"] = copy.deepcopy(annotations)
    return pubmed
Enhance pubmed namespace IDs
50,960
def get_orthologs(canonical_gene_id: str, species: list = []) -> List[dict]:
    """Get orthologs for the given canonical gene id, optionally restricted
    to a list of species tax ids.

    Returns a dict keyed by tax_id with canonical/decanonical term forms
    (NOTE: despite the List[dict] annotation, a dict is returned).
    """
    gene_id_key = bel.db.arangodb.arango_id_to_key(canonical_gene_id)

    orthologs = {}
    if species:
        # injected into the AQL query below to restrict by taxonomy
        query_filter = f"FILTER vertex.tax_id IN {species}"
    # NOTE(review): the AQL query f-string appears truncated in this copy of
    # the source ("query = f" with no template) - it presumably traverses
    # ortholog edges from gene_id_key using query_filter; restore before use.
    query = f
    cursor = belns_db.aql.execute(query, batch_size=20)
    results = cursor.pop()
    for ortholog in results["orthologs"]:
        norms = bel.terms.terms.get_normalized_terms(ortholog["name"])
        orthologs[ortholog["tax_id"]] = {
            "canonical": norms["canonical"],
            "decanonical": norms["decanonical"],
        }
    return orthologs
Get orthologs for given gene_id and species
50,961
def normalize_value(val):
    """Convert 'true'/'false' strings (any case) into Python booleans.

    Any other value (including None) is returned unchanged.
    """
    if val is None:
        return val
    lowered = val.lower()
    if lowered == 'false':
        return False
    if lowered == 'true':
        return True
    return val
Normalize strings with booleans into Python types .
50,962
def normalize_dictionary_values(dictionary):
    """Normalize the values in a dictionary recursively.

    Nested dicts are normalized recursively; other values go through
    normalize_value(). Mutates and returns `dictionary`.

    BUG FIX: `.iteritems()` is Python 2 only and raises AttributeError on
    Python 3 (this codebase uses f-strings, i.e. Python 3.6+); use .items().
    """
    for key, val in dictionary.items():
        if isinstance(val, dict):
            dictionary[key] = normalize_dictionary_values(val)
        elif isinstance(val, list):
            # NOTE(review): list elements are shallow-copied, not normalized —
            # confirm this is intended
            dictionary[key] = list(val)
        else:
            dictionary[key] = normalize_value(val)
    return dictionary
Normalizes the values in a dictionary recursively.
50,963
def timespan(start_time):
    """Return elapsed time in milliseconds since start_time (a datetime)."""
    delta = datetime.datetime.now() - start_time
    return delta.total_seconds() * 1000
Return time in milliseconds from start_time
50,964
def first_true(iterable, default=False, pred=None):
    """Return the first value in iterable for which pred is true.

    With pred=None, returns the first truthy value. Returns `default` when
    nothing matches.
    """
    for item in iterable:
        if pred(item) if pred is not None else item:
            return item
    return default
Returns the first true value in the iterable .
50,965
def _create_hash_from_doc(doc: Mapping[str, Any]) -> str:
    """Create a hash id from an edge document (key-order independent)."""
    # sort_keys makes the serialization, and therefore the hash, deterministic
    return _create_hash(json.dumps(doc, sort_keys=True))
Create hash Id from edge record
50,966
def elapsed(self):
    """Return the elapsed time since start, scaled by self.factor.

    Inside the context manager scope (self.end is None) this measures up to
    "now" (self() is the clock); after exit it measures start-to-end, so it
    can be read at multiple points to time different parts of the block.
    """
    end_point = self() if self.end is None else self.end
    return (end_point - self.start) * self.factor
Return the current elapsed time since start. If the elapsed property is accessed within the context manager scope, the elapsed time between start and the property access is returned. However, if it is accessed outside of the context manager scope, it returns the elapsed time between entering and exiting the scope. The elapsed property can thus be accessed at different points within the context manager scope to time different parts of the block.
50,967
def load_edges_into_db(
    nanopub_id: str,
    nanopub_url: str,
    edges: list = [],
    edges_coll_name: str = edges_coll_name,
    nodes_coll_name: str = nodes_coll_name,
):
    """Load edges (and their nodes) into the ArangoDB edgestore.

    First deletes any edges / pipeline errors previously derived from this
    nanopub, then bulk-imports the new nodes and edges, logging per-phase
    timings throughout.
    """
    start_time = datetime.datetime.now()

    # NOTE(review): both AQL query f-strings below appear truncated in this
    # copy of the source ("query = f" with no template) - presumably they
    # delete edges / pipeline errors matching nanopub_id; restore before use.
    query = f
    try:
        edgestore_db.aql.execute(query)
    except Exception as e:
        log.debug(f"Could not remove nanopub-related edges: {query} msg: {e}")

    end_time1 = datetime.datetime.now()
    delta_ms = f"{(end_time1 - start_time).total_seconds() * 1000:.1f}"
    log.info("Timing - Delete edges for nanopub", delta_ms=delta_ms)

    query = f
    try:
        edgestore_db.aql.execute(query)
    except Exception as e:
        log.debug(f"Could not remove nanopub-related errors: {query} msg: {e}")

    end_time2 = datetime.datetime.now()
    delta_ms = f"{(end_time2 - end_time1).total_seconds() * 1000:.1f}"
    log.info("Timing - Delete pipeline errors for nanopub", delta_ms=delta_ms)

    # split the generated documents into node and edge batches
    node_list, edge_list = [], []
    for doc in edge_iterator(edges=edges):
        if doc[0] == "nodes":
            node_list.append(doc[1])
        else:
            edge_list.append(doc[1])

    end_time3 = datetime.datetime.now()
    delta_ms = f"{(end_time3 - end_time2).total_seconds() * 1000:.1f}"
    log.info("Timing - Collect edges and nodes", delta_ms=delta_ms)

    try:
        results = edgestore_db.collection(edges_coll_name).import_bulk(
            edge_list, on_duplicate="replace", halt_on_error=False
        )
    except Exception as e:
        log.error(f"Could not load edges msg: {e}")

    end_time4 = datetime.datetime.now()
    delta_ms = f"{(end_time4 - end_time3).total_seconds() * 1000:.1f}"
    log.info("Timing - Load edges into edgestore", delta_ms=delta_ms)

    try:
        results = edgestore_db.collection(nodes_coll_name).import_bulk(
            node_list, on_duplicate="replace", halt_on_error=False
        )
    except Exception as e:
        log.error(f"Could not load nodes msg: {e}")

    end_time5 = datetime.datetime.now()
    delta_ms = f"{(end_time5 - end_time4).total_seconds() * 1000:.1f}"
    log.info("Timing - Load nodes into edgestore", delta_ms=delta_ms)
Load edges into Edgestore
50,968
def edge_iterator(edges=[], edges_fn=None):
    """Yield ("nodes"|"edges", document) pairs for loading into ArangoDB.

    Documents are keyed by a content hash; the edge key excludes volatile
    bookkeeping fields so it stays stable across reloads.
    """
    for edge in itertools.chain(edges, files.read_edges(edges_fn)):
        subj = copy.deepcopy(edge["edge"]["subject"])
        subj_key = str(utils._create_hash_from_doc(subj))
        subj["_key"] = subj_key

        obj = copy.deepcopy(edge["edge"]["object"])
        obj_key = str(utils._create_hash_from_doc(obj))
        obj["_key"] = obj_key

        relation = copy.deepcopy(edge["edge"]["relation"])
        relation["_from"] = f"nodes/{subj_key}"
        relation["_to"] = f"nodes/{obj_key}"

        # hash the relation minus volatile fields so the key is stable
        stripped = copy.deepcopy(relation)
        for volatile in (
            "edge_dt",
            "edge_hash",
            "nanopub_dt",
            "nanopub_url",
            "subject_canon",
            "object_canon",
            "public_flag",
            "metadata",
        ):
            stripped.pop(volatile, None)
        relation["_key"] = str(utils._create_hash_from_doc(stripped))

        if edge.get("nanopub_id", None):
            relation.setdefault("metadata", {})["nanopub_id"] = edge["nanopub_id"]

        yield ("nodes", subj)
        yield ("nodes", obj)
        yield ("edges", relation)
Yield documents from edge for loading into ArangoDB
50,969
def update_nanopubstore_start_dt(url: str, start_dt: str):
    """Record the nanopubstore start_dt in the belapi.state_mgmt collection.

    Creates the state document on first use, otherwise updates (or appends)
    the entry for this nanopubstore hostname.
    """
    hostname = urllib.parse.urlsplit(url)[1]

    doc = state_mgmt.get(start_dates_doc_key)
    if not doc:
        state_mgmt.insert(
            {
                "_key": start_dates_doc_key,
                "start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
            }
        )
        return

    for entry in doc["start_dates"]:
        if entry["nanopubstore"] == hostname:
            entry["start_dt"] = start_dt
            break
    else:
        doc["start_dates"].append({"nanopubstore": hostname, "start_dt": start_dt})
    state_mgmt.replace(doc)
Add nanopubstore start_dt to belapi . state_mgmt collection
50,970
def get_nanopubstore_start_dt(url: str):
    """Get the last start_dt recorded for this nanopubstore hostname.

    Falls back to the epoch-like default when no (single) entry is found.
    """
    hostname = urllib.parse.urlsplit(url)[1]

    doc = state_mgmt.get(start_dates_doc_key)
    if doc and doc.get("start_dates"):
        matches = [
            entry["start_dt"]
            for entry in doc["start_dates"]
            if entry["nanopubstore"] == hostname
        ]
        log.info(f"Selected start_dt: {matches} len: {len(matches)}")
        if len(matches) == 1:
            return matches[0]
    return "1900-01-01T00:00:00.000Z"
Get the last start_dt recorded for retrieving new nanopub IDs
50,971
def get_nanopub_urls(ns_root_url: str = None, start_dt: str = None) -> dict:
    """Get modified and deleted nanopub urls since start_dt.

    Defaults ns_root_url from config and start_dt from the stored state;
    on success also advances the stored start_dt to the server's queryTime.
    Returns {"modified": [...], "deleted": [...]} or {} on a bad request.
    """
    if not ns_root_url:
        ns_root_url = config["bel_api"]["servers"]["nanopubstore"]
    url = f"{ns_root_url}/nanopubs/timed"

    if not start_dt:
        start_dt = get_nanopubstore_start_dt(ns_root_url)

    r = bel.utils.get_url(
        url, params={"startTime": start_dt, "published": True}, cache=False
    )
    if not r or r.status_code != 200:
        log.error(
            f"Bad request to Nanopubstore",
            url=url,
            status=r.status_code,
            type="api_request",
        )
        return {}

    data = r.json()
    update_nanopubstore_start_dt(ns_root_url, data["queryTime"])

    nanopub_urls = {"modified": [], "deleted": []}
    for nid in data["deleteddata"]:
        nanopub_urls["deleted"].append(f"{ns_root_url}/nanopubs/{nid}")
    for nid in data["data"]:
        nanopub_urls["modified"].append(f"{ns_root_url}/nanopubs/{nid}")
    return nanopub_urls
Get modified and deleted nanopub urls
50,972
def get_nanopub(url):
    """Get a nanopub from the nanopubstore at the given url; {} on failure."""
    r = bel.utils.get_url(url, cache=False)
    payload = r.json() if r else None
    return payload if payload else {}
Get Nanopub from nanopubstore given url
50,973
def convert_belscript(ctx, input_fn, output_fn):
    """Convert a belscript file into nanopubs_bel format.

    Output format (yaml/json/jsonlines) is chosen by create_nanopubs_fh
    based on the output filename; gz input is handled transparently.

    BUG FIX: if create_nanopubs_fh or the input open raised, the `finally`
    block referenced undefined names (`f`/`out_fh`) and raised NameError,
    masking the real error; handles are now initialized and close-guarded.
    """
    f = None
    out_fh = None
    try:
        (
            out_fh,
            yaml_flag,
            jsonl_flag,
            json_flag,
        ) = bel.nanopub.files.create_nanopubs_fh(output_fn)

        docs = []
        f = gzip.open(input_fn, "rt") if re.search("gz$", input_fn) else open(input_fn, "rt")

        for doc in bel.nanopub.belscripts.parse_belscript(f):
            if yaml_flag or json_flag:
                docs.append(doc)
            elif jsonl_flag:
                out_fh.write("{}\n".format(json.dumps(doc)))

        if yaml_flag:
            yaml.dump(docs, out_fh)
        elif json_flag:
            json.dump(docs, out_fh, indent=4)
    finally:
        if f:
            f.close()
        if out_fh:
            out_fh.close()
Convert belscript to nanopubs_bel format
50,974
def reformat(ctx, input_fn, output_fn):
    """Reformat nanopubs between JSON, YAML and JSONLines formats.

    BUG FIXES:
    - the `finally` block referenced `f`/`out_fh` which were undefined if
      setup failed, raising NameError and masking the real error
    - the original opened input_fn directly (gzip-aware) but never read from
      that handle; bnf.read_nanopubs() does its own file handling, so the
      redundant open is dropped
    """
    out_fh = None
    try:
        (
            out_fh,
            yaml_flag,
            jsonl_flag,
            json_flag,
        ) = bel.nanopub.files.create_nanopubs_fh(output_fn)

        docs = []
        for np in bnf.read_nanopubs(input_fn):
            if yaml_flag or json_flag:
                docs.append(np)
            elif jsonl_flag:
                out_fh.write("{}\n".format(json.dumps(np)))

        if yaml_flag:
            yaml.dump(docs, out_fh)
        elif json_flag:
            json.dump(docs, out_fh, indent=4)
    finally:
        if out_fh:
            out_fh.close()
Reformat between JSON YAML JSONLines formats
50,975
def nanopub_stats(ctx, input_fn):
    """Collect and print statistics on a nanopub file.

    Counts nanopubs, total/subject-only/nested assertions, and per-relation
    frequencies.

    BUG FIXES:
    - `sorted()` on the relations dict returned only a sorted list of keys,
      discarding the counts; the counts are now kept, ordered by relation
    - the regex is now a raw string (avoids invalid-escape warnings)
    """
    counts = {
        "nanopubs": 0,
        "assertions": {"total": 0, "subject_only": 0, "nested": 0, "relations": {}},
    }
    for np in bnf.read_nanopubs(input_fn):
        if "nanopub" not in np:
            continue
        counts["nanopubs"] += 1
        assertions = np["nanopub"]["assertions"]
        counts["assertions"]["total"] += len(assertions)
        for assertion in assertions:
            if assertion["relation"] is None:
                counts["assertions"]["subject_only"] += 1
                continue
            # object starting with "(" indicates a nested statement
            if re.match(r"\s*\(", assertion["object"]):
                counts["assertions"]["nested"] += 1
            relation = assertion.get("relation")
            relations = counts["assertions"]["relations"]
            relations[relation] = relations.get(relation, 0) + 1

    counts["assertions"]["relations"] = dict(
        sorted(counts["assertions"]["relations"].items())
    )
    print("DumpVar:\n", json.dumps(counts, indent=4))
Collect statistics on nanopub file
50,976
def edges(ctx, statement, rules, species, namespace_targets, version, api, config_fn):
    """Create BEL Edges from a BEL Statement (CLI command).

    CLI options fall back to configured defaults via utils.first_true;
    namespace_targets is a JSON string, rules a comma-separated list.
    """
    # optional per-invocation config override
    if config_fn:
        config = bel.db.Config.merge_config(ctx.config, override_config_fn=config_fn)
    else:
        config = ctx.config

    # decode CLI string options
    if namespace_targets:
        namespace_targets = json.loads(namespace_targets)
    if rules:
        rules = rules.replace(" ", "").split(",")

    # fall back to configured defaults where options were not given
    namespace_targets = utils.first_true(
        [namespace_targets, config["bel"]["lang"].get("canonical")], None
    )
    api_url = utils.first_true(
        [api, config["bel_api"]["servers"].get("api_url", None)], None
    )
    version = utils.first_true(
        [version, config["bel"]["lang"].get("default_bel_version", None)], None
    )

    print("------------------------------")
    print("BEL version: {}".format(version))
    print("API Endpoint: {}".format(api))
    print("------------------------------")

    bo = BEL(version=version, endpoint=api_url)
    # orthologize only when a target species was requested
    if species:
        edges = (
            bo.parse(statement)
            .orthologize(species)
            .canonicalize(namespace_targets=namespace_targets)
            .compute_edges(rules=rules)
        )
    else:
        edges = (
            bo.parse(statement)
            .canonicalize(namespace_targets=namespace_targets)
            .compute_edges(rules=rules)
        )

    if edges is None:
        # parse/compute failed - show the diagnostics
        print(bo.original_bel_stmt)
        print(bo.parse_visualize_error)
        print(bo.validation_messages)
    else:
        print(json.dumps(edges, indent=4))
    if bo.validation_messages:
        print(bo.validation_messages)
    else:
        print("No problems found")
    return
Create BEL Edges from BEL Statement
50,977
def elasticsearch(delete, index_name):
    """Setup Elasticsearch namespace indexes (CLI command).

    With --delete, the client is asked to delete/recreate; otherwise just
    ensures a client (and its indexes) exists. index_name is currently unused.
    """
    if delete:
        bel.db.elasticsearch.get_client(delete=True)
        return
    bel.db.elasticsearch.get_client()
Setup Elasticsearch namespace indexes
50,978
def arangodb(delete, db_name):
    """Setup an ArangoDB database (CLI command).

    Optionally deletes db_name first, then (re)creates the handle for the
    "belns" or "edgestore" database.

    BUG FIX: `client` was only created inside the `if delete:` branch, so
    running without --delete raised NameError before any handle could be
    created; the client is now always created.
    """
    client = bel.db.arangodb.get_client()
    if delete:
        bel.db.arangodb.delete_database(client, db_name)

    if db_name == "belns":
        bel.db.arangodb.get_belns_handle(client)
    elif db_name == "edgestore":
        bel.db.arangodb.get_edgestore_handle(client)
Setup ArangoDB database
50,979
def validate_to_schema(nanopub, schema) -> Tuple[bool, List[Tuple[str, str]]]:
    """Validate a nanopub against the nanopub JSON schema.

    Returns (is_valid, [("ERROR", message), ...]).

    BUG FIX: errors without sub-contexts (plain validation failures, where
    error.context is None/empty) previously raised TypeError when iterated
    and their messages were never captured; they are now reported directly.
    """
    v = jsonschema.Draft4Validator(schema)
    messages = []
    errors = sorted(v.iter_errors(nanopub), key=lambda e: e.path)
    for error in errors:
        if error.context:
            # anyOf/oneOf style errors carry the real failures in .context
            for suberror in sorted(error.context, key=lambda e: e.schema_path):
                print(list(suberror.schema_path), suberror.message, sep=", ")
                messages.append(("ERROR", suberror.message))
        else:
            messages.append(("ERROR", error.message))

    is_valid = not errors
    return (is_valid, messages)
Validate nanopub against jsonschema for nanopub
50,980
def hash_nanopub(nanopub: Mapping[str, Any]) -> str:
    """Create a CityHash64 hex digest from a nanopub for duplicate checking.

    The hash input is order-sensitive: type name/version, one citation field
    (database > uri > reference precedence), sorted assertions, then sorted
    annotations, all lowercased and space-joined — do not reorder.

    NOTE(review): assertion dicts are mutated in place (None relation/object
    replaced with "") — confirm callers tolerate this side effect.
    """
    hash_list = []

    # nanopub type
    hash_list.append(nanopub["nanopub"]["type"].get("name", "").strip())
    hash_list.append(nanopub["nanopub"]["type"].get("version", "").strip())

    # citation: first available of database / uri / reference
    if nanopub["nanopub"]["citation"].get("database", False):
        hash_list.append(nanopub["nanopub"]["citation"]["database"].get("name", "").strip())
        hash_list.append(nanopub["nanopub"]["citation"]["database"].get("id", "").strip())
    elif nanopub["nanopub"]["citation"].get("uri", False):
        hash_list.append(nanopub["nanopub"]["citation"].get("uri", "").strip())
    elif nanopub["nanopub"]["citation"].get("reference", False):
        hash_list.append(nanopub["nanopub"]["citation"].get("reference", "").strip())

    # assertions, normalized and sorted for order-independence
    assertions = []
    for assertion in nanopub["nanopub"]["assertions"]:
        if assertion.get("relation") is None:
            assertion["relation"] = ""
        if assertion.get("object") is None:
            assertion["object"] = ""
        assertions.append(
            " ".join(
                (
                    assertion["subject"].strip(),
                    assertion.get("relation", "").strip(),
                    assertion.get("object", "").strip(),
                )
            ).strip()
        )
    assertions = sorted(assertions)
    hash_list.extend(assertions)

    # annotations, normalized and sorted for order-independence
    annotations = []
    for anno in nanopub["nanopub"]["annotations"]:
        annotations.append(
            " ".join((anno.get("type", "").strip(), anno.get("id", "").strip())).strip()
        )
    annotations = sorted(annotations)
    hash_list.extend(annotations)

    np_string = " ".join([l.lower() for l in hash_list])
    return "{:x}".format(CityHash64(np_string))
Create CityHash64 from nanopub for duplicate check
50,981
def validate(self, nanopub: Mapping[str, Any]) -> Tuple[bool, List[Tuple[str, str]]]:
    """Validate a nanopub: JSON schema, BEL statement parsing, and context.

    Returns:
        (is_valid, messages) where messages is a list of (level, msg) tuples.

    BUG FIXES:
    - on schema failure the original returned only `messages`, breaking the
      declared (bool, messages) tuple contract; now returns the full tuple
    - parse errors were added with list.extend(tuple), which flattened the
      ("ERROR", msg) pair into two separate list items; now appended intact
    """
    (is_valid, messages) = validate_to_schema(nanopub, self.nanopub_schema)
    if not is_valid:
        return (is_valid, messages)

    if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
        bel_version = nanopub["nanopub"]["type"]["version"]
    else:
        is_valid = False
        return (
            is_valid,
            f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}",
        )

    all_messages = []
    # validate every BEL edge statement
    bel_obj = bel.lang.belobj.BEL(bel_version, self.endpoint)
    for edge in nanopub["nanopub"]["edges"]:
        bel_statement = f"{edge['subject']} {edge['relation']} {edge['object']}"
        parse_obj = bel_obj.parse(bel_statement)
        if not parse_obj.valid:
            all_messages.append(
                (
                    "ERROR",
                    f"BEL statement parse error {parse_obj.error}, {parse_obj.err_visual}",
                )
            )

    # validate context annotations
    for context in nanopub["nanopub"]["context"]:
        (is_valid, messages) = self.validate_context(context)
        all_messages.extend(messages)

    is_valid = not any(_type == "ERROR" for _type, msg in all_messages)
    return (is_valid, all_messages)
Validates using the nanopub schema
50,982
def bel_edges(
    self,
    nanopub: Mapping[str, Any],
    namespace_targets: Mapping[str, List[str]] = {},
    rules: List[str] = [],
    orthologize_target: str = None,
) -> List[Mapping[str, Any]]:
    """Create BEL edges from a BEL nanopub via the edge-creation pipeline."""
    return bel.edge.edges.create_edges(
        nanopub,
        self.endpoint,
        namespace_targets=namespace_targets,
        rules=rules,
        orthologize_target=orthologize_target,
    )
Create BEL Edges from BEL nanopub
50,983
def main_hrun():
    """Parse command line options and run http(s) test cases via rtsf."""
    parser = argparse.ArgumentParser(description="Tools for http(s) test. Base on rtsf.")
    parser.add_argument('--log-level', default='INFO', help="Specify logging level, default is INFO.")
    parser.add_argument('--log-file', help="Write logs to specified file path.")
    parser.add_argument('case_file', help="yaml testcase file")

    color_print("httpdriver {}".format(__version__), "GREEN")
    args = parser.parse_args()
    logger.setup_logger(args.log_level, args.log_file)

    html_report = TestRunner(runner=HttpDriver).run(args.case_file).gen_html_report()
    color_print("report: {}".format(html_report))
Parse command line options and run commands.
50,984
def _void_array_to_nested_list(res, _func, _args):
    """Dereference the FFI result into a nested list of coordinate pairs.

    ctypes errcheck-style callback (_func/_args unused): views res.coords as
    a (len, 2) float64 array, copies it out, and frees the foreign buffer.
    """
    try:
        # res.coords holds `len` (x, y) double pairs
        shape = res.coords.len, 2
        ptr = cast(res.coords.data, POINTER(c_double))
        array = np.ctypeslib.as_array(ptr, shape)
        # tolist() copies the data out before the buffer is released below
        return array.tolist()
    finally:
        # hand the memory back to the FFI side
        drop_array(res.coords)
Dereference the FFI result to a list of coordinates
50,985
def lower_ext(abspath):
    """Return the path with its file extension lowercased."""
    root, ext = os.path.splitext(abspath)
    return root + ext.lower()
Convert file extension to lowercase .
50,986
def pretty_dumps(data):
    """Return a JSON string in pretty (sorted, indented) format.

    Tries ensure_ascii=False first to keep non-ASCII characters readable,
    falling back to ASCII-escaped output if serialization fails.

    BUG FIX: replaced bare `except:` (which also swallowed KeyboardInterrupt
    and SystemExit) with `except Exception:`.
    """
    try:
        return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
    except Exception:
        return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True)
Return json string in pretty format .
50,987
def _get_pad_left_right ( small , large ) : assert small < large , "Can only pad when new size larger than old size" padsize = large - small if padsize % 2 != 0 : leftpad = ( padsize - 1 ) / 2 else : leftpad = padsize / 2 rightpad = padsize - leftpad return int ( leftpad ) , int ( rightpad )
Compute left and right padding values .
50,988
def pad_add(av, size=None, stlen=10):
    """Perform linear padding for a (possibly complex) 1D or 2D array.

    size defaults to doubling each axis; a scalar size is promoted to a
    one-element list. Dispatches to the 1D or 2D implementation.
    """
    if size is None:
        size = [int(2 * s) for s in av.shape]
    elif not hasattr(size, "__len__"):
        size = [size]

    assert len(av.shape) in [1, 2], "Only 1D and 2D arrays!"
    assert len(av.shape) == len(size), "`size` must have same length as `av.shape`!"

    if len(av.shape) == 2:
        return _pad_add_2d(av, size, stlen)
    return _pad_add_1d(av, size, stlen)
Perform linear padding for complex array
50,989
def _pad_add_1d(av, size, stlen):
    """1D component of pad_add.

    Linearly ramps the array out to `size`, using the average of the border
    region (first/last `stlen` samples) as the end value; for complex input
    the average magnitude and phase are combined. The result is rolled so
    the original data stays at the start.
    """
    assert len(size) == 1
    padx = _get_pad_left_right(av.shape[0], size[0])
    # border = everything outside the central region (first/last stlen samples)
    mask = np.zeros(av.shape, dtype=bool)
    mask[stlen:-stlen] = True
    border = av[~mask]
    if av.dtype.name.count("complex"):
        # combine average magnitude with average phase for complex data
        padval = np.average(np.abs(border)) * np.exp(1j * np.average(np.angle(border)))
    else:
        padval = np.average(border)
    # numpy 1.7-1.9 required per-axis end_values nesting
    if np.__version__[:3] in ["1.7", "1.8", "1.9"]:
        end_values = ((padval, padval),)
    else:
        end_values = (padval,)
    bv = np.pad(av, padx, mode="linear_ramp", end_values=end_values)
    # shift so the original signal starts at index 0
    bv = np.roll(bv, -padx[0], 0)
    return bv
1D component of pad_add
50,990
def pad_rem(pv, size=None):
    """Remove linear padding from array, returning the leading block.

    Parameters
    ----------
    pv : 1D or 2D array
        Padded array.
    size : int, sequence, or None
        Target shape per axis; defaults to half of each (even) axis of *pv*.
    """
    if size is None:
        size = list()
        for s in pv.shape:
            assert s % 2 == 0, "Uneven size; specify correct size of output!"
            size.append(int(s / 2))
    elif not hasattr(size, "__len__"):
        size = [size]
    assert len(pv.shape) in [1, 2], "Only 1D and 2D arrays!"
    # Fixed: message previously said `av.shape` (copy-paste from pad_add),
    # but this function's parameter is `pv`.
    assert len(pv.shape) == len(size), "`size` must have same length as `pv.shape`!"
    if len(pv.shape) == 2:
        return pv[:size[0], :size[1]]
    else:
        return pv[:size[0]]
Removes linear padding from array
50,991
def rate_limit(self):
    """Block until the Redis-backed rate limiter clears, then record an attempt.

    Polls ``self.limit`` every 0.3 seconds; logs one message while waiting
    and one when the limit clears.  Always returns True.

    NOTE(review): ``uuid`` is not defined in this method — presumably a
    module-level import or value defined elsewhere in the file; confirm it
    is the intended rate-limit key.
    """
    rate_limited_msg = False
    while True:
        is_rate_limited = self.limit.is_rate_limited(uuid)
        if is_rate_limited:
            time.sleep(0.3)
            if not rate_limited_msg:
                # Log the wait only once, not on every poll iteration.
                self.logger.info('Rate limit active..please wait...')
                rate_limited_msg = True
        if not is_rate_limited:
            self.logger.info('Rate limit clear.')
            # Count this call against the limit before proceeding.
            self.limit.attempt(uuid)
            return True
Simple rate limit function using redis
50,992
def scan_file(self, filename, apikey):
    """Send a file to VirusTotal for assessment.

    Returns the decoded JSON response on HTTP 200; returns None after a
    rate-limit back-off or on other HTTP errors.

    Fixes: the file handle was previously leaked (``open`` with no close),
    and the rate limiter was consulted *after* the POST had already been
    sent (inconsistent with ``rescan_file``), wasting API quota.
    """
    url = self.base_url + "file/scan"
    params = {'apikey': apikey}
    # Wait for the rate limiter before issuing the request.
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        # Context manager closes the upload handle deterministically.
        with open(filename, 'rb') as upload:
            response = requests.post(url, files={"file": upload}, params=params)
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Public API quota hit: back off before the caller retries.
            time.sleep(20)
        else:
            self.logger.error("sent: %s, HTTP: %d", filename, response.status_code)
Sends a file to virus total for assessment
50,993
def rescan_file(self, filename, sha256hash, apikey):
    """Ask VirusTotal to rescan an already-submitted file by its SHA-256 hash.

    Only the hash is sent, not the file contents; *filename* is used for
    logging only.  Returns the HTTP response, or None when the rate
    limiter did not clear.

    Fixed: ``response`` was referenced after the ``if`` block and raised
    UnboundLocalError whenever ``rate_limit()`` returned falsy.
    """
    url = self.base_url + "file/rescan"
    params = {'apikey': apikey, 'resource': sha256hash}
    response = None
    if self.rate_limit():
        response = requests.post(url, params=params)
        if response.status_code == self.HTTP_OK:
            self.logger.info(
                "sent: %s, HTTP: %d, content: %s",
                os.path.basename(filename), response.status_code, response.text,
            )
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Public API quota hit: back off.
            time.sleep(20)
        else:
            self.logger.error(
                "sent: %s, HTTP: %d",
                os.path.basename(filename), response.status_code,
            )
    return response
Just send the hash to request a rescan and check the date
50,994
def binary_report(self, sha256sum, apikey):
    """Retrieve a file-scan report from VirusTotal by SHA-256 hash.

    Returns the decoded JSON report on HTTP 200; returns None after a
    rate-limit back-off or on other HTTP errors.

    Fixes: the warning branch logged the undefined name ``filename``
    (NameError at runtime) — this function only receives the hash; also
    removed the unused ``response_code`` local.
    """
    url = self.base_url + "file/report"
    params = {"apikey": apikey, "resource": sha256sum}
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        response = requests.post(url, data=params)
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Public API quota hit: back off.
            time.sleep(20)
        else:
            self.logger.warning(
                "retrieve report: %s, HTTP code: %d",
                sha256sum, response.status_code,
            )
retrieve report from file scan
50,995
def send_ip(self, ipaddr, apikey):
    """Query VirusTotal's IP-address report for past malicious associations.

    Returns the decoded JSON response on HTTP 200; otherwise sleeps the
    public-API interval and returns None.
    """
    url = self.base_url + "ip-address/report"
    parameters = {"ip": ipaddr, "apikey": apikey}
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        response = requests.get(url, params=parameters)
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Public API quota hit: back off.
            time.sleep(20)
        else:
            self.logger.error("sent: %s, HTTP: %d", ipaddr, response.status_code)
        # NOTE(review): placement reconstructed from flattened source —
        # throttles the public API after non-OK responses; confirm intent.
        time.sleep(self.public_api_sleep_time)
Send IP address for list of past malicious domain associations
50,996
def url_report(self, scan_url, apikey):
    """Query VirusTotal's URL report for past malicious associations.

    Returns the decoded JSON response on HTTP 200; otherwise sleeps the
    public-API interval and returns None.
    """
    url = self.base_url + "url/report"
    params = {"apikey": apikey, 'resource': scan_url}
    rate_limit_clear = self.rate_limit()
    if rate_limit_clear:
        response = requests.post(url, params=params, headers=self.headers)
        if response.status_code == self.HTTP_OK:
            json_response = response.json()
            return json_response
        elif response.status_code == self.HTTP_RATE_EXCEEDED:
            # Public API quota hit: back off.
            time.sleep(20)
        else:
            self.logger.error("sent: %s, HTTP: %d", scan_url, response.status_code)
        # NOTE(review): placement reconstructed from flattened source —
        # throttles the public API after non-OK responses; confirm intent.
        time.sleep(self.public_api_sleep_time)
Send URLs for list of past malicious associations
50,997
def _read_requirements(filename, extra_packages):
    """Read a pip requirements file, separating environment-marker deps.

    Lines containing ``;`` are split into ``dep; condition`` and appended
    to ``extra_packages[condition]``; plain requirement lines become hard
    requirements.  Returns ``(hard_requirements, extra_packages)``.

    Fixed: the file handle was previously leaked (``open`` with no close).
    """
    # Context manager closes the file deterministically.
    with open(filename) as req_file:
        requirements_file = req_file.read()
    hard_requirements = []
    for line in requirements_file.splitlines():
        if _is_requirement(line):
            if line.find(';') > -1:
                # "package; marker" — file under the marker condition.
                dep, condition = tuple(line.split(';'))
                extra_packages[condition.strip()].append(dep.strip())
            else:
                hard_requirements.append(line.strip())
    return hard_requirements, extra_packages
Returns a list of package requirements read from the file .
50,998
def field(ctx, text, index, delimiter=' '):
    """Reference a field in a string separated by a delimiter.

    *index* is 1-based; an index past the last field yields ''.

    Raises:
        ValueError: if the resolved index is less than 1.
    """
    parts = [p for p in text.split(delimiter) if p != delimiter and p.strip()]
    position = conversions.to_integer(index, ctx)
    if position < 1:
        raise ValueError('Field index cannot be less than 1')
    return parts[position - 1] if position <= len(parts) else ''
Reference a field in string separated by a delimiter
50,999
def epoch(ctx, datetime):
    """Converts the given date to the number of seconds since January 1st 1970 UTC."""
    parsed = conversions.to_datetime(datetime, ctx)
    return conversions.to_decimal(str(parsed.timestamp()), ctx)
Converts the given date to the number of seconds since January 1st 1970 UTC