idx (int64, 0–63k) | question (string, length 61–4.03k) | target (string, length 6–1.23k) |
|---|---|---|
62,300 | async def cancel_remaining ( self ) : self . _closed = True task_list = list ( self . _pending ) for task in task_list : task . cancel ( ) for task in task_list : with suppress ( CancelledError ) : await task | Cancel all remaining tasks . |
62,301 | async def _connect_one ( self , remote_address ) : loop = asyncio . get_event_loop ( ) for info in await loop . getaddrinfo ( str ( self . address . host ) , self . address . port , type = socket . SOCK_STREAM ) : client = self . protocol ( remote_address , self . auth ) sock = socket . socket ( family = info [ 0 ] ) t... | Connect to the proxy and perform a handshake requesting a connection . |
62,302 | async def _connect ( self , remote_addresses ) : assert remote_addresses exceptions = [ ] for remote_address in remote_addresses : sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : return sock , remote_address exceptions . append ( sock ) strings = set ( f'{exc!r}' for exc i... | Connect to the proxy and perform a handshake requesting a connection to each address in addresses . |
62,303 | async def _detect_proxy ( self ) : if self . protocol is SOCKS4a : remote_address = NetAddress ( 'www.apple.com' , 80 ) else : remote_address = NetAddress ( '8.8.8.8' , 53 ) sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : sock . close ( ) return True return isinstance ( so... | Return True if it appears we can connect to a SOCKS proxy otherwise False . |
62,304 | async def auto_detect_at_host ( cls , host , ports , auth ) : for port in ports : proxy = await cls . auto_detect_at_address ( NetAddress ( host , port ) , auth ) if proxy : return proxy return None | Try to detect a SOCKS proxy on a host on one of the ports . |
62,305 | async def create_connection ( self ) : connector = self . proxy or self . loop return await connector . create_connection ( self . session_factory , self . host , self . port , ** self . kwargs ) | Initiate a connection . |
62,306 | def data_received ( self , framed_message ) : if self . verbosity >= 4 : self . logger . debug ( f'Received framed message {framed_message}' ) self . recv_size += len ( framed_message ) self . bump_cost ( len ( framed_message ) * self . bw_cost_per_byte ) self . framer . received_bytes ( framed_message ) | Called by asyncio when a message comes in . |
62,307 | def pause_writing ( self ) : if not self . is_closing ( ) : self . _can_send . clear ( ) self . transport . pause_reading ( ) | Transport calls when the send buffer is full . |
62,308 | def resume_writing ( self ) : if not self . _can_send . is_set ( ) : self . _can_send . set ( ) self . transport . resume_reading ( ) | Transport calls when the send buffer has room . |
62,309 | def connection_made ( self , transport ) : self . transport = transport if self . _proxy is None : peername = transport . get_extra_info ( 'peername' ) self . _remote_address = NetAddress ( peername [ 0 ] , peername [ 1 ] ) self . _task = spawn_sync ( self . _process_messages ( ) , loop = self . loop ) | Called by asyncio when a connection is established . |
62,310 | def connection_lost ( self , exc ) : if self . transport : self . transport = None self . closed_event . set ( ) self . _can_send . set ( ) self . loop . call_soon ( self . _task . cancel ) | Called by asyncio when the connection closes . |
62,311 | def recalc_concurrency ( self ) : now = time . time ( ) self . cost = max ( 0 , self . cost - ( now - self . _cost_time ) * self . cost_decay_per_sec ) self . _cost_time = now self . _cost_last = self . cost value = self . _incoming_concurrency . max_concurrent cost_soft_range = self . cost_hard_limit - self . cost_sof... | Call to recalculate sleeps and concurrency for the session . Called automatically if cost has drifted significantly . Otherwise can be called at regular intervals if desired . |
62,312 | async def close ( self , * , force_after = 30 ) : if self . transport : self . transport . close ( ) try : async with timeout_after ( force_after ) : await self . closed_event . wait ( ) except TaskTimeout : self . abort ( ) await self . closed_event . wait ( ) | Close the connection and return when closed . |
62,313 | async def send_request ( self , method , args = ( ) ) : message , event = self . connection . send_request ( Request ( method , args ) ) return await self . _send_concurrent ( message , event , 1 ) | Send an RPC request over the network . |
62,314 | async def send_notification ( self , method , args = ( ) ) : message = self . connection . send_notification ( Notification ( method , args ) ) await self . _send_message ( message ) | Send an RPC notification over the network . |
62,315 | async def close ( self ) : if self . server : self . server . close ( ) await self . server . wait_closed ( ) self . server = None | Close the listening socket . This does not close any ServerSession objects created to handle incoming connections . |
62,316 | def _message_to_payload ( cls , message ) : try : return json . loads ( message . decode ( ) ) except UnicodeDecodeError : message = 'messages must be encoded in UTF-8' except json . JSONDecodeError : message = 'invalid JSON' raise cls . _error ( cls . PARSE_ERROR , message , True , None ) | Returns a Python object or a ProtocolError . |
62,317 | def batch_message ( cls , batch , request_ids ) : assert isinstance ( batch , Batch ) if not cls . allow_batches : raise ProtocolError . invalid_request ( 'protocol does not permit batches' ) id_iter = iter ( request_ids ) rm = cls . request_message nm = cls . notification_message parts = ( rm ( request , next ( id_ite... | Convert a request Batch to a message . |
62,318 | def batch_message_from_parts ( cls , messages ) : middle = b', ' . join ( messages ) if not middle : raise ProtocolError . empty_batch ( ) return b'' . join ( [ b'[' , middle , b']' ] ) | Convert messages one per batch item into a batch message . At least one message must be passed . |
62,319 | def encode_payload ( cls , payload ) : try : return json . dumps ( payload ) . encode ( ) except TypeError : msg = f'JSON payload encoding error: {payload}' raise ProtocolError ( cls . INTERNAL_ERROR , msg ) from None | Encode a Python object as JSON and convert it to bytes . |
62,320 | def detect_protocol ( cls , message ) : main = cls . _message_to_payload ( message ) def protocol_for_payload ( payload ) : if not isinstance ( payload , dict ) : return JSONRPCLoose version = payload . get ( 'jsonrpc' ) if version == '2.0' : return JSONRPCv2 if version == '1.0' : return JSONRPCv1 if 'result' in payloa... | Attempt to detect the protocol from the message . |
62,321 | def receive_message ( self , message ) : if self . _protocol is JSONRPCAutoDetect : self . _protocol = JSONRPCAutoDetect . detect_protocol ( message ) try : item , request_id = self . _protocol . message_to_item ( message ) except ProtocolError as e : if e . response_msg_id is not id : return self . _receive_response (... | Call with an unframed message received from the network . |
62,322 | def cancel_pending_requests ( self ) : exception = CancelledError ( ) for _request , event in self . _requests . values ( ) : event . result = exception event . set ( ) self . _requests . clear ( ) | Cancel all pending requests . |
62,323 | def is_valid_hostname ( hostname ) : if not isinstance ( hostname , str ) : raise TypeError ( 'hostname must be a string' ) if hostname and hostname [ - 1 ] == "." : hostname = hostname [ : - 1 ] if not hostname or len ( hostname ) > 253 : return False labels = hostname . split ( '.' ) if re . match ( NUMERIC_REGEX , l... | Return True if hostname is valid otherwise False . |
62,324 | def classify_host ( host ) : if isinstance ( host , ( IPv4Address , IPv6Address ) ) : return host if is_valid_hostname ( host ) : return host return ip_address ( host ) | Host is an IPv4Address IPv6Address or a string . |
62,325 | def validate_port ( port ) : if not isinstance ( port , ( str , int ) ) : raise TypeError ( f'port must be an integer or string: {port}' ) if isinstance ( port , str ) and port . isdigit ( ) : port = int ( port ) if isinstance ( port , int ) and 0 < port <= 65535 : return port raise ValueError ( f'invalid port: {port}'... | Validate port and return it as an integer . |
62,326 | def validate_protocol ( protocol ) : if not re . match ( PROTOCOL_REGEX , protocol ) : raise ValueError ( f'invalid protocol: {protocol}' ) return protocol . lower ( ) | Validate a protocol a string and return it . |
62,327 | def is_async_call ( func ) : while isinstance ( func , partial ) : func = func . func return inspect . iscoroutinefunction ( func ) | inspect . iscoroutinefunction that looks through partials . |
62,328 | def from_string ( cls , string , * , default_func = None ) : if not isinstance ( string , str ) : raise TypeError ( f'service must be a string: {string}' ) parts = string . split ( '://' , 1 ) if len ( parts ) == 2 : protocol , address = parts else : item , = parts protocol = None if default_func : if default_func ( it... | Construct a Service from a string . |
62,329 | def scrub ( self ) : LOG . info ( "Scrubbing out the nasty characters that break our parser." ) myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) tmpfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] + '.tmp.gz' ) ) tmp = gzip . open ( tmpfile , 'wb' ) du = DipperUtil ... | The XML file seems to have mixed - encoding ; we scrub out the control characters from the file for processing . |
62,330 | def process_associations ( self , limit ) : myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) f = gzip . open ( myfile , 'rb' ) filereader = io . TextIOWrapper ( f , newline = "" ) filereader . readline ( ) for event , elem in ET . iterparse ( filereader ) : self . process_xml_table ( elem ... | Loop through the xml file and process the article - breed article - phene breed - phene phene - gene associations and the external links to LIDA . |
62,331 | def _process_article_phene_row ( self , row ) : phenotype_id = self . id_hash [ 'phene' ] . get ( row [ 'phene_id' ] ) article_id = self . id_hash [ 'article' ] . get ( row [ 'article_id' ] ) omia_id = self . _get_omia_id_from_phene_id ( phenotype_id ) if self . test_mode or omia_id not in self . test_ids [ 'disease' ]... | Linking articles to species - specific phenes . |
62,332 | def filter_keep_phenotype_entry_ids ( self , entry ) : omim_id = str ( entry [ 'mimNumber' ] ) otype = self . globaltt [ 'obsolete' ] if omim_id in self . omim_type : otype = self . omim_type [ omim_id ] if otype == self . globaltt [ 'obsolete' ] and omim_id in self . omim_replaced : omim_id = self . omim_replaced [ om... | doubt this should be kept |
62,333 | def make_spo ( sub , prd , obj ) : if prd == 'a' : prd = 'rdf:type' try : ( subcuri , subid ) = re . split ( r':' , sub ) except Exception : LOG . error ( "not a Subject Curie '%s'" , sub ) raise ValueError try : ( prdcuri , prdid ) = re . split ( r':' , prd ) except Exception : LOG . error ( "not a Predicate Curie '... | Decorates the three given strings as a line of ntriples |
62,334 | def write_spo ( sub , prd , obj ) : rcvtriples . append ( make_spo ( sub , prd , obj ) ) | write triples to a buffer incase we decide to drop them |
62,335 | def make_allele_by_consequence ( self , consequence , gene_id , gene_symbol ) : allele_id = None type_id = self . resolve ( consequence , mandatory = False ) if type_id == consequence : LOG . warning ( "Consequence type unmapped: %s" , str ( consequence ) ) type_id = self . globaltt [ 'sequence_variant' ] allele_id = '... | Given a consequence label that describes a variation type create an anonymous variant of the specified gene as an instance of that consequence type . |
62,336 | def parse ( self , limit : Optional [ int ] = None ) : if limit is not None : LOG . info ( "Only parsing first %d rows" , limit ) LOG . info ( "Parsing files..." ) file_path = '/' . join ( ( self . rawdir , self . files [ 'developmental_disorders' ] [ 'file' ] ) ) with gzip . open ( file_path , 'rt' ) as csvfile : read... | Here we parse each row of the gene to phenotype file |
62,337 | def _add_gene_disease ( self , row ) : col = self . files [ 'developmental_disorders' ] [ 'columns' ] if len ( row ) != len ( col ) : raise ValueError ( "Unexpected number of fields for row {}" . format ( row ) ) variant_label = "variant of {}" . format ( row [ col . index ( 'gene_symbol' ) ] ) disease_omim_id = row [ ... | Parse and add gene variant disease model Model building happens in _build_gene_disease_model |
62,338 | def _build_gene_disease_model ( self , gene_id , relation_id , disease_id , variant_label , consequence_predicate = None , consequence_id = None , allelic_requirement = None , pmids = None ) : model = Model ( self . graph ) geno = Genotype ( self . graph ) pmids = [ ] if pmids is None else pmids is_variant = False vari... | Builds gene variant disease model |
62,339 | def _get_identifiers ( self , limit ) : LOG . info ( "getting identifier mapping" ) line_counter = 0 f = '/' . join ( ( self . rawdir , self . files [ 'identifiers' ] [ 'file' ] ) ) myzip = ZipFile ( f , 'r' ) fname = myzip . namelist ( ) [ 0 ] foundheader = False speciesfilters = 'Homo sapiens,Mus musculus' . split ( ... | This will process the id mapping file provided by Biogrid . The file has a very large header which we scan past then pull the identifiers and make equivalence axioms |
62,340 | def add_supporting_evidence ( self , evidence_line , evidence_type = None , label = None ) : self . graph . addTriple ( self . association , self . globaltt [ 'has_supporting_evidence_line' ] , evidence_line ) if evidence_type is not None : self . model . addIndividualToGraph ( evidence_line , label , evidence_type ) r... | Add supporting line of evidence node to association id |
62,341 | def add_association_to_graph ( self ) : Assoc . add_association_to_graph ( self ) if self . start_stage_id or self . end_stage_id is not None : stage_process_id = '-' . join ( ( str ( self . start_stage_id ) , str ( self . end_stage_id ) ) ) stage_process_id = '_:' + re . sub ( r':' , '' , stage_process_id ) self . mod... | Overrides Association by including bnode support |
62,342 | def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) self . _process_straininfo ( limit ) self . _process_ontology_mappings_file ( limit ) self . _process_measurements_file ( limit ) self . _process_strainme... | MPD data is delivered in four separate csv files and one xml file which we process iteratively and write out as one large graph . |
62,343 | def _add_g2p_assoc ( self , graph , strain_id , sex , assay_id , phenotypes , comment ) : geno = Genotype ( graph ) model = Model ( graph ) eco_id = self . globaltt [ 'experimental phenotypic evidence' ] strain_label = self . idlabel_hash . get ( strain_id ) genotype_id = '_:' + '-' . join ( ( re . sub ( r':' , '' , st... | Create an association between a sex - specific strain id and each of the phenotypes . Here we create a genotype from the strain and a sex - specific genotype . Each of those genotypes are created as anonymous nodes . |
62,344 | def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows fo each file" , str ( limit ) ) LOG . info ( "Parsing files..." ) if self . test_only : self . test_mode = True for f in [ 'all' ] : file = '/' . join ( ( self . rawdir , self . files [ f ] [ 'file' ] ) ) self . _process... | IMPC data is delivered in three separate csv files OR in one integrated file each with the same file format . |
62,345 | def addGeneToPathway ( self , gene_id , pathway_id ) : gene_product = '_:' + re . sub ( r':' , '' , gene_id ) + 'product' self . model . addIndividualToGraph ( gene_product , None , self . globaltt [ 'gene_product' ] ) self . graph . addTriple ( gene_id , self . globaltt [ 'has gene product' ] , gene_product ) self . a... | When adding a gene to a pathway we create an intermediate gene product that is involved in the pathway through a blank node . |
62,346 | def addComponentToPathway ( self , component_id , pathway_id ) : self . graph . addTriple ( component_id , self . globaltt [ 'involved in' ] , pathway_id ) return | This can be used directly when the component is directly involved in the pathway . If a transforming event is performed on the component first then the addGeneToPathway should be used instead . |
62,347 | def write ( self , fmt = 'turtle' , stream = None ) : fmt_ext = { 'rdfxml' : 'xml' , 'turtle' : 'ttl' , 'nt' : 'nt' , 'nquads' : 'nq' , 'n3' : 'n3' } dest = None if self . name is not None : dest = '/' . join ( ( self . outdir , self . name ) ) if fmt in fmt_ext : dest = '.' . join ( ( dest , fmt_ext . get ( fmt ) ) ) ... | This convenience method will write out all of the graphs associated with the source . Right now these are hardcoded to be a single graph and a src_dataset . ttl and a src_test . ttl If you do not supply stream = stdout it will default write these to files . |
62,348 | def declareAsOntology ( self , graph ) : model = Model ( graph ) ontology_file_id = 'MonarchData:' + self . name + ".ttl" model . addOntologyDeclaration ( ontology_file_id ) cur_time = datetime . now ( ) t_string = cur_time . strftime ( "%Y-%m-%d" ) ontology_version = t_string archive_url = 'MonarchArchive:' + 'ttl/' +... | The file we output needs to be declared as an ontology including it s version information . |
62,349 | def remove_backslash_r ( filename , encoding ) : with open ( filename , 'r' , encoding = encoding , newline = r'\n' ) as filereader : contents = filereader . read ( ) contents = re . sub ( r'\r' , '' , contents ) with open ( filename , "w" ) as filewriter : filewriter . truncate ( ) filewriter . write ( contents ) | A helpful utility to remove Carriage Return from any file . This will read a file into memory and overwrite the contents of the original file . |
62,350 | def load_local_translationtable ( self , name ) : localtt_file = 'translationtable/' + name + '.yaml' try : with open ( localtt_file ) : pass except IOError : with open ( localtt_file , 'w' ) as write_yaml : yaml . dump ( { name : name } , write_yaml ) finally : with open ( localtt_file , 'r' ) as read_yaml : localtt =... | Load ingest specific translation from whatever they called something to the ontology label we need to map it to . To facilitate seeing more ontology lables in dipper ingests a reverse mapping from ontology lables to external strings is also generated and available as a dict localtcid |
62,351 | def addGene ( self , gene_id , gene_label , gene_type = None , gene_description = None ) : if gene_type is None : gene_type = self . globaltt [ 'gene' ] self . model . addClassToGraph ( gene_id , gene_label , gene_type , gene_description ) return | genes are classes |
62,352 | def get_ncbi_taxon_num_by_label ( label ) : req = { 'db' : 'taxonomy' , 'retmode' : 'json' , 'term' : label } req . update ( EREQ ) request = SESSION . get ( ESEARCH , params = req ) LOG . info ( 'fetching: %s' , request . url ) request . raise_for_status ( ) result = request . json ( ) [ 'esearchresult' ] if 'ERROR' i... | Here we want to look up the NCBI Taxon id using some kind of label . It will only return a result if there is a unique hit . |
62,353 | def set_association_id ( self , assoc_id = None ) : if assoc_id is None : self . assoc_id = self . make_association_id ( self . definedby , self . sub , self . rel , self . obj ) else : self . assoc_id = assoc_id return self . assoc_id | This will set the association ID based on the internal parts of the association . To be used in cases where an external association identifier should be used . |
62,354 | def make_association_id ( definedby , sub , pred , obj , attributes = None ) : items_to_hash = [ definedby , sub , pred , obj ] if attributes is not None and len ( attributes ) > 0 : items_to_hash += attributes items_to_hash = [ x for x in items_to_hash if x is not None ] assoc_id = ':' . join ( ( 'MONARCH' , GraphUtil... | A method to create unique identifiers for OBAN - style associations based on all the parts of the association If any of the items is empty or None it will convert it to blank . It effectively digests the string of concatonated values . Subclasses of Assoc can submit an additional array of attributes that will be appede... |
62,355 | def toRoman ( num ) : if not 0 < num < 5000 : raise ValueError ( "number %n out of range (must be 1..4999)" , num ) if int ( num ) != num : raise TypeError ( "decimals %n can not be converted" , num ) result = "" for numeral , integer in romanNumeralMap : while num >= integer : result += numeral num -= integer return r... | convert integer to Roman numeral |
62,356 | def fromRoman ( strng ) : if not strng : raise TypeError ( 'Input can not be blank' ) if not romanNumeralPattern . search ( strng ) : raise ValueError ( 'Invalid Roman numeral: %s' , strng ) result = 0 index = 0 for numeral , integer in romanNumeralMap : while strng [ index : index + len ( numeral ) ] == numeral : resu... | convert Roman numeral to integer |
62,357 | def _process_genotype_backgrounds ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing genotype backgrounds" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'backgrounds' ] [ 'file' ] ) ) geno = Genot... | This table provides a mapping of genotypes to background genotypes Note that the background_id is also a genotype_id . |
62,358 | def _process_stages ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing stages" ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'stage' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-8859-1... | This table provides mappings between ZFIN stage IDs and ZFS terms and includes the starting and ending hours for the developmental stage . Currently only processing the mapping from the ZFIN stage ID to the ZFS ID . |
62,359 | def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'gene' ] [ 'file' ] ) ) geno = Genotype ( graph ) with open ( raw , 'r' ,... | This table provides the ZFIN gene id the SO type of the gene the gene symbol and the NCBI Gene ID . |
62,360 | def _process_features ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) LOG . info ( "Processing features" ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'features' ] [ 'file' ] ) ) with open ( r... | This module provides information for the intrinsic and extrinsic genotype features of zebrafish . All items here are alterations and are therefore instances . |
62,361 | def _process_pubinfo ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pubs' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader = csv . re... | This will pull the zfin internal publication information and map them to their equivalent pmid and make labels . |
62,362 | def _process_pub2pubmed ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'pub2pubmed' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "latin-1" ) as csvfile : filereader =... | This will pull the zfin internal publication to pubmed mappings . Somewhat redundant with the process_pubinfo method but this includes additional mappings . |
62,363 | def _process_targeting_reagents ( self , reagent_type , limit = None ) : LOG . info ( "Processing Gene Targeting Reagents" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) if reagent_type not in [ 'morph' , 'talen' , 'crispr'... | This method processes the gene targeting knockdown reagents such as morpholinos talens and crisprs . We create triples for the reagents and pass the data into a hash map for use in the pheno_enviro method . |
62,364 | def _process_uniprot_ids ( self , limit = None ) : LOG . info ( "Processing UniProt IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'uniprot' ] [ 'file' ] ) ) with ope... | This method processes the mappings from ZFIN gene IDs to UniProtKB IDs . |
62,365 | def get_orthology_evidence_code ( self , abbrev ) : eco_abbrev_map = { 'AA' : 'ECO:0000031' , 'CE' : 'ECO:0000008' , 'CL' : 'ECO:0000044' , 'FC' : 'ECO:0000012' , 'FH' : 'ECO:0000064' , 'IX' : 'ECO:0000040' , 'NS' : None , 'NT' : 'ECO:0000032' , 'SI' : 'ECO:0000094' , 'SL' : 'ECO:0000122' , 'SS' : 'ECO:0000024' , 'SU' ... | move to localtt & globltt |
62,366 | def _process_diseases ( self , limit = None ) : LOG . info ( "Processing diseases" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 model = Model ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "iso-... | This method processes the KEGG disease IDs . |
62,367 | def _process_genes ( self , limit = None ) : LOG . info ( "Processing genes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 family = Family ( graph ) geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'hsa_genes' ] [ 'file... | This method processes the KEGG gene IDs . The label for the gene is pulled as the first symbol in the list of gene symbols ; the rest are added as synonyms . The long - form of the gene name is added as a definition . This is hardcoded to just processes human genes . |
62,368 | def _process_ortholog_classes ( self , limit = None ) : LOG . info ( "Processing ortholog classes" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ortholog_classes' ] [ 'file' ] ) ) with open ( raw... | This method add the KEGG orthology classes to the graph . |
62,369 | def _process_orthologs ( self , raw , limit = None ) : LOG . info ( "Processing orthologs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 with open ( raw , 'r' , encoding = "iso-8859-1" ) as csvfile : filereader = csv . reader ( csvfile , delimiter ... | This method maps orthologs for a species to the KEGG orthology classes . |
62,370 | def _process_kegg_disease2gene ( self , limit = None ) : LOG . info ( "Processing KEGG disease to gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) rel = self . globaltt [ 'is marker for' ] noomimset = set ( ) raw = '/' ... | This method creates an association between diseases and their associated genes . We are being conservative here and only processing those diseases for which there is no mapping to OMIM . |
62,371 | def _process_omim2gene ( self , limit = None ) : LOG . info ( "Processing OMIM to KEGG gene" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno = Genotype ( graph ) raw = '/' . join ( ( self . rawdir , self . files [ 'omim2gene' ] [ 'file' ] ) ) wi... | This method maps the OMIM IDs and KEGG gene ID . Currently split based on the link_type field . Equivalent link types are mapped as gene XRefs . Reverse link types are mapped as disease to gene associations . Original link types are currently skipped . |
62,372 | def _process_genes_kegg2ncbi ( self , limit = None ) : LOG . info ( "Processing KEGG gene IDs to NCBI gene IDs" ) if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'ncbi' ] [ 'file' ] ) ) with open ( ra... | This method maps the KEGG human gene IDs to the corresponding NCBI Gene IDs . |
62,373 | def _process_pathway_disease ( self , limit ) : LOG . info ( "Processing KEGG pathways to disease ids" ) if self . test_mode : graph = self . testgraph else : graph = self . graph line_counter = 0 raw = '/' . join ( ( self . rawdir , self . files [ 'pathway_disease' ] [ 'file' ] ) ) with open ( raw , 'r' , encoding = "... | We make a link between the pathway identifiers and any diseases associated with them . Since we model diseases as processes we make a triple saying that the pathway may be causally upstream of or within the disease process . |
62,374 | def _make_variant_locus_id ( self , gene_id , disease_id ) : alt_locus_id = '_:' + re . sub ( r':' , '' , gene_id ) + '-' + re . sub ( r':' , '' , disease_id ) + 'VL' alt_label = self . label_hash . get ( gene_id ) disease_label = self . label_hash . get ( disease_id ) if alt_label is not None and alt_label != '' : alt... | We actually want the association between the gene and the disease to be via an alternate locus not the wildtype gene itself . so we make an anonymous alternate locus and put that in the association We also make the label for the anonymous class and add it to the label hash |
62,375 | def _fetch_disambiguating_assoc ( self ) : disambig_file = '/' . join ( ( self . rawdir , self . static_files [ 'publications' ] [ 'file' ] ) ) assoc_file = '/' . join ( ( self . rawdir , self . files [ 'chemical_disease_interactions' ] [ 'file' ] ) ) if os . path . exists ( disambig_file ) : dfile_dt = os . stat ( dis... | For any of the items in the chemical - disease association file that have ambiguous association types we fetch the disambiguated associations using the batch query API and store these in a file . Elsewhere we can loop through the file and create the appropriate associations . |
62,376 | def _make_association ( self , subject_id , object_id , rel_id , pubmed_ids ) : assoc = G2PAssoc ( self . graph , self . name , subject_id , object_id , rel_id ) if pubmed_ids is not None and len ( pubmed_ids ) > 0 : for pmid in pubmed_ids : ref = Reference ( self . graph , pmid , self . globaltt [ 'journal article' ] ... | Make a reified association given an array of pubmed identifiers . |
62,377 | def checkIfRemoteIsNewer ( self , localfile , remote_size , remote_modify ) : is_remote_newer = False status = os . stat ( localfile ) LOG . info ( "\nLocal file size: %i" "\nLocal Timestamp: %s" , status [ ST_SIZE ] , datetime . fromtimestamp ( status . st_mtime ) ) remote_dt = Bgee . _convert_ftp_time_to_iso ( remote... | Overrides checkIfRemoteIsNewer in Source class |
62,378 | def _convert_ftp_time_to_iso ( ftp_time ) : date_time = datetime ( int ( ftp_time [ : 4 ] ) , int ( ftp_time [ 4 : 6 ] ) , int ( ftp_time [ 6 : 8 ] ) , int ( ftp_time [ 8 : 10 ] ) , int ( ftp_time [ 10 : 12 ] ) , int ( ftp_time [ 12 : 14 ] ) ) return date_time | Convert datetime in the format 20160705042714 to a datetime object |
62,379 | def fetch ( self , is_dl_forced = False ) : cxn = { } cxn [ 'host' ] = 'nif-db.crbs.ucsd.edu' cxn [ 'database' ] = 'disco_crawler' cxn [ 'port' ] = '5432' cxn [ 'user' ] = config . get_config ( ) [ 'user' ] [ 'disco' ] cxn [ 'password' ] = config . get_config ( ) [ 'keys' ] [ cxn [ 'user' ] ] self . dataset . setFileAc... | connection details for DISCO |
62,380 | def parse ( self , limit = None ) : if limit is not None : LOG . info ( "Only parsing first %s rows of each file" , limit ) if self . test_only : self . test_mode = True LOG . info ( "Parsing files..." ) self . _process_nlx_157874_1_view ( '/' . join ( ( self . rawdir , 'dvp.pr_nlx_157874_1' ) ) , limit ) self . _map_e... | Over ride Source . parse inherited via PostgreSQLSource |
62,381 | def _process_gxd_genotype_view ( self , limit = None ) : line_counter = 0 if self . test_mode : graph = self . testgraph else : graph = self . graph geno = Genotype ( graph ) model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'gxd_genotype_view' ) ) LOG . info ( "getting genotypes and their backgrounds" ) wit... | This table indicates the relationship between a genotype and it s background strain . It leverages the Genotype class methods to do this . |
62,382 | def _process_gxd_genotype_summary_view ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 geno_hash = { } raw = '/' . join ( ( self . rawdir , 'gxd_genotype_summary_view' ) ) LOG . info ( "building labels for genotypes" ) with op... | Add the genotype internal id to mgiid mapping to the idhashmap . Also add them as individuals to the graph . We re - format the label to put the background strain in brackets after the gvc . |
62,383 | def process_mgi_relationship_transgene_genes ( self , limit = None ) : if self . test_mode : graph = self . testgraph else : graph = self . graph LOG . info ( "getting transgene genes" ) raw = '/' . join ( ( self . rawdir , 'mgi_relationship_transgene_genes' ) ) geno = Genotype ( graph ) col = [ 'rel_key' , 'allele_key... | Here we have the relationship between MGI transgene alleles and the non - mouse gene ids that are part of them . We augment the allele with the transgene parts . |
62,384 | def _getnode ( self , curie ) : node = None if curie [ 0 ] == '_' : if self . are_bnodes_skized is True : node = self . skolemizeBlankNode ( curie ) else : node = BNode ( re . sub ( r'^_:|^_' , '' , curie , 1 ) ) elif curie [ : 4 ] == 'http' or curie [ : 3 ] == 'ftp' : node = URIRef ( curie ) else : iri = RDFGraph . cu... | This is a wrapper for creating a URIRef or Bnode object with a given a curie or iri as a string . |
62,385 | def add_association_to_graph ( self ) : Assoc . add_association_to_graph ( self ) if self . onset is not None and self . onset != '' : self . graph . addTriple ( self . assoc_id , self . globaltt [ 'onset' ] , self . onset ) if self . frequency is not None and self . frequency != '' : self . graph . addTriple ( self . ... | The reified relationship between a disease and a phenotype is decorated with some provenance information . This makes the assumption that both the disease and phenotype are classes . |
62,386 | def make_parent_bands ( self , band , child_bands ) : m = re . match ( r'([pq][A-H\d]+(?:\.\d+)?)' , band ) if len ( band ) > 0 : if m : p = str ( band [ 0 : len ( band ) - 1 ] ) p = re . sub ( r'\.$' , '' , p ) if p is not None : child_bands . add ( p ) self . make_parent_bands ( p , child_bands ) else : child_bands =... | this will determine the grouping bands that it belongs to recursively 13q21 . 31 == > 13 13q 13q2 13q21 13q21 . 3 13q21 . 31 |
62,387 | def get_curie ( self , uri ) : prefix = self . get_curie_prefix ( uri ) if prefix is not None : key = self . curie_map [ prefix ] return '%s:%s' % ( prefix , uri [ len ( key ) : len ( uri ) ] ) return None | Get a CURIE from a URI |
62,388 | def get_uri ( self , curie ) : if curie is None : return None parts = curie . split ( ':' ) if len ( parts ) == 1 : if curie != '' : LOG . error ( "Not a properly formed curie: \"%s\"" , curie ) return None prefix = parts [ 0 ] if prefix in self . curie_map : return '%s%s' % ( self . curie_map . get ( prefix ) , curie ... | Get a URI from a CURIE |
62,389 | def fetch ( self , is_dl_forced = False ) : host = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'host' ] key = config . get_config ( ) [ 'dbauth' ] [ 'coriell' ] [ 'private_key' ] user = config . get_config ( ) [ 'user' ] [ 'coriell' ] passwd = config . get_config ( ) [ 'keys' ] [ user ] with pysftp . Connectio... | Here we connect to the coriell sftp server using private connection details . They dump bi - weekly files with a timestamp in the filename . For each catalog we ping the remote site and pull the most - recently updated file renaming it to our local latest . csv . |
62,390 | def _process_collection ( self , collection_id , label , page ) : for graph in [ self . graph , self . testgraph ] : model = Model ( graph ) reference = Reference ( graph ) repo_id = 'CoriellCollection:' + collection_id repo_label = label repo_page = page model . addIndividualToGraph ( repo_id , repo_label , self . glo... | This function will process the data supplied internally about the repository from Coriell . |
62,391 | def _process_genotypes ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'genotype' ) ) LOG . info ( "building labels for genotypes" ) geno = Genotype ( graph ) fly_tax = self . globaltt [ 'Drosophi... | Add the genotype internal id to flybase mapping to the idhashmap . Also add them as individuals to the graph . |
62,392 | def _process_stocks ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'stock' ) ) LOG . info ( "building labels for stocks" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reade... | Stock definitions . Here we instantiate them as instances of the given taxon . |
62,393 | def _process_pubs ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) line_counter = 0 raw = '/' . join ( ( self . rawdir , 'pub' ) ) LOG . info ( "building labels for pubs" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f ... | Flybase publications . |
62,394 | def _process_environments ( self ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'environment' ) ) LOG . info ( "building labels for environment" ) env_parts = { } label_map = { } env = Environment ( graph ) with open ( raw , 'r' ) as f : filereader = ... | There s only about 30 environments in which the phenotypes are recorded . There are no externally accessible identifiers for environments so we make anonymous nodes for now . Some of the environments are comprised of > 1 of the other environments ; we do some simple parsing to match the strings of the environmental lab... |
62,395 | def _process_stock_genotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph raw = '/' . join ( ( self . rawdir , 'stock_genotype' ) ) LOG . info ( "processing stock genotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' ... | The genotypes of the stocks . |
62,396 | def _process_dbxref ( self ) : raw = '/' . join ( ( self . rawdir , 'dbxref' ) ) LOG . info ( "processing dbxrefs" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) f . readline ( ) for line in filereader : ( dbxref_id , db_id , accession , version ,... | We bring in the dbxref identifiers and store them in a hashmap for lookup in other functions . Note that some dbxrefs aren t mapped to identifiers . For example 5004018 is mapped to a string endosome & imaginal disc epithelial cell | somatic clone ... In those cases there just isn t a dbxref that s used when referencin... |
62,397 | def _process_phenotype ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'phenotype' ) ) LOG . info ( "processing phenotype" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimite... | Get the phenotypes and declare the classes . If the observable is unspecified then we assign the phenotype to the cvalue id ; otherwise we convert the phenotype into a uberpheno - style identifier simply based on the anatomical part that s affected ... that is listed as the observable_id concatenated with the literal P... |
62,398 | def _process_cvterm ( self ) : line_counter = 0 raw = '/' . join ( ( self . rawdir , 'cvterm' ) ) LOG . info ( "processing cvterms" ) with open ( raw , 'r' ) as f : f . readline ( ) filereader = csv . reader ( f , delimiter = '\t' , quotechar = '\"' ) for line in filereader : line_counter += 1 ( cvterm_id , cv_id , def... | CVterms are the internal identifiers for any controlled vocab or ontology term . Many are xrefd to actual ontologies . The actual external id is stored in the dbxref table which we place into the internal hashmap for lookup with the cvterm id . The name of the external term is stored in the name element of this table a... |
62,399 | def _process_organisms ( self , limit ) : if self . test_mode : graph = self . testgraph else : graph = self . graph model = Model ( graph ) raw = '/' . join ( ( self . rawdir , 'organism' ) ) LOG . info ( "processing organisms" ) line_counter = 0 with open ( raw , 'r' ) as f : filereader = csv . reader ( f , delimiter... | The internal identifiers for the organisms in flybase |
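The rows above pair space-tokenized Python source (`question`) with its reference docstring (`target`), keyed by `idx`. Below is a minimal sketch of loading and inspecting records with this schema via the Hugging Face `datasets` library; the identifier `"code-docstring-corpus"` is a placeholder assumption for illustration only, since the excerpt does not name the dataset.

```python
# A rough sketch, assuming the data is published as a Hugging Face dataset.
# "code-docstring-corpus" is a hypothetical identifier, not the real name.
from datasets import load_dataset

ds = load_dataset("code-docstring-corpus", split="train")

# Each record carries the three columns shown in the table above.
row = ds[0]
print(row["idx"])             # integer row index within the corpus
print(row["question"][:200])  # space-tokenized source code (truncated here)
print(row["target"])          # the reference docstring
```

Note that the `question` field keeps spaces around punctuation (e.g. `task . cancel ( )`), so it needs light detokenization before it will parse as ordinary Python source.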