idx int64 0 63k | question stringlengths 53 5.28k | target stringlengths 5 805 |
|---|---|---|
15,700 | def make_objs ( names , out_dir = '' ) : objs = [ replace_ext ( name , '.obj' ) for name in names ] if out_dir : objs = [ os . path . join ( out_dir , obj ) for obj in objs ] return objs | Make object file names for cl . exe and link . exe . |
15,701 | def examples ( ) : sci = InterLexClient ( api_key = os . environ . get ( 'INTERLEX_API_KEY' ) , base_url = 'https://beta.scicrunch.org/api/1/' , ) entity = { 'label' : 'brain115' , 'type' : 'fde' , 'definition' : 'Part of the central nervous system' , 'comment' : 'Cannot live without it' , 'superclass' : { 'ilx_id' : 'ilx_0108124' , } , 'synonyms' : [ { 'literal' : 'Encephalon' } , { 'literal' : 'Cerebro' } , ] , 'existing_ids' : [ { 'iri' : 'http://uri.neuinfo.org/nif/nifstd/birnlex_796' , 'curie' : 'BIRNLEX:796' , } , ] , } simple_entity = { 'label' : entity [ 'label' ] , 'type' : entity [ 'type' ] , 'definition' : entity [ 'definition' ] , 'comment' : entity [ 'comment' ] , 'superclass' : entity [ 'superclass' ] [ 'ilx_id' ] , 'synonyms' : [ syn [ 'literal' ] for syn in entity [ 'synonyms' ] ] , 'predicates' : { 'tmp_0381624' : 'http://example_dbxref' } } annotation = { 'term_ilx_id' : 'ilx_0101431' , 'annotation_type_ilx_id' : 'tmp_0381624' , 'annotation_value' : 'PMID:12345' , } relationship = { 'entity1_ilx' : 'ilx_0101431' , 'relationship_ilx' : 'ilx_0115023' , 'entity2_ilx' : 'ilx_0108124' , } update_entity_data = { 'ilx_id' : 'ilx_0101431' , 'label' : 'Brain' , 'definition' : 'update_test!!' , 'type' : 'fde' , 'comment' : 'test comment' , 'superclass' : 'ilx_0108124' , 'synonyms' : [ 'test' , 'test2' , 'test2' ] , } relationship = { 'entity1_ilx' : 'http://uri.interlex.org/base/ilx_0100001' , 'relationship_ilx' : 'http://uri.interlex.org/base/ilx_0112772' , 'entity2_ilx' : 'http://uri.interlex.org/base/ilx_0100000' , } | Examples of how to use . Default are that some functions are commented out in order to not cause harm to existing metadata within the database . |
15,702 | def process_response ( self , response : requests . models . Response ) -> dict : try : output = response . json ( ) except json . JSONDecodeError : raise self . BadResponseError ( 'Json not returned with status code [' + str ( response . status_code ) + ']' ) if response . status_code == 400 : return output if response . status_code not in [ 200 , 201 ] : raise self . BadResponseError ( str ( output ) + ': with status code [' + str ( response . status_code ) + '] and params:' + str ( output ) ) return output [ 'data' ] | Checks for correct data response and status codes |
15,703 | def process_superclass ( self , entity : List [ dict ] ) -> List [ dict ] : superclass = entity . pop ( 'superclass' ) label = entity [ 'label' ] if not superclass . get ( 'ilx_id' ) : raise self . SuperClassDoesNotExistError ( f'Superclass not given an interlex ID for label: {label}' ) superclass_data = self . get_entity ( superclass [ 'ilx_id' ] ) if not superclass_data [ 'id' ] : raise self . SuperClassDoesNotExistError ( 'Superclass ILX ID: ' + superclass [ 'ilx_id' ] + ' does not exist in SciCrunch' ) entity [ 'superclasses' ] = [ { 'superclass_tid' : superclass_data [ 'id' ] } ] return entity | Replaces ILX ID with superclass ID |
15,704 | def check_scicrunch_for_label ( self , label : str ) -> dict : list_of_crude_matches = self . crude_search_scicrunch_via_label ( label ) for crude_match in list_of_crude_matches : if crude_match [ 'label' ] . lower ( ) . strip ( ) == label . lower ( ) . strip ( ) : complete_data_of_crude_match = self . get_entity ( crude_match [ 'ilx' ] ) crude_match_label = crude_match [ 'label' ] crude_match_user_id = complete_data_of_crude_match [ 'uid' ] if str ( self . user_id ) == str ( crude_match_user_id ) : return complete_data_of_crude_match return { } | Sees if label with your user ID already exists |
15,705 | def add_raw_entity ( self , entity : dict ) -> dict : needed_in_entity = set ( [ 'label' , 'type' , ] ) options_in_entity = set ( [ 'label' , 'type' , 'definition' , 'comment' , 'superclass' , 'synonyms' , 'existing_ids' ] ) prime_entity_url = self . base_url + 'ilx/add' add_entity_url = self . base_url + 'term/add' if ( set ( entity ) & needed_in_entity ) != needed_in_entity : raise self . MissingKeyError ( 'You need key(s): ' + str ( needed_in_entity - set ( entity ) ) ) elif ( set ( entity ) | options_in_entity ) != options_in_entity : raise self . IncorrectKeyError ( 'Unexpected key(s): ' + str ( set ( entity ) - options_in_entity ) ) entity [ 'type' ] = entity [ 'type' ] . lower ( ) if entity [ 'type' ] not in [ 'term' , 'relationship' , 'annotation' , 'cde' , 'fde' , 'pde' ] : raise TypeError ( 'Entity should be one of the following: ' + 'term, relationship, annotation, cde, fde, pde' ) if entity . get ( 'superclass' ) : entity = self . process_superclass ( entity ) if entity . get ( 'synonyms' ) : entity = self . process_synonyms ( entity ) if entity . get ( 'existing_ids' ) : entity = self . process_existing_ids ( entity ) entity [ 'uid' ] = self . user_id entity [ 'term' ] = entity . pop ( 'label' ) ilx_data = self . post ( url = prime_entity_url , data = entity . copy ( ) , ) if ilx_data . get ( 'ilx' ) : ilx_id = ilx_data [ 'ilx' ] else : ilx_id = ilx_data [ 'fragment' ] entity [ 'label' ] = entity . pop ( 'term' ) entity [ 'ilx' ] = ilx_id output = self . post ( url = add_entity_url , data = entity . copy ( ) , ) if output . get ( 'errormsg' ) : if 'already exists' in output [ 'errormsg' ] . lower ( ) : prexisting_data = self . check_scicrunch_for_label ( entity [ 'label' ] ) if prexisting_data : print ( 'You already added entity' , entity [ 'label' ] , 'with ILX ID:' , prexisting_data [ 'ilx' ] ) return prexisting_data self . Error ( output ) self . Error ( output ) output = self . get_entity ( output [ 'ilx' ] ) return output | Adds entity if it does not already exist under your user ID . |
15,706 | def add_annotation ( self , term_ilx_id : str , annotation_type_ilx_id : str , annotation_value : str ) -> dict : url = self . base_url + 'term/add-annotation' term_data = self . get_entity ( term_ilx_id ) if not term_data [ 'id' ] : exit ( 'term_ilx_id: ' + term_ilx_id + ' does not exist' ) anno_data = self . get_entity ( annotation_type_ilx_id ) if not anno_data [ 'id' ] : exit ( 'annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist' ) data = { 'tid' : term_data [ 'id' ] , 'annotation_tid' : anno_data [ 'id' ] , 'value' : annotation_value , 'term_version' : term_data [ 'version' ] , 'annotation_term_version' : anno_data [ 'version' ] , 'orig_uid' : self . user_id , } output = self . post ( url = url , data = data , ) if output . get ( 'errormsg' ) : if 'already exists' in output [ 'errormsg' ] . lower ( ) : term_annotations = self . get_annotation_via_tid ( term_data [ 'id' ] ) for term_annotation in term_annotations : if str ( term_annotation [ 'annotation_tid' ] ) == str ( anno_data [ 'id' ] ) : if term_annotation [ 'value' ] == data [ 'value' ] : print ( 'Annotation: [' + term_data [ 'label' ] + ' -> ' + anno_data [ 'label' ] + ' -> ' + data [ 'value' ] + '], already exists.' ) return term_annotation exit ( output ) exit ( output ) return output | Adding an annotation value to a prexisting entity |
15,707 | def delete_annotation ( self , term_ilx_id : str , annotation_type_ilx_id : str , annotation_value : str ) -> dict : term_data = self . get_entity ( term_ilx_id ) if not term_data [ 'id' ] : exit ( 'term_ilx_id: ' + term_ilx_id + ' does not exist' ) anno_data = self . get_entity ( annotation_type_ilx_id ) if not anno_data [ 'id' ] : exit ( 'annotation_type_ilx_id: ' + annotation_type_ilx_id + ' does not exist' ) entity_annotations = self . get_annotation_via_tid ( term_data [ 'id' ] ) annotation_id = '' for annotation in entity_annotations : if str ( annotation [ 'tid' ] ) == str ( term_data [ 'id' ] ) : if str ( annotation [ 'annotation_tid' ] ) == str ( anno_data [ 'id' ] ) : if str ( annotation [ 'value' ] ) == str ( annotation_value ) : annotation_id = annotation [ 'id' ] break if not annotation_id : print ( ) return None url = self . base_url + 'term/edit-annotation/{annotation_id}' . format ( annotation_id = annotation_id ) data = { 'tid' : ' ' , 'annotation_tid' : ' ' , 'value' : ' ' , 'term_version' : ' ' , 'annotation_term_version' : ' ' , } output = self . post ( url = url , data = data , ) return output | If annotation doesnt exist add it |
15,708 | def main ( self ) : self . secret_finder ( ) self . parse_access_token ( ) self . get_session_token ( ) self . parse_session_token ( ) self . get_route ( ) self . download_profile ( ) self . find_loci ( ) self . download_loci ( ) | Run the appropriate methods in the correct order |
15,709 | def secret_finder ( self ) : secretlist = list ( ) if os . path . isfile ( self . secret_file ) : with open ( self . secret_file , 'r' ) as secret : for line in secret : secretlist . append ( line . rstrip ( ) ) self . consumer_key = secretlist [ 0 ] self . consumer_secret = secretlist [ 1 ] else : print ( '"Cannot find the secret.txt file required for authorization. ' 'Please ensure that this file exists, and that the supplied consumer key is on the ' 'first line, and the consumer secret is on he second line. ' 'Contact keith.jolley@zoo.ox.ac.uk for an account, and the necessary keys' ) quit ( ) | Parses the supplied secret . txt file for the consumer key and secrets |
15,710 | def parse_access_token ( self ) : access_file = os . path . join ( self . file_path , 'access_token' ) if os . path . isfile ( access_file ) : access_list = list ( ) with open ( access_file , 'r' ) as access_token : for line in access_token : value , data = line . split ( '=' ) access_list . append ( data . rstrip ( ) ) self . access_secret = access_list [ 0 ] self . access_token = access_list [ 1 ] else : print ( 'Missing access_token' ) self . get_request_token ( ) self . get_access_token ( ) | Extract the secret and token values from the access_token file |
15,711 | def get_request_token ( self ) : print ( 'Obtaining request token' ) try : os . remove ( os . path . join ( self . file_path , 'request_token' ) ) except FileNotFoundError : pass session = OAuth1Session ( consumer_key = self . consumer_key , consumer_secret = self . consumer_secret ) r = session . request ( method = 'GET' , url = self . request_token_url , params = { 'oauth_callback' : 'oob' } ) if r . status_code == 200 : self . request_token = r . json ( ) [ 'oauth_token' ] self . request_secret = r . json ( ) [ 'oauth_token_secret' ] self . write_token ( 'request_token' , self . request_token , self . request_secret ) | Obtain a request token |
15,712 | def get_session_token ( self ) : try : os . remove ( os . path . join ( self . file_path , 'session_token' ) ) except FileNotFoundError : pass session_request = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . access_token , access_token_secret = self . access_secret ) r = session_request . get ( self . session_token_url ) if r . status_code == 200 : self . session_token = r . json ( ) [ 'oauth_token' ] self . session_secret = r . json ( ) [ 'oauth_token_secret' ] self . write_token ( 'session_token' , self . session_token , self . session_secret ) else : print ( 'Failed:' ) print ( r . json ( ) [ 'message' ] ) | Use the accession token to request a new session token |
15,713 | def parse_session_token ( self ) : session_file = os . path . join ( self . file_path , 'session_token' ) if os . path . isfile ( session_file ) : session_list = list ( ) with open ( session_file , 'r' ) as session_token : for line in session_token : value , data = line . split ( '=' ) session_list . append ( data . rstrip ( ) ) self . session_secret = session_list [ 0 ] self . session_token = session_list [ 1 ] | Extract the session secret and token strings from the session token file |
15,714 | def get_route ( self ) : session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) r = session . get ( self . test_rest_url ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text self . loci = decoded [ 'loci' ] self . profile = decoded [ 'schemes' ] | Creates a session to find the URL for the loci and schemes |
15,715 | def download_profile ( self ) : profile_file = os . path . join ( self . output_path , 'profile.txt' ) size = 0 try : stats = os . stat ( profile_file ) size = stats . st_size except FileNotFoundError : pass if not os . path . isfile ( profile_file ) or size <= 100 : session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) r = session . get ( self . profile + '/1/profiles_csv' ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text with open ( profile_file , 'w' ) as profile : profile . write ( decoded ) | Download the profile from the database |
15,716 | def find_loci ( self ) : session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) r = session . get ( self . loci ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text for locus in decoded [ 'loci' ] : self . loci_url . append ( locus ) | Finds the URLs for all allele files |
15,717 | def download_loci ( self ) : pool = multiprocessing . Pool ( processes = self . threads ) pool . map ( self . download_threads , self . loci_url ) pool . close ( ) pool . join ( ) | Uses a multi - threaded approach to download allele files |
15,718 | def download_threads ( self , url ) : output_file = os . path . join ( self . output_path , '{}.tfa' . format ( os . path . split ( url ) [ - 1 ] ) ) size = 0 try : stats = os . stat ( output_file ) size = stats . st_size except FileNotFoundError : pass if not os . path . isfile ( output_file ) or size <= 100 : session = OAuth1Session ( self . consumer_key , self . consumer_secret , access_token = self . session_token , access_token_secret = self . session_secret ) r = session . get ( url + '/alleles_fasta' ) if r . status_code == 200 or r . status_code == 201 : if re . search ( 'json' , r . headers [ 'content-type' ] , flags = 0 ) : decoded = r . json ( ) else : decoded = r . text with open ( output_file , 'w' ) as allele : allele . write ( decoded ) | Download the allele files |
15,719 | def dumps ( obj , * args , ** kwargs ) : kwargs [ 'default' ] = object2dict return json . dumps ( obj , * args , ** kwargs ) | Serialize a object to string |
15,720 | def dump ( obj , fp , * args , ** kwargs ) : kwargs [ 'default' ] = object2dict json . dump ( obj , fp , * args , ** kwargs ) | Serialize a object to a file object . |
15,721 | def calc_delay ( remainingDrops ) : global sameDelay , lastDelay if remainingDrops > 1 : lastDelay = 5 sameDelay = 0 if remainingDrops > 2 : return 15 * 60 elif remainingDrops == 2 : return 10 * 60 else : if lastDelay > 1 : if sameDelay == 2 : sameDelay = 0 lastDelay -= 1 sameDelay += 1 return lastDelay * 60 | Calculate the idle delay Minimum play time for cards to drop is ~20min again . Except for accounts that requested a refund? |
15,722 | def configfilepopulator ( self ) : self . forwardlength = self . metadata . header . forwardlength self . reverselength = self . metadata . header . reverselength cycles = [ [ 1 , self . forwardlength , self . runid ] , [ self . forwardlength + 1 , self . forwardlength + 8 , self . runid ] , [ self . forwardlength + 9 , self . forwardlength + 16 , self . runid ] , [ self . forwardlength + 17 , self . forwardlength + 16 + self . reverselength , self . runid ] ] parameters = { 'RunFolder' : self . runid , 'RunFolderDate' : self . metadata . date . replace ( "-" , "" ) , 'RunFolderId' : self . metadata . runnumber , 'RunFlowcellId' : self . metadata . flowcell } config = ElementTree . parse ( "{}/config.xml" . format ( self . homepath ) ) configroot = config . getroot ( ) for run in configroot : for child in run : if child . tag == 'Cycles' : child . attrib = { 'Last' : '{}' . format ( self . forwardlength + 16 + self . reverselength ) , 'Number' : '{}' . format ( self . totalreads ) , 'First' : '1' } elif child . tag == 'RunParameters' : runparameters = child for runparameter in runparameters : if 'Reads' in runparameter . tag : for indexcount , reads in enumerate ( runparameter ) : index = int ( runparameter . attrib [ 'Index' ] ) - 1 reads . text = str ( cycles [ index ] [ indexcount ] ) if runparameter . tag == 'Instrument' : runparameter . text = self . instrument for parameter in parameters : if runparameter . tag == parameter : runparameter . text = parameters [ parameter ] if 'Barcode' in runparameter . tag : for cycle , barcode in enumerate ( runparameter ) : barcode . text = str ( self . forwardlength + 1 + cycle ) config . write ( '{}Data/Intensities/BaseCalls/config.xml' . format ( self . miseqfolder ) ) | Populates an unpopulated config . xml file with run - specific values and creates the file in the appropriate location |
15,723 | def subscribe_param ( ) : def print_data ( data ) : for parameter in data . parameters : print ( parameter ) processor . create_parameter_subscription ( '/YSS/SIMULATOR/BatteryVoltage2' , on_data = print_data ) | Print value of parameter |
15,724 | def _check_holiday_structure ( self , times ) : if not isinstance ( times , list ) : raise TypeError ( "an list is required" ) for time in times : if not isinstance ( time , tuple ) : raise TypeError ( "a tuple is required" ) if len ( time ) > 5 : raise TypeError ( "Target time takes at most 5 arguments" " ('%d' given)" % len ( time ) ) if len ( time ) < 5 : raise TypeError ( "Required argument '%s' (pos '%d')" " not found" % ( TIME_LABEL [ len ( time ) ] , len ( time ) ) ) self . _check_time_format ( TIME_LABEL , time ) | To check the structure of the HolidayClass |
15,725 | def _check_time_format ( self , labels , values ) : for label , value in zip ( labels , values ) : if value == "*" : continue if label == "day_of_week" : if isinstance ( value , string_types ) : if value not in ORDER_WEEK : raise ParseError ( "'%s' is not day of the week. " "character is the only '%s'" % ( value , ', ' . join ( ORDER_WEEK ) ) ) elif not isinstance ( value , int ) : raise TypeError ( "'%s' is not an int" % value ) if label in [ "year" , "month" , "day" , "num_of_week" ] : if not isinstance ( value , int ) : raise TypeError ( "'%s' is not an int" % value ) if isinstance ( value , int ) : start , end = TIME_INFO [ label ] if not start <= value <= end : raise PeriodRangeError ( "'%d' is outside the scope of the period " "'%s' range: '%d' to '%d'" % ( value , label , start , end ) ) | To check the format of the times |
15,726 | def is_holiday ( self , date ) : time = [ date . year , date . month , date . day , date . isoweekday ( ) , _extract_week_number ( date ) ] target = [ ] for key , data in list ( zip ( TIME_LABEL , time ) ) : d = getattr ( self , key ) asterisk = d . get ( "*" , set ( ) ) s = asterisk . union ( d . get ( data , set ( ) ) ) target . append ( list ( s ) ) for result in map ( set , product ( * target ) ) : if len ( result ) == 1 : return True return False | Whether holiday judges |
15,727 | def create ( self , python = None , system_site = False , always_copy = False ) : command = 'virtualenv' if python : command = '{0} --python={1}' . format ( command , python ) if system_site : command = '{0} --system-site-packages' . format ( command ) if always_copy : command = '{0} --always-copy' . format ( command ) command = '{0} {1}' . format ( command , self . path ) self . _execute ( command ) | Create a new virtual environment . |
15,728 | def epcrparse ( self ) : logging . info ( 'Parsing ePCR results' ) for sample in self . metadata : if sample . general . bestassemblyfile != 'NA' : if 'stx' in sample . general . datastore : uniquecount = 0 toxinlist = [ ] if os . path . isfile ( sample [ self . analysistype ] . resultsfile ) : epcrresults = open ( sample [ self . analysistype ] . resultsfile , 'r' ) for result in epcrresults : if "#" not in result : uniquecount += 1 data = result . split ( '\t' ) vttype = data [ 0 ] . split ( '_' ) [ 0 ] if vttype not in toxinlist : toxinlist . append ( vttype ) toxinstring = ";" . join ( sorted ( toxinlist ) ) sample [ self . analysistype ] . toxinprofile = toxinstring else : setattr ( sample , self . analysistype , GenObject ( ) ) sample [ self . analysistype ] . toxinprofile = 'NA' else : setattr ( sample , self . analysistype , GenObject ( ) ) sample [ self . analysistype ] . toxinprofile = 'NA' | Parse the ePCR text file outputs |
15,729 | def populate ( cls , graph ) : [ graph . bind ( k , v ) for k , v in cls . _dict . items ( ) ] | populate an rdflib graph with these curies |
15,730 | def authorized ( route ) : @ wraps ( route ) def authorized_route ( * args , ** kwargs ) : if 'mwoauth_access_token' in flask . session : return route ( * args , ** kwargs ) else : return flask . redirect ( flask . url_for ( 'mwoauth.mwoauth_initiate' ) + "?next=" + flask . request . endpoint ) return authorized_route | Wrap a flask route . Ensure that the user has authorized via OAuth or redirect the user to the authorization endpoint with a delayed redirect back to the originating endpoint . |
15,731 | def blum_blum_shub ( seed , amount , prime0 , prime1 ) : if amount == 0 : return [ ] assert ( prime0 % 4 == 3 and prime1 % 4 == 3 ) mod = prime0 * prime1 rand = [ seed ] for _ in range ( amount - 1 ) : last_num = rand [ len ( rand ) - 1 ] next_num = ( last_num * last_num ) % mod rand . append ( next_num ) return rand | Creates pseudo - number generator |
15,732 | def readlength ( self ) : logging . info ( 'Estimating read lengths of FASTQ files' ) for sample in self . samples : sample . run . Date = 'NA' sample . run . InvestigatorName = 'NA' sample . run . TotalClustersinRun = 'NA' sample . run . NumberofClustersPF = 'NA' sample . run . PercentOfClusters = 'NA' sample . run . SampleProject = 'NA' if not GenObject . isattr ( sample . run , 'forwardlength' ) and not GenObject . isattr ( sample . run , 'reverselength' ) : sample . header = GenObject ( ) sample . commands = GenObject ( ) devnull = open ( os . devnull , 'wb' ) if type ( sample . general . fastqfiles ) is list : forwardfastq = sorted ( sample . general . fastqfiles ) [ 0 ] if '.gz' in forwardfastq : command = 'zcat' else : command = 'cat' forwardreads = subprocess . Popen ( "{} {} | head -n 1000" . format ( command , forwardfastq ) , shell = True , stdout = subprocess . PIPE , stderr = devnull ) . communicate ( ) [ 0 ] . rstrip ( ) try : forwardreads = forwardreads . decode ( 'utf-8' ) except UnicodeDecodeError : sample . run . forwardlength = 0 try : forwardlength = max ( [ len ( sequence ) for iterator , sequence in enumerate ( forwardreads . split ( '\n' ) ) if iterator % 4 == 1 ] ) sample . run . forwardlength = forwardlength except ( ValueError , TypeError ) : sample . run . forwardlength = 0 if len ( sample . general . fastqfiles ) == 2 : reversefastq = sorted ( sample . general . fastqfiles ) [ 1 ] reversereads = subprocess . Popen ( "{} {} | head -n 1000" . format ( command , reversefastq ) , shell = True , stdout = subprocess . PIPE , stderr = devnull ) . communicate ( ) [ 0 ] . rstrip ( ) try : reversereads = reversereads . decode ( 'utf-8' ) except UnicodeDecodeError : sample . run . reverselength = 0 try : sample . run . reverselength = max ( [ len ( sequence ) for iterator , sequence in enumerate ( reversereads . split ( '\n' ) ) if iterator % 4 == 1 ] ) except ( ValueError , TypeError ) : sample . run . reverselength = 0 else : sample . run . reverselength = 0 | Calculates the read length of the fastq files . Short reads will not be able to be assembled properly with the default parameters used for spades . |
15,733 | async def setup ( self ) : try : engine = await self . db created = False if not await engine . has_table ( self . table_name ) : logger . info ( "Creating SQL table [{}]" . format ( self . table_name ) ) items = self . _get_table ( ) await engine . execute ( CreateTable ( items ) ) conn = await engine . connect ( ) await conn . execute ( "CREATE INDEX `lb_last_updated` ON `{}` (`source_id` DESC,`updated` DESC);" . format ( self . table_name ) ) await conn . execute ( "CREATE INDEX `lb_post` ON `{}` (`target_id` DESC,`post_id` DESC);" . format ( self . table_name ) ) await conn . close ( ) created = True if self . control_table_name and not await engine . has_table ( self . control_table_name ) : logger . info ( "Creating SQL control table [{}]" . format ( self . control_table_name ) ) items = self . _get_control_table ( ) await engine . execute ( CreateTable ( items ) ) created = True return created except Exception as exc : logger . error ( "[DB] Error when setting up SQL table: {}" . format ( exc ) ) return False | Setting up SQL table if it not exists . |
15,734 | def is_dst ( zonename ) : tz = pytz . timezone ( zonename ) now = pytz . utc . localize ( datetime . utcnow ( ) ) return now . astimezone ( tz ) . dst ( ) != timedelta ( 0 ) | Find out whether it s Daylight Saving Time in this timezone |
15,735 | def load_datetime ( value , dt_format ) : if dt_format . endswith ( '%z' ) : dt_format = dt_format [ : - 2 ] offset = value [ - 5 : ] value = value [ : - 5 ] if offset != offset . replace ( ':' , '' ) : offset = '+' + offset . replace ( ':' , '' ) value = value [ : - 1 ] return OffsetTime ( offset ) . localize ( datetime . strptime ( value , dt_format ) ) return datetime . strptime ( value , dt_format ) | Create timezone - aware datetime object |
15,736 | def list_to_json ( source_list ) : result = [ ] for item in source_list : result . append ( item . to_json ( ) ) return result | Serialise all the items in source_list to json |
15,737 | def list_from_json ( source_list_json ) : result = [ ] if source_list_json == [ ] or source_list_json == None : return result for list_item in source_list_json : item = json . loads ( list_item ) try : if item [ 'class_name' ] == 'Departure' : temp = Departure ( ) elif item [ 'class_name' ] == 'Disruption' : temp = Disruption ( ) elif item [ 'class_name' ] == 'Station' : temp = Station ( ) elif item [ 'class_name' ] == 'Trip' : temp = Trip ( ) elif item [ 'class_name' ] == 'TripRemark' : temp = TripRemark ( ) elif item [ 'class_name' ] == 'TripStop' : temp = TripStop ( ) elif item [ 'class_name' ] == 'TripSubpart' : temp = TripSubpart ( ) else : print ( 'Unrecognised Class ' + item [ 'class_name' ] + ', skipping' ) continue temp . from_json ( list_item ) result . append ( temp ) except KeyError : print ( 'Unrecognised item with no class_name, skipping' ) continue return result | Deserialise all the items in source_list from json |
15,738 | def list_diff ( list_a , list_b ) : result = [ ] for item in list_b : if not item in list_a : result . append ( item ) return result | Return the items from list_b that differ from list_a |
15,739 | def list_same ( list_a , list_b ) : result = [ ] for item in list_b : if item in list_a : result . append ( item ) return result | Return the items from list_b that are also on list_a |
15,740 | def list_merge ( list_a , list_b ) : result = [ ] for item in list_a : if not item in result : result . append ( item ) for item in list_b : if not item in result : result . append ( item ) return result | Merge two lists without duplicating items |
15,741 | def delay ( self ) : delay = { 'departure_time' : None , 'departure_delay' : None , 'requested_differs' : None , 'remarks' : self . trip_remarks , 'parts' : [ ] } if self . departure_time_actual > self . departure_time_planned : delay [ 'departure_delay' ] = self . departure_time_actual - self . departure_time_planned delay [ 'departure_time' ] = self . departure_time_actual if self . requested_time != self . departure_time_actual : delay [ 'requested_differs' ] = self . departure_time_actual for part in self . trip_parts : if part . has_delay : delay [ 'parts' ] . append ( part ) return delay | Return the delay of the train for this instance |
15,742 | def get_actual ( cls , trip_list , time ) : for trip in trip_list : if simple_time ( trip . departure_time_planned ) == time : return trip return None | Look for the train actually leaving at time |
15,743 | def parse_disruptions ( self , xml ) : obj = xmltodict . parse ( xml ) disruptions = { } disruptions [ 'unplanned' ] = [ ] disruptions [ 'planned' ] = [ ] if obj [ 'Storingen' ] [ 'Ongepland' ] : raw_disruptions = obj [ 'Storingen' ] [ 'Ongepland' ] [ 'Storing' ] if isinstance ( raw_disruptions , collections . OrderedDict ) : raw_disruptions = [ raw_disruptions ] for disruption in raw_disruptions : newdis = Disruption ( disruption ) disruptions [ 'unplanned' ] . append ( newdis ) if obj [ 'Storingen' ] [ 'Gepland' ] : raw_disruptions = obj [ 'Storingen' ] [ 'Gepland' ] [ 'Storing' ] if isinstance ( raw_disruptions , collections . OrderedDict ) : raw_disruptions = [ raw_disruptions ] for disruption in raw_disruptions : newdis = Disruption ( disruption ) disruptions [ 'planned' ] . append ( newdis ) return disruptions | Parse the NS API xml result into Disruption objects |
15,744 | def parse_departures ( self , xml ) : obj = xmltodict . parse ( xml ) departures = [ ] for departure in obj [ 'ActueleVertrekTijden' ] [ 'VertrekkendeTrein' ] : newdep = Departure ( departure ) departures . append ( newdep ) print ( newdep . delay ) return departures | Parse the NS API xml result into Departure objects |
15,745 | def parse_trips ( self , xml , requested_time ) : obj = xmltodict . parse ( xml ) trips = [ ] if 'error' in obj : print ( 'Error in trips: ' + obj [ 'error' ] [ 'message' ] ) return None try : for trip in obj [ 'ReisMogelijkheden' ] [ 'ReisMogelijkheid' ] : newtrip = Trip ( trip , requested_time ) trips . append ( newtrip ) except TypeError : return None return trips | Parse the NS API xml result into Trip objects |
15,746 | def get_stations ( self ) : url = 'http://webservices.ns.nl/ns-api-stations-v2' raw_stations = self . _request ( 'GET' , url ) return self . parse_stations ( raw_stations ) | Fetch the list of stations |
15,747 | def stop ( self ) : self . log . debug ( 'Stopping bot {}' . format ( self . _name ) ) self . _stop = True for t in self . _threads : t . join ( ) self . log . debug ( 'Stopping bot {} finished. All threads joined.' . format ( self . _name ) ) | Stops this bot . |
15,748 | def _listen_comments ( self ) : comments_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] try : for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'CommentThread-t-{}' . format ( i ) , jobs = comments_queue , target = self . _process_comment ) t . start ( ) threads . append ( t ) for comment in self . _reddit . subreddit ( '+' . join ( self . _subs ) ) . stream . comments ( ) : if self . _stop : self . _do_stop ( comments_queue , threads ) break comments_queue . put ( comment ) self . log . debug ( 'Listen comments stopped' ) except Exception as e : self . _do_stop ( comments_queue , threads ) self . log . error ( 'Exception while listening to comments:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) self . _listen_comments ( ) | Start listening to comments using a separate thread . |
15,749 | def _listen_submissions ( self ) : subs_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] try : for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'SubmissionThread-t-{}' . format ( i ) , jobs = subs_queue , target = self . _process_submission ) t . start ( ) self . _threads . append ( t ) for submission in self . _reddit . subreddit ( '+' . join ( self . _subs ) ) . stream . submissions ( ) : if self . _stop : self . _do_stop ( subs_queue , threads ) break subs_queue . put ( submission ) self . log . debug ( 'Listen submissions stopped' ) except Exception as e : self . _do_stop ( subs_queue , threads ) self . log . error ( 'Exception while listening to submissions:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) self . _listen_submissions ( ) | Start listening to submissions using a separate thread . |
15,750 | def _listen_inbox_messages ( self ) : inbox_queue = Queue ( maxsize = self . _n_jobs * 4 ) threads = [ ] try : for i in range ( self . _n_jobs ) : t = BotQueueWorker ( name = 'InboxThread-t-{}' . format ( i ) , jobs = inbox_queue , target = self . _process_inbox_message ) t . start ( ) self . _threads . append ( t ) for message in self . _reddit . inbox . stream ( ) : if self . _stop : self . _do_stop ( inbox_queue , threads ) break inbox_queue . put ( message ) self . log . debug ( 'Listen inbox stopped' ) except Exception as e : self . _do_stop ( inbox_queue , threads ) self . log . error ( 'Exception while listening to inbox:' ) self . log . error ( str ( e ) ) self . log . error ( 'Waiting for 10 minutes and trying again.' ) time . sleep ( 10 * 60 ) self . _listen_inbox_messages ( ) | Start listening to messages using a separate thread . |
15,751 | def to_struct ( self ) : structobj = self . struct_type ( ) for k in structobj . attributes ( ) : self . log . info ( "Setting attribute %s to %r" % ( k , getattr ( self , k ) ) ) setattr ( structobj , k , getattr ( self , k ) ) return structobj | Initialize properties of the appropriate struct class from this model class . |
15,752 | def move ( self , group , index = None ) : return self . group . db . move_entry ( self , group , index = index ) | This method moves the entry to another group . |
15,753 | def dump_data ( data , filename = None , file_type = 'json' , klazz = YapconfError , open_kwargs = None , dump_kwargs = None ) : _check_file_type ( file_type , klazz ) open_kwargs = open_kwargs or { 'encoding' : 'utf-8' } dump_kwargs = dump_kwargs or { } if filename : with open ( filename , 'w' , ** open_kwargs ) as conf_file : _dump ( data , conf_file , file_type , ** dump_kwargs ) else : _dump ( data , sys . stdout , file_type , ** dump_kwargs ) | Dump data given to file or stdout in file_type . |
15,754 | def load_file ( filename , file_type = 'json' , klazz = YapconfError , open_kwargs = None , load_kwargs = None ) : _check_file_type ( file_type , klazz ) open_kwargs = open_kwargs or { 'encoding' : 'utf-8' } load_kwargs = load_kwargs or { } data = None with open ( filename , ** open_kwargs ) as conf_file : if str ( file_type ) . lower ( ) == 'json' : data = json . load ( conf_file , ** load_kwargs ) elif str ( file_type ) . lower ( ) == 'yaml' : data = yaml . safe_load ( conf_file . read ( ) ) else : raise NotImplementedError ( 'Someone forgot to implement how to ' 'load a %s file_type.' % file_type ) if not isinstance ( data , dict ) : raise klazz ( 'Successfully loaded %s, but the result was ' 'not a dictionary.' % filename ) return data | Load a file with the given file type . |
15,755 | def flatten ( dictionary , separator = '.' , prefix = '' ) : new_dict = { } for key , value in dictionary . items ( ) : new_key = prefix + separator + key if prefix else key if isinstance ( value , collections . MutableMapping ) : new_dict . update ( flatten ( value , separator , new_key ) ) elif isinstance ( value , list ) : new_value = [ ] for item in value : if isinstance ( item , collections . MutableMapping ) : new_value . append ( flatten ( item , separator , new_key ) ) else : new_value . append ( item ) new_dict [ new_key ] = new_value else : new_dict [ new_key ] = value return new_dict | Flatten the dictionary keys are separated by separator |
15,756 | def relocate ( source , destination , move = False ) : venv = api . VirtualEnvironment ( source ) if not move : venv . relocate ( destination ) return None venv . move ( destination ) return None | Adjust the virtual environment settings and optional move it . |
15,757 | def main ( ) : parser = argparse . ArgumentParser ( description = 'Relocate a virtual environment.' ) parser . add_argument ( '--source' , help = 'The existing virtual environment.' , required = True , ) parser . add_argument ( '--destination' , help = 'The location for which to configure the virtual environment.' , required = True , ) parser . add_argument ( '--move' , help = 'Move the virtual environment to the destination.' , default = False , action = 'store_true' , ) args = parser . parse_args ( ) relocate ( args . source , args . destination , args . move ) | Relocate a virtual environment . |
15,758 | def confirm ( prompt = 'Really?' , color = 'warning' , yes_values = ( 'y' , 'yes' ) , abort_on_unconfirmed = False , abort_options = None ) : if isinstance ( yes_values , str ) : yes_values = ( yes_values , ) prompt = '{prompt} [{yes_value}/N] ' . format ( prompt = prompt , yes_value = yes_values [ 0 ] ) if color : prompt = printer . colorize ( prompt , color = color ) try : answer = input ( prompt ) except KeyboardInterrupt : print ( ) confirmed = False else : answer = answer . strip ( ) . lower ( ) confirmed = answer in yes_values do_abort_on_unconfirmed = not confirmed and ( bool ( abort_on_unconfirmed ) or ( abort_on_unconfirmed == 0 and abort_on_unconfirmed is not False ) ) if do_abort_on_unconfirmed : if abort_options is None : abort_options = { } if abort_on_unconfirmed is True : abort_options . setdefault ( 'return_code' , 0 ) elif isinstance ( abort_on_unconfirmed , int ) : abort_options . setdefault ( 'return_code' , abort_on_unconfirmed ) elif isinstance ( abort_on_unconfirmed , str ) : abort_options . setdefault ( 'message' , abort_on_unconfirmed ) else : abort_options . setdefault ( 'return_code' , 0 ) abort ( ** abort_options ) return confirmed | Prompt for confirmation . |
15,759 | def get_mime_message ( self ) : message = MIMEText ( "<html>" + self . get_email_header ( ) + get_email_content ( self . content_file ) + self . get_email_footer ( ) + "</html>" , "html" ) message [ "subject" ] = self . email_subject return message | Gets email MIME message |
15,760 | def increment ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , ** kwargs ) : stats = client ( ) ret = f ( * args , ** kwargs ) stats . incr ( name , tags = tags ) return ret return decorator return wrap | Function decorator for incrementing a statsd stat whenever a function is invoked . |
15,761 | def decrement ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , ** kwargs ) : stats = client ( ) ret = f ( * args , ** kwargs ) stats . decr ( name , tags = tags ) return ret return decorator return wrap | Function decorator for decrementing a statsd stat whenever a function is invoked . |
15,762 | def timed ( name , tags = None ) : def wrap ( f ) : @ wraps ( f ) def decorator ( * args , ** kwargs ) : stats = client ( ) with stats . timer ( name , tags = tags ) : return f ( * args , ** kwargs ) return decorator return wrap | Function decorator for tracking timing information on a function s invocation . |
15,763 | def move_file_to_directory ( file_path , directory_path ) : file_name = os . path . basename ( file_path ) if not os . path . exists ( directory_path ) : os . makedirs ( directory_path ) os . rename ( file_path , os . path . join ( directory_path , file_name ) ) | Moves file to given directory |
15,764 | def move_file_to_file ( old_path , new_path ) : try : os . rename ( old_path , new_path ) except : old_file = os . path . basename ( old_path ) target_directory , target_file = os . path . dirname ( os . path . abspath ( new_path ) ) , os . path . basename ( new_path ) Document . move_file_to_directory ( old_path , target_directory ) os . rename ( os . path . join ( target_directory , old_file ) , os . path . join ( target_directory , target_file ) ) | Moves file from old location to new one |
15,765 | def write_data ( self , data ) : with open ( self . path , "w" ) as writer : writer . write ( data ) | Writes given data to given path file |
15,766 | def get_path_name ( self ) : path = fix_raw_path ( os . path . dirname ( os . path . abspath ( self . path ) ) ) name = os . path . basename ( self . path ) return path , name | Gets path and name of song |
15,767 | def get_path_name ( self ) : complete_path = os . path . dirname ( os . path . abspath ( self . path ) ) name = self . path . replace ( complete_path + PATH_SEPARATOR , "" ) if name . endswith ( "/" ) : name = name [ : - 1 ] return complete_path , name | Gets path and name of file |
15,768 | def save ( self , page , language , data , change , extra_data = None ) : if self . untranslated : language = settings . PAGE_DEFAULT_LANGUAGE if change : if ( settings . PAGE_CONTENT_REVISION and self . name not in settings . PAGE_CONTENT_REVISION_EXCLUDE_LIST ) : Content . objects . create_content_if_changed ( page , language , self . name , data ) else : Content . objects . set_or_create_content ( page , language , self . name , data ) else : Content . objects . set_or_create_content ( page , language , self . name , data ) | Actually save the placeholder data into the Content object . |
15,769 | def render ( self , context ) : content = mark_safe ( self . get_content_from_context ( context ) ) if not content : return '' if self . parsed : try : t = template . Template ( content , name = self . name ) content = mark_safe ( t . render ( context ) ) except TemplateSyntaxError as error : if global_settings . DEBUG : content = PLACEHOLDER_ERROR % { 'name' : self . name , 'error' : error , } else : content = '' if self . as_varname is None : return content context [ self . as_varname ] = content return '' | Output the content of the PlaceholdeNode in the template . |
15,770 | def from_string ( cls , action_str ) : args = { } try : mod_obj = ast . parse ( action_str ) except ( SyntaxError , ValueError ) as e : raise e else : call_obj = mod_obj . body [ 0 ] . value if isinstance ( call_obj , ast . Attribute ) : module = call_obj . value . id func = call_obj . attr elif isinstance ( call_obj , ast . Call ) : try : module = call_obj . func . value . id func = call_obj . func . attr except AttributeError : raise UnsupportedActionError ( action_str ) else : for kwarg in call_obj . keywords : if isinstance ( kwarg . value , ast . Num ) : args . update ( { kwarg . arg : kwarg . value . n } ) elif isinstance ( kwarg . value , ast . Str ) : args . update ( { kwarg . arg : kwarg . value . s } ) else : raise UnsupportedActionArgumentError ( action_str , kwarg ) else : raise UnsupportedActionError ( action_str ) return cls ( module , func , args ) | Creates a new Action instance from the given string . |
15,771 | def sanitize ( self , content ) : import html5lib from html5lib import sanitizer p = html5lib . HTMLParser ( tokenizer = sanitizer . HTMLSanitizer ) dom_tree = p . parseFragment ( content ) return dom_tree . text | Sanitize a string in order to avoid possible XSS using html5lib . |
15,772 | def consume ( self , cwd = None ) : first_pass = Grammar . overall . parseString ( self . string ) lowered = { key . lower ( ) : val for key , val in first_pass . iteritems ( ) } self . commands = [ '\n' . join ( self . _get ( 'commands' , lowered ) ) ] self . job_options = self . _get ( 'job_options' , lowered ) self . global_options = self . _get ( 'options' , lowered ) self . files = self . _get ( 'files' , lowered ) self . paths = self . _get ( 'paths' , lowered ) self . files = self . _parse ( self . files , Grammar . file , True ) self . paths = self . _parse ( self . paths , Grammar . path , True ) self . job_options = self . _parse ( self . job_options , Grammar . line ) try : command_lines = self . _parse ( self . commands , Grammar . command_lines ) [ 0 ] except IndexError : raise ValueError ( 'Did you write any commands?' ) self . commands = [ ] for command_line in command_lines : comments , command = command_line self . commands . append ( [ comments . asList ( ) , self . _parse ( [ '' . join ( command ) ] , Grammar . command ) ] ) self . job_options = [ opt . asList ( ) for opt in self . job_options ] self . paths = ctf . get_paths ( self . paths ) self . files = ctf . get_files ( self . files ) self . paths . reverse ( ) self . files . reverse ( ) self . commands . reverse ( ) return ctf . get_command_templates ( self . commands , self . files [ : ] , self . paths [ : ] , self . job_options ) | Converts the lexer tokens into valid statements . This process also checks command syntax . |
15,773 | def _get ( self , key , parser_result ) : try : list_data = parser_result [ key ] . asList ( ) if any ( isinstance ( obj , str ) for obj in list_data ) : txt_lines = [ '' . join ( list_data ) ] else : txt_lines = [ '' . join ( f ) for f in list_data ] except KeyError : txt_lines = [ ] return txt_lines | Given a type and a dict of parser results return the items as a list . |
15,774 | def _parse ( self , lines , grammar , ignore_comments = False ) : results = [ ] for c in lines : if c != '' and not ( ignore_comments and c [ 0 ] == '#' ) : try : results . append ( grammar . parseString ( c ) ) except pyparsing . ParseException as e : raise ValueError ( 'Invalid syntax. Verify line {} is ' 'correct.\n{}\n\n{}' . format ( e . lineno , c , e ) ) return results | Given a type and a list parse it using the more detailed parse grammar . |
15,775 | def objectprep ( self ) : self . runmetadata = createobject . ObjectCreation ( self ) if self . extension == 'fastq' : logging . info ( 'Decompressing and combining .fastq files for CLARK analysis' ) fileprep . Fileprep ( self ) else : logging . info ( 'Using .fasta files for CLARK analysis' ) for sample in self . runmetadata . samples : sample . general . combined = sample . general . fastqfiles [ 0 ] | Create objects to store data and metadata for each sample . Also perform necessary file manipulations |
15,776 | def settargets ( self ) : logging . info ( 'Setting up database' ) self . targetcall = 'cd {} && ./set_targets.sh {} {} --{}' . format ( self . clarkpath , self . databasepath , self . database , self . rank ) subprocess . call ( self . targetcall , shell = True , stdout = self . devnull , stderr = self . devnull ) | Set the targets to be used in the analyses . Involves the path of the database files the database files to use and the level of classification for the analysis |
15,777 | def classifymetagenome ( self ) : logging . info ( 'Classifying metagenomes' ) self . classifycall = 'cd {} && ./classify_metagenome.sh -O {} -R {} -n {} --light' . format ( self . clarkpath , self . filelist , self . reportlist , self . cpus ) classify = True for sample in self . runmetadata . samples : try : sample . general . classification = sample . general . combined . split ( '.' ) [ 0 ] + '.csv' if os . path . isfile ( sample . general . classification ) : classify = False except KeyError : pass if classify : subprocess . call ( self . classifycall , shell = True , stdout = self . devnull , stderr = self . devnull ) | Run the classify metagenome of the CLARK package on the samples |
15,778 | def lists ( self ) : with open ( self . filelist , 'w' ) as filelist : with open ( self . reportlist , 'w' ) as reportlist : for sample in self . runmetadata . samples : if self . extension == 'fastq' : try : status = sample . run . Description if status == 'metagenome' : filelist . write ( sample . general . combined + '\n' ) reportlist . write ( sample . general . combined . split ( '.' ) [ 0 ] + '\n' ) except AttributeError : pass else : if sample . general . combined != 'NA' : filelist . write ( sample . general . combined + '\n' ) reportlist . write ( sample . general . combined . split ( '.' ) [ 0 ] + '\n' ) | Prepare the list of files to be processed |
15,779 | def estimateabundance ( self ) : logging . info ( 'Estimating abundance of taxonomic groups' ) for i in range ( self . cpus ) : threads = Thread ( target = self . estimate , args = ( ) ) threads . setDaemon ( True ) threads . start ( ) with progressbar ( self . runmetadata . samples ) as bar : for sample in bar : try : if sample . general . combined != 'NA' : sample . general . abundance = sample . general . combined . split ( '.' ) [ 0 ] + '_abundance.csv' if not sample . commands . datastore : sample . commands = GenObject ( ) sample . commands . target = self . targetcall sample . commands . classify = self . classifycall sample . commands . abundancecall = 'cd {} && ./estimate_abundance.sh -D {} -F {} > {}' . format ( self . clarkpath , self . databasepath , sample . general . classification , sample . general . abundance ) self . abundancequeue . put ( sample ) except KeyError : pass self . abundancequeue . join ( ) | Estimate the abundance of taxonomic groups |
15,780 | def get_command_templates ( command_tokens , file_tokens = [ ] , path_tokens = [ ] , job_options = [ ] ) : files = get_files ( file_tokens ) paths = get_paths ( path_tokens ) job_options = get_options ( job_options ) templates = _get_command_templates ( command_tokens , files , paths , job_options ) for command_template in templates : command_template . _dependencies = _get_prelim_dependencies ( command_template , templates ) return templates | Given a list of tokens from the grammar return a list of commands . |
15,781 | def get_files ( file_tokens , cwd = None ) : if not file_tokens : return [ ] token = file_tokens . pop ( ) try : filename = token . filename except AttributeError : filename = '' if cwd : input = Input ( token . alias , filename , cwd = cwd ) else : input = Input ( token . alias , filename ) return [ input ] + get_files ( file_tokens ) | Given a list of parser file tokens return a list of input objects for them . |
15,782 | def get_paths ( path_tokens ) : if len ( path_tokens ) == 0 : return [ ] token = path_tokens . pop ( ) path = PathToken ( token . alias , token . path ) return [ path ] + get_paths ( path_tokens ) | Given a list of parser path tokens return a list of path objects for them . |
15,783 | def _get_command_templates ( command_tokens , files = [ ] , paths = [ ] , job_options = [ ] , count = 1 ) : if not command_tokens : return [ ] comment_tokens , command_token = command_tokens . pop ( ) parts = [ ] parts += job_options + _get_comments ( comment_tokens ) for part in command_token [ 0 ] : try : parts . append ( _get_file_by_alias ( part , files ) ) continue except ( AttributeError , ValueError ) : pass for cut in part . split ( ) : try : parts . append ( _get_path_by_name ( cut , paths ) ) continue except ValueError : pass parts . append ( cut ) command_template = CommandTemplate ( alias = str ( count ) , parts = parts ) [ setattr ( p , 'alias' , command_template . alias ) for p in command_template . output_parts ] return [ command_template ] + _get_command_templates ( command_tokens , files , paths , job_options , count + 1 ) | Reversivly create command templates . |
15,784 | def _get_prelim_dependencies ( command_template , all_templates ) : deps = [ ] for input in command_template . input_parts : if '.' not in input . alias : continue for template in all_templates : for output in template . output_parts : if input . fuzzy_match ( output ) : deps . append ( template ) break return list ( set ( deps ) ) | Given a command_template determine which other templates it depends on . This should not be used as the be - all end - all of dependencies and before calling each command ensure that it s requirements are met . |
15,785 | def _is_output ( part ) : if part [ 0 ] . lower ( ) == 'o' : return True elif part [ 0 ] [ : 2 ] . lower ( ) == 'o:' : return True elif part [ 0 ] [ : 2 ] . lower ( ) == 'o.' : return True else : return False | Returns whether the given part represents an output variable . |
15,786 | def search_browser ( self , text ) : self . impl . get ( self . base_url ) search_div = self . impl . find_element_by_id ( "search" ) search_term = search_div . find_element_by_id ( "term" ) search_term . send_keys ( text ) search_div . find_element_by_id ( "submit" ) . click ( ) e = self . impl . find_element_by_css_selector ( "table.list tr td a" ) return e . get_attribute ( "href" ) | do a slow search via the website and return the first match |
15,787 | def search_fast ( self , text ) : resp = self . impl . get ( "{base_url}/{text}/json" . format ( base_url = self . base_url , text = text ) ) return resp . json ( ) [ "info" ] [ "package_url" ] | do a sloppy quick search via the json index |
15,788 | def main ( search , query ) : url = search . search ( query ) print ( url ) search . open_page ( url ) | main function that does the search |
15,789 | def cli_main ( ) : SearchContext . commit ( ) args = parser . parse_args ( ) firefox_remote = Remote ( "http://127.0.0.1:4444/wd/hub" , DesiredCapabilities . FIREFOX ) with contextlib . closing ( firefox_remote ) : context = SearchContext . from_instances ( [ FastSearch ( ) , Browser ( firefox_remote ) ] ) search = Search ( parent = context ) if args . fast : with context . use ( FastSearch , Browser ) : main ( search , args . query ) else : with context . use ( Browser ) : main ( search , args . query ) | cli entrypoitns sets up everything needed |
15,790 | def camel_to_underscore ( name ) : name = re . sub ( r'(?<!\b)(?<!_)([A-Z][a-z])' , r'_\1' , name ) name = re . sub ( r'(?<!\b)(?<!_)([a-z])([A-Z])' , r'\1_\2' , name ) name = name . lower ( ) return name | Convert camel case name to underscore name . |
15,791 | def main_func ( args = None ) : guimain . init_gui ( ) main . init ( ) launcher = Launcher ( ) parsed , unknown = launcher . parse_args ( args ) parsed . func ( parsed , unknown ) | Main funcion when executing this module as script |
15,792 | def setup_launch_parser ( self , parser ) : parser . set_defaults ( func = self . launch ) parser . add_argument ( "addon" , help = "The jukebox addon to launch. The addon should be a standalone plugin." ) | Setup the given parser for the launch command |
15,793 | def parse_args ( self , args = None ) : if args is None : args = sys . argv [ 1 : ] return self . parser . parse_known_args ( args ) | Parse the given arguments |
15,794 | def list_objects ( self , prefix = None , delimiter = None ) : return self . _client . list_objects ( instance = self . _instance , bucket_name = self . name , prefix = prefix , delimiter = delimiter ) | List the objects for this bucket . |
15,795 | def upload_object ( self , object_name , file_obj ) : return self . _client . upload_object ( self . _instance , self . name , object_name , file_obj ) | Upload an object to this bucket . |
15,796 | def delete_object ( self , object_name ) : self . _client . remove_object ( self . _instance , self . name , object_name ) | Remove an object from this bucket . |
15,797 | def objects ( self ) : return [ ObjectInfo ( o , self . _instance , self . _bucket , self . _client ) for o in self . _proto . object ] | The objects in this listing . |
15,798 | def delete ( self ) : self . _client . remove_object ( self . _instance , self . _bucket , self . name ) | Remove this object . |
15,799 | def download ( self ) : return self . _client . download_object ( self . _instance , self . _bucket , self . name ) | Download this object . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.