idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
15,600
def message(self, body, room_id, style='text'):
    """Send a message to the given HipChat room.

    :param body: message text.
    :param room_id: identifier of the target room.
    :param style: HipChat message_format (defaults to plain text).
    :returns: the API response from the rooms/message call.
    """
    payload = {
        'room_id': room_id,
        'message': body,
        'from': self.name,
        'notify': 1,
        'message_format': style,
        'color': self.bg_color,
    }
    log.info('sending message to hipchat', message=body, room=room_id)
    feedback = self._api_call('rooms/message', payload, requests.post)
    log.debug(feedback)
    return feedback
Send a message to the given room
15,601
def job_file(self):
    """The path to the submit description file representing this job."""
    self._job_file = os.path.join(self.initial_dir, '%s.job' % self.name)
    return self._job_file
The path to the submit description file representing this job .
15,602
def log_file(self):
    """The path to the log file for this job.

    A default of ``<name>.log`` is stored in the attributes when no
    ``log`` attribute was previously set.
    """
    if not self.get('log'):
        self.set('log', '%s.log' % self.name)
    return os.path.join(self.initial_dir, self.get('log'))
The path to the log file for this job .
15,603
def initial_dir(self):
    """The initial directory defined for the job (current dir by default).

    :raises RemoteError: when an absolute path is used with a remote scheduler.
    """
    directory = self.get('initialdir') or os.curdir
    # Remote schedulers resolve paths on their side, so an absolute local
    # path cannot be honoured there.
    if self._remote and os.path.isabs(directory):
        raise RemoteError('Cannot define an absolute path as an initial_dir on a remote scheduler')
    return directory
The initial directory defined for the job .
15,604
def submit(self, queue=None, options=None):
    """Submit the job either locally or to a remote server if it is defined.

    :param queue: optional number of jobs to queue (overrides ``num_jobs``).
    :param options: extra command-line options passed to ``condor_submit``.
    :returns: result of the parent class submission.
    :raises NoExecutable: if no executable has been set for this job.
    """
    if not self.executable:
        log.error('Job %s was submitted with no executable', self.name)
        raise NoExecutable('You cannot submit a job without an executable')
    self._num_jobs = queue or self.num_jobs
    self._write_job_file()
    args = ['condor_submit']
    # None default instead of a mutable [] shared across calls.
    args.extend(options or [])
    args.append(self.job_file)
    log.info('Submitting job %s with options: %s', self.name, args)
    return super(Job, self).submit(args)
Submits the job either locally or to a remote server if it is defined .
15,605
def wait(self, options=None, sub_job_num=None):
    """Wait for the job or a sub-job to complete.

    :param options: extra command-line options for ``condor_wait``.
    :param sub_job_num: wait only on the given sub-job when provided.
    :returns: ``(stdout, stderr)`` of the ``condor_wait`` call.
    """
    args = ['condor_wait']
    # None default instead of a mutable [] shared across calls.
    args.extend(options or [])
    if sub_job_num:
        job_id = '%s.%s' % (self.cluster_id, sub_job_num)
    else:
        job_id = str(self.cluster_id)
    if self._remote:
        # The remote side resolves the log path itself.
        abs_log_file = self.log_file
    else:
        abs_log_file = os.path.abspath(self.log_file)
    args.extend([abs_log_file, job_id])
    out, err = self._execute(args)
    return out, err
Wait for the job or a sub - job to complete .
15,606
def get(self, attr, value=None, resolve=True):
    """Get the value of an attribute from the submit description file.

    :param attr: attribute name.
    :param value: fallback returned when the attribute is missing.
    :param resolve: expand ``$(...)`` references before returning.
    """
    try:
        return self._resolve_attribute(attr) if resolve else self.attributes[attr]
    except KeyError:
        return value
Get the value of an attribute from submit description file .
15,607
def set(self, attr, value):
    """Set the value of an attribute in the submit description file.

    Booleans become ``true``/``false``; lists and tuples are joined into a
    single string, with HTCondor "new syntax" quoting applied to argument
    lists and backslash escaping applied to remap lists.
    """
    def escape_new_syntax(value, double_quote_escape='"'):
        # New-syntax quoting: double single quotes, escape double quotes and
        # wrap whitespace-containing values in single quotes.
        value = str(value)
        value = value.replace("'", "''")
        value = value.replace('"', '%s"' % double_quote_escape)
        if ' ' in value or '\t' in value:
            value = "'%s'" % value
        return value

    def escape_new_syntax_pre_post_script(value):
        return escape_new_syntax(value, '\\')

    def escape_remap(value):
        # '=' and ';' delimit remap entries; escape literal occurrences.
        value = value.replace('=', '\\=')
        value = value.replace(';', '\\;')
        return value

    def join_function_template(join_string, escape_func):
        return lambda value: join_string.join([escape_func(i) for i in value])

    join_functions = {
        # BUG FIX: key was misspelled 'rempas', so attributes ending in
        # 'remaps' (e.g. transfer_output_remaps) never got remap escaping.
        'remaps': join_function_template('; ', escape_remap),
        'arguments': join_function_template(' ', escape_new_syntax),
        'Arguments': join_function_template(' ', escape_new_syntax_pre_post_script),
    }
    if value is False:
        value = 'false'
    elif value is True:
        value = 'true'
    elif isinstance(value, (list, tuple)):
        join_function = join_function_template(', ', str)
        for key in join_functions:
            if attr.endswith(key):
                join_function = join_functions[key]
        value = join_function(value)
    self.attributes[attr] = value
Set the value of an attribute in the submit description file .
15,608
def _update_status(self, sub_job_num=None):
    """Gets the job status via condor_q, falling back to condor_history.

    :param sub_job_num: query a single sub-job when given; otherwise the
        whole cluster is queried and statuses are tallied per category.
    :returns: dict mapping status-name -> count.
    :raises HTCondorError: on a condor error or when the job is not found.
    """
    job_id = '%s.%s' % (self.cluster_id, sub_job_num) if sub_job_num else str(self.cluster_id)
    format = ['-format', '"%d"', 'JobStatus']
    # condor_q covers queued/running jobs; condor_history covers completed ones.
    cmd = 'condor_q {0} {1} && condor_history {0} {1}'.format(job_id, ' '.join(format))
    args = [cmd]
    out, err = self._execute(args, shell=True, run_in_job_dir=False)
    if err:
        log.error('Error while updating status for job %s: %s', job_id, err)
        raise HTCondorError(err)
    if not out:
        log.error('Error while updating status for job %s: Job not found.', job_id)
        raise HTCondorError('Job not found.')
    # Output is one quoted digit per (sub-)job, e.g. '"2""1"' -> '21'.
    out = out.replace('\"', '')
    log.info('Job %s status: %s', job_id, out)
    if not sub_job_num:
        if len(out) >= self.num_jobs:
            # Both condor_q and condor_history may report a job; keep only
            # the first num_jobs digits.
            out = out[:self.num_jobs]
        else:
            msg = 'There are {0} sub-jobs, but {1} status(es).'.format(self.num_jobs, len(out))
            log.error(msg)
            raise HTCondorError(msg)
    status_dict = dict()
    for val in CONDOR_JOB_STATUSES.values():
        status_dict[val] = 0
    for status_code_str in out:
        status_code = 0
        try:
            status_code = int(status_code_str)
        except ValueError:
            # Non-numeric chars fall through to status code 0.
            pass
        key = CONDOR_JOB_STATUSES[status_code]
        status_dict[key] += 1
    return status_dict
Gets the job status .
15,609
def _resolve_attribute ( self , attribute ) : value = self . attributes [ attribute ] if not value : return None resolved_value = re . sub ( '\$\((.*?)\)' , self . _resolve_attribute_match , value ) return resolved_value
Recursively replaces references to other attributes with their value .
15,610
def _resolve_attribute_match ( self , match ) : if match . group ( 1 ) == 'cluster' : return str ( self . cluster_id ) return self . get ( match . group ( 1 ) , match . group ( 0 ) )
Replaces a reference to an attribute with the value of the attribute .
15,611
def total_capacity(self):
    """Find the total task capacity available for this channel.

    Twisted inlineCallbacks-style generator: yields the deferred host list
    and fires the returned Deferred with the summed capacity of all
    enabled hosts.
    """
    total_capacity = 0
    hosts = yield self.hosts(enabled=True)
    for host in hosts:
        total_capacity += host.capacity
    defer.returnValue(total_capacity)
Find the total task capacity available for this channel .
15,612
def cmd(send, msg, args):
    """Googles something and sends back the first result link."""
    if not msg:
        send("Google what?")
        return
    api_cfg = args['config']['api']
    params = {'key': api_cfg['googleapikey'], 'cx': api_cfg['googlesearchid'], 'q': msg}
    data = get('https://www.googleapis.com/customsearch/v1', params=params).json()
    if 'items' in data:
        send("Google says %s" % data['items'][0]['link'])
    else:
        send("Google didn't say much.")
Googles something .
15,613
def get_deps(cfg=None, deps=None):
    """Installs conda dependencies.

    :param cfg: optional config dict; ``cfg['deps']`` takes precedence over
        ``deps`` and is created from it when missing.  Installed deps are
        recorded back into ``cfg``.
    :param deps: fallback list of dependency names.
    :returns: the (possibly updated) ``cfg``.
    """
    # None default avoids the shared-mutable-default pitfall of deps=[].
    deps = [] if deps is None else deps
    if cfg is not None:
        if 'deps' not in cfg:
            cfg['deps'] = deps
        else:
            deps = cfg['deps']
    if len(deps) != 0:
        for dep in deps:
            if dep not in cfg:
                runbashcmd(f'conda install {dep}', test=cfg['test'])
                cfg[dep] = dep
        logging.info(f"{len(deps)} deps installed.")
    return cfg
Installs conda dependencies .
15,614
def mont_pub_from_mont_priv(cls, mont_priv):
    """Restore the Montgomery public key from a Montgomery private key.

    :raises TypeError: if ``mont_priv`` is not bytes.
    :raises ValueError: if ``mont_priv`` has the wrong length.
    """
    if not isinstance(mont_priv, bytes):
        raise TypeError("Wrong type passed for the mont_priv parameter.")
    if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE:
        raise ValueError("Invalid value passed for the mont_priv parameter.")
    mutable_priv = bytearray(mont_priv)
    return bytes(cls._mont_pub_from_mont_priv(mutable_priv))
Restore the Montgomery public key from a Montgomery private key .
15,615
def mont_priv_to_ed_pair(cls, mont_priv):
    """Derive a Twisted Edwards key pair from a Montgomery private key.

    :returns: ``(ed_priv, ed_pub)`` as bytes.
    :raises TypeError: if ``mont_priv`` is not bytes.
    :raises ValueError: if ``mont_priv`` has the wrong length.
    """
    if not isinstance(mont_priv, bytes):
        raise TypeError("Wrong type passed for the mont_priv parameter.")
    if len(mont_priv) != cls.MONT_PRIV_KEY_SIZE:
        raise ValueError("Invalid value passed for the mont_priv parameter.")
    ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(mont_priv))
    return bytes(ed_priv), bytes(ed_pub)
Derive a Twisted Edwards key pair from given Montgomery private key .
15,616
def mont_pub_to_ed_pub(cls, mont_pub):
    """Derive a Twisted Edwards public key from a Montgomery public key.

    :raises TypeError: if ``mont_pub`` is not bytes.
    :raises ValueError: if ``mont_pub`` has the wrong length.
    """
    if not isinstance(mont_pub, bytes):
        raise TypeError("Wrong type passed for the mont_pub parameter.")
    if len(mont_pub) != cls.MONT_PUB_KEY_SIZE:
        raise ValueError("Invalid value passed for the mont_pub parameter.")
    mutable_pub = bytearray(mont_pub)
    return bytes(cls._mont_pub_to_ed_pub(mutable_pub))
Derive a Twisted Edwards public key from given Montgomery public key .
15,617
def sign(self, data, nonce=None):
    """Sign data using the Montgomery private key stored by this XEdDSA instance.

    :param data: bytes-like payload to sign.
    :param nonce: optional 64-byte nonce; generated via ``os.urandom`` when omitted.
    :returns: the signature as bytes.
    :raises MissingKeyException: when no Montgomery private key is stored.
    :raises TypeError: for non-bytes data or nonce.
    :raises ValueError: for a nonce of the wrong length.
    """
    cls = self.__class__
    if not self.__mont_priv:
        raise MissingKeyException(
            "Cannot sign using this XEdDSA instance, Montgomery private key missing."
        )
    if not isinstance(data, bytes):
        raise TypeError("The data parameter must be a bytes-like object.")
    if nonce is None:  # was `nonce == None`; identity check is the idiom
        nonce = os.urandom(64)
    if not isinstance(nonce, bytes):
        raise TypeError("Wrong type passed for the nonce parameter.")
    if len(nonce) != 64:
        raise ValueError("Invalid value passed for the nonce parameter.")
    ed_priv, ed_pub = cls._mont_priv_to_ed_pair(bytearray(self.__mont_priv))
    return bytes(cls._sign(bytearray(data), bytearray(nonce), ed_priv, ed_pub))
Sign data using the Montgomery private key stored by this XEdDSA instance .
15,618
def verify(self, data, signature):
    """Verify signed data using the stored Montgomery public key.

    :raises TypeError: for non-bytes data or signature.
    :raises ValueError: for a signature of the wrong length.
    """
    cls = self.__class__
    if not isinstance(data, bytes):
        raise TypeError("The data parameter must be a bytes-like object.")
    if not isinstance(signature, bytes):
        raise TypeError("Wrong type passed for the signature parameter.")
    if len(signature) != cls.SIGNATURE_SIZE:
        raise ValueError("Invalid value passed for the signature parameter.")
    ed_pub = cls._mont_pub_to_ed_pub(bytearray(self.__mont_pub))
    return cls._verify(bytearray(data), bytearray(signature), ed_pub)
Verify signed data using the Montgomery public key stored by this XEdDSA instance .
15,619
def mangle(text):
    """Takes a script and mangles it.

    Strips comments and blanks out leading docstrings while padding with
    whitespace so that every remaining token keeps its original line and
    column position.

    :param text: source code as a str.
    :returns: the mangled source as a str.
    """
    text_bytes = text.encode('utf-8')
    buff = BytesIO(text_bytes)
    mangled = BytesIO()
    last_tok = token.INDENT
    last_line = -1
    last_col = 0
    last_line_text = ''
    open_list_dicts = 0  # depth of currently open [ ... ] / { ... } literals
    tokens = tokenizer(buff.readline)
    for t, text, (line_s, col_s), (line_e, col_e), line in tokens:
        if line_s > last_line and last_line != -1:
            # New physical line: reset the column tracker and re-emit an
            # explicit continuation if the previous line ended with one.
            last_col = 0
            if last_line_text.rstrip()[-1:] == '\\':
                mangled.write(b' \\\n')
        striped = text.strip()
        list_dict_open = [token.LSQB, token.LBRACE, '[', '{']
        list_dict_close = [token.RSQB, token.RBRACE, ']', '}']
        if t in list_dict_open or striped in list_dict_open:
            open_list_dicts += 1
        elif t in list_dict_close or striped in list_dict_close:
            open_list_dicts -= 1
        # A string right after INDENT/NEWLINE/DEDENT/ENCODING (and not inside
        # a list/dict literal) is a docstring: replace it with newlines so
        # later tokens keep their line numbers.
        if t == token.STRING and (last_tok == token.INDENT or ((last_tok == token.NEWLINE or last_tok == tokenize.NL or last_tok == token.DEDENT or last_tok == tokenize.ENCODING) and open_list_dicts == 0)):
            mangled.write(b'\n' * (len(text.split('\n')) - 1))
        elif t == tokenize.COMMENT:
            # Comments are dropped entirely.
            pass
        else:
            if col_s > last_col:
                # Space-pad to restore the token's original column.
                mangled.write(b' ' * (col_s - last_col))
            if t != tokenize.ENCODING:
                mangled.write(text.encode('utf-8'))
        last_tok = t
        last_col = col_e
        last_line = line_e
        last_line_text = line
    return mangled.getvalue().decode('utf-8')
Takes a script and mangles it
15,620
def main(argv=None):
    """Command line entry point: mangle *input*, write to *output* or stdout."""
    argv = argv or sys.argv[1:]
    parser = argparse.ArgumentParser(description=_HELP_TEXT)
    parser.add_argument('input', nargs='?', default=None)
    parser.add_argument('output', nargs='?', default=None)
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + get_version())
    args = parser.parse_args(argv)
    if not args.input:
        print("No file specified", file=sys.stderr)
        sys.exit(1)
    try:
        with open(args.input, 'r') as f:
            res = mangle(f.read())
        if args.output:
            with open(args.output, 'w') as o:
                o.write(res)
        else:
            print(res, end='')
    except Exception as ex:
        print("Error mangling {}: {!s}".format(args.input, ex), file=sys.stderr)
        sys.exit(1)
Command line entry point
15,621
def get_histograms_in_list(filename: str, list_name: str = None) -> Dict[str, Any]:
    """Get histograms from the file and make them available in a dict.

    :param filename: path of the ROOT file.
    :param list_name: name of the TList to read; all top-level keys when None.
    :returns: dict of histograms keyed by name.
    :raises ValueError: when the named list cannot be found.
    """
    hists: dict = {}
    with RootOpen(filename=filename, mode="READ") as fIn:
        if list_name is None:
            hist_list = [obj.ReadObj() for obj in fIn.GetListOfKeys()]
        else:
            hist_list = fIn.Get(list_name)
        if not hist_list:
            # Dump the file contents so the caller can see valid names.
            fIn.ls()
            fIn.Close()
            raise ValueError(f"Could not find list with name \"{list_name}\". Possible names are listed above.")
        for obj in hist_list:
            _retrieve_object(hists, obj)
    return hists
Get histograms from the file and make them available in a dict .
15,622
def _retrieve_object(output_dict: Dict[str, Any], obj: Any) -> None:
    """Function to recursively retrieve histograms from a list in a ROOT file.

    Histograms are stored in ``output_dict`` keyed by name; nested
    TCollections become nested dicts.
    """
    import ROOT
    if isinstance(obj, ROOT.TH1) or isinstance(obj, ROOT.THnBase):
        if isinstance(obj, ROOT.TH1):
            # Detach from the file so the hist survives the file being closed.
            obj.SetDirectory(0)
        ROOT.SetOwnership(obj, False)
        output_dict[obj.GetName()] = obj
    if isinstance(obj, ROOT.TCollection):
        output_dict[obj.GetName()] = {}
        for obj_temp in list(obj):
            _retrieve_object(output_dict[obj.GetName()], obj_temp)
Function to recursively retrieve histograms from a list in a ROOT file .
15,623
def get_array_from_hist2D(hist: Hist, set_zero_to_NaN: bool = True, return_bin_edges: bool = False) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Extract x, y and bin values from a 2D ROOT histogram.

    :param hist: 2D ROOT histogram.
    :param set_zero_to_NaN: replace zero bins with NaN (handy for plotting).
    :param return_bin_edges: base the x/y ranges on bin edges rather than
        bin centers.
    :returns: ``(X, Y, hist_array)`` where X and Y are meshgrid arrays.
    """
    # numpy reshape takes (rows, cols) = (ny, nx); under/overflow cells are skipped.
    shape = (hist.GetYaxis().GetNbins(), hist.GetXaxis().GetNbins())
    hist_array = np.array([hist.GetBinContent(x) for x in range(1, hist.GetNcells()) if not hist.IsBinUnderflow(x) and not hist.IsBinOverflow(x)])
    hist_array = hist_array.reshape(shape)
    hist_array = hist_array.T
    if set_zero_to_NaN:
        hist_array[hist_array == 0] = np.nan
    if return_bin_edges:
        x_bin_edges = get_bin_edges_from_axis(hist.GetXaxis())
        y_bin_edges = get_bin_edges_from_axis(hist.GetYaxis())
        # epsilon nudges np.arange to include the final upper edge.
        epsilon = 1e-9
        x_range = np.arange(np.amin(x_bin_edges), np.amax(x_bin_edges) + epsilon, hist.GetXaxis().GetBinWidth(1))
        y_range = np.arange(np.amin(y_bin_edges), np.amax(y_bin_edges) + epsilon, hist.GetYaxis().GetBinWidth(1))
    else:
        x_range = np.array([hist.GetXaxis().GetBinCenter(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)])
        y_range = np.array([hist.GetYaxis().GetBinCenter(i) for i in range(1, hist.GetYaxis().GetNbins() + 1)])
    X, Y = np.meshgrid(x_range, y_range)
    return (X, Y, hist_array)
Extract x y and bin values from a 2D ROOT histogram .
15,624
def get_bin_edges_from_axis(axis) -> np.ndarray:
    """Get bin edges from a ROOT hist axis as a numpy array.

    ROOT bins are 1-indexed; the final entry is the upper edge of the last bin.
    """
    n_bins = axis.GetNbins()
    edges = np.empty(n_bins + 1)
    edges[:-1] = [axis.GetBinLowEdge(i) for i in range(1, n_bins + 1)]
    edges[-1] = axis.GetBinUpEdge(n_bins)
    return edges
Get bin edges from a ROOT hist axis .
15,625
def sistr(self):
    """Perform sistr analyses on Salmonella.

    For every sample whose reference genus is Salmonella, builds and runs a
    ``sistr`` command producing JSON output, then triggers report creation.
    """
    logging.info('Performing sistr analyses')
    with progressbar(self.metadata) as bar:
        for sample in bar:
            # Every sample gets a (possibly empty) analysis attribute.
            setattr(sample, self.analysistype, GenObject())
            if sample.general.bestassemblyfile != 'NA':
                try:
                    if sample.general.referencegenus == 'Salmonella':
                        sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype)
                        sample[self.analysistype].jsonoutput = os.path.join(sample[self.analysistype].reportdir, '{}.json'.format(sample.name))
                        sample.commands.sistr = 'sistr -f json -o {} -t {} -T {} {}'.format(sample[self.analysistype].jsonoutput, self.cpus, os.path.join(sample[self.analysistype].reportdir, 'tmp'), sample.general.bestassemblyfile)
                        sample[self.analysistype].logout = os.path.join(sample[self.analysistype].reportdir, 'logout')
                        sample[self.analysistype].logerr = os.path.join(sample[self.analysistype].reportdir, 'logerr')
                        # Skip the run when output already exists (resume support).
                        if not os.path.isfile(sample[self.analysistype].jsonoutput):
                            out, err = run_subprocess(sample.commands.sistr)
                            write_to_logfile(sample.commands.sistr, sample.commands.sistr, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr)
                            write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, sample[self.analysistype].logout, sample[self.analysistype].logerr)
                    # NOTE(review): queue.task_done() appears per-sample here —
                    # confirm this matches how items were put on the queue.
                    self.queue.task_done()
                except (ValueError, KeyError):
                    pass
    self.queue.join()
    self.report()
Perform sistr analyses on Salmonella
15,626
def report(self):
    """Creates sistr reports: one .tsv per sample plus a combined sistr.tsv."""
    header = '\t'.join(self.headers) + '\n'
    data = ''
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            row = ''
            try:
                sample[self.analysistype].jsondata = json.load(open(sample[self.analysistype].jsonoutput, 'r'))
                sample[self.analysistype].report = os.path.join(sample[self.analysistype].reportdir, '{}.tsv'.format(sample.name))
                # Pull each report category out of the parsed sistr JSON and
                # also stash it as an attribute on the sample.
                for category in self.headers:
                    row += '{}\t'.format(sample[self.analysistype].jsondata[0][category])
                    setattr(sample[self.analysistype], category, str(sample[self.analysistype].jsondata[0][category]))
                row += '\n'
                data += row
                with open(sample[self.analysistype].report, 'w') as strainreport:
                    strainreport.write(header)
                    strainreport.write(row)
            except (KeyError, AttributeError):
                # Samples without sistr output are skipped silently.
                pass
    with open(os.path.join(self.reportdir, 'sistr.tsv'), 'w') as report:
        report.write(header)
        report.write(data)
Creates sistr reports
15,627
def purgeDeletedWidgets():
    """Finds old references to stashed fields and deletes them.

    Both field registries on AbstractEditorWidget are compacted in place,
    dropping references to Qt widgets that sip reports as deleted.
    """
    def _purge(fields):
        # Collect first, remove after: never mutate a list while iterating it.
        stale = [field for field in fields if sip.isdeleted(field)]
        for field in stale:
            fields.remove(field)

    _purge(AbstractEditorWidget.funit_fields)
    _purge(AbstractEditorWidget.tunit_fields)
Finds old references to stashed fields and deletes them
15,628
def movefastq(self):
    """Find .fastq files for each sample and move them to an appropriately named folder.

    Files are symlinked (not copied) into a per-sample directory, then the
    sample's list of raw (un-processed) fastq files is recorded.
    """
    logging.info('Moving FASTQ files')
    for sample in self.metadata.runmetadata.samples:
        outputdir = os.path.join(self.path, sample.name)
        # Try the naming patterns in order of specificity:
        # '<name>_*.fastq*', then '<name>.fastq*', then '<name>*.fastq*'.
        fastqfiles = sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) if sorted(glob(os.path.join(self.path, '{}_*.fastq*'.format(sample.name)))) else sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) if sorted(glob(os.path.join(self.path, '{}.fastq*'.format(sample.name)))) else sorted(glob(os.path.join(self.path, '{}*.fastq*'.format(sample.name))))
        if fastqfiles:
            make_path(outputdir)
            try:
                # Relative symlinks so the tree stays relocatable.
                list(map(lambda x: os.symlink(os.path.join('..', os.path.basename(x)), os.path.join(outputdir, os.path.basename(x))), fastqfiles))
            except OSError:
                # Links already exist from a previous run.
                pass
            # Keep only the raw inputs, not intermediate processing outputs.
            fastqfiles = [fastq for fastq in sorted(glob(os.path.join(outputdir, '{}*.fastq*'.format(sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq]
        else:
            if outputdir:
                # NOTE(review): '{}*.fastq*'.format(outputdir, sample.name)
                # formats outputdir into the glob and silently ignores
                # sample.name — looks like a bug; confirm intended pattern.
                fastqfiles = [fastq for fastq in sorted(glob(os.path.join(outputdir, '{}*.fastq*'.format(outputdir, sample.name)))) if 'trimmed' not in fastq and 'normalised' not in fastq and 'corrected' not in fastq and 'paired' not in fastq and 'unpaired' not in fastq]
        sample.general.fastqfiles = fastqfiles
Find . fastq files for each sample and move them to an appropriately named folder
15,629
def get_list(self, datatype, url, **kwargs):
    """Base function for connecting to the API.

    Appends the API key and the query parameters to *url* and returns the
    ``datatype`` entry of the decoded JSON response.
    """
    kwargs.update({'key': self.api_key})
    full_url = url + '?' + urlencode(kwargs)
    data = json.loads(urlopen(full_url).read())
    return data[datatype]
base function for connecting to API
15,630
def film_search(self, title):
    """Film search using fuzzy matching; strongest matches first."""
    if not hasattr(self, 'film_list'):
        self.get_film_list()
    matches = []
    for film in self.film_list:
        strength = WRatio(title, film['title'])
        if strength > 80:  # minimum fuzzy-match score to count as a hit
            film.update({u'strength': strength})
            matches.append(film)
    return sorted(matches, key=itemgetter('strength'), reverse=True)
film search using fuzzy matching
15,631
def get_film_id(self, title, three_dimensional=False):
    """Get the film id using the title in conjunction with the searching function.

    :param title: film title to fuzzy-search for.
    :param three_dimensional: whether a 3D screening is wanted.
    :returns: the ``edi`` of the first match whose 3D-ness agrees with
        ``three_dimensional``, or -1 when nothing matches.
    """
    for film in self.film_search(title):
        # BUG FIX: original used ``.find('3D') is -1`` — identity comparison
        # with an int literal, which is implementation-defined.
        is_3d = film['title'].find('3D') != -1
        if is_3d == three_dimensional:
            return film['edi']
    return -1
get the film id using the title in conjunction with the searching function
15,632
def set_current_stim_parameter(self, param, val):
    """Sets a parameter on the current stimulus (component at row 0, col 1)."""
    current_component = self._stimulus.component(0, 1)
    current_component.set(param, val)
Sets a parameter on the current stimulus
15,633
def save_to_file(self, data, stamp):
    """Saves data to current dataset, annotated with the stimulus info.

    :param data: acquired trace data appended to the current dataset.
    :param stamp: time stamp stored alongside the trace info.
    """
    self.datafile.append(self.current_dataset_name, data)
    # Merge the component doc and the test doc into one info dict
    # (Python 2: dict.items() returns concatenable lists).
    info = dict(self._stimulus.componentDoc().items() + self._stimulus.testDoc().items())
    print 'saving doc', info
    info['time_stamps'] = [stamp]
    info['samplerate_ad'] = self.player.aifs
    self.datafile.append_trace_info(self.current_dataset_name, info)
Saves data to current dataset .
15,634
def countdown_timer(seconds=10):
    """Show a simple countdown progress bar lasting *seconds* seconds."""
    tick = 0.1  # update interval in seconds
    n_ticks = int(seconds / tick)
    widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()]
    bar = progressbar.ProgressBar(widgets=widgets, max_value=n_ticks).start()
    for tick_index in range(n_ticks):
        bar.update(tick_index)
        sleep(tick)
    bar.finish()
Show a simple countdown progress bar
15,635
def write_dicts_to_csv(self, dicts):
    """Saves a .csv file (at ``self.path``) with posts data.

    Column headers are the sorted keys of the first dict.
    """
    headers = sorted(dicts[0].keys())
    with open(self.path, "w") as out_file:
        writer = csv.DictWriter(out_file, headers, delimiter=",", quotechar="\"")
        writer.writeheader()
        writer.writerows(dicts)
Saves . csv file with posts data
15,636
def write_matrix_to_csv(self, headers, data):
    """Saves a .csv file (at ``self.path``): a header row, then the data rows."""
    with open(self.path, "w") as out_file:
        writer = csv.writer(out_file, delimiter=",")
        writer.writerow(headers)
        writer.writerows(data)
Saves . csv file with data
15,637
def write_dicts_to_json(self, data):
    """Saves a pretty-printed, key-sorted .json file (at ``self.path``) with data."""
    with open(self.path, "w") as out:
        json.dump(data, out, indent=4, sort_keys=True)
Saves . json file with data
15,638
def start_listening(self):
    """Start listener threads for acquisition callback queues."""
    self._qlisten()
    # Clear the halt flag before the workers spin up.
    self._halt_threads = False
    for worker in self.queue_threads:
        worker.start()
Start listener threads for acquisition callback queues
15,639
def stop_listening(self):
    """Stop listener threads for acquisition queues.

    Sets the halt flag, then wakes every waiting thread so it can observe it.
    """
    self._halt_threads = True
    for name, (q, wake_event) in self.recieved_signals.items():
        wake_event.set()
Stop listener threads for acquisition queues
15,640
def set_queue_callback(self, name, func):
    """Sets a function to execute when the named acquisition queue has data placed in it.

    Multiple callbacks may be registered per queue; they accumulate in order.
    """
    # setdefault collapses the membership-test-then-append dance.
    self.acquisition_hooks.setdefault(name, []).append(func)
Sets a function to execute when the named acquisition queue has data placed in it .
15,641
def set_calibration(self, datakey, calf=None, frange=None):
    """Sets a calibration for all acquisition operations from an already gathered calibration data set.

    :param datakey: name of the saved calibration dataset; ``None`` clears
        the calibration on every module.
    :param calf: calibration reference frequency (required with a datakey).
    :param frange: frequency range the calibration applies over.
    """
    if datakey is None:
        calibration_vector, calibration_freqs = None, None
    else:
        if calf is None:
            raise Exception('calibration reference frequency must be specified')
        try:
            cal = self.datafile.get_calibration(datakey, calf)
        except:
            print "Error: unable to load calibration data from: ", datakey
            raise
        calibration_vector, calibration_freqs = cal
    # Changing calibration invalidates any cached stimulus signals.
    StimulusModel.clearCache()
    logger = logging.getLogger('main')
    logger.debug('clearing cache')
    logger.debug('setting explore calibration')
    self.explorer.set_calibration(calibration_vector, calibration_freqs, frange, datakey)
    logger.debug('setting protocol calibration')
    self.protocoler.set_calibration(calibration_vector, calibration_freqs, frange, datakey)
    logger.debug('setting chart calibration')
    self.charter.set_calibration(calibration_vector, calibration_freqs, frange, datakey)
    logger.debug('setting calibrator calibration')
    self.bs_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey)
    logger.debug('setting tone calibrator calibration')
    self.tone_calibrator.stash_calibration(calibration_vector, calibration_freqs, frange, datakey)
Sets a calibration for all of the acquisition operations from an already gathered calibration data set .
15,642
def set_calibration_duration(self, dur):
    """Sets the stimulus duration for both calibration stimulus generators."""
    for calibrator in (self.bs_calibrator, self.tone_calibrator):
        calibrator.set_duration(dur)
Sets the stimulus duration for the calibration stimulus . Sets for calibration chirp test tone and calibration curve tones
15,643
def set_calibration_reps(self, reps):
    """Sets the number of repetitions for calibration stimuli."""
    for calibrator in (self.bs_calibrator, self.tone_calibrator):
        calibrator.set_reps(reps)
Sets the number of repetitions for calibration stimuli
15,644
def load_data_file(self, fname, filemode='a'):
    """Opens an existing data file to append to, wiring it into every module."""
    self.close_data()
    self.datafile = open_acqdata(fname, filemode=filemode)
    for acq_module in (self.explorer, self.protocoler, self.charter,
                       self.bs_calibrator, self.tone_calibrator):
        acq_module.set(datafile=self.datafile)
    # A freshly loaded file starts with no calibration selected.
    self.set_calibration(None)
    self.current_cellid = dict(self.datafile.get_info('')).get('total cells', 0)
Opens an existing data file to append to
15,645
def set_threshold(self, threshold):
    """Sets spike detection threshold for explore and protocol acquisition."""
    for acq_module in (self.explorer, self.protocoler):
        acq_module.set_threshold(threshold)
Sets spike detection threshold
15,646
def set(self, **kwargs):
    """Sets acquisition parameters for all acquisition types."""
    acq_modules = (self.explorer, self.protocoler, self.tone_calibrator,
                   self.charter, self.bs_calibrator, self.mphone_calibrator)
    for acq_module in acq_modules:
        acq_module.set(**kwargs)
Sets acquisition parameters for all acquisition types
15,647
def set_mphone_calibration(self, sens, db):
    """Sets the microphone calibration used to calculate recorded dB levels."""
    for calibrator in (self.bs_calibrator, self.tone_calibrator):
        calibrator.set_mphone_calibration(sens, db)
Sets the microphone calibration for the purpose of calculating recorded dB levels
15,648
def run_chart_protocol(self, interval):
    """Runs the stimuli presentation during a chart acquisition.

    :param interval: timing interval handed to the charter's setup.
    :returns: whatever the charter's run() yields.
    """
    chart_runner = self.charter
    chart_runner.setup(interval)
    return chart_runner.run()
Runs the stimuli presentation during a chart acquisition
15,649
def process_calibration(self, save=True, calf=20000):
    """Processes a completed calibration.

    :returns: ``(calname, db)`` from the noise calibrator.
    :raises Exception: calibration-curve (index 2) processing is unsupported.
    """
    if self.selected_calibration_index == 2:
        raise Exception("Calibration curve processing not currently supported")
    results, calname, freq, db = self.bs_calibrator.process_calibration(save)
    return calname, db
Processes a completed calibration
15,650
def close_data(self):
    """Closes the current data file, persisting the running cell count."""
    if self.datafile is None:
        return
    if self.datafile.filemode != 'r':
        # Only writable files can take the updated metadata.
        self.datafile.set_metadata('', {'total cells': self.current_cellid})
    self.datafile.close()
    self.datafile = None
Closes the current data file
15,651
def calibration_stimulus(self, mode):
    """Gets the stimulus model for calibration.

    :param mode: 'tone' or 'noise'; anything else yields None.
    """
    calibrators = {'tone': self.tone_calibrator, 'noise': self.bs_calibrator}
    calibrator = calibrators.get(mode)
    return calibrator.stimulus if calibrator is not None else None
Gets the stimulus model for calibration
15,652
def calibration_template(self):
    """Gets template documentation for the tone-curve and noise calibrations."""
    noise_doc = [calstim.stateDict() for calstim in self.bs_calibrator.get_stims()]
    return {
        'tone_doc': self.tone_calibrator.stimulus.templateDoc(),
        'noise_doc': noise_doc,
    }
Gets the template documentation for the both the tone curve calibration and noise calibration
15,653
def load_calibration_template(self, template):
    """Reloads calibration settings from a saved template doc."""
    tone_stim = self.tone_calibrator.stimulus
    tone_stim.clearComponents()
    tone_stim.loadFromTemplate(template['tone_doc'], tone_stim)
    # Pair each saved state with its corresponding noise calibration stimulus.
    for state, calstim in zip(template['noise_doc'], self.bs_calibrator.get_stims()):
        calstim.loadState(state)
Reloads calibration settings from saved template doc
15,654
def attenuator_connection(self, connect=True):
    """Checks the connection to the attenuator and attempts to connect if not connected.

    Also sets the appropriate minimum stimulus output voltage: 0.005 V when
    an attenuator is connected, 0.0 V otherwise.

    :param connect: True to (re)connect, False to disconnect all modules.
    :returns: True when connected, False otherwise.
    """
    acquisition_modules = [self.explorer, self.protocoler, self.bs_calibrator, self.tone_calibrator, self.charter]
    if connect:
        if not acquisition_modules[0].player.attenuator_connected():
            for module in acquisition_modules:
                success = module.player.connect_attenuator()
            # NOTE(review): only the LAST module's result is checked here —
            # confirm whether each module's connection should be verified.
            if success is None:
                StimulusModel.setMinVoltage(0.0)
                return False
            else:
                StimulusModel.setMinVoltage(0.005)
                return True
        else:
            # Already connected; just make sure the minimum voltage is set.
            StimulusModel.setMinVoltage(0.005)
            return True
    else:
        for module in acquisition_modules:
            module.player.connect_attenuator(False)
        StimulusModel.setMinVoltage(0.0)
        return False
Checks the connection to the attenuator and attempts to connect if not connected . Will also set an appropriate output minimum for stimuli if connection successful
15,655
def readline(self, timeout=0.1):
    """Try to read a line from the stream queue.

    :param timeout: seconds to block for; ``None`` means non-blocking.
    :returns: the next queued line, or None when nothing arrives in time.
    """
    should_block = timeout is not None
    try:
        return self._q.get(block=should_block, timeout=timeout)
    except Empty:
        return None
Try to read a line from the stream queue .
15,656
def verification_events(self):
    """Events related to command verification (queued/started), if present."""
    candidates = (
        self._assemble_event('Verifier_Queued'),
        self._assemble_event('Verifier_Started'),
    )
    return [event for event in candidates if event]
Events related to command verification .
15,657
def events(self):
    """All events: the acknowledge event plus any verification events."""
    combined = [self.acknowledge_event] + self.verification_events
    return [event for event in combined if event]
All events .
15,658
def generation_time(self):
    """The generation time as set by Yamcs, or None when absent."""
    entry = self._proto.commandQueueEntry
    if not entry.HasField('generationTimeUTC'):
        return None
    return parse_isostring(entry.generationTimeUTC)
The generation time as set by Yamcs .
15,659
def username(self):
    """The username of the issuer, or None when not recorded."""
    entry = self._proto.commandQueueEntry
    if not entry.HasField('username'):
        return None
    return entry.username
The username of the issuer .
15,660
def queue(self):
    """The name of the queue that this command was assigned to, or None."""
    entry = self._proto.commandQueueEntry
    if not entry.HasField('queueName'):
        return None
    return entry.queueName
The name of the queue that this command was assigned to .
15,661
def origin(self):
    """The origin of this command (often empty; may be a hostname), or None."""
    cmd_id = self._proto.commandQueueEntry.cmdId
    if not cmd_id.HasField('origin'):
        return None
    return cmd_id.origin
The origin of this command . This is often empty but may also be a hostname .
15,662
def sequence_number(self):
    """The sequence number assigned by the issuing client, or None."""
    cmd_id = self._proto.commandQueueEntry.cmdId
    if not cmd_id.HasField('sequenceNumber'):
        return None
    return cmd_id.sequenceNumber
The sequence number of this command . This is the sequence number assigned by the issuing client .
15,663
def create_command_history_subscription(self, on_data=None, timeout=60):
    """Create a new command history subscription for this command.

    :param on_data: callback invoked with each history update.
    :param timeout: seconds to wait for the subscription to be established.
    """
    return self._client.create_command_history_subscription(
        issued_command=self, on_data=on_data, timeout=timeout)
Create a new command history subscription for this command .
15,664
def acknowledged_by(self):
    """Username of the acknowledger, or None when not acknowledged."""
    if not self.is_acknowledged:
        return None
    info = self._proto.acknowledgeInfo
    return info.acknowledgedBy if info.HasField('acknowledgedBy') else None
Username of the acknowledger .
15,665
def acknowledge_message(self):
    """Comment provided when acknowledging the alarm, or None."""
    if not self.is_acknowledged:
        return None
    info = self._proto.acknowledgeInfo
    return info.acknowledgeMessage if info.HasField('acknowledgeMessage') else None
Comment provided when acknowledging the alarm .
15,666
def acknowledge_time(self):
    """Processor time when the alarm was acknowledged, or None."""
    if not self.is_acknowledged:
        return None
    info = self._proto.acknowledgeInfo
    if not info.HasField('acknowledgeTime'):
        return None
    return parse_isostring(info.acknowledgeTime)
Processor time when the alarm was acknowledged .
15,667
def name(self):
    """Identifying name for the parameter value.

    Typically the fully-qualified XTCE name, but it may be an alias
    depending on how the parameter update was requested.
    """
    ident = self._proto.id
    if ident.namespace:
        return '{}/{}'.format(ident.namespace, ident.name)
    return ident.name
An identifying name for the parameter value . Typically this is the fully - qualified XTCE name but it may also be an alias depending on how the parameter update was requested .
15,668
def validity_duration(self):
    """How long this parameter value is valid (timedelta), or None."""
    proto = self._proto
    if not proto.HasField('expireMillis'):
        return None
    return timedelta(milliseconds=proto.expireMillis)
How long this parameter value is valid .
15,669
def range_condition(self):
    """LOW or HIGH when the value is out of limits, else None."""
    if not self._proto.HasField('rangeCondition'):
        return None
    return pvalue_pb2.RangeCondition.Name(self._proto.rangeCondition)
If the value is out of limits this indicates LOW or HIGH .
15,670
def reversebait(self, maskmiddle='f', k=19):
    """Use the freshly-baited FASTQ files to bait out sequence from the
    original target files, reducing the number of possible targets against
    which the baited reads must be aligned.

    :param maskmiddle: bbduk maskmiddle flag ('f' disables middle-base masking)
    :param k: kmer size used when self.kmer_size is not set
    """
    logging.info('Performing reverse kmer baiting of targets with FASTQ files')
    # Honour an object-level kmer size when one was supplied
    kmer = self.kmer_size if self.kmer_size is not None else k
    with progressbar(self.runmetadata) as bar:
        for sample in bar:
            if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
                outfile = os.path.join(sample[self.analysistype].outputdir, 'baitedtargets.fa')
                sample[self.analysistype].revbbdukcmd = \
                    'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} threads={cpus} mincovfraction={mcf} ' \
                    'maskmiddle={mm} outm={outm}'.format(mem=self.mem,
                                                         ref=sample[self.analysistype].baitedfastq,
                                                         in1=sample[self.analysistype].baitfile,
                                                         kmer=kmer,
                                                         cpus=str(self.cpus),
                                                         mcf=self.cutoff,
                                                         mm=maskmiddle,
                                                         outm=outfile)
                # Only run the (slow) baiting when the output is not already present
                if not os.path.isfile(outfile):
                    out, err = run_subprocess(sample[self.analysistype].revbbdukcmd)
                    # BUGFIX: previously logged bbdukcmd (the forward baiting
                    # command) here instead of the reverse command actually run
                    write_to_logfile(sample[self.analysistype].revbbdukcmd,
                                     sample[self.analysistype].revbbdukcmd,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
                    write_to_logfile(out, err,
                                     self.logfile, sample.general.logout, sample.general.logerr,
                                     sample[self.analysistype].logout, sample[self.analysistype].logerr)
                # The reverse-baited file becomes the new bait target
                sample[self.analysistype].baitfile = outfile
Use the freshly - baited FASTQ files to bait out sequence from the original target files . This will reduce the number of possible targets against which the baited reads must be aligned
15,671
# Filter per-gene results based on cigar features (e.g. internal soft-clipping):
# a gene is kept only when every feature region is shorter than 30% of the
# gene's (truncated-to-int) average depth; genes with no feature data are kept
# unchanged, and genes with no percent-identity result are dropped.
# sixteens_full and resfinder analyses are never filtered.
# NOTE(review): the nesting of the final `results = replacementresults`
# reassignment relative to the analysistype check is hard to read here --
# as written it appears to run for every sample inside the outer try; confirm
# that sixteens_full/resfinder results are not unintentionally cleared.
def clipper ( self ) : for sample in self . runmetadata : replacementresults = dict ( ) try : if self . analysistype != 'sixteens_full' and self . analysistype != 'resfinder' : for gene in sample [ self . analysistype ] . faidict : try : percentidentity = sample [ self . analysistype ] . results [ gene ] try : passingfeature = list ( ) for location , feature in sample [ self . analysistype ] . features [ gene ] . items ( ) : if len ( feature ) < int ( float ( sample [ self . analysistype ] . avgdepth [ gene ] ) ) * 0.3 : passingfeature . append ( True ) else : passingfeature . append ( False ) if all ( passingfeature ) : replacementresults [ gene ] = percentidentity except KeyError : replacementresults [ gene ] = percentidentity except KeyError : pass sample [ self . analysistype ] . results = replacementresults except AttributeError : pass
Filter out results based on the presence of cigar features such as internal soft - clipping
15,672
def main():
    """Print the collected bug information as JSON, Markdown and SQL reports."""
    reporter = BugReporter()
    print("JSON report:")
    print(reporter.as_json())
    print()
    print("Markdown report:")
    print(reporter.as_markdown())
    print("SQL report:")
    print(reporter.as_sql())
    # 'chose' typo preserved: runtime strings are reproduced byte-for-byte
    print("Choose the appropriate format (if you're submitting a Github Issue "
          "please chose the Markdown report) and paste it!")
Pretty - print the bug information as JSON , Markdown and SQL
15,673
def get_platform_info():
    """Return the OS name and release as a dict.

    Falls back to 'Unknown' values when platform detection fails.

    :return: dict with 'system' and 'release' keys
    """
    try:
        system_name = platform.system()
        release_name = platform.release()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate
        system_name = "Unknown"
        release_name = "Unknown"
    return {
        'system': system_name,
        'release': release_name,
    }
Gets platform info
15,674
def get_bug_report():
    """Assemble platform and PyHal version information for a bug report."""
    return {
        'platform': BugReporter.get_platform_info(),
        'pyhal': {
            'version': hal_version.__version__,
            'build': hal_version.__build__,
        },
    }
Generate information for a bug report
15,675
def to_isostring(dt):
    """Convert a datetime to an ISO-8601 string, assuming it is UTC.

    :param dt: datetime to convert; naive datetimes are treated as UTC
    :return: e.g. '2020-01-02T03:04:05.123Z' (millisecond precision)
    """
    # Warn for ANY aware datetime with a non-zero offset: the value is
    # formatted as-is below, so a non-UTC offset is silently misinterpreted.
    # (The previous `> timedelta(0)` check missed negative offsets; logging.warn
    # is also a deprecated alias of logging.warning.)
    if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) != timedelta(0):
        logging.warning('Warning: aware datetimes are interpreted as if they were naive')
    # Truncate microseconds to milliseconds and append the UTC designator
    return dt.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
Converts the given datetime to an ISO String . This assumes the datetime is UTC .
15,676
def parse_value(proto):
    """Convert a Protobuf Value from the API into a Python native value.

    :param proto: a Value message; exactly one typed field is expected
    :return: the native value, or None for unrecognized value types
    """
    if proto.HasField('floatValue'):
        return proto.floatValue
    elif proto.HasField('doubleValue'):
        return proto.doubleValue
    elif proto.HasField('sint32Value'):
        return proto.sint32Value
    elif proto.HasField('uint32Value'):
        return proto.uint32Value
    elif proto.HasField('binaryValue'):
        return proto.binaryValue
    elif proto.HasField('timestampValue'):
        # NOTE(review): reads stringValue, not timestampValue -- presumably the
        # server mirrors the ISO string alongside the timestamp; confirm
        # against the proto definition
        return parse_isostring(proto.stringValue)
    elif proto.HasField('stringValue'):
        return proto.stringValue
    elif proto.HasField('uint64Value'):
        return proto.uint64Value
    elif proto.HasField('sint64Value'):
        return proto.sint64Value
    elif proto.HasField('booleanValue'):
        return proto.booleanValue
    elif proto.HasField('arrayValue'):
        return [parse_value(v) for v in proto.arrayValue]
    elif proto.HasField('aggregateValue'):
        return OrderedDict(zip(proto.aggregateValue.name, proto.aggregateValue.value))
    else:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning('Unrecognized value type for update %s', proto)
        return None
Converts a Protobuf Value from the API into a python native value
15,677
def create_correlation_matrix_plot(correlation_matrix, title, feature_list):
    """Render a labelled heat-map of a correlation matrix.

    :param correlation_matrix: square matrix of correlations in [-1, 1]
    :param title: chart title
    :param feature_list: axis labels, one per matrix row/column
    """
    chart = SimpleChart(title)
    axis = chart.get_ax()
    positions = list(range(len(feature_list)))
    axis.set_xticks(positions)
    axis.set_xticklabels([feature_list[i] for i in range(len(feature_list))], rotation=90)
    axis.set_yticks(positions)
    axis.set_yticklabels([feature_list[i] for i in range(len(feature_list))])
    image = axis.imshow(correlation_matrix,
                        interpolation="nearest",
                        cmap=cm.get_cmap("jet", 30))
    chart.get_fig().colorbar(image, ticks=np.linspace(-1, 1, 21))
    # Leave room for the rotated x labels
    plt.gcf().subplots_adjust(bottom=0.25)
Creates plot for correlation matrix
15,678
def log_every_x_times(logger, counter, x, msg, *args, **kwargs):
    """Like logdebug, but only emits the first and every x-th message."""
    should_log = counter == 1 or counter % x == 0
    if should_log:
        logdebug(logger, msg, *args, **kwargs)
Works like logdebug but only prints the first and every xth message .
15,679
def get_dataframe(self, *args, **kwargs):
    """Retrieve data as a Pandas dataframe.

    :param columns: optional column titles; defaults to self.default_titles
    :return: pd.DataFrame built from self.get_data(*args, **kwargs)
    """
    # pop() both reads and removes the key, so a falsy 'columns' value no
    # longer leaks through into the get_data() call (previously the key was
    # deleted from kwargs only when its value was truthy)
    columns = kwargs.pop('columns', None) or self.default_titles
    return pd.DataFrame(self.get_data(*args, **kwargs), columns=columns)
Retrieve data as a Pandas dataframe .
15,680
def grabImage(self, index):
    """Return a pixmap snapshot of the parameter row at *index*."""
    row_height = self.rowHeight(0)
    # Rectangle covering the row, inset 5px from the left and bottom edges
    top = (row_height * index.row()) + row_height - 5
    rect = QtCore.QRect(5, top, self.width(), row_height)
    pixmap = QtGui.QPixmap().grabWidget(self, rect)
    return pixmap
Returns an image of the parameter row .
15,681
def mousePressEvent(self, event):
    """Select the clicked row, start an edit if allowed, then defer to the
    base-class handler."""
    index = self.indexAt(event.pos())
    if index.isValid():
        self.selectRow(index.row())
        self.setCurrentIndex(index)
        self.parameterChanged.emit(self.model().selection(index))
        self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event)
    super(AutoParameterTableView, self).mousePressEvent(event)
Begins edit on cell clicked if allowed and passes event to super class
15,682
def request(self, path, method, data=None, **kwargs):
    """Issue an HTTP request against the API and decode the response.

    :param path: absolute URL, or a path relative to the API endpoint
    :param method: HTTP verb
    :param data: JSON-serialisable request body (defaults to {})
    :raises requests.HTTPError: on non-2xx responses
    :return: decoded JSON, or {'data': <text>} when the body is not JSON
    """
    if self.api_token:
        self.request_headers['X-Cachet-Token'] = self.api_token
    is_absolute = path.startswith('http://') or path.startswith('https://')
    url = path if is_absolute else "%s/%s" % (self.api_endpoint, path)
    payload = json.dumps(data if data is not None else {})
    response = self.r_session.request(method, url,
                                      data=payload,
                                      headers=self.request_headers,
                                      timeout=self.timeout,
                                      verify=self.verify,
                                      **kwargs)
    response.raise_for_status()
    try:
        return response.json()
    except ValueError:
        # Non-JSON body: wrap the raw text
        return {'data': response.text}
Handle requests to API
15,683
def paginate_request(self, path, method, data=None, **kwargs):
    """Iterate over all entries of a paginated API endpoint.

    Follows 'next_page' pagination links until exhausted; a non-list 'data'
    payload is yielded whole and ends the iteration.
    """
    next_page = path
    while next_page:
        response = self.request(next_page, method, data=data, **kwargs)
        if not isinstance(response.get('data'), list):
            next_page = None
            yield response['data']
            continue
        for entry in response['data']:
            yield entry
        try:
            pagination_links = response['meta']['pagination']['links']
            next_page = pagination_links.get('next_page')
        except KeyError:
            next_page = None
Handle paginated requests to API
15,684
def maybe_open(infile, mode='r'):
    """Take a file name or an open handle and yield a handle.

    Handles opened here are closed on exit; caller-supplied handles are left
    open. (Presumably used with @contextlib.contextmanager -- confirm at the
    definition site outside this view.)

    :param infile: path string or file-like object
    :param mode: open mode, used only when *infile* is a path
    """
    if isinstance(infile, basestring):
        handle = open(infile, mode)
        do_close = True
    else:
        handle = infile
        do_close = False
    try:
        yield handle
    finally:
        # BUGFIX: previously the handle leaked when an exception escaped the
        # yield; the finally clause guarantees cleanup on every exit path
        if do_close:
            handle.close()
Take a file name or a handle and return a handle .
15,685
def _get_row_tag(row, tag):
    """Extract cleaned text of all *tag* cells in *row*; None when all empty."""
    cells = [String(cell.text).strip_bad_html() for cell in row.find_all(tag)]
    if any(cells):
        return cells
    return None
Parses row and gets columns matching tag
15,686
def _parse_row(row):
    """Collect header (th) then data (td) cells of an HTML table row."""
    cells = []
    for tag in ("th", "td"):
        parsed = HtmlTable._get_row_tag(row, tag)
        if parsed:
            cells += parsed
    return cells
Parses HTML row
15,687
def parse(self):
    """Parse every table row into a list of cell lists, skipping empty rows."""
    parsed_rows = (self._parse_row(row) for row in self.soup.find_all("tr"))
    return [row for row in parsed_rows if row]
Parses data in table
15,688
def delete_module(modname):
    """Remove *modname* and its submodules from sys.modules.

    :param modname: dotted module name
    :raises ValueError: if *modname* is not currently in sys.modules
    """
    try:
        _ = sys.modules[modname]
    except KeyError:
        raise ValueError("Module not found in sys.modules: '{}'".format(modname))
    for module in list(sys.modules.keys()):
        # Match the module itself and dotted submodules only; the previous
        # bare startswith(modname) also deleted unrelated modules that merely
        # share the prefix (deleting 'foo' would remove 'foobar')
        if module and (module == modname or module.startswith(modname + '.')):
            del sys.modules[module]
Delete module and sub - modules from sys . module
15,689
def reload_module(module):
    """Reload *module*, working across Python 2 and 3.

    Tries the Python 2 builtin reload(), then imp.reload(), then
    importlib.reload(). The original had two identical
    `except (ImportError, NameError)` clauses on the same try, so the second
    (importlib) fallback was unreachable; the fallbacks are now nested.
    """
    try:
        reload(module)  # builtin in Python 2 only
    except (ImportError, NameError):
        try:
            import imp  # deprecated; removed in Python 3.12
            imp.reload(module)
        except (ImportError, NameError):
            import importlib
            importlib.reload(module)
Reload the Python module
15,690
def lazy_load_modules(*modules):
    """Decorator: import *modules* for the duration of the call, then purge
    them from sys.modules so GC can reclaim the memory.

    :param modules: dotted module names to load lazily
    """
    import functools

    def decorator(function):
        # functools.wraps preserves the wrapped function's metadata, which the
        # original decorator discarded
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            module_dict = {}
            for module_string in modules:
                module = __import__(module_string)
                # NOTE(review): __import__('a.b') returns package 'a', and
                # keying sys.modules on __package__ assumes top-level names --
                # confirm callers only pass top-level modules
                sys.modules[module.__package__] = module
                reload_module(module)
                module_dict[module_string] = module
            func_response = function(*args, **kwargs)
            for module_string, module in module_dict.items():
                delete_module(module_string)
                del module
            return func_response
        return wrapper
    return decorator
Decorator to load module to perform related operation for specific function and delete the module from imports once the task is done . GC frees the memory related to module during clean - up .
15,691
def format(self, record):
    """Format *record* with the formatter registered for its log level."""
    formatters = {
        DEBUG: self.debug_formatter,
        INFO: self.info_formatter,
        ERROR: self.error_formatter,
        WARNING: self.warning_formatter,
        CRITICAL: self.critical_formatter,
    }
    formatter = formatters.get(record.levelno)
    if formatter is not None:
        return formatter.format(record)
Format the record using the corresponding formatter .
15,692
def load_stylesheet(self, id, path):
    """Dynamically inject a stylesheet link into the page.

    :param id: identifier assigned to the generated HeadLink child
    :param path: URL/path of the stylesheet
    """
    link = HeadLink(id=id, link_type="stylesheet", path=path)
    self.add_child(link)
Proper way to dynamically inject a stylesheet in a page .
15,693
def add_child(self, widget):
    """Append *widget* to the list, wrapped in a fresh <li> item."""
    item = _li(id=self.id + str(self._count))
    item.add_child(widget)
    super(List, self).add_child(item)
    # Remember the (widget, wrapper) pairing so remove_child can undo it
    self._items.append((widget, item))
    self._count += 1
Append a widget to the list .
15,694
def remove_child(self, widget):
    """Remove *widget* (and its <li> wrapper) from the list.

    :raises ValueError: if *widget* is not a child of this list
    """
    for child, wrapper in self._items:
        if child == widget:
            self._items.remove((child, wrapper))
            super(List, self).remove_child(wrapper)
            return
    raise ValueError("Child not in list.")
Remove a widget from the list .
15,695
def move_page(request, page_id, extra_context=None):
    """Move the page to the POSTed target at the given position.

    Returns the refreshed page list on a (possibly invalid) move attempt,
    or redirects when target/position parameters are missing.
    """
    page = Page.objects.get(pk=page_id)
    target = request.POST.get('target', None)
    position = request.POST.get('position', None)
    if target is not None and position is not None:
        try:
            target = Page.objects.get(pk=target)
        except Page.DoesNotExist:
            pass
        else:
            # Invalidate cache entries for both pages before moving
            page.invalidate()
            target.invalidate()
            from mptt.exceptions import InvalidMove
            invalid_move = False
            try:
                page.move_to(target, position)
            except InvalidMove:
                invalid_move = True
            return list_pages_ajax(request, invalid_move)
    return HttpResponseRedirect('../../')
Move the page to the requested target at the given position .
15,696
def reloc_var(var_name, reloc_delta, pointer, var_type):
    """Build a line of C source code that relocates a variable.

    :param var_name: name of the variable
    :param reloc_delta: relocation offset to apply
    :param pointer: True to declare the variable as a pointer
    :param var_type: C type of the variable
    :return: one C declaration line ending in a newline
    """
    star = '*' if pointer else ''
    return '{vt} {star}{vn} = RELOC_VAR(_{vn}, {delta}, {vt});\n'.format(
        vt=var_type, star=star, vn=var_name, delta=reloc_delta)
Build C source code to relocate a variable .
15,697
def make_c_args(arg_pairs):
    """Build a C argument list string from (number, type, name) tuples.

    Pairs are sorted by their leading number; nameless arguments contribute
    only their type.
    """
    logging.debug(arg_pairs)
    pieces = []
    for _number, arg_type, arg_name in sorted(arg_pairs):
        if arg_name:
            pieces.append('{} {}'.format(arg_type, arg_name))
        else:
            pieces.append(arg_type)
    return ', '.join(pieces)
Build a C argument list from return type and arguments pairs .
15,698
def interop_parse(self):
    """Parse the InterOp folder with the Illumina interop library and store
    the PhiX error rate and percent aligned on every sample.

    Falls back to 'ND' placeholders for both metrics when parsing fails.
    """
    try:
        run_metrics = py_interop_run_metrics.run_metrics()
        valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
        py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load)
        run_metrics.read(self.path, valid_to_load)
        summary = py_interop_summary.run_summary()
        py_interop_summary.summarize_run_metrics(run_metrics, summary)
        errorrate = summary.total_summary().error_rate()
        pctaligned = summary.total_summary().percent_aligned()
        for sample in self.metadata:
            sample.run.error_rate = '{:.2f}'.format(errorrate)
            sample.run.phix_aligned = '{:.2f}'.format(pctaligned)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
        # propagate; any interop read/summary failure yields 'ND' placeholders
        for sample in self.metadata:
            sample.run.error_rate = 'ND'
            sample.run.phix_aligned = 'ND'
Use interop to parse the files in the InterOp folder to extract the number of reads mapping to PhiX as well as the error rate
15,699
def make_inc(incs):
    """Build '/I <dir>' argument pairs for link.exe from include directories."""
    flattened = []
    for inc in incs:
        flattened.extend(['/I', inc])
    return flattened
Make include directory for link . exe .