idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
16,400
def from_section(cls, stream, section_name='.pic'):
    """Build a Converter from one section of a binary stream.

    :param stream: readable binary stream containing the executable
    :param section_name: name of the section to extract (default '.pic')
    :return: a new instance built from the section data and target system
    """
    executable = Executable(stream)
    data = executable.get_section_data(section_name)
    return cls(data, executable.system)
Construct a Converter object from the specified section of the specified binary stream .
16,401
def to_esc(self):
    """Render the byte stream as a ``\\x``-escaped string, two characters per byte."""
    parts = [r'\x' + ''.join(group) for group in chunked(self.stream, 2)]
    return ''.join(parts)
Convert to escape string .
16,402
def get_percentage_relative_to(val, other):
    """Return the percentage difference of ``val`` relative to ``other``.

    The result is positive when ``val`` exceeds ``other`` and negative when
    it falls short, e.g. ``get_percentage_relative_to(110, 100) == 10.0``.

    :param val: the value being compared
    :param other: the reference value
    :return: relative difference as a float percentage
    :raises ZeroDivisionError: if ``other`` is zero
    """
    ratio = float(val) / float(other) - 1
    return ratio * 100.0
Return the percentage difference of val relative to other (positive when val is larger, negative when smaller).
16,403
def setup_ui(self):
    """Build the dialog's widgets and layout."""
    layout = QtGui.QVBoxLayout(self)
    checkbox = QtGui.QCheckBox("Import references")
    layout.addWidget(checkbox)
    self.main_vbox = layout
    self.import_all_references_cb = checkbox
Create all ui elements and layouts
16,404
def get_cleanups(self):
    """Assemble the cleanup actions for a release based on the selected options.

    Always opens, updates the scene node, and saves; optionally imports all
    references when the option widget requests it.
    """
    open_unit = ActionUnit(name="Open",
                           description="Open the maya scene.",
                           actionfunc=open_scene)
    actions = [open_unit]
    if self._option_widget.import_references():
        actions.append(ActionUnit(name="Import references",
                                  description="Import all references in the scene.",
                                  actionfunc=import_all_references,
                                  depsuccess=[open_unit]))
    scenenode_unit = ActionUnit(name="Update Scene Node",
                                description="Change the id from the jbscene node from work to releasefile.",
                                actionfunc=update_scenenode,
                                depsuccess=[open_unit])
    actions.append(scenenode_unit)
    actions.append(ActionUnit(name="Save",
                              description="Save the scene.",
                              actionfunc=save_scene,
                              depsuccess=[scenenode_unit]))
    return ActionCollection(actions)
Get the cleanup actions for a release depending on the selected options
16,405
def epcr_primer_file(self, formattedprimers):
    """Write a re-PCR-compatible primer file from the primer dictionaries.

    One line per forward/reverse combination, named ``<basename>_<f>_<r>``.
    """
    logging.info('Creating re-PCR-compatible primer file')
    with open(formattedprimers, 'w') as outfile:
        for name in sorted(self.forward_dict):
            forward_primers = self.forward_dict[name]
            reverse_primers = self.reverse_dict[name]
            for fwd_idx, fwd in enumerate(forward_primers):
                for rev_idx, rev in enumerate(reverse_primers):
                    label = '{bn}_{fi}_{ri}'.format(bn=name, fi=fwd_idx, ri=rev_idx)
                    outfile.write('{pn}\t{fp}\t{rp}\n'.format(pn=label, fp=fwd, rp=rev))
Create the ePCR - compatible primer file from the dictionaries of primer combinations
16,406
def epcr_threads(self, formattedprimers, ampliconsize=10000):
    """Run ePCR analyses in a multi-threaded fashion.

    :param formattedprimers: path to the re-PCR-compatible primer file
    :param ampliconsize: upper bound of the amplicon size range passed to re-PCR
    """
    # Spawn one daemon worker per sample with a usable assembly; workers run
    # self.epcr, which presumably consumes (sample, outfile) tuples from
    # self.epcrqueue below -- TODO confirm against the worker implementation.
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            threads = Thread(target=self.epcr, args=())
            # NOTE(review): setDaemon() is deprecated; daemon=True is preferred.
            threads.setDaemon(True)
            threads.start()
    logging.info('Running ePCR analyses')
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            # Attach a per-analysis GenObject to hold paths and commands.
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].primers = formattedprimers
            sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype)
            make_path(sample[self.analysistype].reportdir)
            outfile = os.path.join(sample[self.analysistype].reportdir, sample.name)
            # famap/fahash build the indexed database that re-PCR queries.
            sample.commands.famap = '{famap} -b {outfile}.famap {fasta}'.format(
                famap=os.path.join(self.homepath, 'ePCR', 'famap'),
                outfile=outfile,
                fasta=sample.general.bestassemblyfile)
            sample.commands.fahash = '{fahash} -b {outfile}.hash {outfile}.famap'.format(
                fahash=os.path.join(self.homepath, 'ePCR', 'fahash'),
                outfile=outfile)
            # Adjacent string literals: the re-PCR command is one string.
            sample.commands.epcr = '{rePCR} -S {outfile}.hash -r + -d 1-{ampsize} -n {mismatches} -g 0 -G -q ' \
                                   '-o {outfile}.txt {primers}'.format(
                rePCR=os.path.join(self.homepath, 'ePCR', 're-PCR'),
                outfile=outfile,
                ampsize=ampliconsize,
                mismatches=self.mismatches,
                primers=sample[self.analysistype].primers)
            sample[self.analysistype].resultsfile = '{of}.txt'.format(of=outfile)
            self.epcrqueue.put((sample, outfile))
    # Block until every queued sample has been processed by the workers.
    self.epcrqueue.join()
Run ePCR in a multi - threaded fashion
16,407
def epcr_parse(self):
    """Parse the ePCR outputs into a per-sample toxin profile string."""
    logging.info('Parsing ePCR outputs')
    for sample in self.metadata:
        if sample.general.bestassemblyfile != 'NA':
            toxin_set = set()
            if os.path.isfile(sample[self.analysistype].resultsfile):
                with open(sample[self.analysistype].resultsfile) as epcrresults:
                    for result in epcrresults:
                        # NOTE(review): this skips any line *containing* '#',
                        # not just comment lines starting with it -- confirm
                        # data lines can never contain '#'.
                        if "#" not in result:
                            data = result.split('\t')
                            # Primer names look like '<toxin>_<f>_<r>'; keep the toxin part.
                            vttype = data[0].split('_')[0]
                            toxin_set.add(vttype)
            # Semicolon-joined sorted profile, or 'ND' when nothing was found.
            sample[self.analysistype].toxinprofile = ";".join(sorted(list(toxin_set))) if toxin_set else 'ND'
        else:
            # No usable assembly: record an 'NA' profile on a fresh GenObject.
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].toxinprofile = 'NA'
Parse the ePCR outputs
16,408
def epcr_report(self):
    """Write a CSV report of the ePCR-calculated toxin profile per strain."""
    logging.info('Creating {at} report'.format(at=self.analysistype))
    report_file = os.path.join(self.reportpath, '{at}.csv'.format(at=self.analysistype))
    lines = ['Strain,ToxinProfile\n']
    for sample in self.metadata:
        lines.append('{sn},{tp}\n'.format(sn=sample.name,
                                          tp=sample[self.analysistype].toxinprofile))
    with open(report_file, 'w') as report:
        report.write(''.join(lines))
Create a report of the ePCR - calculated toxin profiles
16,409
def parse_epcr(self):
    """Parse the ePCR output file into per-sample result dictionaries.

    For each gene, only the hit with the fewest primer mismatches is kept
    (delegated to populate_results_dict).
    """
    for sample in self.vtyper_object.metadata:
        sample[self.analysistype].result_dict = dict()
        with open(sample[self.analysistype].resultsfile) as epcrresults:
            for result in epcrresults:
                # NOTE(review): skips any line containing '#', not only comment lines.
                if "#" not in result:
                    primer_set, contig, strand, start, stop, total_mismatches, indels, amplicon_combo = result.rstrip().split('\t')
                    total_mismatches = int(total_mismatches)
                    # Normalise the hit range so it always reads low-high,
                    # regardless of strand orientation.
                    genome_pos = '{min}-{max}'.format(min=min([int(start), int(stop)]),
                                                      max=max([int(start), int(stop)]))
                    # Primer names look like '<gene>_<f>_<r>'.
                    gene_re = re.search(r'([\w-]+)_(\d{1,3})_(\d{1,3})', primer_set)
                    gene = gene_re.groups()[0]
                    amplicon_length = amplicon_combo.split('/')[0]
                    try:
                        # Replace the stored hit only if this one has fewer mismatches.
                        if total_mismatches < sample[self.analysistype].result_dict[gene]['total_mismatches']:
                            self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set)
                    except KeyError:
                        # First hit observed for this gene.
                        self.populate_results_dict(sample=sample, gene=gene, total_mismatches=total_mismatches, genome_pos=genome_pos, amplicon_length=amplicon_length, contig=contig, primer_set=primer_set)
Parse the ePCR output file and populate a dictionary of results. For alleles, keep only the best result (fewest mismatches) for each gene before populating the dictionary.
16,410
def create_epr_report(self):
    """Write the final CSV report from the per-sample result dictionaries."""
    with open(os.path.join(self.reportpath, 'ePCR_report.csv'), 'w') as report:
        results = 'Sample,Gene,GenomeLocation,AmpliconSize,Contig,TotalMismatches,PrimerSet\n'
        for sample in self.vtyper_object.metadata:
            if sample[self.analysistype].result_dict:
                for gene, result_dict in sample[self.analysistype].result_dict.items():
                    results += '{sn},{gene},{genomelocation},{ampliconsize},{contig},{nm},{ps}\n'.format(
                        sn=sample.name,
                        gene=gene,
                        genomelocation=result_dict['genome_pos'],
                        ampliconsize=result_dict['amplicon_length'],
                        contig=result_dict['contig'],
                        nm=result_dict['total_mismatches'],
                        ps=result_dict['primer_set'])
                    # Optionally export the amplicon sequence for each hit.
                    if self.export_amplicons:
                        self.ampliconfile(sample=sample,
                                          contig=result_dict['contig'],
                                          amplicon_range=result_dict['genome_pos'].split('-'),
                                          primer_set=result_dict['primer_set'])
            else:
                # No hits for this sample: record its name alone.
                results += '{sn}\n'.format(sn=sample.name)
        report.write(results)
Parse the results dictionaries and create a final report
16,411
def samplesheet(self):
    """Write a custom SampleSheet.csv containing only the samples that did not
    pass the quality threshold on the previous iteration (self.incomplete).

    Only runs when demultiplexing is enabled.
    """
    if self.demultiplex:
        make_path(self.samplesheetpath)
        self.customsamplesheet = os.path.join(self.samplesheetpath, 'SampleSheet.csv')
        # Column order of the Illumina [Data] section.
        header = ['Sample_ID', 'Sample_Name', 'Sample_Plate', 'Sample_Well', 'I7_Index_ID', 'index', 'I5_Index_ID', 'index2', 'Sample_Project', 'Description']
        with open(self.customsamplesheet, 'w') as samplesheet:
            lines = str()
            # [Header] section mirrors the original run's metadata.
            lines += '[Header]\n'
            lines += 'IEMFileVersion,{}\n'.format(self.header.IEMFileVersion)
            lines += 'Investigator Name,{}\n'.format(self.header.InvestigatorName)
            lines += 'Experiment Name,{}\n'.format(self.header.ExperimentName)
            lines += 'Date,{}\n'.format(self.header.Date)
            lines += 'Workflow,{}\n'.format(self.header.Workflow)
            lines += 'Application,{}\n'.format(self.header.Application)
            lines += 'Assay,{}\n'.format(self.header.Assay)
            lines += 'Description,{}\n'.format(self.header.Description)
            lines += 'Chemistry,{}\n'.format(self.header.Chemistry)
            lines += '\n'
            # [Reads]: forward and reverse read lengths.
            lines += '[Reads]\n'
            lines += str(self.forward) + '\n'
            lines += str(self.reverse) + '\n'
            lines += '\n'
            lines += '[Settings]\n'
            lines += 'ReverseComplement,{}\n'.format(self.header.ReverseComplement)
            lines += 'Adapter,{}\n'.format(self.header.Adapter)
            lines += '\n'
            lines += '[Data]\n'
            lines += ','.join(header)
            lines += '\n'
            # Re-emit only the previously-incomplete samples, in header order.
            for incomplete in self.incomplete:
                for sample in self.rundata:
                    if incomplete == sample['SampleID']:
                        for data in header:
                            # Run-data keys are the header names without underscores.
                            result = sample[data.replace('_', '')]
                            # 'NA' placeholders become empty fields; the
                            # Description column terminates each row.
                            if data != 'Description':
                                lines += '{},'.format(result.replace('NA', ''))
                            else:
                                lines += '{}\n'.format(result.replace('NA', ''))
            samplesheet.write(lines)
Create a custom sample sheet based on the original sample sheet for the run but only including the samples that did not pass the quality threshold on the previous iteration
16,412
def update(connection=None, silent=False, hgnc_file_path=None, hcop_file_path=None, low_memory=False):
    """Refresh the database with the current HGNC release.

    :param connection: SQLAlchemy connection string (None uses the default)
    :param silent: suppress progress output when true
    :param hgnc_file_path: optional local HGNC source file
    :param hcop_file_path: optional local HCOP source file
    :param low_memory: import in a lower-memory mode
    """
    manager = DbManager(connection)
    manager.db_import(silent=silent,
                      hgnc_file_path=hgnc_file_path,
                      hcop_file_path=hcop_file_path,
                      low_memory=low_memory)
    manager.session.close()
Update the database with current version of HGNC
16,413
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    """Persist the SQLAlchemy connection string to the configuration file.

    Creates the config file when missing; otherwise updates the existing
    ``[database]`` section in place (creating it if absent).

    :param connection: SQLAlchemy connection string to store
    """
    config_path = defaults.config_file_path
    config = RawConfigParser()
    if not os.path.exists(config_path):
        with open(config_path, 'w') as config_file:
            config['database'] = {'sqlalchemy_connection_string': connection}
            config.write(config_file)
        log.info('create configuration file {}'.format(config_path))
    else:
        config.read(config_path)
        # A pre-existing file may lack the section; RawConfigParser.set
        # raises NoSectionError in that case, so create it first.
        if not config.has_section('database'):
            config.add_section('database')
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(config_path, 'w') as configfile:
            config.write(configfile)
Set the connection string for sqlalchemy and write it to the config file .
16,414
def set_mysql_connection(host='localhost', user='pyhgnc_user', passwd='pyhgnc_passwd', db='pyhgnc', charset='utf8'):
    """Build a MySQL (pymysql) connection string, persist it, and return it.

    :return: the connection string that was stored via set_connection
    """
    uri = 'mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}'.format(
        user=user, passwd=passwd, host=host, db=db, charset=charset)
    set_connection(uri)
    return uri
Method to set a MySQL connection
16,415
def relocate(self, destination):
    """Point the virtualenv's activate scripts and Python shebangs at *destination*."""
    for activate_script in self.bin.activates:
        activate_script.vpath = destination
    python_shebang = '#!{0}'.format(os.path.join(destination, 'bin', 'python'))
    for script in self.bin.files:
        shebang = script.shebang
        if shebang and ('python' in shebang or 'pypy' in shebang):
            script.shebang = python_shebang
Configure the virtual environment for another path .
16,416
def move(self, destination):
    """Reconfigure the virtual environment for *destination*, then move it there.

    relocate() must run first so the activate scripts and shebangs already
    point at the new location when the files land.
    """
    self.relocate(destination)
    shutil.move(self.path, destination)
    self._path = destination
Reconfigure and move the virtual environment to another path .
16,417
def aggregate(self):
    """Merge each per-sample report of the same type into one master report."""
    for report in self.reportset:
        printtime('Processing {}'.format(report.split('.')[0]), self.start)
        # mlst.csv needs a hard-coded header; other reports take theirs from
        # the first sample file encountered.
        header = '' if report != 'mlst.csv' else 'Strain,Genus,SequenceType,Matches,1,2,3,4,5,6,7\n'
        data = ''
        with open(os.path.join(self.reportpath, report), 'w') as aggregate:
            for sample in self.runmetadata.samples:
                try:
                    with open(os.path.join(sample.general.reportpath, report), 'r') as runreport:
                        if not header:
                            # NOTE(review): when the header is taken from this
                            # file, its data rows are not read -- confirm that
                            # is intended.
                            header = runreport.readline()
                        else:
                            for row in runreport:
                                # Normalise missing trailing newlines.
                                if not row.endswith('\n'):
                                    row += '\n'
                                # Skip repeated header rows from later files.
                                if row.split(',')[0] != header.split(',')[0]:
                                    data += row
                except IOError:
                    # Samples without this report type are simply skipped.
                    pass
            aggregate.write(header)
            aggregate.write(data)
Aggregate all reports of the same type into a master report
16,418
def _parse_blocks(instream):
    """Yield alignment blocks parsed from *instream* as dicts.

    A block starts at a '[' header line, followed by a post-header line with
    the query length/characters and then the sequence entries.
    """
    ilines = sugar.unblank(instream)
    for line in ilines:
        if line.startswith('['):
            level, one, name, seqcount, params = _parse_block_header(line)
            qlen, qchars = _parse_block_postheader(next(ilines))
            sequences = list(_parse_sequences(ilines, qlen))
            if not len(sequences) == seqcount:
                # NOTE(review): logging.warn is deprecated (use logging.warning).
                logging.warn("Expected %d sequences in block %s, found %d",
                             seqcount, name, len(sequences))
            yield {
                'level': level,
                'one': one,
                'name': name,
                'params': params,
                'query_length': qlen,
                'query_chars': qchars,
                'sequences': sequences,
            }
Parse an alignment block from the given file handle .
16,419
def _parse_sequences(ilines, expect_qlen):
    """Yield sequence dicts from the current block until its terminator line.

    Malformed entries are logged and skipped rather than aborting the block.

    :param ilines: iterator over the block's remaining lines
    :param expect_qlen: query length declared in the block post-header
    """
    while True:
        first = next(ilines)
        # A line like '_..._].' terminates the block's sequence listing.
        if first.startswith('_') and first.endswith('].'):
            break
        try:
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description) = _parse_seq_header(next(ilines))
        try:
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # Normalise absent flank info to 0, and reconcile declared flank
        # lengths against the actual flank sequences.
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {
            'index': index,
            'id': rec_id,
            'description': description,
            'dbxrefs': dbxrefs,
            'phylum': phylum,
            'taxchar': taxchar,
            'head_len': headlen,
            'tail_len': taillen,
            'head_seq': headseq,
            'tail_seq': tailseq,
            'length': this_len,
            'seq': molseq,
        }
Parse the sequences in the current block .
16,420
def realign_seqs(block, gap_char='.', align_indels=False):
    """Pad rows with gaps so insert (lowercase) columns line up across rows.

    NOTE(review): *align_indels* is accepted but never used -- confirm intent.

    :param block: dict with a 'sequences' list of {'seq': str} entries
    :param gap_char: character inserted into rows lacking an insert residue
    :return: list of re-aligned sequence strings, one per input row
    """
    rows = [list(entry['seq']) for entry in block['sequences']]
    total = len(rows)
    col = 0
    while col < len(rows[0]):
        # Rows whose character here is NOT a lowercase insert residue.
        gap_targets = [row for row in rows if not row[col].islower()]
        if len(gap_targets) != total:
            # At least one row has an insert at this column; pad the others.
            for row in gap_targets:
                row.insert(col, gap_char)
        col += 1
    return [''.join(row) for row in rows]
Add gaps to a block so all residues in a column are equivalent .
16,421
def collapse_to_consensus(seqrecords, strict=False, do_iron=True):
    """Collapse an alignment onto its consensus columns (inverse of realign_seqs).

    The first record is treated as the consensus; its non-gap columns define
    the aligned positions. The remaining records are re-encoded so aligned
    residues are uppercase and insert residues lowercase.

    :param seqrecords: list of records, or an alignment object with _records
    :param strict: raise instead of warning when the consensus mixes gap chars
    :param do_iron: run iron() over each re-encoded sequence
    :raises ValueError: on length mismatches, or mixed gaps with strict=True
    :return: a block dict as produced by consensus2block
    """
    level = 0
    name = seqrecords[0].id
    # Accept either a plain list or an alignment object carrying metadata.
    if hasattr(seqrecords, '_records'):
        if hasattr(seqrecords, 'level'):
            level = seqrecords.level
        if hasattr(seqrecords, 'name'):
            name = seqrecords.name
        seqrecords = seqrecords._records
    consensus = seqrecords.pop(0)
    cons_length = len(consensus)
    for i, s in enumerate(seqrecords):
        if len(s) != cons_length:
            raise ValueError("Sequence #%d has length %d, consensus is %d"
                             % (i + 2, len(s), cons_length))
    # Decide which gap character marks unaligned (insert) columns.
    if '.' in str(consensus.seq):
        if '-' in str(consensus.seq):
            if strict:
                raise ValueError("Consensus contains '-' gap characters")
            logging.warn("Consensus sequence contains both '.' and '-' gap "
                         "characters -- is it really the consensus?")
            aligned_cols = [(c not in '.-') for c in str(consensus.seq)]
        else:
            aligned_cols = [c != '.' for c in str(consensus.seq)]
    else:
        aligned_cols = [c != '-' for c in str(consensus.seq)]
    consensus.seq = replace_asterisks(consensus.seq, 'consensus')
    block = consensus2block(consensus, level=level, name=name)
    qlen = block['query_length']
    # Python 3 fix: the original used xrange, which no longer exists.
    for index, rec in zip(range(2, len(seqrecords) + 2), seqrecords):
        new_mol_seq = []
        is_beginning = True
        for aligned_col, char in zip(aligned_cols, replace_asterisks(rec.seq, index)):
            if aligned_col:
                is_beginning = False
                if char in '-.':
                    new_mol_seq.append('-')
                else:
                    new_mol_seq.append(char.upper())
            else:
                # Insert column: keep residues lowercase; drop leading inserts
                # that precede the first aligned column.
                if char not in '-.' and not is_beginning:
                    new_mol_seq.append(char.lower())
        rec.seq = ''.join(new_mol_seq)
        if do_iron:
            rec.seq = iron(rec.seq)
        block['sequences'].append(seqrecord2sequence(rec, qlen, index))
    return block
Collapse an alignment onto its consensus columns — the inverse operation of realign_seqs.
16,422
def iron(sequence):
    """Iron out indel regions in an aligned sequence.

    Repeatedly rewrites the string until no lowercase insert residue is
    directly adjacent to a '-' gap: adjacent inserts are promoted into the
    gap positions (uppercased) and remaining gaps are pushed past the insert
    run. Residue content (ignoring gaps and case) is asserted unchanged.
    """
    # Matches an insert residue [a-y] touching a gap on either side.
    r_indel = re.compile(r'(-[a-y]|[a-y]-)')
    orig_sequence = sequence
    while r_indel.search(sequence):
        # State machine over the string: tracking whether we are inside an
        # insert run, inside a gap run, and the pending inserts/gap count.
        in_insert = False
        in_gap = False
        seen_gaps = 0
        inserts = []
        outchars = []
        for char in sequence:
            if in_insert:
                if char.islower():
                    inserts.append(char)
                elif char.isupper():
                    # Insert run ends at an aligned residue: flush inserts,
                    # then any pending gaps, then the residue itself.
                    in_insert = False
                    outchars.extend(inserts)
                    inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    assert char == '-'
                    if not inserts:
                        in_insert = False
                        in_gap = True
                        seen_gaps += 1
                    else:
                        # Promote the first pending insert into this gap slot.
                        outchars.append(inserts.pop(0).upper())
                        if not inserts:
                            in_insert = False
                            in_gap = True
            elif in_gap:
                if char.islower():
                    # Gap run ends at an insert: flush, then start a new run.
                    in_insert = True
                    in_gap = False
                    if inserts:
                        outchars.extend(inserts)
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    inserts = [char]
                elif char.isupper():
                    in_gap = False
                    if inserts:
                        outchars.extend(inserts)
                        inserts = []
                    outchars.append('-' * seen_gaps)
                    seen_gaps = 0
                    outchars.append(char)
                else:
                    assert char == '-'
                    seen_gaps += 1
            else:
                # Neutral state: no pending inserts or gaps may exist here.
                assert not inserts and not seen_gaps, (
                    "Inserts: %s, gaps: %s, seq: %s, in_ins=%s, in_gap=%s"
                    % (inserts, seen_gaps, sequence, in_insert, in_gap))
                if char.isupper():
                    outchars.append(char)
                elif char.islower():
                    inserts.append(char)
                    in_insert = True
                else:
                    assert char == '-'
                    seen_gaps += 1
                    in_gap = True
        # Flush whatever remained at end of string.
        if inserts:
            outchars.extend(inserts)
        if seen_gaps:
            outchars.append('-' * seen_gaps)
        sequence = ''.join(outchars)
    # Sanity check: ironing must never change the residue content.
    assert (sequence.replace('-', '').upper()
            == orig_sequence.replace('-', '').upper()), (
        '\nOrig: ' + orig_sequence + '\nIron: ' + sequence)
    return sequence
Iron out indel regions in the aligned sequence .
16,423
def get_github_content(repo, path, auth=None):
    """Fetch a text file from a GitHub repo via the contents API.

    :param repo: repository in 'owner/name' form (interpolated into file_url)
    :param path: path of the file within the repository
    :param auth: optional requests auth tuple/object
    :return: the decoded UTF-8 file contents
    :raises RuntimeError: when the API returns a non-base64 encoding
    """
    import base64
    response = requests.get(file_url.format(repo=repo, path=path), auth=auth)
    if not response.ok:
        print("There is a problem with the request")
        print(file_url.format(repo=repo, path=path))
        print(response.json())
        exit(1)
    payload = response.json()
    if not payload['encoding'] == 'base64':
        raise RuntimeError("Unknown Encoding encountered when fetching {} from repo {}: {}".format(path, repo, payload['encoding']))
    # str.decode('base64') was removed in Python 3; use the base64 module.
    return base64.b64decode(payload['content']).decode('utf8')
Retrieve text files from a github repo
16,424
def collect_reponames():
    """Guess candidate GitHub repo names from the working directory.

    Combines the `git remote` origin (preferred, else any remote) with repo
    markers found in local ``*.html`` files; returns a de-duplicated list.
    """
    reponames = []
    try:
        with open(os.devnull) as devnull:
            remote_data = subprocess.check_output(["git", "remote", "-v", "show"], stderr=devnull)
        branches = {}
        for line in remote_data.decode('utf-8').split("\n"):
            if line.strip() == "":
                continue
            remote_match = re_mote.match(line)
            if remote_match is not None:
                branches[remote_match.group(1)] = remote_match.group(5)
        if len(branches) > 0:
            if "origin" in branches:
                reponames.append(branches["origin"])
            else:
                # dict.values() is a view in Python 3 and is not indexable;
                # take an arbitrary remote via next(iter(...)).
                reponames.append(next(iter(branches.values())))
    except OSError:
        pass
    except subprocess.CalledProcessError:
        pass
    for fname in glob.iglob("*.html"):
        # Python 3 fix: open()'s third positional argument is buffering;
        # the encoding must be passed as a keyword. Also close the file.
        with open(fname, "r", encoding="utf8") as fid:
            fid.readline()
            line = fid.readline()
        match = re.match(repo_marker_re, line)
        if match is not None:
            reponames.append(match.group(1))
    return list(set(reponames))
Try to figure out a list of repos to consider by default from the contents of the working directory .
16,425
def collect_github_config():
    """Read github.user / github.token from the local or global git config.

    Keys that git cannot supply (git missing, key unset) are simply omitted.
    """
    settings = {}
    for key in ["user", "token"]:
        command = ["git", "config", "github.{}".format(key)]
        try:
            raw = subprocess.check_output(command)
        except (OSError, subprocess.CalledProcessError):
            continue
        settings[key] = raw.decode('utf-8').strip()
    return settings
Try load Github configuration such as usernames from the local or global git config
16,426
def setCurveModel(self, model):
    """Set the stimulus model used by the calibration-curve test widget.

    :param model: stimulus model, stored and forwarded to the curve widget
    """
    self.stimModel = model
    self.ui.curveWidget.setModel(model)
Sets the stimulus model for the calibration curve test
16,427
def addOption(self, stim):
    """Register *stim* as a selectable calibration stimulus and select it.

    The stimulus' duration input is disabled since calibration controls it.
    """
    self.ui.calTypeCmbbx.insertItem(0, stim.name)
    editor = stim.showEditor()
    duration_widget = editor.durationInputWidget()
    duration_widget.setEnabled(False)
    self.durationWidgets.append(duration_widget)
    self.ui.caleditorStack.insertWidget(0, editor)
    self.ui.calTypeCmbbx.setCurrentIndex(0)
Adds a stimulus to the list of stims to use for testing calibration
16,428
def saveToObject(self):
    """Propagate the current UI settings into each editor's model.

    Widgets without a saveToObject method are skipped with a debug log.
    """
    stack = self.ui.caleditorStack
    for index in range(stack.count()):
        widget = stack.widget(index)
        try:
            widget.saveToObject()
        except AttributeError:
            logging.getLogger('main').debug('index {} does not have method saveToObject'.format(index))
Saves the current UI setting to the model
16,429
def isToneCal(self):
    """Return True when the selected calibration stimulus is the calibration
    curve, which occupies the last combo-box entry."""
    combo = self.ui.calTypeCmbbx
    last_index = combo.count() - 1
    return combo.currentIndex() == last_index
Whether the currently selected calibration stimulus type is the calibration curve
16,430
def reset_generation(self, trigger):
    """Re-arm the analog output task with the current stimulus settings.

    :param trigger: trigger source handed to the new finite AO task
    :return: the stimulus array when it changed since the last call, else None
    """
    self.tone_lock.acquire()
    npts = self.stim.size
    try:
        self.aotask = AOTaskFinite(self.aochan, self.fs, npts, trigsrc=trigger)
        self.aotask.write(self.stim)
        if self.attenuator is not None:
            self.attenuator.SetAtten(self.atten)
        self.ngenerated += 1
        if self.stim_changed:
            new_gen = self.stim
        else:
            new_gen = None
        self.stim_changed = False
    except BaseException:
        # Python 3 fix: the original used the Python 2 `print u'...'`
        # statement. BaseException keeps the original bare-except behaviour
        # of releasing the lock before re-raising anything.
        print('ERROR! TERMINATE!')
        self.tone_lock.release()
        raise
    self.tone_lock.release()
    return new_gen
Re - arms the analog output according to current settings
16,431
def set_stim(self, signal, fs, attenuation=0):
    """Stage *signal* as the next stimulus to output (no hardware write).

    :param signal: sample vector to output on the next arm
    :param fs: sample rate of *signal* in Hz
    :param attenuation: attenuation in dB to apply on the next arm
    """
    self.tone_lock.acquire()
    self.stim = signal
    self.atten = attenuation
    self.fs = fs
    self.stim_changed = True
    self.tone_lock.release()
Sets any vector as the next stimulus to be output . Does not call write to hardware
16,432
def connect_attenuator(self, connect=True):
    """Connect to (or disconnect from) the TDT PA5 attenuator.

    With *connect* true, attempts to attach over ActiveX and stores the
    handle on self.attenuator (None on failure). With *connect* false,
    zeroes the attenuation and drops the handle.

    :return: the attenuator handle, or None
    """
    if connect:
        try:
            pa5 = win32com.client.Dispatch("PA5.x")
            success = pa5.ConnectPA5('GB', 1)
            # Python 3 fix throughout: print statements -> print function.
            if success == 1:
                print('Connection to PA5 attenuator established')
            else:
                print('Connection to PA5 attenuator failed')
                errmsg = pa5.GetError()
                print("Error: ", errmsg)
                raise Exception("Attenuator connection failed")
        except Exception:
            print("Error connecting to attenuator")
            pa5 = None
        self.attenuator = pa5
    else:
        if self.attenuator:
            # Consistency fix: the PA5 method is spelled SetAtten elsewhere
            # in this code; the original called setAtten here.
            self.attenuator.SetAtten(0)
        self.attenuator = None
    return self.attenuator
Establish a connection to the TDT PA5 attenuator
16,433
def start_timer(self, reprate):
    """Start the digital-output task that serves as the acquisition trigger.

    :param reprate: trigger repetition rate in Hz
    """
    # Python 3 fix: the original used the Python 2 print statement.
    print('starting digital output at rate {} Hz'.format(reprate))
    self.trigger_task = DigitalOutTask(self.trigger_src, reprate)
    self.trigger_task.start()
Start the digital output task that serves as the acquisition trigger
16,434
def start(self):
    """Arm the device: take the DAQ lock, zero counters, write buffer/settings.

    :return: whatever reset() returns after arming
    :raises Exception: when a task is already armed (it is stopped first)
    """
    if self.aitask is not None:
        # A task is already running; tear it down and complain loudly.
        self.stop()
        raise Exception("FIX ME : NESTED START OPERATIONS ALLOWED")
    self.daq_lock.acquire()
    self.nacquired = 0
    self.ngenerated = 0
    return self.reset()
Writes output buffer and settings to device
16,435
def stop(self):
    """Halt the acquisition; must be called before re-arming.

    Safe to call when no task is running: the error is reported and both
    task handles are still cleared.
    """
    try:
        self.aitask.stop()
        self.aotask.stop()
    except Exception:
        # Covers tasks that are None or already stopped. Python 3 fix: the
        # original used the Python 2 `print u"..."` statement and a bare
        # except clause.
        print("No task running")
    self.aitask = None
    self.aotask = None
Halts the acquisition this must be called before resetting acquisition
16,436
def start_continuous(self, aichans, update_hz=10):
    """Begin a continuous analog acquisition task (AITask) whose registered
    callback fires roughly *update_hz* times per second.

    :param aichans: list of analog input channel names
    :param update_hz: callback rate in Hz (default 10)
    """
    self.daq_lock.acquire()
    self.ngenerated = 0
    points_per_update = int(self.aifs / update_hz)
    channel_count = len(aichans)
    buffer_size = points_per_update * 5 * channel_count
    self.aitask = AITask(aichans, self.aifs, buffer_size)
    self.aitask.register_callback(self._read_continuous, points_per_update)
    self.aitask.start()
Begins a continuous analog acquisition, invoking the registered callback at the supplied update rate (default 10 Hz)
16,437
def run(self):
    """Run the staged stimulus generation to completion, then drop the task."""
    task = self.aotask
    task.StartTask()
    task.wait()
    task.stop()
    self.aotask = None
Executes the stimulus generation and returns when completed
16,438
def stop_all(self):
    """Halt both the analog output and input tasks and release the DAQ lock.

    NOTE(review): everything is guarded by ``self.aotask is not None`` -- if
    only ``aitask`` is live, nothing is stopped and the lock is not
    released. Confirm the two tasks are always armed together.
    """
    if self.aotask is not None:
        self.aotask.stop()
        self.aitask.stop()
        self.daq_lock.release()
        self.aitask = None
        self.aotask = None
Halts both the analog output and input tasks
16,439
def get(self, url, params=None, raw=False, stream=False, **request_kwargs):
    """GET *url* (relative) from the AmigoCloud API.

    Adds the auth token when present. Returns the streaming response when
    *stream* is true, raw bytes when *raw* is true or the body is empty,
    otherwise the decoded JSON payload.
    """
    full_url = self.build_url(url)
    params = params or {}
    if self._token:
        params.setdefault('token', self._token)
    response = requests.get(full_url, params=params, stream=stream, **request_kwargs)
    self.check_for_errors(response)
    if stream:
        return response
    if raw or not response.content:
        return response.content
    return json.loads(response.text)
GET request to AmigoCloud endpoint .
16,440
def post(self, url, data=None, files=None, headers=None, raw=False, send_as_json=True, content_type=None, **request_kwargs):
    """POST to an AmigoCloud endpoint via the shared secure-request path."""
    return self._secure_request(
        url, 'post',
        data=data, files=files, headers=headers, raw=raw,
        send_as_json=send_as_json, content_type=content_type,
        **request_kwargs)
POST request to AmigoCloud endpoint .
16,441
def upload_gallery_photo(self, gallery_id, source_amigo_id, file_obj, chunk_size=CHUNK_SIZE, force_chunked=False, metadata=None):
    """Upload a photo to a dataset's gallery.

    *file_obj* may be a path string or an open file object; the filename
    sent to the server is its basename. Extra *metadata* keys are merged
    into the upload form data.
    """
    simple_upload_url = 'related_tables/%s/upload' % gallery_id
    chunked_upload_url = 'related_tables/%s/chunked_upload' % gallery_id
    data = {'source_amigo_id': source_amigo_id}
    # Python 3 fix: `basestring` no longer exists; test for str instead.
    if isinstance(file_obj, str):
        data['filename'] = os.path.basename(file_obj)
    else:
        data['filename'] = os.path.basename(file_obj.name)
    if metadata:
        data.update(metadata)
    return self.upload_file(simple_upload_url, chunked_upload_url, file_obj,
                            chunk_size=chunk_size, force_chunked=force_chunked,
                            extra_data=data)
Upload a photo to a dataset s gallery .
16,442
def listen_user_events(self):
    """Authenticate the websocket session so user-level events are received.

    :raises AmigoCloudError: when no user is logged in
    """
    if not self._user_id:
        raise AmigoCloudError(self.error_msg['logged_in_websockets'])
    session = self.get('/me/start_websocket_session')['websocket_session']
    auth_data = {'userid': self._user_id, 'websocket_session': session}
    self.amigosocket.emit('authenticate', auth_data)
Authenticate to start listening to user events .
16,443
def listen_dataset_events(self, owner_id, project_id, dataset_id):
    """Authenticate the websocket session for one dataset's events.

    :raises AmigoCloudError: when no user is logged in
    """
    if not self._user_id:
        raise AmigoCloudError(self.error_msg['logged_in_websockets'])
    url = '/users/%s/projects/%s/datasets/%s/start_websocket_session'
    session = self.get(url % (owner_id, project_id, dataset_id))['websocket_session']
    auth_data = {'userid': self._user_id,
                 'datasetid': dataset_id,
                 'websocket_session': session}
    self.amigosocket.emit('authenticate', auth_data)
Authenticate to start using dataset events .
16,444
def build_markdown_table(headers, rows, row_keys=None):
    """Build a column-aligned markdown table.

    :param headers: mapping of column key -> header text
    :param rows: list of mappings with the same keys
    :param row_keys: explicit column order; defaults to the headers' order
    :return: the full table text, newline-terminated
    """
    row_maxes = _find_row_maxes(headers, rows)
    if not row_keys:
        row_keys = [key for key, value in headers.items()]
    lines = [_build_row(headers, row_maxes, row_keys),
             _build_separator(row_maxes, row_keys)]
    lines.extend(_build_row(row, row_maxes, row_keys) for row in rows)
    return '\n'.join(lines) + '\n'
Build a lined up markdown table .
16,445
def write_to_path(self, path, suffix='', format='png', overwrite=False):
    """Write each row's image under path/<project_name>/<sample_name>/,
    named ``<frame_name>[_<suffix>].<format>``.

    :raises ValueError: when *path* exists and *overwrite* is False
    """
    if os.path.exists(path) and overwrite is False:
        raise ValueError("Error: use ovewrite=True to overwrite images")
    if not os.path.exists(path):
        os.makedirs(path)
    for _, row in self.iterrows():
        sample_dir = os.path.join(path, row['project_name'], row['sample_name'])
        if not os.path.exists(sample_dir):
            os.makedirs(sample_dir)
        if suffix == '':
            basename = row['frame_name'] + '.' + format
        else:
            basename = row['frame_name'] + '_' + suffix + '.' + format
        imageio.imwrite(os.path.join(sample_dir, basename), row['image'], format=format)
Output the dataframe's image column to a directory structured by project -> sample, with files named by frame
16,446
def build_segmentation_image(self, schema, background=(0, 0, 0, 0)):
    """Compose an RGBA segmentation image from a list of layer definitions.

    Each *schema* entry supplies subset_logic, edge_color, watershed_steps
    and fill_color; layers are merged in order on top of a *background*
    RGBA fill.
    """
    cummulative = self.copy()
    def _set_blank(img, blank):
        # Fill the whole image with the background colour.
        # NOTE(review): `img[:][:] = blank` relies on the first slice being
        # a view (true for numpy arrays); `img[:] = blank` is the usual form.
        img[:][:] = blank
        return img
    cummulative['merged'] = cummulative.apply(lambda x: _set_blank(np.zeros(list(x['shape']) + [4]), background), 1)
    for layer in schema:
        if self.verbose:
            sys.stderr.write("Calculating layer " + str(layer) + "\n")
        images = self.get_outline_images(subset_logic=layer['subset_logic'],
                                         edge_color=layer['edge_color'],
                                         watershed_steps=layer['watershed_steps'],
                                         fill_color=layer['fill_color'])
        # Merge this layer's image onto the running composite: rename the
        # running image to 'old', join in the new layer, blend, and rename back.
        cummulative = cummulative.rename(columns={'merged': 'old'})
        cummulative = cummulative.merge(images, on=list(self.columns))
        cummulative['new'] = cummulative.apply(lambda x: _merge_images(x['merged'], x['old']), 1)
        cummulative = cummulative.drop(columns=['old', 'merged']).rename(columns={'new': 'merged'})
    cummulative = cummulative.rename(columns={'merged': 'image'})
    return SegmentationImageOutput(cummulative)
Put together an image . Defined by a list of layers with RGBA colors
16,447
def valid(number):
    """Return True if *number* (a string of digits) passes the Luhn check.

    Behaviour is undefined when *number* contains non-numeric characters.
    """
    digits = [ord(ch) - ord('0') for ch in number]
    # Digits in odd positions from the right are summed as-is.
    total = sum(digits[-1::-2])
    # Digits in even positions from the right are doubled, with 9 subtracted
    # from any two-digit result.
    for digit in digits[-2::-2]:
        doubled = digit * 2
        total += doubled - 9 if doubled > 9 else doubled
    return total % 10 == 0
Returns true if the number string is luhn valid and false otherwise . The number string passed to the function must contain only numeric characters otherwise behavior is undefined .
16,448
def create_publication_assistant(self, **args):
    """Create an assistant for making PID requests for a dataset and its files.

    Required kwargs: drs_id, version_number, is_replica.

    Raises:
        esgfpid.exceptions.ArgumentError: if thredds_service_path or
            data_node were not configured on this connector.
    """
    logdebug(LOGGER, 'Creating publication assistant..')
    mandatory_args = ['drs_id', 'version_number', 'is_replica']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    # These two are optional at connector construction but mandatory here.
    if self.__thredds_service_path is None:
        msg = 'No thredds_service_path given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # NOTE(review): the solr-switched-off case is deliberately a no-op here;
    # the assistant handles the missing solr access itself.
    if self.__coupler.is_solr_switched_off():
        pass
    assistant = esgfpid.assistant.publish.DatasetPublicationAssistant(
        drs_id=args['drs_id'],
        version_number=args['version_number'],
        thredds_service_path=self.__thredds_service_path,
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        is_replica=args['is_replica'],
        consumer_solr_url=self.__consumer_solr_url)
    logdebug(LOGGER, 'Creating publication assistant.. done')
    return assistant
Create an assistant for a dataset that allows to make PID requests for the dataset and all of its files .
16,449
def unpublish_one_version(self, **args):
    """Send a PID update request unpublishing one dataset version.

    Optional kwargs (filled with None if absent): handle, drs_id,
    version_number — the assistant resolves whichever are given.

    Raises:
        esgfpid.exceptions.ArgumentError: if no data_node was configured.
    """
    optional_args = ['handle', 'drs_id', 'version_number']
    esgfpid.utils.add_missing_optional_args_with_value_none(args, optional_args)
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for unpublication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    assistant = esgfpid.assistant.unpublish.AssistantOneVersion(
        drs_id=args['drs_id'],
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string())
    assistant.unpublish_one_dataset_version(handle=args['handle'], version_number=args['version_number'])
Sends a PID update request for the unpublication of one version of a dataset currently published at the given data node .
16,450
def unpublish_all_versions(self, **args):
    """Send a PID update request unpublishing all versions of a dataset.

    Required kwargs: drs_id.

    Raises:
        esgfpid.exceptions.ArgumentError: if no data_node was configured.
    """
    mandatory_args = ['drs_id']
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    if self.__data_node is None:
        msg = 'No data_node given (but it is mandatory for publication)'
        logwarn(LOGGER, msg)
        raise esgfpid.exceptions.ArgumentError(msg)
    # Without solr we cannot enumerate the versions ourselves; the message
    # consumer must resolve them (log only, not an error).
    if self.__coupler.is_solr_switched_off():
        msg = 'Unpublication of all versions. Without solr access, we cannot identify the versions, so the consumer will have to take care of this.'
        logdebug(LOGGER, msg)
    assistant = esgfpid.assistant.unpublish.AssistantAllVersions(
        drs_id=args['drs_id'],
        data_node=self.__data_node,
        prefix=self.prefix,
        coupler=self.__coupler,
        message_timestamp=esgfpid.utils.get_now_utc_as_formatted_string(),
        consumer_solr_url=self.__consumer_solr_url)
    assistant.unpublish_all_dataset_versions()
Sends a PID update request for the unpublication of all versions of a dataset currently published at the given data node .
16,451
def add_errata_ids(self, **args):
    """Add errata ids to a dataset handle record.

    Required non-None kwargs: drs_id, version_number, errata_ids.
    """
    mandatory_args = ['drs_id', 'version_number', 'errata_ids']
    # Both present and non-None are enforced.
    esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
    esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
    assistant = esgfpid.assistant.errata.ErrataAssistant(coupler=self.__coupler, prefix=self.prefix)
    assistant.add_errata_ids(drs_id=args['drs_id'], version_number=args['version_number'], errata_ids=args['errata_ids'])
Add errata ids to a dataset handle record .
16,452
def make_handle_from_drsid_and_versionnumber(self, **args):
    """Build the handle string for a dataset from its drs id and version
    number, using the prefix this library was initialized with.

    Any caller-supplied 'prefix' kwarg is overridden by the connector's own.
    """
    kwargs = dict(args, prefix=self.prefix)
    return esgfpid.utils.make_handle_from_drsid_and_versionnumber(**kwargs)
Create a handle string for a specific dataset based on its dataset id and version number and the prefix passed to the library at initializing .
16,453
def mousePressEvent(self, event):
    """Record the press position so a later move can decide to start a drag."""
    super(AbstractDragView, self).mousePressEvent(event)
    self.dragStartPosition = event.pos()
saves the drag position so we know when a drag should be initiated
16,454
def dragLeaveEvent(self, event):
    """Clear the drop-cursor line when a drag leaves the view."""
    super(AbstractDragView, self).dragLeaveEvent(event)
    self.dragline = None
    # Repaint so the stale cursor line disappears immediately.
    self.viewport().update()
    event.accept()
Clears drop cursor line
16,455
def childEvent(self, event):
    """Catch items dropped off the edge of the view and reinsert them at
    their original position."""
    super(AbstractDragView, self).childEvent(event)
    if event.type() == QtCore.QEvent.ChildRemoved:
        # originalPos is only set while a drag is in flight; limbo_component
        # holds the item that was lifted out of the model.
        if self.originalPos is not None:
            selected = self.limbo_component
            self.model().insertItem(self.originalPos, selected)
            self.originalPos = None
            self.dragStartPosition = None
            self.viewport().update()
Catches items dropped off edge of view reinserts at original position
16,456
def mouseReleaseEvent(self, event):
    """Reset the recorded drag start position once the button is released."""
    super(AbstractDragView, self).mouseReleaseEvent(event)
    self.dragStartPosition = None
Resets the drag start position
16,457
async def setup(self):
    """Create the MongoDB collections (and index) if they do not exist.

    Returns:
        True if at least one collection was created, False if nothing was
        created or an error occurred (errors are logged, not raised).
    """
    try:
        db = await self.db
        collections = await db.list_collection_names()
        created = False
        if self.table_name not in collections:
            logger.info("Creating MongoDB collection [{}]".format(self.table_name))
            await db.create_collection(self.table_name)
            # Compound index supporting (target_id, post_id) lookups.
            await db[self.table_name].create_index([("target_id", DESCENDING), ("post_id", DESCENDING)])
            created = True
        # The control collection is optional (may be falsy/unset).
        if self.control_table_name and self.control_table_name not in collections:
            logger.info("Creating MongoDB control data collection [{}]".format(self.control_table_name))
            await db.create_collection(self.control_table_name)
            created = True
        return created
    except Exception as exc:
        logger.error("[DB] Error when setting up MongoDB collections: {}".format(exc))
        return False
Setting up MongoDB collections if they not exist .
16,458
def setDoc(self, docs):
    """Set the stimulus documentation to display, sorted by start time.

    Args:
        docs: dicts each carrying at least 'start_s' and 'stim_type'.
    """
    docs = sorted(docs, key=lambda k: k['start_s'])
    for doc in docs:
        stim_type = doc['stim_type']
        if not stim_type in self.displayTable:
            continue
        # NOTE(review): this indexes displayTable[stim_type] with stim_type
        # itself, which looks like it should be a different key — confirm
        # against how displayTable is populated.
        if not stim_type in self.displayTable[stim_type]:
            continue
        # The .get() default is effectively dead here because membership
        # was already checked above.
        display_attributes = self.displayTable.get(stim_type, self.defaultAttributes)
        self.lyt.addWidget(ComponentDetailFrame(doc, display_attributes))
Sets the documentation to display
16,459
def setComponents(self, components):
    """Add an attribute-checker widget for each component.

    NOTE(review): despite the original description ("clears and sets"),
    nothing here removes previously added widgets — confirm whether the
    caller clears the layout first.
    """
    layout = self.layout()
    for comp in components:
        attrWidget = ComponentAttributerChecker(comp)
        layout.addWidget(attrWidget)
Clears and sets the components contained in this widget
16,460
def setCheckedDetails(self, checked):
    """Check the component widgets listed in *checked*.

    Args:
        checked: mapping of stimType -> value passed to setChecked.
    """
    layout = self.layout()
    for i in range(layout.count()):
        w = layout.itemAt(i).widget()
        if w.stimType in checked:
            w.setChecked(checked[w.stimType])
Sets which components are checked
16,461
def getCheckedDetails(self):
    """Return {stimType: checked-attributes} for every component widget."""
    attrs = {}
    layout = self.layout()
    for i in range(layout.count()):
        w = layout.itemAt(i).widget()
        attrs[w.stimType] = w.getChecked()
    return attrs
Gets the currently checked components and checked attributes
16,462
def getChecked(self):
    """Return the label text of every checked attribute widget as a list of str."""
    attrs = []
    layout = self.layout()
    for i in range(layout.count()):
        w = layout.itemAt(i).widget()
        if w.isChecked():
            attrs.append(str(w.text()))
    return attrs
Gets the checked attributes
16,463
def headerData(self, section, orientation, role):
    """Return the horizontal header label for *section*.

    Implicitly returns None for vertical headers or non-display roles,
    which Qt treats as "no data".
    """
    if role == QtCore.Qt.DisplayRole:
        if orientation == QtCore.Qt.Horizontal:
            return self.headers[section]
Get the Header for the columns in the table
16,464
def cursor(self, pos):
    """Return a horizontal QLine at the nearest row boundary to *pos*.

    A position below all rows (indexAt returns row -1) maps to the line
    after the last row.
    """
    row = self.indexAt(pos).row()
    if row == -1:
        row = self.model().rowCount()
    # All rows are assumed the same height as row 0.
    row_height = self.rowHeight(0)
    y = row_height * row
    x = self.width()
    return QtCore.QLine(0, y, x, y)
Returns a line at the nearest row split between tests .
16,465
def mousePressEvent(self, event):
    """Launch an edit when the first column of a cell is clicked; otherwise
    pass the event to the superclass."""
    index = self.indexAt(event.pos())
    if index.isValid():
        if index.column() == 0:
            self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event)
        else:
            super(ProtocolView, self).mousePressEvent(event)
Launches edit of cell if first column clicked otherwise passes to super class
16,466
def run():
    """List an organization's repos alongside their team names.

    Prints one line per repo whose team count is within [mint, maxt],
    with team names joined by the configured delimiter. Hidden teams
    (args.hide) are excluded from both the listing and the count.
    """
    args = parse_args()
    codetools.setup_logging(args.debug)
    global g
    g = pygithub.login_github(token_path=args.token_path, token=args.token)
    if not args.hide:
        args.hide = []
    org = g.get_organization(args.organization)
    try:
        repos = list(org.get_repos())
    except github.RateLimitExceededException:
        # Let rate-limit errors surface untouched so callers can back off.
        raise
    except github.GithubException as e:
        msg = 'error getting repos'
        raise pygithub.CaughtOrganizationError(org, e, msg) from None
    for r in repos:
        try:
            teamnames = [t.name for t in r.get_teams() if t.name not in args.hide]
        except github.RateLimitExceededException:
            raise
        except github.GithubException as e:
            msg = 'error getting teams'
            raise pygithub.CaughtRepositoryError(r, e, msg) from None
        # A missing/negative maxt means "no upper bound" (use the count itself).
        maxt = args.maxt if (args.maxt is not None and args.maxt >= 0) else len(teamnames)
        if args.debug:
            print("MAXT=", maxt)
        if args.mint <= len(teamnames) <= maxt:
            print(r.name.ljust(40) + args.delimiter.join(teamnames))
List repos and teams
16,467
def create_bar_chart(self, x_labels, y_values, y_label):
    """Create a single-series bar chart and return its axes.

    Args:
        x_labels: category labels drawn rotated 90 degrees.
        y_values: one bar height per label.
        y_label: y-axis caption.
    """
    # 0.25 bottom margin leaves room for the rotated labels.
    self.setup(0.25)
    ax1 = self.get_ax()
    ax1.set_xticks(list(range(len(x_labels))))
    ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90)
    plt.ylabel(y_label)
    x_pos = range(len(x_labels))
    plt.bar(x_pos, y_values, align="center")
    return ax1
Creates bar char
16,468
def create_multiple_bar_chart(self, x_labels, mul_y_values, mul_y_labels, normalize=False):
    """Create a grouped bar chart with one colored series per y-list.

    Args:
        x_labels: shared category labels (rotated 90 degrees).
        mul_y_values: list of series, each one bar height per label.
        mul_y_labels: legend entry per series.
        normalize: rescale each series via normalize_array before plotting.
    """
    self.setup(0.25)
    ax1 = self.get_ax()
    ax1.set_xticks(list(range(len(x_labels))))
    ax1.set_xticklabels([x_labels[i] for i in range(len(x_labels))], rotation=90)
    y_counts = len(mul_y_values)
    colors = cm.rainbow(np.linspace(0, 1, y_counts))
    # Series are spread symmetrically inside a 0.6-wide slot per category.
    max_bar_width = 0.6
    bar_width = max_bar_width / y_counts
    x_shifts = np.linspace(0, max_bar_width, y_counts) - max_bar_width * 0.5
    ax_series = []
    for i in range(y_counts):
        x_pos = range(len(x_labels))
        x_pos = np.array(x_pos) + x_shifts[i]
        if normalize:
            y_values = normalize_array(mul_y_values[i])
        else:
            y_values = mul_y_values[i]
        ax_series.append(ax1.bar(x_pos, y_values, width=bar_width, align="center", color=colors[i]))
    ax1.legend(ax_series, mul_y_labels)
    return ax1
Creates bar chart with multiple lines
16,469
def file_parts(self):
    """Return every FileToken in self.parts, descending one level into
    iterable parts; non-iterable parts are tested directly."""
    tokens = []
    for part in self.parts:
        try:
            candidates = [sub for sub in part]
        except TypeError:
            # Not iterable: the part itself is the only candidate.
            candidates = [part]
        tokens.extend(c for c in candidates if isinstance(c, FileToken))
    return tokens
Returns a list of the file tokens in the list of parts .
16,470
def update_dependent_files(self, prev_commands=None):
    """Point this command's input tokens at the evaluated outputs of
    earlier commands.

    Every input part that compares equal to an output part of a previous
    command gets its ``filename`` rewritten to that output's ``eval()``.

    Args:
        prev_commands: earlier commands whose outputs may feed this
            command's inputs (defaults to none).
    """
    # None default replaces the original mutable-default ``prev_commands=[]``
    # (a shared-list pitfall), with identical call-site behavior.
    if prev_commands is None:
        prev_commands = []
    for command in prev_commands:
        for my_input in self.input_parts:
            for their_output in command.output_parts:
                if their_output == my_input:
                    my_input.filename = their_output.eval()
Update the command s dependencies based on the evaluated input and output of previous commands .
16,471
def eval(self):
    """Evaluate every part and join them into one shell-script string.

    Parts exposing an ``eval()`` method are evaluated; anything else
    (plain strings) is used verbatim. A trailing space is appended to any
    piece not ending in a newline so adjacent pieces stay separated; the
    final result is stripped.
    """
    pieces = []
    for part in self.parts:
        try:
            result = part.eval()
        except AttributeError:
            result = part
        # endswith() also handles an empty piece, where the original
        # result[-1] lookup raised IndexError.
        if not result.endswith('\n'):
            result += ' '
        pieces.append(result)
    return ''.join(pieces).strip()
Evaluate the given job and return a complete shell script to be run by the job manager .
16,472
def add_type(cls, typ):
    """Register a type name for jb_reftrack nodes on the class-wide list.

    Args:
        typ: the type to register; must be a string.

    Raises:
        TypeError: if *typ* is not a string.
    """
    # NOTE(review): ``basestring`` exists only on Python 2, so this module
    # is Python-2 only as written.
    if not isinstance(typ, basestring):
        raise TypeError("The type should be a string. But is %s" % type(typ))
    cls.types.append(typ)
Register a type for jb_reftrack nodes .
16,473
def transform(transform_func):
    """Decorator factory: post-process a function's return value with
    *transform_func*.

    Example: ``@transform(str)`` makes the decorated function return the
    string form of its result.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            result = func(*args, **kwargs)
            return transform_func(result)
        return wrapper
    return decorator
Apply a transformation to a functions return value
16,474
def subkey(dct, keys):
    """Follow *keys* through nested dicts and return the entry they reach.

    At least one key is required (an empty list raises IndexError, as in
    the recursive original).
    """
    current = dct[keys[0]]
    for key in keys[1:]:
        current = current[key]
    return current
Get an entry from a dict of dicts by the list of keys to follow
16,475
def get_driver(self, name, version):
    """Authenticate and build a Google API service driver.

    Args:
        name: API name passed to discovery.build.
        version: API version string.
    """
    user_credentials = self.get_user_credentials()
    return discovery.build(name, version, http=self.authenticate(user_credentials))
Authenticates and creates new API driver to perform scope stuff
16,476
def start_chart(self):
    """Begin ongoing chart-style acquisition.

    Opens a continuous dataset named after the current chart title, bumps
    the title for the next run, and starts continuous playback/acquisition
    on the configured analog-input channels.
    """
    self.current_dataset_name = self.chart_name
    self.datafile.init_data(self.current_dataset_name, mode='continuous')
    # Pre-increment so the next chart gets a fresh dataset name.
    self.chart_name = increment_title(self.chart_name)
    # NOTE(review): the second channel is hard-coded — confirm it should
    # not come from configuration like self.aichan does.
    self.player.start_continuous([self.aichan, u"PCI-6259/ai31"])
Begin on - going chart style acqusition
16,477
def qtdoc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
    """Docutils role linking to a Qt 4.8 class's documentation.

    *text* is either "Label" (links to label.html) or
    "Label<Class.method>" (links to Class.html#method).
    Returns the ([nodes], [messages]) pair docutils roles expect.
    (The mutable defaults follow the docutils role signature convention.)
    """
    base = 'http://qt-project.org/doc/qt-4.8/'
    match = re.search('([^<]+)(<[^<>]+>)?', text)
    if match is None:
        raise ValueError
    label = match.group(1)
    # lastindex == 2 means the optional <Class.method> group matched.
    if match.lastindex == 2:
        clsmeth = match.group(2)[1:-1]
        cls, meth = clsmeth.split('.')
        ref = base + cls + '.html#' + meth
    else:
        ref = base + label.lower() + '.html'
    node = nodes.reference(rawtext, label, refuri=ref, **options)
    return [node], []
Links to a Qt class s doc
16,478
def update_func_body(original, updater=None):
    """Rewrite every C function body in *original* using *updater*.

    For each ``name(...){`` match, the balanced body is extracted, passed
    to ``updater(body, name)`` if given, and spliced back with a newline
    inserted after the opening brace.

    NOTE(review): any text after the last function's closing brace is
    dropped from the returned string — confirm whether trailing content
    should be appended after the loop.
    """
    updated = ''
    regex = r'([_\w][_\w\d]*)\s*\(.*\)\s*\{'
    match = re.search(regex, original)
    while match:
        name = match.group(1)
        logging.debug(_('Found candidate: %s'), name)
        start = match.end()
        # end points at the brace balancing the one the regex consumed.
        end = start + find_balance_index(original[start:])
        body = original[start:end]
        if updater:
            body = updater(body, name)
        # original[end] re-appends the closing brace itself.
        updated += original[:start] + '\n' + body + original[end]
        original = original[end + 1:]
        match = re.search(regex, original)
    return updated
Update all function body using the updating function .
16,479
def find_balance_index(source, start='{', end='}'):
    """Return the index in *source* of the delimiter balancing an
    already-open *start* (the depth begins at 1).

    Raises:
        RuntimeError: if the delimiters never balance.
    """
    depth = 1
    for position, char in enumerate(source):
        if char == start:
            depth += 1
        elif char == end:
            depth -= 1
            if depth == 0:
                return position
    raise RuntimeError('This should not happen: Balance Not Found')
Get the first balance index .
16,480
def transform_sources(self, sources, with_string=False):
    """Replace strings and API calls in every source file and collect the
    functions/modules the replacements require.

    Args:
        sources: {filename: C source} mapping, mutated in place.
        with_string: NOTE(review): currently unused — confirm intent.

    Returns:
        (sources, funcs): the rewritten sources (each prefixed with
        EXTERN_AND_SEG) and the function list from build_funcs.
    """
    modules = {}
    # replace_source accumulates into `modules` as update_func_body walks
    # each function body.
    updater = partial(self.replace_source, modules=modules, prefix='string_')
    for filename in sources:
        updated = update_func_body(sources[filename], updater)
        sources[filename] = EXTERN_AND_SEG + updated
    logging.debug('modules: %s', modules)
    return sources, self.build_funcs(modules)
Get the defintions of needed strings and functions after replacement .
16,481
def replace_source(self, source, name, modules, prefix):
    """Rewrite one C function body: route API calls through the windll
    table and lift string literals into numbered variables.

    Args:
        source: the function body to rewrite.
        name: the function's name ('main' additionally gets init() prepended).
        modules: {dll: [func, ...]} accumulator, updated in place.
        prefix: prefix for generated string variable names.

    Returns:
        String definitions + relocation code + the rewritten body.
    """
    needs_windll = False

    def _func_replacer(match, modules, windll):
        # Replace a known API call with a windll-table dereference and
        # record which dll/function it came from.
        matched = match.group(0)
        if matched in self.BLACKLIST:
            return matched
        module = self.database.query_func_module(matched)
        if module:
            try:
                modules[module[0]] += [module[1]]
            except KeyError:
                modules[module[0]] = [module[1]]
            if windll:
                return '{}->{}.{}'.format(windll, *module)
            return '{}->{}'.format(*module)
        return matched

    replacer = partial(_func_replacer, modules=modules, windll='windll')
    # NOTE(review): this regex is a quick-and-dirty match of identifiers
    # followed by '(' — it can hit declarations and keywords too.
    replaced = re.sub(r'[_\w][_\w\d]*(?=\s*\()', replacer, source)
    if source != replaced:
        needs_windll = True
    str_table = {}

    def _string_replacer(match):
        # Swap a "literal" for a numbered variable, reusing numbers for
        # duplicate literals.
        matched = match.group()[1:-1]
        try:
            number = str_table[matched]
        except KeyError:
            number = len(str_table) + 1
            str_table.update({matched: number})
        return '{}{}'.format(prefix, number)

    replaced = re.sub(r'".+?"', _string_replacer, replaced)
    strings, relocs = self.build_strings(str_table, prefix)
    strings = ''.join(strings).strip()
    windll32 = reloc_var('windll', 'reloc_delta', True, 'windll_t')
    if needs_windll:
        relocs += [windll32]
    if strings:
        strings = '\n' + strings
        # String variables also need the windll relocation even if no API
        # call forced it above.
        if not needs_windll:
            relocs += [windll32]
            needs_windll = True
    windll64 = ''
    if needs_windll:
        windll64 = '{0} *{1} = &_{1};\n'.format('windll_t', 'windll')
    relocs = reloc_both(''.join(relocs), windll64)
    if name in ['main']:
        replaced = '\ninit();' + replaced
    return strings + relocs + replaced
Scan C source code for string literals as well as function calls and do replacement using the specified replacing function . Note that the regular expression currently used for strings is naive or quick and dirty .
16,482
def build_funcs(modules):
    """Build the flat list of used modules and functions for later consumption.

    The list starts with 'kernel32_' and its deduplicated functions, always
    ensuring GetProcAddress (and LoadLibraryA when other dlls are needed)
    are present; every other dll follows as 'name_' plus its functions.
    """
    kernel32 = ['kernel32_']
    try:
        kernel32 += remove_dups(modules['kernel32'])
    except KeyError:
        # kernel32 itself unused, but loading any other dll still needs
        # LoadLibraryA.
        if len(modules) and 'LoadLibraryA' not in kernel32:
            kernel32.insert(1, 'LoadLibraryA')
    if len(modules) > 1 and 'LoadLibraryA' not in kernel32:
        kernel32.insert(1, 'LoadLibraryA')
    if 'GetProcAddress' not in kernel32:
        kernel32.insert(1, 'GetProcAddress')
    logging.debug('kernel32: %s', kernel32)
    for module, funcs in modules.items():
        logging.debug('%s: %s', module, funcs)
        if module != 'kernel32':
            kernel32.extend([module + '_'] + remove_dups(funcs))
    return kernel32
Build a used functions and modules list for later consumption .
16,483
def build_strings(strings, prefix):
    """Construct C string definitions and relocation pointers from the
    string table accumulated during replacement.

    Args:
        strings: {literal: number} table.
        prefix: variable-name prefix; each entry becomes prefix+number.

    Returns:
        (definitions, relocations): two parallel lists.
    """
    definitions = []
    relocations = []
    for value, number in sort_values(strings):
        label = prefix + str(number)
        definitions.append(make_c_str(label, value))
        relocations.append(reloc_ptr(label, 'reloc_delta', 'char *'))
    return definitions, relocations
Construct string definitions according to the previously maintained table .
16,484
def add_file(self, **args):
    """Add one file's information to the set to be published with this dataset.

    Required kwargs: file_name, file_handle, file_size, checksum,
    publish_path, checksum_type, file_version.

    The size is coerced to int, the version to str, and the handle syntax
    is validated/corrected before the file is recorded.
    """
    # Files may only be added while the assistant is in the right state.
    self.__check_if_adding_files_allowed_right_now()
    mandatory_args = ['file_name', 'file_handle', 'file_size', 'checksum', 'publish_path', 'checksum_type', 'file_version']
    utils.check_presence_of_mandatory_args(args, mandatory_args)
    self.__enforce_integer_file_size(args)
    self.__enforce_string_file_version(args)
    self.__check_and_correct_handle_syntax(args)
    self.__add_file(**args)
Adds a file s information to the set of files to be published in this dataset .
16,485
def run_program(program, *args):
    """Run *program* with *args* via subprocess.check_output.

    Returns:
        The program's decoded stdout (``universal_newlines=True``).
        The original discarded this value, defeating the point of
        wrapping check_output; returning it is backward compatible.

    Raises:
        subprocess.CalledProcessError: if the program exits non-zero.
    """
    real_args = [program]
    real_args.extend(args)
    logging.debug(_('check_output arguments: %s'), real_args)
    return check_output(real_args, universal_newlines=True)
Wrap subprocess . check_output to make life easier .
16,486
def get_parent_dir(name):
    """Return the directory two levels above *name*, falling back to the
    current working directory when *name* has no such ancestor."""
    grandparent = os.path.dirname(os.path.dirname(name))
    if not grandparent:
        return os.path.abspath('.')
    return grandparent
Get the parent directory of a filename .
16,487
def split_ext(path, basename=True):
    """Split *path* into (root, extension).

    With basename=True (the default) the directory part is stripped first,
    so only the final component is split.
    """
    target = os.path.basename(path) if basename else path
    return os.path.splitext(target)
Wrap them to make life easier .
16,488
def ad_hoc_magic_from_file(filename, **kwargs):
    """Ad-hoc stand-in for magic.from_file: classify by magic bytes.

    Only ELF and MZ (DOS/PE) headers are recognized; extra keyword
    arguments are accepted for signature compatibility and ignored.

    Raises:
        NotImplementedError: for any other file content.
    """
    with open(filename, 'rb') as stream:
        head = stream.read(16)
        if head.startswith(b'\x7fELF'):
            return b'application/x-executable'
        if head.startswith(b'MZ'):
            return b'application/x-dosexec'
        raise NotImplementedError()
Ad - hoc emulation of magic . from_file from python - magic .
16,489
def expand_path(*paths):
    """Join *paths* onto the directory containing the executed script."""
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    return os.path.join(script_dir, *paths)
Expand the path with the directory of the executed file .
16,490
def translate_filenames(filenames):
    """Convert *filenames* (in place) from Linux paths to Windows paths.

    On Windows the names are already native and left untouched.

    Returns:
        The *filenames* list. The original returned it only on the
        Windows branch and None otherwise; returning it in both branches
        is backward compatible and consistent.
    """
    if is_windows():
        return filenames
    for index, filename in enumerate(filenames):
        filenames[index] = vboxsf_to_windows(filename)
    return filenames
Convert filenames from Linux to Windows .
16,491
def vboxsf_to_windows(filename, letter='f:'):
    """Map a Linux path to its Windows drive-letter equivalent.

    The path is made absolute, the user's home prefix is replaced by
    *letter*, and forward slashes become backslashes.
    """
    home = os.path.expanduser('~')
    absolute = os.path.abspath(filename)
    mapped = absolute.replace(home, letter)
    return mapped.replace('/', '\\')
Convert the Linux path name to a Windows one .
16,492
def read_files(filenames, with_name=False):
    """Read every file in *filenames*.

    Returns:
        A list of file contents, or a {filename: content} dict when
        *with_name* is true.
    """
    contents = list(map(read_file, filenames))
    if with_name:
        return dict(zip(filenames, contents))
    return contents
Read many files .
16,493
def write_files(text, where='.'):
    """Write each {filename: content} entry of *text* under directory *where*."""
    for filename, content in text.items():
        target = os.path.join(where, filename)
        write_file(target, content)
Write many files .
16,494
def write_file(filename, text):
    """Write *text* to *filename*.

    Returns:
        True on success; False (after logging) when the path is not
        writable or not a directory.
    """
    logging.debug(_('Writing file: %s'), filename)
    try:
        with open(filename, 'w') as writable:
            writable.write(text)
    except (PermissionError, NotADirectoryError):
        logging.error(_('Error writing file: %s'), filename)
        return False
    else:
        return True
Write text to a file .
16,495
def stylify_files(text):
    """Run stylify_code over every entry of *text*, mutating the mapping
    in place and returning it."""
    for filename in list(text):
        text[filename] = stylify_code(text[filename])
    return text
Stylify many files .
16,496
def stylify_code(code):
    """Format C source with astyle.

    On any failure (astyle missing, non-zero exit, bad input type) the
    original *code* is returned unchanged.
    """
    command = ['astyle', '--max-code-length=69', '--indent=spaces=2']
    try:
        return check_output(command, universal_newlines=True, input=code)
    except (OSError, CalledProcessError, TypeError):
        logging.debug(_('failed to stylify code'))
        return code
Stylify the C source code using astyle .
16,497
def sort_item(iterable, number, reverse=False):
    """Return *iterable* sorted by each element's *number*-th item."""
    key = itemgetter(number)
    return sorted(iterable, key=key, reverse=reverse)
Sort the itertable according to the given number item .
16,498
def remove_by(keys, original):
    """Remove ``original[i]`` (in place) for every falsy ``keys[i]``.

    Deletion is positional and performed from the back, so duplicate
    values elsewhere in the list are never removed by mistake — the old
    ``list.remove(value)`` approach deleted the first *equal* value, not
    the flagged position. Items of *original* beyond ``len(keys)`` are
    left untouched, as before.
    """
    for index in range(len(keys) - 1, -1, -1):
        if not keys[index]:
            del original[index]
Remove items in a list according to another list .
16,499
def group_by(iterable, key_func):
    """Wrap itertools.groupby, yielding consecutive groups paired two at
    a time: (group0, group1), (group2, group3), ...

    ``groups`` is a single generator referenced twice, so zip() pulls
    alternate items from it — that is what produces the pairing. Each
    group is materialized with list() inside the generator, so groupby's
    shared-iterator caveat does not bite here. Keys are discarded.
    """
    groups = (list(sub) for key, sub in groupby(iterable, key_func))
    return zip(groups, groups)
Wrap itertools . groupby to make life easier .