idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
16,500
def get_attrs(self, *names):
    """Return a list containing one attribute value per name in *names*."""
    return [getattr(self, attr_name) for attr_name in names]
Get multiple attributes from multiple objects .
16,501
def target_id(self):
    """Return the id of the target this post is syndicated to.

    Lazily pulls the value from the cached ``_existing`` mapping the
    first time it is needed.
    """
    if not self._target_id and self._existing:
        self._target_id = self._existing.get("target_id")
    return self._target_id
Returns the id the target to which this post has to be syndicated .
16,502
def with_formatter(formatter):
    """Apply *formatter* to the return value of the decorated function.

    The wrapped callable skips formatting and returns the raw value when
    called with a truthy ``raw`` keyword argument.
    """
    import functools  # local import keeps this block self-contained

    def _decorator_after_args(unwrapped):
        # functools.wraps preserves the wrapped function's name/docstring,
        # which the original decorator lost.
        @functools.wraps(unwrapped)
        def _wrapped(self, *args, **kwargs):
            logging.debug('unwrapped: %s', unwrapped)
            logging.debug('self: %s', self)
            logging.debug('args: %s', args)
            logging.debug('kwargs: %s', kwargs)
            return_value = unwrapped(self, *args, **kwargs)
            # kwargs.get('raw') is equivalent to the original
            # "'raw' in kwargs and kwargs['raw']" check.
            if kwargs.get('raw'):
                return return_value
            return formatter(return_value)
        return _wrapped
    return _decorator_after_args
Apply a formatter function to the return value of the decorated function.
16,503
def format_info(raw):
    """Build a display string describing the name from ``(results, sense)``."""
    logging.debug(_('raw[0]: %s'), raw[0])
    results, sense = raw
    lines = []
    for entry in results:
        tail = entry[3] + ' ' if entry[3] else ''
        line = '{} {} {} {}'.format(
            entry[0],
            sense.kind_id_to_name(entry[1]),
            sense.file_id_to_name(entry[2]).lower(),
            tail,
        )
        lines.append(line.strip())
    return '\n'.join(lines)
Format a string representing the information concerning the name .
16,504
def format_names(raw):
    """Render ``(header, funcs)`` pairs as lowercase-header sections."""
    if not raw:
        return ''
    sections = []
    for header, funcs in raw:
        names = ' '.join(func[0] for func in funcs)
        sections.append('{}:\n{}'.format(header.lower(), names))
    return '\n'.join(sections)
Format a string representing the names contained in the files .
16,505
def format_kinds(raw):
    """Return the truthy kinds rendered as space-separated "<a> <b>" pairs."""
    return ' '.join('{} {}'.format(*entry) for entry in raw if entry)
Format a string representing the kinds .
16,506
def find_seq_rec(block, name, case_sensitive=True):
    """Return the first sequence record whose 'id' contains *name*.

    Raises ValueError when no record matches.
    """
    if case_sensitive:
        def matches(candidate):
            return name in candidate['id']
    else:
        needle = name.upper()

        def matches(candidate):
            return needle in candidate['id'].upper()
    for rec in block['sequences']:
        if matches(rec):
            return rec
    raise ValueError("No sequence ID matches %s" % repr(name))
Given part of a sequence ID find the first matching record .
16,507
def find_seq_id(block, name, case_sensitive=True):
    """Return the full ID of the first record whose ID contains *name*."""
    return find_seq_rec(block, name, case_sensitive)['id']
Given part of a sequence ID find the first actual ID that contains it .
16,508
def get_consensus(block):
    """Compute a simple majority-vote consensus record for *block*.

    Lowercase (insert) columns are dropped from every row; the most common
    character wins each column, with '-' rendered as 'X'.
    """
    from collections import Counter
    stripped = [
        [ch for ch in row['seq'] if not ch.islower()]
        for row in block['sequences']
    ]
    consensus_chars = []
    for column in zip(*stripped):
        top_char = Counter(column).most_common()[0][0]
        consensus_chars.append('X' if top_char == '-' else top_char)
    assert len(consensus_chars) == block['query_length']
    return {
        'index': 1,
        'id': 'consensus',
        'description': '',
        'dbxrefs': {},
        'phylum': '',
        'taxchar': '',
        'head_len': None,
        'tail_len': None,
        'head_seq': '',
        'tail_seq': '',
        'length': block['query_length'],
        'seq': ''.join(consensus_chars),
    }
Calculate a simple consensus sequence for the block .
16,509
def get_conservation(block):
    """Calculate conservation levels at each consensus position.

    The first record in ``block['sequences']`` is taken as the consensus;
    the remaining aligned rows (lowercase insert columns removed) are
    compared against it column by column.

    Returns a dict mapping 1-based consensus position to the fraction of
    rows matching the consensus character at that position.

    Raises ValueError when a cleaned row's length differs from the
    consensus length.
    """
    consensus = block['sequences'][0]['seq']
    assert all(c.isupper() for c in consensus), "So-called consensus contains indels!"
    cleaned = [
        [c for c in s['seq'] if not c.islower()]
        for s in block['sequences'][1:]
    ]
    height = float(len(cleaned))
    for row in cleaned:
        if len(row) != len(consensus):
            raise ValueError("Aligned sequence length (%s) doesn't match "
                             "consensus (%s)" % (len(row), len(consensus)))
    # Bug fix: wrap in list() so columns is indexable under Python 3,
    # where zip() returns a one-shot iterator (the original relied on
    # Python 2's zip returning a list).
    columns = list(zip(*cleaned))
    return dict((idx + 1, columns[idx].count(cons_char) / height)
                for idx, cons_char in enumerate(consensus))
Calculate conservation levels at each consensus position .
16,510
def get_equivalent_positions(block):
    """Map each consensus position to equivalent residue numbers per sequence.

    The first record in ``block['sequences']`` is the (gap-free) consensus;
    for every remaining record a running residue number is tracked,
    walking through lowercase insert columns and skipping '.'/'-'
    deletion columns.

    Returns a dict of 1-based consensus position -> {sequence id: residue
    number}.

    Raises ValueError for a gapped consensus or duplicate sequence IDs.
    """
    consensus = block['sequences'][0]['seq']
    rest = block['sequences'][1:]
    if '-' in consensus or '.' in consensus:
        raise ValueError("First sequence (consensus?) contains gaps")
    # Residue numbering below keys on sequence ID, so IDs must be unique.
    seen = set()
    dupes = set()
    for rec in rest:
        if rec['id'] in seen:
            dupes.add(rec['id'])
        else:
            seen.add(rec['id'])
    if dupes:
        raise ValueError("Duplicate sequences:\n" + '\n'.join(dupes))
    curr_shift = {}
    curr_resn = {}
    for rec in rest:
        curr_shift[rec['id']] = 0
        curr_resn[rec['id']] = rec['head_len']
    # Bug fix: range() replaces Python-2-only xrange so the function also
    # runs under Python 3 (iteration behavior is identical under Python 2).
    equivalencies = dict((i + 1, {}) for i in range(len(consensus)))
    for i, char in enumerate(consensus):
        assert char.isupper()
        for rec in rest:
            rid = rec['id']
            strposn = i + curr_shift[rid]
            if rec['seq'][strposn].isupper():
                curr_resn[rid] += 1
            elif rec['seq'][strposn].islower():
                # Walk through the insert, shifting this sequence's frame.
                while rec['seq'][strposn].islower():
                    curr_shift[rid] += 1
                    curr_resn[rid] += 1
                    strposn += 1
                curr_resn[rid] += 1
            else:
                # Deletion column: no equivalent residue at this position.
                assert rec['seq'][strposn] in '.-'
                continue
            equivalencies[i + 1][rid] = curr_resn[rid]
    return equivalencies
Create a mapping of equivalent residue positions to consensus .
16,511
def get_inserts(block):
    """Map each sequence ID in *block* to its list of (start, end) inserts.

    Lowercase characters mark insert columns; '-' characters shift the
    residue numbering down by one.
    """
    def _scan(seq, head_len):
        inside = False
        start = None
        gap_count = 0
        for pos, ch in enumerate(seq):
            if ch.islower():
                if not inside:
                    start = head_len + pos + 1 - gap_count
                    inside = True
            else:
                if inside:
                    yield (start, head_len + pos - gap_count)
                    inside = False
            if ch == '-':
                gap_count += 1
    return dict(
        (rec['id'], list(_scan(rec['seq'], rec['head_len'])))
        for rec in block['sequences']
    )
Identify the inserts in each sequence in a block.
16,512
def get_meta_regex(schema='mona'):
    """Create a dict of regex lists for extracting spectra meta data.

    schema: 'mona' or 'massbank'; any other value yields an empty dict.
    Each key maps to a list of alternative patterns, tried in order.
    """
    # Raw strings fix the invalid escape sequences (\d, \s, \D, \$) that
    # the original non-raw literals triggered under Python 3.6+; the
    # resulting pattern strings are byte-identical.
    meta_parse = collections.OrderedDict()
    if schema == 'mona':
        meta_parse['collision_energy'] = [r'^collision energy(?:=|:)(.*)$']
        meta_parse['ms_level'] = [r'^ms.*level(?:=|:)\D*(\d*)$', r'^ms type(?:=|:)\D*(\d*)$', r'^Spectrum_type(?:=|:)\D*(\d*)$']
        meta_parse['accession'] = [r'^accession(?:=|:)(.*)$', r'^DB#(?:=|:)(.*)$']
        meta_parse['resolution'] = [r'^resolution(?:=|:)(.*)$']
        meta_parse['polarity'] = [r'^ion.*mode(?:=|:)(.*)$', r'^ionization.*mode(?:=|:)(.*)$', r'^polarity(?:=|:)(.*)$']
        meta_parse['fragmentation_type'] = [r'^fragmentation.*mode(?:=|:)(.*)$', r'^fragmentation.*type(?:=|:)(.*)$']
        meta_parse['precursor_mz'] = [r'^precursor m/z(?:=|:)\s*(\d*[.,]?\d*)$', r'^precursor.*mz(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['precursor_type'] = [r'^precursor.*type(?:=|:)(.*)$', r'^adduct(?:=|:)(.*)$']
        meta_parse['instrument_type'] = [r'^instrument.*type(?:=|:)(.*)$']
        meta_parse['instrument'] = [r'^instrument(?:=|:)(.*)$']
        meta_parse['copyright'] = [r'^copyright(?:=|:)(.*)$']
        meta_parse['mass_accuracy'] = [r'^mass.*accuracy(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['mass_error'] = [r'^mass.*error(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['origin'] = [r'^origin(?:=|:)(.*)$']
        meta_parse['name'] = [r'^Name(?:=|:)(.*)$']
        meta_parse['splash'] = [r'^splash:(.*)$']
        meta_parse['retention_time'] = [r'^retention.*time(?:=|:)\s*(\d*[.,]?\d*)$']
        meta_parse['retention_index'] = [r'^retention.*index(?:=|:)\s*(\d*[.,]?\d*)$']
    elif schema == 'massbank':
        meta_parse['collision_energy'] = [r'^AC\$MASS_SPECTROMETRY:\s+COLLISION_ENERGY\s+(.*)$']
        meta_parse['ms_level'] = [r'^AC\$MASS_SPECTROMETRY:\s+MS_TYPE\s+\D*(\d*)$']
        meta_parse['accession'] = [r'^ACCESSION:(.*)$']
        meta_parse['resolution'] = [r'^AC\$MASS_SPECTROMETRY:\s+RESOLUTION\s+(.*)$']
        meta_parse['polarity'] = [r'^AC\$MASS_SPECTROMETRY:\s+ION_MODE\s+(.*)$']
        meta_parse['fragmentation_type'] = [r'^AC\$MASS_SPECTROMETRY:\s+FRAGMENTATION_MODE\s+(.*)$']
        meta_parse['precursor_mz'] = [r'^MS\$FOCUSED_ION:\s+PRECURSOR_M/Z\s+(\d*[.,]?\d*)$']
        meta_parse['precursor_type'] = [r'^MS\$FOCUSED_ION:\s+PRECURSOR_TYPE\s+(.*)$']
        meta_parse['instrument_type'] = [r'^AC\$INSTRUMENT_TYPE:\s+(.*)$']
        meta_parse['instrument'] = [r'^AC\$INSTRUMENT:\s+(.*)$']
        meta_parse['copyright'] = [r'^COPYRIGHT:\s+(.*)']
        meta_parse['mass_accuracy'] = [r'^AC\$MASS_SPECTROMETRY:\s+ACCURACY\s+(.*)$']
        meta_parse['mass_error'] = [r'^AC\$MASS_SPECTROMETRY:\s+ERROR\s+(.*)$']
        meta_parse['splash'] = [r'^PK\$SPLASH:\s+(.*)$']
        meta_parse['origin'] = [r'^origin(?:=|:)(.*)$']
        meta_parse['name'] = [r'^RECORD_TITLE:\s+(.*)$']
        meta_parse['retention_time'] = [r'^AC\$CHROMATOGRAPHY:\s+RETENTION.*TIME\s+(\d*[.,]?\d*)$']
        meta_parse['retention_index'] = [r'^AC\$CHROMATOGRAPHY:\s+RETENTION.*INDEX\s+(\d*[.,]?\d*)$']
    return meta_parse
Create a dictionary of regex for extracting the meta data for the spectra
16,513
def get_compound_regex(schema='mona'):
    """Create a dict of regex lists for extracting compound information.

    schema: 'mona' or 'massbank'; any other value yields an empty dict.
    Each key maps to a list of alternative patterns, tried in order.
    """
    # Raw strings fix the invalid escape sequences (\d, \s, \$) that the
    # original non-raw literals triggered under Python 3.6+; the resulting
    # pattern strings are byte-identical.
    meta_parse = collections.OrderedDict()
    if schema == 'mona':
        meta_parse['name'] = [r'^Name(?:=|:)(.*)$']
        meta_parse['inchikey_id'] = [r'^inchikey(?:=|:)(.*)$']
        meta_parse['molecular_formula'] = [r'^molecular formula(?:=|:)(.*)$', r'^formula:(.*)$']
        meta_parse['molecular_weight'] = [r'^MW(?:=|:)(\d*[.,]?\d*)$']
        meta_parse['pubchem_id'] = [r'^pubchem.*cid(?:=|:)(\d*)".*$']
        meta_parse['chemspider_id'] = [r'^chemspider(?:=|:)(\d*)".*$']
        meta_parse['compound_class'] = [r'^compound.*class(?:=|:)(.*)$']
        meta_parse['exact_mass'] = [r'^exact.*mass(?:=|:)(\d*[.,]?\d*)$']
        meta_parse['smiles'] = [r'^SMILES(?:=|:)(.*)$']
        meta_parse['other_names'] = [r'^Synonym(?:=|:)(.*)$']
    elif schema == 'massbank':
        meta_parse['name'] = [r'^CH\$NAME:\s+(.*)$']
        meta_parse['other_names'] = [r'^CH\$NAME:\s+(.*)$']
        meta_parse['inchikey_id'] = [r'^CH\$LINK:\s+INCHIKEY\s+(.*)$']
        meta_parse['molecular_formula'] = [r'^CH\$FORMULA:\s+(.*)$']
        meta_parse['molecular_weight'] = [r'^CH\$MOLECULAR_WEIGHT:\s+(.*)$']
        meta_parse['pubchem_id'] = [r'^CH\$LINK:\s+PUBCHEM\s+CID:(.*)$']
        meta_parse['chemspider_id'] = [r'^CH\$LINK:\s+CHEMSPIDER\s+(.*)$']
        meta_parse['compound_class'] = [r'^CH\$COMPOUND_CLASS:\s+(.*)$']
        meta_parse['exact_mass'] = [r'^CH\$EXACT_MASS:\s+(.*)$']
        meta_parse['smiles'] = [r'^CH\$SMILES:\s+(.*)$']
    return meta_parse
Create a dictionary of regex for extracting the compound information for the spectra
16,514
def handler(self):
    """Run the required analyses in order.

    Pipeline: object creation, profile population, prokka annotation,
    core-gene presence and sequence extraction, allele matching, sequence
    typing, and report creation.
    """
    printtime('Creating and populating objects', self.start)
    self.populate()
    printtime('Populating {} sequence profiles'.format(self.analysistype), self.start)
    self.profiler()
    # Each of these methods manages its own worker threads.
    self.annotatethreads()
    self.cdsthreads()
    self.cdssequencethreads()
    self.allelematchthreads()
    printtime('Determining {} sequence types'.format(self.analysistype), self.start)
    self.sequencetyper()
    printtime('Creating {} reports'.format(self.analysistype), self.start)
    self.reporter()
Run the required analyses
16,515
def annotatethreads(self):
    """Use prokka to annotate each strain, one queue-fed worker per CPU."""
    # Create sample metadata objects and load the FASTA headers.
    self.runmetadata = createobject.ObjectCreation(self)
    self.headers()
    printtime('Performing prokka analyses', self.start)
    # Daemon workers consume samples from self.queue and run self.annotate.
    for i in range(self.cpus):
        threads = Thread(target=self.annotate, args=())
        threads.setDaemon(True)
        threads.start()
    for sample in self.metadata.samples:
        setattr(sample, 'prokka', GenObject())
        sample.prokka.outputdir = os.path.join(sample.general.outputdirectory, 'prokka')
        if not os.path.isdir(sample.prokka.outputdir):
            os.makedirs(sample.prokka.outputdir)
        # Implicit string concatenation: this builds one prokka command line.
        sample.prokka.command = 'prokka {} ' '--force ' '--genus {} ' '--species {} ' '--usegenus ' '--addgenes ' '--prefix {} ' '--locustag {} ' '--outdir {}'.format(sample.general.fixedheaders, self.genus, self.species, sample.name, sample.name, sample.prokka.outputdir)
        self.queue.put(sample)
    # Block until every queued sample has been annotated.
    self.queue.join()
Use prokka to annotate each strain
16,516
def cdsthreads(self):
    """Determine which core genes from a pre-calculated database are present in each strain."""
    # Daemon workers consume samples from self.cdsqueue and run self.cds.
    for _ in range(self.cpus):
        worker = Thread(target=self.cds, args=())
        worker.setDaemon(True)
        worker.start()
    for sample in self.metadata.samples:
        sample[self.analysistype].corepresence = dict()
        self.cdsqueue.put(sample)
    self.cdsqueue.join()
Determines which core genes from a pre-calculated database are present in each strain
16,517
def cdssequencethreads(self):
    """Extract the sequence of each core gene for each strain."""
    # Daemon workers consume samples from self.sequencequeue.
    for _ in range(self.cpus):
        worker = Thread(target=self.cdssequence, args=())
        worker.setDaemon(True)
        worker.start()
    for sample in self.metadata.samples:
        sample[self.analysistype].coresequence = dict()
        self.sequencequeue.put(sample)
    self.sequencequeue.join()
Extracts the sequence of each gene for each strain
16,518
def allelematchthreads(self):
    """Determine the allele of each gene for every sample."""
    # Daemon workers consume samples from self.allelequeue.
    for _ in range(self.cpus):
        worker = Thread(target=self.allelematch, args=())
        worker.setDaemon(True)
        worker.start()
    for sample in self.metadata.samples:
        sample[self.analysistype].allelematches = dict()
        self.allelequeue.put(sample)
    self.allelequeue.join()
Determine allele of each gene
16,519
def remove_path_segments(segments, removes):
    """Remove *removes* from the tail of *segments* and return the rest.

    Both inputs may be normalised in place: a lone '' segment is doubled
    to represent the root path.
    """
    if segments == ['']:
        segments.append('')
    if removes == ['']:
        removes.append('')
    if segments == removes:
        return []
    if len(removes) > len(segments):
        return segments
    tail = list(removes)
    # A leading '' in removes only marks absoluteness; drop it for matching.
    if len(removes) > 1 and removes[0] == '':
        tail.pop(0)
    if tail and tail == segments[-len(tail):]:
        remaining = segments[:len(segments) - len(tail)]
        if removes[0] != '' and remaining:
            remaining.append('')
        return remaining
    return segments
Removes the removes from the tail of segments .
16,520
def join_path_segments(*args):
    """Join several path-segment lists into one, merging '' boundaries."""
    joined = []
    for segments in args:
        if not segments or segments[0] == ['']:
            continue
        if not joined:
            joined.extend(segments)
            continue
        if joined[-1] == '' and (segments[0] != '' or len(segments) > 1):
            # Drop the trailing empty segment before appending more.
            joined.pop(-1)
        elif joined[-1] != '' and segments[0] == '' and len(segments) > 1:
            # Drop the redundant leading empty segment.
            segments.pop(0)
        joined.extend(segments)
    return joined
Join multiple list of path segments
16,521
def increment(index, dims, data_shape):
    """Advance *index* (mutated in place) by *data_shape* within *dims*.

    Raises DataIndexError when the trailing dims disagree with the data
    shape or when the increment overruns the bounds.
    """
    trailing = data_shape[1:]
    for expected, actual in zip(trailing, dims[-1 * len(trailing):]):
        if expected != actual:
            raise DataIndexError()
    pos = len(index) - len(data_shape)
    index[pos] += data_shape[0]
    if index[pos] > dims[pos]:
        raise DataIndexError()
    # Carry overflow into the more significant positions.
    while pos > 0 and index[pos] == dims[pos]:
        index[pos - 1] += 1
        index[pos:] = [0] * len(index[pos:])
        pos -= 1
    return index
Increments a given index according to the shape of the data added
16,522
def error(cls, template, default_params={}, cause=None, stack_depth=0, **more_params):
    """Raise an Except wrapping *cause*, with a stack trace attached.

    template: unicode message template; default_params/more_params are
    merged into the message parameters; cause may be an exception or a
    list of exceptions to chain.
    """
    if not is_text(template):
        sys.stderr.write(str("Log.error was expecting a unicode template"))
        Log.error("Log.error was expecting a unicode template")
    # Allow the cause to be passed positionally in place of the params.
    if default_params and isinstance(listwrap(default_params)[0], BaseException):
        cause = default_params
        default_params = {}
    params = Data(dict(default_params, **more_params))
    # NOTE(review): add_to_trace is never set True, so the extend below
    # is dead code -- confirm whether it should be removed or wired up.
    add_to_trace = False
    if cause == None:
        causes = None
    elif is_list(cause):
        causes = []
        for c in listwrap(cause):
            causes.append(Except.wrap(c, stack_depth=1))
        causes = FlatList(causes)
    elif isinstance(cause, BaseException):
        causes = Except.wrap(cause, stack_depth=1)
    else:
        causes = None
        Log.error("can only accept Exception, or list of exceptions")
    trace = exceptions.extract_stack(stack_depth + 1)
    if add_to_trace:
        cause[0].trace.extend(trace[1:])
    e = Except(context=exceptions.ERROR, template=template, params=params, cause=causes, trace=trace)
    raise_from_none(e)
raise an exception with a trace for the cause too
16,523
def _get_parts_list(to_go, so_far=[[]], ticker=None):
    """Iterate over *to_go* building the list of command parts.

    *to_go* is consumed destructively (pop(0)); *so_far* accumulates the
    parts for each command slot; *ticker* tracks which slot is being
    filled.  Returns the (so_far, ticker) pair.

    NOTE(review): the mutable defaults ([[ ]]) appear to rely on callers
    always supplying fresh arguments -- confirm.
    """
    try:
        part = to_go.pop(0)
    except IndexError:
        # Nothing left to consume.
        return so_far, ticker
    if isinstance(part, list) and any(isinstance(e, list) for e in part):
        # A group of groups: recurse into it, advancing the ticker once
        # per inner group.
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
            ticker.tick()
    elif isinstance(part, list) and any(isinstance(e, Input) for e in part):
        # A flat group of inputs: recurse without advancing the ticker.
        while len(part) > 0:
            so_far, ticker = _get_parts_list(part, so_far, ticker)
    elif isinstance(part, Input) and part.is_magic:
        # A magic input expands into several concrete inputs.
        inputs = part.eval()
        while len(inputs) > 0:
            so_far, ticker = _get_parts_list(inputs, so_far, ticker)
            ticker.tick()
    elif isinstance(part, Input) and not part.is_magic:
        # A concrete input lands only in the current slot.
        so_far[ticker.value].append(part)
    else:
        # Any other part is appended across all slots.
        so_far = _append(so_far, part)
    return so_far, ticker
Iterates over to_go building the list of parts . To provide items for the beginning use so_far .
16,524
def _get_max_size(parts, size=1):
    """Return the maximum number of commands described by *parts*.

    The largest group (list) length found in *parts* is multiplied by the
    magic-input expansion factor from _get_magic_size.

    Note: *size* is unused; it is kept for interface compatibility.
    """
    max_group_size = 0
    for part in parts:
        if isinstance(part, list):
            # len() replaces the original element-by-element counting loop.
            max_group_size = max(max_group_size, len(part))
    magic_size = _get_magic_size(parts)
    return max_group_size * magic_size
Given a list of parts find the maximum number of commands contained in it .
16,525
def _grow ( list_of_lists , num_new ) : first = list_of_lists [ 0 ] for i in range ( num_new ) : list_of_lists . append ( copy . deepcopy ( first ) ) return list_of_lists
Given a list of lists and a number of new lists to add copy the content of the first list into the new ones and add them to the list of lists .
16,526
def _search_for_files ( parts ) : file_parts = [ ] for part in parts : if isinstance ( part , list ) : file_parts . extend ( _search_for_files ( part ) ) elif isinstance ( part , FileToken ) : file_parts . append ( part ) return file_parts
Given a list of parts return all of the nested file parts .
16,527
def eval(self):
    """Expand this template into a list of Command objects.

    Each command carries its preliminary dependencies, but those values
    should not be relied on for actually running commands.
    """
    max_size = _get_max_size(self.parts)
    # One part-list per resulting command; the first already exists.
    parts_list = _grow([[]], max_size - 1)
    counter = Ticker(max_size)
    # Work on a copy: _get_parts_list consumes its input destructively.
    parts = self.parts[:]
    while len(parts) > 0:
        parts_list, counter = _get_parts_list(parts, parts_list, counter)
    commands = []
    for i, parts in enumerate(parts_list):
        alias = self._get_alias(i + 1)
        # deepcopy so each Command owns an independent parts list.
        new_parts = copy.deepcopy(parts)
        commands.append(Command(alias=alias, parts=new_parts))
    return commands
Returns a list of Command objects that can be evaluated as their string values. Each command will track its preliminary dependencies, but these values should not be depended on for running commands.
16,528
def start_system(components, bind_to, hooks=None):
    """Start all components on the component map and return the master.

    components: the component map; bind_to: either the key of the started
    component to use as master, or the master object itself; hooks: an
    optional mapping passed to run_hooks.

    The started-components mapping is attached to the master under the
    '__components' attribute.
    """
    # Bug fix: hooks used a mutable default argument ({}); a None sentinel
    # is backward-compatible and avoids sharing state across calls.
    if hooks is None:
        hooks = {}
    deps = build_deps_graph(components)
    started_components = start_components(components, deps, None)
    run_hooks(hooks, started_components)
    # isinstance replaces "type(bind_to) is str", also accepting str subclasses.
    if isinstance(bind_to, str):
        master = started_components[bind_to]
    else:
        master = bind_to
    setattr(master, '__components', started_components)
    return master
Start all components on component map .
16,529
def ranges(self):
    """Expand each auto-parameter into its concrete list of values."""
    expanded = []
    for param in self._parameters:
        if param['parameter'] == 'filename':
            # File parameters enumerate their names directly.
            expanded.append(param['names'])
        elif param['step'] > 0:
            start = param['start']
            stop = param['stop']
            # Walk downward when the range is descending.
            step = param['step'] * -1 if start > stop else param['step']
            nsteps = self.nStepsForParam(param)
            values = np.linspace(start, start + step * (nsteps - 2), nsteps - 1)
            values = np.append(values, stop)
            expanded.append(np.around(values, 4))
        else:
            # Zero step means a single fixed value.
            assert param['start'] == param['stop']
            expanded.append([param['start']])
    return expanded
The expanded lists of values generated from the parameter fields
16,530
def _selectionParameters ( self , param ) : components = param [ 'selection' ] if len ( components ) == 0 : return [ ] editable_sets = [ ] for comp in components : details = comp . auto_details ( ) editable_sets . append ( set ( details . keys ( ) ) ) editable_paramters = set . intersection ( * editable_sets ) return list ( editable_paramters )
see docstring for selectedParameterTypes
16,531
def updateComponentStartVals(self):
    """Push each auto parameter's initial value into its selected components."""
    for param in self._parameters:
        key = param['parameter']
        for component in param['selection']:
            # File parameters start at their first name; others at 'start'.
            value = param['names'][0] if key == 'filename' else param['start']
            component.set(key, value)
Go through selected components for each auto parameter and set the start value
16,532
def verify(self):
    """Validate every row; return the first non-zero failure, else 0."""
    for row_index in range(self.nrows()):
        status = self.verify_row(row_index)
        if status != 0:
            return status
    return 0
Checks all parameters for invalidating conditions
16,533
def find_parameter():
    """Look up one parameter by qualified name and by domain-specific alias."""
    by_name = mdb.get_parameter('/YSS/SIMULATOR/BatteryVoltage2')
    print('Via qualified name:', by_name)
    by_alias = mdb.get_parameter('MDB:OPS Name/SIMULATOR_BatteryVoltage2')
    print('Via domain-specific alias:', by_alias)
Find one parameter .
16,534
def summary_reporter(self):
    """Parse individual MOB-recon reports into one summary CSV.

    Writes mob_recon_summary.csv to self.reportpath and also records a
    per-cluster replicon summary on each sample's pipelineresults dict.
    """
    logging.info('Creating MOB-recon summary report')
    with open(os.path.join(self.reportpath, 'mob_recon_summary.csv'), 'w') as summary:
        # Implicit string concatenation: one CSV header row.
        data = 'Strain,Location,Contig,Incompatibility,IncompatibilityAccession,RelaxaseType,' 'MashNearestNeighbor,MashNeighborDistance\n'
        for sample in self.metadata:
            sample[self.analysistype].pipelineresults = dict()
            for primarykey, results in sample[self.analysistype].report_dict.items():
                # Chromosomal hits are excluded from the plasmid summary.
                if results['cluster_id'] != 'chromosome':
                    # 'nan' values become 'ND'; embedded commas become ';'
                    # so the CSV stays well-formed.
                    data += ','.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND' for result in [sample.name, results['cluster_id'], results['contig_id'].split('|')[1], results['rep_type'], results['rep_type_accession'], results['relaxase_type'], results['mash_nearest_neighbor'], results['mash_neighbor_distance']])
                    data += '\n'
                    sample[self.analysistype].pipelineresults[results['cluster_id']] = ';'.join(str(result).replace(',', ';') if str(result) != 'nan' else 'ND' for result in [results['rep_type']])
        summary.write(data)
Parse individual MOB Recon reports into a summary report
16,535
def amrsummary(self):
    """Combine ResFinder and MOB-recon outputs into amr_summary.csv.

    For each sample, first collects the plasmid replicon (rep) types per
    cluster, then joins resistance-gene hits to contigs/clusters by
    contig name.
    """
    logging.info('Creating AMR summary table from ResFinder and MOB-recon outputs')
    with open(os.path.join(self.reportpath, 'amr_summary.csv'), 'w') as amr:
        data = 'Strain,Gene,Allele,Resistance,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
        for sample in self.metadata:
            # First pass: cluster_id -> set of replicon types.
            inc_dict = dict()
            for primarykey, results in sample[self.analysistype].report_dict.items():
                try:
                    inc = results['cluster_id']
                    rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
                    try:
                        inc_dict[inc].add(rep)
                    except KeyError:
                        inc_dict[inc] = set()
                        inc_dict[inc].add(rep)
                except KeyError:
                    pass
            # Second pass: match ResFinder hits to MOB-recon contigs.
            for primarykey, results in sample[self.analysistype].report_dict.items():
                try:
                    contig = results['contig_id'].split('|')[1]
                    # Normalise SPAdes-style contig names ("<id>_length...").
                    if contig.split('_')[1].startswith('length'):
                        contig = contig.split('_')[0]
                    for amr_result in sample.resfinder_assembled.sampledata:
                        if contig == str(amr_result[-1]):
                            data += '{sn},'.format(sn=sample.name)
                            data += '{amr},{mob}\n'.format(amr=','.join(str(res) if str(res) != 'nan' else 'ND' for res in amr_result[0:4]), mob=','.join(str(res) if str(res) != 'nan' else 'ND' for res in [contig, results['cluster_id'], ';'.join(sorted(inc_dict[str(results['cluster_id'])]))]))
                except KeyError:
                    pass
        amr.write(data)
Create a report combining results from resfinder_assembled and mob_recon_summary reports
16,536
def geneseekrsummary(self):
    """Combine GeneSeekr and MOB-recon outputs into plasmid_borne_summary.csv.

    Samples with no hit at or above self.cutoff get a bare strain-name row.
    """
    logging.info('Creating predicted plasmid-borne gene summary table')
    with open(os.path.join(self.reportpath, 'plasmid_borne_summary.csv'), 'w') as pbs:
        data = 'Strain,Gene,PercentIdentity,Contig,Location,PlasmidIncompatibilitySets\n'
        for sample in self.metadata:
            # Tracks whether any hit passed the cutoff for this sample.
            result_bool = False
            # cluster_id -> set of replicon types for this sample.
            inc_dict = dict()
            for primarykey, results in sample[self.analysistype].report_dict.items():
                try:
                    inc = results['cluster_id']
                    rep = str(results['rep_type']).replace(',', ';') if str(results['rep_type']) != 'nan' else 'ND'
                    try:
                        inc_dict[inc].add(rep)
                    except KeyError:
                        inc_dict[inc] = set()
                        inc_dict[inc].add(rep)
                except KeyError:
                    pass
            for primarykey, results in sample[self.analysistype].report_dict.items():
                try:
                    contig = results['contig_id'].split('|')[1]
                    # Normalise SPAdes-style contig names ("<id>_length...").
                    if contig.split('_')[1].startswith('length'):
                        contig = contig.split('_')[0]
                    for gene, result_dict in sample.geneseekr_results.sampledata.items():
                        if contig == result_dict['query_id']:
                            percent_identity = result_dict['PercentIdentity']
                            if float(result_dict['PercentIdentity']) >= self.cutoff:
                                result_bool = True
                                data += '{sn},'.format(sn=sample.name)
                                data += '{gene},{pi},{contig},{cid},{inc}\n'.format(gene=gene, pi=percent_identity, contig=contig, cid=results['cluster_id'], inc=';'.join(sorted(inc_dict[str(results['cluster_id'])])))
                except KeyError:
                    pass
            # Record samples with no passing hits.
            if not result_bool:
                data += '{sn}\n'.format(sn=sample.name)
        pbs.write(data)
Create a report combining GeneSeekr and MOB Recon outputs
16,537
def _execute(cmd):
    """Run *cmd* in a subshell and return a CommandResult.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    if sys.version_info[0] < 3:
        # Python 2's shlex cannot handle unicode input.
        cmd_parts = shlex.split(cmd.encode('ascii'))
    else:
        cmd_parts = shlex.split(cmd)
    proc = subprocess.Popen(
        cmd_parts,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise subprocess.CalledProcessError(
            returncode=proc.returncode,
            cmd=cmd,
            output=err,
        )
    return CommandResult(
        code=proc.returncode,
        out=out.decode('utf8'),
        err=err.decode('utf8'),
    )
Run a command in a subshell .
16,538
def cmd_path(self, cmd):
    """Return the path of *cmd* inside the virtualenv if it exists.

    Raises ValueError when the command is not found.
    """
    suffix = '/{0}'.format(cmd)
    for binscript in self.bin.files:
        if binscript.path.endswith(suffix):
            return binscript.path
    raise ValueError('The command {0} was not found.'.format(cmd))
Get the path of a command in the virtual if it exists .
16,539
def python(self, cmd):
    """Execute *cmd* using the virtual environment's python interpreter."""
    interpreter = self.cmd_path('python')
    return self._execute('{0} {1}'.format(interpreter, cmd))
Execute a python script using the virtual environment python .
16,540
def pip(self, cmd):
    """Execute a pip command using the virtual environment's pip."""
    pip_executable = self.cmd_path('pip')
    return self._execute('{0} {1}'.format(pip_executable, cmd))
Execute some pip function using the virtual environment pip .
16,541
def filterunique(self):
    """Keep only the best BLAST hit per overlapping genomic region.

    For each query range on each contig, collects all hits overlapping
    that range, then retains the single highest-identity hit whose
    subject has not already been reported for that contig.
    """
    for sample in self.metadata:
        sample[self.analysistype].blastresults = list()
        # contig -> location-string -> list of percent identities / rows.
        resultdict = dict()
        rowdict = dict()
        try:
            for contig in sample[self.analysistype].queryranges:
                for location in sample[self.analysistype].queryranges[contig]:
                    for row in sample[self.analysistype].results[contig]:
                        # NOTE(review): this rebinds the outer loop variable
                        # `contig` to the row's query_id -- confirm intended.
                        contig = row['query_id']
                        high = row['high']
                        low = row['low']
                        percentidentity = row['percentidentity']
                        locstr = ','.join([str(x) for x in location])
                        loc = set(range(low, high))
                        # Only hits overlapping the query range are kept.
                        if loc.intersection(set(range(location[0], location[1]))):
                            try:
                                resultdict[contig][locstr].append(percentidentity)
                                rowdict[contig][locstr].append(row)
                            except KeyError:
                                # Nested-dict initialisation via EAFP.
                                try:
                                    resultdict[contig][locstr] = list()
                                    resultdict[contig][locstr].append(percentidentity)
                                    rowdict[contig][locstr] = list()
                                    rowdict[contig][locstr].append(row)
                                except KeyError:
                                    resultdict[contig] = dict()
                                    resultdict[contig][locstr] = list()
                                    resultdict[contig][locstr].append(percentidentity)
                                    rowdict[contig] = dict()
                                    rowdict[contig][locstr] = list()
                                    rowdict[contig][locstr].append(row)
        except KeyError:
            pass
        # Keep a single best (highest-identity, unseen-subject) hit per region.
        for contig in resultdict:
            genes = list()
            for location in resultdict[contig]:
                multiple = False
                for row in rowdict[contig][location]:
                    if row['percentidentity'] == max(resultdict[contig][location]) and not multiple and row['subject_id'] not in genes:
                        sample[self.analysistype].blastresults.append(row)
                        genes.append(row['subject_id'])
                        multiple = True
Filters multiple BLAST hits in a common region of the genome . Leaves only the best hit
16,542
def makedbthreads(self):
    """Set up and create BLAST databases for every target folder using threads."""
    # Collect the unique target folders across samples.
    for sample in self.metadata:
        if sample[self.analysistype].combinedtargets != 'NA':
            self.targetfolders.add(sample[self.analysistype].targetpath)
    # One daemon worker per target folder, consuming from self.dqueue.
    for i in range(len(self.targetfolders)):
        threads = Thread(target=self.makeblastdb, args=())
        threads.setDaemon(True)
        threads.start()
    for targetdir in self.targetfolders:
        self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
        try:
            _ = self.targetfiles[0]
        except IndexError:
            # NOTE(review): this retries the identical glob on an empty
            # result, which cannot find anything new -- confirm intent.
            self.targetfiles = glob(os.path.join(targetdir, '*.fasta'))
        for targetfile in self.targetfiles:
            # Cache the parsed FASTA records keyed by file path.
            self.records[targetfile] = SeqIO.to_dict(SeqIO.parse(targetfile, 'fasta'))
            self.dqueue.put(targetfile)
    # Block until every queued target file has been processed.
    self.dqueue.join()
Setup and create threads for class
16,543
def check_config(config):
    """Basic checks that the configuration file is valid."""
    seen = set()
    for count in config['count']:
        name = count['shortname']
        if name in seen:
            logger.error("error: duplicate `shortname' in count configuration.")
            return False
        seen.add(name)
    return True
basic checks that the configuration file is valid
16,544
def _qstr ( self , question ) : "we need to cope with a list, or a list of lists" parts = [ ] for entry in question : if type ( entry ) is list : parts . append ( self . _qstr ( entry ) ) else : parts . append ( '"%s"<%d>' % ( self . _count_data . get_candidate_title ( entry ) , entry ) ) return ', ' . join ( parts )
we need to cope with a list or a list of lists
16,545
def create_callback(self):
    """Create a callback suitable to be passed to SenateCounter.

    The callback replays pre-recorded (question, answer) automation data
    in order, returning the answer's index within the posed question.
    """
    def _replay(question_posed):
        logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
        if self._upto == len(self._data):
            logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
            raise AutomationException("out of automation data")
        question_archived, answer = self._data[self._upto]
        # A mismatch is logged but replay continues with the stored answer.
        if question_archived != question_posed:
            logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
        response = question_posed.index(answer)
        self._upto += 1
        return response
    return _replay
create a callback suitable to be passed to SenateCounter
16,546
def main():
    """Fetch HEK data and make thematic maps for each requested date."""
    args = get_args()
    config = Config(args.config)
    # args.dates is either a file of dates (first token per line) or a
    # single date string.
    if os.path.isfile(args.dates):
        with open(args.dates) as f:
            dates = [dateparser.parse(line.split(" ")[0]) for line in f.readlines()]
    else:
        dates = [dateparser.parse(args.dates)]
    if args.verbose:
        print("Dates are:")
        for date in dates:
            print(date)
    for date in dates:
        if args.verbose:
            print('Processing {}'.format(date))
        suvi_data = Fetcher(date, ['suvi-l2-ci195'], suvi_composite_path=config.suvi_composite_path).fetch(multithread=False)['suvi-l2-ci195']
        # Only proceed when the composite image was actually retrieved.
        if suvi_data[0] is not None:
            config.expert = 'HEK'
            responses = query_hek(date)
            thmap = make_thmap(suvi_data, responses, config)
            Outgest(os.path.join(args.output, "thmap_hek_{}.fits".format(date.strftime("%Y%m%d%H%M%S"))), thmap, {"c195": suvi_data[0], "suvi-l2-ci195": suvi_data[0]}, args.config).save()
fetches hek data and makes thematic maps as requested
16,547
def _get_jobs_from_template(self, template, job_class):
    """Construct ``job_class`` instances for every command the template evaluates to.

    For each command, dependencies are resolved against jobs already in the
    queue (matching aliases), and the command's dependent files are updated
    from the commands of all non-template jobs.

    :param template: template whose ``eval()`` yields command objects
    :param job_class: class used to wrap each command as a job
    :returns: list of newly constructed jobs
    """
    jobs = []
    for command in template.eval():
        alias = command.alias
        # aliases of queued jobs that this command declares as dependencies;
        # the nested loop intentionally preserves duplicate matches, if any
        depends_on = [job.alias for job in self.queue.all_jobs
                      for deps in command.depends_on if deps == job.alias]
        command.update_dependent_files([job.command for job in self.queue.all_jobs
                                        if not isinstance(job, JobTemplate)])
        job = job_class(alias, command, depends_on)
        jobs.append(job)
    return jobs
Given a template a job class construct jobs from the given template .
16,548
def get_export_table(self):
    """Return the names of every symbol in the PE export directory."""
    export_symbols = self.binary.DIRECTORY_ENTRY_EXPORT.symbols
    return AttrsGetter(export_symbols, join=False).name
Get the export table .
16,549
def resolve_pkix_certificate(url):
    """Fetch a certificate from *url* (application/pkix-cert) and parse it.

    DER encoding is tried first, then PEM.

    :returns: the loaded X509 certificate
    :raises RuntimeError: when the fetch fails or neither encoding parses
    """
    http = urllib3.PoolManager()
    rsp = http.request('GET', url, headers={'Content-Type': 'application/pkix-cert'})
    if rsp.status != 200:
        raise RuntimeError('Failed to fetch intermediate certificate at {0}!'.format(url))
    try:
        return load_certificate(crypto.FILETYPE_ASN1, rsp.data)
    except crypto.Error:
        log.error('Failed to load DER encoded certificate from %s', url)
    try:
        return load_certificate(crypto.FILETYPE_PEM, rsp.data)
    except crypto.Error:
        log.error('Failed to load PEM encoded certificate from %s', url)
    # bug fix: RuntimeError does not %-format its arguments, so the old
    # two-argument form produced a tuple message; format explicitly instead
    raise RuntimeError('Failed to load any certificate from {0}'.format(url))
Resolve a certificate from a remote host .
16,550
def make_certificate_signing_request(pkey, digest='sha512', **name):
    """Make a certificate signing request for *pkey*.

    Subject fields (C, ST, L, O, OU, CN) may be overridden via keyword
    arguments; placeholder defaults are used otherwise.

    :param pkey: key pair whose public half is embedded and private half signs
    :param digest: message digest used for the self-signature
    :returns: the signed X509Req
    """
    csr = crypto.X509Req()
    subj = csr.get_subject()
    subj.C = name.get('C', 'US')
    subj.ST = name.get('ST', 'CA')
    subj.L = name.get('L', 'Home')
    subj.O = name.get('O', 'Home')
    subj.OU = name.get('OU', 'Unit')
    subj.CN = name.get('CN', 'Common')
    csr.set_pubkey(pkey)
    # NOTE(review): X.509 CSRs conventionally use version 0 (v1);
    # confirm that version 3 is intentional here.
    csr.set_version(3)
    csr.sign(pkey, digest)
    return csr
Make a certificate signing request .
16,551
def make_certificate(csr, ca_key, ca_cert, serial, not_before, not_after,
                     digest='sha512', version=2, exts=()):
    """Make an X.509 certificate from *csr*, signed by *ca_key*.

    :param csr: signing request supplying subject and public key
    :param ca_key: issuer private key used to sign
    :param ca_cert: issuer certificate (pass the CSR itself when self-signing)
    :param serial: certificate serial number
    :param not_before: validity start, seconds relative to now
    :param not_after: validity end, seconds relative to now
    :param version: X.509 version number (2 means v3)
    :param exts: extra X509Extension objects to attach
    :returns: the signed X509 certificate
    """
    crt = crypto.X509()
    crt.set_serial_number(serial)
    crt.gmtime_adj_notBefore(not_before)
    crt.gmtime_adj_notAfter(not_after)
    crt.set_issuer(ca_cert.get_subject())
    crt.set_subject(csr.get_subject())
    crt.set_pubkey(csr.get_pubkey())
    crt.set_version(version)
    crt.add_extensions([crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=crt)])
    # a self-signed certificate references itself as authority; otherwise the CA cert
    if ca_cert.get_subject() == crt.get_subject():
        crt.add_extensions([crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=crt)])
    else:
        crt.add_extensions([crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=ca_cert)])
    crt.add_extensions(exts)
    crt.sign(ca_key, digest)
    return crt
Make a certificate .
16,552
def make_certificate_authority(**name):
    """Create a self-signed certificate authority.

    The certificate is valid for ten years and carries the
    ``basicConstraints CA:TRUE`` extension.

    :returns: (private_key, certificate)
    """
    ten_years = 10 * 365 * 24 * 60 * 60
    ca_key = make_pkey()
    ca_csr = make_certificate_signing_request(ca_key, **name)
    ca_exts = [crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE')]
    ca_crt = make_certificate(ca_csr, ca_key, ca_csr, make_serial(),
                              0, ten_years, exts=ca_exts)
    return ca_key, ca_crt
Make a certificate authority .
16,553
def load_certificate(filetype, buf):
    """Parse a certificate from *buf* and patch in incubating helpers."""
    cert = crypto.load_certificate(filetype, buf)
    patch_certificate(cert)
    return cert
Load a certificate and patch in incubating functionality .
16,554
def load_x509_certificates(buf):
    """Yield every PEM-encoded X.509 certificate found in *buf*.

    :param buf: string containing one or more PEM certificate blocks
    :raises ValueError: when *buf* is not a string (raised lazily, on first
        iteration, since this is a generator)
    """
    if not isinstance(buf, basestring):
        raise ValueError('`buf` should be an instance of `basestring` not `%s`' % type(buf))
    # raw string so \s and \S are real regex escapes rather than fragile
    # string-literal escapes (DeprecationWarning on modern Pythons)
    for pem in re.findall(r'(-----BEGIN CERTIFICATE-----\s(\S+\n*)+\s-----END CERTIFICATE-----\s)', buf):
        yield load_certificate(crypto.FILETYPE_PEM, pem[0])
Load one or multiple X . 509 certificates from a buffer .
16,555
def cowbat(self):
    """Download every database used by the COWBAT pipeline.

    Each target is fetched only when its folder is missing from
    ``self.databasepath`` (or always, when ``self.overwrite`` is set).
    """
    logging.info('Beginning COWBAT database downloads')
    # (folder checked under databasepath, downloader to run), in pipeline order
    downloads = [
        ('genesippr', lambda: self.sipprverse_targets(databasepath=self.databasepath)),
        ('coregenome', lambda: self.cowbat_targets(databasepath=self.databasepath)),
        ('ConFindr', lambda: self.confindr_targets()),
        ('mash', lambda: self.mash(databasepath=self.databasepath)),
        ('MLST', lambda: self.mlst(databasepath=self.databasepath)),
        ('rMLST', lambda: self.rmlst(databasepath=self.databasepath,
                                     credentials=self.credentials)),
        ('univec', lambda: self.univec(databasepath=self.databasepath)),
        ('resfinder', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='resfinder',
                                                     dbname='resfinder_db')),
        ('virulence', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='virulence',
                                                     dbname='virulencefinder_db')),
        ('serosippr', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='serosippr',
                                                     dbname='serotypefinder_db')),
        ('pointfinder', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                       analysistype='pointfinder',
                                                       dbname='pointfinder_db')),
        ('clark', lambda: self.clark(databasepath=self.databasepath)),
        ('mob_suite', lambda: self.mob_suite_targets()),
    ]
    for folder, download in downloads:
        if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, folder)):
            download()
Run all the methods
16,556
def sipprverse_full(self):
    """Download the subset of databases used by the full sipprverse.

    Each target is fetched only when its folder is missing from
    ``self.databasepath`` (or always, when ``self.overwrite`` is set).
    """
    logging.info('Beginning sipprverse full database downloads')
    downloads = [
        ('genesippr', lambda: self.sipprverse_targets(databasepath=self.databasepath)),
        ('ConFindr', lambda: self.confindr_targets()),
        ('mash', lambda: self.mash(databasepath=self.databasepath)),
        ('MLST', lambda: self.mlst(databasepath=self.databasepath)),
        ('rMLST', lambda: self.rmlst(databasepath=self.databasepath,
                                     credentials=self.credentials)),
        ('resfinder', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='resfinder',
                                                     dbname='resfinder_db')),
        ('virulence', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='virulence',
                                                     dbname='virulencefinder_db')),
        ('serosippr', lambda: self.cge_db_downloader(databasepath=self.databasepath,
                                                     analysistype='serosippr',
                                                     dbname='serotypefinder_db')),
    ]
    for folder, download in downloads:
        if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, folder)):
            download()
Run a subset of the methods - only the targets used in the sipprverse are required here
16,557
def sipprverse_method(self):
    """Download the minimal database set: genesippr, ConFindr and MASH
    targets only, skipping folders already present unless overwriting."""
    logging.info('Beginning sipprverse method database downloads')
    downloads = [
        ('genesippr', lambda: self.sipprverse_targets(databasepath=self.databasepath)),
        ('ConFindr', lambda: self.confindr_targets()),
        ('mash', lambda: self.mash(databasepath=self.databasepath)),
    ]
    for folder, download in downloads:
        if self.overwrite or not os.path.isdir(os.path.join(self.databasepath, folder)):
            download()
Reduced subset again . Only sipprverse MASH and confindr targets are required
16,558
def severity(self):
    """Severity level of the event, or None when unset.

    One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL``
    or ``SEVERE``.
    """
    if not self._proto.HasField('severity'):
        return None
    return yamcs_pb2.Event.EventSeverity.Name(self._proto.severity)
Severity level of the event . One of INFO WATCH WARNING DISTRESS CRITICAL or SEVERE .
16,559
def state(self):
    """State of this instance, or None when unset.

    One of ``OFFLINE``, ``INITIALIZING``, ``INITIALIZED``, ``STARTING``,
    ``RUNNING``, ``STOPPING`` or ``FAILED``.
    """
    if not self._proto.HasField('state'):
        return None
    return yamcsManagement_pb2.YamcsInstance.InstanceState.Name(self._proto.state)
State of this instance . One of OFFLINE INITIALIZING INITIALIZED STARTING RUNNING STOPPING or FAILED .
16,560
def state(self):
    """State of this service, or None when unset."""
    if not self._proto.HasField('state'):
        return None
    return yamcsManagement_pb2.ServiceState.Name(self._proto.state)
State of this service .
16,561
def add_child(self, child):
    """Append *child* to this widget and re-parent it; when the owning
    view is already loaded, push the compiled HTML to it immediately."""
    child.parent = self
    self.children.append(child)
    view = self.view
    if view and view.is_loaded:
        view.dispatch({
            'name': 'append',
            'html': child.compile(),
            'selector': '#' + str(self.id),
        })
Add a new child element to this widget .
16,562
def remove_child(self, child):
    """Detach *child* from this widget; when the owning view is already
    loaded, tell it to remove the child's element as well."""
    self.children.remove(child)
    child.parent = None
    view = self.view
    if view and view.is_loaded:
        view.dispatch({'name': 'remove', 'selector': '#' + child.id})
Remove a child widget from this widget .
16,563
def compile(self):
    """Recursively compile this widget and all of its children to HTML."""
    self.content = ''.join(child.compile() for child in self.children)
    return self._generate_html()
Recursively compile this widget as well as all of its children to HTML .
16,564
def handle_config_change(self, new_config):
    """Run the user handler (if any) and the per-spec handlers for
    *new_config*, then store a deep copy as the current configuration."""
    handler = self.user_handler
    if handler:
        handler(self.current_config, new_config)
    self._call_spec_handlers(new_config)
    # deep copy so later caller-side mutation cannot alter our snapshot
    self.current_config = copy.deepcopy(new_config)
Handle the new configuration .
16,565
def setReferenceVoltage(self, caldb, calv):
    """Store the reference point (intensity in dB, outgoing voltage) used
    to scale the output amplitude of stimulus components."""
    self.caldb, self.calv = caldb, calv
Sets the reference point to determine what outgoing voltage will produce what intensity used to calculate the proper output amplitude of components
16,566
def setCalibration(self, dbBoostArray, frequencies, frange):
    """Set (or clear) the speaker calibration for this stimulus.

    Builds an impulse-response filter from the attenuation curve; the
    filter is later convolved with the generated signal.  Kernels are
    cached per samplerate on the class.  Pass None arrays to clear.

    :param dbBoostArray: dB adjustment per frequency (same shape as *frequencies*)
    :param frequencies: frequencies matching *dbBoostArray*
    :param frange: (low, high) frequency range to calibrate over; defaults
        to the full span of *frequencies*
    """
    if dbBoostArray is None or frequencies is None:
        self.impulseResponse = None
        return
    logger = logging.getLogger('main')
    if dbBoostArray.shape != frequencies.shape:
        logger.error("ERROR: calibration array and frequency array must have same dimensions")
        return
    if frange is None:
        frange = (frequencies[0], frequencies[-1])
    fs = self.samplerate()
    logger.debug('setting calibration with samplerate {}'.format(fs))
    if fs in StimulusModel.kernelCache:
        # NOTE(review): the original debug strings were corrupted in
        # extraction (unterminated literals); wording reconstructed --
        # confirm against upstream history.
        logger.debug('-- using cached impulse response')
        self.impulseResponse = StimulusModel.kernelCache[fs]
    else:
        logger.debug('-- calculating new impulse response for samplerate {}'.format(fs))
        self.impulseResponse = impulse_response(fs, dbBoostArray, frequencies, frange)
        StimulusModel.kernelCache[fs] = self.impulseResponse
    self._calibration_fs = fs
    # pre-compute the kernel for the default rate so later rate changes are cheap
    if DEFAULT_SAMPLERATE not in StimulusModel.kernelCache:
        StimulusModel.kernelCache[DEFAULT_SAMPLERATE] = impulse_response(
            DEFAULT_SAMPLERATE, dbBoostArray, frequencies, frange)
    self._attenuationVector = dbBoostArray
    self._calFrequencies = frequencies
    self._calFrange = frange
Sets the calibration to use with this stimulus creates a filter that will be applied to output signal generated by this model . Set arguments to None to clear calibration .
16,567
def updateCalibration(self):
    """Recompute the calibration kernel when the stimulus samplerate has
    drifted from the rate the current kernel was built for."""
    current_fs = self.samplerate()
    if current_fs == self._calibration_fs:
        return
    self.setCalibration(self._attenuationVector, self._calFrequencies,
                        self._calFrange)
Updates the current calibration according to intenal values . For example if the stimulus samplerate changes the calibration needs to be recalculated .
16,568
def samplerate(self):
    """Generation rate for this stimulus.

    Derived from any Vocalization components present; DEFAULT_SAMPLERATE
    when there are none.  Returns None (and logs an error) if the
    vocalizations disagree on rate.
    """
    rates = set()
    for track in self._segments:
        for component in track:
            if component.__class__.__name__ == 'Vocalization':
                fs = component.samplerate()
                if fs is not None:
                    rates.add(fs)
    if len(rates) > 1:
        logging.getLogger('main').error(
            "Wav files with different sample rates in same stimulus")
        return None
    if rates:
        return rates.pop()
    return DEFAULT_SAMPLERATE
Returns the generation rate for this stimulus
16,569
def columnCount(self, row=None):
    """Number of components in *row*, or the widest row when row is None."""
    if row is None:
        return max(len(track) for track in self._segments)
    return len(self._segments[row])
Returns the number of components in a track or the max number of components in any row if none given
16,570
def componentCount(self):
    """Total number of components across all tracks of the stimulus."""
    return sum(self.columnCountForRow(row) for row in range(self.rowCount()))
Returns the total number of components in stimulus
16,571
def component(self, row, col):
    """Return the component at (row, col), or None when the location is
    out of range (a notice is printed).

    Previously used a bare ``except`` (which also swallowed unrelated
    errors) and a Python-2 print statement; now only bad indices are
    handled and print is called as a function.
    """
    try:
        return self._segments[row][col]
    except (IndexError, TypeError):
        print('Invalid index')
        return None
Gets the component at the given location
16,572
def insertComponent(self, comp, row=0, col=0):
    """Insert *comp* at (row, col), first growing the model with an empty
    track when *row* is past the last existing track."""
    if row >= len(self._segments):
        self.insertEmptyRow()
    self._segments[row].insert(col, comp)
    self.updateCalibration()
Inserts component into model
16,573
def overwriteComponent(self, comp, row, col):
    """Replace the component at (row, col) with *comp* and refresh the
    calibration (the new component may change the model samplerate)."""
    self._segments[row][col] = comp
    self.updateCalibration()
Overwrites the component at the specficied location with a provided one .
16,574
def removeLastRow(self):
    """Remove the last (empty) track from the stimulus.

    :raises Exception: when the last track still contains components.
        Bug fix: the old code popped the track *before* checking, so a
        failed removal still destroyed the track's contents; we now
        validate first and only then remove.
    """
    if len(self._segments[-1]) > 0:
        raise Exception("Attempt to remove non-empty stimulus track")
    self._segments.pop()
Removes the last track
16,575
def removeComponent(self, row, col):
    """Delete the component at (row, col); drop the last track when this
    leaves it empty, then refresh the calibration."""
    del self._segments[row][col]
    if self.columnCountForRow(-1) == 0:
        self.removeRow(len(self._segments) - 1)
    self.updateCalibration()
Removes the component at the given location
16,576
def indexByComponent(self, component):
    """Return the (row, column) of *component*, or None when it is not in
    the model (first match wins)."""
    for row, contents in enumerate(self._segments):
        try:
            return (row, contents.index(component))
        except ValueError:
            continue
Returns a location for the given component or None if it is not in the model
16,577
def traceCount(self):
    """Number of unique stimuli this model generates: the product of all
    auto-parameter step counts, or 0 when the model has no components."""
    if not any(self._segments):
        return 0
    count = 1
    for irow in range(self._autoParams.nrows()):
        count *= self._autoParams.numSteps(irow)
    return count
The number of unique stimului for this stimulus object
16,578
def contains(self, stimtype):
    """True when any component in any track is of class *stimtype* (by name)."""
    return any(component.__class__.__name__ == stimtype
               for track in self._segments
               for component in track)
Returns whether the specified stimulus type is a component in this stimulus
16,579
def purgeAutoSelected(self):
    """Drop auto-parameter selections referring to components no longer in
    the model (selections are mutated in place, preserving order)."""
    for param in self._autoParams.allData():
        param['selection'][:] = [
            comp for comp in param['selection']
            if self.indexByComponent(comp) is not None
        ]
Clears out orphaned auto parameters
16,580
def expandFunction(self, func, args=[]):
    """Apply *func* once per auto-parameter combination and collect results.

    Every combination of auto-parameter step values is applied to the
    selected components in turn, *func* is called for each, and afterwards
    the components are reset to the first step of every parameter.

    NOTE(review): the mutable default ``args=[]`` is shared across calls
    but is never mutated here, so it is safe in practice.

    :param func: callable evaluated once per expanded trace
    :param args: positional arguments passed to *func*
    :returns: list of ``func(*args)`` results, one per trace
    """
    # parameters to vary and their step values
    params = self._autoParams.allData()
    steps = self.autoParamRanges()
    ntraces = 1
    for p in steps:
        ntraces = ntraces * len(p)
    # build the full cartesian product of step values, one row per trace
    varylist = [[None for x in range(len(params))] for y in range(ntraces)]
    x = 1
    for iset, step_set in enumerate(steps):
        for itrace in range(ntraces):
            # NOTE(review): `/` relies on integer division -- Python 2 code
            idx = (itrace / x) % len(step_set)
            varylist[itrace][iset] = step_set[idx]
        x = x * len(step_set)
    stim_list = []
    for itrace in range(ntraces):
        # apply this trace's value for each parameter to its selected components
        for ip, param in enumerate(params):
            for component in param['selection']:
                index = self.indexByComponent(component)
                component = self.component(*index)
                component.set(param['parameter'], varylist[itrace][ip])
        stim_list.append(func(*args))
    # restore components to the first step of every parameter
    for ip, param in enumerate(params):
        for component in param['selection']:
            component.set(param['parameter'], varylist[0][ip])
    return stim_list
Applies the given function to each of this stimulus's memberships when autoparameters are applied
16,581
def setReorderFunc(self, func, name=None):
    """Install *func* as the reorder hook applied to this stimulus's
    expanded signals, remembering *name* for display/serialization."""
    self.reorder, self.reorderName = func, name
Sets the function that reorders the expanded signals of this stimulus
16,582
def expandedStim(self):
    """Apply the auto-parameters to this stimulus and return the results.

    :returns: (signals, docs, overloads) -- the expanded (signal, atten)
        pairs, matching doc dictionaries (undesired attenuation is also
        recorded under ``'overloaded_attenuation'``), and the list of
        undesired attenuations.  The reorder hook, when set, reorders
        signals and docs together.
    """
    logger = logging.getLogger('main')
    logger.debug("Generating Expanded Stimulus")
    # each entry is a 3-tuple (signal, attenuation, undesired attenuation)
    signals = self.expandFunction(self.signal)
    docs = self.expandFunction(self.componentDoc)
    overloads = []
    for s, d in zip(signals, docs):
        d['overloaded_attenuation'] = s[2]
        overloads.append(s[2])
    # strip the overload element, keeping (signal, attenuation)
    signals = [sig[0:2] for sig in signals]
    if self.reorder:
        order = self.reorder(docs)
        signals = [signals[i] for i in order]
        docs = [docs[i] for i in order]
    return signals, docs, overloads
Apply the autoparameters to this stimulus and return a list of the resulting stimuli a complimentary list of doc dictionaries and a complimentary list of undesired attenuations .
16,583
def loadFromTemplate(template, stim=None):
    """Restore a stimulus model to the state described by *template*.

    :param template: dict produced by template serialization
    :param stim: model to load into; a fresh StimulusModel when None
    :returns: the populated stimulus model
    """
    if stim is None:
        stim = StimulusModel()
    stim.setRepCount(template['reps'])
    stim.setUserTag(template.get('user_tag', ''))
    component_classes = get_stimuli_models()
    for comp_doc in template['components']:
        comp = get_component(comp_doc['stim_type'], component_classes)
        comp.loadState(comp_doc)
        stim.insertComponent(comp, *comp_doc['index'])
    # translate the stored (row, col) indices back into component references
    autoparams = template['autoparameters']
    for p in autoparams:
        selection = p['selection']
        component_selection = []
        for index in selection:
            component = stim.component(*index)
            component_selection.append(component)
        p['selection'] = component_selection
    stim.autoParams().setParameterList(autoparams)
    stim.setReorderFunc(order_function(template['reorder']), template['reorder'])
    stim.setStimType(template['testtype'])
    return stim
Loads the stimulus to the state provided by a template
16,584
def duration(self):
    """Total duration of this stimulus: the longest summed track duration."""
    return max(sum(component.duration() for component in track)
               for track in self._segments)
The duration of this stimulus
16,585
def signal(self, force_fs=False):
    """Render this stimulus to a single waveform (the sum of its tracks).

    :param force_fs: samplerate to render at instead of the model's own
    :returns: (signal, attenuation, undesired_attenuation) -- the samples,
        the attenuation to request from hardware, and any attenuation that
        could not be honored because the signal had to be rescaled
    """
    assert None not in self.voltage_limits, 'Max voltage level not set'
    if force_fs:
        samplerate = force_fs
    else:
        samplerate = self.samplerate()
    track_signals = []
    # NOTE(review): max_db is computed but never used below
    max_db = max([comp.intensity() for t in self._segments for comp in t])
    atten = 0
    # render each track by concatenating its components end to end
    for track in self._segments:
        track_list = []
        for component in track:
            track_list.append(component.signal(fs=samplerate, atten=0,
                                               caldb=self.caldb, calv=self.calv))
        if len(track_list) > 0:
            track_signals.append(np.hstack(track_list))
    # sum the tracks; shorter tracks are implicitly zero-padded at the end
    full_len = len(max(track_signals, key=len))
    total_signal = np.zeros((full_len,))
    for track in track_signals:
        total_signal[0:len(track)] += track
    # a lone square-wave stimulus bypasses speaker calibration and uses the
    # alternate (non-speaker) voltage limit
    component_names = list(set([comp.name for track in self._segments for comp in track]))
    if 'silence' in component_names:
        component_names.remove('silence')
    if len(component_names) > 1 or (len(component_names) == 1 and component_names[0] != "Square Wave"):
        total_signal = convolve_filter(total_signal, self.impulseResponse)
        maxv = self.voltage_limits[0]
        to_speaker = True
    else:
        maxv = self.voltage_limits[1]
        to_speaker = False
    # force the last sample to zero to avoid a DC click at stimulus end
    total_signal[-1] = 0
    undesired_attenuation = 0
    minv = self.voltage_limits[2]
    sig_max = np.max(abs(total_signal))
    if sig_max > maxv:
        # over-range: rescale down, absorbing as much as possible into atten
        total_signal = (total_signal / sig_max) * maxv
        attenuated = 20 * np.log10(sig_max / maxv)
        if attenuated <= atten:
            atten = atten - attenuated
        else:
            undesired_attenuation = attenuated - atten
            atten = 0
            logger = logging.getLogger('main')
            logger.warning("STIMULUS AMPLTIUDE {:.2f}V EXCEEDS MAXIMUM({}V), RESCALING. \ UNDESIRED ATTENUATION {:.2f}dB".format(sig_max, maxv, undesired_attenuation))
    elif sig_max < minv and sig_max != 0 and to_speaker:
        # under-range on the speaker path: scale up, then attenuate back down
        before_rms = np.sqrt(np.mean(pow(total_signal, 2)))
        total_signal = (total_signal / sig_max) * minv
        after_rms = np.sqrt(np.mean(pow(total_signal, 2)))
        attenuated = -20 * np.log10(before_rms / after_rms)
        atten += attenuated
    return total_signal, atten, undesired_attenuation
The current stimulus in signal representation this is the sum of its components
16,586
def componentDoc(self, starttime=True):
    """Return a documentation dict for every component in the stimulus.

    :param starttime: when True, record each component's onset ('start_s')
    :returns: {'samplerate_da': rate, 'components': [state dicts]}
    """
    rate = self.samplerate()
    doc_list = []
    for row, track in enumerate(self._segments):
        elapsed = 0
        for col, component in enumerate(track):
            state = component.stateDict()
            state['stim_type'] = component.name
            if starttime:
                state['start_s'] = elapsed
            state['index'] = (row, col)
            elapsed += state['duration']
            doc_list.append(state)
    return {'samplerate_da': rate, 'components': doc_list}
The documentation for the components as a dict
16,587
def warning(self):
    """Return a warning message when any expanded stimulus exceeds the
    maximum voltage (and will be rescaled); 0 otherwise."""
    overs = self.expandedStim()[2]
    if not np.any(np.array(overs) > 0):
        return 0
    return 'Stimuli in this test are over the maximum allowable \ voltage output. They will be rescaled with a maximum \ undesired attenuation of {:.2f}dB.'.format(np.amax(overs))
Checks Stimulus for any warning conditions
16,588
def verifyExpanded(self, samplerate):
    """Verify every expanded (auto-parameter) variant of this stimulus;
    return the first failure message, or 0 when all variants pass."""
    failures = [m for m in self.expandFunction(self.verifyComponents,
                                               args=(samplerate,)) if m]
    return failures[0] if failures else 0
Checks the expanded parameters for invalidating conditions
16,589
def verifyComponents(self, samplerate):
    """Ask each component to verify itself against *samplerate*; return
    the first error message, or 0 when everything checks out."""
    for track in self._segments:
        for component in track:
            problem = component.verify(samplerate=samplerate)
            if problem:
                return problem
    return 0
Checks the current components for invalidating conditions
16,590
def verify(self, windowSize=None):
    """Check the stimulus, including expanded auto-parameters, for
    invalidating conditions.

    :param windowSize: optional acquisition window the stimulus must fit in
    :returns: 0 when valid, otherwise a message describing the problem
    """
    if self.samplerate() is None:
        return "Multiple recording files with conflicting samplerates"
    msg = self._autoParams.verify()
    if msg:
        return msg
    if self.traceCount() == 0:
        return "Test is empty"
    if windowSize is not None:
        # the first and last expanded traces bracket the possible durations
        durations = self.expandFunction(self.duration)
        if durations[0] > windowSize or durations[-1] > windowSize:
            return "Stimulus duration exceeds window duration"
    msg = self.verifyExpanded(self.samplerate())
    if msg:
        return msg
    if self.caldb is None or self.calv is None:
        return "Test reference voltage not set"
    if None in self.voltage_limits:
        return "Device voltage limits not set"
    return 0
Checks the stimulus including expanded parameters for invalidating conditions
16,591
def get_ao_chans(dev):
    """Return the physical analog-output channel names for NI device *dev*."""
    buf = create_string_buffer(256)
    DAQmxGetDevAOPhysicalChans(dev.encode(), buf, c_uint32(sizeof(buf)))
    return buf.value.decode(u'utf-8').split(u',')
Discover and return a list of the names of all analog output channels for the given device
16,592
def get_devices():
    """Return the names of all National Instruments devices on this system."""
    buf = create_string_buffer(512)
    DAQmxGetSysDevNames(buf, c_uint32(sizeof(buf)))
    return buf.value.decode(u'utf-8').split(u',')
Discover and return a list of the names of all NI devices on this system
16,593
def write(self, output):
    """Write *output* to the device buffer; generation loops the buffer
    when the data runs out.

    :param output: float64 sample array sized to ``self.bufsize``
    """
    w = c_int32()
    # 10.0 s timeout; samples grouped by channel (non-interleaved)
    self.WriteAnalogF64(self.bufsize, 0, 10.0, DAQmx_Val_GroupByChannel,
                        output, w, None)
Writes the data to be output to the device buffer output will be looped when the data runs out
16,594
def setXlimits(self, lims):
    """Apply the same X-axis limits to both the response and stimulus
    signal plots."""
    for plot in (self.responseSignalPlot, self.stimSignalPlot):
        plot.setXlim(lims)
Sets the X axis limits of the signal plots
16,595
def from_chars(cls, chars='', optimal=3):
    """Construct a Pat from *chars* (all alphanumerics when empty), split
    into the most even chunks of about *optimal* pieces."""
    pool = chars or ''.join(ALNUM)
    return cls(most_even_chunk(pool, optimal))
Construct a Pat object from the specified string and optimal position count .
16,596
def create(self, count):
    """Create a pattern string of length *count* from the pattern space.

    :raises IndexError: when *count* exceeds the space's capacity
    """
    # duplicate the iterator so consumption here does not drain self.space
    space, self.space = tee(self.space)
    capacity = reduce(mul, map(len, self.sets)) * self.position
    logging.debug('limit: %s', capacity)
    if capacity < count:
        raise IndexError('{count} Overflows {sets}!'.format(count=count, sets=self.sets))
    return ''.join(islice(space, count))
Create a pattern of the specified length .
16,597
def locate(self, pattern, big_endian=False):
    """Locate *pattern* in the pattern space and return its offset.

    :param pattern: literal text, or '0x'-prefixed hex which is decoded and
        (unless *big_endian*) byte-reversed to match in-memory layout
    :returns: index of the first window matching the pattern prefix
    :raises KeyError: when the pattern does not occur
    """
    # duplicate the iterator so the search does not consume self.space
    space, self.space = tee(self.space)
    if pattern.startswith('0x'):
        target = unhexlify(pattern[2:].encode('utf-8')).decode('utf-8')
        if not big_endian:
            # little-endian: reverse the bytes before matching
            target = target[::-1]
    else:
        target = pattern
    # slide a window of self.position characters over the space
    for index, one in enumerate(window(space, self.position)):
        if ''.join(one) == target[:self.position]:
            return index
    raise KeyError('{target} Not Found In {sets}!'.format(target=pattern, sets=self.sets))
Locate the pattern .
16,598
def preserve_namespace(newns=None):
    """Context manager: switch to *newns* for the duration of the block
    and restore the namespace that was current on entry."""
    original = cmds.namespaceInfo(an=True)
    try:
        cmds.namespace(set=newns)
        yield
    finally:
        cmds.namespace(set=original)
Contextmanager that will restore the current namespace
16,599
def preserve_selection():
    """Context manager: capture the current selection and restore it
    (with replace=True) when the block exits."""
    snapshot = cmds.ls(sl=True)
    try:
        yield
    finally:
        cmds.select(snapshot, replace=True)
Contextmanager that will restore the current selection