idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
15,900
def convert_config_value(self, value, label):
    """Convert truthy values to True and falsy values to False.

    Strings are lowercased before being checked against the item's
    TRUTHY_VALUES / FALSY_VALUES sets.

    Raises:
        YapconfValueError: If the value matches neither set.
    """
    if isinstance(value, six.string_types):
        value = value.lower()
    if value in self.TRUTHY_VALUES:
        return True
    if value in self.FALSY_VALUES:
        return False
    raise YapconfValueError(
        "Cowardly refusing to interpret "
        "config value as a boolean. Name: "
        "{0}, Value: {1}".format(self.name, value)
    )
Converts all Truthy values to True and Falsy values to False .
15,901
def add_argument(self, parser, bootstrap=False):
    """Add a list-style item as an argument to the given parser.

    Args:
        parser: The argparse parser to register the argument with.
        bootstrap: Whether this is a bootstrap-phase argument.
    """
    if self.cli_expose:
        if isinstance(self.child, YapconfBoolItem):
            # A boolean child needs both its "true" and "false" flags
            # registered: temporarily flip the child's default to generate
            # each flag's names and kwargs, then restore it.
            original_default = self.child.default
            self.child.default = True
            args = self.child._get_argparse_names(parser.prefix_chars)
            kwargs = self._get_argparse_kwargs(bootstrap)
            parser.add_argument(*args, **kwargs)
            self.child.default = False
            args = self.child._get_argparse_names(parser.prefix_chars)
            kwargs = self._get_argparse_kwargs(bootstrap)
            parser.add_argument(*args, **kwargs)
            # Restore the child's real default.
            self.child.default = original_default
        else:
            super(YapconfListItem, self).add_argument(parser, bootstrap)
Add list - style item as an argument to the given parser .
15,902
def add_argument(self, parser, bootstrap=False):
    """Add a dict-style item as an argument to the given parser.

    Delegates to each child item so every nested item is exposed.
    """
    if not self.cli_expose:
        return
    for item in self.children.values():
        item.add_argument(parser, bootstrap)
Add dict - style item as an argument to the given parser .
15,903
def log_time(func):
    """Decorator that executes *func* and logs its execution time."""
    @functools.wraps(func)
    def _execute(*args, **kwargs):
        name = get_method_name(func)
        timer = Timer()
        log_message(name, "has started")
        with timer:
            outcome = func(*args, **kwargs)
        elapsed = "{:.3f}".format(timer.elapsed_time())
        log_message(name, "has finished. Execution time:", elapsed, "s")
        return outcome
    return _execute
Executes function and logs time
15,904
def load_configuration(yaml: yaml.ruamel.yaml.YAML, filename: str) -> DictLike:
    """Load an analysis configuration from a file using the given YAML parser."""
    with open(filename, "r") as f:
        return yaml.load(f)
Load an analysis configuration from a file .
15,905
def override_options(config: DictLike, selected_options: Tuple[Any, ...], set_of_possible_options: Tuple[enum.Enum, ...], config_containing_override: DictLike = None) -> DictLike:
    """Determine and apply override options for a particular configuration.

    Args:
        config: Configuration to modify in place.
        selected_options: The options selected for this analysis.
        set_of_possible_options: All possible option enums.
        config_containing_override: Node holding the "override" block;
            defaults to ``config`` itself.

    Returns:
        The modified ``config``.

    Raises:
        KeyError: If an override key does not exist in ``config``.
        ValueError: If an anchored object is neither list, dict, nor scalar.
    """
    if config_containing_override is None:
        config_containing_override = config
    # Pop the override block so it does not linger in the final config.
    override_opts = config_containing_override.pop("override")
    override_dict = determine_override_options(selected_options, override_opts, set_of_possible_options)
    logger.debug(f"override_dict: {override_dict}")
    for k, v in override_dict.items():
        if k in config:
            try:
                # Accessing .anchor raises AttributeError for plain values.
                # Anchored ruamel.yaml objects must be mutated in place so
                # YAML anchors/aliases elsewhere still point at them.
                config[k].anchor
                logger.debug(f"type: {type(config[k])}, k: {k}")
                if isinstance(config[k], list):
                    # Replace list contents in place.
                    del config[k][:]
                    if isinstance(override_dict[k], (str, int, float, bool)):
                        config[k].append(override_dict[k])
                    else:
                        config[k].extend(override_dict[k])
                elif isinstance(config[k], dict):
                    # Replace dict contents in place.
                    config[k].clear()
                    config[k].update(override_dict[k])
                elif isinstance(config[k], (int, float, bool)):
                    logger.debug("Overwriting YAML anchor object. It is currently unclear how to reassign this value.")
                    config[k] = v
                else:
                    raise ValueError(f"Object {k} (type {type(config[k])}) somehow has an anchor, but is something other than a list or dict. Attempting to assign directly to it.")
            except AttributeError:
                # Plain (non-anchored) value: direct assignment is safe.
                config[k] = v
        else:
            raise KeyError(k, f"Trying to override key \"{k}\" that it is not in the config.")
    return config
Determine override options for a particular configuration .
15,906
def simplify_data_representations(config: DictLike) -> DictLike:
    """Convert one-entry lists in the config to their scalar value, in place."""
    for key, entry in config.items():
        if entry and isinstance(entry, list) and len(entry) == 1:
            logger.debug("v: {}".format(entry))
            config[key] = entry[0]
    return config
Convert one entry lists to the scalar value
15,907
def determine_selection_of_iterable_values_from_config(config: DictLike, possible_iterables: Mapping[str, Type[enum.Enum]]) -> Dict[str, List[Any]]:
    """Determine iterable values to use to create objects for a configuration.

    Args:
        config: Configuration containing an "iterables" mapping of
            iterable name -> bool or list of requested values.
        possible_iterables: Mapping of iterable name -> enum type (or None).

    Returns:
        Mapping of iterable name -> list of selected values.

    Raises:
        KeyError: If a requested iterable is not in possible_iterables.
        TypeError: If an iterable request is a string.
    """
    iterables = {}
    requested_iterables = config["iterables"]
    for k, v in requested_iterables.items():
        if k not in possible_iterables:
            raise KeyError(k, f"Cannot find requested iterable in possible_iterables: {possible_iterables}")
        logger.debug(f"k: {k}, v: {v}")
        additional_iterable: List[Any] = []
        enum_values = possible_iterables[k]
        # A string would silently iterate character-by-character; forbid it.
        if isinstance(v, str):
            raise TypeError(type(v), f"Passed string {v} when must be either bool or list")
        if v is False:
            # Explicitly disabled iterable.
            continue
        elif v is True:
            # True means "use every possible enum value".
            additional_iterable = list(enum_values)
        else:
            if enum_values is None:
                # Not enum-backed: take the requested values as-is.
                additional_iterable = list(v)
            else:
                # Map each requested name to its enum member.
                for el in v:
                    additional_iterable.append(enum_values[el])
        iterables[k] = additional_iterable
    return iterables
Determine iterable values to use to create objects for a given configuration .
15,908
def _key_index_iter ( self ) -> Iterator [ Tuple [ str , Any ] ] : for k , v in vars ( self ) . items ( ) : yield k , v
Allows for iteration over the KeyIndex values .
15,909
def create_key_index_object(key_index_name: str, iterables: Dict[str, Any]) -> Any:
    """Create a frozen KeyIndex dataclass whose fields mirror the iterables.

    Each field's type is taken from the first element of its iterable.
    Raises TypeError for one-shot iterators, which would be exhausted here.
    """
    for attr_name, candidate in iterables.items():
        # iter() on an iterator returns the iterator itself, so equality of
        # two iter() calls detects an exhaustible iterator.
        if iter(candidate) == iter(candidate):
            raise TypeError(
                f"Iterable {attr_name} is in iterator which can be exhausted. Please pass the iterable"
                f" in a container that can recreate the iterable. See the comments here for more info."
            )
    field_specs = [
        (attr_name, type(next(iter(candidate))))
        for attr_name, candidate in iterables.items()
    ]
    key_index_class = dataclasses.make_dataclass(key_index_name, field_specs, frozen=True)
    key_index_class.__iter__ = _key_index_iter
    return key_index_class
Create a KeyIndex class based on the passed attributes .
15,910
def create_objects_from_iterables(obj, args: dict, iterables: Dict[str, Any], formatting_options: Dict[str, Any], key_index_name: str = "KeyIndex") -> Tuple[Any, Dict[str, Any], dict]:
    """Create an instance of ``obj`` for each combination of iterable values.

    Args:
        obj: Class (or callable) constructed once per combination.
        args: Base keyword arguments; updated with each iterable value.
        iterables: Mapping of attribute name -> values to iterate over.
        formatting_options: String-formatting values applied to the args.
        key_index_name: Name for the generated KeyIndex dataclass.

    Returns:
        Tuple of (KeyIndex class, iterables, mapping of KeyIndex -> object).

    Raises:
        ValueError: If no objects could be created from the iterables.
    """
    objects = {}
    names = list(iterables)
    logger.debug(f"iterables: {iterables}")
    KeyIndex = create_key_index_object(key_index_name=key_index_name, iterables=iterables,)
    for values in itertools.product(*iterables.values()):
        logger.debug(f"Values: {values}")
        # Empty combination -> nothing to construct.
        if not values:
            continue
        for name, val in zip(names, values):
            args[name] = val
            formatting_options[name] = str(val)
        # Copy so formatting does not clobber the shared base args.
        object_args = copy.copy(args)
        logger.debug(f"object_args pre format: {object_args}")
        object_args = apply_formatting_dict(object_args, formatting_options)
        # Log a readable version with the (potentially huge) config elided.
        print_args = {k: v for k, v in object_args.items() if k != "config"}
        print_args["config"] = "..."
        logger.debug(f"Constructing obj \"{obj}\" with args: \"{print_args}\"")
        objects[KeyIndex(*values)] = obj(**object_args)
    if not objects:
        raise ValueError(iterables, "There appear to be no iterables to use in creating objects.")
    return (KeyIndex, iterables, objects)
Create objects for each set of values based on the given arguments .
15,911
def apply_formatting_dict(obj: Any, formatting: Dict[str, Any]) -> Any:
    """Recursively apply a formatting dict to all strings in a configuration.

    Dicts and lists are walked recursively; numbers, None and enums are
    returned unchanged.
    """
    formatted = obj
    if isinstance(obj, str):
        # Strings containing "$" are skipped — presumably to protect
        # templated values from str.format; confirm against callers.
        if "$" not in obj:
            formatted = string.Formatter().vformat(obj, (), formatting_dict(**formatting))
    elif isinstance(obj, dict):
        formatted = {key: apply_formatting_dict(value, formatting) for key, value in obj.items()}
    elif isinstance(obj, list):
        formatted = [apply_formatting_dict(entry, formatting) for entry in obj]
    elif isinstance(obj, int) or isinstance(obj, float) or obj is None:
        pass
    elif isinstance(obj, enum.Enum):
        pass
    else:
        logger.debug(f"Unrecognized obj '{obj}' of type '{type(obj)}'")
    return formatted
Recursively apply a formatting dict to all strings in a configuration .
15,912
def iterate_with_selected_objects(analysis_objects: Mapping[Any, Any], **selections: Mapping[str, Any]) -> Iterator[Tuple[Any, Any]]:
    """Iterate over an analysis dictionary, yielding only entries whose key
    attributes match every given selection (or everything if no selections).
    """
    for key_index, obj in analysis_objects.items():
        matches = all(
            getattr(key_index, selector) == wanted
            for selector, wanted in selections.items()
        )
        if not selections or matches:
            yield key_index, obj
Iterate over an analysis dictionary with selected attributes .
15,913
def iterate_with_selected_objects_in_order(analysis_objects: Mapping[Any, Any], analysis_iterables: Dict[str, Sequence[Any]], selection: Union[str, Sequence[str]]) -> Iterator[List[Tuple[Any, Any]]]:
    """Iterate over an analysis dictionary, yielding the selected attributes in order.

    For each combination of the non-selected iterables, yields the list of
    (key_index, object) pairs matching every combination of the selected
    iterables, so the selected attributes vary together within one yield.

    Args:
        analysis_objects: Mapping of KeyIndex-like keys to analysis objects.
        analysis_iterables: All iterables (name -> possible values) that
            were used to build the keys.
        selection: Name(s) of the iterable(s) to group by.

    Yields:
        One list of (key_index, object) pairs per combination of the
        remaining (non-selected) iterables.
    """
    # Normalize a single selection name to a list.
    if isinstance(selection, str):
        selection = [selection]
    # Narrow the type for static checkers.
    assert not isinstance(selection, str)
    # Copy so popping the selected iterables does not mutate the caller's dict.
    analysis_iterables = copy.copy(analysis_iterables)
    selected_iterators = {}
    for s in selection:
        selected_iterators[s] = analysis_iterables.pop(s)
    logger.debug(f"Initial analysis_iterables: {analysis_iterables}")
    logger.debug(f"Initial selected_iterators: {selected_iterators}")
    # Convert each mapping to lists of (name, value) pairs for itertools.product.
    selected_iterators = [[(k, v) for v in values] for k, values in selected_iterators.items()]
    analysis_iterables = [[(k, v) for v in values] for k, values in analysis_iterables.items()]
    logger.debug(f"Final analysis_iterables: {analysis_iterables}")
    logger.debug(f"Final selected_iterators: {selected_iterators}")
    for values in itertools.product(*analysis_iterables):
        selected_analysis_objects = []
        for selected_values in itertools.product(*selected_iterators):
            for key_index, obj in analysis_objects.items():
                # Keep an object only if it matches both the fixed values and
                # the current selected-attribute values.
                selected_via_analysis_iterables = all(getattr(key_index, k) == v for k, v in values)
                selected_via_selected_iterators = all(getattr(key_index, k) == v for k, v in selected_values)
                selected_obj = selected_via_analysis_iterables and selected_via_selected_iterators
                if selected_obj:
                    selected_analysis_objects.append((key_index, obj))
        logger.debug(f"Yielding: {selected_analysis_objects}")
        yield selected_analysis_objects
Iterate over an analysis dictionary yielding the selected attributes in order .
15,914
def _db(self):
    """Database client for accessing storage, lazily created and cached."""
    client = getattr(self, "_db_client", None)
    if client is None:
        client = get_db_client()
        self._db_client = client
    return client
Database client for accessing storage .
15,915
async def filter_new_posts(self, source_id, post_ids):
    """Filter the given post ids down to the ones not yet known in storage.

    Best-effort: on any storage error an empty list is returned and the
    error is logged.
    """
    new_ids = []
    try:
        known_posts = await self._db.get_known_posts(source_id, post_ids)
        new_ids = [post_id for post_id in post_ids if post_id not in known_posts]
    except Exception as exc:
        logger.error("Error when filtering for new posts {} {}".format(source_id, post_ids))
        logger.exception(exc)
    return new_ids
Filters a list of post_ids for new ones.
15,916
async def get_last_updated(self, source_id):
    """Return the latest update timestamp from storage for the given source."""
    timestamp = await self._db.get_last_updated(source_id)
    logger.info("LAST UPDATED: {} {}".format(timestamp, self))
    return timestamp
Returns latest update - timestamp from storage for source .
15,917
def clearParameters(self):
    """Remove all parameters from the model.

    Wraps the reset in begin/endRemoveRows so attached views update.
    """
    count = self.rowCount()
    if count > 0:
        # Qt's beginRemoveRows takes the *inclusive* index of the last
        # removed row, i.e. rowCount() - 1. The original passed rowCount(),
        # announcing one more row than actually exists.
        self.beginRemoveRows(QtCore.QModelIndex(), 0, count - 1)
        self.model.clear_parameters()
        self.endRemoveRows()
    else:
        # Nothing to announce; still make sure the backing model is empty.
        self.model.clear_parameters()
Removes all parameters from model
15,918
def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
    """Insert *rows* new parameters at *position*.

    Emits emptied(False) when the model gains its first row.
    """
    self.beginInsertRows(parent, position, position + rows - 1)
    for _ in range(rows):
        self.model.insertRow(position)
    self.endInsertRows()
    if self.rowCount() == 1:
        self.emptied.emit(False)
    return True
Inserts new parameters and emits an emptied False signal
15,919
def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
    """Remove *rows* parameters starting at *position*.

    Emits emptied(True) if no parameters remain afterwards.
    """
    self.beginRemoveRows(parent, position, position + rows - 1)
    for _ in range(rows):
        self.model.removeRow(position)
    self.endRemoveRows()
    if self.rowCount() == 0:
        self.emptied.emit(True)
    return True
Removes parameters from the model. Emits an emptied True signal if there are no parameters left.
15,920
def toggleSelection(self, index, comp):
    """Toggle *comp* in or out of the selected parameter's components list."""
    row = index.row()
    self.model.toggleSelection(row, comp)
Toggles a component in or out of the currently selected parameter's components list
15,921
def parseruninfo(self):
    """Extract the flowcell ID and instrument name from RunInfo.xml.

    If the file cannot be read, the attributes are simply left unset and
    run-stat parsing proceeds anyway.
    """
    try:
        runinfo = ElementTree.ElementTree(file=self.runinfo)
        # Walk every element; grab the run ID/number from children that
        # carry 'Id'/'Number' attributes.
        for elem in runinfo.iter():
            for run in elem:
                try:
                    self.runid = run.attrib['Id']
                    self.runnumber = run.attrib['Number']
                except KeyError:
                    break
        for elem in runinfo.iter(tag="Flowcell"):
            self.flowcell = elem.text
        for elem in runinfo.iter(tag="Instrument"):
            self.instrument = elem.text
    except IOError:
        # RunInfo.xml not supplied; leave defaults in place.
        pass
    self.parserunstats()
Extracts the flowcell ID as well as the instrument name from RunInfo . xml . If this file is not provided NA values are substituted
15,922
def get_lines(self):
    """Read the file at ``self.path``, cache the lines on ``self.lines``
    and return them."""
    with open(self.path, "r") as handle:
        contents = handle.readlines()
    self.lines = contents
    return self.lines
Gets lines in file
15,923
def get_matrix(self):
    """Parse the CSV file at ``self.path`` and return its rows as a list of lists."""
    data = []
    # newline="" is the documented way to open files for the csv module:
    # it prevents universal-newline translation from corrupting quoted
    # fields containing embedded line breaks.
    with open(self.path, encoding=self.encoding, newline="") as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=",", quotechar="\"")
        data.extend(csv_reader)
    return data
Parses the CSV file and stores its rows as a list of lists
15,924
def get_dicts(self):
    """Yield each row of the CSV file at ``self.path`` as a dict keyed by
    the header row.

    Fix: the original opened the file without ever closing it; the ``with``
    block ties the handle's lifetime to the generator.
    """
    with open(self.path, "r", encoding=self.encoding) as handle:
        for row in csv.DictReader(handle):
            if row:
                yield row
Gets dicts in file
15,925
def get_by_id(self, id_code: str) -> Currency or None:
    """Return the first currency whose ``id`` equals *id_code*, or None.

    Uses next() on a generator so the search stops at the first match
    instead of building the full list of matches first (the original
    materialized every match and caught IndexError).
    """
    return next((currency for currency in self.currencies if currency.id == id_code), None)
Get currency by ID
15,926
def get_stimuli_models():
    """Return all AbstractStimulusComponent subclasses defined in python
    files in this package.

    Fixes: ``dict.iteritems()`` is Python 2 only and raises AttributeError
    on Python 3; ``type(attr) == type`` misses classes built by a custom
    metaclass, so isinstance is used instead.
    """
    package_path = os.path.dirname(__file__)
    # Module prefix for imports: empty when run as a script.
    mod = get_stimuli_models.__module__
    if mod == '__main__':
        mod = ''
    else:
        mod = mod + '.'
    module_files = glob.glob(package_path + os.sep + '[a-zA-Z]*.py')
    module_names = [os.path.splitext(os.path.basename(x))[0] for x in module_files]
    module_paths = [mod + x for x in module_names]
    modules = [__import__(x, fromlist=['*']) for x in module_paths]
    stimuli = []
    for module in modules:
        # .items() replaces the Python-2-only .iteritems().
        for name, attr in module.__dict__.items():
            if isinstance(attr, type) and issubclass(attr, AbstractStimulusComponent):
                stimuli.append(attr)
    return stimuli
Returns all subclasses of AbstractStimulusComponent in python files in this package
15,927
def parse_wait_time(text: str) -> int:
    """Parse the waiting time, in seconds, out of a ratelimit message.

    Falls back to 60 seconds when no time can be parsed.
    """
    matches = RATELIMIT.findall(text)
    if matches:
        try:
            hit = matches[0]
            if hit[1] == 'minutes':
                return int(hit[0]) * 60
            if hit[1] == 'seconds':
                return int(hit[0])
        except Exception as e:
            util_logger.warning('Could not parse ratelimit: ' + str(e))
    return 60
Parse the waiting time from the exception
15,928
def check_comment_depth(comment: praw.models.Comment, max_depth=3) -> bool:
    """Return True if *comment* sits within *max_depth* levels of the thread root."""
    depth = 0
    while not comment.is_root:
        depth += 1
        if depth > max_depth:
            return False
        comment = comment.parent()
    return True
Check if a comment is in an allowed depth range
15,929
def get_subs(subs_file='subreddits.txt', blacklist_file='blacklist.txt') -> List[str]:
    """Return the sorted, lowercased subreddits from *subs_file* minus
    those listed in *blacklist_file*.

    Fix: uses ``with`` so the file handles are closed even if a read fails
    (the original closed them manually only after both reads succeeded).
    """
    def _read_names(path):
        # One name per line; lowercase and strip the newline.
        with open(path) as handle:
            return [line.lower().replace('\n', '') for line in handle.readlines()]

    subs = _read_names(subs_file)
    blacklisted = _read_names(blacklist_file)
    return list(sorted(set(subs).difference(set(blacklisted))))
Get subs based on a file of subreddits and a file of blacklisted subreddits .
15,930
def enable_all_links():
    """Enable every data link on the simulator instance."""
    for data_link in client.list_data_links(instance='simulator'):
        client.enable_data_link(instance=data_link.instance, link=data_link.name)
Enable all links .
15,931
def load_a3m(fasta, max_gap_fraction=0.9):
    """Load an a3m alignment with the alphabet used in GaussDCA.

    Sequences with a gap fraction above *max_gap_fraction* are skipped;
    lowercase (insert) columns are dropped. Returns an int8 array of shape
    (alignment_length, n_sequences).

    Fix: the original never closed the file handle; a ``with`` block now
    scopes it.
    """
    # 21 is the shared "unknown/gap" code; 22 is the fallback for any
    # character not in the mapping.
    mapping = {'-': 21, 'A': 1, 'B': 21, 'C': 2, 'D': 3, 'E': 4, 'F': 5,
               'G': 6, 'H': 7, 'I': 8, 'K': 9, 'L': 10, 'M': 11, 'N': 12,
               'O': 21, 'P': 13, 'Q': 14, 'R': 15, 'S': 16, 'T': 17,
               'V': 18, 'W': 19, 'Y': 20, 'U': 21, 'Z': 21, 'X': 21, 'J': 21}
    lowercase = set('abcdefghijklmnopqrstuvwxyz')
    with open(fasta) as f:
        # Find the first sequence line to establish the reference length.
        for line in f:
            if line.startswith('>'):
                continue
            seq_length = len(line.strip())
            break
        else:
            raise RuntimeError('I cannot find the first sequence')
        f.seek(0)
        parsed = []
        for line in f:
            if line.startswith('>'):
                continue
            line = line.strip()
            gap_fraction = line.count('-') / seq_length
            if gap_fraction <= max_gap_fraction:
                parsed.append([mapping.get(ch, 22) for ch in line if ch not in lowercase])
    return np.array(parsed, dtype=np.int8).T
load alignment with the alphabet used in GaussDCA
15,932
def run_plasmid_extractor(self):
    """Create and run the PlasmidExtractor system call."""
    logging.info('Extracting plasmids')
    # Assemble the PlasmidExtractor command line from the sample paths,
    # plasmid database location and CPU count.
    extract_command = 'PlasmidExtractor.py -i {inf} -o {outf} -p {plasdb} -d {db} -t {cpus} -nc'.format(inf=self.path, outf=self.plasmid_output, plasdb=os.path.join(self.plasmid_db, 'plasmid_db.fasta'), db=self.plasmid_db, cpus=self.cpus)
    # Skip the (expensive) run if the report already exists from a
    # previous invocation.
    if not os.path.isfile(self.plasmid_report):
        out, err = run_subprocess(extract_command)
        # Serialize log-file writes across worker threads.
        self.threadlock.acquire()
        write_to_logfile(extract_command, extract_command, self.logfile)
        write_to_logfile(out, err, self.logfile)
        self.threadlock.release()
Create and run the plasmid extractor system call
15,933
def parse_report(self):
    """Parse the PlasmidExtractor report and populate the metadata objects.

    Reads the CSV report into a nested {row: {column: value}} dict, then
    attaches the plasmid hits to each matching sample's metadata object and
    copies the report into the report path.
    """
    logging.info('Parsing Plasmid Extractor outputs')
    nesteddictionary = dict()
    # pandas gives {column: {row: value}}; invert to {row: {column: value}}.
    dictionary = pandas.read_csv(self.plasmid_report).to_dict()
    for header in dictionary:
        for sample, value in dictionary[header].items():
            try:
                nesteddictionary[sample].update({header: value})
            except KeyError:
                nesteddictionary[sample] = dict()
                nesteddictionary[sample].update({header: value})
    for sample in self.metadata:
        # Fresh analysis object with an empty plasmid list for every sample.
        setattr(sample, self.analysistype, GenObject())
        sample[self.analysistype].plasmids = list()
        for line in nesteddictionary:
            sample_name = nesteddictionary[line]['Sample']
            # NOTE(review): filer() appears to normalize the reported file
            # name to a sample name — confirm against its definition.
            name = list(filer([sample_name]))[0]
            if name == sample.name:
                sample[self.analysistype].plasmids.append(nesteddictionary[line]['Plasmid'])
    try:
        shutil.copyfile(self.plasmid_report, os.path.join(self.reportpath, 'plasmidReport.csv'))
    except IOError:
        # Report path not writable/available; the per-sample data is
        # already populated, so continue.
        pass
Parse the plasmid extractor report and populate metadata objects
15,934
def object_clean(self):
    """Remove large per-sample attributes from the metadata objects."""
    bulky_attributes = ('aaidentity', 'aaalign', 'aaindex', 'ntalign',
                        'ntindex', 'dnaseq', 'blastresults')
    for sample in self.metadata:
        try:
            for attribute in bulky_attributes:
                delattr(sample[self.analysistype], attribute)
        except AttributeError:
            # Attribute already absent; nothing more to strip here.
            pass
Remove large attributes from the metadata objects
15,935
def values(self):
    """Return the (frequency scale, time scale) pair the user selected in the UI."""
    fscale = SmartSpinBox.Hz if self.ui.hzBtn.isChecked() else SmartSpinBox.kHz
    tscale = SmartSpinBox.MilliSeconds if self.ui.msBtn.isChecked() else SmartSpinBox.Seconds
    return fscale, tscale
Gets the scales that the user chose
15,936
def insert_trie(trie, value):
    """Insert *value* into the prefix trie if not already present.

    Returns the subtree for *value* whether or not it was newly inserted.
    """
    if value in trie:
        return trie[value]
    reparented = False
    for existing in tuple(trie.keys()):
        if len(value) > len(existing) and value.startswith(existing):
            # value belongs beneath an existing, shorter prefix: recurse.
            return insert_trie(trie[existing], value)
        if existing.startswith(value):
            # value is a prefix of an existing key: move that key under it.
            if not reparented:
                trie[value] = {}
                reparented = True
            trie[value][existing] = trie.pop(existing)
    if value not in trie:
        trie[value] = {}
    return trie[value]
Insert a value into the trie if it is not already contained in the trie . Return the subtree for the value regardless of whether it is a new value or not .
15,937
def get_valid_cell_indecies(self):
    """Return a dataframe of frames present, where 'valid' holds the list of
    cell indices that can be included for each frame."""
    frame_groups = pd.DataFrame(self).groupby(self.frame_columns)
    per_frame = frame_groups.apply(lambda group: list(group['cell_index']))
    return per_frame.reset_index().rename(columns={0: 'valid'})
Return a dataframe of images present, with valid being a list of cell indices that can be included
15,938
def prune_neighbors(self):
    """Drop cell-cell contacts that refer to cells no longer in the dataset.

    If the CellDataFrame has been subsetted, some recorded neighbors may no
    longer exist; each row's neighbors dict is restricted to the valid cell
    indices of its frame.

    Returns:
        A new CellDataFrame with pruned neighbors and the metadata
        (microns_per_pixel, db) carried over.
    """
    def _neighbor_check(neighbors, valid):
        # NaN != NaN: rows with no neighbor data stay NaN.
        if not neighbors == neighbors:
            return np.nan
        valid_keys = set(valid) & set(neighbors.keys())
        d = dict([(k, v) for k, v in neighbors.items() if k in valid_keys])
        return d
    fixed = self.copy()
    valid = self.get_valid_cell_indecies()
    # Attach each frame's valid index list to every row of that frame.
    valid = pd.DataFrame(self).merge(valid, on=self.frame_columns).set_index(self.frame_columns + ['cell_index'])
    # Restrict each row's neighbors to the valid indices.
    valid = valid.apply(lambda x: _neighbor_check(x['neighbors'], x['valid']), 1).reset_index().rename(columns={0: 'new_neighbors'})
    # Swap the pruned neighbors in for the originals.
    fixed = fixed.merge(valid, on=self.frame_columns + ['cell_index']).drop(columns='neighbors').rename(columns={'new_neighbors': 'neighbors'})
    # merge() returns a plain frame view; restore the custom metadata.
    fixed.microns_per_pixel = self.microns_per_pixel
    fixed.db = self.db
    return fixed
If the CellDataFrame has been subsetted, some of the cell-cell contacts may no longer be part of the dataset. This prunes those no-longer-existent connections.
15,939
def to_hdf(self, path, key, mode='a'):
    """Save the CellDataFrame to an hdf5 file, storing microns_per_pixel
    as an hdf5 attribute alongside the serialized table."""
    serialized = pd.DataFrame(self.serialize())
    serialized.to_hdf(path, key, mode=mode, format='table', complib='zlib', complevel=9)
    mpp = float(self.microns_per_pixel) if self.microns_per_pixel is not None else np.nan
    store = h5py.File(path, 'r+')
    store[key].attrs["microns_per_pixel"] = mpp
    store.close()
Save the CellDataFrame to an hdf5 file .
15,940
def phenotypes_to_scored(self, phenotypes=None, overwrite=False):
    """Add mutually exclusive phenotypes to the scored calls.

    Args:
        phenotypes: Name or list of phenotype names to convert; defaults
            to all phenotypes.
        overwrite: If False, raise when a phenotype already exists as a
            scored type.

    Returns:
        A new CellDataFrame with the phenotypes merged into scored_calls.

    Raises:
        ValueError: If phenotypes are inconsistent across rows, or a
            phenotype already exists as a scored call and overwrite is False.
    """
    if not self.is_uniform():
        raise ValueError("inconsistent phenotypes")
    if phenotypes is None:
        phenotypes = self.phenotypes
    elif isinstance(phenotypes, str):
        phenotypes = [phenotypes]
    def _post(binary, phenotype_label, phenotypes, overwrite):
        d = binary.copy()
        if len(set(phenotypes) & set(list(binary.keys()))) > 0 and overwrite == False:
            raise ValueError("Error, phenotype already exists as a scored type")
        # Zero every requested phenotype, then flip on the row's own label.
        for label in phenotypes:
            d[label] = 0
        # label == label filters out NaN (unlabeled) rows.
        if phenotype_label == phenotype_label and phenotype_label in phenotypes:
            d[phenotype_label] = 1
        return d
    output = self.copy()
    output['scored_calls'] = output.apply(lambda x: _post(x['scored_calls'], x['phenotype_label'], phenotypes, overwrite), 1)
    return output
Add mutually exclusive phenotypes to the scored calls
15,941
def concat(self, array_like):
    """Concatenate multiple CellDataFrames that share one microns_per_pixel.

    Raises:
        ValueError: If the frames disagree on microns_per_pixel.
    """
    frames = list(array_like)
    scales = set(frame.microns_per_pixel for frame in frames)
    if len(scales) != 1:
        raise ValueError("Multiple microns per pixel set")
    combined = CellDataFrame(pd.concat([pd.DataFrame(frame) for frame in frames]))
    combined.microns_per_pixel = frames[0].microns_per_pixel
    return combined
Concatenate multiple CellDataFrames
15,942
def read_hdf(cls, path, key=None):
    """Read a CellDataFrame from an hdf5 file.

    Args:
        path: Path to the hdf5 file.
        key: Key the frame was stored under.

    Returns:
        The reconstructed CellDataFrame, with microns_per_pixel restored
        from the stored hdf5 attribute when present.
    """
    df = pd.read_hdf(path, key)
    # JSON-decode the columns that serialize() encoded.
    df['scored_calls'] = df['scored_calls'].apply(lambda x: json.loads(x))
    df['channel_values'] = df['channel_values'].apply(lambda x: json.loads(x))
    df['regions'] = df['regions'].apply(lambda x: json.loads(x))
    df['phenotype_calls'] = df['phenotype_calls'].apply(lambda x: json.loads(x))
    df['neighbors'] = df['neighbors'].apply(lambda x: json.loads(x))
    # JSON object keys are always strings; restore the integer cell-index
    # keys (non-dict entries represent missing neighbor data).
    df['neighbors'] = df['neighbors'].apply(lambda x: np.nan if not isinstance(x, dict) else dict(zip([int(y) for y in x.keys()], x.values())))
    df['frame_shape'] = df['frame_shape'].apply(lambda x: tuple(json.loads(x)))
    df = cls(df)
    f = h5py.File(path, 'r')
    mpp = f[key].attrs["microns_per_pixel"]
    if not np.isnan(mpp):
        df.microns_per_pixel = mpp
    f.close()
    return df
Read a CellDataFrame from an hdf5 file .
15,943
def serialize(self):
    """Return a copy with the complex-valued columns JSON-encoded so the
    frame can be stored in h5 structures."""
    json_columns = ['scored_calls', 'channel_values', 'regions',
                    'phenotype_calls', 'neighbors', 'frame_shape']
    encoded = self.copy()
    for column in json_columns:
        encoded[column] = encoded[column].apply(lambda value: json.dumps(value))
    return encoded
Convert the data to one that can be saved in h5 structures
15,944
def contacts(self, *args, **kwargs):
    """Return a Contacts object for assessing the cell-to-cell contacts
    recorded in the CellDataFrame."""
    result = Contacts.read_cellframe(self, prune_neighbors=True)
    result.measured_regions = kwargs['measured_regions'] if 'measured_regions' in kwargs else self.get_measured_regions()
    result.measured_phenotypes = kwargs['measured_phenotypes'] if 'measured_phenotypes' in kwargs else self.phenotypes
    result.microns_per_pixel = self.microns_per_pixel
    return result
Use assess the cell - to - cell contacts recorded in the celldataframe
15,945
def cartesian(self, subsets=None, step_pixels=100, max_distance_pixels=150, *args, **kwargs):
    """Return a Cartesian object that can be used to create honeycomb plots."""
    result = Cartesian.read_cellframe(self, subsets=subsets, step_pixels=step_pixels, max_distance_pixels=max_distance_pixels, prune_neighbors=False, *args, **kwargs)
    result.measured_regions = kwargs['measured_regions'] if 'measured_regions' in kwargs else self.get_measured_regions()
    result.measured_phenotypes = kwargs['measured_phenotypes'] if 'measured_phenotypes' in kwargs else self.phenotypes
    result.microns_per_pixel = self.microns_per_pixel
    return result
Return a class that can be used to create honeycomb plots
15,946
def counts(self, *args, **kwargs):
    """Return a Counts object that can be used to access count densities."""
    result = Counts.read_cellframe(self, prune_neighbors=False)
    result.measured_regions = kwargs['measured_regions'] if 'measured_regions' in kwargs else self.get_measured_regions()
    result.measured_phenotypes = kwargs['measured_phenotypes'] if 'measured_phenotypes' in kwargs else self.phenotypes
    result.microns_per_pixel = self.microns_per_pixel
    result.minimum_region_size_pixels = kwargs['minimum_region_size_pixels'] if 'minimum_region_size_pixels' in kwargs else 1
    return result
Return a class that can be used to access count densities
15,947
def merge_scores(self, df_addition, reference_markers='all', addition_markers='all', on=['project_name', 'sample_name', 'frame_name', 'cell_index']):
    """Combine CellDataFrames that differ by score composition.

    Args:
        df_addition: CellDataFrame whose scored calls are merged in.
        reference_markers: Scored names to keep from self; any string means
            "all of self's scored names", None means none.
        addition_markers: Scored names to take from df_addition; same
            conventions as reference_markers.
        on: Columns identifying a cell across both frames.
            NOTE(review): mutable default argument — shared across calls if
            ever mutated; confirm callers never modify it.

    Returns:
        tuple: (merged CellDataFrame, rows of self with no match in
        df_addition), both with microns_per_pixel carried over when set.
    """
    if isinstance(reference_markers, str):
        reference_markers = self.scored_names
    elif reference_markers is None:
        reference_markers = []
    if isinstance(addition_markers, str):
        addition_markers = df_addition.scored_names
    elif addition_markers is None:
        addition_markers = []
    df_addition = df_addition.copy()
    # _key marks rows present in the addition so unmatched rows can be
    # split out after the left merge.
    df_addition['_key'] = 1
    df = self.merge(df_addition[['scored_calls', '_key'] + on].rename(columns={'scored_calls': '_addition'}), on=on, how='left')
    # Subset each side's scored calls to the requested markers.
    df['_sub1'] = df['scored_calls'].apply(lambda x: dict((k, x[k]) for k in reference_markers))
    # x != x is the NaN test: unmatched rows contribute no addition markers.
    df['_sub2'] = df['_addition'].apply(lambda x: dict({}) if x != x else dict((k, x[k]) for k in addition_markers))
    # Merge the two subsets; the addition wins on key collisions.
    df['scored_calls'] = df.apply(lambda x: {**x['_sub1'], **x['_sub2']}, 1)
    df = df.drop(columns=['_sub1', '_sub2', '_addition'])
    # Rebind df to a 2-tuple: (full merged frame, rows that failed to match).
    df = df.drop(columns='_key').copy(), df[df['_key'].isna()].drop(columns='_key').copy()
    if self.microns_per_pixel:
        df[0].microns_per_pixel = self.microns_per_pixel
    if self.microns_per_pixel:
        df[1].microns_per_pixel = self.microns_per_pixel
    return df
Combine CellDataFrames that differ by score composition
15,948
def zero_fill_missing_phenotypes(self):
    """Fill in missing phenotype calls as negative (0) so every row shares
    the same set of phenotype keys."""
    if self.is_uniform(verbose=False):
        return self.copy()
    filled = self.copy()
    all_names = self.phenotypes

    def _complete(calls, names):
        completed = dict(calls)
        for absent in set(names) - set(calls.keys()):
            completed[absent] = 0
        return completed

    filled['phenotype_calls'] = filled.apply(lambda row: _complete(row['phenotype_calls'], all_names), 1)
    return filled
Fill in missing phenotypes and scored types by listing any missing data as negative
15,949
def drop_scored_calls(self, names):
    """Remove the given scored-call name(s) from every row's scored_calls dict.

    Accepts a single name or a list of names; returns a new frame.
    """
    if isinstance(names, str):
        names = [names]
    to_drop = set(names)
    result = self.copy()
    result['scored_calls'] = result['scored_calls'].apply(
        lambda calls: {marker: value for marker, value in calls.items() if marker not in to_drop}
    )
    return result
Take a name or list of scored call names and drop those from the scored calls
15,950
def subset(self, logic, update=False):
    """Subset the CellDataFrame according to a SubsetLogic.

    Takes the union of all the phenotypes listed (if none are listed, all
    phenotypes are used), then the intersection of all the scored calls.

    Args:
        logic: SubsetLogic-like object with .phenotypes, .scored_calls
            and .label.
        update: If True, relabel every kept cell as logic.label.

    Returns:
        The subsetted CellDataFrame.

    Raises:
        ValueError: If a requested phenotype or scored name is undefined.
    """
    pnames = self.phenotypes
    snames = self.scored_names
    data = self.copy()
    values = []
    phenotypes = logic.phenotypes
    if len(phenotypes) == 0:
        phenotypes = pnames
    # Phenotypes not requested get stripped from each row's call dict.
    removing = set(self.phenotypes) - set(phenotypes)
    for k in phenotypes:
        if k not in pnames:
            raise ValueError("phenotype must exist in defined")
        # Union step: collect rows positive for each requested phenotype.
        temp = data.loc[data['phenotype_calls'].apply(lambda x: x[k] == 1)].copy()
        if len(removing) > 0 and temp.shape[0] > 0:
            temp['phenotype_calls'] = temp.apply(lambda x: dict([(k, v) for k, v in x['phenotype_calls'].items() if k not in removing]), 1)
        values.append(temp)
    data = pd.concat(values)
    # Intersection step: apply each scored-call filter in turn.
    for k, v in logic.scored_calls.items():
        if k not in snames:
            raise ValueError("Scored name must exist in defined")
        # '-' selects negative (0) calls, anything else selects positive (1).
        myfilter = 0 if v == '-' else 1
        data = data.loc[data['scored_calls'].apply(lambda x: x[k] == myfilter)]
    data.microns_per_pixel = self.microns_per_pixel
    if update:
        # Collapse all kept cells into the single labeled phenotype.
        data['phenotype_calls'] = data['phenotype_calls'].apply(lambda x: {logic.label: 1})
        data.fill_phenotype_label(inplace=True)
    data.db = self.db
    return data
Subset to create a specific phenotype based on a SubsetLogic: take the union of all the phenotypes listed (if none are listed, use all phenotypes), then take the intersection of all the scored calls.
15,951
def collapse_phenotypes(self, input_phenotype_labels, output_phenotype_label, verbose=True):
    """Rename one or more input phenotypes to a single output phenotype.

    Args:
        input_phenotype_labels: Phenotype name or list of names to collapse.
        output_phenotype_label: Name of the combined phenotype.
        verbose: Not referenced in this body; kept for interface
            compatibility.

    Returns:
        A new CellDataFrame with the phenotypes collapsed and the
        phenotype_label column refreshed.

    Raises:
        ValueError: If any input phenotype is not present in the data.
    """
    if isinstance(input_phenotype_labels, str):
        input_phenotype_labels = [input_phenotype_labels]
    bad_phenotypes = set(input_phenotype_labels) - set(self.phenotypes)
    if len(bad_phenotypes) > 0:
        raise ValueError("Error phenotype(s) " + str(bad_phenotypes) + " are not in the data.")
    data = self.copy()
    if len(input_phenotype_labels) == 0:
        return data
    def _swap_in(d, inputs, output):
        # Keep calls not being collapsed; the collapsed output call is
        # positive if any input call was positive (max of their values).
        overlap = set(d.keys()).intersection(inputs)
        if len(overlap) == 0:
            return d
        keepers = [(k, v) for k, v in d.items() if k not in inputs]
        return dict(keepers + [(output_phenotype_label, max([d[x] for x in overlap]))])
    data['phenotype_calls'] = data.apply(lambda x: _swap_in(x['phenotype_calls'], input_phenotype_labels, output_phenotype_label), 1)
    def _set_label(d):
        # Mutual exclusion: the label is the single positive call, else NaN.
        vals = [k for k, v in d.items() if v == 1]
        return np.nan if len(vals) == 0 else vals[0]
    data['phenotype_label'] = data.apply(lambda x: _set_label(x['phenotype_calls']), 1)
    return data
Rename one or more input phenotypes to a single output phenotype
15,952
def fill_phenotype_label(self, inplace=False):
    """Set the phenotype_label column according to our rules for mutual exclusion.

    :param inplace: when True modify this frame (returns None, or self when
        empty); otherwise return an updated copy
    """
    def _get_phenotype(d):
        # first positively-called phenotype, NaN when none are set
        vals = [k for k, v in d.items() if v == 1]
        return np.nan if len(vals) == 0 else vals[0]
    if inplace:
        if self.shape[0] == 0:
            return self
        self['phenotype_label'] = self.apply(lambda x: _get_phenotype(x['phenotype_calls']), 1)
        return
    fixed = self.copy()
    if fixed.shape[0] == 0:
        return fixed
    fixed['phenotype_label'] = fixed.apply(lambda x: _get_phenotype(x['phenotype_calls']), 1)
    return fixed
Set the phenotype_label column according to our rules for mutual exclusion
15,953
def fill_phenotype_calls(self, phenotypes=None, inplace=False):
    """Set the phenotype_calls dict column from the phenotype_label column.

    :param phenotypes: phenotype names to include; defaults to the unique
        labels present in the frame
    :param inplace: when True modify this frame (returns None); otherwise
        return an updated copy
    """
    if phenotypes is None:
        phenotypes = list(self['phenotype_label'].unique())
    def _get_calls(label, phenos):
        d = dict([(x, 0) for x in phenos])
        # NaN label (label != label) means no phenotype is set
        if label != label:
            return d
        d[label] = 1
        return d
    if inplace:
        self['phenotype_calls'] = self.apply(lambda x: _get_calls(x['phenotype_label'], phenotypes), 1)
        return
    fixed = self.copy()
    fixed['phenotype_calls'] = fixed.apply(lambda x: _get_calls(x['phenotype_label'], phenotypes), 1)
    return fixed
Set the phenotype_calls according to the phenotype names
15,954
def scored_to_phenotype(self, phenotypes):
    """Convert binary scored calls to mutually exclusive phenotypes.

    If none of the listed phenotypes are set, phenotype_label becomes NaN.
    If more than one is set, a ValueError is raised.

    :param phenotypes: scored-call names to promote to phenotypes
    :return: copy with phenotype_label and phenotype_calls set
    """
    def _apply_score(scored_calls, phenotypes):
        # only consider scored names that are actually requested
        present = sorted(list(set(phenotypes) & set(scored_calls.keys())))
        total = sum([scored_calls[x] for x in present])
        if total > 1:
            raise ValueError("You cant extract phenotypes from scores if they are not mutually exclusive")
        if total == 0:
            return np.nan
        for label in present:
            if scored_calls[label] == 1:
                return label
        raise ValueError("Should have hit an exit criteria already")
    output = self.copy()
    output['phenotype_label'] = output.apply(lambda x: _apply_score(x['scored_calls'], phenotypes), 1)
    output['phenotype_calls'] = output.apply(lambda x:
        dict([(y, 1 if x['phenotype_label'] == y else 0) for y in phenotypes]), 1)
    return output
Convert binary phenotypes to mutually exclusive phenotypes. If none of the phenotypes are set then phenotype_label becomes NaN. If any of the phenotypes are multiply set then a fatal error is raised.
15,955
def issue_and_listen_to_command_history():
    """Issue a command and listen to command history updates for it."""
    def tc_callback(rec):
        # print every command-history record as it arrives
        print('TC:', rec)
    command = processor.issue_command(
        '/YSS/SIMULATOR/SWITCH_VOLTAGE_OFF',
        args={'voltage_num': 1, },
        comment='im a comment')
    command.create_command_history_subscription(on_data=tc_callback)
Listen to command history updates of a single issued command .
15,956
def get_many2many_table(table1, table2):
    """Create a many-to-many association table linking table1 and table2.

    :param table1: base name (without prefix) of the first table
    :param table2: base name (without prefix) of the second table
    :return: SQLAlchemy Table with a foreign-key column per linked table
    """
    table_name = ('{}{}__{}'.format(TABLE_PREFIX, table1, table2))
    return Table(table_name, Base.metadata,
                 Column('{}_id'.format(table1), Integer,
                        ForeignKey('{}{}.id'.format(TABLE_PREFIX, table1))),
                 Column('{}_id'.format(table2), Integer,
                        ForeignKey('{}{}.id'.format(TABLE_PREFIX, table2))))
Creates a many - to - many table that links the given tables table1 and table2 .
15,957
async def search(self, regex):
    """Run the blocking ``_search`` in the default executor and await the match.

    :param regex: pattern handed through to ``self._search``
    :return: whatever ``self._search`` returns
    """
    coro = self._loop.run_in_executor(None, self._search, regex)
    match = await coro
    return match
Wraps the search for a match in an executor and awaits it.
15,958
def show_help(self):
    """Print instructions to stdout on how to answer yes/no questions."""
    help_lines = [
        ("Sorry, not well understood.",),
        ("- use", str(self.yes_input), "to answer 'YES'"),
        ("- use", str(self.no_input), "to answer 'NO'"),
    ]
    for parts in help_lines:
        print(*parts)
Prints to stdout help on how to answer properly
15,959
def re_ask(self, with_help=True):
    """Ask the user the previous question again.

    :param with_help: when True, print the answering help first
    :return: the user's new (stripped) answer
    """
    if with_help:
        self.show_help()
    return self.get_answer(self.last_question)
Re - asks user the last question
15,960
def get_answer(self, question):
    """Prompt the user with *question* and return the stripped response.

    Also remembers the (stripped) question as ``last_question`` so it can
    be re-asked later.
    """
    self.last_question = str(question).strip()
    return input(self.last_question).strip()
Asks user a question then gets user answer
15,961
def get_number(self, question, min_i=float("-inf"), max_i=float("inf"), just_these=None):
    """Ask for a number, re-asking until the answer parses and passes all checks.

    :param question: prompt to show the user
    :param min_i: exclusive lower bound
    :param max_i: exclusive upper bound
    :param just_these: optional whitelist of acceptable values
    :return: the accepted number as a float

    NOTE(review): invalid answers retry via recursion, so pathological input
    could eventually hit the recursion limit.
    """
    try:
        user_answer = self.get_answer(question)
        user_answer = float(user_answer)
        if min_i < user_answer < max_i:
            if just_these:
                if user_answer in just_these:
                    return user_answer
                exc = "Number cannot be accepted. Just these: "
                exc += str(just_these)
                raise Exception(exc)
            return user_answer
        exc = "Number is not within limits. "
        exc += "Min is " + str(min_i) + ". Max is " + str(max_i) + ""
        raise Exception(exc)
    except Exception as exc:
        # any parse/validation failure: explain and ask again
        print(str(exc))
        return self.get_number(self.last_question, min_i=min_i, max_i=max_i,
                               just_these=just_these)
Parses answer and gets number
15,962
def get_list(self, question, splitter=",", at_least=0, at_most=float("inf")):
    """Ask for a delimited list, re-asking until the answer is acceptable.

    :param question: prompt to show the user
    :param splitter: delimiter used to split the answer into items
    :param at_least: exclusive lower bound on the number of items
    :param at_most: exclusive upper bound on the number of items
    :return: list of stripped item strings

    Fix: the retry call previously dropped ``splitter``, silently reverting
    to the default comma after the first invalid answer.
    """
    try:
        user_answer = self.get_answer(question)
        user_answer = user_answer.split(splitter)
        user_answer = [str(item).strip() for item in user_answer]
        if at_least < len(user_answer) < at_most:
            return user_answer
        exc = "List is not correct. "
        exc += "There must be at least " + str(at_least) + " items, "
        exc += "and at most " + str(at_most) + ". "
        exc += "Use '" + str(splitter) + "' to separate items"
        raise Exception(exc)
    except Exception as exc:
        # explain the problem and ask again with the SAME splitter/limits
        print(str(exc))
        return self.get_list(self.last_question, splitter=splitter,
                             at_least=at_least, at_most=at_most)
Parses answer and gets list
15,963
def batlab2sparkle(experiment_data):
    """Reformat batlab experiment data into Sparkle's hierarchical layout.

    Sparkle expects metadata organized per test ('test_1', 'test_2', ...),
    each with a list of stimulus dicts per trace.

    Fix: the original used Python 2 ``print`` statements, which are a
    syntax error under Python 3; they are now ``print()`` calls.

    :param experiment_data: dict parsed from a batlab experiment file
    :return: dict of Sparkle-style metadata
    """
    nsdata = {}
    for attr in ['computername', 'pst_filename', 'title', 'who', 'date', 'program_date']:
        nsdata[attr] = experiment_data[attr]
    for itest, test in enumerate(experiment_data['test']):
        setname = 'test_{}'.format(itest + 1)
        nsdata[setname] = {}
        nsdata[setname]['samplerate_ad'] = test['trace'][0]['samplerate_ad']
        nsdata[setname]['comment'] = test['comment']
        nsdata[setname]['start'] = test['time']
        nsdata[setname]['mode'] = 'finite'
        nsdata[setname]['user_tag'] = ''
        if test['full_testtype'] == 'General Auto Test' and test['testtype'] == 'tone':
            nsdata[setname]['testtype'] = 'Tuning Curve'
        else:
            nsdata[setname]['testtype'] = test['full_testtype']
        stims = []
        for itrace, trace in enumerate(test['trace']):
            try:
                stim = {'samplerate_da': trace['samplerate_da'],
                        'overloaded_attenuation': 0, }
                components = []
                for icomp, component in enumerate(trace['stimulus']):
                    # each component is preceded by a silence filling its delay
                    delay_comp = {'index': [icomp, 0], 'stim_type': 'silence',
                                  'intensity': 0,
                                  'duration': component['delay'] / 1000.,
                                  'start_s': 0, 'risefall': 0}
                    components.append(delay_comp)
                    # batlab times are ms; Sparkle wants seconds
                    comp = {'risefall': component['rise_fall'] / 1000.,
                            'index': [icomp, 1],
                            'duration': component['duration'] / 1000.,
                            'start_s': component['delay'] / 1000.,
                            'intensity': 100 - component['attenuation']}
                    if component['soundtype_name'] == 'vocalization':
                        comp['stim_type'] = 'Vocalization'
                        comp['filename'] = component['vocal_call_file']
                        comp['browsedir'] = ''
                    elif component['soundtype_name'] == 'fmsweep':
                        comp['stim_type'] = 'FM Sweep'
                        usweep = 1 if component['usweep'] else -1
                        comp['start_f'] = component['frequency'] - (component['bandwidth'] / 2) * usweep
                        comp['stop_f'] = component['frequency'] + (component['bandwidth'] / 2) * usweep
                    elif component['soundtype_name'] == 'tone':
                        comp['stim_type'] = 'Pure Tone'
                        comp['frequency'] = component['frequency']
                    else:
                        comp['stim_type'] = component['soundtype_name']
                    components.append(comp)
                stim['components'] = components
                stims.append(stim)
            except TypeError:
                # malformed trace: report it and keep converting the rest
                print('PROBLEM with', itest, itrace)
                print('component', component)
                continue
        nsdata[setname]['stim'] = stims
    return nsdata
Sparkle expects meta data to have a certain hierarchical organization; reformat batlab experiment data to fit.
15,964
def sanitize_type(raw_type):
    """Sanitize a raw type string: strip SAL/driver annotations and normalize.

    :param raw_type: raw parsed type text
    :return: cleaned type string with ``[]`` rewritten as ``*``
    """
    noise_patterns = (
        r'__drv_aliasesMem',
        r'__drv_freesMem',
        r'__drv_strictTypeMatch\(\w+\)',
        r'__out_data_source\(\w+\)',
        r'_In_NLS_string_\(\w+\)',
        r'_Frees_ptr_',
        r'_Frees_ptr_opt_',
        r'opt_',
        r'\(Mem\) ',
    )
    result = get_printable(raw_type).strip()
    for pattern in noise_patterns:
        result = re.sub(pattern, '', result).strip()
    # leading underscores on exception-record pointers are not part of the name
    if result in ('_EXCEPTION_RECORD *', '_EXCEPTION_POINTERS *'):
        result = result.strip('_')
    return result.replace('[]', '*')
Sanitize the raw type string .
15,965
def clean_ret_type(ret_type):
    """Clean the erroneously parsed return type string.

    Collapses the duplicated 'LRESULT LRESULT' artifact and removes known
    spurious qualifiers, logging each removal.
    """
    result = get_printable(ret_type).strip()
    if result == 'LRESULT LRESULT':
        result = 'LRESULT'
    junk_tokens = ('DECLSPEC_NORETURN', 'NTSYSCALLAPI', '__kernel_entry',
                   '__analysis_noreturn', '_Post_equals_last_error_',
                   '_Maybe_raises_SEH_exception_', '_CRT_STDIO_INLINE',
                   '_ACRTIMP')
    for junk in junk_tokens:
        if junk in result:
            result = result.replace(junk, '').strip()
            logging.debug(_('cleaned %s'), junk)
    return result
Clean the erroneously parsed return type.
15,966
async def setup(self):
    """Create the DynamoDB table(s) if they do not exist.

    :return: True if at least one table was created, False otherwise
        (including on any error, which is logged and swallowed)
    """
    try:
        client = await self.db
        response = await client.list_tables()
        created = False
        if self.table_name not in response["TableNames"]:
            logger.info("Creating DynamoDB table [{}]".format(self.table_name))
            resp = await client.create_table(**self.table_schema)
            if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
                logger.info("DynamoDB table [{}] successfully created!".format(self.table_name))
                created = True
        # the control table is optional; only create it when configured
        if self.control_table_name and self.control_table_name not in response["TableNames"]:
            logger.info("Creating DynamoDB control_table [{}]".format(self.control_table_name))
            resp = await client.create_table(**self.control_table_schema)
            if resp.get("ResponseMetadata", {}).get("HTTPStatusCode") == 200:
                logger.info("DynamoDB control table [{}] successfully created!".format(self.control_table_name))
                created = True
        return created
    except Exception as exc:
        # best-effort setup: report failure rather than propagate
        logger.error("[DB] Error when setting up DynamoDB.")
        logger.error(exc)
        return False
Set up the DynamoDB table if it does not exist.
15,967
def maxRange(self):
    """Set the frequency-range spinboxes to the full range of the currently
    selected calibration, as read from the data file.

    Fix: the original used a Python 2 ``print`` statement (a syntax error
    under Python 3) that also duplicated its arguments.
    """
    try:
        x, freqs = self.datafile.get_calibration(
            str(self.ui.calChoiceCmbbx.currentText()), self.calf)
        self.ui.frangeLowSpnbx.setValue(freqs[0])
        self.ui.frangeHighSpnbx.setValue(freqs[-1])
        print('set freq range', freqs[0], freqs[-1])
    except IOError:
        QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
    except KeyError:
        QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
Sets the maximum range for the currently selected calibration, determined from the range of values stored in its file.
15,968
def plotCurve(self):
    """Show the currently selected calibration curve in a separate window."""
    try:
        attenuations, freqs = self.datafile.get_calibration(
            str(self.ui.calChoiceCmbbx.currentText()), self.calf)
        # keep a reference on self so the window is not garbage collected
        self.pw = SimplePlotWidget(freqs, attenuations, parent=self)
        self.pw.setWindowFlags(QtCore.Qt.Window)
        self.pw.setLabels('Frequency', 'Attenuation', 'Calibration Curve')
        self.pw.show()
    except IOError:
        QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
    except KeyError:
        QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
Shows a calibration curve in a separate window of the currently selected calibration
15,969
def values(self):
    """Collect the user's dialog selections.

    :return: dict with keys 'use_calfile', 'calname' and 'frange'
    """
    low = self.ui.frangeLowSpnbx.value()
    high = self.ui.frangeHighSpnbx.value()
    return {
        'use_calfile': self.ui.calfileRadio.isChecked(),
        'calname': str(self.ui.calChoiceCmbbx.currentText()),
        'frange': (low, high),
    }
Gets the values the user input to this dialog
15,970
def conditional_accept(self):
    """Accept the dialog only if all values are valid and congruent.

    Falls back to "no calibration" when a calibration file is selected but
    none is chosen; rejects the dialog (with a warning box) when the file
    cannot be read or the requested frequency range exceeds the file's.
    """
    if self.ui.calfileRadio.isChecked() and str(self.ui.calChoiceCmbbx.currentText()) == '':
        self.ui.noneRadio.setChecked(True)
    if self.ui.calfileRadio.isChecked():
        try:
            x, freqs = self.datafile.get_calibration(
                str(self.ui.calChoiceCmbbx.currentText()), self.calf)
        except IOError:
            QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
            return
        except KeyError:
            QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
            return
        # the requested range must lie inside the calibrated range
        if self.ui.frangeLowSpnbx.value() < freqs[0] or self.ui.frangeHighSpnbx.value() > freqs[-1]:
            QtGui.QMessageBox.warning(self, "Invalid Frequency Range",
                "Provided frequencys outside of calibration file range of {} - {} Hz".format(freqs[0], freqs[-1]))
            return
    self.accept()
Accepts the inputs if all values are valid and congruent . i . e . Valid datafile and frequency range within the given calibration dataset .
15,971
def customized_warning(message, category=UserWarning, filename='', lineno=-1,
                       file=None, line=None):
    """Drop-in replacement for ``warnings.showwarning``.

    Prints a plain 'WARNING: ...' line instead of the default format; the
    extra parameters exist only to satisfy the hook signature.
    """
    text = "WARNING: {0}".format(message)
    print(text)
Customized function to display warnings . Monkey patch for warnings . showwarning .
15,972
def read_cmdline():
    """Parse optional command line arguments.

    :return: dict of parsed arguments (currently only 'config_file')
    """
    info = {
        "prog": "Ellis",
        "description": "%(prog)s version {0}".format(__version__),
        "epilog": "For further help please head over to {0}".format(__url__),
        # hide the auto-generated usage line
        "usage": argparse.SUPPRESS,
    }
    argp = argparse.ArgumentParser(**info)
    argp.add_argument("-c", "--config", dest='config_file', metavar='FILE',
                      help="read configuration from FILE", type=str)
    args = argp.parse_args()
    return vars(args)
Parses optional command line arguments .
15,973
def main():
    """Entry point for Ellis.

    Installs the customized warning hook, reads the command line and starts
    Ellis; a missing-rules config aborts with an error message instead.
    """
    warnings.showwarning = customized_warning
    args = read_cmdline()
    config_file = args['config_file']
    try:
        ellis = Ellis(config_file)
    except NoRuleError:
        msg = ("There are no valid rules in the config file. "
               "Ellis can not run without rules.")
        print_err(msg)
    else:
        ellis.start()
Entry point for Ellis .
15,974
def shape(self, shape=None):
    """Get or set the shape.

    With no argument, return the current shape; otherwise rebuild the
    render buffers for the new shape (buffers must be shifted on change).
    """
    if shape is None:
        return self._shape
    data, color = self.renderer.manager.set_shape(self.model.id, shape)
    self.model.data = data
    self.color = color
    self._shape = shape
We need to shift buffers in order to change shape
15,975
def reply(self, timeout=None):
    """Return the initial reply, emitted before any subscription data.

    :param timeout: seconds to wait for the response
    :raises YamcsError: if the subscription attempt failed

    NOTE(review): ``timeout`` is accepted but not forwarded to
    ``_wait_on_signal`` — confirm whether that is intentional.
    """
    self._wait_on_signal(self._response_received)
    if self._response_exception is not None:
        msg = self._response_exception.message
        raise YamcsError(msg)
    return self._response_reply
Returns the initial reply . This is emitted before any subscription data is emitted . This function raises an exception if the subscription attempt failed .
15,976
async def stop_bridges(self):
    """Cancel all sleep tasks so the bridges can end, then stop each bridge."""
    for task in self.sleep_tasks:
        task.cancel()
    for bridge in self.bridges:
        bridge.stop()
Stop all sleep tasks to allow bridges to end .
15,977
def str_to_date(date: str) -> datetime.datetime:
    """Convert a cbr.ru API date string ('DD.MM.YYYY') to a datetime."""
    day, month, year = date.split('.')
    return datetime.datetime(int(year), int(month), int(day))
Convert a cbr.ru API date string to a Python datetime.
15,978
def load(self, data, size=None):
    """Upload a cffi array into this GL buffer.

    Reuses the existing GPU allocation when the size is unchanged
    (glBufferSubData); otherwise reallocates with glBufferData.

    :param data: cffi array to upload
    :param size: byte size; computed with ``sizeof(data)`` when omitted
    """
    self.bind()
    if size is None:
        size = sizeof(data)
    if size == self.buffer_size:
        glBufferSubData(self.array_type, 0, size, to_raw_pointer(data))
    else:
        glBufferData(self.array_type, size, to_raw_pointer(data), self.draw_type)
        self.buffer_size = size
    self.unbind()
Data is cffi array
15,979
def init():
    """Initialize the pipeline in Maya so everything works.

    Extends the plugin path, tries to initialize maya.standalone, and —
    when running inside interactive Maya instead — builds the Jukebox menu.
    Finally loads all Maya plugins.
    """
    main.init_environment()
    pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), BUILTIN_PLUGIN_PATH))
    os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath
    try:
        maya.standalone.initialize()
        jukeboxmaya.STANDALONE_INITIALIZED = True
    except RuntimeError as e:
        jukeboxmaya.STANDALONE_INITIALIZED = False
        # interactive Maya session: build the Jukebox menu instead
        if str(e) == "maya.standalone may only be used from an external Python interpreter":
            mm = MenuManager.get()
            mainmenu = mm.create_menu("Jukebox", tearOff=True)
            mm.create_menu("Help", parent=mainmenu, command=show_help)
    pmanager = MayaPluginManager.get()
    pmanager.load_plugins()
    load_mayaplugins()
Initialize the pipeline in maya so everything works
15,980
def all_jobs(self):
    """Return every job submitted to the queue — complete, failed, queued
    or running — deduplicated (order unspecified)."""
    unique_jobs = set(self.complete)
    unique_jobs.update(self.failed, self.queue, self.running)
    return list(unique_jobs)
Returns a list of all jobs submitted to the queue complete in - progess or failed .
15,981
def progress(self):
    """Return queue completion as an integer percentage (0 when empty)."""
    total = len(self.all_jobs)
    if total == 0:
        return 0
    finished = total - len(self.active_jobs)
    return int(100 * (float(finished) / total))
Returns the percentage of jobs in the queue that are no longer active, based on the current and total job counts.
15,982
def ready(self, job):
    """Whether *job* can be submitted: the queue has capacity, and every
    dependency has completed with none having failed."""
    if len(self.running) >= self.MAX_CONCURRENT_JOBS:
        return False
    deps = job.depends_on
    if len(deps) == 0:
        return True
    deps_complete = all(j.is_complete() for j in self.active_jobs if j.alias in deps)
    any_dep_failed = any(j.alias in deps for j in self.failed)
    return deps_complete and not any_dep_failed
Determines if the job is ready to be submitted to the queue. It checks if the job depends on any currently running or queued operations.
15,983
def locked(self):
    """Determine if the queue is locked.

    The queue is locked when a failed job is a dependency of any still-active
    job, since that job can never become ready.

    Fix: the original fell off the end and implicitly returned None when
    there were failures that blocked nothing; now it always returns a bool.

    :return: True when a failed job blocks an active job, else False
    """
    if len(self.failed) == 0:
        return False
    for fail in self.failed:
        for job in self.active_jobs:
            if fail.alias in job.depends_on:
                return True
    return False
Determines if the queue is locked .
15,984
def read_args(**kwargs):
    """Read the controlfile parameter.

    Resolution order: explicit 'control' kwarg, config.CONTROLFILE, SQL
    control table ('sql'), DynamoDB control table ('dynamodb'), and finally
    a required --control command line argument.

    :return: argparse.Namespace with a 'control' attribute
    """
    if kwargs.get("control"):
        args = Namespace(control=kwargs["control"])
    elif config.CONTROLFILE:
        args = Namespace(control=config.CONTROLFILE)
    elif config.DB.get("control_table_name"):
        args = Namespace(control="sql")
    elif config.AWS.get("control_table_name"):
        args = Namespace(control="dynamodb")
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("--control", required=True, help="Control file, can be path.")
        args = parser.parse_args()
    return args
Read controlfile parameter .
15,985
def best_assemblyfile(self):
    """For every sample, record the SPAdes contigs.fasta as the best assembly
    when it exists ('NA' otherwise), and the per-sample filtered FASTA path."""
    for sample in self.metadata:
        contigs = os.path.join(sample.general.spadesoutput, 'contigs.fasta')
        sample.general.bestassemblyfile = contigs if os.path.isfile(contigs) else 'NA'
        sample.general.filteredfile = os.path.join(
            sample.general.outputdirectory, '{}.fasta'.format(sample.name))
Determine whether the contigs . fasta output file from SPAdes is present . If not set the . bestassembly attribute to NA
15,986
def assemble(self):
    """Worker loop: pull (sample, command) pairs from assemblequeue and run
    the assembly command, multi-threaded.

    Runs forever (intended for daemon threads); samples whose contigs.fasta
    already exists are skipped.
    """
    threadlock = threading.Lock()
    while True:
        (sample, command) = self.assemblequeue.get()
        if command and not os.path.isfile(os.path.join(sample.general.spadesoutput, 'contigs.fasta')):
            out, err = run_subprocess(command)
            # serialize log writes across worker threads
            threadlock.acquire()
            write_to_logfile(command, command, self.logfile, sample.general.logout, sample.general.logerr, None, None)
            write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
            threadlock.release()
            # NOTE(review): the command appears to be executed a second time
            # here via call() after run_subprocess — confirm intentional.
            call(command, shell=True, stdout=open(os.devnull, 'wb'), stderr=open(os.devnull, 'wb'))
        dotter()
        self.assemblequeue.task_done()
Run the assembly command in a multi - threaded fashion
15,987
def get_diff_amounts(self):
    """Get the list of total diff sizes between consecutive commits.

    :return: list of added+deleted line counts, one per commit pair,
        ordered as yielded by ``repo.iter_commits()``
    """
    diffs = []
    last_commit = None
    for commit in self.repo.iter_commits():
        if last_commit is not None:
            diff = self.get_diff(commit.hexsha, last_commit.hexsha)
            total_changed = diff[Diff.ADD] + diff[Diff.DEL]
            diffs.append(total_changed)
        last_commit = commit
    return diffs
Gets list of total diff
15,988
def get_new_version(self, last_version, last_commit, diff_to_increase_ratio):
    """Compute the new version from the amount of change since last_commit.

    :param last_version: version string of the previous release
    :param last_commit: commit hash of the previous release
    :param diff_to_increase_ratio: changed lines per version increment
    :return: Version increased proportionally to added+deleted lines
    """
    version = Version(last_version)
    diff = self.get_diff(last_commit, self.get_last_commit_hash())
    total_changed = diff[Diff.ADD] + diff[Diff.DEL]
    version.increase_by_changes(total_changed, diff_to_increase_ratio)
    return version
Gets new version
15,989
def get_mime_message(subject, text):
    """Create an HTML MIME message with newlines rendered as <br> tags.

    :param subject: message subject
    :param text: plain text body; each newline becomes a <br>
    :return: MIMEText message
    """
    body = "<html>" + str(text).replace("\n", "<br>") + "</html>"
    message = MIMEText(body, "html")
    message["subject"] = str(subject)
    return message
Creates MIME message
15,990
def send_email(sender, msg, driver):
    """Send the MIME message *msg* through the API client *driver* as *sender*."""
    driver.users().messages().send(userId=sender, body=msg).execute()
Sends email to me with this message
15,991
def get_readme():
    """Get the contents of the README.md file as a string.

    Prefers a reStructuredText conversion via pypandoc; falls back to the
    raw Markdown when pypandoc or the conversion is unavailable.

    Fix: the fallback previously leaked the file handle; it now uses a
    context manager.
    """
    try:
        import pypandoc
        description = pypandoc.convert('README.md', 'rst')
    except (IOError, ImportError):
        with open('README.md') as readme:
            description = readme.read()
    return description
Get the contents of the README.md file as a string.
15,992
def get_absolute_path(*args):
    """Transform pathname components relative to this file's directory into
    an absolute pathname."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, *args)
Transform relative pathnames into absolute pathnames .
15,993
def trim(self, key):
    """Shrink dataset *key* to its cursor position, dropping the empty
    preallocated rows beyond it."""
    current_index = self.meta[key]['cursor']
    self.hdf5[key].resize(current_index, axis=0)
Removes empty rows from the dataset by resizing it down to the current cursor position.
15,994
def get_channel_page(self):
    """Fetch and return the HTML source of this channel's YouTube user page."""
    channel_url = YOUTUBE_USER_BASE_URL + self.channel_name
    source_page = Webpage(channel_url).get_html_source()
    return source_page
Fetches source page
15,995
def get_feed_url_from_video(video_url):
    """Scrape the channel id from a video page and build the channel feed URL.

    :param video_url: URL of any video belonging to the channel
    :return: the channel's feed URL
    """
    web_page = Webpage(video_url)
    web_page.get_html_source()
    # the first yt-user-info div links to /channel/<id>
    channel_id = web_page.soup.find_all("div", {"class": "yt-user-info"})[0].a["href"]
    channel_id = str(channel_id).strip().replace("/channel/", "")
    return YoutubeChannel.get_feed_url_from_id(channel_id)
Gets channel id and then creates feed url
15,996
def process_file(path):
    """Open a single labeled image at *path* and return the needed
    information as a dictionary.

    :param path: path to a FITS file with labels in extension 1
    :return: dict with filename, trainer, label/observation dates and a
        pixel count per theme
    """
    info = dict()
    with fits.open(path) as hdu:
        head = hdu[0].header
        data = hdu[0].data
        # extension 1 maps numeric label values to theme names
        labels = {theme: value for value, theme in list(hdu[1].data)}
        info['filename'] = os.path.basename(path)
        info['trainer'] = head['expert']
        info['date-label'] = dateparser.parse(head['date-lab'])
        info['date-observation'] = dateparser.parse(head['date-end'])
        for theme in themes:
            info[theme + "_count"] = np.sum(data == labels[theme])
    return info
Open a single labeled image at path and return the needed information as a dictionary.
15,997
def plot_counts(df, theme):
    """Plot the pixel counts of *theme* over observation time from *df*.

    :param df: database frame with 'date-observation' and '<theme>_count'
    :param theme: theme name (underscores become spaces in the axis label)
    """
    dates, counts = df['date-observation'], df[theme + "_count"]
    fig, ax = plt.subplots()
    ax.set_ylabel("{} pixel counts".format(" ".join(theme.split("_"))))
    ax.set_xlabel("observation date")
    ax.plot(dates, counts, '.')
    # slant the date labels so they stay readable
    fig.autofmt_xdate()
    plt.show()
plot the counts of a given theme from a created database over time
15,998
def deformat(value):
    """Remove non-alphanumeric characters: return *value* with every
    character listed in the module-level ``delchars`` dropped."""
    return "".join(ch for ch in value if ch not in delchars)
REMOVE NON - ALPHANUMERIC CHARACTERS
15,999
def _expand(template, seq):
    """Recursively expand *template* against *seq*.

    ``seq`` is a tuple of objects in path order into the data tree; the
    innermost (last) element is the current scope.

    Text templates expand directly; data templates iterate the rows named
    by their 'from' attribute; lists concatenate their expansions.
    """
    if is_text(template):
        return _simple_expand(template, seq)
    elif is_data(template):
        template = wrap(template)
        assert template["from"], "Expecting template to have 'from' attribute"
        assert template.template, "Expecting template to have 'template' attribute"
        # rows come from the current (innermost) scope
        data = seq[-1][template["from"]]
        output = []
        for d in data:
            # push the row onto the scope path and recurse
            s = seq + (d,)
            output.append(_expand(template.template, s))
        return coalesce(template.separator, "").join(output)
    elif is_list(template):
        return "".join(_expand(t, seq) for t in template)
    else:
        if not _Log:
            _late_import()
        _Log.error("can not handle")
seq IS TUPLE OF OBJECTS IN PATH ORDER INTO THE DATA TREE